text stringlengths 8 6.05M |
|---|
# Given an array nums of integers,
# return how many of them contain an even number of digits.
class Solution:
    def findNumbers(self, nums):
        """Return how many integers in *nums* have an even digit count."""
        even_digit_count = 0
        for number in nums:
            if len(str(number)) % 2 == 0:
                even_digit_count += 1
        return even_digit_count
if __name__ == '__main__':
    # Bug fix: instantiate Solution instead of passing the class object
    # itself as `self` (Solution.findNumbers(Solution, ...)).
    test_input = [12, 345, 2, 6, 7896]
    print(Solution().findNumbers(test_input))
|
import pickle
from redis import StrictRedis
from redis.client import StrictPipeline
from .utils import CacheKey
from .decorators import using_cache
def _unpickle(data):
try:
return pickle.loads(data) if data else None
except pickle.UnpicklingError as err:
return data
def _pickle(data):
    """Serialize *data* to pickled bytes for storage in Redis."""
    return pickle.dumps(data)
class CacheProxy:
    """Pickling cache facade over a StrictRedis client.

    Keys are coerced to ``str``; most values are pickled on write and
    unpickled on read so arbitrary Python objects can be cached.
    """

    def __init__(self, app):
        """Configure the Redis client from the app's config mapping."""
        redis_config = {
            'host': app.config.get('REDIS_HOST', 'localhost'),
            'port': app.config.get('REDIS_PORT', 6379),
            'db': app.config.get('REDIS_DB', 0),
            'password': app.config.get('REDIS_PASS', None),
        }
        # Bug fix: prefer the correctly spelled key, but keep honouring the
        # historical misspelling 'REDIS_DEFAUTL_EX' so existing configs work.
        self.default_ex = app.config.get(
            'REDIS_DEFAULT_EX', app.config.get('REDIS_DEFAUTL_EX', None))
        self.client = StrictRedis(**redis_config)

    # transactions
    def pipeline(self, *args, **kwargs) -> StrictPipeline:
        """Return a pipeline for batched/transactional command execution."""
        return self.client.pipeline(*args, **kwargs)

    # key/value
    def get(self, name):
        """Fetch and unpickle the value stored under *name* (None if absent)."""
        return _unpickle(self.client.get(str(name)))

    def set(self, name, value, ex=None, px=None, nx=False, xx=False):
        """Pickle *value* and store it; *ex* falls back to the default expiry."""
        # Consistency: use the module-level _pickle helper like rpush does.
        return self.client.set(str(name), _pickle(value),
                               ex or self.default_ex, px, nx, xx)

    def delete(self, *names):
        """Delete all of *names*; returns the number of keys removed."""
        return self.client.delete(*map(str, names))

    def expire(self, name, time):
        """Set a time-to-live (seconds) on *name*."""
        return self.client.expire(str(name), time)

    def persist(self, name):
        """Remove any expiry from *name*."""
        return self.client.persist(str(name))

    def ttl(self, name):
        """Return the remaining time-to-live of *name*."""
        return self.client.ttl(str(name))

    def keys(self, pattern='*'):
        """List keys matching *pattern*, decoded to str."""
        return [k.decode('utf-8')
                for k in self.client.keys(str(CacheKey(pattern)))]

    # bit operations
    def bitcount(self, name, start=None, end=None):
        """Count set bits in the string stored at *name*."""
        return self.client.bitcount(str(name), start=start, end=end)

    def setbit(self, name, offset, value):
        """Set the bit at *offset* of *name* to *value*."""
        return self.client.setbit(str(name), offset=offset, value=value)

    # numeric counters (stored as plain strings so incr/decr work on them)
    def nget(self, name) -> int:
        """Fetch an integer counter; None when the key does not exist."""
        rst = self.client.get(str(name))
        return int(rst) if rst is not None else None

    def nset(self, name, value: int, ex=None, px=None, nx=False, xx=False):
        """Store an integer as its decimal string form (not pickled)."""
        return self.client.set(str(name), str(value),
                               ex or self.default_ex, px, nx, xx)

    def ndecr(self, name, amount: int = 1) -> int:
        """Atomically decrement the counter at *name* by *amount*."""
        return self.client.decr(str(name), amount)

    def nincr(self, name, amount: int = 1) -> int:
        """Atomically increment the counter at *name* by *amount*."""
        return int(self.client.incr(str(name), amount))

    # lists (elements pickled individually)
    def lrange(self, name, start, end) -> tuple:
        """Return the unpickled slice [start, end] of the list at *name*."""
        return tuple(map(_unpickle, self.client.lrange(str(name), start, end)))

    def rpush(self, name, *values):
        """Append pickled *values* to the right end of the list at *name*."""
        return self.client.rpush(str(name), *map(_pickle, values))

    def lpop(self, name):
        """Pop and unpickle the leftmost element of the list at *name*."""
        return _unpickle(self.client.lpop(str(name)))

    def llen(self, name):
        """Return the length of the list at *name*."""
        return self.client.llen(str(name))
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
import textwrap
import pytest
import yaml
from pants.backend.javascript.goals import lockfile
from pants.backend.javascript.goals.lockfile import (
GeneratePackageLockJsonFile,
KnownPackageJsonUserResolveNamesRequest,
RequestedPackageJsonUserResolveNames,
)
from pants.backend.javascript.nodejs_project import AllNodeJSProjects
from pants.backend.javascript.package_json import (
AllPackageJson,
PackageJsonForGlobs,
PackageJsonTarget,
)
from pants.backend.javascript.subsystems.nodejs import UserChosenNodeJSResolveAliases
from pants.core.goals.generate_lockfiles import (
GenerateLockfileResult,
KnownUserResolveNames,
UserGenerateLockfiles,
)
from pants.core.target_types import FileTarget
from pants.engine.fs import DigestContents, PathGlobs
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    """RuleRunner wired with the lockfile goal rules and the QueryRules used below."""
    rule_runner = RuleRunner(
        rules=[
            *lockfile.rules(),
            QueryRule(
                KnownUserResolveNames, (KnownPackageJsonUserResolveNamesRequest, AllNodeJSProjects)
            ),
            QueryRule(AllNodeJSProjects, ()),
            QueryRule(PackageJsonForGlobs, (PathGlobs,)),
            QueryRule(AllPackageJson, (PathGlobs,)),
            QueryRule(GenerateLockfileResult, (GeneratePackageLockJsonFile,)),
            QueryRule(
                UserGenerateLockfiles,
                (
                    RequestedPackageJsonUserResolveNames,
                    AllNodeJSProjects,
                    UserChosenNodeJSResolveAliases,
                ),
            ),
        ],
        target_types=[PackageJsonTarget, FileTarget],
    )
    # PATH must be inherited so the tests can invoke npm/pnpm/yarn binaries.
    rule_runner.set_options([], env_inherit={"PATH"})
    return rule_runner
def given_package_with_name(name: str) -> str:
    """Return minimal package.json content for a package called *name*."""
    manifest = {"name": name, "version": "0.0.1"}
    return json.dumps(manifest)
def given_package_with_workspaces(
    name: str, version: str, dependencies: dict[str, str] | None = None, *workspaces: str
) -> str:
    """Return package.json content for a private workspace-root package."""
    manifest = {
        "name": name,
        "version": version,
        "private": True,
        "dependencies": {} if dependencies is None else dependencies,
        "workspaces": [*workspaces],
    }
    return json.dumps(manifest)
def test_resolves_are_dotted_package_paths(rule_runner: RuleRunner) -> None:
    """Default resolve names come from the package dirs' paths, dotted."""
    rule_runner.write_files(
        {
            "src/js/foo/BUILD": "package_json()",
            "src/js/foo/package.json": given_package_with_name("ham"),
            "src/js/bar/BUILD": "package_json()",
            "src/js/bar/package.json": given_package_with_name("spam"),
        }
    )
    projects = rule_runner.request(AllNodeJSProjects, [])
    resolves = rule_runner.request(
        KnownUserResolveNames, (projects, KnownPackageJsonUserResolveNamesRequest())
    )
    # Names derive from the source-root-relative paths, not the package names.
    assert set(resolves.names) == {"js.foo", "js.bar"}
def test_user_can_override_resolve_aliases(rule_runner: RuleRunner) -> None:
    """A [nodejs].resolves alias replaces the default dotted-path name."""
    rule_runner.write_files(
        {
            "src/js/foo/BUILD": "package_json()",
            "src/js/foo/package.json": given_package_with_name("ham"),
            "src/js/bar/BUILD": "package_json()",
            "src/js/bar/package.json": given_package_with_name("spam"),
        }
    )
    projects = rule_runner.request(AllNodeJSProjects, [])
    rule_runner.set_options(["--nodejs-resolves={'user:1': 'src/js/foo/package-lock.json'}"])
    resolves = rule_runner.request(
        KnownUserResolveNames, (projects, KnownPackageJsonUserResolveNamesRequest())
    )
    # Only the aliased lockfile changes name; the other keeps its default.
    assert set(resolves.names) == {"user:1", "js.bar"}
def test_user_override_non_existing_resolve_is_an_error(rule_runner: RuleRunner) -> None:
    """Aliasing a lockfile path that matches no known project fails loudly."""
    rule_runner.write_files(
        {
            "src/js/foo/BUILD": "package_json()",
            "src/js/foo/package.json": given_package_with_name("ham"),
        }
    )
    projects = rule_runner.request(AllNodeJSProjects, [])
    rule_runner.set_options(["--nodejs-resolves={'user:1': 'does/not/exist/package-lock.json'}"])
    with pytest.raises(ExecutionError):
        rule_runner.request(
            KnownUserResolveNames, (projects, KnownPackageJsonUserResolveNamesRequest())
        )
@pytest.mark.parametrize(
    "alias_args, expected_resolve",
    [
        pytest.param(
            ["--nodejs-resolves={'my-resolve': 'src/js/package-lock.json'}"],
            "my-resolve",
            id="Aliased resolve",
        ),
        pytest.param([""], "js", id="Default resolve"),
    ],
)
def test_generates_lockfile_with_expected_resolve_name(
    rule_runner: RuleRunner, alias_args: list[str], expected_resolve: str
) -> None:
    """The generated lockfile is tagged with the (possibly aliased) resolve name."""
    rule_runner.write_files(
        {
            "src/js/BUILD": "package_json()",
            "src/js/package.json": given_package_with_name("ham"),
        }
    )
    projects = rule_runner.request(AllNodeJSProjects, [])
    rule_runner.set_options(alias_args)
    [lockfile] = rule_runner.request(
        UserGenerateLockfiles,
        (
            projects,
            RequestedPackageJsonUserResolveNames((expected_resolve,)),
            UserChosenNodeJSResolveAliases(),
        ),
    )
    assert lockfile.resolve_name == expected_resolve
def test_generates_lockfile_for_package_json_project(rule_runner: RuleRunner) -> None:
    """npm lockfile generation for a single-package project."""
    rule_runner.write_files(
        {
            "src/js/BUILD": "package_json()",
            "src/js/package.json": given_package_with_name("ham"),
        }
    )
    [project] = rule_runner.request(AllNodeJSProjects, [])
    lockfile = rule_runner.request(
        GenerateLockfileResult,
        (
            GeneratePackageLockJsonFile(
                resolve_name="js",
                lockfile_dest="src/js/package-lock.json",
                project=project,
                diff=False,
            ),
        ),
    )
    digest_contents = rule_runner.request(DigestContents, [lockfile.digest])
    # NOTE(review): assumes the `npm` on PATH emits lockfileVersion 2 output.
    assert json.loads(digest_contents[0].content) == {
        "name": "ham",
        "version": "0.0.1",
        "lockfileVersion": 2,
        "requires": True,
        "packages": {"": {"name": "ham", "version": "0.0.1"}},
    }
def test_generates_lockfile_for_npm_package_json_workspace(rule_runner: RuleRunner) -> None:
    """npm lockfile generation for a workspace root with one member package."""
    rule_runner.write_files(
        {
            "src/js/BUILD": "package_json()",
            "src/js/package.json": given_package_with_workspaces("ham", "1.0.0", None, "a"),
            "src/js/a/BUILD": "package_json()",
            "src/js/a/package.json": given_package_with_workspaces("spam", "0.1.0"),
        }
    )
    [project] = rule_runner.request(AllNodeJSProjects, [])
    lockfile = rule_runner.request(
        GenerateLockfileResult,
        (
            GeneratePackageLockJsonFile(
                resolve_name="js",
                lockfile_dest="src/js/package-lock.json",
                project=project,
                diff=False,
            ),
        ),
    )
    digest_contents = rule_runner.request(DigestContents, [lockfile.digest])
    # The member package appears both as a workspace entry and a symlink.
    assert json.loads(digest_contents[0].content) == {
        "name": "ham",
        "version": "1.0.0",
        "lockfileVersion": 2,
        "requires": True,
        "dependencies": {"spam": {"version": "file:a"}},
        "packages": {
            "": {"name": "ham", "version": "1.0.0", "workspaces": ["a"]},
            "a": {"name": "spam", "version": "0.1.0"},
            "node_modules/spam": {"link": True, "resolved": "a"},
        },
    }
def test_generates_lockfile_for_pnpm_package_json_workspace(rule_runner: RuleRunner) -> None:
    """pnpm lockfile generation for a workspace declared via pnpm-workspace.yaml."""
    rule_runner.set_options(["--nodejs-package-manager=pnpm"], env_inherit={"PATH"})
    rule_runner.write_files(
        {
            "src/js/BUILD": "package_json()",
            "src/js/pnpm-workspace.yaml": "",
            "src/js/package.json": given_package_with_workspaces(
                "ham", "1.0.0", {"spam": "workspace:*"}
            ),
            "src/js/a/BUILD": "package_json()",
            "src/js/a/package.json": given_package_with_workspaces("spam", "0.1.0"),
        }
    )
    [project] = rule_runner.request(AllNodeJSProjects, [])
    lockfile = rule_runner.request(
        GenerateLockfileResult,
        (
            GeneratePackageLockJsonFile(
                resolve_name="js",
                lockfile_dest="src/js/pnpm-lock.yaml",
                project=project,
                diff=False,
            ),
        ),
    )
    digest_contents = rule_runner.request(DigestContents, [lockfile.digest])
    # NOTE(review): assumes a pnpm version emitting lockfileVersion 5.3.
    assert yaml.safe_load(digest_contents[0].content) == {
        "importers": {
            ".": {"dependencies": {"spam": "link:a"}, "specifiers": {"spam": "workspace:*"}},
            "a": {"specifiers": {}},
        },
        "lockfileVersion": 5.3,
    }
def test_generates_lockfile_for_yarn_package_json_workspace(rule_runner: RuleRunner) -> None:
    """yarn lockfile generation for a workspace; only the v1 header is asserted."""
    rule_runner.set_options(["--nodejs-package-manager=yarn"], env_inherit={"PATH"})
    rule_runner.write_files(
        {
            "src/js/BUILD": "package_json()",
            "src/js/package.json": given_package_with_workspaces(
                "ham", "1.0.0", {"spam": "*"}, "a"
            ),
            "src/js/a/BUILD": "package_json()",
            "src/js/a/package.json": given_package_with_workspaces("spam", "0.1.0"),
        }
    )
    [project] = rule_runner.request(AllNodeJSProjects, [])
    lockfile = rule_runner.request(
        GenerateLockfileResult,
        (
            GeneratePackageLockJsonFile(
                resolve_name="js",
                lockfile_dest="src/js/yarn.lock",
                project=project,
                diff=False,
            ),
        ),
    )
    digest_contents = rule_runner.request(DigestContents, [lockfile.digest])
    # Workspace-only projects produce an otherwise-empty v1 lockfile.
    assert (
        digest_contents[0].content.decode().strip()
        == textwrap.dedent(
            """\
            # THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
            # yarn lockfile v1
            """
        ).strip()
    )
|
class Monster(object):
    """Base class for battling monsters.

    Subclasses must define class attributes ``initial_hit_points`` and
    ``attack_points`` plus a ``describe_attack(other)`` method.
    """

    def __init__(self, name):
        self.name = name
        # Species is derived from the concrete subclass's name.
        self.species = type(self).__name__
        self.hit_points = self.initial_hit_points

    def describe(self):
        """Print a one-line status summary for this monster."""
        if self.is_alive():
            print('{} is a {} with {} hit points'.format(self.name, self.species, self.hit_points))
        else:
            print('{} is a dead {}'.format(self.name, self.species))

    def is_alive(self):
        """Return True while the monster has hit points left."""
        return self.hit_points > 0

    def damage(self, damage_points):
        """Subtract *damage_points*, announcing death when HP drops to zero."""
        if self.is_alive():
            self.hit_points -= damage_points
            if not self.is_alive():
                print('{} is dead'.format(self.name))
        else:
            print('{} is already dead'.format(self.name))

    def heal(self):
        """Restore a living monster to full hit points."""
        if self.is_alive():
            self.hit_points = self.initial_hit_points
        else:
            # Bug fix: message previously read 'A dead self cannot be healed'.
            print('A dead {} cannot be healed'.format(self.species))

    def attack(self, other):
        """Attack *other* if this monster is still alive."""
        if self.is_alive():
            self.describe_attack(other)
            other.damage(self.attack_points)
        else:
            # Bug fix: message previously read 'A dead self cannot attack'.
            print('A dead {} cannot attack'.format(self.species))
class Giant(Monster):
    # Combat stats consumed by the Monster base class.
    initial_hit_points = 10
    attack_points = 3
    def describe_attack(self, other):
        """Print this giant's attack flavour text."""
        print('{} swings a club at {}'.format(self.name, other.name))
class Dragon(Monster):
    # Combat stats consumed by the Monster base class.
    initial_hit_points = 20
    attack_points = 4
    def describe_attack(self, other):
        """Print this dragon's attack flavour text."""
        print('{} breathes fire on {}'.format(self.name, other.name))
class Wyvern(Monster):
    """A wyvern: mid-range hit points but the hardest hitter of the three."""
    initial_hit_points = 15
    attack_points = 5

    def describe_attack(self, other):
        # Bug fix: referenced undefined names `monster`/`other_monster`,
        # which raised NameError whenever a wyvern attacked.
        print('{} swipes at {} with its tail'.format(self.name, other.name))
if __name__ == '__main__':
    # Demo battle: one monster of each species.
    # NOTE(review): wallace is created but never used in the demo.
    gerald = Giant('Gerald')
    debbie = Dragon('Debbie')
    wallace = Wyvern('Wallace')
    gerald.describe()
    debbie.describe()
    # The giant and dragon trade three blows each.
    debbie.attack(gerald)
    gerald.attack(debbie)
    debbie.attack(gerald)
    gerald.attack(debbie)
    debbie.attack(gerald)
    gerald.attack(debbie)
    gerald.describe()
    debbie.describe()
|
import os

# Demonstrate the os.path inspection helpers.
# Fixed: Python 2 print statements converted to Python 3 print() calls.
TARGET = 'ch_05_11.py'

print(os.path.exists('/etc/passwd'))
print(os.path.isfile(TARGET))
print(os.path.isdir(TARGET))
print(os.path.islink(TARGET))
print(os.path.realpath(TARGET))
# NOTE(review): getsize/getmtime raise OSError if TARGET does not exist
# (unchanged from the original behaviour).
print(os.path.getsize(TARGET))
print(os.path.getmtime(TARGET))

import time
print(time.ctime(os.path.getmtime(TARGET)))
from setuptools import setup

# Bug fix: read the long description with an explicit encoding and close
# the file handle promptly (it was previously opened and never closed).
with open('README.md', encoding='utf-8') as readme:
    longDescription = readme.read()

setup(
    name='ditherpy',
    version='1.0.0',
    description='Image dithering made easy with a diverse set of styles',
    author='Gabriel Victor',
    author_email='gabrielvcf@outlook.com',
    url='https://github.com/gabrielvictorcf/ditherpy',
    download_url='https://github.com/gabrielvictorcf/ditherpy',
    long_description=longDescription,
    long_description_content_type='text/markdown',
    classifiers=[
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
    ],
    keywords='ditherpy image-dithering dither',
    license='MIT',
    packages=['ditherpy'],
    entry_points={'console_scripts': ['ditherpy=ditherpy.__main__:main']},
    include_package_data=True,
    install_requires=[
        'pillow >= 8.1.2'
    ],
    python_requires='>=3.6',
)
import json
class GameTemp:
    """Hold long-term but temporary game state.

    In essence a place for any 'global' info that needs to live for the
    duration of a game run (current room, player, save helpers, ...).
    Fixed: the original docstring used malformed quintuple quotes.
    """

    def __init__(self, game_name):
        """Load <game_name>/settings.json and cache the configured game path."""
        with open(game_name + '/settings.json') as file:
            game_settings = json.load(file)
        self.game_path = game_settings['path']
        self.current_room = None  # set once the player enters a room
        self.player = None  # set when a player object is created
|
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  'targets': [
    {
      'target_name': 'link-objects',
      'type': 'executable',
      'actions': [
        {
          'action_name': 'build extra object',
          # Compile extra.c outside the normal sources list...
          'inputs': ['extra.c'],
          'outputs': ['extra.o'],
          'action': ['gcc', '-o', 'extra.o', '-c', 'extra.c'],
          # ...and feed the produced object file back into the link step.
          'process_outputs_as_sources': 1,
        },
      ],
      'sources': [
        'base.c',
      ],
    },
  ],
}
|
#!/usr/bin/env python
# encoding: utf-8
"""
Created by 'mattw2' on 2014-11-27.
Copyright (c) 2014 'mattw2'. All rights reserved.
python run.py MongoNHMConditionTask --local-scheduler --date 20141127
"""
from ke2mongo.tasks.mongo import MongoTask
class MongoNHMConditionTask(MongoTask):
    """
    Import NHM Condition Export file into MongoDB
    """
    # Name of the KE EMu export module this task ingests.
    module = 'enhmcondition'
|
# coding: utf-8
import os
import sys
import string
from shlex import shlex
# Useful for very coarse version differentiation.
PY3 = sys.version_info[0] == 3
if PY3:
    from configparser import ConfigParser
    text_type = str
else:
    # Python 2: SafeConfigParser mirrors py3 ConfigParser behaviour.
    from ConfigParser import SafeConfigParser as ConfigParser
    text_type = unicode
class UndefinedValueError(Exception):
    """Raised when an option is missing and no default was supplied."""
    pass


class Undefined(object):
    """Sentinel type standing in for 'no value was provided'."""
    pass


# Shared sentinel instance: distinguishes "no default" from default=None.
undefined = Undefined()


class Config(object):
    """
    Handle .env file format used by Foreman.
    """

    _BOOLEANS = {'1': True, 'yes': True, 'true': True, 'on': True,
                 '0': False, 'no': False, 'false': False, 'off': False}

    def __init__(self, repository):
        self.repository = repository

    def _cast_boolean(self, value):
        """
        Helper to convert config values to boolean as ConfigParser do.
        """
        normalized = str(value).lower()
        if normalized in self._BOOLEANS:
            return self._BOOLEANS[normalized]
        raise ValueError('Not a boolean: %s' % str(value))

    def get(self, option, default=undefined, cast=undefined):
        """
        Return the value for option or default if defined.

        Raises UndefinedValueError when the option is absent and no
        default was given; *cast* is applied to the returned value.
        """
        present = option in self.repository
        value = self.repository.get(option) if present else default
        if isinstance(value, Undefined):
            raise UndefinedValueError('%s option not found and default value was not defined.' % option)
        if isinstance(cast, Undefined):
            return value
        if cast is bool:
            return self._cast_boolean(value)
        return cast(value)

    def __call__(self, *args, **kwargs):
        """
        Convenient shortcut to get.
        """
        return self.get(*args, **kwargs)
class RepositoryBase(object):
    """Interface for option sources; subclasses implement the three hooks."""

    def __init__(self, source):
        raise NotImplementedError

    def __contains__(self, key):
        raise NotImplementedError

    def get(self, key):
        raise NotImplementedError


class RepositoryIni(RepositoryBase):
    """
    Retrieves option keys from .ini files.
    """
    SECTION = 'settings'

    def __init__(self, source):
        self.parser = ConfigParser()
        # Bug fix: the file handle was previously opened and never closed.
        with open(source) as ini_file:
            self.parser.readfp(ini_file)

    def __contains__(self, key):
        # Environment variables take precedence over the ini file.
        return (key in os.environ or
                self.parser.has_option(self.SECTION, key))

    def get(self, key):
        return (os.environ.get(key) or
                self.parser.get(self.SECTION, key))


class RepositoryEnv(RepositoryBase):
    """
    Retrieves option keys from .env files with fall back to os.environ.
    """
    def __init__(self, source):
        self.data = {}
        # Bug fix: close the .env file instead of leaking the handle.
        with open(source) as env_file:
            for line in env_file:
                line = line.strip()
                if not line or line.startswith('#') or '=' not in line:
                    continue
                k, v = line.split('=', 1)
                k = k.strip()
                v = v.strip().strip('\'"')
                self.data[k] = v

    def __contains__(self, key):
        return key in os.environ or key in self.data

    def get(self, key):
        # NOTE(review): a present-but-empty environment variable falls
        # through to the .env file -- preserved, but possibly surprising.
        return os.environ.get(key) or self.data[key]


class RepositoryShell(RepositoryBase):
    """
    Retrieves option keys from os.environ.
    """
    def __init__(self, source=None):
        pass

    def __contains__(self, key):
        return key in os.environ

    def get(self, key):
        return os.environ[key]
class AutoConfig(object):
    """
    Autodetects the config file and type.
    """
    # Maps supported config filenames to their repository implementation.
    SUPPORTED = {
        'settings.ini': RepositoryIni,
        '.env': RepositoryEnv,
    }
    def __init__(self):
        self.config = None
    def _find_file(self, path):
        # look for all files in the current path
        for configfile in self.SUPPORTED:
            filename = os.path.join(path, configfile)
            if os.path.isfile(filename):
                return filename
        # search the parent
        # NOTE(review): stops at os.path.sep ('/') -- the root check may
        # not hold for Windows drive paths; confirm if Windows matters.
        parent = os.path.dirname(path)
        if parent and parent != os.path.sep:
            return self._find_file(parent)
        # reached root without finding any files.
        return ''
    def _load(self, path):
        # Avoid unintended permission errors
        try:
            filename = self._find_file(path)
        except Exception:
            filename = ''
        Repository = self.SUPPORTED.get(os.path.basename(filename))
        if not Repository:
            # No recognised config file: fall back to plain os.environ.
            Repository = RepositoryShell
        self.config = Config(Repository(filename))
    def _caller_path(self):
        # MAGIC! Get the caller's module path.
        # Two frames up: _caller_path -> __call__ -> the caller's module.
        frame = sys._getframe()
        path = os.path.dirname(frame.f_back.f_back.f_code.co_filename)
        return path
    def __call__(self, *args, **kwargs):
        # Lazily locate and load the config on first use.
        if not self.config:
            self._load(self._caller_path())
        return self.config(*args, **kwargs)
# A pre-instantiated AutoConfig to improve decouple's usability:
# now just import config and start using it with no configuration.
config = AutoConfig()
# Helpers
class Csv(object):
    """
    Produces a csv parser that return a list of transformed elements.
    """

    def __init__(self, cast=text_type, delimiter=',', strip=string.whitespace):
        """
        Parameters:
        cast -- callable applied to each item just before it is added to the list.
        delimiter -- string of delimiter chars handed to shlex.
        strip -- string of non-relevant chars stripped from each item after the split.
        """
        self.cast = cast
        self.delimiter = delimiter
        self.strip = strip

    def __call__(self, value):
        """Split *value* on the delimiters and cast each stripped piece."""
        tokenizer = shlex(value, posix=True)
        tokenizer.whitespace = self.delimiter
        tokenizer.whitespace_split = True
        return [self.cast(token.strip(self.strip)) for token in tokenizer]
|
#!/usr/bin/python
from nsapi import api_request,CTE
from ns import id_str
from itertools import ifilterfalse
import json,urllib2
from getpass import getuser
try:
    from os import uname
except:
    # Fallback for platforms without os.uname (e.g. Windows).
    def uname():
        return "Non-UNIX system?"
from socket import gethostname
import logging,sys
logging.basicConfig(format="%(asctime)s %(message)s")
if '-v' in sys.argv or '--verbose' in sys.argv:
    logging.getLogger().setLevel(logging.DEBUG)
# Build a descriptive User-Agent identifying this script to the API.
uname_tuple=uname()
uname_str=str((uname_tuple[0],uname_tuple[2],uname_tuple[4]))
user_agent=getuser()+'@'+gethostname()+" WA members validator ("+uname_str+")"
# Memoization caches for the per-nation API lookups below.
nat_is_wa = {}
nat_exists = {}
def check_wa_status(nat):
    # Return True when *nat* is a WA member, caching the result in
    # nat_is_wa; also records in nat_exists whether the nation exists
    # at all (CTE = "ceased to exist").
    if nat not in nat_is_wa:
        try:
            natxml = None
            try:
                natxml = api_request({'nation':nat,'q':'wa'},user_agent)
                nat_is_wa[nat] = natxml.find('UNSTATUS').text != 'Non-member'
            finally:
                # Drop the parsed XML promptly to keep memory usage low.
                del natxml
            nat_exists[nat] = True
        except CTE:
            # The API reports the nation has ceased to exist.
            nat_is_wa[nat] = False
            nat_exists[nat] = False
    return nat_is_wa[nat]
def exists(nat):
    # Whether *nat* exists, reusing the cache filled by check_wa_status.
    if nat not in nat_exists:
        check_wa_status(nat)
    return nat_exists[nat]
# Fetch the full current WA member list once up front.
wa_xml = api_request({'wa':'1','q':'members'},user_agent)
wa_members = wa_xml.find('MEMBERS').text.split(',')
del wa_xml
def page(nat):
    # The site lists 15 nations per page; return the page's start offset.
    i=wa_members.index(nat)
    return i-i%15
__page_url__fmts="http://www.nationstates.net/page=list_nations/un=UN?start=%d"
def page_url(nat):
    # Link to the member-list page that should contain *nat*.
    return __page_url__fmts%page(nat)
wa_member_set=set(wa_members)
# I'm relying on some other code I run nightly to generate a list of nations in
# the WA as of the last major update which I run right after each major update
# ends here, which makes a list of WA members in update order at the below URL.
major_wa_members = map(id_str,json.load(urllib2.urlopen('http://www.thenorthpacific.org/api/wa.json')))
major_wa_member_set=set(major_wa_members)
# Nations that joined / left since the last major update snapshot.
new_wa_set = wa_member_set - major_wa_member_set
ex_wa_set = major_wa_member_set - wa_member_set
erroneously_listed = []
erroneously_not_listed = []
# Cross-check each delta nation against the live per-nation API.
for nat in ifilterfalse(check_wa_status,new_wa_set):
    erroneously_listed.append(nat)
erroneously_not_listed = filter(check_wa_status,ex_wa_set)
# Emit a forum-ready BBCode report of the discrepancies.
print "The following nations listed in http://www.nationstates.net/cgi-bin/api.cgi/wa=1/q=members are not WA nations:"
print "[spoiler=%d nations erroneously listed][list=1]" % len(erroneously_listed)
for nat in erroneously_listed:
    if exists(nat):
        cte=""
    else:
        cte="(CTE)"
    print "[*][nation]%s[/nation] %s [[url=%s]page[/url]]"%(nat,cte,page_url(nat))
print "[/list][/spoiler]"
print "Worse, the following WA nations are not listed in [url]http://www.nationstates.net/cgi-bin/api.cgi/wa=1/q=members[/url]:"
print "[spoiler=%d nations erroneously not listed][list=1]" % len(erroneously_not_listed)
for nat in erroneously_not_listed:
    print "[*][nation]%s[/nation]" % nat
print "[/list][/spoiler]"
|
""" Tkinter GUI for Lyrics Analyzer """
# import libraries
import tkinter as tk
from lyrics_tool import LyricsTool
class app:
    """Tkinter GUI wrapper around LyricsTool's explicit-lyrics check."""

    def __init__(self):
        # Result label; tracked so it can be replaced on repeated submits.
        self.label4 = None

    def main(self):
        """Build the window and run the Tk main loop (blocks until close)."""
        root = tk.Tk()
        root.title("Explicit Lyrics Detector")
        canvas1 = tk.Canvas(root, width = 500, height = 250)
        canvas1.pack()
        # main title
        label1 = tk.Label(root, text='Explicit Lyrics Detector')
        label1.config(font=('helvetica', 24))
        canvas1.create_window(250, 25, window=label1)
        # song title entry field
        label2 = tk.Label(root, text='Song Title')
        label2.config(font=('helvetica', 10))
        canvas1.create_window(150, 100, window=label2)
        entry1 = tk.Entry(root)
        canvas1.create_window(150, 75, window=entry1)
        # artist entry field
        label3 = tk.Label(root, text='Artist')
        label3.config(font=('helvetica', 10))
        canvas1.create_window(350, 100, window=label3)
        entry2 = tk.Entry(root)
        canvas1.create_window(350, 75, window=entry2)

        def VetLyricsButton():
            # Remove the previous verdict label, if any.
            # Bug fix: identity comparison (`is None`) instead of `== None`.
            if self.label4 is not None:
                self.label4.destroy()
            # Read the user's input and vet the lyrics.
            song_title = entry1.get()
            artist = entry2.get()
            v = LyricsTool()
            clean = v.vet(song_title, artist)
            # NOTE(review): strict `== True` preserved -- vet()'s return
            # type is not visible here; plain `if clean:` may be intended.
            if clean == True:
                state = 'Clean Lyrics'
            else:
                state = 'Explicit lyrics detected!'
            # Display the verdict to the user.
            self.label4 = tk.Label(root, text=state)
            self.label4.config(font=('helvetica', 14))
            canvas1.create_window(250, 200, window=self.label4)

        # Submit button triggering the check above.
        button1 = tk.Button(text='Submit', command=VetLyricsButton)
        canvas1.create_window(250, 150, window=button1)
        root.mainloop()
if __name__ == '__main__':
    # Only launch the GUI when run as a script, not when imported.
    a = app()
    a.main()
|
from .base_model import VolModel
import QuantLib as ql
class BlackCubic(VolModel):
    """Black variance surface built from the vol grid supplied by VolModel."""
    def __init__(self, vol_data):
        super().__init__(vol_data)
        # Interpolating surface over the expiries x strikes implied-vol grid;
        # dates/calendar/day-count all come from the VolModel base class.
        self.surface = ql.BlackVarianceSurface(self.calculation_date, self.calendar,
                                               self.expiration_dates, self.strikes,
                                               self.implied_vols, self.day_count)
    def get_vol(self, strike, time, recalibrate=True):
        """Return the Black vol at (*time*, *strike*).

        NOTE(review): *recalibrate* is accepted but ignored here -- confirm
        against the VolModel base-class contract.
        """
        return self.surface.blackVol(time, strike)
|
def isValid(s: str) -> bool:
    """Check whether the bracket string *s* is balanced.

    Returns the strings "true"/"false" (kept for caller compatibility,
    despite the bool annotation).
    """
    closers = {
        ")": "(",
        "}": "{",
        "]": "[",
    }
    stack = []
    for ch in s:
        if ch in closers:
            # Bug fix: guard against peeking an empty stack -- the original
            # raised IndexError for inputs starting with a closer, e.g. "]".
            if stack and stack[-1] == closers[ch]:
                stack.pop()
            else:
                return "false"
        else:
            stack.append(ch)
    # Leftover debug prints removed.
    return "true" if not stack else "false"


print(isValid("(]"))
import matplotlib.pyplot as plt
try: from matplotlib import animation
except: animation = None
from IPython.core.pylabtools import print_figure
from IPython.core import page
try:
from IPython.core.magic import Magics, magics_class, cell_magic, line_cell_magic
except:
from nose.plugins.skip import SkipTest
raise SkipTest("IPython extension requires IPython >= 0.13")
from param import ipython as param_ext
from tempfile import NamedTemporaryFile
from functools import wraps
import traceback, itertools, string
from dataviews import Stack
from plots import Plot, GridLayoutPlot, viewmap, channel_modes
from sheetviews import GridLayout, CoordinateGrid
from views import View, Overlay, Annotation, Layout
from options import options, channels, PlotOpts, StyleOpts, ChannelOpts
# Variables controlled via the %view magic
PERCENTAGE_SIZE, FPS, FIGURE_FORMAT = 100, 20, 'png'
ENABLE_TRACEBACKS=True # To assist with debugging of display hooks
GIF_TAG = "<center><img src='data:image/gif;base64,{b64}'/><center/>"
VIDEO_TAG = """<center><video controls>
<source src="data:video/{mime_type};base64,{b64}" type="video/{mime_type}">
Your browser does not support the video tag.
</video><center/>"""
# 'format name':(animation writer, mime_type, anim_kwargs, extra_args, tag)
ANIMATION_OPTS = {
'webm':('ffmpeg', 'webm', {},
['-vcodec', 'libvpx', '-b', '1000k'],
VIDEO_TAG),
'h264':('ffmpeg', 'mp4', {'codec':'libx264'},
['-pix_fmt', 'yuv420p'],
VIDEO_TAG),
'gif':('imagemagick', 'gif', {'fps':10}, [],
GIF_TAG)
}
#========#
# Magics #
#========#
# ANSI color codes for the IPython pager
red = '\x1b[1;31m%s\x1b[0m'
blue = '\x1b[1;34m%s\x1b[0m'
green = '\x1b[1;32m%s\x1b[0m'
cyan = '\x1b[1;36m%s\x1b[0m'
# Corresponding HTML color codes
html_red = '#980f00'
html_blue = '#00008e'
@magics_class
class ViewMagic(Magics):
    """
    Magic to allow easy control over the display of dataviews. The
    figure and animation output formats, the animation frame rate and
    figure size can all be controlled.
    Usage: %view [png|svg] [webm|h264|gif[:<fps>]] [<percent size>]
    """
    anim_formats = ['webm','h264','gif']
    def __init__(self, *args, **kwargs):
        super(ViewMagic, self).__init__(*args, **kwargs)
        self.usage_info = "Usage: %view [png|svg] [webm|h264|gif[:<fps>]] [<percent size>]"
        self.usage_info += " (Arguments may be in any order)"
    @classmethod
    def option_completer(cls, k,v):
        # Tab-completion candidates for the %view magic's arguments.
        return cls.anim_formats + ['png', 'svg']
    def _set_animation_options(self, anim_spec):
        """
        Parse the animation format and fps from the specification string.

        Mutates the module-level VIDEO_FORMAT/FPS globals; returns True
        on success, False (after printing a message) on invalid input.
        """
        global VIDEO_FORMAT, FPS
        format_choice, fps_str = ((anim_spec, None) if (':' not in anim_spec)
                                  else anim_spec.rsplit(':'))
        if format_choice not in self.anim_formats:
            print "Valid animations types: %s" % ', '.join(self.anim_formats)
            return False
        elif fps_str is None:
            VIDEO_FORMAT = format_choice
            return True
        try:
            fps = int(fps_str)
        except:
            print "Invalid frame rate: '%s'" % fps_str
            return False
        VIDEO_FORMAT, FPS = format_choice, fps
        if format_choice == 'gif':
            # Keep the imagemagick writer's fps in sync with the global FPS.
            ANIMATION_OPTS['gif'][2]['fps'] = fps
        return True
    def _set_size(self, size_spec):
        # Parse and apply the global percentage-size setting; returns
        # True on success, False (after printing a message) otherwise.
        global PERCENTAGE_SIZE
        try: size = int(size_spec)
        except: size = None
        if (size is None) or (size < 0):
            print "Percentage size must be an integer larger than zero."
            return False
        else:
            PERCENTAGE_SIZE = size
            return True
    def _parse_settings(self, opts):
        # Apply figure-format, animation-format and size options from the
        # token list; returns True when all recognised options applied.
        global FIGURE_FORMAT
        fig_fmt = [('svg' in opts), ('png' in opts)]
        if all(fig_fmt):
            success = False
            print "Please select either png or svg for static output"
        elif True in fig_fmt:
            figure_format = ['svg', 'png'][fig_fmt.index(True)]
            FIGURE_FORMAT= figure_format
            opts.remove(figure_format)
        elif len(opts) == 0: success = True
        if not len(opts) or len(opts) > 2:
            success = not len(opts)
        elif len(opts) == 1:
            # A single token is either an animation spec or a size.
            success = (self._set_animation_options(opts[0].lower())
                       if opts[0][0].isalpha() else self._set_size(opts[0]))
        elif sum(el[0].isalpha() for el in opts) in [0,2]:
            success = False
        else:
            (anim, size) = (opts if opts[0][0].isalpha()
                            else (opts[1], opts[0]))
            anim_success = self._set_animation_options(anim.lower())
            size_success = self._set_size(size)
            success = anim_success and size_success
        return success
    @line_cell_magic
    def view(self, line, cell=None):
        # %view line magic / %%view cell magic entry point.  In cell mode
        # the settings apply only for the duration of the cell.
        global FIGURE_FORMAT, VIDEO_FORMAT, PERCENTAGE_SIZE, FPS
        start_opts = [FIGURE_FORMAT, VIDEO_FORMAT, PERCENTAGE_SIZE, FPS]
        opts = line.split()
        success = self._parse_settings(opts)
        if cell is None and success:
            info = (VIDEO_FORMAT.upper(), FIGURE_FORMAT.upper(), PERCENTAGE_SIZE, FPS)
            print "Displaying %s animation and %s figures [%d%% size, %s FPS]" % info
        elif cell and success:
            self.shell.run_cell(cell)
            # Restore the global settings after the cell has been displayed.
            [FIGURE_FORMAT, VIDEO_FORMAT, PERCENTAGE_SIZE, FPS] = start_opts
        else:
            print self.usage_info
@magics_class
class ChannelMagic(Magics):
    # Channel definitions installed for the duration of a single cell.
    custom_channels = {}
    @cell_magic
    def channels(self, line, cell=None):
        """Install the channel definitions from *line*, run the cell, then clear."""
        ChannelMagic.custom_channels = self._parse_channels(str(line))
        self.shell.run_cell(cell)
        ChannelMagic.custom_channels = {}
    @classmethod
    def _set_overlay_labels(cls, obj, label):
        """
        Labels on Overlays are used to index channel definitions.
        """
        if isinstance(obj, GridLayout):
            for subview in obj.values():
                cls._set_overlay_labels(subview, label)
        elif isinstance(obj, Stack) and issubclass(obj.type, Overlay):
            for overlay in obj:
                overlay.label = label
        elif isinstance(obj, Overlay):
            obj.label = label
    @classmethod
    def _set_channels(cls, obj, custom_channels, prefix):
        # Register each parsed channel definition under a prefixed key.
        cls._set_overlay_labels(obj, prefix)
        for name, (pattern, params) in custom_channels.items():
            channels[prefix + '_' + name] = ChannelOpts(name, pattern,
                                                        **params)
    @classmethod
    def set_channels(cls, obj):
        # Apply any cell-scoped channel definitions to *obj*.
        prefix = 'Custom[<' + obj.name + '>]'
        if cls.custom_channels:
            cls._set_channels(obj, cls.custom_channels, prefix)
    def _parse_channels(self, line):
        """
        Parse the arguments to the magic, returning a dictionary of
        {'channel op name' : ('pattern', kwargs).
        """
        tokens = line.split()
        if tokens == []: return {}
        channel_split = [(el+']') for el in line.rsplit(']') if el.strip()]
        spec_split = [el.rsplit('=>') for el in channel_split]
        channels = {}
        for head, tail in spec_split:
            head = head.strip()
            op_match = [op for op in channel_modes if tail.strip().startswith(op)]
            if len(op_match) != 1:
                raise Exception("Unrecognized channel operation: ", tail.split()[0])
            argument_str = tail.replace(op_match[0],'')
            try:
                # Convert the [k=v, ...] syntax into a dict(...) expression.
                eval_str = argument_str.replace('[','dict(').replace(']', ')')
                args = eval(eval_str)
            except:
                raise Exception("Could not evaluate: %s" % argument_str)
            op = op_match[0]
            # Validate supplied kwargs against the operation's parameters.
            params = set(p for p in channel_modes[op].params().keys() if p!='name')
            mismatch_keys = set(args.keys()) - params
            if mismatch_keys:
                raise Exception("Parameter(s) %r not accepted by %s operation"
                                % (', '.join(mismatch_keys), op))
            # NOTE(review): string.letters is Python 2 only.
            valid_chars = string.letters + string.digits + '_* '
            if not head.count('*') or any(l not in valid_chars for l in head):
                raise Exception("Invalid characters in overlay pattern specification: %s" % head)
            pattern = ' * '.join(el.strip() for el in head.rsplit('*'))
            # Instantiate once so invalid parameter values fail early.
            channel_modes[op].instance(**args)
            channels[op] = (pattern, args)
        return channels
    @classmethod
    def option_completer(cls, k,v):
        """
        Tab completion hook for the %opts and %%opts magic.
        """
        line = v.text_until_cursor
        if line.endswith(']') or (line.count('[') - line.count(']')) % 2:
            line_tail = line[len('%%channels'):]
            op_name = line_tail[::-1].rsplit('[')[1][::-1].strip().split()[-1]
            if op_name in channel_modes:
                return channel_modes[op_name].params().keys()
        else:
            return channel_modes.keys()
@magics_class
class OptsMagic(Magics):
    """
    The %opts and %%opts line and cell magics allow customization of
    how dataviews are displayed. The %opts line magic updates or
    creates new options for either StyleOpts (i.e. matplotlib options)
    or in the PlotOpts (plot settings). The %%opts cell magic sets
    custom display options associated on the displayed view object
    which will persist every time that object is displayed.
    """
    # Attributes set by the magic and read when display hooks run
    custom_options = {}
    show_info = False
    show_labels = False

    def __init__(self, *args, **kwargs):
        super(OptsMagic, self).__init__(*args, **kwargs)
        # Collect every style name and non-constant parameter across the
        # registered plot classes, once, for tab completion.
        styles_list = [el.style_opts for el in viewmap.values()]
        params_lists = [[k for (k,v) in el.params().items()
                         if not v.constant] for el in viewmap.values()]
        # List of all parameters and styles for tab completion
        OptsMagic.all_styles = sorted(set([s for styles in styles_list for s in styles]))
        OptsMagic.all_params = sorted(set([p for plist in params_lists for p in plist]))

    @classmethod
    def pprint_kws(cls, style):
        # Render an options object's keywords as "k1=v1, k2=v2".
        # NOTE(review): style.items is assumed to be a dict attribute
        # (hence .items.items()) — confirm against the options classes.
        return ', '.join("%s=%r" % (k,v) for (k,v) in style.items.items())

    @classmethod
    def collect(cls, obj, attr='style'):
        """
        Given a composite view object, build a dictionary of either
        the 'style' or 'label' attributes across all contained
        atoms. This method works across overlays, grid layouts and
        stacks. The return is a dictionary with the collected string
        values as keys for the the associated view type.
        """
        group = {}
        if isinstance(obj, (Overlay, Layout, GridLayout)):
            for subview in obj:
                group.update(cls.collect(subview, attr))
        elif isinstance(obj, Stack) and not issubclass(obj.type, Overlay):
            # Non-overlay stacks: collect from every element.
            key_lists = [cls.collect(el, attr).keys() for el in obj]
            values = set(el for els in key_lists for el in els)
            for val in values:
                group.update({val:obj.type})
        elif isinstance(obj, Stack):
            # Overlay stacks: the top overlay is representative.
            for subview in obj.top:
                group.update(cls.collect(subview, attr))
        else:
            # Leaf view: record its attribute value (missing -> '').
            value = '' if getattr(obj, attr, None) is None else getattr(obj, attr)
            group.update({value:type(obj)})
        return group

    @classmethod
    def _basename(cls, name):
        """
        Strips out the 'Custom' prefix from styles names that have
        been customized by an object identifier.
        """
        split = name.rsplit('>]_')
        if not name.startswith('Custom'):   return name
        elif len(split) == 2:               return split[1]
        else:
            raise Exception("Invalid style name %s" % name)

    @classmethod
    def _set_style_names(cls, obj, custom_name_map):
        """
        Update the style names on a composite view to the custom style
        name for all matches. A match occurs when the basename of the
        view.style is found in the supplied dictionary.
        """
        if isinstance(obj, GridLayout):
            for subview in obj.values():
                cls._set_style_names(subview, custom_name_map)
        elif isinstance(obj.style, list):
            obj.style = [custom_name_map.get(cls._basename(s), s) for s in obj.style]
        else:
            style = cls._basename(obj.style)
            obj.style = custom_name_map.get(style, obj.style)

    @classmethod
    def set_view_options(cls, obj):
        """
        To be called by the display hook which supplies the view
        object to be displayed. Any custom options are defined on the
        object as necessary and if there is an error, an HTML message
        is returned.
        """
        prefix = 'Custom[<' + obj.name + '>]_'
        # Implements the %%labels magic
        if cls.show_labels:
            # NOTE(review): .keys() is treated as a list (.count below)
            # — Python 2 semantics.
            labels = cls.collect(obj, 'label').keys()
            info = (len(labels), labels.count(''))
            summary = ("%d objects inspected, %d without labels. "
                       "The set of labels found:<br><br>&emsp;" % info)
            label_list = '<br>&emsp;'.join(['<b>%s</b>' % l for l in sorted(set(labels)) if l])
            return summary + label_list
        # Nothing to be done
        if not any([cls.custom_options, cls.show_info]): return
        styles = cls.collect(obj, 'style')
        # The set of available style basenames present in the object
        available_styles = set(cls._basename(s) for s in styles)
        custom_styles = set(s for s in styles if s.startswith('Custom'))
        mismatches = set(cls.custom_options.keys()) - (available_styles | set(channel_modes))
        if cls.show_info or mismatches:
            return cls._option_key_info(obj, available_styles, mismatches, custom_styles)
        # Test the options are valid
        error = cls._keyword_info(styles, cls.custom_options)
        if error: return error
        # Link the object to the new custom style
        cls._set_style_names(obj, dict((k, prefix + k) for k in cls.custom_options))
        # Define the Styles in the OptionMaps
        cls._define_options(cls.custom_options, prefix=prefix)

    @classmethod
    def _keyword_info(cls, styles, custom_options):
        """
        Check that the keywords in the StyleOpts or PlotOpts are
        valid. If not, the appropriate HTML error message is returned.
        """
        errmsg = ''
        for key, (plot_kws, style_kws) in custom_options.items():
            for name, viewtype in styles.items():
                plottype = viewmap[viewtype]
                if cls._basename(name) != key: continue
                # Plot options checks
                params = [k for (k,v) in plottype.params().items() if not v.constant]
                mismatched_params = set(plot_kws.keys()) - set(params)
                if mismatched_params:
                    info = (', '.join('<b>%r</b>' % el for el in mismatched_params),
                            '<b>%r</b>' % plottype.name,
                            ', '.join('<b>%s</b>' % el for el in params))
                    errmsg += "Keywords %s not in valid %s plot options: <br>&emsp;%s" % info
                # Style options checks
                style_opts = plottype.style_opts
                mismatched_opts = set(style_kws.keys()) - set(style_opts)
                if style_opts == [] and mismatched_opts:
                    errmsg += 'No styles accepted by %s. <br>' % plottype.name
                elif mismatched_opts:
                    spacing = '<br><br>' if errmsg else ''
                    info = (spacing,
                            ', '.join('<b>%r</b>' % el for el in mismatched_opts),
                            '<b>%r</b>' % plottype.name,
                            ', '.join('<b>%s</b>' % el for el in style_opts))
                    errmsg += "%sKeywords %s not in valid %s style options: <br>&emsp;%s" % info
        return errmsg

    @classmethod
    def _option_key_info(cls, obj, available_styles, mismatches, custom_styles):
        """
        Format the information about valid options keys as HTML,
        listing mismatched names and the available keys.
        """
        fmt =  '&emsp;<code><font color="%s">%%s</font>%%s : ' % html_red
        fmt += '<font color="%s">[%%s]</font> %%s</code><br>' % html_blue
        obj_name = "<b>%s</b>" % obj.__class__.__name__
        if len(available_styles) == 0:
            return "<b>No keys are available in the current %s</b>" % obj_name
        mismatch_str = ', '.join('<b>%r</b>' % el for el in mismatches)
        unavailable_msg = '%s not in customizable' % mismatch_str if mismatch_str else 'Customizable'
        s = "%s %s options:<br>" % (unavailable_msg, obj_name)
        # Pad names so the bracketed plot options line up in a column.
        max_len = max(len(s) for s in available_styles)
        for name in sorted(available_styles):
            padding = '&nbsp;'*(max_len - len(name))
            s += fmt % (name, padding,
                        cls.pprint_kws(options.plotting(name)),
                        cls.pprint_kws(options.style(name)))
        if custom_styles:
            s += '<br>Options that have been customized for the displayed view only:<br>'
            custom_names = [style_name.rsplit('>]_')[1] for style_name in custom_styles]
            max_len = max(len(s) for s in custom_names)
            for custom_style, custom_name in zip(custom_styles, custom_names):
                padding = '&nbsp;'*(max_len - len(custom_name))
                s += fmt % (custom_name, padding,
                            cls.pprint_kws(options.plotting(custom_style)),
                            cls.pprint_kws(options.style(custom_style)))
        return s

    def _parse_keywords(self, line):
        """
        Parse the arguments to the magic, returning a dictionary with
        style name keys and tuples of keywords as values. The first
        element of the tuples are the plot keyword options and the
        second element are the style keyword options.
        """
        tokens = line.split()
        if tokens == []: return {}
        elif not tokens[0][0].isupper():
            raise SyntaxError("First token must be a option name (a capitalized string)")
        # Split the input by the capitalized tokens
        style_names, tuples = [], []
        for upper, vals in itertools.groupby(tokens, key=lambda x: x[0].isupper()):
            values = list(vals)
            if upper and len(values) != 1:
                raise SyntaxError("Options should be split by keywords")
            elif upper:
                style_names.append(values[0])
            else:
                parse_string = ' '.join(values).replace(',', ' ')
                # '[...]' prefix (if any) holds plot options; the rest are styles.
                if not parse_string.startswith('[') and parse_string.count(']')==0:
                    plotstr, stylestr = '', parse_string
                elif [parse_string.count(el) for el in '[]'] != [1,1]:
                    raise SyntaxError("Plot options not supplied in a well formed list.")
                else:
                    split_ind = parse_string.index(']')
                    plotstr = parse_string[1:split_ind]
                    stylestr = parse_string[split_ind+1:]
                try:
                    # Evalute the strings to obtain dictionaries
                    dicts = [eval('dict(%s)' % ', '.join(els))
                             for els in [plotstr.split(), stylestr.split()]]
                    tuples.append(tuple(dicts))
                except:
                    raise SyntaxError("Could not parse keywords from '%s'" % parse_string)
        # Drop entries where neither plot nor style keywords were given.
        return dict((k,v) for (k,v) in zip(style_names, tuples) if v != ({},{}))

    @classmethod
    def _define_options(cls, kwarg_map, prefix='', verbose=False):
        """
        Define the style and plot options.
        """
        lens, strs = [0,0,0], []
        for name, (plot_kws, style_kws) in kwarg_map.items():
            # Update an existing plot option entry or create a fresh one.
            plot_update = name in options.plotting
            if plot_update and plot_kws:
                options[prefix+name] = options.plotting[name](**plot_kws)
            elif plot_kws:
                options[prefix+name] = PlotOpts(**plot_kws)
            # Same, for the style options.
            style_update = name in options.style
            if style_update and style_kws:
                options[prefix+name] = options.style[name](**style_kws)
            elif style_kws:
                options[prefix+name] = StyleOpts(**style_kws)
            if verbose:
                # Track the widest entry in each column for alignment below.
                plotstr = '[%s]' % cls.pprint_kws(options.plotting[name]) if name in options.plotting else ''
                stylestr = cls.pprint_kws(options.style[name]) if name in options.style else ''
                strs.append((name+':', plotstr, stylestr))
                lens = [max(len(name)+1, lens[0]),
                        max(len(plotstr), lens[1]),
                        max(len(stylestr),lens[2])]
        if verbose:
            heading = "Plot and Style Options"
            title = '%s\n%s' % (heading, '='*len(heading))
            description = "Each line describes the options associated with a single key:"
            msg = '%s\n\n%s\n\n  %s %s %s\n\n' % (green % title, description,
                                                  red % 'Name:', blue % '[Plot Options]',
                                                  'Style Options')
            for (name, plot_str, style_str) in strs:
                msg += "  %s %s %s\n" % (red % name.ljust(lens[0]),
                                         blue % plot_str.ljust(lens[1]),
                                         style_str.ljust(lens[2]))
            page.page(msg)

    @classmethod
    def option_completer(cls, k,v):
        """
        Tab completion hook for the %opts and %%opts magic.
        """
        line = v.text_until_cursor
        # Inside an unclosed '[' -> complete plot parameters; otherwise
        # complete style keywords and known option names.
        if line.endswith(']') or (line.count('[') - line.count(']')) % 2:
            return [el+'=' for el in cls.all_params]
        else:
            return [el+'=' for el in cls.all_styles] + options.options()

    def _line_magic(self, line):
        """
        Update or create new options in for the plot or style
        options. Plot options keyword-value pairs, when supplied need
        to be give in square brackets after the option key. Any style
        keywords then following the closing square bracket. The -v
        flag toggles verbose output.

        Usage: %opts [-v] <Key> [ [<keyword>=<value>...]] [<keyword>=<value>...]
        """
        verbose = False
        if str(line).startswith('-v'):
            verbose = True
            line = line.replace('-v', '')
        kwarg_map = self._parse_keywords(str(line))
        if not kwarg_map:
            # No arguments: print a summary instead (Python 2 print statements).
            info = (len(options.style.keys()),
                    len([k for k in options.style.keys() if k.startswith('Custom')]))
            print "There are %d style options defined (%d custom object styles)." % info
            info = (len(options.plotting.keys()),
                    len([k for k in options.plotting.keys() if k.startswith('Custom')]))
            print "There are %d plot options defined (%d custom object plot settings)." % info
            return
        self._define_options(kwarg_map, verbose=verbose)

    @cell_magic
    def labels(self, line, cell=None):
        """
        Simple magic to see the full list of defined labels for the
        displayed view object.
        """
        if line != '':
            raise Exception("%%labels magics accepts no arguments.")
        # Toggle the flag around the cell run; set_view_options reads it.
        OptsMagic.show_labels = True
        self.shell.run_cell(cell)
        OptsMagic.show_labels = False

    @line_cell_magic
    def opts(self, line='', cell=None):
        """
        Set custom display options unique to the displayed view. The
        keyword-value pairs in the square brackets (if present) set
        the plot parameters. Keyword-value pairs outside the square
        brackets are matplotlib style options.

        Usage: %%opts <Key> [ [<keyword>=<value>...] ] [<keyword>=<value>...]

        Multiple keys may be listed, setting plot and style options in
        this way.
        """
        if cell is None:
            return self._line_magic(str(line))
        elif not line.strip():
            # Bare %%opts: just show the available option info.
            OptsMagic.show_info=True
        else:
            OptsMagic.custom_options = self._parse_keywords(str(line))
        # Run the cell in the updated environment
        self.shell.run_cell(cell)
        # Reset the class attributes
        OptsMagic.custom_options = {}
        OptsMagic.show_info=False
#==================#
# Helper functions #
#==================#
def select_format(format_priority):
    """
    Return the first video format in format_priority that can actually
    be rendered (tested by animating a trivial two-frame figure), or
    the last entry as a fallback if none succeed.
    """
    for fmt in format_priority:
        try:
            anim = animation.FuncAnimation(plt.figure(),
                                           lambda x: x, frames=[0,1])
            animate(anim, *ANIMATION_OPTS[fmt])
            return fmt
        # Deliberate best-effort: any failure means "try the next format".
        except: pass
    return format_priority[-1]
def get_plot_size():
    """Return the default Plot size scaled by the PERCENTAGE_SIZE setting."""
    scale = PERCENTAGE_SIZE / 100.0
    width, height = Plot.size
    return (width * scale, height * scale)
def animate(anim, writer, mime_type, anim_kwargs, extra_args, tag):
    """
    Encode a matplotlib animation to video and return it embedded in
    the supplied HTML tag template.

    The encoded video is cached on the animation object itself
    (anim._encoded_video) so re-displaying the same animation does not
    re-run the slow encoding step.
    """
    if extra_args != []:
        anim_kwargs = dict(anim_kwargs, extra_args=extra_args)
    if not hasattr(anim, '_encoded_video'):
        with NamedTemporaryFile(suffix='.%s' % mime_type) as f:
            anim.save(f.name, writer=writer, **anim_kwargs)
            # Fix: read through a context manager — the original left
            # this file handle open (open(f.name).read() was never closed).
            with open(f.name, "rb") as video_file:
                video = video_file.read()
        # .encode("base64") is Python 2 only, consistent with this module.
        anim._encoded_video = video.encode("base64")
    return tag.format(b64=anim._encoded_video,
                      mime_type=mime_type)
def HTML_video(plot, view):
    """
    Render the plot as an HTML video, preferring VIDEO_FORMAT but
    falling back through every configured format whose writer is
    available. Returns an HTML error string if all formats fail.
    """
    anim = plot.anim(fps=FPS)
    writers = animation.writers.avail
    # NOTE(review): dict.keys() concatenated to a list — Python 2 semantics.
    for fmt in [VIDEO_FORMAT] + ANIMATION_OPTS.keys():
        if ANIMATION_OPTS[fmt][0] in writers:
            try:
                return animate(anim, *ANIMATION_OPTS[fmt])
            # Best-effort: a failed encoder just means "try the next one".
            except: pass
    return "<b>Could not generate %s animation</b>" % VIDEO_FORMAT
def figure_display(fig, size=None, message=None):
    """
    Convert a matplotlib figure to an inline base64 <img> tag, closing
    the figure afterwards. `size` (pixels) forces a square render;
    `message` is prepended in bold when given.
    """
    if size is not None:
        # Convert the requested pixel size to inches for matplotlib.
        inches = size / float(fig.dpi)
        fig.set_size_inches(inches, inches)
    mime_type = 'svg+xml' if FIGURE_FORMAT.lower()=='svg' else 'png'
    prefix = 'data:image/%s;base64,' % mime_type
    # .encode("base64") is Python 2 only.
    b64 = prefix + print_figure(fig, FIGURE_FORMAT).encode("base64")
    if size is not None:
        html = "<center><img height='%d' width='%d' src='%s'/><center/>" % (size, size, b64)
    else:
        html = "<center><img src='%s' /><center/>" % b64
    # Close so the notebook does not render the figure a second time.
    plt.close(fig)
    return html if (message is None) else '<b>%s</b></br>%s' % (message, html)
def figure_fallback(plotobj):
    """Render a static figure with an explanatory message when animation fails."""
    if animation is None:
        message = 'Cannot import matplotlib.animation'
    else:
        message = 'Failed to generate matplotlib animation'
    return figure_display(plotobj(), message=message)
#===============#
# Display hooks #
#===============#
def process_view_magics(obj):
    "Hook into %%opts and %%channels magics to process display view"
    # Each magic returns an HTML error string (truthy) on failure;
    # the first such error short-circuits the display.
    for apply_magic in (OptsMagic.set_view_options, ChannelMagic.set_channels):
        error = apply_magic(obj)
        if error:
            return error
# Debug switches read by display_hook: add 'view'/'display' to capture
# inputs/outputs in CAPTURED; 'display_tracebacks' prints hook errors.
HOOK_OPTIONS = ['display_tracebacks']
CAPTURED = {'view': None, 'display':None}

def display_hook(fn):
    """
    Decorator for notebook display hooks. Optionally captures the view
    and the rendered output (per HOOK_OPTIONS) and suppresses hook
    exceptions so a rendering bug never breaks the notebook, printing
    the traceback when 'display_tracebacks' is enabled.
    """
    @wraps(fn)
    def wrapped(view, **kwargs):
        # No `global` needed: CAPTURED is mutated, never rebound.
        if 'view' in HOOK_OPTIONS:
            CAPTURED['view'] = view
        try:
            retval = fn(view, **kwargs)
        # Fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.
        except Exception:
            if 'display_tracebacks' in HOOK_OPTIONS:
                traceback.print_exc()
            return
        if 'display' in HOOK_OPTIONS:
            CAPTURED['display'] = retval
        return retval
    return wrapped
@display_hook
def animation_display(anim):
    """Display hook rendering a FuncAnimation in the preferred video format."""
    return animate(anim, *ANIMATION_OPTS[VIDEO_FORMAT])
@display_hook
def stack_display(stack, size=256):
    """
    Display hook for Stack objects: a static figure for single-frame
    stacks, otherwise an HTML video (with a static-figure fallback).
    Returns None for non-Stack objects so other hooks can run.
    """
    if not isinstance(stack, Stack): return None
    # Magics may return an HTML error message that replaces the display.
    magic_info = process_view_magics(stack)
    if magic_info: return magic_info
    opts = dict(options.plotting(stack).opts, size=get_plot_size())
    stackplot = viewmap[stack.type](stack, **opts)
    if len(stackplot) == 1:
        fig = stackplot()
        return figure_display(fig)
    try:    return HTML_video(stackplot, stack)
    except: return figure_fallback(stackplot)
@display_hook
def layout_display(grid, size=256):
    """
    Display hook for Layout/GridLayout objects. Scales the plot size by
    the grid shape, rendering a figure or video as for stack_display.
    """
    # Promote a plain Layout to a one-element GridLayout.
    grid = GridLayout([grid]) if isinstance(grid, Layout) else grid
    if not isinstance(grid, (GridLayout)): return None
    magic_info = process_view_magics(grid)
    if magic_info: return magic_info
    # Overall size grows with the number of rows/columns.
    grid_size = (grid.shape[1]*get_plot_size()[1],
                 grid.shape[0]*get_plot_size()[0])
    opts = dict(size=grid_size)
    gridplot = GridLayoutPlot(grid, **opts)
    if len(gridplot)==1:
        fig = gridplot()
        return figure_display(fig)
    try:    return HTML_video(gridplot, grid)
    except: return figure_fallback(gridplot)
@display_hook
def projection_display(grid, size=256):
    """
    Display hook for CoordinateGrid objects; like layout_display but
    with an additional per-cell scale factor.
    """
    if not isinstance(grid, CoordinateGrid): return None
    # Empirical shrink factor so large grids stay a reasonable size.
    size_factor = 0.17
    grid_size = (size_factor*grid.shape[1]*get_plot_size()[1],
                 size_factor*grid.shape[0]*get_plot_size()[0])
    magic_info = process_view_magics(grid)
    if magic_info: return magic_info
    # Plot options are taken from the last element of the grid.
    opts = dict(options.plotting(grid.values()[-1]).opts, size=grid_size)
    gridplot = viewmap[grid.__class__](grid, **opts)
    if len(gridplot)==1:
        fig = gridplot()
        return figure_display(fig)
    try:    return HTML_video(gridplot, grid)
    except: return figure_fallback(gridplot)
@display_hook
def view_display(view, size=256):
    """
    Display hook for simple View objects (always a static figure).
    Annotations are skipped — they only render as part of other views.
    """
    if not isinstance(view, View): return None
    if isinstance(view, Annotation): return None
    magic_info = process_view_magics(view)
    if magic_info: return magic_info
    opts = dict(options.plotting(view).opts, size=get_plot_size())
    fig = viewmap[view.__class__](view, **opts)()
    return figure_display(fig)
def update_matplotlib_rc():
    """
    Default changes to the matplotlib rc used by IPython Notebook.
    """
    import matplotlib
    matplotlib.rcParams.update({
        'figure.figsize': (6.0, 4.0),
        'figure.facecolor': 'white',
        'figure.edgecolor': 'white',
        'font.size': 10,
        'savefig.dpi': 72,
        'figure.subplot.bottom': .125,
    })
# Magics provided by this extension, listed in the load banner below.
all_line_magics = sorted(['%params', '%opts', '%view'])
all_cell_magics = sorted(['%%view', '%%opts', '%%labels'])
message = """Welcome to the Dataviews IPython extension! (http://ioam.github.io/imagen/)"""
message += '\nAvailable magics: %s' % ', '.join(all_line_magics + all_cell_magics)
_loaded = False  # guard so load_ipython_extension registers everything only once
# Probe at import time for the best working video format.
VIDEO_FORMAT = select_format(['webm','h264','gif'])
def load_ipython_extension(ip, verbose=True):
    """
    IPython extension entry point: registers the magic classes, their
    tab-completion hooks, and the HTML display hooks on shell `ip`.
    """
    # Python 2 print statement — this module targets Python 2.
    if verbose: print message
    global _loaded
    if not _loaded:
        _loaded = True
        param_ext.load_ipython_extension(ip)
        ip.register_magics(ViewMagic)
        ip.register_magics(OptsMagic)
        ip.register_magics(ChannelMagic)
        # Configuring tab completion
        ip.set_hook('complete_command', ChannelMagic.option_completer, str_key = '%channels')
        ip.set_hook('complete_command', ChannelMagic.option_completer, str_key = '%%channels')
        ip.set_hook('complete_command', ViewMagic.option_completer, str_key = '%view')
        ip.set_hook('complete_command', ViewMagic.option_completer, str_key = '%%view')
        ip.set_hook('complete_command', OptsMagic.option_completer, str_key = '%%opts')
        ip.set_hook('complete_command', OptsMagic.option_completer, str_key = '%opts')
        html_formatter = ip.display_formatter.formatters['text/html']
        # FuncAnimation is registered by name to avoid importing matplotlib.animation here.
        html_formatter.for_type_by_name('matplotlib.animation', 'FuncAnimation', animation_display)
        html_formatter.for_type(View, view_display)
        html_formatter.for_type(Stack, stack_display)
        html_formatter.for_type(Layout, layout_display)
        html_formatter.for_type(GridLayout, layout_display)
        html_formatter.for_type(CoordinateGrid, projection_display)
        update_matplotlib_rc()
|
'''
cpagrip
Get the Best Nutella Package!
http://zh.moneymethods.net/click.php?c=7&key=bbcprqa35ns2a5f44z14k2k3
health
'''
from selenium import webdriver
from time import sleep
# import xlrd
import random
import os
import time
import sys
sys.path.append("..")
# import email_imap as imap
# import json
import re
# from urllib import request, parse
from selenium.webdriver.support.ui import Select
# import base64
import Chrome_driver
import email_imap as imap
import name_get
import db
import selenium_funcs
import Submit_handle
import random
def web_submit(submit,chrome_driver,debug=0):
    """
    Drive a multi-page health-insurance lead form with selenium, filling
    it from the `submit` record, then mark the plan submitted in the DB.

    submit        -- dict with 'Site', 'ID' and a 'health' sub-dict of
                     person fields (firstname, email, homephone, ...).
    chrome_driver -- an already-created selenium Chrome driver.
    debug         -- 1 overrides submit['Site'] with a hard-coded test URL.
    """
    # test
    if debug == 1:
        site = 'http://zh.moneymethods.net/click.php?c=19&key=y045n22fs96cl78o07jqgkh3'
        print(site)
        submit['Site'] = site
    chrome_driver.get(submit['Site'])
    chrome_driver.maximize_window()
    chrome_driver.refresh()
    # sleep(2000)
    excel = 'health'
    # page1
    # gender: 50/50 chance of clicking the second (female?) label —
    # NOTE(review): which label means which gender is not verifiable here.
    num_gender = random.randint(0,1)
    if num_gender == 0:
        chrome_driver.find_element_by_xpath('//*[@id="form-step-one-top"]/div[2]/div/div/label[2]').click()
    # date_of_birth: typed as three separate key sequences (m/d/y parts)
    element = chrome_driver.find_element_by_xpath('//*[@id="form-step-one-top"]/div[3]/div/input')
    element.click()
    sleep(1)
    date_of_birth = Submit_handle.get_auto_birthday('')
    element.send_keys(date_of_birth[0])
    element.send_keys(date_of_birth[1])
    element.send_keys(date_of_birth[2])
    sleep(1)
    zipcode = Submit_handle.get_zip(submit['health'])
    print('zipcode:',zipcode)
    chrome_driver.find_element_by_xpath('//*[@id="form-step-one-top"]/div[4]/div/input').send_keys(zipcode)
    sleep(1)
    # Remember the original window so we can find the popup it opens.
    handle = chrome_driver.current_window_handle
    chrome_driver.find_element_by_xpath('//*[@id="top"]').click()
    sleep(3)
    while True:
        '''
        turn to other page
        '''
        handles=chrome_driver.window_handles
        if len(handles)>1:
            break
    # Switch to the newly-opened window.
    for i in handles:
        if i != handle:
            chrome_driver.switch_to.window(i)
    # page2
    chrome_driver.find_element_by_xpath('/html/body/div[1]/div[3]/form/div[2]/div[3]/a').click()
    sleep(2)
    # # None of these happen
    # try:
    #     chrome_driver.find_element_by_xpath('/html/body/div[1]/div[3]/div[3]/div[2]/div/div[6]/label/span').click()
    #     # continue
    #     chrome_driver.find_element_by_xpath('/html/body/div[1]/div[3]/div[3]/div[2]/a').click()
    # except:
    #     pass
    sleep(3)
    # household size: random 1-4
    index = random.randint(1,4)
    s1 = Select(chrome_driver.find_element_by_xpath('//*[@id="houseHoldSize"]'))
    s1.select_by_index(index)
    # household income
    # index = random.randint(1,4)
    # s1 = Select(chrome_driver.find_element_by_xpath('//*[@id="houseHoldIncome"]'))
    # s1.select_by_index(index)
    sleep(2)
    # continue
    chrome_driver.find_element_by_xpath('/html/body/div[1]/div[3]/form/div[4]/div[2]/a').click()
    # height/weight from a generated profile
    num_info = Submit_handle.get_height_info()
    chrome_driver.find_element_by_xpath('//*[@id="step2b-height_ft"]').send_keys(num_info['Height_FT'])
    chrome_driver.find_element_by_xpath('//*[@id="step2b-height_in"]').send_keys(num_info['Height_Inch'])
    chrome_driver.find_element_by_xpath('//*[@id="step2b-weight"]').send_keys(num_info['Weight'])
    sleep(2)
    # checkbox: pick one of the two options, or (index==2) both
    index = random.randint(0,2)
    if index==0:
        chrome_driver.find_element_by_xpath('/html/body/div[1]/div[3]/form/div[5]/div[2]/div/div[1]/div[3]/div[1]/div/label').click()
    elif index == 1:
        chrome_driver.find_element_by_xpath('/html/body/div[1]/div[3]/form/div[5]/div[2]/div/div[1]/div[3]/div[2]/div/label').click()
    else:
        chrome_driver.find_element_by_xpath('/html/body/div[1]/div[3]/form/div[5]/div[2]/div/div[1]/div[3]/div[1]/div/label').click()
        chrome_driver.find_element_by_xpath('/html/body/div[1]/div[3]/form/div[5]/div[2]/div/div[1]/div[3]/div[2]/div/label').click()
    sleep(2)
    # continue
    chrome_driver.find_element_by_xpath('/html/body/div[1]/div[3]/form/div[5]/div[2]/a').click()
    sleep(5)
    # page3
    # firstname
    chrome_driver.find_element_by_xpath('//*[@id="step3-firstname"]').send_keys(submit['health']['firstname'])
    # lastname
    chrome_driver.find_element_by_xpath('//*[@id="step3-lastname"]').send_keys(submit['health']['lastname'])
    # email
    chrome_driver.find_element_by_xpath('//*[@id="step3-email"]').send_keys(submit['health']['email'])
    # phone: stored as a float string in the sheet, strip the '.0' part
    cellphone = Submit_handle.chansfer_float_into_int(submit['health']['homephone'].split('.')[0])
    element = chrome_driver.find_element_by_xpath('//*[@id="step3-phone"]')
    element.click()
    # db.update_plan_status(1,submit['ID'])
    print('cellphone:',cellphone)
    element.send_keys(cellphone)
    # address
    chrome_driver.find_element_by_xpath('//*[@id="step3-address1"]').send_keys(submit['health']['address'])
    sleep(3)
    # get my quote
    chrome_driver.find_element_by_xpath('/html/body/div[1]/div[3]/form/div[6]/div[2]/a').click()
    # Mark this plan as submitted in the database.
    db.update_plan_status(1,submit['ID'])
    sleep(60)
def test():
    """
    Manual smoke test: load one record from the DB/excel layer and dump
    its non-empty fields. Most experimentation lines are kept commented.
    """
    Mission_list = ['10000']
    excel = 'Uspd_big'
    Excel_name = [excel,'']
    Email_list = ['hotmail.com','outlook.com','yahoo.com','aol.com','gmail.com']
    submit = db.read_one_excel(Mission_list,Excel_name,Email_list)
    # print(submit)
    # Side-effect-only comprehension: prints each populated field.
    [print(item,submit[excel][item]) for item in submit[excel] if submit[excel][item]!=None]
    # zipcode = Submit_handle.get_zip(submit['health']['zip'])
    # print('zipcode:',zipcode)
    # cellphone = Submit_handle.chansfer_float_into_int(submit['health']['homephone'].split('.')[0])
    # print('cellphone:',cellphone)
    # print(len(cellphone))
    submit['Mission_Id'] = '10045'
    # chrome_driver = Chrome_driver.get_chrome(submit)
    # web_submit(submit,chrome_driver,1)
    # date_of_birth = Submit_handle.get_auto_birthday(submit['health']['date_of_birth'])
    # print(date_of_birth)
    # web_submit(submit,1)
    # print(submit['health'])
    # print(submit['health']['state'])
    # print(submit['health']['city'])
    # print(submit['health']['zip'])
    # print(submit['health']['date_of_birth'])
    # print(submit['health']['ssn'])
def test1():
    """Print a single random 0/1 draw (mirrors the gender pick in web_submit)."""
    coin = random.randint(0, 1)
    print(coin)
if __name__=='__main__':
    # Manual entry point: runs the DB-backed smoke test.
    test()
    print('......')
|
from setuptools import setup, find_packages
import sys, os
version = '0.1'
setup(name='connector.scripts',
version=version,
description="Odoo connector usefull recipe startup script",
long_description="""\
""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='',
author='Simon ANDRE',
author_email='sandre@anybox.fr',
url='anybox.fr',
license='AGPL v3',
namespace_packages=['connector'],
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
'setuptools',
'argparse',
],
entry_points="""
"""
)
|
#!/usr/bin/env python
import random
import string
import time
from threading import Timer
# This imports the list of words from external python file
from words import word_list
def get_word():
    """Pick a random word from word_list for a new round, upper-cased."""
    return random.choice(word_list).upper()
def guess_letter():
    """
    Return one uniformly random uppercase letter as the computer's guess.

    Fix: the original chose from string.ascii_letters.upper(), a
    52-character string in which every uppercase letter appears twice;
    string.ascii_uppercase states the intent directly (the resulting
    distribution over letters is identical).
    """
    return random.choice(string.ascii_uppercase)
def loading_guess():
    """Animate a 'computer is choosing a letter' message on one terminal line."""
    base = "The computer is choosing a letter"
    for dots in range(0, 30):
        # '\r' rewinds the cursor so each frame overwrites the previous one.
        print(base + "." * dots, end="\r")
        time.sleep(0.1)
def play_hangman():
    """
    Main game loop: the user supplies a length hint, then the computer
    repeatedly guesses random letters until its tries run out.
    """
    playing = True
    start = False
    tries = 6
    # NOTE(review): get_word() is called again further down for the
    # display message, so the printed word differs from this one — likely a bug.
    letters_guessed = list(get_word())
    words_guessed = []
    word_list_lengths = []
    for x in word_list:
        # creates list of all word lengths
        word_list_lengths.append(len(x))
    print("\n")
    print("Let's play Hangman! \n")
    time.sleep(0.5)
    # shows user randomly selected word for game
    print("Let's see if the computer can figure out what your word is. (It's {}"")".format(get_word()))
    time.sleep(1.5)
    # this loop allows the computer to guess another letter until the word is completed or there are no more tries left
    while playing == True and tries > 0:
        # this loop allows the user to give the computer a hint before guessing starts
        while start == False:
            try:
                letters_hint = int(input(
                    "Give the computer a hint. How many letters are there in your chosen word?: "))
                # hint must lie within the min/max possible word lengths
                if letters_hint > max(word_list_lengths) or letters_hint < min(word_list_lengths):
                    raise ValueError
                else:
                    start = True
            # error raised if hint is not valid integer
            except ValueError:
                print("Please make sure you enter a valid number \n")
                time.sleep(1)
        print("\n")
        print("Number of tries left: {}".format(tries))
        print("Here we go!")
        # creates new thread where loading_guess function is called
        Timer(3, loading_guess).start()
        time.sleep(7)
        print("\n", end="\n")
        # NOTE(review): the guess is printed but never compared against
        # letters_guessed — the win condition is not implemented yet.
        print("The computer guessed {} \n".format(guess_letter()))
        tries -= 1  # the number of tries remaining decreases by 1 each loop
        # Rest of code goes here
        print("End of current code \n")
def main():
    """Run one game, then keep replaying while the user answers yes."""
    play_hangman()
    replay = True
    # this loop asks the user if they want to play again
    while replay == True:
        answer = input("Do you want to play again? (Yes/No) - ")
        if answer.upper() == "YES" or answer.upper() == "Y":
            print("----------------------------------------------------------------------------------------------------------------- \n")
            play_hangman()
        else:
            replay = False
            print("\n")
            print("Thank you for playing!")

# Script entry point.
if __name__ == "__main__":
    main()
|
# Read N groups and answer M queries from stdin.
# Query type 0: print the members of group Q; type 1: print the group
# containing member Q.
N, M = input().split()
N = int(N)
M = int(M)
# NOTE(review): `all` shadows the builtin of the same name.
all = {}
for _ in range(N):
    group = input()
    member = int(input())
    member_list = []
    for _ in range(member):
        member_list.append(input())
    member_list = sorted(member_list)
    # Store the group name at index 0, followed by its sorted members.
    member_list = [group] + member_list
    all[group] = member_list
for _ in range(M):
    Q = input()
    Qn = int(input())
    if Qn == 0:
        # Skip index 0 (the group name itself).
        for i in range(1, len(all[Q])):
            print(all[Q][i])
    elif Qn == 1:
        for v in all.values():
            if Q in v:
                print(v[0]); break
# Done
# Script by Connor Maloney - 100977005
import os
import csv
import urllib.request
from time import sleep
# Print iterations progress
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):
    """
    Render one frame of a terminal progress bar (call once per loop step).
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        length      - Optional  : character length of bar (Int)
        fill        - Optional  : bar fill character (Str)
    """
    fraction = iteration / float(total)
    percent = "{0:.{1}f}".format(100 * fraction, decimals)
    filled = int(length * iteration // total)
    bar = fill * filled + '-' * (length - filled)
    # '\r' at both ends keeps the bar rewriting the same terminal line.
    print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
    if iteration == total:
        # Finish with a newline so following output starts cleanly.
        print()
# Input user info
studentName = input("Enter student name (ex: Connor Maloney): ")
studentNumber = int(input("Enter student number (ex: 100977005): "))
province = input("Enter 2-letter province/territory (ex: MB for Manitoba, SK for Saskatchewan): ")
cityName = input("Enter city name (ex: BRANDON CDA or REGINA RCS): ")
yearXrange = int(input("Enter starting range in years (ex: 1956): "))
yearYrange = int(input("Enter ending range in years (ex: 1958): "))
# List of month slots (12 per year, range inclusive) for the progress bar total
items = list(range((yearYrange+1 - yearXrange) * 12))
l = len(items)
class ClimateData:
    """One month of climate-summary values for a single station."""

    def __init__(self, month, year, temp, precip, lat, lon):
        # Attribute names are read directly by the reporting code; keep them.
        self.month = month
        self.year = year
        self.temp = temp      # mean temperature, as the raw CSV string
        self.precip = precip  # total precipitation, as the raw CSV string
        self.lat = lat
        self.lon = lon

    def __str__(self):
        return ('{0.month}/{0.year} Tm: {0.temp} P: {0.precip} '
                'Lat: {0.lat} Lon: {0.lon}').format(self)
climate_data = [] # Store all climateData
counter = 0
print(f"Pulling data for {cityName} from {yearXrange} to {yearYrange}...")
# Main loop: one government CSV download per (year, month), scanning
# each file for the requested city's row.
for y in range(yearXrange, yearYrange+1):
    yearly_data = []
    for m in range(1,13):
        url = f'http://climate.weather.gc.ca/prods_servs/cdn_climate_summary_report_e.html?intYear={y}&intMonth={m}&prov={province}&dataFormat=csv&btnSubmit=Download+data'
        with urllib.request.urlopen(url) as response:
            csvfile = response.read().decode('utf-8').splitlines()
            for line in csvfile:
                try:
                    if cityName.upper() in line:
                        # NOTE(review): naive comma split — assumes no quoted
                        # commas in the row. Columns 4/14/1/2 are taken as
                        # temp/precip/lat/lon per the report format; confirm.
                        row = line.split(',')
                        tempData = ClimateData(m, y, row[4], row[14], row[1], row[2])
                        yearly_data.append(tempData)
                        printProgressBar(counter, l, prefix = f'Pulling {tempData}:', suffix = 'Complete', length = 50)
                        counter+=1
                except:
                    # Malformed/short rows are reported but skipped.
                    print("ERROR: Could not find city data")
                    pass
    climate_data.append(yearly_data)
def build_summary_per_year(data):
    """Build a 3-line CSV summary (header, temperatures, precipitation) for one year."""
    n = len(data)
    # Values arrive quoted from the source CSV, so strip the quotes before casting.
    mean_temp = round(sum(float(rec.temp.strip('"')) for rec in data) / n, 2)
    mean_precip = round(sum(float(rec.precip.strip('"')) for rec in data) / n, 2)
    temps = ','.join(rec.temp for rec in data)
    precips = ','.join(rec.precip for rec in data)
    return (
        f"Year,{data[0].year},,Jan,Feb,Mar,Apr,May,Jun,Jul,Aug,Sep,Oct,Nov,Dec,Average\n"
        f"Mean Temperature Tm (°C),,,{temps},{mean_temp},\n"
        f"Total Precipitation P (mm),,,{precips},{mean_precip},\n"
    )
# workaround to incredibly strange f-string error inserting backslash
yearly_summaries = ",,,,,,,,,,,,,,,\n".join([build_summary_per_year(cd) for cd in climate_data])
# NOTE(review): opened in append mode and never closed; rerunning appends
# duplicate blocks — consider `with open("demofile.csv", "w") as f:`.
f = open("demofile.csv", "a")
f.write(f'''TSES 3002 2019 Swarm Assignment Data Template,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
Name:,{studentName},,,,,,,,,,,,,,
Student Number:,{studentNumber},,,,,,,,,,,,,,
Site (Name as shown in database):,{cityName},,,,,,,,,,,,,,
Site Latitude (as shown in database):,{str(climate_data[0][0].lat)},,,,,,,,,,,,,,
Site Longiture (as shown in database):,{str(climate_data[0][0].lon)},,,,,,,,,,,,,,
Years:,{','.join([str(d[0].year) for d in climate_data])},,,,,,,,,,,,
,,,,,,,,,,,,,,,
{yearly_summaries }
,,,,,,,,,,,,,,,
NOTES:,Assignment asks for BRANDON RCS outpost but only BRANDON CDA outpost was available,,,,,,,,,,,,,,''')
print(f"Download complete! File placed in {os.getcwd()} Please open the new demofile.csv there. For graphs, edit this in excel as .xlsx and work from there. Happy spreadsheeting!")
print("Note: If you already have an existing demofile.csv, please delete it and run this script to generate a new one.")
|
#!/usr/bin/env python3
"""Some functions doing arithmetic."""
__appname__ = 'cfexercises1'
__author__ = 'Zongyi Hu (zh2720@ic.ac.uk)'
__version__ = '0.0.1'
import sys
"""function sqrt"""
def foo_1(x=1):
    """Return the square root of x."""
    return x ** (1 / 2)

foo_1(3)
foo_1(9)
foo_1(49)
"""function compare two numbers"""
def foo_2(x=1, y=1):
    """Return the larger of x and y (y wins ties)."""
    return x if x > y else y

foo_2(2, 3)
foo_2(3, 2)
"""bubble sort"""
def foo_3(x=1, y=1, z=1):
    """Return [x, y, z] sorted in ascending order.

    Bug fix: the original did only two adjacent comparisons, which leaves
    inputs such as (2, 3, 1) unsorted; after z bubbles down, x and y must
    be compared once more.
    """
    if x > y:
        x, y = y, x
    if y > z:
        y, z = z, y
    if x > y:  # final pass completes the sort
        x, y = y, x
    return [x, y, z]

foo_3(1, 3, 2)
"""factorial"""
def foo_4(x=1):
    """Return x! computed iteratively."""
    product = 1
    for factor in range(2, x + 1):  # multiplying by 1 is a no-op, start at 2
        product *= factor
    return product
"""factorial"""
def foo_5(x=1):
    """Return x! computed recursively.

    Bug fix: the base case is broadened from ``x == 1`` to ``x <= 1`` so
    that foo_5(0) returns 1 (0! == 1) instead of recursing until the
    recursion limit is hit.
    """
    if x <= 1:
        return 1
    return x * foo_5(x - 1)
"""factorial"""
def foo_6(x=1):
    """Return x! using a countdown while-loop."""
    result = 1
    while x > 1:  # multiplying by the final 1 is a no-op
        result *= x
        x -= 1
    return result
"""main function"""
def main(argv):
    """Print the result of each demo function called with default arguments."""
    for fn in (foo_1, foo_2, foo_3, foo_4, foo_5, foo_6):
        print(fn())
    return 0
if __name__ == "__main__":
    # Run the demos only when executed as a script, not on import.
    status = main(sys.argv)
    sys.exit(status)
from random import randint
from model import Applicant
class ApplicantCodeGenerator:
    """Generates a unique 6-digit application code.

    On construction it loads every previously issued code from the
    Applicant table, then keeps generating random 6-digit strings until
    one is found that was never issued before. The result is exposed as
    ``application_code``.
    """

    CODE_LENGTH = 6

    def __init__(self):
        # First collect the already-issued codes, then generate a fresh one;
        # regenerate whenever we collide with an existing code.
        self.__earlier_app_code = []
        self.__query_the_earliest_app_code()
        self.__is_valid_pass = False
        self.application_code = ""
        self.__code_generator()

    def __query_the_earliest_app_code(self):
        # Load every previously issued applicant code from the database.
        applications = Applicant.select()
        for application in applications:
            self.__earlier_app_code.append(application.applicant_code)

    def __code_generator(self):
        # Bug fix: rebuild the code from scratch — the original appended to
        # the previous value, so a collision retry produced a 12-digit code.
        # Bug fix: randint(0, 10) could yield the two-digit "10"; digits
        # must come from 0-9.
        self.application_code = "".join(
            str(randint(0, 9)) for _ in range(self.CODE_LENGTH)
        )
        self.__check_earlier_app_code()

    def __check_earlier_app_code(self):
        # Keep regenerating until the code is not among the earlier ones.
        while self.__is_valid_pass is False:
            if self.application_code not in self.__earlier_app_code:
                self.__is_valid_pass = True
            else:
                self.__code_generator()
import pandas as pd
import numpy as np
import scipy
import matplotlib
import seaborn as sns
from sklearn.cross_validation import train_test_split
# Kaggle "Facebook V: Predicting Check Ins" data, expected locally.
df_train = pd.read_csv('Kaggle_Datasets/Facebook/train.csv')
df_test = pd.read_csv('Kaggle_Datasets/Facebook/test.csv')
class PredictionModel():
    """Partitions a check-in dataframe into overlapping x/y windows and
    builds one ModelStore per window."""

    def __init__(self, df, xsize=1, ysize=0.5, xslide=0.5, yslide=0.25, xcol='x', ycol='y'):
        # df: pandas DataFrame with at least the `xcol` and `ycol` columns.
        # xsize/ysize: window extent; xslide/yslide: window stride
        # (windows overlap whenever the stride is smaller than the size).
        self.df = df
        self.xsize = xsize
        self.ysize = ysize
        self.xslide = xslide
        self.yslide = yslide
        self.xcol = xcol
        self.ycol = ycol
        self.windows = self.generate_windows()
        self.slices = self.slice_df()

    def frange(self, x, y, jump):
        # Float range generator; always yields the end value `y` last so the
        # final window is anchored at the data's max edge.
        while x < y:
            yield x
            x += jump
        yield y

    def generate_windows(self):
        # Build ((x1, y1), (x2, y2)) corner pairs for every window position.
        ranges = []
        result = []
        xmin, xmax = self.df.x.min(), self.df.x.max()
        ymin, ymax = self.df.y.min(), self.df.y.max()
        xranges = list(self.frange(xmin, xmax-self.xsize, self.xslide))
        yranges = list(self.frange(ymin, ymax-self.ysize, self.yslide))
        ylen = len(yranges)
        for x in xranges:
            subrange = [x] * ylen
            ranges.extend(zip(subrange, yranges))
        for x1, y1 in ranges:
            x2, y2 = x1 + self.xsize, y1 + self.ysize  # NOTE(review): x2/y2 unused below
            result.append(((x1, y1), (x1+self.xsize, y1+self.ysize)))
        return result

    def slice_df(self):
        # One ModelStore per window, keyed by the window's corner tuple.
        slices = {}
        for window in self.windows:
            slices[window] = ModelStore(self.df, window, self.xcol, self.ycol)
        return slices
class ModelStore():
    """Holds the slice of the dataframe that falls inside one spatial
    window, plus the classifier trained on it."""

    def __init__(self, df, window, xcol, ycol):
        self.window = window
        (self.x1, self.y1), (self.x2, self.y2) = self.window
        # Rows whose coordinates fall inside the (inclusive) window bounds.
        # .copy() avoids mutating a view of the caller's dataframe below.
        self.df = df[(df[xcol] >= self.x1) & (df[xcol] <= self.x2) &
                     (df[ycol] >= self.y1) & (df[ycol] <= self.y2)].copy()
        self.unique_place_count = len(self.df.place_id.unique())
        self.model = None
        # Derive cyclic time features; `time` is in minutes.
        self.df['hours'] = self.df.time / 60.0
        self.df['days'] = self.df.time / (60*24)
        self.df['hours_cycle'] = self.df.hours % 24  # hour-of-day cycle
        self.df['days_cycle'] = self.df.days % 7     # day-of-week cycle
        self.total_count = len(self.df)

    def train(self, model='logistic'):
        # NOTE(review): the `model` parameter is accepted but ignored;
        # a random forest is always used.
        from sklearn.ensemble import RandomForestClassifier
        self.model = RandomForestClassifier(n_estimators=5)  # x, y, accuracy, hours_cycle, days_cycle
        self.train_df = self.df.sort_values('row_id')[['x', 'y', 'accuracy', 'hours_cycle', 'days_cycle']]
        self.values = self.df.sort_values('row_id')['place_id']
        self.model.fit(self.train_df, self.values)

    def describe(self):
        # Bug fix: the original referenced the undefined name `window` and
        # passed four arguments to a three-placeholder format string,
        # raising NameError at runtime.
        return '{}: {}, {}'.format(self.window, self.total_count, self.unique_place_count)
# Restrict to the 1x1 corner of the map for faster experimentation.
df_under_1 = df_train[(df_train.x <= 1.0) & (df_train.y <= 1.0)]
train, test = train_test_split(df_under_1, test_size = 0.2)
pred_model = PredictionModel(df=train)
print pred_model.slices
# Train one classifier per spatial window (Python 2 print/iteritems syntax).
for window, model in pred_model.slices.iteritems():
    print model.describe()
    model.train()
|
from typing import Union
from uuid import UUID
from .base import BaseFunction
from ..request import (
Request,
SSEContextManager,
)
class BackgroundTask(BaseFunction):
    """
    Provides server-sent events streaming functions.
    """

    task_id: UUID

    def __init__(self, task_id: Union[UUID, str]) -> None:
        # Normalize string task IDs into UUID objects up front.
        if isinstance(task_id, UUID):
            self.task_id = task_id
        else:
            self.task_id = UUID(task_id)

    # only supported in AsyncAPISession
    def listen_events(self) -> SSEContextManager:
        """
        Opens an event stream of the background task updates.

        :returns: a context manager that returns an :class:`SSEResponse` object.
        """
        request = Request(
            'GET', '/events/background-task',
            params={'task_id': str(self.task_id)},
        )
        return request.connect_events()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-08-27 09:01
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    # Auto-generated schema migration: relaxes constraints on the
    # `front_image_url` column of CCTVWorldInfo (blank/null allowed,
    # max length 500).

    dependencies = [
        ('spider', '0002_cctvworldinfo_front_image_url'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cctvworldinfo',
            name='front_image_url',
            field=models.CharField(blank=True, default='', max_length=500, null=True, verbose_name='图片地址'),
        ),
    ]
|
from django.urls import path

from parking import views

# Routes for the parking app; `carNo` is captured as an int path parameter.
urlpatterns = [
    path('getCar/<int:carNo>', views.parking_slots)
]
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from pymonzo.monzo_api import MonzoAPI # noqa
__version__ = '0.2.2'
|
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
def treeValid(trainXSet, trainYSet, validXSet, validYSet, trainNum, validNum, batch_size):
    """Sweep decision-tree depths 1..50, plot validation accuracy per depth,
    and return the mean index of the best-scoring depths.

    A fixed random subsample of 500 validation points is scored for every
    depth; training uses the first 2500 training rows.
    """
    errorCount = 0.0
    acc = []  # accuracy (%) per depth
    k = []    # x-axis values (depth index)
    mTest = np.random.randint(0, validNum, 500)  # random validation subsample
    for j in range(0, 50):
        k.append(j)  # trainNum // batch_size
        model = DecisionTreeClassifier(max_depth=j + 1)
        model.fit(trainXSet[:2500], trainYSet[:2500])
        for i in range(len(mTest)):
            classifierResult = model.predict(validXSet[mTest[i]].reshape(1, -1))
            if (classifierResult != validYSet[mTest[i]]): errorCount += 1.0
        acc.append(((1 - errorCount / float(len(mTest))) * 100))
        errorCount = 0.0
    # Collect every depth tied for the best accuracy
    indexTmp = np.argwhere(acc == np.amax(acc))
    index = []
    for i in range(len(indexTmp)):
        index.append(indexTmp[i][0])
    plt.plot(k, acc)
    plt.title('DecisionTree Correct rate', fontsize=24)
    plt.xlabel('Depth', fontsize=14)
    plt.ylabel('Correct rate(%)', fontsize=14)
    plt.show()
    # Bug fix: the original format string ended with a lone "%", an
    # incomplete format spec that raises ValueError; "%" must be "%%".
    print("\nValid DecisionTree辨识率为: %f %%" % np.mean(acc))
    return int(np.mean(index))
def fit(trainXSet, trainYSet, depth):
    """Fit and return a DecisionTreeClassifier with the given max depth."""
    print("start tree")
    tree = DecisionTreeClassifier(max_depth=depth)
    tree.fit(trainXSet, trainYSet)
    print("finished tree")
    return tree
def treePredict(model, testX, testY):
    """Return the model's classification accuracy on (testX, testY) in percent."""
    mTest = len(testX)
    errorCount = 0.0
    for i in range(mTest):
        # Each sample must be 2-D for a sklearn-style predict().
        if model.predict(testX[i].reshape(1, -1)) != testY[i]:
            errorCount += 1.0
    acc = (1 - errorCount / float(mTest)) * 100
    return acc
|
from Helper import *
from PIL import Image
import csv
import random
import pickle
# Set global variables
from settings import *
# TODO(Sören): Pickle file should also be saved in data_directory
class ImageInformation(object):
    def __init__(self, data_path, result_path, dataset_name):
        """Collects filename/label/dimension metadata for an image folder.

        dataset_name: name of the dataset folder;
        data_path: directory containing the .jpg files;
        result_path: directory where the CSV/pickle results are written.
        """
        self.dataset_name = dataset_name
        self.filenames = None   # numpy array of basenames (extension stripped)
        self.images = None      # raw image data (never filled in here)
        self.labels = None      # 0 = cat, 1 = dog
        self.data_path = data_path
        self.result_path = result_path
        self.sizes = None       # per-image [height, width, ratio, bytes]
        self.dict = {}          # summary dictionary that gets pickled

    def get_file_names(self, testing=False):
        """Gets all filenames from the path variable and saves it within filenames.

        When `testing` is truthy only the first 10 directory entries are
        scanned. Returned array is shuffled.
        """
        filenames = deque()
        names = os.listdir(self.data_path)
        size = len(names)
        if testing:
            size = 10
        for i in range(0, size):
            if names[i].endswith('.jpg'):
                filenames.append(names[i][:-4])
            else:
                continue
        filenames = ListUtils().deque_to_numpy_array(filenames)
        # Shuffle the filenames
        random.shuffle(filenames)
        return filenames

    def get_dimensions(self):
        """Get the dimensions (width and height) of all images and save the
        result within sizes.

        Returns an (n, 4) array: [height, width, aspect ratio, file bytes].
        """
        sizes = np.zeros(shape=(len(self.filenames), 4))
        for i in range(0, self.filenames.size):
            path_to_file = self.data_path + '/' + self.filenames[i] + '.jpg'
            size_in_bytes = os.stat(path_to_file).st_size
            # NOTE(review): Image handles are never closed explicitly.
            im = Image.open(path_to_file)
            sizes[i, 0] = im.height
            sizes[i, 1] = im.width
            sizes[i, 2] = im.width/im.height
            sizes[i, 3] = size_in_bytes
        return sizes

    def get_labels(self):
        """Get the labels of the files. Marked as 0 if cat and as 1 if dog."""
        filenames = self.filenames
        labels = np.zeros(shape=(len(filenames), 1))
        for i in range(0, len(filenames)):
            if filenames[i].startswith('cat'):
                labels[i] = 0
            elif filenames[i].startswith('dog'):
                labels[i] = 1
        return labels

    def write_data_to_csv_file(self):
        """Write one metadata row per image to a date-stamped .csv file."""
        filename = self.result_path + '/' + self.dataset_name + Utils.get_current_date() + '.csv'
        with open(filename, 'w') as csvfile:
            fieldnames = ['index','filename', 'label', 'height', 'width', 'ratio', 'size']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            for i in range(len(self.filenames)):
                writer.writerow({'index': i+1,
                                 'filename': self.filenames[i],
                                 'label': self.labels[i, 0],
                                 'height': self.sizes[i, 0],
                                 'width': self.sizes[i, 1],
                                 'ratio': self.sizes[i, 2].round(4),
                                 'size': self.sizes[i, 3]})

    def create_dictionary(self):
        """Create a dictionary which shall be pickled later."""
        # NOTE(review): 'heigth' is misspelled; fixing it would change the
        # pickle schema, so it is documented rather than corrected here.
        data_as_dict = {'data': self.images,
                        'labels': self.labels,
                        'filenames': self.filenames,
                        'heigth': self.sizes[:, 0],
                        'width': self.sizes[:, 1],
                        'ratio': self.sizes[:, 2],
                        'size': self.sizes[:, 3]}
        return data_as_dict

    def pickle_information(self):
        """Pickle the metadata dictionary next to the CSV output."""
        with open(self.result_path + '/' + self.dataset_name + Utils.get_current_date() + '.pickle', 'wb') as f:
            pickle.dump(self.dict, f, pickle.HIGHEST_PROTOCOL)

    def plot_information(self):
        """Plot the information."""
        pass

    def get_information(self, testing=False):
        """Get all important information from the data, then save it."""
        # Get necessary information
        self.filenames = self.get_file_names(testing)
        self.sizes = self.get_dimensions()
        self.labels = self.get_labels()
        self.dict = self.create_dictionary()
        # Save the information
        self.save_information()

    def save_information(self):
        """Persist the collected metadata as CSV and pickle."""
        self.write_data_to_csv_file()
        self.pickle_information()
def main():
    """Collect and save image metadata for the original train and test data."""
    # --- original train data ---
    name = 'train1'
    data_directory = global_path_to_original_train_data
    result_directory = global_path_to_other_results
    data = ImageInformation(data_directory, result_directory, name)
    data.get_information()
    # Display status information
    print(name + ' is done.')

    # --- original test data ---
    name = 'test1'
    data_directory = global_path_to_original_test_data
    result_directory = global_path_to_other_results
    data = ImageInformation(data_directory, result_directory, name)
    # Bug fix: the original called get_information(data_directory), passing
    # the path as the `testing` flag; being truthy it limited the scan to
    # only the first 10 files of the test set.
    data.get_information()
    # Display status information
    print(name + ' is done.')
if __name__ == "__main__":
    # Entry point: run metadata collection only when executed as a script.
    main()
|
#!/usr/bin/python
import json
import bottle_mysql
import bottle
from bottle import route, run, template

app = bottle.Bottle()
# dbhost is optional, default is localhost
# NOTE(review): hard-coded root credentials with an empty password —
# these should come from configuration, not source.
plugin = bottle_mysql.Plugin(dbuser='root', dbpass='', dbname='sakila')
app.install(plugin)
@app.route('/movies')
def movies(db):
    """Render the full film list, or None when the table is empty."""
    db.execute('SELECT * from film;')
    films = db.fetchall()
    return template('show_film', films=films) if films else None
@app.route('/shows/<cinema_id>')
def shows(db, cinema_id):
    """All shows playing in any theater that belongs to the given cinema."""
    cinema_id = int(cinema_id)  # the cast guards the %d interpolation below
    db.execute('SELECT * FROM `show` as sh INNER JOIN `film` fi ON sh.film_film_id = fi.film_id WHERE sh.theater_theater_id IN (SELECT theater_id FROM `theater` WHERE cinema_cinema_id = %d);' % (cinema_id, ))
    results = db.fetchall()
    if results:
        return template('show_shows', shows=results)
@app.route('/cinemas')
def cinemas(db):
    """Render every cinema, or None when the table is empty."""
    db.execute('SELECT * FROM `cinema`;')
    results = db.fetchall()
    return template('show_cinemas', cinemas=results) if results else None
@app.route('/tickets/<show_id>')
def tickets(db, show_id):
    """Seat map for one show: every seat with its ticket (if sold), grouped
    by physical seat row."""
    show_id = int(show_id)  # the cast guards the %d interpolation below
    db.execute("""SELECT
    se . *, ti.ticket_id
FROM
    `show` sh
        INNER JOIN
    `seat` se ON sh.theater_theater_id = se.theater_theater_id
        LEFT JOIN
    `ticket` ti ON se.seat_id = ti.seat_seat_id
WHERE
    sh.show_id = %d
ORDER BY col;""" % (show_id,))
    rows = db.fetchall()
    # Group seats by their row number, preserving first-seen order.
    grouped = {}
    for seat in rows:
        grouped.setdefault(seat['row'], []).append(seat)
    rows_list = list(grouped.values())
    if rows:
        return template('show_tickets', tickets=rows_list)
@app.route('/task1')
def task1(db):
    """Dump the seat rows for show 1 as pretty-wrapped JSON."""
    db.execute('SELECT SQL_NO_CACHE se.* FROM `show` sh INNER JOIN `seat` se ON sh.theater_theater_id = se.theater_theater_id WHERE sh.show_id = 1 ORDER BY col;')
    seat_rows = db.fetchall()
    markup = '<pre style="white-space: pre-wrap;white-space: -moz-pre-wrap;white-space: -pre-wrap;white-space: -o-pre-wrap;word-wrap: break-word;">{{html}}</pre>'
    return template(markup, html=json.dumps(seat_rows))
@app.route('/task2')
def task2(db):
    """Run the tickets-in-the-coming-year query; always renders 'done!'."""
    db.execute("""SELECT
    ti . *, sh.start_date
FROM
    `ticket` ti
        INNER JOIN
    `show` sh ON ti.show_show_id = sh.show_id
WHERE
    start_date > now()
        AND start_date < DATE_ADD(now(), INTERVAL 1 YEAR);""")
    db.fetchall()  # result intentionally unused
    return template('<strong>{{text}}</strong>', text='done!')
@app.route('/')
def index(db):
    """Render the landing page."""
    return template('index')
app.run(host='localhost', port=8080, reloader=True) |
#-*-coding:utf-8-*-
"""
"创建者:Li Zhen
"创建时间:2019/4/4 17:41
"描述:TODO
"""
import torch
import torch.nn as nn
import numpy as np
# Load the numeric German credit dataset and z-score every feature column.
data = np.loadtxt('german.data-numeric')
n, l = data.shape
for j in range(l - 1):
    meanVal = np.mean(data[:, j])
    stdVal = np.std(data[:, j])
    data[:, j] = (data[:, j] - meanVal) / stdVal
np.random.shuffle(data)
# First 900 rows train, remaining rows test; labels shift from {1,2} to {0,1}.
train_data = data[:900, :l-1]
train_lab = data[:900, l-1]-1
# Bug fix: test features must come from rows 900 onward to match test_lab;
# the original reused the training rows (data[:900]) here.
test_data = data[900:, :l-1]
test_lab = data[900:, l-1]-1
class LR(nn.Module):
    """Logistic-regression classifier: one linear layer (24 -> 2) followed
    by an element-wise sigmoid."""

    def __init__(self):
        super(LR, self).__init__()
        self.fc = nn.Linear(24, 2)

    def forward(self, x):
        # Sigmoid squashes each logit independently into (0, 1).
        return torch.sigmoid(self.fc(x))
def test(pred, lab):
t = pred.max(-1)[1] == lab
return torch.mean(t.float())
|
import glob
import os.path as osp
from typing import Union, Optional, List, Tuple, Callable
from tabulate import tabulate
from graphgallery import functional as gf
from ..data.preprocess import (train_val_test_split_tabular,
get_train_val_test_split,
get_train_val_test_split_gcn)
class Dataset:
    """Base class for graph datasets.

    Wraps a graph object, an optional transform pipeline, and the
    bookkeeping for train/val/test node splits.
    """

    def __init__(self,
                 name,
                 root=None,
                 *,
                 transform=None,
                 verbose=True,
                 url=None):
        # root: local directory where datasets are stored (default "datasets").
        if root is None:
            root = 'datasets'

        assert isinstance(root, str), root
        root = osp.abspath(osp.expanduser(root))

        if url:
            # Instance-level override of the class download URL.
            self._url = url

        self.root = root
        self.name = str(name)
        self.verbose = verbose

        self._graph = None
        self.split_cache = None
        self.splits = gf.BunchDict()  # populated by the split_* methods
        self.transform = gf.get(transform)

    @property
    def g(self):
        """alias of graph"""
        return self.graph

    @property
    def graph(self):
        """The stored graph with the transform applied on every access."""
        return self.transform(self._graph)

    @staticmethod
    def available_datasets():
        """Mapping of dataset names on offer; subclasses override this."""
        return dict()

    @property
    def url(self) -> str:
        """Download URL of the raw data."""
        return self._url

    def split_nodes(self,
                    train_size: float = 0.1,
                    val_size: float = 0.1,
                    test_size: float = 0.8,
                    random_state: Optional[int] = None) -> dict:
        """Stratified random node split; sizes are fractions summing to <= 1."""
        assert all((train_size, val_size))
        graph = self.graph
        assert not graph.multiple, "NOT Supported for multiple graph"
        if test_size is None:
            test_size = 1.0 - train_size - val_size
        assert train_size + val_size + test_size <= 1.0

        label = graph.node_label
        train_nodes, val_nodes, test_nodes = train_val_test_split_tabular(
            label.shape[0],
            train_size,
            val_size,
            test_size,
            stratify=label,
            random_state=random_state)

        self.splits.update(
            dict(train_nodes=train_nodes,
                 val_nodes=val_nodes,
                 test_nodes=test_nodes))
        return self.splits

    def split_nodes_as_gcn(self,
                           num_samples: int = 20,
                           random_state: Optional[int] = None) -> dict:
        """GCN-paper style split: `num_samples` training nodes per class."""
        graph = self.graph
        assert not graph.multiple, "NOT Supported for multiple graph"
        label = graph.node_label
        train_nodes, val_nodes, test_nodes = get_train_val_test_split_gcn(
            label,
            num_samples,
            random_state=random_state)

        self.splits.update(
            dict(train_nodes=train_nodes,
                 val_nodes=val_nodes,
                 test_nodes=test_nodes))
        return self.splits

    def split_nodes_by_sample(self,
                              train_samples_per_class: int,
                              val_samples_per_class: int,
                              test_samples_per_class: int,
                              random_state: Optional[int] = None) -> dict:
        """Fixed-count split per class.

        Note: this REWRITES self._graph (classes too small for the
        requested samples are eliminated, then the graph is standardized)
        before splitting.
        """
        graph = self.graph
        assert not graph.multiple, "NOT Supported for multiple graph"
        self._graph = graph.eliminate_classes(train_samples_per_class + val_samples_per_class).standardize()
        label = self._graph.node_label
        train_nodes, val_nodes, test_nodes = get_train_val_test_split(
            label,
            train_samples_per_class,
            val_samples_per_class,
            test_samples_per_class,
            random_state=random_state)

        self.splits.update(
            dict(train_nodes=train_nodes,
                 val_nodes=val_nodes,
                 test_nodes=test_nodes))
        return self.splits

    def split_edges(self,
                    train_size=None,
                    val_size=None,
                    test_size=None,
                    random_state: Optional[int] = None) -> dict:
        """Edge-level split; not provided by the base class."""
        raise NotImplementedError

    def split_graphs(self,
                     train_size=None,
                     val_size=None,
                     test_size=None,
                     split_by=None,
                     random_state: Optional[int] = None) -> dict:
        """Graph-level split; not provided by the base class."""
        raise NotImplementedError

    def show(self, *filepaths) -> None:
        """Pretty-print the dataset's files as a table (all files by default)."""
        if not filepaths:
            filepaths = self.list_files()

        table_headers = ["File Path", "File Name"]
        items = [osp.split(path) for path in filepaths]
        table = tabulate(items, headers=table_headers,
                         tablefmt="fancy_grid")
        print(f"Files in dataset '{self}':\n" + table)

    def list_files(self):
        # NOTE(review): relies on a subclass-provided `download_dir`.
        return glob.glob(osp.join(self.download_dir, '*'))

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, root={self.root})"

    __str__ = __repr__
|
# 1. Read the picture from disk
# 2. Send the bytes over a TCP socket
import os
import socket

pics = b''  # accumulated file contents
base_file = os.path.abspath(__file__)
file_dir = os.path.dirname(base_file)
pic_file_path = os.path.join(file_dir,'cluo.jpg')
# a = os.path.join(os.path.dirname(os.path.abspath(__file__)),'cluo.jpg')
ip = '127.0.0.1'
port=33333
addr = (ip,port)
ss = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
ss.connect(addr)
# open() is delegated to the OS layer, not implemented by Python itself
with open (pic_file_path,'rb+') as f:
    # Read in 1 KiB chunks until EOF
    while 1:
        data = f. read(1024)
        if not data:
            break
        pics += data
ss.send(pics)
ss.close()
# pic_file_copy_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'cluo2.jpg')
# with open(pic_file_copy_path,'wb+') as f:
#     f.write(pics)
|
from unittest import TestCase
from shouldhavedone import text_lab
__author__ = 'andi'
# what is the english scrabble score of cloud
class TestScrabble_score(TestCase):
    """Unit tests for the text_lab question-answering helpers."""

    def test_scrabble_score(self):
        question = "what is the english scrabble score of cloud"
        self.assertEqual("8", text_lab.scrabble_score(question))

    def test_anagram(self):
        question = """which of the following is an anagram of "listen": google, enlists, banana, silent, silent"""
        # Fix: assertEquals is a long-deprecated alias (removed in
        # Python 3.12); use assertEqual.
        self.assertEqual("silent, silent", text_lab.anagram(question))
palindrome = input("Input palindrome: ")
def palindrome_check(some_string):
    """Return "Is palindrome" if the string reads the same reversed,
    otherwise "Not palindrome".

    Bug fix: the original compared some_string[0:stop] against
    some_string[len(some_string):stop:-1]; with a negative step the start
    index clamps to len-1, so the back half was one character short and
    every even-length palindrome (e.g. "abba") was reported as
    "Not palindrome".
    """
    if some_string == some_string[::-1]:
        return "Is palindrome"
    return "Not palindrome"
print(palindrome_check(palindrome))
|
import xlrd
import xlwt
from requests import request
def cf_ranklist(contest_code):
    """Build a spreadsheet ranking guild members in a Codeforces contest.

    Reads member names/handles from data.xlsx (name in column 2, profile
    URL in column 5), fetches the contest's rating changes from the
    Codeforces API, and writes the matching rows out.
    Returns [] when the API request fails.
    """
    guild_handles = {}
    book=xlrd.open_workbook('data.xlsx')
    sheet=book.sheet_by_index(0)
    n=sheet.nrows
    guild_handles = {}
    for i in range(n):
        name = sheet.cell_value(i,2)
        # The handle is the last path segment of the profile URL.
        handle = sheet.cell_value(i,5).split('/')[-1]
        guild_handles[handle]=name
    url = "https://codeforces.com/api/contest.ratingChanges?contestId="+str(contest_code)
    page = request('GET',url)
    if not page.ok:
        return []
    data = page.json()
    ranklist = []
    counter = 1  # guild-local rank; incremented only for matched handles
    for row in data['result']:
        username = row['handle'] # works for non team handles else only first handle considered
        if guild_handles.get(username,None):
            ranklist.append((counter,row['rank'],username,guild_handles[username],row['oldRating'],row['newRating']))
            counter += 1
    rank = xlwt.Workbook()
    sheet3 = rank.add_sheet('ranklist')
    for i in range(len(ranklist)):
        for j in range(6):
            sheet3.write(i,j,ranklist[i][j])
    # NOTE(review): this writes an .xls workbook under a .csv filename.
    rank.save('final.csv')
    #return ranklist

cf_ranklist(int(input()))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/1/6 11:51
# @Author : YJ@SHOU
# @File : read
# @Software: PyCharm
import threading
import os
import json
from bson.objectid import ObjectId
from pymongo import MongoClient
# Username and password of the source MongoDB instance
osm = MongoClient("mongodb://user:passwd@mongodb_url")
db = osm.ship
col = db.shipData_up
s = col.find()
#print(s)
for item in s:
    #item = {col.find({"MMSI"})}
    #print(item)
    # NOTE(review): `list` shadows the builtin of the same name.
    list=item["TRACK"]
    for i in range(0,len(list)):
        # NOTE(review): `items` is re-created on every iteration, so each
        # inserted document carries at most one track point — confirm this
        # is the intended granularity.
        items = {
            "MMSI": item['MMSI'],
            "TRACK": []
        }
        lists={}
        # Keep only points inside the lat (0, 40) / lon (20, 75) bounding box.
        if (float(list[i]['LAT']) > float(0) and float(list[i]['LAT']) < float(40)) and (
                float(list[i]['LON']) > float(20) and float(list[i]['LON']) < float(75)):
            lists = {
                "TIME": list[i]['TIME'],
                "LAT": list[i]['LAT'],
                "LON": list[i]['LON'],
                "WAY": float(list[i]['WAY']),
                "SPEED": float(list[i]['SPEED']),
                "SID": list[i]['SID']
            }
            items["TRACK"].append(lists)
            #print (list[i])
        # NOTE(review): a new client connection is opened for every point;
        # hoisting this above the loops would be far cheaper.
        osm = MongoClient("mongodb://user:passwd@mongodb_url")
        db = osm.ship
        coll = db.shipData_in
        #print(item)
        item['_id'] = ObjectId()
        coll.insert(items)
#Technology tree file |
# coding=UTF-8
import os
import glob

# Show every entry in the current directory, then dump each .py file's
# non-comment lines.
print(os.listdir('.'))
print("")
for file in glob.glob('*.py'):
    print(file[0:-3])  # module name without the ".py" suffix
    for line in open(file):
        if not line.strip().startswith('#'):
            print(line)
|
# −∗− coding: utf−8 −∗−
import pandas as pd
import re
from pytib import Segment
from grapheme import graphemes
from grapheme import length
'''
NLP stuffs take time to do. So have patience.
'''
sent_punct_re = "༄|༅|༆|༇|༈|།|༎|༏|༐|༑|༔|;|:"
particles_re = "གི|ཀྱི|གྱི|ཡི|གིས|ཀྱིས|གྱིས|ཡིས|སུ|ཏུ|དུ|རུ|སྟེ|ཏེ|དེ|ཀྱང|ཡང|འང|གམ|ངམ|དམ|ནམ|བམ|མམ|འམ|རམ|ལམ|སམ|ཏམ|གོ|ངོ|དོ|ནོ|མོ|འོ|རོ|ལོ|སོ|ཏོ|ཅིང|ཅེས|ཅེའོ|ཅེ་ན|ཅིག|ཞིང|ཞེས|ཞེའོ|ཞེ་ན|ཞིག|ཤིང|ཤེའོ|ཤེ་ན|ཤིག|ལ|ན|ནས|ལས|ནི|དང|གང|ཅི|ཇི|གིན|གྱིན|ཀྱིན|ཡིན|པ|བ|པོ|བོ"
stopwords = ["འི་", "གི་", "ཀྱི་", "གྱི་", "ཡི་", "གིས་", "ཀྱིས་", "གྱིས་",
"ཡིས་", "སུ་", "ཏུ་", "དུ་", "རུ་", "སྟེ་", "ཏེ་", "དེ་",
"ཀྱང་", "ཡང་", "འང་", "གམ་", "ངམ་", "དམ་", "ནམ་", "བམ་",
"མམ་", "འམ་", "རམ་", "ལམ་", "སམ་", "ཏམ་", "གོ་", "ངོ་",
"དོ་", "ནོ་", "མོ་", "འོ་", "རོ་", "ལོ་", "སོ་", "ཏོ་", "ཅིང་",
"ཅེས་", "ཅེའོ་", "ཅེ་ན་", "ཅིག་", "ཞིང་", "ཞེས་", "ཞེའོ་",
"ཞེ་ན་", "ཞིག་", "ཤིང་", "ཤེའོ་", "ཤེ་ན་", "ཤིག་", "ལ་", "ན་",
"ནས་", "ལས་", "ནི་", "དང་", "གང་", "ཅི་", "ཇི་", "གིན་",
"གྱིན་", "ཀྱིན་", "ཡིན་", "པ་", "བ་", "པོ་", "བ་ོ", "ར་", "ས་",
"མ་", "་_", "ལ", "ན"]
latins_re = r'[A-Z]|[a-z]|[0-9]|[\\$%&\'()*+,./:;<=>?@^_`[\]{}~]'
def ingest_text(filename, mode='blob'):
    """Read a file and return its contents.

    :param mode: 'blob' returns one string; any other value returns a
        list of lines.

    Bug fix: the original used ``mode is not "blob"`` — identity
    comparison against a string literal depends on interning (and is a
    SyntaxWarning on modern CPython); use equality. The file handle is
    now also closed promptly via a context manager.
    """
    with open(filename) as handle:
        return handle.read() if mode == 'blob' else handle.readlines()
def export(data, name='default', export_format='to_csv'):
    '''
    WHAT
    ----
    Takes data and exports it to a file on the local drive. Note that
    saving takes place in the present working directory and any file
    with the same name is automatically overwritten.

    PARAMS
    ------
    data: data in a list or dataframe or series
    name: output filename; 'default' derives one from the export format
    export_format: to_html, to_json, to_csv, to_excel, to_latex,
                   to_msgpack, to_sql, to_clipboard
    '''
    # Bug fix: `name is 'default'` compared object identity against a
    # string literal (interning-dependent); use equality instead.
    if name == 'default':
        file_type = export_format.split('_')[1]
        name = 'export_from_boke.' + file_type
    method_to_call = find_method(data, export_format)
    method_to_call(name)
def type_convert(data):
    """Coerce input into a DataFrame indexed by its first column.

    Data that is already a DataFrame is returned unchanged.
    """
    if isinstance(data, pd.core.frame.DataFrame):
        return data
    frame = pd.DataFrame(data)
    return frame.set_index(frame.columns[0])
def remove_latins(data):
    """Return only the rows whose index contains no Latin characters."""
    frame = type_convert(data)
    keep = frame.index.str.contains(latins_re) == False  # noqa: E712 (pandas mask)
    return frame[keep]
def find_method(data, function):
    """Resolve the named export method on the DataFrame-converted data."""
    return getattr(type_convert(data), function)
def show_latins(data):
    """Return only the rows whose index contains Latin characters."""
    frame = type_convert(data)
    return frame[frame.index.str.contains(latins_re) == True]  # noqa: E712
def create_meta(data):
    '''
    Accepts a frequency series/frame and outputs a dataframe with
    per-token meta-data columns.
    '''
    # new[~new.index.isin(particles)]
    meta = data.reset_index()
    meta.columns = ['text', 'count']
    meta['chars'] = meta.text.apply(length)  # grapheme count
    meta['bytes'] = meta.text.str.len()
    # A token without a tsheg is treated as sentence-ending (1/0 flag).
    meta['sentence_ending'] = (meta.text.str.contains('་') == False).astype(int)
    meta['stopword'] = meta.text.isin(stopwords)
    return meta
def text_to_chars(text):
    '''
    Takes as input Tibetan text, and creates a list of individual characters.
    '''
    return [g for g in graphemes(text)]
def text_to_syllables(text):
    '''
    Takes as input Tibetan text, and creates a list of individual syllables
    by splitting at the tsheg (་) delimiter.
    '''
    return text.split('་')
def text_to_words(text, mode='list'):
    '''
    Segment Tibetan text into words using pytib.

    OPTIONS
    -------
    mode: 'list' returns a list of words; any other value returns the
          whitespace-segmented string.
    '''
    cleaned = re.sub(sent_punct_re, "", text)
    seg = Segment()
    segmented = seg.segment(cleaned, uknown=0)
    # Bug fix: `mode is 'list'` compared object identity against a string
    # literal (interning-dependent); use equality instead.
    if mode == 'list':
        segmented = segmented.split()
    return segmented
def text_to_sentence(text):
    '''
    Takes as input Tibetan text, and creates a list of individual sentences
    by splitting at Tibetan sentence punctuation.
    '''
    return re.split(sent_punct_re, text)
def syllable_grams(data, grams=2, save_to_file=None):
    '''
    Takes in a list of syllables and creates syllable pairs.
    Note that there is no intelligence involved, so the syllable pairs
    might not result in actual words (even though they often do).
    OUTPUT: a list with the syllable pairs (and optionally saved file)
    OPTIONS: if 'save_to_file' is not None, need to be a filename.
    '''
    # NOTE(review): `grams` is accepted but never used — only pairs are built.
    entities = pd.DataFrame(data)
    entities.columns = ['text']
    l = []
    a = 0
    for i in entities['text']:
        a += 1
        # NOTE(review): `a` is incremented before use, so the pair starting
        # at index 0 is never produced, and the last iterations raise
        # KeyError and append "blank" — confirm this is intended.
        try:
            l.append(entities['text'][a] + " " + entities['text'][a + 1])
        except KeyError:
            l.append("blank")
    if save_to_file is not None:
        out = pd.Series(l)
        out.to_csv(save_to_file)
    return l
def syllable_counts(syllable_list):
    '''
    Count occurrences of each syllable entity (e.g. syllables or syllable
    pairs); returns a one-column DataFrame named "counts", most frequent
    first.
    '''
    counts = pd.Series(syllable_list).value_counts()
    frame = pd.DataFrame(counts)
    frame.columns = ['counts']
    return frame
def share_by_order(data):
    '''
    Takes as input a frequency dataframe with column name counts expected.
    Prints the share of the total held by the top 10/100/.../100000 rows.
    '''
    total = data['counts'].sum()
    print("Total syllable pairs : %d \n" % total)
    orders = [10, 100, 1000, 10000, 100000]
    for i in orders:
        # `total` is a numpy scalar here, hence .astype for float division
        share = data['counts'][:i].sum() / total.astype(float) * 100
        print(("TOP %d : %.2f%%") % (i, share))
|
# Read puzzle input: each line has the form "<min>-<max> <letter>: <password>"
# Fix: the file handle was opened without ever being closed; use a
# context manager.
input_arr = []
with open("input.txt") as input_file:
    for line in input_file:
        args = line.split(' ')
        counts = args[0].split('-')
        letter = args[1][0]
        password = args[2]
        input_arr.append((int(counts[0]), int(counts[1]), letter, password))
def count_valid_passwords(arr):
    """Policy 1: count entries whose letter occurs between min_count and
    max_count times (inclusive) in the password."""
    valid = 0
    for min_count, max_count, letter, password in arr:
        occurrences = password.count(letter)
        if min_count <= occurrences <= max_count:
            valid += 1
    return valid
def count_valid_password_2(arr):
    """Policy 2: exactly one of the two 1-indexed positions holds the letter."""
    valid = 0
    for i, j, letter, password in arr:
        # XOR: valid only when the letter appears at exactly one position.
        if (password[i - 1] == letter) != (password[j - 1] == letter):
            valid += 1
    return valid
if __name__ == "__main__":
    print(count_valid_passwords(input_arr))  # 586
    print(count_valid_password_2(input_arr))  # 352
def switch_dict(dic):
    """Invert a dict: old values become keys mapping to lists of the old
    keys that shared each value."""
    inverted = {}
    for key, val in dic.items():
        inverted.setdefault(val, []).append(key)
    return inverted
'''
In this kata, you will take the keys and values of a dictionary and swap them around.
You will be given a dictionary, and then you will want to return a dictionary with the old values as the keys, and list the old keys as values under their original keys.
For example, given the dictionary: {'Ice': 'Cream', 'Age': '21', 'Light': 'Cream', 'Double': 'Cream'},
you should return: {'Cream': ['Ice', 'Double', 'Light'], '21': ['Age']}
Notes:
The dictionary given will only contain strings
The dictionary given will not be empty
You do not have to sort the items in the lists
'''
|
#!/usr/bin/python2
import os
def unzip(file_list):
    """Extract each archive in file_list with the Windows 7-Zip CLI."""
    for file_name in file_list:
        cmd = "C:\\\"Program Files\"\\7-Zip\\7z.exe x " + file_name
        os.popen(cmd)
def unzip_all(filepath):
    """Recursively locate every .zip under filepath and extract them all."""
    archives = []
    get_files_by_type(filepath, "zip", archives)
    unzip(archives)
def get_files_by_type(filepath, filetype, ls):
    """Append to `ls` every file under `filepath` whose extension matches
    `filetype` (given without the leading dot, case-insensitive).

    Bug fix: the recursive call hard-coded "zip" instead of forwarding
    `filetype`, so searching for any other extension silently ignored
    subdirectories (and collected .zip files instead).
    """
    for entry in os.listdir(filepath):
        full_path = os.path.join(filepath, entry)
        if os.path.isdir(full_path):
            get_files_by_type(full_path, filetype, ls)
        elif full_path[full_path.rfind('.') + 1:].lower() == filetype:
            ls.append(full_path)
def get_keypvalue(attr,line):
    # Extract a key=value pair containing `attr` from `line`; returns
    # [key, value] with surrounding quotes stripped, or None.
    # NOTE(review): str.find() returns -1 when absent, which is truthy —
    # every `if(catch.find(...))` below is True both when the substring is
    # missing and when it appears at any position > 0. In particular, when
    # `attr` is absent the subsequent catch.index(attr) raises ValueError.
    # Confirm the intended semantics before relying on this helper.
    catch=line
    if(catch.find(attr)):
        idx=catch.index(attr)
        catch=catch[idx:]
    if(catch.find(' ')):
        idx=catch.find(' ')
        catch=catch[:idx]
    if(catch.find("=")):
        idx=catch.index("=")
        _key=str(catch[:idx]).lstrip("\"").rstrip("\"").lstrip("'").rstrip("'")
        _val=str(catch[idx+1:]).lstrip("\"").rstrip("\"").lstrip("'").rstrip("'")
        return [_key,_val]
    return None
def set_properties(dict_properties, prop_name, prop_value):
    """Overwrite prop_name only when it already exists with a non-None value."""
    existing = dict_properties.get(prop_name)
    if existing != None:  # noqa: E711 — preserve the original equality semantics
        dict_properties[prop_name] = prop_value
def merge_two_dicts(x, y):
    """Return a new dict with x's entries, overridden by y's where keys clash."""
    merged = dict(x)
    merged.update(y)
    return merged
# Get a specific key's value from a list composed of dicts.
# ex: li=[{'key':'x','value':'value1'},{'key':'y','value':'value2'}..
# get_value_by_key(li,'key','x','value') -> output: value1
def get_value_by_key(li, key_name, key_match, prop_name):
    """Return prop_name's value from the first dict whose key_name equals
    key_match; None when the list is empty, non-dict, or has no match."""
    if len(li) == 0:
        return None
    if type(li[0]) != dict:
        return None
    for entry in li:
        if entry.get(key_name) and entry.get(prop_name):
            if entry[key_name] == key_match:
                return entry[prop_name]
    return None
|
from threading import Thread
import random, time
class Consumer(Thread):
    """Python 2 consumer thread: pops numbers from a shared list used as a
    FIFO, blocking on a condition variable while it is empty."""
    def __init__(self, queue, condition, id):
        super(Consumer, self).__init__()
        self.queue = queue          # shared list acting as the work queue
        self.condition = condition  # guards every access to the queue
        self.id = id                # label used in log output
    def run(self):
        while True:
            self.condition.acquire()
            # NOTE(review): this should be `while not self.queue` — waits can
            # wake spuriously, and with several consumers another thread may
            # drain the queue between notify and pop, making pop(0) raise.
            if not self.queue:
                print "Nothing is in the queue, waiting to be notified..."
                self.condition.wait()
                print "Consumer %s got notified by the producer" % self.id
            num = self.queue.pop(0)
            print "Consumed %d by consumer %s" % (num, self.id)
            self.condition.release()
            time.sleep(random.random())
from dolfin import *
# 2-D variant kept for reference:
#mesh = RectangleMesh(Point(0,0,0), Point(1,1,1) ,5,5,5)
# Build a 5x5x5 hexahedral mesh of the unit cube.
mesh = BoxMesh(Point(0,0,0), Point(1,1,1) ,5,5,5)
print(mesh.coordinates())
# Scale every vertex coordinate in place by 1/1000 -- dolfin exposes the raw
# coordinate array, so this mutates the mesh geometry directly.
# NOTE(review): presumably a unit conversion (e.g. mm <-> m) -- confirm intent.
mesh.coordinates()[:,:] *= 0.001
print(mesh.coordinates())
|
# Generated by Django 2.0.3 on 2018-05-04 15:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Denormalise TakenCourse: drop the old `taken_course` field and store
    the course's title, url, category, category url and image directly on
    the row.

    Auto-generated by Django 2.0.3 on 2018-05-04.
    """

    dependencies = [
        ('web', '0045_auto_20180504_2041'),
    ]

    operations = [
        # Remove the old field that previously identified the course.
        migrations.RemoveField(
            model_name='takencourse',
            name='taken_course',
        ),
        # New nullable columns carrying the denormalised course data.
        migrations.AddField(
            model_name='takencourse',
            name='taken_category_url',
            field=models.CharField(max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='takencourse',
            name='taken_course_category',
            field=models.CharField(max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='takencourse',
            name='taken_course_image',
            field=models.ImageField(null=True, upload_to='User/TakenCourses'),
        ),
        migrations.AddField(
            model_name='takencourse',
            name='taken_course_title',
            field=models.CharField(max_length=255, null=True),
        ),
        # URL is unique: one TakenCourse row per course url.
        migrations.AddField(
            model_name='takencourse',
            name='taken_course_url',
            field=models.CharField(max_length=255, null=True, unique=True),
        ),
    ]
|
from django.apps import apps
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import serializers
from rest_framework.exceptions import ValidationError, NotFound
from rest_flex_fields import FlexFieldsModelSerializer
from ...menu.v1.serializers import OptionNestedSerializer
from ...users.v1.serializers import UserSignUpSerializer
from ..models import Order
class OrderSerializer(FlexFieldsModelSerializer):
    """Serializer for Order objects.

    Writes take only `option_id` (plus `additional_notes`); reads expose
    `option` and `user` as primary keys, expandable to nested
    representations via rest_flex_fields' `expand` query parameter.
    """

    # Write side: the client supplies the chosen option's UUID.
    option_id = serializers.UUIDField(write_only=True)
    # Read side: related objects as PKs (see expandable_fields below).
    option = serializers.PrimaryKeyRelatedField(read_only=True)
    user = serializers.PrimaryKeyRelatedField(read_only=True)
    # Notes are optional -- an empty string is accepted.
    additional_notes = serializers.CharField(allow_blank=True)

    class Meta:
        model = Order
        fields = ('id','option_id', 'additional_notes', 'created_at', 'updated_at', 'option', 'user')
        expandable_fields = {
            'option': OptionNestedSerializer,
            'user': UserSignUpSerializer
        }

    def create(self, validated_data):
        """Delegate creation to the Order manager's place_order helper."""
        return Order.objects.place_order( **validated_data)
|
import sys
sys.path.append('../500_common')
import lib_reserve
# Ad-hoc driver for lib_reserve.
# NOTE(review): "Chrome3res" / "Profile 3" presumably select a Chrome browser
# profile understood by lib_reserve.main -- confirm against that module.
a = "Chrome3res"
b = "Profile 3"
lib_reserve.main(a, b, None, waitTime=10)
from gym.envs.registration import register
# Register the custom home-energy environments with Gym so they can be
# instantiated via gym.make('homeenergy-v0') etc.
register(
    id='homeenergy-v0',
    entry_point='gym_homeenergy.envs:homeenergyEnv',
)
register(
    id='homeenergy-extrahard-v0',
    entry_point='gym_homeenergy.envs:homeenergyExtraHardEnv',
)
|
#! /usr/local/bin/python
import json
import netCDF4 as nc
import os
save_dir = '/Users/michaesm/Downloads/'
ncml_file='/Users/michaesm/Downloads/CE04OSPS-SF01B-2A-CTDPFA107-streamed-ctdpf_sbe43_sample-20150906T000000-20150909T120000.nc'
# ncml_file = 'http://opendap-devel.ooi.rutgers.edu:8090/thredds/dodsC/eov-3/Coastal_Endurance/CE04OSPS/2A-CTDPFA107/streamed/CE04OSPS-SF01B-2A-CTDPFA107-ctdpf_sbe43_sample-streamed/CE04OSPS-SF01B-2A-CTDPFA107-ctdpf_sbe43_sample-streamed.ncml'
# ncml_file = 'http://opendap-devel.ooi.rutgers.edu:8090/thredds/dodsC/eov-3/Coastal_Endurance/CE05MOAS/05-CTDGVM000/recovered_host/CE05MOAS-GL311-05-CTDGVM000-ctdgv_m_glider_instrument_recovered-recovered_host/CE05MOAS-GL311-05-CTDGVM000-recovered_host-ctdgv_m_glider_instrument_recovered-20141216T185931-20141216T185952.nc'
# ncml_file = '/Users/michaesm/Downloads/CE05MOAS-GL311-05-CTDGVM000-recovered_host-ctdgv_m_glider_instrument_recovered-20141216T185931-20141216T185952.nc'
# ncml_file = '/Users/michaesm/Downloads/CE04OSPS-SF01B-3A-FLORTD104-streamed-flort_d_data_record-20151005T000000-20151011T235959.nc'
# ncml_file = '/Users/michaesm/Downloads/CE04OSPS-SF01B-2A-CTDPFA107-streamed-ctdpf_sbe43_sample-20150805T120000-20150808T235959.nc'
# ncml_file = '/Users/michaesm/Downloads/CE04OSPS-SF01B-3A-FLORTD104-streamed-flort_d_data_record-20151207T000000-20151207T221815.nc'
def read_json(open_file, var_name):
    """Decode the JSON document stored in a netCDF char-array variable.

    Parameters
    ----------
    open_file : netCDF4.Dataset
        An open dataset.
    var_name : str
        Name of the char variable holding a JSON string.

    Returns the parsed object, or the sentinel string ``'n/a'`` when the
    variable is empty or not valid JSON.

    Bug fix: the original used a bare ``except:``, which swallowed even
    KeyboardInterrupt/SystemExit; only decoding-related errors are caught now.
    """
    qp = nc.chartostring(open_file.variables[var_name][:])
    try:
        return json.loads(qp[0])
    except (ValueError, TypeError, IndexError):
        # ValueError covers malformed JSON (JSONDecodeError subclasses it);
        # TypeError/IndexError cover an empty or non-string payload.
        return 'n/a'
def parse_json_response(content):
    """Recursively print every key/value pair of a (possibly nested) dict.

    Nested dict values are printed once as their repr, then expanded
    pair-by-pair via recursion.

    Portability fix: ``dict.iteritems()`` and the ``print`` statement are
    Python-2-only; ``.items()`` with a parenthesised print works under
    both Python 2 and 3.
    """
    for key, value in content.items():
        print(str(key) + ": " + str(value))
        if type(value) is dict:
            parse_json_response(value)
# --- Script body: dump each provenance variable's JSON to its own file ---
file_name = os.path.basename(ncml_file)
# Parenthesised prints run under both Python 2 and 3 (the originals were
# Python-2-only print statements).
print("Printing provenance information for " + file_name)
print("key : Value")
file_load = nc.Dataset(ncml_file)
# Every variable with 'provenance' in its name, excluding the '*keys*' helpers.
prov_list = [s for s in file_load.variables if 'provenance' in s]
prov_list = [s for s in prov_list if 'keys' not in s]
for var in prov_list:
    try:
        p_json = read_json(file_load, var)
    except ValueError:
        continue
    save_file = os.path.join(save_dir, file_name)
    # `with` guarantees the handle is closed even if json.dump raises
    # (the original leaked the open file on error).
    with open(save_file + "-" + var + ".json", "w") as outfile:
        json.dump(p_json, outfile)
|
class Schedualer():
    """Abstract base for schedulers.

    Note: the misspelled class name is part of the public API and is kept
    as-is. Subclasses must override ``_build``.
    """

    def __init__(self):
        # The program/graph this scheduler operates on; attached later.
        self._prog = None

    def _set_prog(self, prog):
        """Attach the program to schedule."""
        self._prog = prog

    def _build(self, learning_rate):
        """Construct the schedule for *learning_rate*; subclass responsibility."""
        raise NotImplementedError()
|
"""ImageLoader class.
"""
from napari.layers.image._image_slice_data import ImageSliceData
class ImageLoader:
    """Synchronous loader: every request is satisfied immediately on the
    calling thread."""

    def load(self, data: ImageSliceData) -> bool:
        """Perform the load right now, blocking until done.

        Parameters
        ----------
        data : ImageSliceData
            The slice data to load.

        Returns
        -------
        bool
            Always True: the load completed synchronously.
        """
        data.load_sync()
        return True

    def match(self, data: ImageSliceData) -> bool:
        """Whether *data* corresponds to a load this loader is handling.

        The synchronous loader has no pending asynchronous loads to
        disambiguate, so every request trivially matches.

        Returns
        -------
        bool
            Always True.
        """
        return True
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Network extension implementations
"""
from cliff import hooks
from openstack.network.v2 import network as network_sdk
from openstack import resource
from openstackclient.network.v2 import network
from osc_lib.cli import parseractions
from openstackclient.i18n import _
_get_attrs_network_new = network._get_attrs_network
def _get_attrs_network_extension(client_manager, parsed_args):
    """Wrap the stock ``_get_attrs_network`` and fold the APIC extension
    command-line options into the attribute dict sent to Neutron.

    Comma-separated string options are split into lists; *_enable /
    *_disable flag pairs map to a single boolean attribute. Options that
    exist only on some commands are guarded with ``'name' in parsed_args``
    (argparse.Namespace supports ``in``).
    """
    # Start from the attributes the unpatched implementation produces.
    attrs = _get_attrs_network_new(client_manager, parsed_args)
    # SVI enable/disable exist only on the create command, hence the guard.
    if ('apic_svi_enable' in parsed_args and
            parsed_args.apic_svi_enable):
        attrs['apic:svi'] = True
    if ('apic_svi_disable' in parsed_args and
            parsed_args.apic_svi_disable):
        attrs['apic:svi'] = False
    # BGP flags: enable wins first, then disable (both present on the parser).
    if parsed_args.apic_bgp_enable:
        attrs['apic:bgp_enable'] = True
    if parsed_args.apic_bgp_disable:
        attrs['apic:bgp_enable'] = False
    if parsed_args.apic_bgp_type:
        attrs['apic:bgp_type'] = parsed_args.apic_bgp_type
    if parsed_args.apic_bgp_asn:
        attrs['apic:bgp_asn'] = parsed_args.apic_bgp_asn
    if parsed_args.apic_nested_domain_name:
        attrs['apic:nested_domain_name'
              ] = parsed_args.apic_nested_domain_name
    if parsed_args.apic_nested_domain_type:
        attrs['apic:nested_domain_type'
              ] = parsed_args.apic_nested_domain_type
    if parsed_args.apic_nested_domain_infra_vlan:
        attrs['apic:nested_domain_infra_vlan'
              ] = parsed_args.apic_nested_domain_infra_vlan
    if parsed_args.apic_nested_domain_service_vlan:
        attrs['apic:nested_domain_service_vlan'
              ] = parsed_args.apic_nested_domain_service_vlan
    if parsed_args.apic_nested_domain_node_network_vlan:
        attrs['apic:nested_domain_node_network_vlan'
              ] = parsed_args.apic_nested_domain_node_network_vlan
    # Allowed vlans arrive as "1,2,3" and must be sent as a list of ints.
    if parsed_args.apic_nested_domain_allowed_vlans:
        attrs['apic:nested_domain_allowed_vlans'
              ] = list(map(int,
                           parsed_args.apic_nested_domain_allowed_vlans.split(
                               ",")))
    # For contracts, an explicit empty string means "reset to []", so the
    # None check and the truthiness check are both required.
    if parsed_args.apic_extra_provided_contracts is not None:
        if parsed_args.apic_extra_provided_contracts:
            attrs['apic:extra_provided_contracts'
                  ] = parsed_args.apic_extra_provided_contracts.split(",")
        else:
            attrs['apic:extra_provided_contracts'] = []
    if parsed_args.apic_extra_consumed_contracts is not None:
        if parsed_args.apic_extra_consumed_contracts:
            attrs['apic:extra_consumed_contracts'
                  ] = parsed_args.apic_extra_consumed_contracts.split(",")
        else:
            attrs['apic:extra_consumed_contracts'] = []
    if parsed_args.apic_epg_contract_masters:
        attrs['apic:epg_contract_masters'
              ] = parsed_args.apic_epg_contract_masters.split(",")
    # Distinguished names come from MultiKeyValueAction as a list of dicts;
    # merge them into a single mapping.
    if ('apic_distinguished_names' in parsed_args and
            parsed_args.apic_distinguished_names):
        result = {}
        for element in parsed_args.apic_distinguished_names:
            result.update(element)
        attrs['apic:distinguished_names'] = result
    if parsed_args.apic_policy_enforcement_pref:
        attrs['apic:policy_enforcement_pref'
              ] = parsed_args.apic_policy_enforcement_pref
    if parsed_args.apic_no_nat_cidrs:
        attrs['apic:no_nat_cidrs'] = parsed_args.apic_no_nat_cidrs.split(",")
    # --no-apic-no-nat-cidrs resets the list (set command only).
    if ('no_apic_no_nat_cidrs' in parsed_args and
            parsed_args.no_apic_no_nat_cidrs):
        attrs['apic:no_nat_cidrs'] = []
    if ('apic_multi_ext_nets' in parsed_args and
            parsed_args.apic_multi_ext_nets):
        attrs['apic:multi_ext_nets'] = parsed_args.apic_multi_ext_nets
    # NAT type and external CIDRs only make sense for external networks.
    if parsed_args.external:
        if ('apic_nat_type' in parsed_args and
                parsed_args.apic_nat_type is not None):
            attrs['apic:nat_type'] = parsed_args.apic_nat_type
        if parsed_args.apic_external_cidrs:
            attrs['apic:external_cidrs'
                  ] = parsed_args.apic_external_cidrs.split(",")
        if ('apic_no_external_cidrs' in parsed_args and
                parsed_args.apic_no_external_cidrs):
            attrs['apic:external_cidrs'] = []
    return attrs
# Install the extended attribute builder in place of the stock one so every
# network create/set command picks up the APIC options.
network._get_attrs_network = _get_attrs_network_extension

# Teach the SDK's Network resource the APIC extension attributes so the REST
# layer serialises/deserialises the 'apic:*' body fields.
network_sdk.Network.apic_synchronization_state = resource.Body(
    'apic:synchronization_state')
network_sdk.Network.apic_svi = resource.Body('apic:svi')
network_sdk.Network.apic_bgp = resource.Body('apic:bgp_enable')
network_sdk.Network.apic_bgp_type = resource.Body('apic:bgp_type')
network_sdk.Network.apic_bgp_asn = resource.Body('apic:bgp_asn')
network_sdk.Network.apic_nested_domain_name = resource.Body(
    'apic:nested_domain_name')
network_sdk.Network.apic_nested_domain_type = resource.Body(
    'apic:nested_domain_type')
network_sdk.Network.apic_nested_domain_infra_vlan = resource.Body(
    'apic:nested_domain_infra_vlan')
network_sdk.Network.apic_nested_domain_service_vlan = resource.Body(
    'apic:nested_domain_service_vlan')
network_sdk.Network.apic_nested_domain_node_network_vlan = resource.Body(
    'apic:nested_domain_node_network_vlan')
network_sdk.Network.apic_nested_domain_allowed_vlans = resource.Body(
    'apic:nested_domain_allowed_vlans')
network_sdk.Network.apic_extra_provided_contracts = resource.Body(
    'apic:extra_provided_contracts')
network_sdk.Network.apic_extra_consumed_contracts = resource.Body(
    'apic:extra_consumed_contracts')
network_sdk.Network.apic_epg_contract_masters = resource.Body(
    'apic:epg_contract_masters')
network_sdk.Network.apic_distinguished_names = resource.Body(
    'apic:distinguished_names')
network_sdk.Network.apic_policy_enforcement_pref = resource.Body(
    'apic:policy_enforcement_pref')
network_sdk.Network.apic_nat_type = resource.Body('apic:nat_type')
network_sdk.Network.apic_external_cidrs = resource.Body('apic:external_cidrs')
network_sdk.Network.apic_no_nat_cidrs = resource.Body('apic:no_nat_cidrs')
network_sdk.Network.apic_multi_ext_nets = resource.Body('apic:multi_ext_nets')
class CreateNetworkExtension(hooks.CommandHook):
    """cliff command hook extending ``network create`` with the APIC options.

    Only ``get_parser`` does real work; the before/after hooks are no-ops
    required by the CommandHook interface.
    """

    def get_parser(self, parser):
        """Register every APIC-specific argument on the create parser."""
        parser.add_argument(
            '--apic-svi-enable',
            action='store_true',
            default=None,
            dest='apic_svi_enable',
            help=_("Set APIC SVI to true\n"
                   "Default value for apic_svi is False ")
        )
        parser.add_argument(
            '--apic-svi-disable',
            action='store_true',
            dest='apic_svi_disable',
            help=_("Set APIC SVI to false\n"
                   "Default value for apic_svi is False ")
        )
        parser.add_argument(
            '--apic-bgp-enable',
            action='store_true',
            default=None,
            dest='apic_bgp_enable',
            help=_("Set APIC BGP to true\n"
                   "Default value for apic_bgp is False ")
        )
        parser.add_argument(
            '--apic-bgp-disable',
            action='store_true',
            dest='apic_bgp_disable',
            help=_("Set APIC BGP to false\n"
                   "Default value for apic_bgp is False ")
        )
        parser.add_argument(
            '--apic-bgp-type',
            metavar="<string>",
            dest='apic_bgp_type',
            help=_("APIC BGP Type\n"
                   "Default value is 'default_export'\n"
                   "Valid values: default_export, '' ")
        )
        parser.add_argument(
            '--apic-bgp-asn',
            metavar="<integer>",
            dest='apic_bgp_asn',
            help=_("APIC BGP ASN\n"
                   "Default value is 0\n"
                   "Valid values: non negative integer ")
        )
        parser.add_argument(
            '--apic-nested-domain-name',
            metavar="<string>",
            dest='apic_nested_domain_name',
            help=_("APIC nested domain name\n"
                   "Default value is '' ")
        )
        parser.add_argument(
            '--apic-nested-domain-type',
            metavar="<string>",
            dest='apic_nested_domain_type',
            help=_("APIC nested domain type\n"
                   "Default value is '' ")
        )
        parser.add_argument(
            '--apic-nested-domain-infra-vlan',
            metavar="<integer>",
            dest='apic_nested_domain_infra_vlan',
            help=_("APIC nested domain infra vlan\n"
                   "Valid values: integer between 1 and 4093 ")
        )
        parser.add_argument(
            '--apic-nested-domain-service-vlan',
            metavar="<integer>",
            dest='apic_nested_domain_service_vlan',
            help=_("APIC nested domain service vlan\n"
                   "Valid values: integer between 1 and 4093 ")
        )
        parser.add_argument(
            '--apic-nested-domain-node-network-vlan',
            metavar="<integer>",
            dest='apic_nested_domain_node_network_vlan',
            help=_("APIC nested domain node network vlan\n"
                   "Valid values: integer between 1 and 4093 ")
        )
        parser.add_argument(
            '--apic-nested-domain-allowed-vlans',
            metavar="<int,int>",
            dest='apic_nested_domain_allowed_vlans',
            help=_("APIC nested domain allowed vlans\n"
                   "Data is passed as comma separated integers\n"
                   "Valid values: integers between 1 and 4093\n"
                   "Syntax Example: 1 or 1,2 ")
        )
        parser.add_argument(
            '--apic-extra-provided-contracts',
            metavar="<aaa,bbb>",
            dest='apic_extra_provided_contracts',
            help=_("APIC extra provided contracts\n"
                   "Data is passed as comma separated strings\n"
                   "Default value is []\n"
                   "Valid values: list of unique strings\n"
                   "Syntax Example: foo or foo,bar ")
        )
        parser.add_argument(
            '--apic-extra-consumed-contracts',
            metavar="<aaa,bbb>",
            dest='apic_extra_consumed_contracts',
            help=_("APIC extra consumed contracts\n"
                   "Data is passed as comma separated strings\n"
                   "Default value is []\n"
                   "Valid values: list of unique strings\n"
                   "Syntax Example: foo or foo,bar ")
        )
        parser.add_argument(
            '--apic-epg-contract-masters',
            metavar="<aaa,bbb>",
            dest='apic_epg_contract_masters',
            help=_("APIC epg contract masters\n"
                   "Data is passed as comma separated strings\n"
                   "Default value is []\n"
                   "Syntax Example: foo or foo,bar ")
        )
        # MultiKeyValueAction yields a list of {key: value} dicts; the attr
        # builder merges them into one mapping.
        parser.add_argument(
            '--apic-distinguished-names',
            metavar="<ExternalNetwork=aaa,BridgeDomain=bbb>",
            dest='apic_distinguished_names',
            action=parseractions.MultiKeyValueAction,
            optional_keys=['ExternalNetwork', 'BridgeDomain'],
            help=_("APIC distinguished names\n"
                   "Custom data to be passed as apic:distinguished_names\n"
                   "Data is passed as <key>=<value>, where "
                   "valid keys are 'ExternalNetwork' and 'BridgeDomain'\n"
                   "Both the keys are optional\n"
                   "Syntax Example: BridgeDomain=aaa or ExternalNetwork=bbb "
                   "or ExternalNetwork=aaa,BridgeDomain=bbb ")
        )
        parser.add_argument(
            '--apic-policy-enforcement-pref',
            metavar="<string>",
            dest='apic_policy_enforcement_pref',
            help=_("APIC Policy Enforcement Pref\n"
                   "Default value is 'unenforced'\n"
                   "Valid values: unenforced, enforced, '' ")
        )
        parser.add_argument(
            '--apic-nat-type',
            metavar="<string>",
            dest='apic_nat_type',
            help=_("APIC nat type for external network\n"
                   "For external type networks only\n"
                   "Default value is 'distributed'\n"
                   "Valid values: distributed, edge, '' ")
        )
        parser.add_argument(
            '--apic-external-cidrs',
            metavar="<subnet1,subnet2>",
            dest='apic_external_cidrs',
            help=_("APIC external CIDRS for external network\n"
                   "For external type networks only\n"
                   "Data is passed as comma separated valid ip subnets\n"
                   "Default value is ['0.0.0.0/0']\n"
                   "Syntax Example: 10.10.10.0/24 "
                   "or 10.10.10.0/24,20.20.20.0/24 ")
        )
        parser.add_argument(
            '--apic-no-nat-cidrs',
            metavar="<subnet1,subnet2>",
            dest='apic_no_nat_cidrs',
            help=_("APIC CIDRS for a network to config no NAT routing\n"
                   "Data is passed as comma separated valid ip subnets\n"
                   "Default value is []\n"
                   "Syntax Example: 10.10.10.0/24 "
                   "or 10.10.10.0/24,20.20.20.0/24 ")
        )
        parser.add_argument(
            '--apic-multi-ext-nets',
            action='store_true',
            default=None,
            dest='apic_multi_ext_nets',
            help=_("Makes it possible to associate multi external networks "
                   "with a single L3Outside\n"
                   "Default value is False ")
        )
        return parser

    def get_epilog(self):
        """No additional epilog text."""
        return ''

    def before(self, parsed_args):
        """No-op pre-command hook."""
        return parsed_args

    def after(self, parsed_args, return_code):
        """No-op post-command hook."""
        return return_code
class SetNetworkExtension(hooks.CommandHook):
    """cliff command hook extending ``network set`` with the APIC options.

    Mirrors CreateNetworkExtension minus the create-only flags (SVI,
    nat type, multi-ext-nets) and adds the reset switches
    ``--apic-no-external-cidrs`` and ``--no-apic-no-nat-cidrs``.
    """

    def get_parser(self, parser):
        """Register every APIC-specific argument on the set parser."""
        parser.add_argument(
            '--apic-bgp-enable',
            action='store_true',
            default=None,
            dest='apic_bgp_enable',
            help=_("Set APIC BGP to true\n"
                   "Default value for apic_bgp is False ")
        )
        parser.add_argument(
            '--apic-bgp-disable',
            action='store_true',
            dest='apic_bgp_disable',
            help=_("Set APIC BGP to false\n"
                   "Default value for apic_bgp is False ")
        )
        parser.add_argument(
            '--apic-bgp-type',
            metavar="<string>",
            dest='apic_bgp_type',
            help=_("APIC BGP Type\n"
                   "Default value is 'default_export'\n"
                   "Valid values: default_export, '' ")
        )
        parser.add_argument(
            '--apic-bgp-asn',
            metavar="<integer>",
            dest='apic_bgp_asn',
            help=_("APIC BGP ASN\n"
                   "Default value is 0\n"
                   "Valid values: non negative integer ")
        )
        parser.add_argument(
            '--apic-nested-domain-name',
            metavar="<string>",
            dest='apic_nested_domain_name',
            help=_("APIC nested domain name\n"
                   "Default value is '' ")
        )
        parser.add_argument(
            '--apic-nested-domain-type',
            metavar="<string>",
            dest='apic_nested_domain_type',
            help=_("APIC nested domain type\n"
                   "Default value is '' ")
        )
        parser.add_argument(
            '--apic-nested-domain-infra-vlan',
            metavar="<integer>",
            dest='apic_nested_domain_infra_vlan',
            help=_("APIC nested domain infra vlan\n"
                   "Valid values: integer between 1 and 4093 ")
        )
        parser.add_argument(
            '--apic-nested-domain-service-vlan',
            metavar="<integer>",
            dest='apic_nested_domain_service_vlan',
            help=_("APIC nested domain service vlan\n"
                   "Valid values: integer between 1 and 4093 ")
        )
        parser.add_argument(
            '--apic-nested-domain-node-network-vlan',
            metavar="<integer>",
            dest='apic_nested_domain_node_network_vlan',
            help=_("APIC nested domain node network vlan\n"
                   "Valid values: integer between 1 and 4093 ")
        )
        parser.add_argument(
            '--apic-nested-domain-allowed-vlans',
            metavar="<int,int>",
            dest='apic_nested_domain_allowed_vlans',
            help=_("APIC nested domain allowed vlans. "
                   "Data is passed as comma separated integers\n"
                   "Valid values: integers between 1 and 4093\n"
                   "Syntax Example: 1 or 1,2 ")
        )
        parser.add_argument(
            '--apic-extra-provided-contracts',
            metavar="<aaa,bbb>",
            dest='apic_extra_provided_contracts',
            help=_("APIC extra provided contracts\n"
                   "Data is passed as comma separated of strings.\n"
                   "Default value is []\n"
                   "Valid values: list of unique strings\n"
                   "Syntax Example: foo or foo,bar ")
        )
        parser.add_argument(
            '--apic-extra-consumed-contracts',
            metavar="<aaa,bbb>",
            dest='apic_extra_consumed_contracts',
            help=_("APIC extra consumed contracts\n"
                   "Data is passed as comma separated strings\n"
                   "Default value is []\n"
                   "Valid values: list of unique strings\n"
                   "Syntax Example: foo or foo,bar ")
        )
        parser.add_argument(
            '--apic-epg-contract-masters',
            metavar="<aaa,bbb,ccc>",
            dest='apic_epg_contract_masters',
            help=_("APIC epg contract masters\n"
                   "Data is passed as comma separated strings\n"
                   "Default value is []\n"
                   "Syntax Example: foo or foo,bar ")
        )
        parser.add_argument(
            '--apic-policy-enforcement-pref',
            metavar="<string>",
            dest='apic_policy_enforcement_pref',
            help=_("APIC Policy Enforcement Pref\n"
                   "Default value is 'unenforced'\n"
                   "Valid values: unenforced, enforced, '' ")
        )
        parser.add_argument(
            '--apic-external-cidrs',
            metavar="<subnet1,subnet2>",
            dest='apic_external_cidrs',
            help=_("APIC external CIDRS for external network\n"
                   "For external type networks only\n"
                   "Data is passed as comma separated valid ip subnets\n"
                   "Need to pass the --external argument wth this field\n"
                   "Default value is ['0.0.0.0/0']\n"
                   "Syntax Example: 10.10.10.0/24 "
                   "or 10.10.10.0/24,20.20.20.0/24 ")
        )
        # Reset switch: clears apic:external_cidrs back to its default.
        parser.add_argument(
            '--apic-no-external-cidrs',
            dest='apic_no_external_cidrs',
            action='store_true',
            help=_("Reset APIC external CIDRS for external network\n"
                   "For external type networks only\n"
                   "Need to pass the --external argument wth this field\n"
                   "Resets the apic:external_cidrs field to 0.0.0.0/0 ")
        )
        parser.add_argument(
            '--apic-no-nat-cidrs',
            metavar="<subnet1,subnet2>",
            dest='apic_no_nat_cidrs',
            help=_("APIC CIDRS for a network to config no NAT routing\n"
                   "Data is passed as comma separated valid ip subnets\n"
                   "Default value is []\n"
                   "Syntax Example: 10.10.10.0/24 "
                   "or 10.10.10.0/24,20.20.20.0/24 ")
        )
        # Reset switch: clears apic:no_nat_cidrs.
        parser.add_argument(
            '--no-apic-no-nat-cidrs',
            dest='no_apic_no_nat_cidrs',
            action='store_true',
            help=_("Reset APIC no NAT CIDRS for a network\n"
                   "Resets the apic:no_nat_cidrs field to []")
        )
        return parser

    def get_epilog(self):
        """No additional epilog text."""
        return ''

    def before(self, parsed_args):
        """No-op pre-command hook."""
        return parsed_args

    def after(self, parsed_args, return_code):
        """No-op post-command hook."""
        return return_code
class ShowNetworkExtension(hooks.CommandHook):
    """cliff command hook for ``network show``.

    The extension attributes are surfaced through the patched SDK resource
    (see the resource.Body assignments above this class), so the show
    command itself needs no extra arguments -- every hook is a no-op.
    """

    def get_parser(self, parser):
        # No extra CLI arguments for show.
        return parser

    def get_epilog(self):
        return ''

    def before(self, parsed_args):
        return parsed_args

    def after(self, parsed_args, return_code):
        return return_code
|
# Code from https://github.com/marcoconti83/read-ods-with-odfpy
# FILE: ODSReader.py
# Copyright 2011 Marco Conti
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTICE OF CHANGES:
# - removed getSheet method
# - renamed ODSReader.SHEETS to ODSReader.sheets
# - read out c.data instead of n.data in line 82
# Thanks to grt for the fixes
import odf.opendocument
from odf.table import Table, TableRow, TableCell
from odf.text import P
# http://stackoverflow.com/a/4544699/1846474
class GrowingList(list):
    """List that grows on assignment: writing past the end pads the gap
    with None instead of raising IndexError.

    See http://stackoverflow.com/a/4544699/1846474
    """

    def __setitem__(self, index, value):
        shortfall = index + 1 - len(self)
        if shortfall > 0:
            # Pad with None up to (and including) the target slot.
            self.extend([None] * shortfall)
        super(GrowingList, self).__setitem__(index, value)
class ODSReader:
    """Reader for OpenDocument spreadsheets (.ods).

    Parses every sheet of the document into ``self.sheets``: a dict mapping
    sheet name -> list of rows, each row a list of cell strings. Cells whose
    text starts with '#' are treated as comments and excluded.
    """

    # loads the file
    def __init__(self, file, clonespannedcolumns=None):
        # When truthy, a cell spanning N columns is duplicated N times in
        # the output row instead of appearing once.
        self.clonespannedcolumns = clonespannedcolumns
        self.doc = odf.opendocument.load(file)
        self.sheets = {}
        for sheet in self.doc.spreadsheet.getElementsByType(Table):
            self.readSheet(sheet)

    # reads a sheet in the sheet dictionary, storing each sheet as an
    # array (rows) of arrays (columns)
    def readSheet(self, sheet):
        name = sheet.getAttribute("name")
        rows = sheet.getElementsByType(TableRow)
        arrRows = []
        # for each row
        for row in rows:
            row_comment = ""
            # GrowingList lets us assign by column index even when empty
            # (repeated) cells create gaps.
            arrCells = GrowingList()
            cells = row.getElementsByType(TableCell)
            # for each cell
            count = 0
            for cell in cells:
                # ODS compresses consecutive identical cells into one node
                # with a repeat count.
                repeat = cell.getAttribute("numbercolumnsrepeated")
                if(not repeat):
                    repeat = 1
                spanned = int(cell.getAttribute('numbercolumnsspanned') or 0)
                # clone spanned cells
                if self.clonespannedcolumns is not None and spanned > 1:
                    repeat = spanned
                ps = cell.getElementsByType(P)
                textContent = ""
                # Concatenate the text of every text/text:span node;
                # nodeType 1 = element, nodeType 3 = text.
                for p in ps:
                    for n in p.childNodes:
                        if (n.nodeType == 1 and n.tagName == "text:span"):
                            for c in n.childNodes:
                                if (c.nodeType == 3):
                                    textContent = u'{}{}'.format(textContent, c.data)
                        if (n.nodeType == 3):
                            textContent = u'{}{}'.format(textContent, n.data)
                if(textContent):
                    if(textContent[0] != "#"):  # ignore comments cells
                        for rr in range(int(repeat)):  # repeated?
                            arrCells[count]=textContent
                            count+=1
                    else:
                        row_comment = row_comment + textContent + " "
                else:
                    # Empty cell: advance the column index without storing.
                    for rr in range(int(repeat)):
                        count+=1
            # if row contained something
            if(len(arrCells)):
                arrRows.append(arrCells)
            #else:
            #    print ("Empty or commented row (", row_comment, ")")
        self.sheets[name] = arrRows
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 19 11:29:23 2019
@author: Yoeri
"""
import pygame
from pygame.sprite import Sprite
class Bullet(Sprite):
    """A single bullet fired from the ship."""

    def __init__(self, ai_game):
        """Spawn a bullet at the ship's current position."""
        super().__init__()
        self.screen = ai_game.screen
        self.settings = ai_game.settings
        self.color = self.settings.bullet_color

        # Build the rect at the origin, then snap it to the ship's nose.
        width = self.settings.bullet_width
        height = self.settings.bullet_height
        self.rect = pygame.Rect(0, 0, width, height)
        self.rect.midtop = ai_game.ship.rect.midtop

        # Track the vertical position as a float for sub-pixel movement.
        self.y = float(self.rect.y)

    def update(self):
        """Advance the bullet up the screen."""
        self.y -= self.settings.bullet_speed
        # Sync the integer rect used for drawing/collisions.
        self.rect.y = self.y

    def draw_bullet(self):
        """Render the bullet onto the screen surface."""
        pygame.draw.rect(self.screen, self.color, self.rect)
|
from flask import Blueprint
# Blueprint for the management section of the app.
manage = Blueprint('manage',__name__)
# Imported for its side effect: registers the view routes on `manage`.
# Must come after the Blueprint is created (circular-import guard).
from . import manage_view
|
import logging
_LOGGER = logging.getLogger(__name__)
class PoolData:
    """Mutable per-config-entry container shared between platforms."""

    def __init__(self, config_entry):
        # Entity registries, filled in as each platform sets up.
        self.sensors = set()
        self.binary_sensors = set()
        self.switches = set()
        self.config_entry = config_entry
        # Populated once the pool has been fetched from the API.
        self.pool = None
class CurrentPoolDataResponse:
    """Thin wrapper extracting the pool id from a 'current pool' payload."""

    def __init__(self, data):
        # Bug fix: the original had the no-op `data = data`; keep the raw
        # payload on the instance instead so it remains inspectable.
        self.data = data
        self.pid = data["id"]
class PoolDataResponse:
    """Flattens the nested pool-status payload into a list of Field objects."""

    def __init__(self, data):
        # Hoist the nested sections once instead of re-indexing per field.
        main = data["main"]
        filtration = data["filtration"]
        light = data["light"]
        ph = data["modules"]["ph"]
        rx = data["modules"]["rx"]
        self.data_fields = [
            Field("temperature", main["temperature"], "C"),
            Field("filtration_type", filtration["type"]),
            Field("filtration_state", filtration["onoff"]),
            Field("light_type", light["type"]),
            Field("light_status", light["status"]),
            Field("ph", ph["currentValue"]),
            Field("ph_target", ph["status"]["hi_value"]),
            Field("ph_relay_state", ph["status"]["status"]),
            Field("ph_color", ph["status"]["color"]["hex"]),
            Field("rx", rx["currentValue"]),
            Field("rx_target", rx["status"]["value"]),
            Field("rx_relay_state", rx["status"]["relayStatus"]["status"]),
            Field("rx_color", rx["status"]["color"]["hex"]),
        ]
        for field in self.data_fields:
            _LOGGER.debug("%s", str(field))
class Field:
    """A named measurement with an optional unit suffix."""

    def __init__(self, name: str, value, unit: str = ""):
        self.name = name
        self.value = value
        self.unit = unit

    def __str__(self):
        # "name value" with the unit appended directly (no space), e.g. "temp 21C".
        text = "{} {}".format(self.name, self.value)
        if self.unit is not None:
            text += self.unit
        return text
class Pool:
    """A single pool record parsed from the API's pool listing."""

    def __init__(self):
        self.pid = ""
        self.ref = ""
        self.location = ""
        self.connected = 0
        self.name = ""

    def parse(self, data):
        """Populate the fields from an API payload dict.

        Missing keys become None (dict.get semantics).
        """
        for attr, key in (("pid", "id"),
                          ("ref", "reference"),
                          ("location", "location"),
                          ("connected", "connected"),
                          ("name", "name")):
            setattr(self, attr, data.get(key))

    def __str__(self):
        return str(self.__dict__)
class PoolResponse:
    """Container for the pools returned by the API listing endpoint."""

    def __init__(self):
        self.pools = []

    def parse(self, data):
        """Build one Pool per entry under data["pools"]."""
        for raw in data.get("pools"):
            parsed = Pool()
            parsed.parse(raw)
            self.pools.append(parsed)
|
def checkList(L):
    """Return True if L is exactly a permutation of 1..len(L).

    Fixes over the original:
    - the caller's list is no longer sorted in place (works on a copy);
    - an empty list returns False instead of raising IndexError.
    """
    if not L:
        return False
    ordered = sorted(L)  # sorted() copies; L.sort() mutated the argument
    # Consecutive, distinct, starting at 1  <=>  equals [1, 2, ..., n].
    return ordered[0] == 1 and ordered == list(range(1, len(ordered) + 1))
def solution(A):
    """Count the prefixes of A that form a complete set {1..k}.

    Counting only begins once a 1 has appeared in the stream; before that
    no prefix can qualify. Returns 0 for an empty input.
    """
    if not A:
        return 0
    seen_one = False
    prefix = []
    hits = 0
    for value in A:
        prefix.append(value)
        if value == 1:
            seen_one = True
        if seen_one and checkList(prefix):
            hits += 1
    return hits
# Ad-hoc smoke tests: print the number of qualifying prefixes for a few inputs.
print(str(solution([2, 1, 3, 5, 4])))
print(str(solution([2, 3, 4, 1, 5])))
print(str(solution([1, 3, 4, 2, 5])))
|
$NetBSD: patch-src_blockdiag_tests_test__rst__directives.py,v 1.1 2011/12/12 11:33:28 obache Exp $
* Fix testcase using 'with' statement are failed in python2.5 environment
https://bitbucket.org/tk0miya/blockdiag/changeset/f078235db9b9
--- src/blockdiag/tests/test_rst_directives.py.orig 2011-11-21 02:41:13.000000000 +0000
+++ src/blockdiag/tests/test_rst_directives.py
@@ -5,7 +5,7 @@ import os
import sys
import tempfile
import unittest2
-from utils import stderr_wrapper
+from utils import stderr_wrapper, assertRaises
from docutils import nodes
from docutils.core import publish_doctree
from docutils.parsers.rst import directives as docutils
@@ -150,20 +150,20 @@ class TestRstDirectives(unittest2.TestCa
self.assertFalse('target' in doctree[0])
@use_tmpdir
+ @assertRaises(RuntimeError)
def test_rst_directives_with_block_fontpath1(self, path):
- with self.assertRaises(RuntimeError):
- directives.setup(format='SVG', fontpath=['dummy.ttf'],
- outputdir=path)
- text = ".. blockdiag::\n :alt: hello world\n\n { A -> B }"
- doctree = publish_doctree(text)
+ directives.setup(format='SVG', fontpath=['dummy.ttf'],
+ outputdir=path)
+ text = ".. blockdiag::\n :alt: hello world\n\n { A -> B }"
+ doctree = publish_doctree(text)
@use_tmpdir
+ @assertRaises(RuntimeError)
def test_rst_directives_with_block_fontpath2(self, path):
- with self.assertRaises(RuntimeError):
- directives.setup(format='SVG', fontpath='dummy.ttf',
- outputdir=path)
- text = ".. blockdiag::\n :alt: hello world\n\n { A -> B }"
- doctree = publish_doctree(text)
+ directives.setup(format='SVG', fontpath='dummy.ttf',
+ outputdir=path)
+ text = ".. blockdiag::\n :alt: hello world\n\n { A -> B }"
+ doctree = publish_doctree(text)
@use_tmpdir
def test_rst_directives_with_block_maxwidth(self, path):
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-24 10:47
from __future__ import unicode_literals
from django.db import migrations, models
import mezzanine.core.fields
def _translated_field_operations():
    """Yield one AddField per (model, base field, language) combination.

    Replaces 40 hand-written, near-identical AddField clauses with a
    data-driven loop; the generated operations are identical to the
    original generated migration (same models, names, field kwargs and
    ordering: all languages of one base field, then the next field).
    """
    choices_help = (
        'Comma separated options where applicable. If an option itself '
        'contains commas, surround the option with `backticks`.'
    )
    email_help = (
        'Emails sent based on the above options will contain each of the '
        'form fields entered. You can also enter a message here that will '
        'be included in the email.'
    )
    # (model, base field name, factory producing a fresh field instance).
    # A factory is required because each AddField needs its own field object.
    specs = [
        ('field', 'choices', lambda: models.CharField(
            blank=True, help_text=choices_help, max_length=1000,
            null=True, verbose_name='Choices')),
        ('field', 'default', lambda: models.CharField(
            blank=True, max_length=2000, null=True,
            verbose_name='Default value')),
        ('field', 'help_text', lambda: models.TextField(
            blank=True, null=True, verbose_name='Help text')),
        ('field', 'label', lambda: models.TextField(
            null=True, verbose_name='Label')),
        ('field', 'placeholder_text', lambda: models.CharField(
            blank=True, max_length=100, null=True,
            verbose_name='Placeholder Text')),
        ('form', 'button_text', lambda: models.CharField(
            blank=True, max_length=50, null=True,
            verbose_name='Button text')),
        ('form', 'content', lambda: mezzanine.core.fields.RichTextField(
            null=True, verbose_name='Content')),
        ('form', 'email_message', lambda: models.TextField(
            blank=True, help_text=email_help, null=True,
            verbose_name='Message')),
        ('form', 'email_subject', lambda: models.CharField(
            blank=True, max_length=200, null=True,
            verbose_name='Subject')),
        ('form', 'response', lambda: mezzanine.core.fields.RichTextField(
            null=True, verbose_name='Response')),
    ]
    for model_name, base_name, make_field in specs:
        # One translated column per language code, in the original order.
        for lang in ('bg', 'en', 'ru', 'sl'):
            yield migrations.AddField(
                model_name=model_name,
                name='{0}_{1}'.format(base_name, lang),
                field=make_field(),
            )


class Migration(migrations.Migration):
    """Add bg/en/ru/sl translation columns for Field and Form attributes."""

    dependencies = [
        ('forms', '0006_auto_20170425_2225'),
    ]

    operations = list(_translated_field_operations())
|
from collections import Counter

# Read the (unused) element count, then the space-separated values.
n = int(input())
values = input().split()
# Pre-count every value once (O(n)); the original called list.count()
# inside the loop, which is O(n) per element and O(n^2) overall.
counts = Counter(values)
# Print the first value that occurs exactly once; print nothing otherwise
# (same behavior as the original).
for value in values:
    if counts[value] == 1:
        print(value)
        break
|
#!/usr/bin/python3
'''
Simple four-function calculator exposed as static methods.
'''


class Calculator:
    """Prints the result of basic arithmetic on two numbers.

    The original defined these methods without ``self`` or
    ``@staticmethod``, so they worked only when called on the class and
    raised TypeError on an instance; ``@staticmethod`` keeps the
    ``Calculator.addition(3, 4)`` call style working and makes instance
    calls safe too.
    """

    @staticmethod
    def addition(x, y):
        print(x + y)

    @staticmethod
    def subtraction(x, y):
        print(x - y)

    @staticmethod
    def multiplication(x, y):
        print(x * y)

    @staticmethod
    def division(x, y):
        # True division: result is a float even for int inputs.
        print(x / y)


Calculator.addition(3, 4)
Calculator.subtraction(3, 4)
Calculator.multiplication(3, 4)
Calculator.division(3, 4)
|
import datetime
from waitlist.storage.database import HistoryEntry, Account, Character,\
HistoryFits, Shipfit, InvType
from waitlist.base import db
from sqlalchemy import and_, or_, func
from typing import Any, Dict, Union, List, Callable, ClassVar
from builtins import classmethod, staticmethod
from _datetime import timedelta
class StatCache(object):
    """Tiny in-memory cache whose entries carry their own expiry timestamp.

    Each stored item is expected to be a dict with a 'datetime' key giving
    its UTC expiry time (see StatsManager.__create_cache_item).
    """

    def __init__(self) -> None:
        # key -> cached entry ({'data': ..., 'datetime': expiry})
        self.__data: Dict[str, Any] = {}

    def has_cache_item(self, key: str) -> bool:
        """Return True when *key* is cached and has not yet expired."""
        if key not in self.__data:
            return False
        # An entry is valid while its expiry lies in the future.
        return self.__data[key]['datetime'] >= datetime.datetime.utcnow()

    def get_cache_item(self, key: str) -> Any:
        """Return the raw cache entry for *key* (expired or not), else None."""
        return self.__data.get(key)

    def add_item_to_cache(self, key: str, item: Any) -> None:
        """Store *item* under *key*, replacing any previous entry."""
        self.__data[key] = item
class StatsManager(object):
    """Builds chart-ready statistics dicts from waitlist history.

    Every public getter funnels through __get_query_result, which caches
    each dataset in a process-wide StatCache for one hour (see the
    timedelta(seconds=3600) arguments below).
    """
    # Shared, process-wide cache for all statistics datasets.
    cache: ClassVar[StatCache] = StatCache()
    # One-letter prefixes combined with the duration to form cache keys.
    STAT_ID_APPROVED_DISTINCT_HULL_CHAR = 'a'
    STAT_ID_APPROVED_FITS = 'b'
    STAT_ID_JOINED_FLEET = 'c'
    @classmethod
    def get_distinct_hull_character_stats(
            cls,
            duration: timedelta):
        """Chart data: approved distinct hull/character combos in *duration*."""
        # Bind *duration* now so the cache layer can call the query
        # with no arguments later.
        def query_wrapper(fnc: Callable[[timedelta], Any],
                          duration: timedelta):
            def f():
                return fnc(duration)
            return f
        return cls.__get_query_result(
            ('Approved distinct Hull/Character '
             + 'combinations last ' + str(duration) + ' days'),
            cls.STAT_ID_APPROVED_DISTINCT_HULL_CHAR
            + "_" + str(duration.total_seconds()),
            query_wrapper(
                StatsManager.__query_distinct_hull_character_combinations,
                duration
            ),
            timedelta(seconds=3600),
            lambda row: row[0],
            lambda row: row[1]
        )
    @classmethod
    def get_approved_fits_by_account_stats(
            cls,
            duration: timedelta):
        """Chart data: number of fits each account approved in *duration*."""
        def query_wrapper(fnc: Callable[[timedelta], Any],
                          duration: timedelta):
            def f():
                return fnc(duration)
            return f
        return cls.__get_query_result(
            ('Approved fits by account during the '
             + 'last ' + str(duration) + ' days'),
            cls.STAT_ID_APPROVED_FITS
            + "_" + str(duration.total_seconds()),
            query_wrapper(
                StatsManager.__query_approved_ships_by_account,
                duration
            ),
            timedelta(seconds=3600),
            lambda row: row[0],
            lambda row: row[1]
        )
    @classmethod
    def get_joined_members_stats(
            cls,
            duration: timedelta):
        """Chart data: members that joined the fleet, grouped per month."""
        def query_wrapper(fnc: Callable[[timedelta], Any],
                          duration: timedelta):
            def f():
                return fnc(duration)
            return f
        return cls.__get_query_result(
            ('Joined Memembers per month'),
            cls.STAT_ID_JOINED_FLEET
            + "_" + str(duration.total_seconds()),
            query_wrapper(
                StatsManager.__query_joined_members,
                duration
            ),
            timedelta(seconds=3600),
            # x axis label is "year-month", y is the member count.
            lambda row: str(row[0])+"-"+str(row[1]),
            lambda row: row[2]
        )
    @classmethod
    def __get_query_result(cls, title: str, dataset_id: str,
                           query_func: Callable[[], Any],
                           cache_time: timedelta,
                           xfunc: Callable[[List[Any]], str],
                           yfunc: Callable[[List[Any]], int]
                           ) -> Dict[
        str, Union[str, List[Union[int, str]]]
    ]:
        """Return {'title', 'xnames', 'yvalues'} for *dataset_id*.

        Serves the dataset from the cache when fresh; otherwise runs
        query_func, maps each row through xfunc/yfunc, and caches the
        result for *cache_time*.
        """
        if cls.cache.has_cache_item(dataset_id):
            result = cls.cache.get_cache_item(dataset_id)['data']
        else:
            # we are going to return this
            dbdata = query_func()
            data = {
                'title': title,
                'xnames': [],
                'yvalues': [],
            }
            if dbdata is not None:
                xnames = data['xnames']
                yvalues = data['yvalues']
                for row in dbdata:
                    xnames.append(xfunc(row))
                    yvalues.append(yfunc(row))
            cls.cache.add_item_to_cache(
                dataset_id,
                StatsManager.__create_cache_item(data, cache_time)
            )
            result = data
        return result
    @staticmethod
    def __create_cache_item(data: Any, expire_in: timedelta):
        """Wrap *data* with the absolute UTC expiry StatCache expects."""
        return {
            'data': data,
            'datetime': (datetime.datetime.utcnow()
                         + expire_in)
        }
    @staticmethod
    def __query_distinct_hull_character_combinations(duration: timedelta):
        """
        Get distinct hull character combinations for the given duration

        SELECT shipType, COUNT(name)
        FROM (
        SELECT DISTINCT invtypes."typeName" AS "shipType", characters.eve_name AS name
        FROM fittings
        JOIN invtypes ON fittings.ship_type = invtypes."typeID"
        JOIN comp_history_fits ON fittings.id = comp_history_fits."fitID"
        JOIN comp_history ON comp_history_fits."historyID" = comp_history."historyID"
        JOIN characters ON comp_history."targetID" = characters.id
        WHERE
        (
        comp_history.action = 'comp_mv_xup_etr'
        OR
        comp_history.action = 'comp_mv_xup_fit'
        )
        AND DATEDIFF(NOW(),comp_history.time) < 30
        ) AS temp
        GROUP BY "shipType"
        ORDER BY COUNT(name) DESC
        LIMIT 15;
        """
        # Cut-off timestamp: anything older than *duration* is ignored.
        since: datetime = datetime.datetime.utcnow() - duration
        shiptype_name_combinations = db.session\
            .query(InvType.typeName.label('shipType'),
                   Character.eve_name.label('name'))\
            .distinct() \
            .join(Shipfit, InvType.typeID == Shipfit.ship_type) \
            .join(HistoryFits, Shipfit.id == HistoryFits.fitID) \
            .join(HistoryEntry,
                  HistoryFits.historyID == HistoryEntry.historyID) \
            .join(Character, HistoryEntry.targetID == Character.id) \
            .filter(
            and_(
                or_(
                    HistoryEntry.action == 'comp_mv_xup_etr',
                    HistoryEntry.action == 'comp_mv_xup_fit'
                ),
                HistoryEntry.time >= since
            )
        ).subquery('shiptypeNameCombinations')
        # Count distinct pilots per ship type, most popular hull first.
        return db.session.query(shiptype_name_combinations.c.shipType,
                                func.count(shiptype_name_combinations.c.name))\
            .group_by(shiptype_name_combinations.c.shipType) \
            .order_by(func.count(shiptype_name_combinations.c.name).desc()) \
            .all()
    @staticmethod
    def __query_approved_ships_by_account(duration: timedelta):
        """
        Gets how many fits an account approved
        during the give duration from now

        SELECT name, COUNT(fitid)
        FROM (
        SELECT DISTINCT accounts.username AS name,
         comp_history_fits.id as fitid
        FROM fittings
        JOIN invtypes ON fittings.ship_type = invtypes."typeID"
        JOIN comp_history_fits ON fittings.id = comp_history_fits."fitID"
        JOIN comp_history ON
         comp_history_fits."historyID" = comp_history."historyID"
        JOIN accounts ON comp_history."sourceID" = accounts.id
        JOIN characters ON comp_history."targetID" = characters.id
        WHERE
        (
        comp_history.action = 'comp_mv_xup_etr'
        OR
        comp_history.action = 'comp_mv_xup_fit'
        )
        AND DATEDIFF(NOW(),comp_history.time) < since
        ) AS temp
        GROUP BY name
        ORDER BY COUNT(fitid) DESC
        LIMIT 15;
        """
        since: datetime = datetime.datetime.utcnow() - duration
        fits_flown_by_subquery = db.session.query(
            Account.username.label('name'),
            HistoryFits.id.label('fitid')
        )\
            .join(HistoryEntry, Account.id == HistoryEntry.sourceID) \
            .join(Character, HistoryEntry.targetID == Character.id) \
            .join(HistoryFits,
                  HistoryEntry.historyID == HistoryFits.historyID)\
            .join(Shipfit, HistoryFits.fitID == Shipfit.id) \
            .join(InvType, Shipfit.ship_type == InvType.typeID) \
            .filter(
            and_(
                or_(
                    HistoryEntry.action == 'comp_mv_xup_etr',
                    HistoryEntry.action == 'comp_mv_xup_fit'
                ),
                HistoryEntry.time >= since
            )
        ).subquery("fitsFlownBy")
        # Count approved fits per account, busiest approver first.
        return db.session.query(fits_flown_by_subquery.c.name,
                                func.count(fits_flown_by_subquery.c.fitid))\
            .group_by(fits_flown_by_subquery.c.name) \
            .order_by(func.count(fits_flown_by_subquery.c.fitid).desc()) \
            .all()
    @staticmethod
    def __query_joined_members(duration: timedelta):
        """Count distinct members that joined, grouped by (year, month).

        Distinct on (year, month, day, target) first so one pilot counts
        at most once per day before the monthly aggregation.
        """
        since: datetime = datetime.datetime.utcnow() - duration
        joinedSubquery = db.session.query(
            func.year(HistoryEntry.time).label('year'),
            func.month(HistoryEntry.time).label('month'),
            func.day(HistoryEntry.time).label('day'),
            HistoryEntry.targetID.label('target'),
        )\
            .filter(
            and_(
                HistoryEntry.action == HistoryEntry.EVENT_AUTO_RM_PL,
                HistoryEntry.time >= since
            )
        ).distinct().subquery("joinedFleet")
        return db.session.query(joinedSubquery.c.year,
                                joinedSubquery.c.month,
                                func.count(joinedSubquery.c.target)
                                ).group_by(
            joinedSubquery.c.year,
            joinedSubquery.c.month
        ).all();
        # NOTE(review): the string below is dead code (unreachable after
        # the return) kept as a commented-out alternative implementation.
        """
        return db.session.query(
            func.year(HistoryEntry.time),
            func.month(HistoryEntry.time),
            func.count(HistoryEntry.historyID)
            )\
            .filter(
                and_(
                    HistoryEntry.action == HistoryEntry.EVENT_AUTO_RM_PL,
                    HistoryEntry.time >= since
                )
            )\
            .group_by(func.year(HistoryEntry.time), func.month(HistoryEntry.time))\
            .all()
        """
|
## SANTOSH KHADKA
## Complete-Python-3-Bootcamp/03-Methods and Functions/03-Function Practice Exercises
## Level 2 Problems:
def has_33(nums):
    '''
    FIND 33: Given a list of ints, return True if the array contains a 3 next to a 3 somewhere.

    Unlike the original (which read nums[0] unconditionally and raised
    IndexError on an empty list), lists of length 0 or 1 return False.
    '''
    # Compare each adjacent pair; zip naturally yields nothing for short lists.
    return any(a == 3 and b == 3 for a, b in zip(nums, nums[1:]))
def paper_doll(text):
    '''
    PAPER DOLL: Given a string, return a string where for every character in the original there are three characters
    '''
    # Triple each character and join — one pass, no manual accumulator.
    return "".join(ch * 3 for ch in text)
def blackjack(a, b, c):
    '''
    BLACKJACK: Given three integers between 1 and 11, if their sum is less than or equal to 21, return their sum.
    If their sum exceeds 21 and there's an eleven, reduce the total sum by 10.
    Finally, if the sum (even after adjustment) exceeds 21, return 'BUST'
    '''
    total = a + b + c
    if total <= 21:
        return total
    # Over 21: a single eleven may be downgraded to a one (subtract 10).
    if 11 in (a, b, c):
        adjusted = total - 10
        return adjusted if adjusted <= 21 else "BUST"
    return "BUST"
def summer_69(arr):
    '''
    SUMMER OF '69: Return the sum of the numbers in the array,
    except ignore sections of numbers starting with a 6 and extending to the next 9 (every 6 will be followed by at least one 9).
    Return 0 for no numbers.
    '''
    total = 0
    skipping = False
    for value in arr:
        if value == 6:
            skipping = True       # open a 6..9 section (the 6 is not counted)
        elif not skipping:
            total += value        # ordinary number outside any section
        elif value == 9:
            skipping = False      # the 9 closes the section, uncounted
    return total
def function_test(func_name):
    """Run the canned demo calls for whichever exercise function is named."""
    name = str(func_name)
    if "has_33" in name:
        print(has_33([1, 3, 3]))
        print(has_33([1, 3, 1, 3]))
        print(has_33([3, 1, 3]))
    if "paper_doll" in name:
        print(paper_doll("Hello"))
        print(paper_doll("Mississippi"))
    if "blackjack" in name:
        print(blackjack(5, 6, 7))    # expect 18
        print(blackjack(9, 9, 9))    # expect 'BUST'
        print(blackjack(9, 9, 11))   # expect 19
    if "summer_69" in name:
        print(summer_69([1, 3, 5]))           # expect 9
        print(summer_69([4, 5, 6, 7, 8, 9]))  # expect 9
        print(summer_69([2, 1, 6, 9, 11]))    # expect 14
        print(summer_69([]))                  # empty list, expect 0
def main():
    """Entry point: run the demo for the currently selected exercise.

    Swap the argument to function_test ("has_33", "paper_doll",
    "blackjack") to exercise a different function.
    """
    function_test("summer_69")


if __name__ == "__main__":
    main()
#coding:utf-8
#!/usr/bin/env python
from django import template
register = template.Library()
@register.filter(name = 'dict_get')
def dict_get(v, k):
    # Template filter: {{ mydict|dict_get:key }} -> mydict[key].
    # Propagates KeyError if the key is missing (no default).
    return v[k]
@register.filter(name = 'array_get')
def array_get(v, i):
    # Template filter: {{ mylist|array_get:i }} -> mylist[int(i)].
    # int() coercion allows the index to arrive as a string from templates.
    return v[int(i)]
@register.filter(name='dict_lookup')
def dict_lookup(v, k1, k2):
    # Two-level lookup: v[k1][k2].
    # NOTE(review): Django's `|filter:arg` syntax passes at most one
    # argument; confirm how this three-parameter filter is invoked.
    return v[k1][k2]
@register.filter(name='range')
def filter_range(v):
    # Expose range() to templates: {% for i in n|range %}; v must be an int.
    return range(v)
|
import pymysql
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from .base import Base
def mysql_session(db_dict):
    """Create a SQLAlchemy session bound to the MySQL schema in *db_dict*.

    *db_dict* must provide 'user', 'pass', 'host', 'port' and 'schema'.
    All tables known to Base.metadata are created (if missing) before
    the session is returned.
    """
    # Put the schema directly in the URL instead of issuing a raw
    # "USE <schema>" via engine.execute(): the string-formatted statement
    # was injection-prone and Engine.execute() was removed in
    # SQLAlchemy 2.0.
    connection_string = 'mysql+pymysql://{}:{}@{}:{}/{}'.format(
        db_dict['user'],
        db_dict['pass'],
        db_dict['host'],
        db_dict['port'],
        db_dict['schema'])
    engine = create_engine(connection_string, echo=True)
    Base.metadata.create_all(engine, checkfirst=True)
    DBSession = sessionmaker(bind=engine)
    return DBSession()
|
'''
Created on Nov 7, 2016

@author: micro
'''
import cv2                        # OpenCV library
import numpy as np                # numerical Python library
import matplotlib.pyplot as plt   # more math libraries
import os

# Path to the test image (Windows-style absolute path).
image_path = "C:/python27/Robotics_Letter.jpg"
# print() as a function works under both Python 2 and 3, unlike the
# original Python-2-only "print expr" statement form.
print(os.path.exists(image_path))
cv2.namedWindow("image", cv2.WINDOW_NORMAL)
img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)  # reads image (None if the path is bad)
cv2.imshow("image", img)          # shows image in a new window
cv2.waitKey(0)                    # waits indefinitely for a key press (0 = no timeout)
cv2.destroyAllWindows()           # closes all windows once user presses a key
# Copyright (c) Members of the EGEE Collaboration. 2004.
# See http://www.eu-egee.org/partners/ for details on the copyright
# holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from TestUtils import Workspace
from DynamicSchedulerGeneric import Utils as DynSchedUtils
from DynamicSchedulerGeneric import Analyzer
from DynamicSchedulerGeneric import GLUE2Handler
class DummyOutput:
    """File-like sink that scrapes GLUE2 share IDs and job counts.

    Lines are expected LDIF-style with a trailing newline; counts are
    attributed to the most recent "dn: GLUE2ShareID..." line seen.
    """

    def __init__(self):
        # share DN -> job count
        self.queued = {}
        self.running = {}
        self.curr_id = None

    def write(self, line):
        """Record queued/running job counts for the current share."""
        if line.startswith("dn: GLUE2ShareID"):
            # strip the leading "dn: " and the trailing newline
            self.curr_id = line[4:-1]
        elif line.startswith("GLUE2ComputingShareWaitingJobs"):
            # value starts after "GLUE2ComputingShareWaitingJobs: " (32 chars)
            self.queued[self.curr_id] = int(line[32:-1])
        elif line.startswith("GLUE2ComputingShareRunningJobs"):
            self.running[self.curr_id] = int(line[32:-1])
# NOTE: this module is Python 2 code ("except X, e" syntax below).
class GLUE1TestCase(unittest.TestCase):
    # NOTE(review): class name says GLUE1 but it exercises GLUE2Handler —
    # confirm whether the name is historical.
    def setUp(self):
        # local LRMS user -> VO mapping used by the Workspace fixture.
        self.vomap = {"atlasprod": "atlas",
                      "atlassgm": "atlas",
                      "dteamgold": "dteam",
                      "dteamsilver": "dteam",
                      "dteambronze": "dteam",
                      "infngridlow": "infngrid",
                      "infngridmedium": "infngrid",
                      "infngridhigh": "infngrid"}
        # Shell-script template emulating the LRMS command: a printf of
        # header counters, one JSON dict per job, then a footer.
        self.headerfmt = "#!/bin/bash\n\n"
        self.headerfmt += "printf 'nactive %d\n"
        self.headerfmt += "nfree %d\n"
        self.headerfmt += "now %d\n"
        self.headerfmt += "schedCycle %d\n\n"
        self.footerfmt = "'\n\nexit 0"
        self.dictfmt = '{"group": "%s", "queue": "%s", "state": "%s", "qtime": %d, "name": "%s"}\n'
    def _script(self):
        # Build a fake LRMS dump: (group, queue, state, qtime, job name).
        jTable = [
            ("dteamgold", 'creamtest2', 'running', 1327566866, "creXX_23081970"),
            ("dteamgold", 'creamtest2', 'queued', 1327568866, "creXX_23081971"),
            ("dteambronze", "creamtest2", 'queued', 1327571866, "creXX_23081972"),
            ("dteamgold", "creamtest1", 'running', 1327567866, "creXX_23081973"),
            ("dteamsilver", "creamtest1", 'running', 1327569866, "creXX_23081974"),
            ("dteambronze", "creamtest1", 'queued', 1327570866, "creXX_23081975"),
            ("infngridlow", "creamtest1", 'queued', 1327572866, "creXX_23081976"),
            ("infngridmedium", "creamtest1", 'running', 1327573866, "creXX_23081977")
        ]
        script = self.headerfmt % (5, 0, 1327574866, 26)
        for jItem in jTable:
            script += self.dictfmt % jItem
        script += self.footerfmt
        return script
    def test_process_ok(self):
        # Happy path: share + policy present; creamtest1/dteam has
        # 1 queued and 2 running jobs in the scripted job table above.
        glue2shareid = 'GLUE2ShareID=creamtest1_dteam_abc,GLUE2ServiceID=abc,GLUE2GroupID=resource,o=glue'
        ldif = """
dn: GLUE2PolicyID=creamtest1_dteam_abc_policy,%s
GLUE2PolicyUserDomainForeignKey: dteam
GLUE2MappingPolicyShareForeignKey: creamtest1_dteam_abc

dn: %s
GLUE2ShareID: creamtest1_dteam_abc
GLUE2ComputingShareMappingQueue: creamtest1

""" % (glue2shareid, glue2shareid)
        workspace = Workspace(vomap = self.vomap)
        workspace.setLRMSCmd(self._script())
        workspace.setGLUE2StaticFile(ldif)
        cfgfile = workspace.getConfigurationFile()
        config = DynSchedUtils.readConfigurationFromFile(cfgfile)
        dOut = DummyOutput()
        collector = Analyzer.analyze(config, {})
        GLUE2Handler.process(config, collector, dOut)
        result = dOut.queued[glue2shareid] == 1
        result = result and dOut.running[glue2shareid] == 2
        self.assertTrue(result)
    def test_process_missing_share(self):
        # A policy referencing a share that is never declared must raise.
        try:
            ldif = """
dn: GLUE2PolicyID=creamtest1_dteam_abc_policy,GLUE2ShareID=creamtest1_dteam_abc,GLUE2ServiceID=abc,GLUE2GroupID=resource,o=glue
GLUE2PolicyUserDomainForeignKey: dteam
GLUE2MappingPolicyShareForeignKey: creamtest1_dteam_abc

"""
            workspace = Workspace(vomap = self.vomap)
            workspace.setLRMSCmd(self._script())
            workspace.setGLUE2StaticFile(ldif)
            cfgfile = workspace.getConfigurationFile()
            config = DynSchedUtils.readConfigurationFromFile(cfgfile)
            collector = Analyzer.analyze(config, {})
            GLUE2Handler.process(config, collector, DummyOutput())
            self.fail("No exception detected")
        except GLUE2Handler.GLUE2Exception, glue_error:
            msg = str(glue_error)
            self.assertTrue(msg.startswith("Invalid foreign key"))
    def test_process_missing_vo_in_policy(self):
        # A policy without GLUE2PolicyUserDomainForeignKey must raise.
        try:
            glue2shareid = 'GLUE2ShareID=creamtest1_dteam_abc,GLUE2ServiceID=abc,GLUE2GroupID=resource,o=glue'
            ldif = """
dn: %s
GLUE2ShareID: creamtest1_dteam_abc
GLUE2ComputingShareMappingQueue: creamtest1

dn: GLUE2PolicyID=creamtest1_dteam_abc_policy,%s
GLUE2MappingPolicyShareForeignKey: creamtest1_dteam_abc

""" % (glue2shareid, glue2shareid)
            workspace = Workspace(vomap = self.vomap)
            workspace.setLRMSCmd(self._script())
            workspace.setGLUE2StaticFile(ldif)
            cfgfile = workspace.getConfigurationFile()
            config = DynSchedUtils.readConfigurationFromFile(cfgfile)
            collector = Analyzer.analyze(config, {})
            GLUE2Handler.process(config, collector, DummyOutput())
            self.fail("No exception detected")
        except GLUE2Handler.GLUE2Exception, glue_error:
            msg = str(glue_error)
            self.assertTrue(msg == "Missing mandatory attribute GLUE2PolicyUserDomainForeignKey")

if __name__ == '__main__':
    unittest.main()
|
from rest_framework import serializers
from .models import Post, Vote
class PostSerializer(serializers.ModelSerializer):
    """Serialize Post instances, including a computed vote count."""
    # Read-only values resolved from the related poster (user) object.
    poster = serializers.ReadOnlyField(source='poster.username')
    poster_id = serializers.ReadOnlyField(source='poster.id')
    # Computed field: DRF resolves it via get_votes() below
    # (SerializerMethodField looks up get_<field_name> by convention).
    votes = serializers.SerializerMethodField()
    class Meta:
        # Model this serializer is based on.
        model = Post
        # Fields exposed in the serialized output, including computed votes.
        fields = ['id','title','url','poster','poster_id','created','votes']
    def get_votes(self, post):
        # Number of Vote rows pointing at this post.
        return Vote.objects.filter(post=post).count()
class VoteSerializer(serializers.ModelSerializer):
    """Minimal serializer for Vote: only the primary key is exposed."""
    class Meta:
        # Model this serializer is based on.
        model = Vote
        # Only the id is serialized; the post/user links are set server-side.
        fields = ['id']
import json
import sys

# Usage: script.py <input_table> <output_json> <ox> <oy> <oz>
# Each input line: x y z dx dy  — header/blank/malformed lines are skipped.

tbl = {}
# 'with' closes the file; 'infile' avoids shadowing the builtin input().
with open(sys.argv[1], "r") as infile:
    for line in infile:
        try:
            fv = [float(v) for v in line.split()]
            # key: (x, y, z) coordinate; value: [dx, dy] deviation
            tbl[(fv[0], fv[1], fv[2])] = [fv[3], fv[4]]
        except (ValueError, IndexError):
            # Narrowed from a bare except: skip only non-numeric or
            # too-short lines, exactly the cases the original tolerated.
            pass

res = {
    "divider": 1,
    "accuracy": 0.02,
    "gridSize": [50, 50, 50],
    # Deviation entries preserve the file's (insertion) order.
    "table": [
        {"coordinate": [x, y, z], "deviation": [dx, dy, 0]}
        for (x, y, z), (dx, dy) in tbl.items()
    ],
    "origin": [float(sys.argv[3]), float(sys.argv[4]), float(sys.argv[5])],
}

with open(sys.argv[2], "w") as output:
    json.dump(res, output, indent=4, sort_keys=True)
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
def err_rate(pred_labels, true_labels):
    """Fraction of entries where the prediction differs from the truth."""
    mismatch_count = (pred_labels != true_labels).sum()
    # astype(float) keeps the original float return even under Python 2
    # integer division semantics.
    return mismatch_count.astype(float) / len(true_labels)
def benchmark(pred_labels, true_labels):
    """Return (error rate, array of indices that were misclassified)."""
    wrong_idx = np.where(pred_labels != true_labels)[0]
    rate = err_rate(pred_labels, true_labels)
    return rate, wrong_idx
def plot_confusion_matrix(cm,cmap=plt.cm.Blues,fname='conf_matrix.png',title='Confusion matrix'):
    """Render confusion matrix *cm* as a heatmap and save it to *fname*.

    Tick marks are hard-coded to np.arange(10), i.e. this assumes a
    10x10 matrix with digit labels 0-9.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(10)
    plt.xticks(tick_marks, tick_marks.astype(str))
    plt.yticks(tick_marks, tick_marks.astype(str))
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    # Save to disk and close so repeated calls don't accumulate figures.
    plt.savefig(fname)
    plt.close()
def kfold_indices(k, n_train):
    '''returns array of indices of shape (k,n_train/k) to grab the k bins of training data'''
    fold_size = int(n_train / float(k))
    # Truncate to k*fold_size so the reshape is always valid, even when
    # k does not divide n_train evenly.
    idx = np.arange(k * fold_size)
    np.random.shuffle(idx)
    return idx.reshape((k, fold_size))
def kfold_cross_val(C, Xtr,Ytr,k,ntrain):
    '''k-fold cross-validation of a LinearSVC with regularization C.

    Returns (err, keep_clf): err is the per-fold validation error array,
    keep_clf is a dict holding the best fitted classifier ('lsvc') and
    its error ('err'). Ties keep the later fold (<= comparison below).
    '''
    assert(ntrain <= Xtr.shape[0])
    ind= kfold_indices(k,ntrain)
    # err initialized to -1 so an unfilled fold is detectable.
    err=np.zeros(k)-1
    # Sentinel error of 1.0 guarantees the first fold is kept.
    keep_clf=dict(err=1.)
    for i in range(k):
        # Validation fold = row i; training set = all other indices.
        ival= ind[i,:]
        itrain=np.array(list(set(ind.flatten())-set(ival)))
        assert(len(list(set(ival) | set(itrain))) == len(ind.flatten()))
        #train
        lsvc = LinearSVC(C=C,fit_intercept=True)
        lsvc.fit(Xtr[itrain,:],Ytr[itrain])
        #get error for this kth sample
        pred= lsvc.predict(Xtr[ival,:])
        err[i],wrong= benchmark(pred, Ytr[ival])
        # Keep the classifier with the lowest validation error so far.
        if err[i] <= keep_clf['err']:
            keep_clf['lsvc']=lsvc
            keep_clf['err']=err[i]
    return err,keep_clf
|
#!/usr/bin/python
# Copyright 2014 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""List the best results for a video.
This is intended to be a complement to the graphical presentation:
It shows the best encodings for a video clip under a number of bitrates.
"""
import argparse
import sys
import encoder
import fileset_picker
import mpeg_settings
import optimizer
import pick_codec
import score_tools
# NOTE: Python 2 module (print statements below).
def main():
  """List the best encodings per bitrate for one or all video files."""
  parser = argparse.ArgumentParser()
  parser.add_argument('videofile', nargs='?')
  parser.add_argument('--codec')
  parser.add_argument('--fileset', default='mpeg_video')
  parser.add_argument('--component')
  parser.add_argument('--single_config', action='store_true')
  parser.add_argument('--criterion', default='psnr')
  args = parser.parse_args()
  codec = pick_codec.PickCodec(args.codec)
  my_optimizer = optimizer.Optimizer(codec,
      score_function=score_tools.PickScorer(args.criterion),
      file_set=fileset_picker.PickFileset(args.fileset))
  # No positional argument: process every file in the chosen fileset.
  if args.videofile:
    videofiles = [args.videofile]
  else:
    videofiles = my_optimizer.file_set.AllFileNames()
  # --single_config: score one overall-best encoder instead of the
  # per-(bitrate, file) best encoding.
  if args.single_config:
    best_encoder = my_optimizer.BestOverallEncoder()
    if not best_encoder:
      print 'No overall best encoder'
      return 1
  for videofile_name in videofiles:
    print '--- %s ---' % videofile_name
    videofile = encoder.Videofile(videofile_name)
    for bitrate in sorted(
        mpeg_settings.MpegFiles().AllRatesForFile(videofile_name)):
      if args.single_config:
        encoding = best_encoder.Encoding(bitrate, videofile)
        encoding.Recover()
      else:
        encoding = my_optimizer.BestEncoding(bitrate, videofile)
      # Skip (bitrate, file) pairs with no stored result.
      if not encoding.Result():
        continue
      # Optional extra column pulled from the result dict.
      if args.component:
        component = encoding.result[args.component]
      else:
        component = ''
      print '%s %d %f %s %s' % (encoding.encoder.Hashname(),
                                bitrate,
                                my_optimizer.Score(encoding),
                                component,
                                encoding.encoder.parameters.ToString())

if __name__ == '__main__':
  sys.exit(main())
|
# Naive approach: compare each adjacent pair of binary digits.
class Solution:
    def hasAlternatingBits(self, n: int) -> bool:
        """Return True if the binary digits of n strictly alternate (e.g. 0b1010)."""
        bits = bin(n)[2:]
        # Every adjacent pair of bits must differ.
        return all(a != b for a, b in zip(bits, bits[1:]))
|
from unittest import TestCase
from panopto_client.remote_recorder import (
RemoteRecorderManagement, PanoptoAPIException)
from panopto_client.tests import instance_args
from datetime import datetime
import mock
# Both patches apply to every test method: _instance always returns the
# sentinel, and _request is captured so the SOAP request shape can be
# asserted without any network traffic.
@mock.patch.object(RemoteRecorderManagement, '_instance',
                   return_value=mock.sentinel.instance)
@mock.patch.object(RemoteRecorderManagement, '_request')
class RemoteRecorderManagementTest(TestCase):
    """Verify the SOAP types instantiated and payloads sent per API call."""
    def test_init(self, mock_request, mock_instance):
        client = RemoteRecorderManagement()
        self.assertEqual(
            client._port, 'BasicHttpBinding_IRemoteRecorderManagement')
        self.assertEqual(client._actas, None)
        self.assertEqual(client._data, client._live)
    def test_getRemoteRecordersById(self, mock_request, mock_instance):
        client = RemoteRecorderManagement()
        result = client.getRemoteRecordersById('test-recorder-id')
        # The client must build an auth object and a guid array.
        self.assertEqual(instance_args(mock_instance.call_args_list), [
            'ns0:AuthenticationInfo', 'ns4:ArrayOfguid'])
        mock_request.assert_called_with('GetRemoteRecordersById', {
            'auth': mock.sentinel.instance,
            'remoteRecorderIds': mock.sentinel.instance})
    def test_getRemoteRecordersByExternalId(self, mock_request, mock_instance):
        client = RemoteRecorderManagement()
        result = client.getRemoteRecordersByExternalId('test-external-id')
        self.assertEqual(instance_args(mock_instance.call_args_list), [
            'ns0:AuthenticationInfo', 'ns4:ArrayOfstring'])
        mock_request.assert_called_with('GetRemoteRecordersByExternalId', {
            'auth': mock.sentinel.instance,
            'externalIds': mock.sentinel.instance})
    def test_scheduleRecording(self, mock_request, mock_instance):
        # The client appends to RecorderSettings on the sentinel; give it
        # a real list so append() works.
        mock.sentinel.instance.RecorderSettings = []
        client = RemoteRecorderManagement()
        result = client.scheduleRecording(
            'test-name', folder_id='test-folder-id', is_broadcast=False,
            start_time=datetime(2013, 3, 15, 9, 0, 0),
            end_time=datetime(2013, 3, 15, 10, 0, 0),
            recorder_id='test-recorder-id')
        self.assertEqual(instance_args(mock_instance.call_args_list), [
            'ns0:ArrayOfRecorderSettings', 'ns0:AuthenticationInfo',
            'ns0:RecorderSettings'])
        mock_request.assert_called_with('ScheduleRecording', {
            'auth': mock.sentinel.instance, 'name': 'test-name',
            'folderId': 'test-folder-id', 'isBroadcast': False,
            'start': datetime(2013, 3, 15, 9, 0),
            'end': datetime(2013, 3, 15, 10, 0),
            'recorderSettings': mock.sentinel.instance})
    def test_listRecorders(self, mock_request, mock_instance):
        client = RemoteRecorderManagement()
        # NOTE(review): iterating the sentinel response raises TypeError;
        # it is deliberately swallowed so the request shape below can
        # still be asserted.
        try:
            result = client.listRecorders()
        except TypeError:
            pass
        self.assertEqual(instance_args(mock_instance.call_args_list), [
            'ns0:AuthenticationInfo', 'ns0:Pagination'])
        mock_request.assert_called_with('ListRecorders', {
            'auth': mock.sentinel.instance,
            'pagination': mock.sentinel.instance, 'sortBy': 'Name'})
    def test_updateRemoteRecorderExternalId(self, mock_request, mock_instance):
        client = RemoteRecorderManagement()
        result = client.updateRemoteRecorderExternalId(
            'test-recorder-id', 'test-external-id')
        self.assertEqual(instance_args(mock_instance.call_args_list), [
            'ns0:AuthenticationInfo'])
        mock_request.assert_called_with('UpdateRemoteRecorderExternalId', {
            'auth': mock.sentinel.instance, 'externalId': 'test-external-id',
            'remoteRecorderId': 'test-recorder-id'})
    def test_updateRecordingTime(self, mock_request, mock_instance):
        client = RemoteRecorderManagement()
        result = client.updateRecordingTime(
            'test-session-id', start=datetime(2013, 3, 15, 9, 0, 0),
            end=datetime(2013, 3, 15, 10, 0, 0))
        self.assertEqual(instance_args(mock_instance.call_args_list), [
            'ns0:AuthenticationInfo'])
        mock_request.assert_called_with('UpdateRecordingTime', {
            'auth': mock.sentinel.instance, 'sessionId': 'test-session-id',
            'start': datetime(2013, 3, 15, 9, 0),
            'end': datetime(2013, 3, 15, 10, 0)})
|
from django.urls import path
from journal.views import PageListView, PageDetailView, PageCreateView
# URL routes for the journal app:
#   ''            -> paginated list of pages
#   'create/'     -> form for creating a new page
#   '<str:slug>/' -> page detail looked up by slug; must stay last so it
#                    does not shadow the literal 'create/' route
urlpatterns = [
    path('', PageListView.as_view(), name='journal-list-page'),
    path('create/', PageCreateView.as_view(), name='new-page'),
    path('<str:slug>/', PageDetailView.as_view(), name='journal-details-page'),
]
|
from flask_restplus import Api, Resource
import server.document as document
from server.operation.register import Register
from .. import api
from server.utils.handler import _parent_resoves
# API namespace for registration endpoints.
ns = api.namespace('register', description="用户注册")


# NOTE(review): this class shadows the `Register` imported from
# server.operation.register above; consider renaming one of them to avoid
# confusion about which Register is in scope after this point.
class Register(Resource):
    """User registration module.
    """
    # Swagger documentation decorators: the request model plus every
    # response code this endpoint can produce.
    @document.DocumentFormart.request_model(data={"args1": 400, "args2": "成功"})
    @document.DocumentFormart.response_code(data={"status": 100006, "msg": "成功"}, code=400.1)
    @document.DocumentFormart.response_code(data={"status": 400, "msg": "成功"}, code=400)
    @document.DocumentFormart.response_code(data={"status": 200, "msg": "成功"}, code=200)
    def post(self):
        "User registration"
        # Delegate the actual registration handling to the shared resolver.
        return _parent_resoves()


ns.add_resource(Register, '/')
|
'''
Pig Latin - Pig Latin is a game of alterations played on the English language game.
To create the Pig Latin form of an English word the initial consonant sound is transposed to the end of the word and
an ay is affixed (Ex.: "banana" would yield anana-bay).
'''
class PigLatin():
    def pig_latin(self, text):
        """Return the Pig Latin form of *text*.

        The leading consonant cluster (everything before the first vowel)
        is moved to the end of the word and 'ay' is appended, so
        "banana" -> "ananabay" and "trash" -> "ashtray".

        Bug fixes vs. the original:
        - empty input no longer raises NameError (loop variable was used
          after a zero-iteration loop);
        - vowel-less words no longer duplicate their last character (the
          loop index pointed at the last letter when no `break` fired).
        """
        vowels = "aeiou"
        for index, letter in enumerate(text):
            if letter in vowels:
                # First vowel found: rotate the consonant prefix to the end.
                return text[index:] + text[:index] + 'ay'
        # No vowel at all (or empty string): nothing to rotate.
        return text + 'ay'
test = PigLatin()
print(test.pig_latin("banana")) |
import sys
import cctpy.cct
import tests.numpy_test
# Debug aid: print the interpreter's module search path so the cctpy/tests
# imports above can be checked against the expected checkout layout below.
print(sys.path)
# 'C:\\Users\\madoka_9900\\Documents\\github\\cctpy\\codes\\cuda_optim',
# 'C:\\Users\\madoka_9900\\Documents\\github\\cctpy'
# C:\\Users\\madoka_9900\\Documents\\github\\cctpy\\codes
|
'''
This class uses a configuration file to extract selected values from the json string.
It also implement selection based on positive and negative list of keywords.
'''
import json
from jsonpath_rw import jsonpath, parse
class MessageFilter:
    """Extract selected values from a JSON document via jsonpath
    expressions, with optional include/exclude keyword filtering."""

    message_sanitization = True  # we need to sanitize data before use
    from_file = None             # True when the JSON was loaded from a file
    json_string = None

    def __init__(self, file_name=None, message=None):
        """Load JSON either from *file_name* or from the *message* string."""
        if file_name:
            # BUG FIX: was `from_file = True`, a local assignment that never
            # updated the instance attribute.
            self.from_file = True
            self.pysondata = self.readJSONfile(file_name)
        elif message:
            if self.message_sanitization:
                message = self.sanitize(message)
            self.pysondata = self.readFromJSONString(message)

    def sanitize(self, message):
        """Strip oslo.messaging artifacts so the payload parses as JSON."""
        # remove the backslashes added by oslo messaging
        message = message.replace("\\", "")
        # oslo adds extra sign (") in two different places; strip them here.
        message = message.replace('"oslo.message": "{', '')
        message = message.replace('}", "oslo.version": "2.0"', '')
        return message

    def readfile(self, file_name):
        """Return the raw text content of *file_name*."""
        with open(file_name, "r") as myfile:
            data = myfile.read()
        return data

    def readJSONfile(self, file_name):
        """Parse *file_name* as JSON and cache the result on the instance."""
        json_data = json.loads(self.readfile(file_name))
        self.pysondata = json_data
        return json_data

    def readFromJSONString(self, str):
        """Parse the JSON string *str* and cache the result on the instance.

        NOTE(review): the parameter shadows the builtin ``str``; kept as-is
        for interface compatibility with existing callers.
        """
        json_data = json.loads(str)
        self.pysondata = json_data
        return self.pysondata

    def parse_from_path(self, path):
        """Return every value matched by the jsonpath expression *path*."""
        jsonpath_expr = parse(path)
        return [match.value for match in jsonpath_expr.find(self.pysondata)]

    def parse_from_file(self):
        pass

    def parse_from_config(self, config):
        """Retrieve values for each jsonpath in *config* (a dict mapping
        jsonpath -> selection rule: "any", "includes...", or
        "does_not_include...")."""
        retrieve_values = {}
        for j_path in config:
            value = self.parse_from_path(j_path)
            if len(value) == 0:
                continue
            rule = config[j_path].lower()
            if rule == "any":
                retrieve_values[j_path] = value[0]
            elif "includes" in rule:
                # NOTE(review): match_value_from_string and
                # is_superstring_of_a_string_from_list are defined elsewhere
                # in the project; their contract is assumed, not verified here.
                yes_list = match_value_from_string(rule, "includes")
                if is_superstring_of_a_string_from_list(yes_list, value[0]) is not False:
                    retrieve_values[j_path] = value[0]
            elif "does_not_include" in rule:
                no_list = match_value_from_string(rule, "does_not_include")
                if is_superstring_of_a_string_from_list(no_list, value[0]) is False:
                    retrieve_values[j_path] = value[0]
        return retrieve_values

    def parse(self, config):
        """Retrieve values for the jsonpath expressions in *config*.

        *config* may come from a configuration file or be a plain dict of
        jsonpath -> selection rule.
        """
        return self.parse_from_config(config)
|
# Copyright (C) 2020 THL A29 Limited, a Tencent company.
# All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
# https://opensource.org/licenses/BSD-3-Clause
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# See the AUTHORS file for names of contributors.
import turbo_transformers
import unittest
import sys
import torch
import os
from onmt.modules.multi_headed_attn import MultiHeadedAttention
sys.path.append(os.path.dirname(__file__))
import test_helper
fname = "tt_decoder_multi_headed_attention.txt"
def create_test(batch_size,
                key_seq_len,
                query_seq_len,
                attn_type,
                pre_layernorm,
                post_add_input,
                with_quantize_dynamic=False,
                set_layer_cache=False):
    """Create (and register in globals()) a TestCase comparing ONMT's
    MultiHeadedAttention against the turbo_transformers implementation
    for one parameter combination.

    Args:
        batch_size: batch dimension of the random Q/K/V tensors.
        key_seq_len: sequence length of K/V (the "from" side).
        query_seq_len: sequence length of Q (the "to" side).
        attn_type: "self" or "context" attention.
        pre_layernorm: apply LayerNorm to Q before the attention call.
        post_add_input: add Q to the attention output (residual).
        with_quantize_dynamic: additionally benchmark a dynamically
            quantized ONMT model (CPU only).
        set_layer_cache: pre-populate the decoder layer cache.

    The generated class is stored in globals() under a name encoding the
    parameters so that unittest discovery picks it up.
    """
    class TestMultiHeadedAttention(unittest.TestCase):
        def init_data(self, use_cuda):
            """Build the ONMT reference modules, the turbo modules and
            random Q/K/V inputs on the requested device."""
            self.test_device = torch.device('cuda:0') if use_cuda else \
                torch.device('cpu:0')
            if not use_cuda:
                torch.set_num_threads(4)
                turbo_transformers.set_num_threads(4)
            torch.set_grad_enabled(False)
            self.head_count = 16
            self.model_dim = 1024  # self.model_dim should % self.head_count = 0
            self.size_per_head = int(self.model_dim / self.head_count)
            onmt_multi_headed_attention = MultiHeadedAttention(
                self.head_count, self.model_dim)
            onmt_multi_headed_attention.eval()
            torch_layernorm = torch.nn.LayerNorm(self.model_dim, eps=1e-6)
            torch_layernorm.eval()
            if use_cuda:
                onmt_multi_headed_attention.to(self.test_device)
                torch_layernorm.to(self.test_device)
            K = torch.rand(
                size=(
                    batch_size,
                    key_seq_len,  # from_seq
                    self.model_dim),
                dtype=torch.float32,
                device=self.test_device)
            V = torch.rand(size=(batch_size, key_seq_len, self.model_dim),
                           dtype=torch.float32,
                           device=self.test_device)
            Q = torch.rand(
                size=(
                    batch_size,
                    query_seq_len,  # to_seq
                    self.model_dim),
                dtype=torch.float32,
                device=self.test_device)
            turbo_attn_trans = turbo_transformers.MultiHeadedAttention.from_onmt(
                onmt_multi_headed_attention,
                torch_layernorm,
                is_trans_weight=True)
            turbo_attn_notrans = turbo_transformers.MultiHeadedAttention.from_onmt(
                onmt_multi_headed_attention,
                torch_layernorm,
                is_trans_weight=False)
            if with_quantize_dynamic and not use_cuda:
                self.q_onmt_multi_headed_attention = torch.quantization.quantize_dynamic(
                    onmt_multi_headed_attention)
            return onmt_multi_headed_attention, torch_layernorm, turbo_attn_trans, turbo_attn_notrans, Q, K, V

        def check_torch_and_turbo(self, use_cuda, num_iter=1):
            """Run the ONMT model and both turbo variants, compare
            outputs / attention maps / layer caches, and append QPS
            numbers to the benchmark log `fname`."""
            onmt_multi_headed_attention, torch_layernorm, turbo_attn_trans, turbo_attn_notrans, Q, K, V = \
                self.init_data(use_cuda)
            device = "GPU" if use_cuda else "CPU"
            info = f"\"({device}, {set_layer_cache}, {pre_layernorm}, {post_add_input}, {attn_type}, {batch_size}, {key_seq_len:03}, {query_seq_len:03})\""
            if attn_type == "context":
                attention_mask = torch.zeros((batch_size, 1, key_seq_len),
                                             dtype=torch.bool,
                                             device=self.test_device)
            elif attn_type == "self":
                attention_mask = None
                # torch.zeros(
                #     (batch_size, query_seq_len, key_seq_len),
                #     dtype=torch.bool,
                #     device=self.test_device)
            else:
                # BUG FIX: the original `raise "..."` raised a TypeError
                # (exceptions must derive from BaseException); raise a real
                # exception carrying the intended message instead.
                raise RuntimeError("attn type is not supported")
            # set layer_cache
            if set_layer_cache:
                memory_keys = torch.rand(size=(batch_size, self.head_count,
                                               key_seq_len,
                                               self.size_per_head),
                                         dtype=torch.float32,
                                         device=self.test_device)
                memory_values = torch.rand(size=(batch_size, self.head_count,
                                                 key_seq_len,
                                                 self.size_per_head),
                                           dtype=torch.float32,
                                           device=self.test_device)
                self_keys = torch.rand(size=(batch_size, self.head_count,
                                             query_seq_len,
                                             self.size_per_head),
                                       dtype=torch.float32,
                                       device=self.test_device)
                self_values = torch.rand(size=(batch_size, self.head_count,
                                               query_seq_len,
                                               self.size_per_head),
                                         dtype=torch.float32,
                                         device=self.test_device)
                print("self_keys size: ", self_keys.size())
                layer_cache_torch = {
                    "memory_keys": torch.clone(memory_keys),
                    "memory_values": torch.clone(memory_values),
                    "self_keys": torch.clone(self_keys),
                    "self_values": torch.clone(self_values)
                }
            else:
                layer_cache_torch = {
                    "memory_keys": None,
                    "memory_values": None,
                    "self_keys": None,
                    "self_values": None
                }
            onmt_model = lambda: onmt_multi_headed_attention(
                K,
                V,
                torch.clone(torch_layernorm(Q)) if pre_layernorm else Q,
                mask=attention_mask,
                layer_cache=layer_cache_torch,
                attn_type=attn_type)
            onmt_multi_headed_attention_result, torch_qps, torch_time_consume = \
                test_helper.run_model(onmt_model, use_cuda, num_iter)  # return output, attns
            onmt_attns = onmt_multi_headed_attention_result[1]
            if post_add_input:
                onmt_output = onmt_multi_headed_attention_result[0] + Q
            else:
                onmt_output = onmt_multi_headed_attention_result[0]
            print(
                f"Multi Headed Attention {info} ONMT, QPS,{torch_qps}, time, {torch_time_consume}"
            )
            if with_quantize_dynamic and not use_cuda:
                # NOTE: this run overwrites onmt_output/onmt_attns, so the
                # comparisons below are against the quantized model.
                q_onmt_model = lambda: self.q_onmt_multi_headed_attention(
                    K,
                    V,
                    torch.clone(torch_layernorm(Q)) if pre_layernorm else Q,
                    mask=attention_mask,
                    layer_cache=layer_cache_torch,
                    attn_type=attn_type)
                q_onmt_multi_headed_attention_result, q_torch_qps, q_torch_time_consume = \
                    test_helper.run_model(q_onmt_model, use_cuda, num_iter)  # return output, attns
                onmt_attns = q_onmt_multi_headed_attention_result[1]
                if post_add_input:
                    onmt_output = q_onmt_multi_headed_attention_result[0] + Q
                else:
                    onmt_output = q_onmt_multi_headed_attention_result[0]
                print(
                    f"Multi Headed Attention {info} Q-ONMT, QPS, {q_torch_qps}, time, {q_torch_time_consume}"
                )
            # benchmarking turbo with weight transposed
            turbo_attention_mask = attention_mask.float(
            ) * -1e18 if attention_mask is not None else None
            if set_layer_cache:
                layer_cache_turbo = {
                    "memory_keys": torch.clone(memory_keys),
                    "memory_values": torch.clone(memory_values),
                    "self_keys": torch.clone(self_keys),
                    "self_values": torch.clone(self_values)
                }
            else:
                layer_cache_turbo = {
                    "memory_keys": None,
                    "memory_values": None,
                    "self_keys": None,
                    "self_values": None
                }
            turbo_model_trans = lambda: turbo_attn_trans(
                K,
                V,
                Q,
                turbo_attention_mask,
                layer_cache=layer_cache_turbo,
                attn_type=attn_type,
                pre_layernorm=pre_layernorm,
                post_add_input=post_add_input,
                is_trans_weight=True)
            # with turbo_transformers.pref_guard("pref_test") as perf:
            turbo_result, turbo_qps, turbo_time_consume = \
                test_helper.run_model(turbo_model_trans, use_cuda,
                                      num_iter)
            turbo_output_trans, turbo_attns_trans = turbo_result
            print(
                f"Multi Headed Attention {info} Turbo Trans, QPS, {turbo_qps}, time, {turbo_time_consume}"
            )
            self.assertTrue(
                torch.max(torch.abs(onmt_output - turbo_output_trans)) < (
                    1e-3 if use_cuda else 1e-4))
            self.assertTrue(
                torch.max(torch.abs(onmt_attns - turbo_attns_trans)) < (
                    1e-3 if use_cuda else 1e-4))
            # The turbo run must also have updated the layer cache exactly
            # like the reference implementation did.
            if layer_cache_torch is not None:
                for k, v in layer_cache_torch.items():
                    if v is not None:
                        self.assertTrue(
                            torch.max(torch.abs(layer_cache_turbo[k] -
                                                v)) < 1e-3)
            # benchmarking turbo with weight not transposed
            if set_layer_cache:
                layer_cache_turbo = {
                    "memory_keys": torch.clone(memory_keys),
                    "memory_values": torch.clone(memory_values),
                    "self_keys": torch.clone(self_keys),
                    "self_values": torch.clone(self_values)
                }
            else:
                layer_cache_turbo = {
                    "memory_keys": None,
                    "memory_values": None,
                    "self_keys": None,
                    "self_values": None
                }
            turbo_model_notrans = lambda: turbo_attn_notrans(
                K,
                V,
                Q,
                turbo_attention_mask,
                layer_cache=layer_cache_turbo,
                attn_type=attn_type,
                pre_layernorm=pre_layernorm,
                post_add_input=post_add_input,
                is_trans_weight=False)
            with turbo_transformers.pref_guard("pref_test") as perf:
                turbo_result, turbo_qps, turbo_time_consume_notrans = \
                    test_helper.run_model(turbo_model_notrans, use_cuda,
                                          num_iter)
            turbo_output_notrans, turbo_attns_notrans = turbo_result
            print(
                f"Multi Headed Attention {info} Turbo NoTrans, QPS,{turbo_qps}, time, {turbo_time_consume_notrans}"
            )
            self.assertTrue(
                torch.max(torch.abs(onmt_output - turbo_output_notrans)) < (
                    1e-3 if use_cuda else 1e-4))
            self.assertTrue(
                torch.max(torch.abs(onmt_attns - turbo_attns_notrans)) < (
                    1e-3 if use_cuda else 1e-4))
            if with_quantize_dynamic and not use_cuda:
                with open(fname, "a") as fh:
                    fh.write(
                        f"{info} {torch_qps}, {q_torch_qps}, {turbo_qps}\n")
            else:
                with open(fname, "a") as fh:
                    fh.write(f"{info} {torch_qps}, {turbo_qps}\n")

        def test_multi_headed_attention(self):
            self.check_torch_and_turbo(use_cuda=False)
            if torch.cuda.is_available() and \
                turbo_transformers.config.is_compiled_with_cuda():
                self.check_torch_and_turbo(use_cuda=True)

    globals(
    )[f"TestMultiHeadedAttention{batch_size}_{key_seq_len:3}_{query_seq_len:3}_{attn_type}_{pre_layernorm}_{post_add_input}_{with_quantize_dynamic}_{set_layer_cache}"] = TestMultiHeadedAttention
# Start a fresh benchmark log; each generated test appends its QPS rows.
with open(fname, "w") as fh:
    fh.write(", torch, q_torch, turbo_transformers\n")
# Self-attention cases: keys and queries share the same sequence, so
# query_seq_len is deliberately passed for both key and query lengths.
for set_layer_cache in [True, False]:
    for post_add_input in [False]:
        for pre_layernorm in [False]:
            for batch_size in [4]:
                for query_seq_len in [1, 2]:
                    create_test(batch_size,
                                query_seq_len,
                                query_seq_len,
                                "self",
                                pre_layernorm,
                                post_add_input,
                                with_quantize_dynamic=False,
                                set_layer_cache=set_layer_cache)
# Context-attention cases: keys come from the encoder memory, so key and
# query lengths vary independently.
for set_layer_cache in [False, True]:
    for post_add_input in [False]:
        for pre_layernorm in [False]:
            for batch_size in [4]:
                for key_seq_len in [10, 20, 30, 40, 50]:
                    for query_seq_len in [1, 2]:
                        create_test(batch_size,
                                    key_seq_len,
                                    query_seq_len,
                                    "context",
                                    pre_layernorm,
                                    post_add_input,
                                    with_quantize_dynamic=False,
                                    set_layer_cache=set_layer_cache)
if __name__ == '__main__':
    unittest.main()
|
'''
Script: Covid Vaccine Slot Availability Display
By: Shyam Hushangabadkar
'''
from optparse import OptionParser
import requests
from pygame import mixer
from datetime import datetime, timedelta
import time
play_sound = True
min_vaccines = 1
def _process_request(actual_dates, base_url, age, dose=1, print_flag='Y'):
    """Query the CoWIN calendar API for each date and print matching slots.

    Args:
        actual_dates: iterable of 'dd-mm-YYYY' date strings to query.
        base_url: district- or pincode-based endpoint URL without the date.
        age: age of the person looking for a vaccine.
        dose: 1 or 2 — which dose's availability field to check.
        print_flag: 'Y'/'y' to print matching centers.

    Returns:
        Number of matching sessions found across all queried dates.
    """
    # Pick the availability field for the requested dose.
    if dose == 2:
        dose_param = "available_capacity_dose2"
    else:
        dose_param = "available_capacity_dose1"
    counter = 0
    for given_date in actual_dates:
        URL = base_url + "&date={}".format(given_date)
        header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}
        try:
            result = requests.get(URL, headers=header)
        except requests.RequestException:
            # BUG FIX: the original bare `except` assigned to `result.ok`
            # while `result` was still undefined, raising NameError on a
            # failed request. Treat the failure as "no response" instead.
            result = None
        if result is not None and result.ok:
            response_json = result.json()
            if response_json["centers"]:
                if (print_flag.lower() == 'y'):
                    for center in response_json["centers"]:
                        for session in center["sessions"]:
                            if (session["min_age_limit"] <= age and session[dose_param] > min_vaccines):
                                print('Pincode: {}'.format(center['pincode']))
                                print("Available on: {}".format(session['date']))
                                print("\t", center["name"])
                                print("\t", center["block_name"])
                                print("\t Price: ", center["fee_type"])
                                print("\t Availablity : ", session["available_capacity"])
                                if (session["vaccine"] != ''):
                                    print("\t Vaccine type: ", session["vaccine"])
                                print("\n")
                                counter = counter + 1
        else:
            print("No Response!")
    return counter
def check_availability_by_district(dis_id, age, num_days, sleep_time, num_of_iterations):
    """Repeatedly poll the CoWIN district calendar endpoint for open slots.

    Args:
        dis_id: CoWIN district id to query.
        age: age of the person looking for a vaccine.
        num_days: number of future days (starting today) to search.
        sleep_time: seconds to wait between search attempts.
        num_of_iterations: number of attempts; -1 means "effectively forever".
    """
    print_flag = 'Y'
    print("-------- Starting search for Covid vaccine slots! --------")
    actual = datetime.today()
    list_format = [actual + timedelta(days=i) for i in range(num_days)]
    actual_dates = [i.strftime("%d-%m-%Y") for i in list_format]
    if num_of_iterations == -1:  # run large number of times
        num_of_iterations = 100000
    # The district part of the URL never changes, so build it once.
    base_url = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict?district_id={}".format(dis_id)
    for i in range(num_of_iterations):
        print("Search try {} ...".format(i + 1))
        # BUG FIX: print_flag was previously passed positionally and landed
        # in the `dose` parameter of _process_request; pass it by keyword.
        counter = _process_request(actual_dates, base_url, age, print_flag=print_flag)
        if counter == 0:
            print("No Vaccination slot available!")
        else:
            if play_sound:
                mixer.init()
                mixer.music.load('sound/bensound-happyrock.mp3')
                mixer.music.play()
                time.sleep(5)
                mixer.music.stop()
            print("Search Completed! Exiting....")
            # BUG FIX: actually stop once a slot is found, matching the
            # "Exiting" message (the pincode variant already breaks here).
            break
        # wait before the next search attempt
        for t in range(sleep_time):
            time.sleep(1)
def check_availability_by_pincode(age, pincodes, num_days, sleep_time, num_of_iterations):
    """Repeatedly poll the CoWIN pincode calendar endpoint(s) for open slots.

    Args:
        age: age of the person looking for a vaccine.
        pincodes: list of pincodes to search.
        num_days: number of future days (starting today) to search.
        sleep_time: seconds to wait between search attempts.
        num_of_iterations: number of attempts; -1 means "effectively forever".
    """
    print_flag = 'Y'
    print("-------- Starting search for Covid vaccine slots! --------")
    actual = datetime.today()
    list_format = [actual + timedelta(days=i) for i in range(num_days)]
    actual_dates = [i.strftime("%d-%m-%Y") for i in list_format]
    if num_of_iterations == -1:  # run large number of times
        num_of_iterations = 100000
    for i in range(num_of_iterations):
        counter = 0
        print("Search try {} ...".format(i + 1))
        for pincode in pincodes:
            base_url = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode={}".format(pincode)
            # BUG FIX: accumulate hits across pincodes (the counter used to
            # be overwritten by each pincode, so earlier hits were lost) and
            # pass print_flag by keyword (it used to bind to `dose`).
            counter += _process_request(actual_dates, base_url, age, print_flag=print_flag)
        if counter == 0:
            print("No Vaccination slot available!")
        else:
            mixer.init()
            mixer.music.load('sound/bensound-happyrock.mp3')
            mixer.music.play()
            time.sleep(5)
            mixer.music.stop()
            print("Search Completed! Exiting....")
            break
        # wait before the next search attempt
        for t in range(sleep_time):
            time.sleep(1)
if __name__ == '__main__':
    # argument parsing
    parser = OptionParser()
    parser.add_option('--search', dest='search',type='int', default=1, help='Enter 1 for search by pin; Enter 2 for search by district')
    parser.add_option('--age', dest='age', type="int", help='Current age of person opting for vaccine')
    parser.add_option('--pin', dest='pincodes', action='append', default=[],
                      help='Space separated pincodes. Example: --pin=1234,1235,1236')
    parser.add_option('--district_id', dest='district_id', type='int', default=363,
                      help='Enter district id. District id can be found from typing below URL in browser:'
                           'https://cdn-api.co-vin.in/api/v2/admin/location/states'
                           'https://cdn-api.co-vin.in/api/v2/admin/location/districts/<id of your state>')
    parser.add_option('--days', dest='num_days', type="int", default=3,
                      help='number of future days to search vaccine slot')
    parser.add_option('--nexttrywait', dest='sleep_time', type="int", default=30,
                      help='time to wait before next search try')
    parser.add_option('--retrycount', dest='num_of_iterations', type="int", default=5,
                      help='Number of times to try search before exit script')
    options, args = parser.parse_args()
    if options.age is None:
        parser.error("parameter age is required. Please try 'python script.py --help'")
    # NOTE: --pin defaults to [] and --district_id defaults to 363, so the
    # original `is None` checks could never fire; test for emptiness instead.
    if (not options.pincodes) and (options.district_id is None):
        parser.error("parameter pin or district_id is required. Please try 'python script.py --help'")
    age = options.age
    # BUG FIX: `pincodes` used to be undefined when --pin was omitted,
    # crashing search-by-pin with a NameError; default to an empty list.
    pincodes = []
    if options.pincodes:
        pins = options.pincodes[0].split(",")
        pincodes = [int(ele) if ele.isdigit() else ele for ele in pins]
    district_id = options.district_id
    num_days = options.num_days
    sleep_time = options.sleep_time
    num_of_iterations = options.num_of_iterations
    if options.search == 1:
        if not pincodes:
            parser.error("parameter pin is required for search by pin. Please try 'python script.py --help'")
        check_availability_by_pincode(age, pincodes, num_days, sleep_time, num_of_iterations)
    else:
        check_availability_by_district(district_id, age, num_days, sleep_time, num_of_iterations)
|
import boto3
import json
def get_security_hub_info():
    """
    A fucntion that gives security hub products info
    """
    ec2 = boto3.client('ec2')
    region_names = [r['RegionName'] for r in ec2.describe_regions()['Regions']]
    collected = []
    for region_name in region_names:
        # Security Hub is not available in this region, so skip it.
        if region_name == 'ap-east-1':
            continue
        hub_client = boto3.client('securityhub', region_name=region_name)
        # Gather product information, one single-product list per entry.
        for product in hub_client.describe_products()['Products']:
            collected.append([product])
    # Wrap the product list in a dictionary and render it as JSON.
    print(json.dumps({"Products": collected}, indent=4, default=str))
# Script entry point: dump the Security Hub product catalogue as JSON.
get_security_hub_info()
|
print('Hello World Full of Python and sum=',2+2) |
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Map of the requirements needed by the inventory pipelines."""
# Each inventory resource maps to the metadata its load pipeline needs:
#   module_name: name of the pipeline module that loads this resource
#   depends_on:  resource that must be loaded first (None = no dependency;
#                the dependencies form a tree rooted at 'organizations')
#   api_name:    API client used to fetch the data
#   dao_name:    data access object used to store the results
REQUIREMENTS_MAP = {
    'appengine':
        {'module_name': 'load_appengine_pipeline',
         'depends_on': 'projects',
         'api_name': 'appengine_api',
         'dao_name': 'appengine_dao'},
    'backend_services':
        {'module_name': 'load_backend_services_pipeline',
         'depends_on': 'projects',
         'api_name': 'compute_api',
         'dao_name': 'backend_service_dao'},
    'bigquery_datasets':
        {'module_name': 'load_bigquery_datasets_pipeline',
         'depends_on': 'projects',
         'api_name': 'bigquery_api',
         'dao_name': 'dao'},
    'buckets':
        {'module_name': 'load_projects_buckets_pipeline',
         'depends_on': 'projects',
         'api_name': 'gcs_api',
         'dao_name': 'project_dao'},
    'buckets_acls':
        {'module_name': 'load_projects_buckets_acls_pipeline',
         'depends_on': 'buckets',
         'api_name': 'gcs_api',
         'dao_name': 'bucket_dao'},
    'cloudsql':
        {'module_name': 'load_projects_cloudsql_pipeline',
         'depends_on': 'projects',
         'api_name': 'cloudsql_api',
         'dao_name': 'cloudsql_dao'},
    'firewall_rules':
        {'module_name': 'load_firewall_rules_pipeline',
         'depends_on': 'projects',
         'api_name': 'compute_api',
         'dao_name': 'project_dao'},
    'folder_iam_policies':
        {'module_name': 'load_folder_iam_policies_pipeline',
         'depends_on': 'folders',
         'api_name': 'crm_api',
         'dao_name': 'folder_dao'},
    'folders':
        {'module_name': 'load_folders_pipeline',
         'depends_on': 'organizations',
         'api_name': 'crm_api',
         'dao_name': 'folder_dao'},
    'forwarding_rules':
        {'module_name': 'load_forwarding_rules_pipeline',
         'depends_on': 'projects',
         'api_name': 'compute_api',
         'dao_name': 'forwarding_rules_dao'},
    'ke':
        {'module_name': 'load_ke_pipeline',
         'depends_on': 'projects',
         'api_name': 'ke_api',
         'dao_name': 'ke_dao'},
    'group_members':
        {'module_name': 'load_group_members_pipeline',
         'depends_on': 'groups',
         'api_name': 'admin_api',
         'dao_name': 'dao'},
    'groups':
        {'module_name': 'load_groups_pipeline',
         'depends_on': 'organizations',
         'api_name': 'admin_api',
         'dao_name': 'dao'},
    'instance_group_managers':
        {'module_name': 'load_instance_group_managers_pipeline',
         'depends_on': 'projects',
         'api_name': 'compute_api',
         'dao_name': 'instance_group_manager_dao'},
    'instance_groups':
        {'module_name': 'load_instance_groups_pipeline',
         'depends_on': 'projects',
         'api_name': 'compute_api',
         'dao_name': 'instance_group_dao'},
    'instance_templates':
        {'module_name': 'load_instance_templates_pipeline',
         'depends_on': 'projects',
         'api_name': 'compute_api',
         'dao_name': 'instance_template_dao'},
    'instances':
        {'module_name': 'load_instances_pipeline',
         'depends_on': 'projects',
         'api_name': 'compute_api',
         'dao_name': 'instance_dao'},
    'org_iam_policies':
        {'module_name': 'load_org_iam_policies_pipeline',
         'depends_on': 'organizations',
         'api_name': 'crm_api',
         'dao_name': 'organization_dao'},
    'organizations':
        {'module_name': 'load_orgs_pipeline',
         'depends_on': None,
         'api_name': 'crm_api',
         'dao_name': 'organization_dao'},
    'projects':
        {'module_name': 'load_projects_pipeline',
         'depends_on': 'folders',
         'api_name': 'crm_api',
         'dao_name': 'project_dao'},
    'projects_iam_policies':
        {'module_name': 'load_projects_iam_policies_pipeline',
         'depends_on': 'projects',
         'api_name': 'crm_api',
         'dao_name': 'project_dao'},
    'service_accounts':
        {'module_name': 'load_service_accounts_pipeline',
         'depends_on': 'projects',
         'api_name': 'iam_api',
         'dao_name': 'service_account_dao'},
    }
|
import serial # https://pythonhosted.org/pyserial
from serial.tools import list_ports
class PortDriverBase:
    """Base driver that opens the first serial port matching *port_type*.

    Raises:
        RuntimeError: if no attached port matches the given keyword.
    """

    def __init__(self, port_type):
        # find ports of specified type
        ports_of_type = list(list_ports.grep(port_type))
        if not ports_of_type:
            raise RuntimeError('No port names matched provided keyword')
        # grep() yields (device, description, hwid) tuples; device is first
        port_name = ports_of_type[0][0]
        # by default, has baudrate 9600, bytesize 8, stop bits 1
        self.serial_port = serial.Serial(port_name)
        self.serial_port.timeout = 2

    def __del__(self):
        # BUG FIX: if __init__ raised before serial_port was assigned, the
        # original __del__ raised AttributeError during teardown; guard
        # against a partially constructed instance.
        port = getattr(self, 'serial_port', None)
        if port is not None and port.is_open:
            port.close()
|
import re
from datetime import datetime
# A MongoDB ObjectId is 24 lowercase hexadecimal characters.
REGEX = re.compile(r'^[a-f\d]{24}$')


class Mongo(object):
    """Helpers for validating MongoDB ObjectId strings."""

    @classmethod
    def is_valid(cls, object_id):
        """Return True when *object_id* is a 24-char lowercase hex string."""
        try:
            match = REGEX.match(object_id)
        except TypeError:
            # Non-string input (None, int, ...) is simply not a valid id.
            return False
        return bool(match)

    @classmethod
    def get_timestamp(cls, object_id):
        """Return the creation time encoded in a valid ObjectId, else False.

        The first 8 hex digits of an ObjectId are a Unix timestamp.
        """
        if not cls.is_valid(object_id):
            return False
        seconds = int(object_id[:8], 16)
        return datetime.utcfromtimestamp(seconds)
|
#!/usr/bin/python3
# iproutediff - Compares outputs of "show ip route" for changes
#
# Written by Foeh Mannay, September 2017
import re
def parse(filename):
    """Parse a saved "show ip route" output file into a route dictionary.

    Opens the file referenced by *filename* and attempts to interpret IP
    routes from it.

    Returns:
        dict keyed by "prefix/length", each value being
        [code, admin distance, metric, next hop, interface, age];
        fields a given route type lacks are empty strings.
    """
    # Initialise empty values for all fields. NOTE(review): prefixlength
    # starts as the int sentinel 33 but is replaced by strings like "/24";
    # a continuation line appearing before any prefix line would make the
    # key concatenation below raise TypeError.
    routes = {}
    code = ""
    prefix = ""
    prefixlength = 33
    admindistance = 0
    metric = 0
    nexthop = ""
    interface = ""
    age = ""
    with open(filename, 'r') as infile:
        for line in infile:
            # skip lines with a prompt, the legend and the GOLR
            m = re.search(r'#|RIP|OSPF|IS-IS|ODR|Gateway|variably', line)
            if(m):
                continue
            # Match lines that imply the prefix length for the following routes.
            # BUG FIX: allow whitespace before "is" — the original pattern
            # required the text "/24is subnetted" with no space and could
            # therefore never match real "x.x.x.x/24 is subnetted" lines.
            m = re.search(r'(\d*.\d*.\d*.\d*)(/\d*)\s*is subnetted', line)
            if(m):
                prefixlength=m.group(2)
                continue
            # Match BGP routes (age captured, no interface)
            m = re.search(r'^(B )(\d*.\d*.\d*.\d*)(/\d*) \[(\d*)/(\d*)\] via (\d*.\d*.\d*.\d*), (.*)', line)
            if(m):
                code=m.group(1)
                prefix=m.group(2)
                prefixlength=m.group(3)
                admindistance=m.group(4)
                metric=m.group(5)
                nexthop=m.group(6)
                age=m.group(7)
                routes[prefix+prefixlength]=[code, admindistance, metric, nexthop, '', age]
                continue
            # Match static routes (interface captured, no age)
            m = re.search(r'^(S )(\d*.\d*.\d*.\d*)(/\d*) \[(\d*)/(\d*)\] via (\d*.\d*.\d*.\d*), (.*)', line)
            if(m):
                code=m.group(1)
                prefix=m.group(2)
                prefixlength=m.group(3)
                admindistance=m.group(4)
                metric=m.group(5)
                nexthop=m.group(6)
                interface=m.group(7)
                routes[prefix+prefixlength]=[code, admindistance, metric, nexthop, interface, '']
                continue
            # Match other route types (both age and interface captured)
            m = re.search(r'^(.......)(\d*.\d*.\d*.\d*)(/\d*) \[(\d*)/(\d*)\] via (\d*.\d*.\d*.\d*), (.*), (.*)', line)
            if(m):
                code=m.group(1)
                prefix=m.group(2)
                prefixlength=m.group(3)
                admindistance=m.group(4)
                metric=m.group(5)
                nexthop=m.group(6)
                age=m.group(7)
                interface=m.group(8)
                routes[prefix+prefixlength]=[code, admindistance, metric, nexthop, interface, age]
                continue
            # Match the first half of routes that spill onto the next line
            # (prefix and length only)
            m = re.search(r'^(........)(\d*.\d*.\d*.\d*)(/\d*)', line)
            if(m):
                code=m.group(1)
                prefix=m.group(2)
                prefixlength=m.group(3)
                continue
            # Match the second half of spilled routes (AD/metric/nexthop/
            # age/interface); combined with the prefix remembered above.
            m = re.search(r'^ +\[(\d*)/(\d*)\] via (\d*.\d*.\d*.\d*), (.*), (.*)', line)
            if(m):
                admindistance=m.group(1)
                metric=m.group(2)
                nexthop=m.group(3)
                age=m.group(4)
                interface=m.group(5)
                routes[prefix+prefixlength]=[code, admindistance, metric, nexthop, interface, age]
                continue
    return(routes)
print("iproutediff - takes the output of two \"show ip route\" commands and notes the differences\n")
print("v0.1 alpha, By Foeh Mannay, September 2017\n")
filename = input("Enter filename of first 'show ip route' (A): ")
A = parse(filename)
if(A is None):
raise SystemExit("Error - unable to parse any routes from this file!\n")
filename = input("Enter filename of second 'show ip route' (B): ")
B = parse(filename)
if(B is None):
raise SystemExit("Error - unable to parse any routes from this file!\n")
# Temporary swap variable to get over Python's restriction on editing a dictionary while
# iterating over it
Z = {}
# Compare the two route lists
for key in A.keys():
if(key in B):
changed = False
# Check route type, AD, metric, nexthop & interface
for i in range(1,5):
if(A[key][i] != B[key][i]):
changed = True
if(changed):
print ("\nChanged:\n")
print ("<<< " + A[key][0] + key + " [" + A[key][1] + "/" + A[key][2] + "] via " + A[key][3] + " " + A[key][4] + " " + A[key][5])
print (">>> " + B[key][0] + key + " [" + B[key][1] + "/" + B[key][2] + "] via " + B[key][3] + " " + B[key][4] + " " + B[key][5] + "\n")
changed = False
del B[key]
else:
Z[key] = A[key]
A = Z
# Enumerate any routes removed
if(len(A) > 0):
print ("\nRemoved:\n")
for key in A.keys():
print ("<<< " + A[key][0] + key + " [" + A[key][1] + "/" + A[key][2] + "] via " + A[key][3] + " " + A[key][4] + " " + A[key][5] + "\n")
else:
print ("\nNo routes removed.\n")
# Enumerate any routes added
if(len(B) > 0):
print ("\nAdded:\n")
for key in B.keys():
print (">>> " + B[key][0] + key + " [" + B[key][1] + "/" + B[key][2] + "] via " + B[key][3] + " " + B[key][4] + " " + B[key][5] + "\n")
else:
print ("\nNo routes added.\n")
|
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, RobustScaler, OneHotEncoder, LabelEncoder, PowerTransformer, QuantileTransformer
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import SGDRegressor, LinearRegression
import category_encoders as ce
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import LinearSVR
from sklearn.linear_model import BayesianRidge
import statistics
import pandas as p
import numpy as np
# Load the Kaggle training set; column order below is relied on via cols[i].
# NOTE(review): absolute Windows path -- adjust for your machine.
trainData= p.read_csv('C:\\Users\\Ryan BARRON\\Desktop\\CS_Fourth_Year\\CS4061_Machine_Learning\\Kaggle_Comptetion_Part1\\tcd ml 2019-20 income prediction training (with labels).csv')
trainData = trainData.drop("Instance", axis=1)
target= "Income in EUR"
cols = trainData.columns
# Setting my labels and target variables
x = trainData.drop(target, axis=1)
y = trainData[target]
# Impute missing year of record with the mean (earlier comment said median,
# but the strategy used is "mean"), then power-transform.
# NOTE(review): the positional argument to PowerTransformer is `method`;
# "box-cox" requires strictly positive inputs -- verify the column has no
# zeros/negatives after imputation.
yorImputer = SimpleImputer(strategy="mean")
yorScale = PowerTransformer("box-cox")
x[cols[0]] = yorScale.fit_transform(yorImputer.fit_transform(x[cols[0]] .to_frame()))
# Make gender consistent, replace 0, unknown and nan with same label, unknown
x[cols[1]] = x[cols[1]].replace(to_replace="0", value="unknown").fillna("unknown")
genderEncoder = LabelEncoder()
# NOTE(review): LabelEncoder expects a 1-D array; passing .to_frame() hands it
# an (n, 1) frame -- confirm this runs without a shape error on your sklearn
# version (a plain Series would be the conventional input).
x[cols[1]] = genderEncoder.fit_transform(x[cols[1]].to_frame())
# Age: mean-impute then box-cox scale.
ageImputer = SimpleImputer(strategy="mean")
ageScaler = PowerTransformer("box-cox")
x[cols[2]] = ageScaler.fit_transform(ageImputer.fit_transform(x[cols[2]].to_frame()))
# Country: fill missing then integer-encode.
countryEncoder =LabelEncoder()
x[cols[3]] = countryEncoder.fit_transform(x[cols[3]].fillna("unknown").to_frame())
# Size of city: mean-impute then box-cox scale.
cityImputer = SimpleImputer(strategy="mean")
cityScaler = PowerTransformer("box-cox")
x[cols[4]] = cityScaler.fit_transform(cityImputer.fit_transform(x[cols[4]].to_frame()))
# Profession: fill missing then integer-encode.
professionEncoder = LabelEncoder()
x[cols[5]] = professionEncoder.fit_transform(x[cols[5]].fillna("unknown").to_frame())
# University degree: "0" means No degree; missing becomes "unknown".
x[cols[6]] = x[cols[6]].replace(to_replace="0", value="No").fillna("unknown")
universityEncoder = LabelEncoder()
x[cols[6]] = universityEncoder.fit_transform(x[cols[6]].to_frame())
# Glasses: already numeric/binary, left untouched.
# donothing
# Hair color
x[cols[8]] = x[cols[8]].fillna("Unknown")
hairEncoder = LabelEncoder()
x[cols[8]] = hairEncoder.fit_transform(x[cols[8]].to_frame())
# Body height: mean-impute then box-cox scale.
heightImputer = SimpleImputer(strategy="mean")
heightScaler = PowerTransformer("box-cox")
x[cols[9]] = heightScaler.fit_transform(heightImputer.fit_transform(x[cols[9]].to_frame()))
# Spliting my data into training data and validation data
xTrain, xTest, yTrain, yTest = train_test_split(x, y, test_size=0.3)
# Earlier experiment: a ColumnTransformer pipeline, kept for reference.
# numeric_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='median')),('scaler', StandardScaler())])
# categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='constant',fill_value='missing')),
# ('encoder', ce.TargetEncoder())])
# numeric_features = trainData.select_dtypes(include=['int64', 'float64']).drop([target], axis=1).columns
# categorical_features = trainData.select_dtypes(include=['object']).columns
# preprocessor = ColumnTransformer(
# transformers=[
# ('num', numeric_transformer, numeric_features),
# ('cat', categorical_transformer, categorical_features)])
# Candidate models; currently a single boosted random forest.
regressions = [
    AdaBoostRegressor(RandomForestRegressor(random_state=0))
]
# for regressor in regressions:
# pip = Pipeline(steps=[('preprocessor', preprocessor),
# ('Regression', regressor)])
# pip.fit(xT, yT)
# print(regressor)
# print("model score: %.3f" % pip.score(xTest, yTest))
# Fit every candidate and report its R^2 on the held-out split.
for regressor in regressions:
    regressor.fit(xTrain, yTrain)
    print(regressor)
    print("model score: %.3f" % regressor.score(xTest, yTest))
# sgd = Pipeline(steps=[('preprocessor', preprocessor),
# ('Regression', SGDRegressor())])
# sgd.fit(xT, yT)
# score = sgd.score(xTest, yTest)
# print(score)
|
from .forms import addemployee
from django.shortcuts import render,get_object_or_404,redirect
from .models import detail
from django.utils import timezone
from django.shortcuts import redirect
from django.views.generic import View
from django.contrib.auth import login as auth_login,logout
from django.contrib.auth import authenticate
from django.http import HttpResponseRedirect
from django.template.context_processors import csrf
from django.urls import reverse
from django.contrib.auth.models import User
from django.forms import modelformset_factory
from django.db.models import Q
import urllib.request
import json
# Module-level cache: manager_id -> list of subordinate employee ids.
# Shared with the search views so they can re-render the tree template.
dic = dict()
def tree(request):
    """Sync employee records from the Zenefits API into `detail` rows and render the org chart.

    For each person returned by the API: update the existing `detail` row if one
    exists (saving only when something actually changed), otherwise create a new
    row, resolving location/department/manager via their follow-up URLs.  Also
    rebuilds the manager->subordinates map in the module-level `dic`.
    """
    global dic
    dic={}
    name=list()
    print("on")
    # NOTE(review): hard-coded API credential -- move to settings/env before shipping.
    token = 'elZxQlHDSUallvL3OnnH'
    url = 'https://api.zenefits.com/core/people'
    hed = {'Authorization': 'Bearer ' + token}
    req = urllib.request.Request(url=url, headers=hed)
    content = urllib.request.urlopen(req)
    data = json.load(content)
    num = data['data']['data']
    for item in num:
        idi = item['id']
        print(idi)
        if detail.objects.filter(emp_id=idi).exists():
            # Existing employee: copy over any changed fields; `pivot` tracks
            # whether anything changed so we only hit the DB when needed.
            ext = detail.objects.get(emp_id=idi)
            pivot = 0
            if ext.status != item['status']:
                ext.status = item['status']
                pivot=1
            if ext.preferred_name != item['preferred_name']:
                ext.preferred_name = item['preferred_name']
                pivot=1
            if ext.last_name != item['last_name']:
                ext.last_name = item['last_name']
                pivot=1
            if ext.work_phone != item['work_phone']:
                ext.work_phone = item['work_phone']
                pivot=1
            if ext.personal_email != item['personal_email']:
                ext.personal_email = item['personal_email']
                pivot=1
            # BUG FIX: the next three blocks originally repeated the `!=`
            # comparison on the second line (a no-op) instead of assigning with
            # `=`, so URL changes were detected but never persisted.
            if ext.location_url != item['location']['url']:
                ext.location_url = item['location']['url']
                pivot=1
            if ext.department_url != item['department']['url']:
                ext.department_url = item['department']['url']
                pivot=1
            if ext.manager_url != item['manager']['url']:
                ext.manager_url = item['manager']['url']
                pivot=1
            if pivot:
                ext.save()
            continue
        # New employee: build a fresh row, following the nested API URLs.
        manager_url = item['manager']['url']
        post = detail()
        print("Nah")
        post.emp_id = idi
        print("-1")
        if item['status'] != None:
            post.status = item['status']
            print("0")
        post.last_name = item['last_name']
        post.preferred_name = item['preferred_name']
        if item['work_phone'] != None:
            post.work_phone = item['work_phone']
            print("1")
        if item['personal_email'] != None:
            post.personal_email = item['personal_email']
            print("2")
        location_url = item['location']['url']
        post.location_url = location_url
        if location_url != None:
            loc = urllib.request.Request(url=location_url, headers=hed)
            loc2 = urllib.request.urlopen(loc)
            loc3 = json.load(loc2)
            string = loc3['data']['city'] + " , " + loc3['data']['state'] + " , " + loc3['data']['country']
            post.location = string
            print("3")
        dept_url = item['department']['url']
        post.department_url = dept_url
        if dept_url != None:
            dept = urllib.request.Request(url=dept_url, headers=hed)
            dept2 = urllib.request.urlopen(dept)
            dept3 = json.load(dept2)
            post.department = dept3['data']['name']
            print("4")
        manager_url = item['manager']['url']
        post.manager_url = manager_url
        if manager_url == None:
            # Top of the org chart: sentinel manager_id "0".
            post.manager = "None"
            post.manager_id = "0"
        else:
            man = urllib.request.Request(url=manager_url, headers=hed)
            man2 = urllib.request.urlopen(man)
            man3 = json.load(man2)
            number = man3['data']['id']
            if item['status']=='active':
                print("dic is on")
                if number in dic:
                    dic[number].append(idi)
                else:
                    dic[number]=list()
                    dic[number].append(idi)
            post.manager = man3['data']['preferred_name'] + " " + man3['data']['last_name']
            post.manager_id = man3['data']['id']
            print("5")
        post.save()
        print("inserted")
    # Rebuild the subordinate map and the list of roots from the full table.
    total = detail.objects.all()
    for value in total:
        number = value.manager_id
        idi = value.emp_id
        if value.manager == "None" and value.status=="active":
            name.append(idi)
            continue
        if number in dic:
            if idi not in dic[number]:
                dic[number].append(idi)
        else:
            dic[number]=list()
            dic[number].append(idi)
    return render(request,'orgchart/tree.html',{'total':total,'dic':dic,'name':name})
def employee_detail(request,idi):
    """Render the detail page for one employee, refreshing the cached manager name."""
    emp = get_object_or_404(detail, emp_id=idi)
    # NOTE(review): tree() stores manager_id as the *string* "0" for employees
    # without a manager, so this integer comparison may always be true --
    # confirm the model field type before relying on this guard.
    if emp.manager_id!=0:
        k = detail.objects.get(emp_id=emp.manager_id)
        emp.manager=k.preferred_name + " " + k.last_name
        emp.save()
    print(emp)
    return render(request,'orgchart/employee_detail.html',{'emp':emp})
def add_employee(request):
    """Create a new employee from the add-employee form.

    On POST with a valid form, resolves the selected manager's display name,
    saves the new record, and redirects to the tree view.  On GET (or an
    invalid POST) the form is (re-)rendered.
    """
    # (removed a dead `global dic` declaration -- `dic` is never used here)
    if request.method == "POST":
        idi = request.POST.get("manager_id")
        form = addemployee(request.POST)
        if form.is_valid():
            temp = detail.objects.get(emp_id=idi)
            name = temp.preferred_name + " " + temp.last_name
            post = form.save(commit=False)
            post.subordinates_url = 'null'
            post.manager = name
            post.save()
            return redirect('tree')
    else:
        form = addemployee()
    return render(request,'orgchart/add_employee.html',{'form':form})
def detail_edit(request,pk):
    """Edit an existing employee record.

    On POST with a valid form, refreshes the cached manager name (unless the
    sentinel manager_id 0 is set) and redirects back to the detail page.  On
    GET (or an invalid POST) the bound form is rendered for correction.
    """
    check = get_object_or_404(detail, emp_id=pk)
    idi = check.manager_id
    if request.method == "POST" :
        form = addemployee(request.POST,instance=check)
        if form.is_valid():
            post = form.save(commit=False)
            if idi != 0:
                temp = detail.objects.get(emp_id=idi)
                name = temp.preferred_name + " " + temp.last_name
                post.manager = name
            post.subordinates_url = 'null'
            post.save()
            # BUG FIX: the redirect previously ran for *invalid* POSTs as well,
            # silently discarding validation errors; now (matching add_employee)
            # only a valid form redirects, otherwise we fall through and
            # re-render the form with its errors.
            return redirect('employee_detail',idi = pk)
    else:
        form = addemployee(instance=check)
    return render(request,'orgchart/edit_employee.html',{'form':form})
def delete_employee(request):
    """'Delete' an employee by blanking the displayed name (a soft delete --
    the row is kept), then return to the tree view.

    Renders the tree template with an error message when the lookup fails.
    """
    # (removed a dead `global dic` declaration -- `dic` is never used here)
    name = request.POST.get('str')
    try:
        k=get_object_or_404(detail, emp_id=name)
        k.preferred_name = "X"
        k.last_name = " "
        k.save()
        return redirect('tree')
    # Narrowed from a bare `except:` (which also swallowed SystemExit /
    # KeyboardInterrupt); Http404 from get_object_or_404 is still caught.
    except Exception:
        message = "No Result Found"
        return render(request,'orgchart/tree.html', {'message':message})
def search_name(request):
    """Search employees by first/last name and re-render the tree with matches
    highlighted.

    A one-word query matches either name field; a two-word query additionally
    requires the second word to equal the other name field exactly.
    NOTE(review): a non-POST request falls through and returns None -- confirm
    this view is only wired to POST submissions.
    """
    try:
        if request.method=="POST":
            name = request.POST.get('str')
            string = name.split()
            for s in string:
                print(s)
            print("************")
            if string:
                match = detail.objects.filter(Q(preferred_name__icontains=string[0]) | Q(last_name__icontains=string[0]))
                print("1")
                if match:
                    name = list()
                    total = detail.objects.all()
                    if len(string)==2:
                        print("y")
                        for i in match:
                            if i.last_name == string[1] or i.preferred_name == string[1]:
                                k = i.emp_id
                                name.append(k)
                    else:
                        print("z")
                        for i in match:
                            k = i.emp_id
                            name.append(k)
                    if len(name)==0:
                        message = "No Result Found"
                        return render(request,'orgchart/tree.html', {'message':message})
                    return render(request,'orgchart/tree.html',{'name':name,'total':total,'dic':dic})
                else:
                    message = "No Result Found"
                    return render(request,'orgchart/tree.html', {'message':message})
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
    except Exception:
        message = "No Result Found"
        return render(request,'orgchart/tree.html', {'message':message})
def search_id(request):
    """Search for a single employee by exact emp_id and re-render the tree.

    A missing record raises DoesNotExist, which the handler turns into the
    "No Result Found" message.
    """
    try:
        if request.method=="POST":
            string = request.POST.get('str')
            if string:
                match = detail.objects.get(emp_id=string)
                if match:
                    name = list()
                    total = detail.objects.all()
                    name.append(match.emp_id)
                    return render(request,'orgchart/tree.html',{'name':name,'total':total,'dic':dic})
                else:
                    message = "No Result Found"
                    return render(request,'orgchart/tree.html', {'message':message})
    # Narrowed from a bare `except:`; DoesNotExist is still caught.
    except Exception:
        message = "No Result Found"
        return render(request,'orgchart/tree.html', {'message':message})
def search_location(request):
    """Search employees whose location contains the first query word and
    re-render the tree with the matching ids highlighted."""
    try:
        if request.method=="POST":
            name = request.POST.get('str')
            string = name.split()
            if string:
                match = detail.objects.filter(location__icontains=string[0])
                if match:
                    name_location = list()
                    total = detail.objects.all()
                    for i in match:
                        k = i.emp_id
                        name_location.append(k)
                    return render(request,'orgchart/tree.html',{'name_location':name_location,'total':total,'dic':dic})
                else:
                    message = "No Result Found"
                    return render(request,'orgchart/tree.html', {'message':message})
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
    except Exception:
        message = "No Result Found"
        return render(request,'orgchart/tree.html', {'message':message})
def contact(request):
    """Render the static contact page."""
    return render(request,'orgchart/contact.html', {})
# Read a string and report whether it is a palindrome.
# A palindrome reads the same forwards and backwards once case and
# spaces are ignored (e.g. "Ame a ema").
texto = input().lower().replace(" ", "")
print("Palíndromo" if texto == texto[::-1] else "Não Palíndromo")
|
import requests
import random
import json
import sys
def GetOTP():
    """Generate a 6-digit OTP and send it via the sms4india campaign API.

    Returns the OTP string after posting it to the hard-coded phone number.
    NOTE(review): the API key, secret and phone number are hard-coded --
    move them to configuration before use.  `random` is not a CSPRNG; for a
    real OTP use the `secrets` module instead.
    """
    try:
        URL = 'https://www.sms4india.com/api/v1/sendCampaign'
        # MobileNumber = sys.argv[1]
        OTP = str(random.randint(100000, 999999))
        OTPMessage = OTP + ' is your OTP.'
        # OTPMessage = OTP + 'is your OTP. Please DO NOT share this with anyone.'
        req_params = {
            'apikey':'W72M3Z2Q4BKQ9SL8IMFUMNJQVAGL0H36',
            'secret':'RWA6EVIMC4G0GF9M',
            'usetype':'stage',
            'phone':7730038314,
            'message':OTPMessage,
            'senderid':'SMSIND'
        }
        response = requests.post(URL, req_params)
        print(response.json())
        return OTP
        # print('OTP sent to the registered mobile number.')
        # with open('generatedOTP.cfg', 'w') as fpFile:
        # fpFile.write(OTP)
    except IndexError:
        # Guards the (currently commented-out) sys.argv[1] lookup above; with
        # that line disabled this handler can never fire.
        print('Argument is missing!')
GetOTP()
|
import os
from scipy.io import loadmat
from prepare_caltech_dataset import convert_sequence, convert_annotations, read_sequence
import cv2
import glob
import json
def process_seqs():
    """Convert the sequence files and save to a similar dir structure.

    Walks data/set*/ for .seq files and extracts each one into target/images/
    via read_sequence(), prefixing outputs with the set directory name.
    """
    # ROBUSTNESS FIX: makedirs(exist_ok=True) replaces the exists()+mkdir pair;
    # os.mkdir('target/images') failed whenever the parent 'target' directory
    # did not exist yet (e.g. when this step runs standalone).
    os.makedirs('target/images', exist_ok=True)
    for dir_name in glob.glob('data/set*'):
        parent_dir = os.path.split(dir_name)[-1]
        for seq_path in glob.glob('{}/*.seq'.format(dir_name)):
            current_dir = os.path.splitext(os.path.basename(seq_path))[0]
            print('Converting {}/{}.seq'.format(parent_dir, current_dir))
            read_sequence(seq_path, 'target/images', parent_dir + '_' + current_dir)
def process_annotations():
    """Convert annotations to json file format.

    Loads every .vbb file under data/annotations/set*/, converts it with
    convert_annotations(), merges the per-video dicts and writes them to a
    single target/Annotations/annotations.json.
    """
    final_annotation = {}
    # ROBUSTNESS FIX: makedirs(exist_ok=True) also creates the 'target' parent
    # if missing; the original os.mkdir failed in that case.
    os.makedirs('target/Annotations', exist_ok=True)
    for dir_name in glob.glob('data/annotations/set*'):
        parent_dir = os.path.split(dir_name)[-1]
        for vbb_file in glob.glob('{}/*.vbb'.format(dir_name)):
            current_dir = os.path.splitext(os.path.basename(vbb_file))[0]
            vbb = loadmat(vbb_file)
            print('Converted annotations from {}'.format(vbb_file))
            annotation = convert_annotations(vbb, '{}_{}'.format(parent_dir, current_dir))
            final_annotation.update(annotation)
    with open('target/Annotations/annotations.json', 'w') as f:
        json.dump(final_annotation, f)
def main():
    """Entry point: ensure the output root exists, then run the conversion steps.

    The sequence-extraction step is currently disabled; only annotations are
    converted.
    """
    if not os.path.exists('target'):
        os.mkdir('target')
    #process_seqs()
    process_annotations()
if __name__ == '__main__':
    main()
|
from Tkinter import *
import tkMessageBox
#n = 14257
#e = 11
#d = 1267
#Tn = 13936
# Memoisation tables: plaintext char -> ciphertext char (and the inverse), so
# each distinct character is exponentiated only once per session.
LUT_encryption = dict()
LUT_decryption = dict()
def encrypt_message():
    """Encrypt the encryption-box text with textbook RSA, one character at a
    time (c = m^e mod n), using the key typed into the n/e entry boxes.

    NOTE(review): per-character RSA is a deterministic substitution cipher
    (identical characters always map to the same ciphertext) -- fine as a
    classroom demo, not for real use.  `unichr` means this is Python 2 code.
    """
    n = int(entryn.get())
    e = int(entrye.get())
    message = texte.get(1.0, END)
    encrypted_msg = ""
    for i in message:
        if i in LUT_encryption:
            encrypted_msg += LUT_encryption[i]
        else:
            numerize = int(ord(i))
            encrypt = pow(numerize, e, n)  # modular exponentiation
            LUT_encryption[i] = unichr(encrypt)
            encrypted_msg += unichr(encrypt)
    # Replace the box contents with the ciphertext.
    clear_text()
    texte.insert(END, encrypted_msg)
    tkMessageBox.showinfo("Encryption", "Message Encrypted!")
def clear_text():
    """Empty the encryption text box."""
    texte.delete(1.0, END)
def clear_text2():
    """Empty the decryption text box."""
    textd.delete(1.0, END)
def decrypt_message():
    """Decrypt the decryption-box text character by character (m = c^d mod n).

    The private exponent/modulus are hard-coded to match the public key
    (14257, 11) displayed in the UI -- there are no entry boxes for them.
    """
    d = 1267
    n = 14257
    en_message = textd.get(1.0, END)
    decrypted_msg = ""
    for i in en_message:
        if i in LUT_decryption:
            decrypted_msg += LUT_decryption[i]
        else:
            numerize = ord(i)
            decrypt = pow(numerize, d, n)  # modular exponentiation
            LUT_decryption[i] = unichr(decrypt)
            decrypted_msg += unichr(decrypt)
    # Replace the box contents with the recovered plaintext.
    clear_text2()
    textd.insert(END, decrypted_msg)
    tkMessageBox.showinfo("Decryption", "Message Decrypted!")
def openfileE():
    """Load Etext.txt into the encryption box (replacing its contents)."""
    clear_text()
    # IDIOM FIX: `with` guarantees the file is closed even if insert() raises
    # (the original open/close pair leaked the handle on an exception).
    with open("Etext.txt", "r") as f:
        texte.insert (END, f.read())
def savefileE():
    """Save the encryption box contents to Etext.txt."""
    with open('Etext.txt', 'w') as f:
        t = texte.get(1.0, END)
        f.write(t)
def openfileD():
    """Load Dtext.txt into the decryption box (replacing its contents)."""
    clear_text2()
    with open("Dtext.txt", "r") as f:
        textd.insert (END, f.read())
def savefileD():
    """Save the decryption box contents to Dtext.txt."""
    with open('Dtext.txt', 'w') as f:
        t = textd.get(1.0, END)
        f.write(t)
# --- GUI construction (Python 2 / Tkinter) ---
root = Tk() #gives us a blank canvas object to work with
root.title("RSA")
root.configure(background="light blue")
#EncryptionMenu
menubar = Menu(root)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="Open", command=openfileE)
filemenu.add_separator()
filemenu.add_command(label="Save", command=savefileE)
menubar.add_cascade(label="Encryption", menu=filemenu)
#DecryptionMenu
filemenu2 = Menu(menubar, tearoff=0)
filemenu2.add_command(label="Open", command=openfileD)
filemenu2.add_separator()
filemenu2.add_command(label="Save", command=savefileD)
menubar.add_cascade(label="Decryption", menu=filemenu2)
root.config(menu=menubar)
#Labels
labelEn = Label(root, text="Encryption", bg="light blue")
labelEn.grid(row=0, column=2, sticky=EW)
labelD = Label(root, text="Decryption", bg="light blue")
labelD.grid(row=0, column=8, sticky=EW)
labeln = Label(root, text="n =", bg="light blue")
labeln.grid(row=1, column=0, sticky=EW)
labele = Label(root, text="e =", bg="light blue")
labele.grid(row=1, column=2, sticky=EW)
labelpk = Label(root, text="Public Key: (14257, 11) ")
labelpk.grid(row=1, column=7, columnspan=2, sticky=EW)
label1 = Label(root, text=" ", bg="light blue")  # spacer between the two panes
label1.grid(row=0, column=5)
#Entryboxes (key input: modulus n and public exponent e)
entryn = Entry(root, width=5)
entryn.grid(row=1, column=1)
entrye = Entry(root, width=5)
entrye.grid(row=1, column=3)
#Buttons
buttone = Button(root, text="Encrypt", bg="white", command=encrypt_message)
buttone.grid(row=1, column=4)
buttond = Button(root, text="Decrypt", bg="white", command=decrypt_message)
buttond.grid(row=1, column=10)
#Textboxes (left: plaintext/ciphertext for encryption; right: for decryption)
texte = Text(root, height=10, width=3, bg= "light gray")
texte.grid(row=2, column=0, rowspan=5, columnspan=5, sticky=EW)
textd = Text(root, height=10, width=5, bg= "light gray")
textd.grid(row=2, column=6, rowspan=5, columnspan=5, sticky=EW)
# Hand control to Tk's event loop (blocks until the window closes).
mainloop()
FILE forge.yaml
# Global forge configuration
registry:
type: docker
url: registry.gitlab.com
namespace: forgetest/forgetest
END
FILE Dockerfile
FROM nginx:1.7.9
RUN echo TEST-ID
END
FILE k8s/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{build.name}}
spec:
replicas: 3
selector:
matchLabels:
deployment: {{build.name}}
template:
metadata:
labels:
deployment: {{build.name}}
spec:
containers:
- name: nginx
image: {{build.images["Dockerfile"]}}
ports:
- containerPort: 80
END
FILE service.yaml
name: gitlab
END
RUN docker logout registry.gitlab.com
RUN forge deploy
MATCH
unable to locate docker credentials, please run `docker login registry.gitlab.com`
END
ERR
TIMEOUT 60
RUN docker login registry.gitlab.com -u forgetest -p forgetest
RUN forge -v deploy
RUN docker login registry.gitlab.com -u gitlab-ci-token -p kBjszMXMdvqW-L_sZzTk
RUN forge -v deploy
|
# Generated by Django 3.1.7 on 2021-03-27 13:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the `api_info` table backing the APIInfo model.

    Auto-generated by Django; avoid hand-editing -- later migrations depend on
    this schema history.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='APIInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('api_name', models.CharField(default='请输入接口名称', max_length=32, verbose_name='接口名称')),
                ('api_describe', models.TextField(default='请输入接口描述', max_length=255, verbose_name='接口描述')),
                ('api_manager', models.CharField(default='请输入接口负责人名字', max_length=11, verbose_name='接口负责人')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='修改时间')),
            ],
            options={
                'verbose_name': '接口列表',
                'verbose_name_plural': '接口列表',
                'db_table': 'api_info',
            },
        ),
    ]
|
import os
from flask import Flask
from ckanpackager.controllers.actions import actions
from ckanpackager.controllers.error_handlers import error_handlers
from ckanpackager.controllers.packagers import packagers
from ckanpackager.controllers.status import status
"""
Ckanpackager service
Run a web service to access statistics, perform actions and queue tasks
"""
# create the application
app = Flask(__name__)
# read configuration: package defaults first, then the file pointed to by the
# CKANPACKAGER_CONFIG environment variable (which must be set).
app.config.from_object('ckanpackager.config_defaults')
app.config.from_envvar('CKANPACKAGER_CONFIG')
# create folders if required
if not os.path.exists(app.config['TEMP_DIRECTORY']):
    os.makedirs(app.config['TEMP_DIRECTORY'])
if not os.path.exists(app.config['STORE_DIRECTORY']):
    os.makedirs(app.config['STORE_DIRECTORY'])
# register our blueprints
app.register_blueprint(status)
app.register_blueprint(actions)
app.register_blueprint(packagers)
app.register_blueprint(error_handlers)
def run():
    """
    Start the server.

    Runs the Flask development server single-threaded, in a single process,
    with debug enabled and the auto-reloader switched off.
    """
    options = {
        'host': app.config['HOST'],
        'port': int(app.config['PORT']),
        'threaded': False,
        'processes': 1,
        'debug': True,
        'use_reloader': False,
    }
    app.run(**options)
# start server (debug mode)
if __name__ == '__main__':
    run()
|
# This will set object headers for each file in a container
# Threaded for large containers
# No error trapping yet... caution
# Written by: Joe Engel
import os
import pyrax
import threading
import time
CONCURRENCY = 1000  # cap on simultaneous worker threads
# Credentials
pyrax.set_setting("identity_type", "rackspace")
creds_file = os.path.expanduser("~/.rackspace_cloud_credentials")
pyrax.set_credential_file(creds_file)
cf = pyrax.connect_to_cloudfiles(region="ORD")
# List containers and then prompt for container to delete
# NOTE(review): cont_name is an empty placeholder -- fill in (or prompt for)
# the target container name before running.
cont_name = ""
cont = cf.get_container(cont_name)
# NOTE(review): this is a *set* literal containing one empty string, not a
# dict of headers, and it is never passed to set_object_metadata below --
# confirm the intended metadata and wire it through.
metadata = {""}
def header_obj(cfobj):
    # Worker: set metadata on a single object (Python 2 print statements).
    print "Setting header for obj: %s" % cfobj.name
    cf.set_object_metadata(cont, cfobj)
    print
# Fan out one thread per object, throttled to CONCURRENCY live threads.
for obj in cont.get_objects(full_listing=True):
    while threading.activeCount() > CONCURRENCY:
        time.sleep(0.1)
    threading.Thread(target=header_obj, args=(obj,)).start()
# Delete the container
print "DONE!"
|
# -*- coding: utf-8 -*-
from model.group import Group
def test_create_group(app):
    """Create a group with every field populated (`app` is the UI fixture)."""
    app.group.create(Group(name="Test Group", header="Test Header", footer="Test Footer"))
def test_create_empty_group(app):
    """Create a group with every field left empty."""
    app.group.create(Group(name="", header="", footer=""))
|
class Solution(object):
    def dfs(self, node, depth):
        """Return the depth of the shallowest leaf at or below *node*.

        *depth* is the depth of *node* itself; *node* must not be None.
        """
        left, right = node.left, node.right
        if left and right:
            # Both subtrees present: the nearer leaf wins.
            return min(self.dfs(left, depth + 1), self.dfs(right, depth + 1))
        if left is not None and right is None:
            return self.dfs(left, depth + 1)
        if right is not None and left is None:
            return self.dfs(right, depth + 1)
        # No children at all: this is a leaf.
        return depth
    def minDepth(self, root):
        """
        https://leetcode.com/problems/minimum-depth-of-binary-tree/
        """
        if root is None:
            return 0
        return self.dfs(root, 1)
    """
    below is the bfs solution. return the first leaf. way simple than my dfs :(
    def minDepth(self, root):
    if not root:
    return 0
    queue = collections.deque([(root, 1)])
    while queue:
    node, level = queue.popleft()
    if node:
    if not node.left and not node.right:
    return level
    else:
    queue.append((node.left, level+1))
    queue.append((node.right, level+1)
    """
from django.urls import re_path
from .consumers import *
# Route WebSocket connections at ws/chat/<room_name>/ to the ChatConsumer.
websocket_urlpatterns=[
    # FIX: raw string for the regex -- `'\w'` in a plain string is an invalid
    # escape sequence (DeprecationWarning/SyntaxWarning in Python 3) even
    # though it happens to match the same URLs.
    re_path(r'ws/chat/(?P<room_name>\w+)/',ChatConsumer.as_asgi()),
]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.