blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aa93ad03fc44012a2b48b86533a29102093f9c58 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_35/176.py | e46c645d92b8969ad48b9b94179d7069226800fc | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,876 | py | def main(filename):
f = open(filename)
lines = f.readlines()
f.close()
outlines = []
NORTH, WEST, EAST, SOUTH = 0, 1, 2, 3
T = int(lines.pop(0))
def get_neighbours(arr, row, col):
neighbours = []
if row > 0:
neighbours.append((NORTH, arr[row - 1][col]))
if col > 0:
neighbours.append((WEST, arr[row][col - 1]))
if col < W - 1:
neighbours.append((EAST, arr[row][col + 1]))
if row < H - 1:
neighbours.append((SOUTH, arr[row + 1][col]))
return neighbours
for case in xrange(T):
H, W = map(lambda x:int(x), lines.pop(0).split(' '))
alt_map = []
link_map = []
basin_map = []
for i in xrange(H):
alt_map.append(map(lambda x:int(x), lines.pop(0).split(' ')))
for row in xrange(H):
link_map.append([])
for col in xrange(W):
neighbours = get_neighbours(alt_map, row, col)
if len(neighbours) > 0:
min_alt = min(zip(*neighbours)[1])
if min_alt < alt_map[row][col]:
flow_to = filter(lambda x:x[1] == min_alt, neighbours)
tgt_cell = flow_to[0]
if len(flow_to) > 1:
min_dir = min(zip(*flow_to)[0])
tgt_cell = filter(lambda x: x[0] == min_dir, flow_to)[0]
link_map[row].append(tgt_cell[0])
else:
link_map[row].append(-1)
else:
link_map[row].append(-1)
def get_delta_row_col(dir):
delta_row = 0
delta_col = 0
if dir == NORTH:
delta_row = -1
elif dir == WEST:
delta_col = -1
elif dir == EAST:
delta_col = 1
elif dir == SOUTH:
delta_row = 1
return (delta_row, delta_col)
def get_conn(row, col):
connected = []
cur_dir = link_map[row][col]
if cur_dir != -1:
d_row, d_col = get_delta_row_col(cur_dir)
connected.append((row + d_row, col + d_col))
link_map[row][col] = -1
neighbours = get_neighbours(link_map, row, col)
for dir, link_dir in neighbours:
if (3 - dir) == link_dir:
d_row, d_col = get_delta_row_col(dir)
connected.append((row + d_row, col + d_col))
link_map[row + d_row][col + d_col] = -1
return connected
basin_map = list(alt_map)
cur_char = 'a'
nodes = []
num_accounted = 0
i = 0
j = 0
while num_accounted < H * W:
while True:
if isinstance(basin_map[i][j], int):
nodes.append((i, j))
break
j += 1
if j == W:
j = 0
i += 1
while len(nodes) > 0:
node_row, node_col = nodes.pop(0)
basin_map[node_row][node_col] = cur_char
num_accounted += 1
for row, col in get_conn(node_row, node_col):
nodes.append((row, col))
cur_char = chr(ord(cur_char) + 1)
line = 'Case #%i:\n' % ((case + 1))
for row in xrange(H):
line += ' '.join(basin_map[row])
line += '\n'
outlines.append(line)
f = open('B.out', 'w')
f.writelines(outlines)
f.close()
if __name__ == "__main__":
main('B-large.in') | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
6c72a45ff32d4962d15076f7ce9e9857f7f46759 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/22/usersdata/107/11706/submittedfiles/av1_2.py | 1ed072be432d68e735832fde03663c7d7ac9cb0d | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
a=int(input('digite o valor de a:'))
b=int(input('digite o valor de b:'))
c=int(input('digite o valor de c':))
d=int(input('digite o valor de d:'))
if ABAD==5393 and CBCD==6268:
PRINT('VERDADEIRO')
ELSE:
print('FALSA') | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
4dece2cdb4af6765f620558479f12b10a049bb03 | 8e3bd35267f40341d7ca03646e10a2b92eace0c7 | /series.py | b7a4817a18ddf7d719d6458cd6f4a7a4f031f87b | [] | no_license | shirsho-12/mathScripts | 2eb762b64ec61ffe8f0182f478353fda121d8c3b | 0ada093050221a2f4d9b33c09783b052c17fbcb3 | refs/heads/master | 2023-04-01T06:29:55.308901 | 2021-04-17T13:54:10 | 2021-04-17T13:54:10 | 354,479,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | import numpy
from sympy import Symbol, pprint, simplify
import sympy as sp
def get_series(var, expr, num_terms=10):
series = sp.series(expr, var, n=num_terms)
pprint(simplify(series))
x = Symbol("x")
expr = sp.ln(1 - 8*x**2)
# expr = sp.cos(x)
# expr = sp.atan(x**3)
# expr = sp.ln(sp.sec(x))
get_series(x, expr)
| [
"shirshajit@gmail.com"
] | shirshajit@gmail.com |
2974a98e4d774482aebe15fe8fd2b5970e282ff3 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /XsJLwhAddzbxdQqr4_4.py | 27672ae255be132627d23a9dd8c94ef3d9a364fa | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | """
Create a function that takes a list and returns the **difference** between the
biggest and smallest numbers.
### Examples
difference_max_min([10, 4, 1, 4, -10, -50, 32, 21]) ➞ 82
# Smallest number is -50, biggest is 32.
difference_max_min([44, 32, 86, 19]) ➞ 67
# Smallest number is 19, biggest is 86.
### Notes
N/A
"""
def difference_max_min(lst):
ooga = max(lst)
booga = min(lst)
return ooga - booga
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
2c09458e09ad39b4527ee72e2208c6e87689c8b0 | 2aec9c5e8c72b731d3abf22f2a407fe09c1cde09 | /TMviewList/TMview/main.py | 5786dc10738bd2553b7952ac9ba865b10b335595 | [] | no_license | jiangyg/ZWFproject | 8b24cc34970ae0a9c2a2b0039dc527c83a5862b5 | aa35bc59566d92721f23d2dd00b0febd268ac2dd | refs/heads/master | 2020-09-26T17:01:00.229380 | 2019-11-15T13:16:21 | 2019-11-15T13:16:21 | 226,297,631 | 0 | 1 | null | 2019-12-06T09:55:37 | 2019-12-06T09:55:36 | null | UTF-8 | Python | false | false | 103 | py | from scrapy import cmdline
cmdline.execute('scrapy runspider ./TMview/spiders/tm_view.py'.split())
| [
"34021500@qq.com"
] | 34021500@qq.com |
c8d933b28a46b474602a1ecd35e3973757ca6e7c | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/playground/kenan/desktop/compiz/compizconfig-python/actions.py | 86771fd00f5ece6b0cc60cd32a33be547e5a41d2 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
def setup():
autotools.configure("--disable-static")
def build():
autotools.make()
def install():
autotools.install()
pisitools.dodoc("COPYING")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
a0f14bf59489e1820edcf0a4329a4155f3433160 | b55f7fe191a0ac499213505b297edffd2daab2ec | /DeepRLTrader/core/__init__.py | 31884eca75e1a3469faae7b5ec3c052da83623ad | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | chmbrs/DeepRLTrader | af77c33aee908d732fa760a1c48273f9b8ec6ae5 | 96ae2069a42e29838aa26165af0556835c1808dd | refs/heads/master | 2020-04-17T00:46:06.199575 | 2019-01-16T16:51:48 | 2019-01-16T16:51:48 | 166,061,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | from .environnement import Local_env
from .environnement import Live_env
from .worker import Local_Worker
from .worker import Live_Worker
from .session import Local_session
from .session import Live_session
| [
"awakeproduction@hotmail.fr"
] | awakeproduction@hotmail.fr |
705172c4e9453f453bfc4b37feb291384cd02836 | 8ad8ee4e3a4e0e8ae0ed8e92c68cf122f5ba3723 | /web_JK_ENG00/webScript/UniversityofBolton.py | 66f48e84e92679e8358d92db44a79ac26398c9a7 | [] | no_license | yangyangyanga/automatic_update | 5b5065713853c4a1225142ece4ea39be1a05d011 | 53c1777cbb84e489b887f38e2745477d6b6f4604 | refs/heads/master | 2020-05-25T21:18:24.979779 | 2019-05-22T08:34:02 | 2019-05-22T08:34:02 | 187,996,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | from middleware import *
makeThreading(urllist=getUrl(223),school='University of Bolton') | [
"1102213456@qq.com"
] | 1102213456@qq.com |
af49cccf78ba0cf24fdf317d6f5fb030c0a9d4a2 | b12aa1e2575a0d2b7345be676011afa174394b61 | /mengenalMysqlXampp/mengenalMysqlXampp/urls.py | ea5836ac0f652ce7828adad7dfb0ea3c2f209b72 | [] | no_license | frestea09/Latihan-django-to-data-scients | 52b5968685710cbb7c18525542576d42055b690b | 74b93d8daf2342feb69a4725143920eb299e20f8 | refs/heads/master | 2020-07-10T23:31:05.282827 | 2019-09-21T03:09:41 | 2019-09-21T03:09:41 | 204,396,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | """mengenalMysqlXampp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| [
"ilmanfrasetya@gmail.com"
] | ilmanfrasetya@gmail.com |
d635a85034c10c4b59d607191010b1b0900e44c5 | 97062249c6eb04069c6fb01e71d06bc334c828e1 | /desktop/core/ext-py/Mako-0.8.1/mako/codegen.py | fc3469fce50aea72f8ab552c5df346d13597f5c7 | [
"MIT",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | Albertsss/hue | 1c8b31c64cc420a029f5b5b80712fb3d0c6cbd6e | 454d320dd09b6f7946f3cc05bc97c3e2ca6cd485 | refs/heads/master | 2021-07-08T17:21:13.237871 | 2018-05-30T06:03:21 | 2018-05-30T06:03:21 | 135,386,450 | 0 | 1 | Apache-2.0 | 2020-07-25T13:36:58 | 2018-05-30T04:06:18 | Python | UTF-8 | Python | false | false | 49,252 | py | # mako/codegen.py
# Copyright (C) 2006-2012 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides functionality for rendering a parsetree constructing into module
source code."""
import time
import re
from mako.pygen import PythonPrinter
from mako import util, ast, parsetree, filters, exceptions
from mako import compat
MAGIC_NUMBER = 8
# names which are hardwired into the
# template and are not accessed via the
# context itself
RESERVED_NAMES = set(['context', 'loop', 'UNDEFINED'])
def compile(node,
uri,
filename=None,
default_filters=None,
buffer_filters=None,
imports=None,
future_imports=None,
source_encoding=None,
generate_magic_comment=True,
disable_unicode=False,
strict_undefined=False,
enable_loop=True,
reserved_names=frozenset()):
"""Generate module source code given a parsetree node,
uri, and optional source filename"""
# if on Py2K, push the "source_encoding" string to be
# a bytestring itself, as we will be embedding it into
# the generated source and we don't want to coerce the
# result into a unicode object, in "disable_unicode" mode
if not compat.py3k and isinstance(source_encoding, compat.text_type):
source_encoding = source_encoding.encode(source_encoding)
buf = util.FastEncodingBuffer()
printer = PythonPrinter(buf)
_GenerateRenderMethod(printer,
_CompileContext(uri,
filename,
default_filters,
buffer_filters,
imports,
future_imports,
source_encoding,
generate_magic_comment,
disable_unicode,
strict_undefined,
enable_loop,
reserved_names),
node)
return buf.getvalue()
class _CompileContext(object):
def __init__(self,
uri,
filename,
default_filters,
buffer_filters,
imports,
future_imports,
source_encoding,
generate_magic_comment,
disable_unicode,
strict_undefined,
enable_loop,
reserved_names):
self.uri = uri
self.filename = filename
self.default_filters = default_filters
self.buffer_filters = buffer_filters
self.imports = imports
self.future_imports = future_imports
self.source_encoding = source_encoding
self.generate_magic_comment = generate_magic_comment
self.disable_unicode = disable_unicode
self.strict_undefined = strict_undefined
self.enable_loop = enable_loop
self.reserved_names = reserved_names
class _GenerateRenderMethod(object):
"""A template visitor object which generates the
full module source for a template.
"""
def __init__(self, printer, compiler, node):
self.printer = printer
self.last_source_line = -1
self.compiler = compiler
self.node = node
self.identifier_stack = [None]
self.in_def = isinstance(node, (parsetree.DefTag, parsetree.BlockTag))
if self.in_def:
name = "render_%s" % node.funcname
args = node.get_argument_expressions()
filtered = len(node.filter_args.args) > 0
buffered = eval(node.attributes.get('buffered', 'False'))
cached = eval(node.attributes.get('cached', 'False'))
defs = None
pagetag = None
if node.is_block and not node.is_anonymous:
args += ['**pageargs']
else:
defs = self.write_toplevel()
pagetag = self.compiler.pagetag
name = "render_body"
if pagetag is not None:
args = pagetag.body_decl.get_argument_expressions()
if not pagetag.body_decl.kwargs:
args += ['**pageargs']
cached = eval(pagetag.attributes.get('cached', 'False'))
self.compiler.enable_loop = self.compiler.enable_loop or eval(
pagetag.attributes.get(
'enable_loop', 'False')
)
else:
args = ['**pageargs']
cached = False
buffered = filtered = False
if args is None:
args = ['context']
else:
args = [a for a in ['context'] + args]
self.write_render_callable(
pagetag or node,
name, args,
buffered, filtered, cached)
if defs is not None:
for node in defs:
_GenerateRenderMethod(printer, compiler, node)
@property
def identifiers(self):
return self.identifier_stack[-1]
def write_toplevel(self):
"""Traverse a template structure for module-level directives and
generate the start of module-level code.
"""
inherit = []
namespaces = {}
module_code = []
self.compiler.pagetag = None
class FindTopLevel(object):
def visitInheritTag(s, node):
inherit.append(node)
def visitNamespaceTag(s, node):
namespaces[node.name] = node
def visitPageTag(s, node):
self.compiler.pagetag = node
def visitCode(s, node):
if node.ismodule:
module_code.append(node)
f = FindTopLevel()
for n in self.node.nodes:
n.accept_visitor(f)
self.compiler.namespaces = namespaces
module_ident = set()
for n in module_code:
module_ident = module_ident.union(n.declared_identifiers())
module_identifiers = _Identifiers(self.compiler)
module_identifiers.declared = module_ident
# module-level names, python code
if self.compiler.generate_magic_comment and \
self.compiler.source_encoding:
self.printer.writeline("# -*- encoding:%s -*-" %
self.compiler.source_encoding)
if self.compiler.future_imports:
self.printer.writeline("from __future__ import %s" %
(", ".join(self.compiler.future_imports),))
self.printer.writeline("from mako import runtime, filters, cache")
self.printer.writeline("UNDEFINED = runtime.UNDEFINED")
self.printer.writeline("__M_dict_builtin = dict")
self.printer.writeline("__M_locals_builtin = locals")
self.printer.writeline("_magic_number = %r" % MAGIC_NUMBER)
self.printer.writeline("_modified_time = %r" % time.time())
self.printer.writeline("_enable_loop = %r" % self.compiler.enable_loop)
self.printer.writeline(
"_template_filename = %r" % self.compiler.filename)
self.printer.writeline("_template_uri = %r" % self.compiler.uri)
self.printer.writeline(
"_source_encoding = %r" % self.compiler.source_encoding)
if self.compiler.imports:
buf = ''
for imp in self.compiler.imports:
buf += imp + "\n"
self.printer.writeline(imp)
impcode = ast.PythonCode(
buf,
source='', lineno=0,
pos=0,
filename='template defined imports')
else:
impcode = None
main_identifiers = module_identifiers.branch(self.node)
module_identifiers.topleveldefs = \
module_identifiers.topleveldefs.\
union(main_identifiers.topleveldefs)
module_identifiers.declared.add("UNDEFINED")
if impcode:
module_identifiers.declared.update(impcode.declared_identifiers)
self.compiler.identifiers = module_identifiers
self.printer.writeline("_exports = %r" %
[n.name for n in
main_identifiers.topleveldefs.values()]
)
self.printer.write("\n\n")
if len(module_code):
self.write_module_code(module_code)
if len(inherit):
self.write_namespaces(namespaces)
self.write_inherit(inherit[-1])
elif len(namespaces):
self.write_namespaces(namespaces)
return list(main_identifiers.topleveldefs.values())
def write_render_callable(self, node, name, args, buffered, filtered,
cached):
"""write a top-level render callable.
this could be the main render() method or that of a top-level def."""
if self.in_def:
decorator = node.decorator
if decorator:
self.printer.writeline(
"@runtime._decorate_toplevel(%s)" % decorator)
self.printer.writelines(
"def %s(%s):" % (name, ','.join(args)),
# push new frame, assign current frame to __M_caller
"__M_caller = context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writeline("context._push_buffer()")
self.identifier_stack.append(
self.compiler.identifiers.branch(self.node))
if (not self.in_def or self.node.is_block) and '**pageargs' in args:
self.identifier_stack[-1].argument_declared.add('pageargs')
if not self.in_def and (
len(self.identifiers.locally_assigned) > 0 or
len(self.identifiers.argument_declared) > 0
):
self.printer.writeline("__M_locals = __M_dict_builtin(%s)" %
','.join([
"%s=%s" % (x, x) for x in
self.identifiers.argument_declared
]))
self.write_variable_declares(self.identifiers, toplevel=True)
for n in self.node.nodes:
n.accept_visitor(self)
self.write_def_finish(self.node, buffered, filtered, cached)
self.printer.writeline(None)
self.printer.write("\n\n")
if cached:
self.write_cache_decorator(
node, name,
args, buffered,
self.identifiers, toplevel=True)
def write_module_code(self, module_code):
"""write module-level template code, i.e. that which
is enclosed in <%! %> tags in the template."""
for n in module_code:
self.write_source_comment(n)
self.printer.write_indented_block(n.text)
def write_inherit(self, node):
"""write the module-level inheritance-determination callable."""
self.printer.writelines(
"def _mako_inherit(template, context):",
"_mako_generate_namespaces(context)",
"return runtime._inherit_from(context, %s, _template_uri)" %
(node.parsed_attributes['file']),
None
)
def write_namespaces(self, namespaces):
"""write the module-level namespace-generating callable."""
self.printer.writelines(
"def _mako_get_namespace(context, name):",
"try:",
"return context.namespaces[(__name__, name)]",
"except KeyError:",
"_mako_generate_namespaces(context)",
"return context.namespaces[(__name__, name)]",
None, None
)
self.printer.writeline("def _mako_generate_namespaces(context):")
for node in namespaces.values():
if 'import' in node.attributes:
self.compiler.has_ns_imports = True
self.write_source_comment(node)
if len(node.nodes):
self.printer.writeline("def make_namespace():")
export = []
identifiers = self.compiler.identifiers.branch(node)
self.in_def = True
class NSDefVisitor(object):
def visitDefTag(s, node):
s.visitDefOrBase(node)
def visitBlockTag(s, node):
s.visitDefOrBase(node)
def visitDefOrBase(s, node):
if node.is_anonymous:
raise exceptions.CompileException(
"Can't put anonymous blocks inside "
"<%namespace>",
**node.exception_kwargs
)
self.write_inline_def(node, identifiers, nested=False)
export.append(node.funcname)
vis = NSDefVisitor()
for n in node.nodes:
n.accept_visitor(vis)
self.printer.writeline("return [%s]" % (','.join(export)))
self.printer.writeline(None)
self.in_def = False
callable_name = "make_namespace()"
else:
callable_name = "None"
if 'file' in node.parsed_attributes:
self.printer.writeline(
"ns = runtime.TemplateNamespace(%r,"
" context._clean_inheritance_tokens(),"
" templateuri=%s, callables=%s, "
" calling_uri=_template_uri)" %
(
node.name,
node.parsed_attributes.get('file', 'None'),
callable_name,
)
)
elif 'module' in node.parsed_attributes:
self.printer.writeline(
"ns = runtime.ModuleNamespace(%r,"
" context._clean_inheritance_tokens(),"
" callables=%s, calling_uri=_template_uri,"
" module=%s)" %
(
node.name,
callable_name,
node.parsed_attributes.get('module', 'None')
)
)
else:
self.printer.writeline(
"ns = runtime.Namespace(%r,"
" context._clean_inheritance_tokens(),"
" callables=%s, calling_uri=_template_uri)" %
(
node.name,
callable_name,
)
)
if eval(node.attributes.get('inheritable', "False")):
self.printer.writeline("context['self'].%s = ns" % (node.name))
self.printer.writeline(
"context.namespaces[(__name__, %s)] = ns" % repr(node.name))
self.printer.write("\n")
if not len(namespaces):
self.printer.writeline("pass")
self.printer.writeline(None)
def write_variable_declares(self, identifiers, toplevel=False, limit=None):
"""write variable declarations at the top of a function.
the variable declarations are in the form of callable
definitions for defs and/or name lookup within the
function's context argument. the names declared are based
on the names that are referenced in the function body,
which don't otherwise have any explicit assignment
operation. names that are assigned within the body are
assumed to be locally-scoped variables and are not
separately declared.
for def callable definitions, if the def is a top-level
callable then a 'stub' callable is generated which wraps
the current Context into a closure. if the def is not
top-level, it is fully rendered as a local closure.
"""
# collection of all defs available to us in this scope
comp_idents = dict([(c.funcname, c) for c in identifiers.defs])
to_write = set()
# write "context.get()" for all variables we are going to
# need that arent in the namespace yet
to_write = to_write.union(identifiers.undeclared)
# write closure functions for closures that we define
# right here
to_write = to_write.union(
[c.funcname for c in identifiers.closuredefs.values()])
# remove identifiers that are declared in the argument
# signature of the callable
to_write = to_write.difference(identifiers.argument_declared)
# remove identifiers that we are going to assign to.
# in this way we mimic Python's behavior,
# i.e. assignment to a variable within a block
# means that variable is now a "locally declared" var,
# which cannot be referenced beforehand.
to_write = to_write.difference(identifiers.locally_declared)
if self.compiler.enable_loop:
has_loop = "loop" in to_write
to_write.discard("loop")
else:
has_loop = False
# if a limiting set was sent, constraint to those items in that list
# (this is used for the caching decorator)
if limit is not None:
to_write = to_write.intersection(limit)
if toplevel and getattr(self.compiler, 'has_ns_imports', False):
self.printer.writeline("_import_ns = {}")
self.compiler.has_imports = True
for ident, ns in self.compiler.namespaces.items():
if 'import' in ns.attributes:
self.printer.writeline(
"_mako_get_namespace(context, %r)."\
"_populate(_import_ns, %r)" %
(
ident,
re.split(r'\s*,\s*', ns.attributes['import'])
))
if has_loop:
self.printer.writeline(
'loop = __M_loop = runtime.LoopStack()'
)
for ident in to_write:
if ident in comp_idents:
comp = comp_idents[ident]
if comp.is_block:
if not comp.is_anonymous:
self.write_def_decl(comp, identifiers)
else:
self.write_inline_def(comp, identifiers, nested=True)
else:
if comp.is_root():
self.write_def_decl(comp, identifiers)
else:
self.write_inline_def(comp, identifiers, nested=True)
elif ident in self.compiler.namespaces:
self.printer.writeline(
"%s = _mako_get_namespace(context, %r)" %
(ident, ident)
)
else:
if getattr(self.compiler, 'has_ns_imports', False):
if self.compiler.strict_undefined:
self.printer.writelines(
"%s = _import_ns.get(%r, UNDEFINED)" %
(ident, ident),
"if %s is UNDEFINED:" % ident,
"try:",
"%s = context[%r]" % (ident, ident),
"except KeyError:",
"raise NameError(\"'%s' is not defined\")" %
ident,
None, None
)
else:
self.printer.writeline(
"%s = _import_ns.get(%r, context.get(%r, UNDEFINED))" %
(ident, ident, ident))
else:
if self.compiler.strict_undefined:
self.printer.writelines(
"try:",
"%s = context[%r]" % (ident, ident),
"except KeyError:",
"raise NameError(\"'%s' is not defined\")" %
ident,
None
)
else:
self.printer.writeline(
"%s = context.get(%r, UNDEFINED)" % (ident, ident)
)
self.printer.writeline("__M_writer = context.writer()")
def write_source_comment(self, node):
"""write a source comment containing the line number of the
corresponding template line."""
if self.last_source_line != node.lineno:
self.printer.writeline("# SOURCE LINE %d" % node.lineno)
self.last_source_line = node.lineno
def write_def_decl(self, node, identifiers):
"""write a locally-available callable referencing a top-level def"""
funcname = node.funcname
namedecls = node.get_argument_expressions()
nameargs = node.get_argument_expressions(include_defaults=False)
if not self.in_def and (
len(self.identifiers.locally_assigned) > 0 or
len(self.identifiers.argument_declared) > 0):
nameargs.insert(0, 'context.locals_(__M_locals)')
else:
nameargs.insert(0, 'context')
self.printer.writeline("def %s(%s):" % (funcname, ",".join(namedecls)))
self.printer.writeline(
"return render_%s(%s)" % (funcname, ",".join(nameargs)))
self.printer.writeline(None)
def write_inline_def(self, node, identifiers, nested):
"""write a locally-available def callable inside an enclosing def."""
namedecls = node.get_argument_expressions()
decorator = node.decorator
if decorator:
self.printer.writeline(
"@runtime._decorate_inline(context, %s)" % decorator)
self.printer.writeline(
"def %s(%s):" % (node.funcname, ",".join(namedecls)))
filtered = len(node.filter_args.args) > 0
buffered = eval(node.attributes.get('buffered', 'False'))
cached = eval(node.attributes.get('cached', 'False'))
self.printer.writelines(
# push new frame, assign current frame to __M_caller
"__M_caller = context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writelines(
"context._push_buffer()",
)
identifiers = identifiers.branch(node, nested=nested)
self.write_variable_declares(identifiers)
self.identifier_stack.append(identifiers)
for n in node.nodes:
n.accept_visitor(self)
self.identifier_stack.pop()
self.write_def_finish(node, buffered, filtered, cached)
self.printer.writeline(None)
if cached:
self.write_cache_decorator(node, node.funcname,
namedecls, False, identifiers,
inline=True, toplevel=False)
def write_def_finish(self, node, buffered, filtered, cached,
callstack=True):
"""write the end section of a rendering function, either outermost or
inline.
this takes into account if the rendering function was filtered,
buffered, etc. and closes the corresponding try: block if any, and
writes code to retrieve captured content, apply filters, send proper
return value."""
if not buffered and not cached and not filtered:
self.printer.writeline("return ''")
if callstack:
self.printer.writelines(
"finally:",
"context.caller_stack._pop_frame()",
None
)
if buffered or filtered or cached:
if buffered or cached:
# in a caching scenario, don't try to get a writer
# from the context after popping; assume the caching
# implemenation might be using a context with no
# extra buffers
self.printer.writelines(
"finally:",
"__M_buf = context._pop_buffer()"
)
else:
self.printer.writelines(
"finally:",
"__M_buf, __M_writer = context._pop_buffer_and_writer()"
)
if callstack:
self.printer.writeline("context.caller_stack._pop_frame()")
s = "__M_buf.getvalue()"
if filtered:
s = self.create_filter_callable(node.filter_args.args, s,
False)
self.printer.writeline(None)
if buffered and not cached:
s = self.create_filter_callable(self.compiler.buffer_filters,
s, False)
if buffered or cached:
self.printer.writeline("return %s" % s)
else:
self.printer.writelines(
"__M_writer(%s)" % s,
"return ''"
)
def write_cache_decorator(self, node_or_pagetag, name,
args, buffered, identifiers,
inline=False, toplevel=False):
"""write a post-function decorator to replace a rendering
callable with a cached version of itself."""
self.printer.writeline("__M_%s = %s" % (name, name))
cachekey = node_or_pagetag.parsed_attributes.get('cache_key',
repr(name))
cache_args = {}
if self.compiler.pagetag is not None:
cache_args.update(
(
pa[6:],
self.compiler.pagetag.parsed_attributes[pa]
)
for pa in self.compiler.pagetag.parsed_attributes
if pa.startswith('cache_') and pa != 'cache_key'
)
cache_args.update(
(
pa[6:],
node_or_pagetag.parsed_attributes[pa]
) for pa in node_or_pagetag.parsed_attributes
if pa.startswith('cache_') and pa != 'cache_key'
)
if 'timeout' in cache_args:
cache_args['timeout'] = int(eval(cache_args['timeout']))
self.printer.writeline("def %s(%s):" % (name, ','.join(args)))
# form "arg1, arg2, arg3=arg3, arg4=arg4", etc.
pass_args = [
'=' in a and "%s=%s" % ((a.split('=')[0],)*2) or a
for a in args
]
self.write_variable_declares(
identifiers,
toplevel=toplevel,
limit=node_or_pagetag.undeclared_identifiers()
)
if buffered:
s = "context.get('local')."\
"cache._ctx_get_or_create("\
"%s, lambda:__M_%s(%s), context, %s__M_defname=%r)" % \
(cachekey, name, ','.join(pass_args),
''.join(["%s=%s, " % (k, v)
for k, v in cache_args.items()]),
name
)
# apply buffer_filters
s = self.create_filter_callable(self.compiler.buffer_filters, s,
False)
self.printer.writelines("return " + s, None)
else:
self.printer.writelines(
"__M_writer(context.get('local')."
"cache._ctx_get_or_create("\
"%s, lambda:__M_%s(%s), context, %s__M_defname=%r))" %
(cachekey, name, ','.join(pass_args),
''.join(["%s=%s, " % (k, v)
for k, v in cache_args.items()]),
name,
),
"return ''",
None
)
def create_filter_callable(self, args, target, is_expression):
    """write a filter-applying expression based on the filters
    present in the given filter names, adjusting for the global
    'default' filter aliases as needed.

    :param args: list of filter names / call expressions to apply.
    :param target: code string producing the value to be filtered.
    :param is_expression: when True, page-level filter args and the
        compiler's default_filters are prepended (unless 'n' is given).
    :return: *target* wrapped in nested filter calls, innermost first.
    """
    def locate_encode(name):
        # resolve a bare filter alias to its implementation; explicit
        # "decode.xxx" names always resolve into the filters module
        if re.match(r'decode\..+', name):
            return "filters." + name
        elif self.compiler.disable_unicode:
            return filters.NON_UNICODE_ESCAPES.get(name, name)
        else:
            return filters.DEFAULT_ESCAPES.get(name, name)

    if 'n' not in args:
        # 'n' suppresses the implicit page/default filters
        if is_expression:
            if self.compiler.pagetag:
                args = self.compiler.pagetag.filter_args.args + args
            if self.compiler.default_filters:
                args = self.compiler.default_filters + args
    for e in args:
        # if filter given as a function, get just the identifier portion
        if e == 'n':
            continue
        m = re.match(r'(.+?)(\(.*\))', e)
        if m:
            ident, fargs = m.group(1, 2)
            f = locate_encode(ident)
            e = f + fargs
        else:
            # (removed a dead local assignment "x = e" that was never read)
            e = locate_encode(e)
            assert e is not None
        target = "%s(%s)" % (e, target)
    return target
def visitExpression(self, node):
    """Emit an ``__M_writer(...)`` call for a ${...} expression node,
    routing it through filters when any are in effect."""
    self.write_source_comment(node)
    pagetag = self.compiler.pagetag
    has_node_escapes = bool(node.escapes)
    has_page_filters = pagetag is not None and bool(pagetag.filter_args.args)
    has_default_filters = bool(self.compiler.default_filters)
    if has_node_escapes or has_page_filters or has_default_filters:
        wrapped = self.create_filter_callable(node.escapes_code.args,
                                              "%s" % node.text, True)
        self.printer.writeline("__M_writer(%s)" % wrapped)
    else:
        # no filters anywhere: write the expression result directly
        self.printer.writeline("__M_writer(%s)" % node.text)
def visitControlLine(self, node):
    """Emit a %-control line (%if/%for/%while/...), managing the
    generated code's indentation and the synthetic 'loop' context
    wrapper produced for %for loops."""
    if node.isend:
        # end-tag: close the current indented block; writeline(None)
        # presumably signals a dedent to the printer — confirm against
        # PythonPrinter
        self.printer.writeline(None)
        if node.has_loop_context:
            # this %for used the 'loop' variable: close the try: opened
            # by mangle_mako_loop() and restore the enclosing loop
            self.printer.writeline('finally:')
            self.printer.writeline("loop = __M_loop._exit()")
            self.printer.writeline(None)
    else:
        self.write_source_comment(node)
        if self.compiler.enable_loop and node.keyword == 'for':
            text = mangle_mako_loop(node, self.printer)
        else:
            text = node.text
        self.printer.writeline(text)
        children = node.get_children()
        # this covers the three situations where we want to insert a pass:
        #    1) a ternary control line with no children,
        #    2) a primary control line with nothing but its own ternary
        #          and end control lines, and
        #    3) any control line with no content other than comments
        if not children or (
                compat.all(isinstance(c, (parsetree.Comment,
                                          parsetree.ControlLine))
                           for c in children) and
                compat.all((node.is_ternary(c.keyword) or c.isend)
                           for c in children
                           if isinstance(c, parsetree.ControlLine))):
            self.printer.writeline("pass")
def visitText(self, node):
    """Emit a write of a literal text node's content as a repr()'d
    Python string literal."""
    literal_source = repr(node.content)
    self.write_source_comment(node)
    self.printer.writeline("__M_writer(%s)" % literal_source)
def visitTextTag(self, node):
    """Render the body of a <%text> tag; when the tag declares filters,
    the whole body is captured into a buffer first so the filters can
    be applied to the rendered text as a single value."""
    filtered = len(node.filter_args.args) > 0
    if filtered:
        # redirect writes into a pushed buffer for the duration of the body
        self.printer.writelines(
            "__M_writer = context._push_writer()",
            "try:",
        )
    for n in node.nodes:
        n.accept_visitor(self)
    if filtered:
        # pop the buffer and write out its filtered contents; the final
        # None presumably closes the try/finally indentation — confirm
        self.printer.writelines(
            "finally:",
            "__M_buf, __M_writer = context._pop_buffer_and_writer()",
            "__M_writer(%s)" %
            self.create_filter_callable(
                node.filter_args.args,
                "__M_buf.getvalue()",
                False),
            None
        )
def visitCode(self, node):
    """Emit an inline <% %> python block into the generated render body.

    Module-level blocks (node.ismodule) are skipped here — presumably
    emitted elsewhere at module scope; confirm against the rest of the
    code generator.
    """
    if not node.ismodule:
        self.write_source_comment(node)
        self.printer.write_indented_block(node.text)

        if not self.in_def and len(self.identifiers.locally_assigned) > 0:
            # if we are the "template" def, fudge locally
            # declared/modified variables into the "__M_locals" dictionary,
            # which is used for def calls within the same template,
            # to simulate "enclosing scope"
            self.printer.writeline(
                '__M_locals_builtin_stored = __M_locals_builtin()')
            self.printer.writeline(
                '__M_locals.update(__M_dict_builtin([(__M_key,'
                ' __M_locals_builtin_stored[__M_key]) for __M_key in'
                ' [%s] if __M_key in __M_locals_builtin_stored]))' %
                ','.join([repr(x) for x in node.declared_identifiers()]))
def visitIncludeTag(self, node):
    """Emit a ``runtime._include_file(...)`` call for an <%include>
    tag, forwarding the tag's ``args`` attribute when present."""
    self.write_source_comment(node)
    file_expr = node.parsed_attributes['file']
    include_args = node.attributes.get('args')
    if include_args:
        call = "runtime._include_file(context, %s, _template_uri, %s)" % (
            file_expr, include_args)
    else:
        call = "runtime._include_file(context, %s, _template_uri)" % (
            file_expr,)
    self.printer.writeline(call)
def visitNamespaceTag(self, node):
    # no inline output is produced at the point a <%namespace> appears;
    # its content is presumably emitted by the namespace-writing pass
    # (see write_namespaces references elsewhere) — confirm
    pass

def visitDefTag(self, node):
    # no inline output at the point a <%def> appears; the def body is
    # presumably rendered through its own generated function — confirm
    pass
def visitBlockTag(self, node):
    """Emit the in-place invocation of a <%block>'s generated function."""
    if node.is_anonymous:
        # anonymous blocks render in place via a plain call
        self.printer.writeline("%s()" % node.funcname)
    else:
        # named block: the emitted guard calls it via context['self'] only
        # when no parent template provides an attribute of the same name
        nameargs = node.get_argument_expressions(include_defaults=False)
        nameargs += ['**pageargs']
        self.printer.writeline("if 'parent' not in context._data or "
                               "not hasattr(context._data['parent'], '%s'):"
                               % node.funcname)
        self.printer.writeline(
            "context['self'].%s(%s)" % (node.funcname, ",".join(nameargs)))
        self.printer.writeline("\n")
def visitCallNamespaceTag(self, node):
    """A namespace-qualified call tag (<%ns:name>) renders exactly like
    a plain <%call> tag."""
    # TODO: we can put namespace-specific checks here, such
    # as ensure the given namespace will be imported,
    # pre-import the namespace, etc.
    self.visitCallTag(node)
def visitCallTag(self, node):
    """Generate rendering code for a <%call> tag.

    Emits an inner ``ccall(caller)`` closure that exposes ``body()``
    plus any <%def>/<%block> children, pushes it onto the caller stack
    as a runtime Namespace, then writes the call expression's result.
    """
    self.printer.writeline("def ccall(caller):")
    export = ['body']
    callable_identifiers = self.identifiers.branch(node, nested=True)
    body_identifiers = callable_identifiers.branch(node, nested=False)
    # we want the 'caller' passed to ccall to be used
    # for the body() function, but for other non-body()
    # <%def>s within <%call> we want the current caller
    # off the call stack (if any)
    body_identifiers.add_declared('caller')

    self.identifier_stack.append(body_identifiers)

    class DefVisitor(object):
        # collects <%def>/<%block> children of the <%call>: each is
        # written as an inline def and exported from ccall() alongside
        # body().  (uses 's' for its own self to keep the enclosing
        # method's 'self' visible.)
        def visitDefTag(s, node):
            s.visitDefOrBase(node)

        def visitBlockTag(s, node):
            s.visitDefOrBase(node)

        def visitDefOrBase(s, node):
            self.write_inline_def(node, callable_identifiers, nested=False)
            if not node.is_anonymous:
                export.append(node.funcname)
            # remove defs that are within the <%call> from the
            # "closuredefs" defined in the body, so they dont render twice
            if node.funcname in body_identifiers.closuredefs:
                del body_identifiers.closuredefs[node.funcname]

    vis = DefVisitor()
    for n in node.nodes:
        n.accept_visitor(vis)
    self.identifier_stack.pop()

    bodyargs = node.body_decl.get_argument_expressions()
    self.printer.writeline("def body(%s):" % ','.join(bodyargs))

    # TODO: figure out best way to specify
    # buffering/nonbuffering (at call time would be better)
    buffered = False
    if buffered:
        self.printer.writelines(
            "context._push_buffer()",
            "try:"
        )
    self.write_variable_declares(body_identifiers)
    self.identifier_stack.append(body_identifiers)
    for n in node.nodes:
        n.accept_visitor(self)
    self.identifier_stack.pop()

    self.write_def_finish(node, buffered, False, False, callstack=False)
    self.printer.writelines(
        None,
        "return [%s]" % (','.join(export)),
        None
    )

    self.printer.writelines(
        # push on caller for nested call
        "context.caller_stack.nextcaller = "
        "runtime.Namespace('caller', context, "
        "callables=ccall(__M_caller))",
        "try:")
    self.write_source_comment(node)
    self.printer.writelines(
        "__M_writer(%s)" % self.create_filter_callable(
            [], node.expression, True),
        "finally:",
        "context.caller_stack.nextcaller = None",
        None
    )
class _Identifiers(object):
    """tracks the status of identifier names as template code is rendered."""

    def __init__(self, compiler, node=None, parent=None, nested=False):
        if parent is not None:
            # if we are the branch created in write_namespaces(),
            # we don't share any context from the main body().
            if isinstance(node, parsetree.NamespaceTag):
                self.declared = set()
                self.topleveldefs = util.SetLikeDict()
            else:
                # things that have already been declared
                # in an enclosing namespace (i.e. names we can just use)
                self.declared = set(parent.declared).\
                    union([c.name for c in parent.closuredefs.values()]).\
                    union(parent.locally_declared).\
                    union(parent.argument_declared)

                # if these identifiers correspond to a "nested"
                # scope, it means whatever the parent identifiers
                # had as undeclared will have been declared by that parent,
                # and therefore we have them in our scope.
                if nested:
                    self.declared = self.declared.union(parent.undeclared)

                # top level defs that are available
                self.topleveldefs = util.SetLikeDict(**parent.topleveldefs)
        else:
            self.declared = set()
            self.topleveldefs = util.SetLikeDict()

        self.compiler = compiler

        # things within this level that are referenced before they
        # are declared (e.g. assigned to)
        self.undeclared = set()

        # things that are declared locally. some of these things
        # could be in the "undeclared" list as well if they are
        # referenced before declared
        self.locally_declared = set()

        # assignments made in explicit python blocks.
        # these will be propagated to
        # the context of local def calls.
        self.locally_assigned = set()

        # things that are declared in the argument
        # signature of the def callable
        self.argument_declared = set()

        # closure defs that are defined in this level
        self.closuredefs = util.SetLikeDict()

        self.node = node

        if node is not None:
            node.accept_visitor(self)

        illegal_names = self.compiler.reserved_names.intersection(
            self.locally_declared)
        if illegal_names:
            raise exceptions.NameConflictError(
                "Reserved words declared in template: %s" %
                ", ".join(illegal_names))

    def branch(self, node, **kwargs):
        """create a new Identifiers for a new Node, with
        this Identifiers as the parent."""
        return _Identifiers(self.compiler, node, self, **kwargs)

    @property
    def defs(self):
        return set(self.topleveldefs.union(self.closuredefs).values())

    def __repr__(self):
        return "Identifiers(declared=%r, locally_declared=%r, "\
            "undeclared=%r, topleveldefs=%r, closuredefs=%r, "\
            "argumentdeclared=%r)" %\
            (
                list(self.declared),
                list(self.locally_declared),
                list(self.undeclared),
                [c.name for c in self.topleveldefs.values()],
                [c.name for c in self.closuredefs.values()],
                self.argument_declared)

    def _add_undeclared_from(self, node):
        """record identifiers that *node* references before declaration,
        skipping the implicit 'context' name and anything already visible
        in this scope.

        (This loop was previously copy-pasted into check_declared and four
        of the visit* methods below; factored out for consistency.)
        """
        for ident in node.undeclared_identifiers():
            if ident != 'context' and \
                    ident not in self.declared.union(self.locally_declared):
                self.undeclared.add(ident)

    def check_declared(self, node):
        """update the state of this Identifiers with the undeclared
        and declared identifiers of the given node."""
        self._add_undeclared_from(node)
        for ident in node.declared_identifiers():
            self.locally_declared.add(ident)

    def add_declared(self, ident):
        self.declared.add(ident)
        if ident in self.undeclared:
            self.undeclared.remove(ident)

    def visitExpression(self, node):
        self.check_declared(node)

    def visitControlLine(self, node):
        self.check_declared(node)

    def visitCode(self, node):
        if not node.ismodule:
            self.check_declared(node)
            self.locally_assigned = self.locally_assigned.union(
                node.declared_identifiers())

    def visitNamespaceTag(self, node):
        # only traverse into the sub-elements of a
        # <%namespace> tag if we are the branch created in
        # write_namespaces()
        if self.node is node:
            for n in node.nodes:
                n.accept_visitor(self)

    def _check_name_exists(self, collection, node):
        existing = collection.get(node.funcname)
        collection[node.funcname] = node
        if existing is not None and \
                existing is not node and \
                (node.is_block or existing.is_block):
            raise exceptions.CompileException(
                "%%def or %%block named '%s' already "
                "exists in this template." %
                node.funcname, **node.exception_kwargs)

    def visitDefTag(self, node):
        if node.is_root() and not node.is_anonymous:
            self._check_name_exists(self.topleveldefs, node)
        elif node is not self.node:
            self._check_name_exists(self.closuredefs, node)

        self._add_undeclared_from(node)

        # visit defs only one level deep
        if node is self.node:
            for ident in node.declared_identifiers():
                self.argument_declared.add(ident)

            for n in node.nodes:
                n.accept_visitor(self)

    def visitBlockTag(self, node):
        if node is not self.node and \
                not node.is_anonymous:

            if isinstance(self.node, parsetree.DefTag):
                raise exceptions.CompileException(
                    "Named block '%s' not allowed inside of def '%s'"
                    % (node.name, self.node.name), **node.exception_kwargs)
            elif isinstance(self.node,
                            (parsetree.CallTag, parsetree.CallNamespaceTag)):
                raise exceptions.CompileException(
                    "Named block '%s' not allowed inside of <%%call> tag"
                    % (node.name, ), **node.exception_kwargs)

        self._add_undeclared_from(node)

        if not node.is_anonymous:
            self._check_name_exists(self.topleveldefs, node)
            self.undeclared.add(node.funcname)
        elif node is not self.node:
            self._check_name_exists(self.closuredefs, node)
        for ident in node.declared_identifiers():
            self.argument_declared.add(ident)
        for n in node.nodes:
            n.accept_visitor(self)

    def visitTextTag(self, node):
        self._add_undeclared_from(node)

    def visitIncludeTag(self, node):
        self.check_declared(node)

    def visitPageTag(self, node):
        for ident in node.declared_identifiers():
            self.argument_declared.add(ident)
        self.check_declared(node)

    def visitCallNamespaceTag(self, node):
        self.visitCallTag(node)

    def visitCallTag(self, node):
        if node is self.node:
            self._add_undeclared_from(node)
            for ident in node.declared_identifiers():
                self.argument_declared.add(ident)
            for n in node.nodes:
                n.accept_visitor(self)
        else:
            self._add_undeclared_from(node)
_FOR_LOOP = re.compile(
    r'^for\s+((?:\(?)\s*[A-Za-z_][A-Za-z_0-9]*'
    r'(?:\s*,\s*(?:[A-Za-z_][A-Za-z0-9_]*),??)*\s*(?:\)?))\s+in\s+(.*):'
)


def mangle_mako_loop(node, printer):
    """converts a for loop into a context manager wrapped around a for loop
    when access to the `loop` variable has been detected in the for loop body
    """
    tracker = LoopVariable()
    node.accept_visitor(tracker)
    if not tracker.detected:
        # no 'loop' reference anywhere in the body: leave the line as-is
        return node.text
    node.nodes[-1].has_loop_context = True
    match = _FOR_LOOP.match(node.text)
    if match is None:
        raise SyntaxError("Couldn't apply loop context: %s" % node.text)
    targets, iterable = match.group(1), match.group(2)
    printer.writelines(
        'loop = __M_loop._enter(%s)' % iterable,
        'try:'
    )
    return 'for %s in loop:' % targets


class LoopVariable(object):
    """A node visitor which looks for the name 'loop' within undeclared
    identifiers."""

    def __init__(self):
        self.detected = False

    def _loop_reference_detected(self, node):
        if 'loop' in node.undeclared_identifiers():
            self.detected = True
            return
        for child in node.get_children():
            child.accept_visitor(self)

    def visitControlLine(self, node):
        self._loop_reference_detected(node)

    def visitCode(self, node):
        self._loop_reference_detected(node)

    def visitExpression(self, node):
        self._loop_reference_detected(node)
| [
"540227148@qq.com"
] | 540227148@qq.com |
5e6b1e4f698c5c092aff75f1caa0e101fcacd01b | f0e8338762530bd6c2cc402cda64c43bcec329ae | /leetcode/35. 搜索插入位置.py | 2f9b313fad3d2a95b67f4af9fb389fa870e4f470 | [] | no_license | pengyuhou/git_test1 | bcd60554d2dadad972848047d00f888444462f05 | 5aa441f94a0aa713771bdd93b53a702032060f5d | refs/heads/master | 2022-11-22T08:52:52.767933 | 2020-07-18T03:50:22 | 2020-07-18T03:50:22 | 259,177,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | class Solution(object):
def searchInsert(self, nums, target):
    """
    :type nums: List[int]
    :type target: int
    :rtype: int

    Return the index of target in the sorted list nums, or the index
    where it would be inserted to keep nums sorted (leftmost position).
    """
    from bisect import bisect_left
    return bisect_left(nums, target)
if __name__ == '__main__':
    # quick manual check of bisect behaviour on a small sorted list
    import bisect
    values = [1, 3, 5, 6]
    # leftmost index at which 5 already sits
    print(bisect.bisect_left(values, 5))
    # insort inserts while preserving sort order
    bisect.insort(values, 5)
    print(values)
| [
"786490473@qq.com"
] | 786490473@qq.com |
d0377ce55de49112d96d6d73d6fdfc511bcc9219 | af3ec207381de315f4cb6dddba727d16d42d6c57 | /dialogue-engine/test/programytest/parser/template/graph_tests/test_authorise_usergroups.py | 070b6c41a40a3be42877c809d4c3b60fa8448c92 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mcf-yuichi/cotoba-agent-oss | 02a5554fe81ce21517f33229101013b6487f5404 | ce60833915f484c4cbdc54b4b8222d64be4b6c0d | refs/heads/master | 2023-01-12T20:07:34.364188 | 2020-11-11T00:55:16 | 2020-11-11T00:55:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,358 | py | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.authorise import TemplateAuthoriseNode
from programy.config.brain.brain import BrainConfiguration
from programy.config.brain.security import BrainSecurityConfiguration
from programytest.parser.template.graph_tests.graph_test_client import TemplateGraphTestClient
class TemplateGraphAuthoriseTests(TemplateGraphTestClient):
    """Graph-parser tests for the <authorise> template node.

    Each test parses a small template and checks that exactly one
    TemplateAuthoriseNode is produced with the expected role; the
    "access allowed" variants additionally resolve the node and expect
    its inner text back.  The common assertion sequence, previously
    repeated verbatim in all four tests, lives in
    _parse_single_authorise_node().
    """

    def get_brain_config(self):
        """Return a brain configuration wired to the basic usergroups
        authorisation service (role data comes from usergroups.yaml)."""
        brain_config = BrainConfiguration()
        brain_config.security._authorisation = BrainSecurityConfiguration("authorisation")
        brain_config.security.authorisation._classname = "programy.security.authorise.usergroupsauthorisor.BasicUserGroupAuthorisationService"
        brain_config.security.authorisation._denied_srai = "ACCESS_DENIED"
        brain_config.security.authorisation._usergroups = "$BOT_ROOT/usergroups.yaml"
        return brain_config

    def _parse_single_authorise_node(self, template_xml, expected_role):
        """Parse *template_xml* and assert it yields a single
        TemplateAuthoriseNode carrying *expected_role*; return that node."""
        template = ET.fromstring(template_xml)
        ast = self._graph.parse_template_expression(template)
        self.assertIsNotNone(ast)
        self.assertIsInstance(ast, TemplateNode)
        self.assertIsNotNone(ast.children)
        self.assertEqual(len(ast.children), 1)
        auth_node = ast.children[0]
        self.assertIsNotNone(auth_node)
        self.assertIsInstance(auth_node, TemplateAuthoriseNode)
        self.assertIsNotNone(auth_node.role)
        self.assertEqual(expected_role, auth_node.role)
        return auth_node

    def test_authorise_with_role_as_attrib_access_allowed(self):
        auth_node = self._parse_single_authorise_node("""
            <template>
                <authorise role="root">
                    Hello
                </authorise>
            </template>
            """, "root")
        result = auth_node.resolve(self._client_context)
        self.assertIsNotNone(result)
        self.assertEqual("Hello", result)

    def test_authorise_with_role_as_attrib_and_optional_srai_access_allowed(self):
        auth_node = self._parse_single_authorise_node("""
            <template>
                <authorise role="root" denied_srai="NO_ACCESS">
                    Hello
                </authorise>
            </template>
            """, "root")
        result = auth_node.resolve(self._client_context)
        self.assertIsNotNone(result)
        self.assertEqual("Hello", result)

    def test_authorise_with_role_as_attrib_access_denied(self):
        # parsing succeeds regardless of whether the role would be denied
        # at resolve time, so only the parsed structure is asserted here
        self._parse_single_authorise_node("""
            <template>
                <authorise role="denied">
                    Hello
                </authorise>
            </template>
            """, "denied")

    def test_authorise_with_role_as_attrib_and_optional_srai_access_denied(self):
        self._parse_single_authorise_node("""
            <template>
                <authorise role="denied" denied_srai="NO_ACCESS">
                    Hello
                </authorise>
            </template>
            """, "denied")
| [
"cliff@cotobadesign.com"
] | cliff@cotobadesign.com |
7b8a0f33c8c2cfce8e2214e673d7e23930159a0d | 1c762f8a085e851dcc38503008c3c997782c5c79 | /Data_Generator.py | 89fdbfcd546313b828a0742f5ac4328968277aa7 | [
"MIT"
] | permissive | Ajay2521/Face-Recognition-using-Siamese-Network | ca599ab2d05869a20accc22a3f3e17a77a193c18 | 0752e85b046599bf7ddb960cefeaf63b309e26c1 | refs/heads/main | 2023-05-13T21:14:56.740169 | 2021-06-11T03:37:41 | 2021-06-11T03:37:41 | 375,892,175 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,065 | py | # importing the neccessary libraries
# open cv for image processing
import cv2
# used to manipulate different parts
import sys
# used for manipulating array/matrics
import numpy as np
# used for accessing the file and folder in the machine
import os
# used for landmark's facial detector with pre-trained models, the dlib is used to estimate the location of 68 coordinates
import dlib
from imutils import face_utils
# for visulating the image
import matplotlib.pyplot as plt
# dlib's HOG-based frontal face detector, shared by both generator
# functions below; calling it on an image returns one rectangle per face
detector = dlib.get_frontal_face_detector()
# function for face detecting and save the Face ROI(embedding)
# takes 2 parameter, imagepath = Uploaded image location, name = user name
def image_data_generator(imagePath, name):
    """Detect faces in the image at *imagePath* and save each cropped,
    grayscale face region to database/<name>/<name>.jpg.

    NOTE(review): every detected face is written to the same file name,
    so with multiple faces only the last crop survives — confirm inputs
    are single-face images.
    """
    root = 'database'
    person_dir = os.path.join(root, name)
    # create database/<name>/ on first use
    if not os.path.exists(person_dir):
        os.makedirs(person_dir, exist_ok='True')

    image = cv2.imread(imagePath)
    # face detection runs on the grayscale version of the image
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # second argument 1 = upsample once so smaller faces are still found
    faces = detector(gray_image, 1)
    for box in faces:
        # crop the detected face rectangle (region of interest)
        roi_image = gray_image[box.top():box.bottom(), box.left():box.right()]
        cv2.imwrite(person_dir + '/' + name + ".jpg", roi_image)
# Demo driver: crop the face out of a fixed sample image for the entered user.
imagePath = 'faceDetect.jpg'  # sample image expected next to this script
name = input("\nEnter name of person : ")
image_data_generator(imagePath, name)
# function for face detecting and save the Face ROI(embedding) from webcam
# takes 1 parameter, name = user name
def video_data_generator(name):
    """Capture frames from the default webcam and save up to 20 cropped
    face images as database/<name>/<name><i>.jpg.

    Press 'q' in the preview window to stop early.
    """
    root = 'database'
    person_dir = os.path.join(root, name)
    # create database/<name>/ on first use
    if not os.path.exists(person_dir):
        os.makedirs(person_dir, exist_ok='True')

    webcam = cv2.VideoCapture(0)
    number_of_images = 0
    MAX_NUMBER_OF_IMAGES = 20
    while number_of_images < MAX_NUMBER_OF_IMAGES:
        # NOTE(review): the capture success flag is not checked, so a
        # failed read would crash in cv2.flip — confirm a webcam exists.
        ret, frame = webcam.read()
        # mirror the frame horizontally (flip around the y-axis)
        frame = cv2.flip(frame, 1)
        # second argument 1 = upsample once so smaller faces are detected
        detections = detector(frame, 1)
        for box in detections:
            # crop and persist the face region, then mark it on the preview
            face_roi = frame[box.top():box.bottom(), box.left():box.right()]
            cv2.imwrite(os.path.join(person_dir, str(name + str(number_of_images) + '.jpg')), face_roi)
            number_of_images += 1
            cv2.rectangle(frame, (box.left(), box.top()), (box.right(), box.bottom()), (0, 255, 0), 2)
        cv2.imshow("Webcam", frame)
        # 'q' closes the stream early
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    webcam.release()
    cv2.destroyAllWindows()
# Demo driver: capture face crops for the entered user from the webcam.
name = input("\nEnter name of person : ")
video_data_generator(name)
| [
"noreply@github.com"
] | Ajay2521.noreply@github.com |
2e8a3ba4e7038eac2ccc96263bdc2de8ad1ca6fe | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03761/s160390356.py | 6afd5b1b7f5508c82464c33c2033ab81a3670a86 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | import sys
from string import ascii_lowercase as alphabets
from collections import Counter
def main():
    """Read n strings and print, in alphabetical order, each letter
    repeated by the minimum number of times it appears in every string
    (hand-rolled per-string counting)."""
    SENTINEL = 100
    counts = {letter: 0 for letter in alphabets}
    minimum = {letter: SENTINEL for letter in alphabets}
    total = int(input())
    words = [input() for _ in range(total)]
    for word in words:
        for ch in word:
            counts[ch] += 1
        for letter in alphabets:
            if counts[letter] < minimum[letter]:
                minimum[letter] = counts[letter]
            counts[letter] = 0
    print(''.join([letter * minimum[letter] for letter in alphabets if minimum[letter] < SENTINEL]))
def main2():
    """Same answer as main(), but counts each input string with
    collections.Counter instead of manual tallying."""
    SENTINEL = 100
    total = int(input())
    counted = [Counter(input()) for _ in range(total)]
    minimum = {letter: SENTINEL for letter in alphabets}
    for counter in counted:
        for letter in alphabets:
            if counter[letter] < minimum[letter]:
                minimum[letter] = counter[letter]
    print(''.join([letter * minimum[letter] for letter in alphabets if minimum[letter] < SENTINEL]))


if __name__ == '__main__':
    main2()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
1e4a3845791a36a5df31e7ae98bd5bd693739a52 | d1ddb9e9e75d42986eba239550364cff3d8f5203 | /google-cloud-sdk/lib/surface/compute/xpn/organizations/__init__.py | 72700f6654f058b41f4e7a2194a8e157ea46cb1b | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | bopopescu/searchparty | 8ecd702af0d610a7ad3a8df9c4d448f76f46c450 | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | refs/heads/master | 2022-11-19T14:44:55.421926 | 2017-07-28T14:55:43 | 2017-07-28T14:55:43 | 282,495,798 | 0 | 0 | Apache-2.0 | 2020-07-25T17:48:53 | 2020-07-25T17:48:52 | null | UTF-8 | Python | false | false | 812 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for configuring shared VPC network (XPN) organizations.
"""
from googlecloudsdk.calliope import base
class Organizations(base.Group):
    """Configure organizations for cross-project networking (XPN).

    Calliope command group only — the class body is intentionally empty;
    its subcommands are presumably registered from sibling modules in
    this package (per gcloud surface conventions) — confirm.
    """
| [
"vinvivo@users.noreply.github.com"
] | vinvivo@users.noreply.github.com |
b9edd3e6f9ac2e63615f9388ce32ecd2ad05b7ff | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03659/s513043584.py | 3aa6b000e56fcb4dfa57e4fb72410c1623d07084 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | n = int(input())
a_lst = list(map(int, input().split()))
x = a_lst[0]
y = sum(a_lst[1:])
diff = abs(y - x)
for a in a_lst[1:-1]:
x += a
y -= a
diff = min(diff, abs(y - x))
print(diff) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
db708b0106ae135b1760c606ca3898726edcde4b | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_to_M_Gk3_no_pad_BN/pyr_Tcrop256_pad20_jit15/pyr_2s/L5/step11_L2345678.py | 08c22bc3cddbff4ee3e973086092e77275248d2a | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,975 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path so sibling experiment modules can be imported.
import os
code_exe_path = os.path.realpath(__file__)  # absolute path of the currently executing step file
code_exe_path_element = code_exe_path.split("\\")  # split the path to locate which level kong_model2 sits at
kong_layer = code_exe_path_element.index("kong_model2")  # index of kong_model2 within the path elements
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])  # directory of kong_model2
import sys  # append kong_model2 to the import search path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print("    code_exe_path:", code_exe_path)
# print("    code_exe_path_element:", code_exe_path_element)
# print("    kong_layer:", kong_layer)
# print("    kong_model2_dir:", kong_model2_dir)
###############################################################################################################################################################################################################
# When launched via F5: if the working directory is not this file's folder,
# switch to it automatically so that step10_a.py can be imported.
code_exe_dir = os.path.dirname(code_exe_path)  # directory containing the currently executing step file
if(os.getcwd() != code_exe_dir):  # not already in this file's folder: switch there
    os.chdir(code_exe_dir)
# print("current_path:", os.getcwd())
###############################################################################################################################################################################################################
import Exps_7_v3.doc3d.I_to_M_Gk3_no_pad_BN.pyr_Tcrop256_pad20_jit15.pyr_0s.L5.step10_a as L5_0side
import Exps_7_v3.doc3d.I_to_M_Gk3_no_pad_BN.pyr_Tcrop256_pad20_jit15.pyr_1s.L5.step10_a as L5_1side
import step10_a as side2
#################################################################################################################################################################################################################################################################################################################################################################################################
ch032_1side_1__2side_all = [
L5_1side.ch032_1side_1,
side2.ch032_1side_1__2side_1,
]
ch032_1side_2__2side_all = [
L5_1side.ch032_1side_2,
side2.ch032_1side_2__2side_1,
side2.ch032_1side_2__2side_2,
]
ch032_1side_3__2side_all = [
L5_1side.ch032_1side_3,
side2.ch032_1side_3__2side_1,
side2.ch032_1side_3__2side_2,
side2.ch032_1side_3__2side_3,
]
ch032_1side_4__2side_all = [
L5_1side.ch032_1side_4,
side2.ch032_1side_4__2side_1,
side2.ch032_1side_4__2side_2,
side2.ch032_1side_4__2side_3,
side2.ch032_1side_4__2side_4,
]
ch032_1side_5__2side_all = [
L5_1side.ch032_1side_5,
side2.ch032_1side_5__2side_1,
side2.ch032_1side_5__2side_2,
side2.ch032_1side_5__2side_3,
side2.ch032_1side_5__2side_4,
side2.ch032_1side_5__2side_5,
]
ch032_1side_6__2side_all = [
L5_1side.ch032_1side_6,
side2.ch032_1side_6__2side_1,
side2.ch032_1side_6__2side_2,
side2.ch032_1side_6__2side_3,
side2.ch032_1side_6__2side_4,
side2.ch032_1side_6__2side_5,
side2.ch032_1side_6__2side_6,
]
ch032_1side_all__2side_all = [
[L5_0side.ch032_0side,],
ch032_1side_1__2side_all,
ch032_1side_2__2side_all,
ch032_1side_3__2side_all,
ch032_1side_4__2side_all,
ch032_1side_5__2side_all,
ch032_1side_6__2side_all,
]
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
ca50bb1529182918b335ff225b5d425669d78e7e | 56997c84a331433225f89f168520ad8d709083c1 | /Programmers/기타문제/압축/ziping_ver1.py | 7a9ba5815d01771a1bcea7d53a147f1cf8df61e5 | [] | no_license | miseop25/Back_Jun_Code_Study | 51e080f8ecf74f7d1a8bb1da404d29c8ba52325c | 1d993e718c37c571aae1d407054ec284dc24c922 | refs/heads/master | 2022-11-06T01:05:05.028838 | 2022-10-23T13:11:22 | 2022-10-23T13:11:22 | 200,828,984 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | def solution(msg):
answer = []
lzw_dict = dict()
word = "A"
for i in range(1, 27) :
lzw_dict[word] = i
word = chr(ord("A") + i )
m_index = 0
w = msg[0]
while m_index < len(msg):
if m_index + 1 < len(msg) :
temp = w + msg[m_index + 1]
else :
temp = w
if temp in lzw_dict :
answer.append(lzw_dict[temp])
else :
answer.append(lzw_dict[temp[: -1]])
break
if temp in lzw_dict :
w = temp
m_index += 1
else :
i+= 1
lzw_dict[temp] = i
answer.append(lzw_dict[temp[: -1]])
m_index += 1
w = msg[m_index]
return answer
print(solution("KAKAO")) | [
"richard25@naver.com"
] | richard25@naver.com |
bc16ca07017d65d4491e9ab5faa7546a0003799e | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/pcode/memstate/UniqueMemoryBank.pyi | 851ac4589eaa0006c84ab4e084c511aae767f320 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,184 | pyi | from typing import List
import ghidra.pcode.memstate
import ghidra.program.model.address
import java.lang
class UniqueMemoryBank(ghidra.pcode.memstate.MemoryBank):
"""
An subclass of MemoryBank intended for modeling the "unique" memory
space. The space is byte-addressable and paging is not supported.
"""
class WordInfo(object):
initialized: int
word: long
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getByte(self, __a0: int) -> int: ...
def getClass(self) -> java.lang.Class: ...
def getWord(self, __a0: List[int]) -> None: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def setByte(self, __a0: int, __a1: int) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
def __init__(self, spc: ghidra.program.model.address.AddressSpace, isBigEndian: bool): ...
def clear(self) -> None:
"""
Clear unique storage at the start of an instruction
"""
...
@staticmethod
def constructValue(ptr: List[int], offset: int, size: int, bigendian: bool) -> long: ...
@staticmethod
def deconstructValue(ptr: List[int], offset: int, val: long, size: int, bigendian: bool) -> None: ...
def equals(self, __a0: object) -> bool: ...
def getChunk(self, offset: long, size: int, dest: List[int], stopOnUninitialized: bool) -> int: ...
def getClass(self) -> java.lang.Class: ...
def getInitializedMaskSize(self) -> int:
"""
@return the size of a page initialized mask in bytes. Each bit within the
mask corresponds to a data byte within a page.
"""
...
def getMemoryFaultHandler(self) -> ghidra.pcode.memstate.MemoryFaultHandler:
"""
@return memory fault handler (may be null)
"""
...
def getPageSize(self) -> int:
"""
A MemoryBank is instantiated with a \e natural page size. Requests for large chunks of data
may be broken down into units of this size.
@return the number of bytes in a page.
"""
...
def getSpace(self) -> ghidra.program.model.address.AddressSpace:
"""
@return the AddressSpace associated with this bank.
"""
...
def hashCode(self) -> int: ...
def isBigEndian(self) -> bool:
"""
@return true if memory bank is big endian
"""
...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def setChunk(self, offset: long, size: int, src: List[int]) -> None: ...
def setInitialized(self, offset: long, size: int, initialized: bool) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
3cded22e233c71a389e5421e7c74cc2030579743 | 76b5be6d12c6885c8cb9ae458bf878a3dcf0401c | /DojoAssignments/Python2/PythonAssignments/Django/Django/Proj_DojoNingas/Proj_DojoNingas/urls.py | 9c840046f1d7b69577656ae64346e0b90eeb477b | [] | no_license | DaseinUXD/CodingDojo | ba1d532750d61a21feb401243c49e05623e9b8c2 | 19b2d0f0ce9f8c9d08747438412e5c988073f385 | refs/heads/master | 2020-03-11T16:36:51.312297 | 2018-09-19T22:32:09 | 2018-09-19T22:32:09 | 130,121,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | """Proj_DojoNingas URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include("apps.dojo_ninjas.urls")),
]
| [
"markmatthewsphd@gmail.com"
] | markmatthewsphd@gmail.com |
8162fbb5266f2f8bfa76526a9ece1c0475bacea3 | e959e0af4559447309f083e73010752e88f848c4 | /meta/DesignDataPackage/lib/python/avm/schematic/__init__.py | f02e48688f5ceee5e08cf64e9acd918a96373baa | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive"
] | permissive | lefevre-fraser/openmeta-mms | 5621e5bd2f14f63c5f44e5130678f93dae87e3d3 | 08f3115e76498df1f8d70641d71f5c52cab4ce5f | refs/heads/master | 2022-04-21T13:39:07.209451 | 2020-04-14T19:56:34 | 2020-04-14T19:56:34 | 257,699,808 | 0 | 0 | NOASSERTION | 2020-04-21T19:48:04 | 2020-04-21T19:48:03 | null | UTF-8 | Python | false | false | 10,811 | py | # .\_schematic.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:2b86b09e6504617c4541a8a2f53a65ea784d5722
# Generated 2016-02-15 11:24:52.074000 by PyXB version 1.2.3
# Namespace schematic [xmlns:schematic]
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:057f9670-d409-11e5-9520-7429af7917c0')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.3'
# Generated bindings are not compatible across PyXB versions
if pyxb.__version__ != _PyXBVersion:
raise pyxb.PyXBVersionError(_PyXBVersion)
# Import bindings for namespaces imported into schema
import pyxb.binding.datatypes
import avm as _ImportedBinding__avm
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.NamespaceForURI(u'schematic', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
"""Parse the given XML and use the document element to create a
Python instance.
@param xml_text An XML document. This should be data (Python 2
str or Python 3 bytes), or a text (Python 2 unicode or Python 3
str) in the L{pyxb._InputEncoding} encoding.
@keyword default_namespace The L{pyxb.Namespace} instance to use as the
default namespace where there is no default namespace in scope.
If unspecified or C{None}, the namespace of the module containing
this function will be used.
@keyword location_base: An object to be recorded as the base of all
L{pyxb.utils.utility.Location} instances associated with events and
objects handled by the parser. You might pass the URI from which
the document was obtained.
"""
if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
dom = pyxb.utils.domutils.StringToDOM(xml_text)
return CreateFromDOM(dom.documentElement)
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
handler = saxer.getContentHandler()
xmld = xml_text
if isinstance(xmld, unicode):
xmld = xmld.encode(pyxb._InputEncoding)
saxer.parse(io.BytesIO(xmld))
instance = handler.rootObject()
return instance
def CreateFromDOM (node, default_namespace=None):
"""Create a Python instance from the given DOM node.
The node tag must correspond to an element declaration in this module.
@deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)
# Complex type {schematic}SchematicModel with content type ELEMENT_ONLY
class SchematicModel_ (_ImportedBinding__avm.DomainModel_):
"""Complex type {schematic}SchematicModel with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = True
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'SchematicModel')
_XSDLocation = pyxb.utils.utility.Location(u'avm.schematic.xsd', 6, 2)
_ElementMap = _ImportedBinding__avm.DomainModel_._ElementMap.copy()
_AttributeMap = _ImportedBinding__avm.DomainModel_._AttributeMap.copy()
# Base type is _ImportedBinding__avm.DomainModel_
# Element Pin uses Python identifier Pin
__Pin = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'Pin'), 'Pin', '__schematic_SchematicModel__Pin', True, pyxb.utils.utility.Location(u'avm.schematic.xsd', 10, 10), )
Pin = property(__Pin.value, __Pin.set, None, None)
# Attribute UsesResource inherited from {avm}DomainModel
# Attribute Author inherited from {avm}DomainModel
# Attribute Notes inherited from {avm}DomainModel
# Attribute XPosition inherited from {avm}DomainModel
# Attribute YPosition inherited from {avm}DomainModel
# Attribute Name inherited from {avm}DomainModel
# Attribute ID inherited from {avm}DomainModel
_ElementMap.update({
__Pin.name() : __Pin
})
_AttributeMap.update({
})
Namespace.addCategoryObject('typeBinding', u'SchematicModel', SchematicModel_)
# Complex type {schematic}Pin with content type EMPTY
class Pin_ (_ImportedBinding__avm.DomainModelPort_):
"""Complex type {schematic}Pin with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'Pin')
_XSDLocation = pyxb.utils.utility.Location(u'avm.schematic.xsd', 15, 2)
_ElementMap = _ImportedBinding__avm.DomainModelPort_._ElementMap.copy()
_AttributeMap = _ImportedBinding__avm.DomainModelPort_._AttributeMap.copy()
# Base type is _ImportedBinding__avm.DomainModelPort_
# Attribute Notes inherited from {avm}Port
# Attribute XPosition inherited from {avm}Port
# Attribute Definition inherited from {avm}Port
# Attribute YPosition inherited from {avm}Port
# Attribute Name inherited from {avm}Port
# Attribute ID inherited from {avm}PortMapTarget
# Attribute PortMap inherited from {avm}PortMapTarget
# Attribute EDAGate uses Python identifier EDAGate
__EDAGate = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'EDAGate'), 'EDAGate', '__schematic_Pin__EDAGate', pyxb.binding.datatypes.string)
__EDAGate._DeclarationLocation = pyxb.utils.utility.Location(u'avm.schematic.xsd', 18, 8)
__EDAGate._UseLocation = pyxb.utils.utility.Location(u'avm.schematic.xsd', 18, 8)
EDAGate = property(__EDAGate.value, __EDAGate.set, None, None)
# Attribute EDASymbolLocationX uses Python identifier EDASymbolLocationX
__EDASymbolLocationX = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'EDASymbolLocationX'), 'EDASymbolLocationX', '__schematic_Pin__EDASymbolLocationX', pyxb.binding.datatypes.string)
__EDASymbolLocationX._DeclarationLocation = pyxb.utils.utility.Location(u'avm.schematic.xsd', 19, 8)
__EDASymbolLocationX._UseLocation = pyxb.utils.utility.Location(u'avm.schematic.xsd', 19, 8)
EDASymbolLocationX = property(__EDASymbolLocationX.value, __EDASymbolLocationX.set, None, None)
# Attribute EDASymbolLocationY uses Python identifier EDASymbolLocationY
__EDASymbolLocationY = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'EDASymbolLocationY'), 'EDASymbolLocationY', '__schematic_Pin__EDASymbolLocationY', pyxb.binding.datatypes.string)
__EDASymbolLocationY._DeclarationLocation = pyxb.utils.utility.Location(u'avm.schematic.xsd', 20, 8)
__EDASymbolLocationY._UseLocation = pyxb.utils.utility.Location(u'avm.schematic.xsd', 20, 8)
EDASymbolLocationY = property(__EDASymbolLocationY.value, __EDASymbolLocationY.set, None, None)
# Attribute EDASymbolRotation uses Python identifier EDASymbolRotation
__EDASymbolRotation = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'EDASymbolRotation'), 'EDASymbolRotation', '__schematic_Pin__EDASymbolRotation', pyxb.binding.datatypes.string)
__EDASymbolRotation._DeclarationLocation = pyxb.utils.utility.Location(u'avm.schematic.xsd', 21, 8)
__EDASymbolRotation._UseLocation = pyxb.utils.utility.Location(u'avm.schematic.xsd', 21, 8)
EDASymbolRotation = property(__EDASymbolRotation.value, __EDASymbolRotation.set, None, None)
# Attribute SPICEPortNumber uses Python identifier SPICEPortNumber
__SPICEPortNumber = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'SPICEPortNumber'), 'SPICEPortNumber', '__schematic_Pin__SPICEPortNumber', pyxb.binding.datatypes.unsignedInt)
__SPICEPortNumber._DeclarationLocation = pyxb.utils.utility.Location(u'avm.schematic.xsd', 22, 8)
__SPICEPortNumber._UseLocation = pyxb.utils.utility.Location(u'avm.schematic.xsd', 22, 8)
SPICEPortNumber = property(__SPICEPortNumber.value, __SPICEPortNumber.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__EDAGate.name() : __EDAGate,
__EDASymbolLocationX.name() : __EDASymbolLocationX,
__EDASymbolLocationY.name() : __EDASymbolLocationY,
__EDASymbolRotation.name() : __EDASymbolRotation,
__SPICEPortNumber.name() : __SPICEPortNumber
})
Namespace.addCategoryObject('typeBinding', u'Pin', Pin_)
SchematicModel = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'SchematicModel'), SchematicModel_, location=pyxb.utils.utility.Location(u'avm.schematic.xsd', 4, 2))
Namespace.addCategoryObject('elementBinding', SchematicModel.name().localName(), SchematicModel)
Pin = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Pin'), Pin_, location=pyxb.utils.utility.Location(u'avm.schematic.xsd', 5, 2))
Namespace.addCategoryObject('elementBinding', Pin.name().localName(), Pin)
SchematicModel_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'Pin'), Pin_, scope=SchematicModel_, location=pyxb.utils.utility.Location(u'avm.schematic.xsd', 10, 10)))
def _BuildAutomaton ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton
del _BuildAutomaton
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.schematic.xsd', 10, 10))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(SchematicModel_._UseForTag(pyxb.namespace.ExpandedName(None, u'Pin')), pyxb.utils.utility.Location(u'avm.schematic.xsd', 10, 10))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
SchematicModel_._Automaton = _BuildAutomaton()
| [
"kevin.m.smyth@gmail.com"
] | kevin.m.smyth@gmail.com |
dda51f8afb2664e55ffebcb38827068a86d57fc9 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/network/v20180201/get_virtual_network_gateway_bgp_peer_status.py | fde6e4b745e746cc3d66fd3df1e4a7b7d879a36e | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 2,685 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVirtualNetworkGatewayBgpPeerStatusResult',
'AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult',
'get_virtual_network_gateway_bgp_peer_status',
]
@pulumi.output_type
class GetVirtualNetworkGatewayBgpPeerStatusResult:
"""
Response for list BGP peer status API service call
"""
def __init__(__self__, value=None):
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def value(self) -> Optional[Sequence['outputs.BgpPeerStatusResponse']]:
"""
List of BGP peers
"""
return pulumi.get(self, "value")
class AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult(GetVirtualNetworkGatewayBgpPeerStatusResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualNetworkGatewayBgpPeerStatusResult(
value=self.value)
def get_virtual_network_gateway_bgp_peer_status(peer: Optional[str] = None,
resource_group_name: Optional[str] = None,
virtual_network_gateway_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult:
"""
Response for list BGP peer status API service call
:param str peer: The IP address of the peer to retrieve the status of.
:param str resource_group_name: The name of the resource group.
:param str virtual_network_gateway_name: The name of the virtual network gateway.
"""
__args__ = dict()
__args__['peer'] = peer
__args__['resourceGroupName'] = resource_group_name
__args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20180201:getVirtualNetworkGatewayBgpPeerStatus', __args__, opts=opts, typ=GetVirtualNetworkGatewayBgpPeerStatusResult).value
return AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult(
value=__ret__.value)
| [
"noreply@github.com"
] | morrell.noreply@github.com |
7e4eee384d51edf704de40157dbc2da7bef31188 | 9d9220ac3abc0aa316d1ce3653afe2c6d1a0593e | /sqlalchemy/ext/mutable.py | d3133b1f53df59fd86b5b3e4e6859ccf73b2b5e0 | [
"MIT"
] | permissive | sauloal/PiCastPy | 016733b597f8b15f2bc2fb6e6bc5f0f9aef95e70 | c907a5ba72ccd576b2c7ae78af25abb741327cee | refs/heads/master | 2020-06-04T21:34:45.015707 | 2013-08-09T17:18:29 | 2013-08-09T17:18:29 | 11,934,825 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,204 | py | # ext/mutable.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provide support for tracking of in-place changes to scalar values,
which are propagated into ORM change events on owning parent objects.
The :mod:`sqlalchemy.ext.mutable` extension replaces SQLAlchemy's legacy
approach to in-place mutations of scalar values, established by the
:class:`.types.MutableType` class as well as the ``mutable=True`` type flag,
with a system that allows change events to be propagated from the value to
the owning parent, thereby removing the need for the ORM to maintain copies
of values as well as the very expensive requirement of scanning through all
"mutable" values on each flush call, looking for changes.
.. _mutable_scalars:
Establishing Mutability on Scalar Column Values
===============================================
A typical example of a "mutable" structure is a Python dictionary.
Following the example introduced in :ref:`types_toplevel`, we
begin with a custom type that marshals Python dictionaries into
JSON strings before being persisted::
from sqlalchemy.types import TypeDecorator, VARCHAR
import json
class JSONEncodedDict(TypeDecorator):
"Represents an immutable structure as a json-encoded string."
impl = VARCHAR
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
The usage of ``json`` is only for the purposes of example. The
:mod:`sqlalchemy.ext.mutable` extension can be used
with any type whose target Python type may be mutable, including
:class:`.PickleType`, :class:`.postgresql.ARRAY`, etc.
When using the :mod:`sqlalchemy.ext.mutable` extension, the value itself
tracks all parents which reference it. Below, we illustrate a simple
version of the :class:`.MutableDict` dictionary object, which applies
the :class:`.Mutable` mixin to a plain Python dictionary::
import collections
from sqlalchemy.ext.mutable import Mutable
class MutableDict(Mutable, dict):
@classmethod
def coerce(cls, key, value):
"Convert plain dictionaries to MutableDict."
if not isinstance(value, MutableDict):
if isinstance(value, dict):
return MutableDict(value)
# this call will raise ValueError
return Mutable.coerce(key, value)
else:
return value
def __setitem__(self, key, value):
"Detect dictionary set events and emit change events."
dict.__setitem__(self, key, value)
self.changed()
def __delitem__(self, key):
"Detect dictionary del events and emit change events."
dict.__delitem__(self, key)
self.changed()
The above dictionary class takes the approach of subclassing the Python
built-in ``dict`` to produce a dict
subclass which routes all mutation events through ``__setitem__``. There are
variants on this approach, such as subclassing ``UserDict.UserDict`` or
``collections.MutableMapping``; the part that's important to this example is
that the :meth:`.Mutable.changed` method is called whenever an in-place
change to the datastructure takes place.
We also redefine the :meth:`.Mutable.coerce` method which will be used to
convert any values that are not instances of ``MutableDict``, such
as the plain dictionaries returned by the ``json`` module, into the
appropriate type. Defining this method is optional; we could just as well
created our ``JSONEncodedDict`` such that it always returns an instance
of ``MutableDict``, and additionally ensured that all calling code
uses ``MutableDict`` explicitly. When :meth:`.Mutable.coerce` is not
overridden, any values applied to a parent object which are not instances
of the mutable type will raise a ``ValueError``.
Our new ``MutableDict`` type offers a class method
:meth:`~.Mutable.as_mutable` which we can use within column metadata
to associate with types. This method grabs the given type object or
class and associates a listener that will detect all future mappings
of this type, applying event listening instrumentation to the mapped
attribute. Such as, with classical table metadata::
from sqlalchemy import Table, Column, Integer
my_data = Table('my_data', metadata,
Column('id', Integer, primary_key=True),
Column('data', MutableDict.as_mutable(JSONEncodedDict))
)
Above, :meth:`~.Mutable.as_mutable` returns an instance of ``JSONEncodedDict``
(if the type object was not an instance already), which will intercept any
attributes which are mapped against this type. Below we establish a simple
mapping against the ``my_data`` table::
from sqlalchemy import mapper
class MyDataClass(object):
pass
# associates mutation listeners with MyDataClass.data
mapper(MyDataClass, my_data)
The ``MyDataClass.data`` member will now be notified of in place changes
to its value.
There's no difference in usage when using declarative::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class MyDataClass(Base):
__tablename__ = 'my_data'
id = Column(Integer, primary_key=True)
data = Column(MutableDict.as_mutable(JSONEncodedDict))
Any in-place changes to the ``MyDataClass.data`` member
will flag the attribute as "dirty" on the parent object::
>>> from sqlalchemy.orm import Session
>>> sess = Session()
>>> m1 = MyDataClass(data={'value1':'foo'})
>>> sess.add(m1)
>>> sess.commit()
>>> m1.data['value1'] = 'bar'
>>> assert m1 in sess.dirty
True
The ``MutableDict`` can be associated with all future instances
of ``JSONEncodedDict`` in one step, using
:meth:`~.Mutable.associate_with`. This is similar to
:meth:`~.Mutable.as_mutable` except it will intercept all occurrences
of ``MutableDict`` in all mappings unconditionally, without
the need to declare it individually::
MutableDict.associate_with(JSONEncodedDict)
class MyDataClass(Base):
__tablename__ = 'my_data'
id = Column(Integer, primary_key=True)
data = Column(JSONEncodedDict)
Supporting Pickling
--------------------
The key to the :mod:`sqlalchemy.ext.mutable` extension relies upon the
placement of a ``weakref.WeakKeyDictionary`` upon the value object, which
stores a mapping of parent mapped objects keyed to the attribute name under
which they are associated with this value. ``WeakKeyDictionary`` objects are
not picklable, due to the fact that they contain weakrefs and function
callbacks. In our case, this is a good thing, since if this dictionary were
picklable, it could lead to an excessively large pickle size for our value
objects that are pickled by themselves outside of the context of the parent.
The developer responsibility here is only to provide a ``__getstate__`` method
that excludes the :meth:`~.MutableBase._parents` collection from the pickle
stream::
class MyMutableType(Mutable):
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_parents', None)
return d
With our dictionary example, we need to return the contents of the dict itself
(and also restore them on __setstate__)::
class MutableDict(Mutable, dict):
# ....
def __getstate__(self):
return dict(self)
def __setstate__(self, state):
self.update(state)
In the case that our mutable value object is pickled as it is attached to one
or more parent objects that are also part of the pickle, the :class:`.Mutable`
mixin will re-establish the :attr:`.Mutable._parents` collection on each value
object as the owning parents themselves are unpickled.
.. _mutable_composites:
Establishing Mutability on Composites
=====================================
Composites are a special ORM feature which allow a single scalar attribute to
be assigned an object value which represents information "composed" from one
or more columns from the underlying mapped table. The usual example is that of
a geometric "point", and is introduced in :ref:`mapper_composite`.
.. versionchanged:: 0.7
The internals of :func:`.orm.composite` have been
greatly simplified and in-place mutation detection is no longer enabled by
default; instead, the user-defined value must detect changes on its own and
propagate them to all owning parents. The :mod:`sqlalchemy.ext.mutable`
extension provides the helper class :class:`.MutableComposite`, which is a
slight variant on the :class:`.Mutable` class.
As is the case with :class:`.Mutable`, the user-defined composite class
subclasses :class:`.MutableComposite` as a mixin, and detects and delivers
change events to its parents via the :meth:`.MutableComposite.changed` method.
In the case of a composite class, the detection is usually via the usage of
Python descriptors (i.e. ``@property``), or alternatively via the special
Python method ``__setattr__()``. Below we expand upon the ``Point`` class
introduced in :ref:`mapper_composite` to subclass :class:`.MutableComposite`
and to also route attribute set events via ``__setattr__`` to the
:meth:`.MutableComposite.changed` method::
from sqlalchemy.ext.mutable import MutableComposite
class Point(MutableComposite):
def __init__(self, x, y):
self.x = x
self.y = y
def __setattr__(self, key, value):
"Intercept set events"
# set the attribute
object.__setattr__(self, key, value)
# alert all parents to the change
self.changed()
def __composite_values__(self):
return self.x, self.y
def __eq__(self, other):
return isinstance(other, Point) and \\
other.x == self.x and \\
other.y == self.y
def __ne__(self, other):
return not self.__eq__(other)
The :class:`.MutableComposite` class uses a Python metaclass to automatically
establish listeners for any usage of :func:`.orm.composite` that specifies our
``Point`` type. Below, when ``Point`` is mapped to the ``Vertex`` class,
listeners are established which will route change events from ``Point``
objects to each of the ``Vertex.start`` and ``Vertex.end`` attributes::
from sqlalchemy.orm import composite, mapper
from sqlalchemy import Table, Column
vertices = Table('vertices', metadata,
Column('id', Integer, primary_key=True),
Column('x1', Integer),
Column('y1', Integer),
Column('x2', Integer),
Column('y2', Integer),
)
class Vertex(object):
pass
mapper(Vertex, vertices, properties={
'start': composite(Point, vertices.c.x1, vertices.c.y1),
'end': composite(Point, vertices.c.x2, vertices.c.y2)
})
Any in-place changes to the ``Vertex.start`` or ``Vertex.end`` members
will flag the attribute as "dirty" on the parent object::
>>> from sqlalchemy.orm import Session
>>> sess = Session()
>>> v1 = Vertex(start=Point(3, 4), end=Point(12, 15))
>>> sess.add(v1)
>>> sess.commit()
>>> v1.end.x = 8
>>> assert v1 in sess.dirty
True
Coercing Mutable Composites
---------------------------
The :meth:`.MutableBase.coerce` method is also supported on composite types.
In the case of :class:`.MutableComposite`, the :meth:`.MutableBase.coerce`
method is only called for attribute set operations, not load operations.
Overriding the :meth:`.MutableBase.coerce` method is essentially equivalent
to using a :func:`.validates` validation routine for all attributes which
make use of the custom composite type::
class Point(MutableComposite):
# other Point methods
# ...
def coerce(cls, key, value):
if isinstance(value, tuple):
value = Point(*value)
elif not isinstance(value, Point):
raise ValueError("tuple or Point expected")
return value
.. versionadded:: 0.7.10,0.8.0b2
Support for the :meth:`.MutableBase.coerce` method in conjunction with
objects of type :class:`.MutableComposite`.
Supporting Pickling
--------------------
As is the case with :class:`.Mutable`, the :class:`.MutableComposite` helper
class uses a ``weakref.WeakKeyDictionary`` available via the
:meth:`.MutableBase._parents` attribute which isn't picklable. If we need to
pickle instances of ``Point`` or its owning class ``Vertex``, we at least need
to define a ``__getstate__`` that doesn't include the ``_parents`` dictionary.
Below we define both a ``__getstate__`` and a ``__setstate__`` that package up
the minimal form of our ``Point`` class::
class Point(MutableComposite):
# ...
def __getstate__(self):
return self.x, self.y
def __setstate__(self, state):
self.x, self.y = state
As with :class:`.Mutable`, the :class:`.MutableComposite` augments the
pickling process of the parent's object-relational state so that the
:meth:`.MutableBase._parents` collection is restored to all ``Point`` objects.
"""
from ..orm.attributes import flag_modified
from .. import event, types
from ..orm import mapper, object_mapper, Mapper
from ..util import memoized_property
import weakref
class MutableBase(object):
"""Common base class to :class:`.Mutable`
and :class:`.MutableComposite`.
"""
@memoized_property
def _parents(self):
"""Dictionary of parent object->attribute name on the parent.
This attribute is a so-called "memoized" property. It initializes
itself with a new ``weakref.WeakKeyDictionary`` the first time
it is accessed, returning the same object upon subsequent access.
"""
return weakref.WeakKeyDictionary()
@classmethod
def coerce(cls, key, value):
"""Given a value, coerce it into the target type.
Can be overridden by custom subclasses to coerce incoming
data into a particular type.
By default, raises ``ValueError``.
This method is called in different scenarios depending on if
the parent class is of type :class:`.Mutable` or of type
:class:`.MutableComposite`. In the case of the former, it is called
for both attribute-set operations as well as during ORM loading
operations. For the latter, it is only called during attribute-set
operations; the mechanics of the :func:`.composite` construct
handle coercion during load operations.
:param key: string name of the ORM-mapped attribute being set.
:param value: the incoming value.
:return: the method should return the coerced value, or raise
``ValueError`` if the coercion cannot be completed.
"""
if value is None:
return None
msg = "Attribute '%s' does not accept objects of type %s"
raise ValueError(msg % (key, type(value)))
@classmethod
def _listen_on_attribute(cls, attribute, coerce, parent_cls):
"""Establish this type as a mutation listener for the given
mapped descriptor.
"""
key = attribute.key
if parent_cls is not attribute.class_:
return
# rely on "propagate" here
parent_cls = attribute.class_
def load(state, *args):
"""Listen for objects loaded or refreshed.
Wrap the target data member's value with
``Mutable``.
"""
val = state.dict.get(key, None)
if val is not None:
if coerce:
val = cls.coerce(key, val)
state.dict[key] = val
val._parents[state.obj()] = key
def set(target, value, oldvalue, initiator):
"""Listen for set/replace events on the target
data member.
Establish a weak reference to the parent object
on the incoming value, remove it for the one
outgoing.
"""
if not isinstance(value, cls):
value = cls.coerce(key, value)
if value is not None:
value._parents[target.obj()] = key
if isinstance(oldvalue, cls):
oldvalue._parents.pop(target.obj(), None)
return value
def pickle(state, state_dict):
val = state.dict.get(key, None)
if val is not None:
if 'ext.mutable.values' not in state_dict:
state_dict['ext.mutable.values'] = []
state_dict['ext.mutable.values'].append(val)
def unpickle(state, state_dict):
if 'ext.mutable.values' in state_dict:
for val in state_dict['ext.mutable.values']:
val._parents[state.obj()] = key
event.listen(parent_cls, 'load', load,
raw=True, propagate=True)
event.listen(parent_cls, 'refresh', load,
raw=True, propagate=True)
event.listen(attribute, 'set', set,
raw=True, retval=True, propagate=True)
event.listen(parent_cls, 'pickle', pickle,
raw=True, propagate=True)
event.listen(parent_cls, 'unpickle', unpickle,
raw=True, propagate=True)
class Mutable(MutableBase):
"""Mixin that defines transparent propagation of change
events to a parent object.
See the example in :ref:`mutable_scalars` for usage information.
"""
def changed(self):
"""Subclasses should call this method whenever change events occur."""
for parent, key in self._parents.items():
flag_modified(parent, key)
@classmethod
def associate_with_attribute(cls, attribute):
"""Establish this type as a mutation listener for the given
mapped descriptor.
"""
cls._listen_on_attribute(attribute, True, attribute.class_)
@classmethod
def associate_with(cls, sqltype):
"""Associate this wrapper with all future mapped columns
of the given type.
This is a convenience method that calls
``associate_with_attribute`` automatically.
.. warning::
The listeners established by this method are *global*
to all mappers, and are *not* garbage collected. Only use
:meth:`.associate_with` for types that are permanent to an
application, not with ad-hoc types else this will cause unbounded
growth in memory usage.
"""
def listen_for_type(mapper, class_):
for prop in mapper.column_attrs:
if isinstance(prop.columns[0].type, sqltype):
cls.associate_with_attribute(getattr(class_, prop.key))
event.listen(mapper, 'mapper_configured', listen_for_type)
@classmethod
def as_mutable(cls, sqltype):
"""Associate a SQL type with this mutable Python type.
This establishes listeners that will detect ORM mappings against
the given type, adding mutation event trackers to those mappings.
The type is returned, unconditionally as an instance, so that
:meth:`.as_mutable` can be used inline::
Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Column('data', MyMutableType.as_mutable(PickleType))
)
Note that the returned type is always an instance, even if a class
is given, and that only columns which are declared specifically with
that type instance receive additional instrumentation.
To associate a particular mutable type with all occurrences of a
particular type, use the :meth:`.Mutable.associate_with` classmethod
of the particular :meth:`.Mutable` subclass to establish a global
association.
.. warning::
The listeners established by this method are *global*
to all mappers, and are *not* garbage collected. Only use
:meth:`.as_mutable` for types that are permanent to an application,
not with ad-hoc types else this will cause unbounded growth
in memory usage.
"""
sqltype = types.to_instance(sqltype)
def listen_for_type(mapper, class_):
for prop in mapper.column_attrs:
if prop.columns[0].type is sqltype:
cls.associate_with_attribute(getattr(class_, prop.key))
event.listen(mapper, 'mapper_configured', listen_for_type)
return sqltype
class MutableComposite(MutableBase):
"""Mixin that defines transparent propagation of change
events on a SQLAlchemy "composite" object to its
owning parent or parents.
See the example in :ref:`mutable_composites` for usage information.
"""
def changed(self):
"""Subclasses should call this method whenever change events occur."""
for parent, key in self._parents.items():
prop = object_mapper(parent).get_property(key)
for value, attr_name in zip(
self.__composite_values__(),
prop._attribute_keys):
setattr(parent, attr_name, value)
def _setup_composite_listener():
def _listen_for_type(mapper, class_):
for prop in mapper.iterate_properties:
if (hasattr(prop, 'composite_class') and
isinstance(prop.composite_class, type) and
issubclass(prop.composite_class, MutableComposite)):
prop.composite_class._listen_on_attribute(
getattr(class_, prop.key), False, class_)
if not Mapper.dispatch.mapper_configured._contains(Mapper, _listen_for_type):
event.listen(Mapper, 'mapper_configured', _listen_for_type)
_setup_composite_listener()
class MutableDict(Mutable, dict):
"""A dictionary type that implements :class:`.Mutable`.
.. versionadded:: 0.8
"""
def __setitem__(self, key, value):
"""Detect dictionary set events and emit change events."""
dict.__setitem__(self, key, value)
self.changed()
def __delitem__(self, key):
"""Detect dictionary del events and emit change events."""
dict.__delitem__(self, key)
self.changed()
def clear(self):
dict.clear(self)
self.changed()
@classmethod
def coerce(cls, key, value):
"""Convert plain dictionary to MutableDict."""
if not isinstance(value, MutableDict):
if isinstance(value, dict):
return MutableDict(value)
return Mutable.coerce(key, value)
else:
return value
def __getstate__(self):
return dict(self)
def __setstate__(self, state):
self.update(state)
| [
"sauloal@gmail.com"
] | sauloal@gmail.com |
ab931e5ccb6b71bc187e3317e6c7844a9b49c6ea | 5d32d0e65aa3bfa677fd1b8c92569e07e9b82af1 | /Section 1 - Getting Started/Breakouts/Breakout 1.2 - Turtle Graphics/Turtle Shapes v2 - block1.py | dfff63c79e6078cc283ec4b1983b196f82b5dbf7 | [
"CC0-1.0"
] | permissive | pdst-lccs/lccs-python | b74ef2a02ac8ad2637f713fff5559f4e56c9827d | 95cb7ece05716521e9951d7a40de8fb20a88021f | refs/heads/master | 2023-05-28T00:46:57.313972 | 2023-05-22T10:16:43 | 2023-05-22T10:16:43 | 240,501,524 | 21 | 18 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | # Event: LCCS Python Fundamental Skills Workshop
# Date: May 2018
# Author: Joe English, PDST
# eMail: computerscience@pdst.ie
# Purpose: Turtle Graphics - Further Activities
# Match the code blocks below to the corresponding shape
from turtle import * # import the turtle graphics library
forward(100)
right(90)
forward(50)
right(90)
forward(100)
right(90)
forward(50)
| [
"noreply@github.com"
] | pdst-lccs.noreply@github.com |
1d3245d9d48900c68394b1f5a8a746a3c42b03d0 | 0e6ce40f5a8e302698c2d0ddf945f7fa34dd190a | /mysite/polls/urls.py | 111b32d6b36b0bdc1ba4287fa9ac61db213f246c | [] | no_license | ricetak/django_tutorial | c89887a8153fb6901d3980217318c5137f7d3495 | 370ebade46a1aeade28b6626c4014cfea5db3a8b | refs/heads/master | 2020-05-26T09:12:34.024688 | 2019-05-23T07:10:55 | 2019-05-23T07:10:55 | 188,181,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | from django.urls import path
from . import views
'''
app_name = 'polls'
urlpatterns = [
# ex: /polls/
path('', views.index, name='index'),
# ex: /polls/5/
path('<int:question_id>/', views.detail, name='detail'),
# ex: /polls/5/results/
path('<int:question_id>/results/', views.results, name='results'),
# ex: /polls/5/vote/
path('<int:question_id>/vote/', views.vote, name='vote'),
]
'''
app_name = 'polls'
urlpatterns = [
path('', views.IndexView.as_view(), name='index'),
path('<int:pk>/', views.DetailView.as_view(), name='detail'),
path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
path('<int:question_id>/vote/', views.vote, name='vote'),
] | [
"you@example.com"
] | you@example.com |
2298f7d32ab7b27f31caad11a0c3d60da1efc78d | a1b649fcd0b6f6c51afb13f406f53d7d823847ca | /studies/migrations/0031_merge_20170828_1227.py | f58fb7d972f9061d9e93b85af8e2e50c1173197c | [
"MIT"
] | permissive | enrobyn/lookit-api | e79f0f5e7a4ef8d94e55b4be05bfacaccc246282 | 621fbb8b25100a21fd94721d39003b5d4f651dc5 | refs/heads/master | 2020-03-27T01:54:00.844971 | 2018-08-08T15:33:25 | 2018-08-08T15:33:25 | 145,752,095 | 0 | 0 | MIT | 2018-08-22T19:14:05 | 2018-08-22T19:14:04 | null | UTF-8 | Python | false | false | 340 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-28 16:27
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('studies', '0030_merge_20170827_1909'),
('studies', '0030_merge_20170827_1539'),
]
operations = [
]
| [
"cwisecarver@cos.io"
] | cwisecarver@cos.io |
52836a63f5ed574bbefa5ef16a42d4feb1fddf38 | 9505e191cb287507c7df05212ab562bea1eda553 | /Data structures&Algorithms/bst.py | af404b2867e2e4e6c1e26a024acf1a2cfc23779b | [
"MIT"
] | permissive | iisdd/Courses | c7a662305f3efe7d61eb23f766381290b1107bb8 | a47d202e0d7e1ba85a38c6fe3dd9619eceb1045c | refs/heads/main | 2023-04-15T17:40:36.474322 | 2021-04-27T14:31:42 | 2021-04-27T14:31:42 | 316,904,233 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,154 | py | # 二叉搜索树,取个英文名字方便调包
class BiTreeNode:
def __init__(self, data):
self.data = data
self.lchild = None # 左孩子节点
self.rchild = None # 右孩子节点
self.parent = None
class BST:
def __init__(self, li=None):
self.root = None
if li:
for val in li:
self.insert_no_rec(val)
############################################## 插入功能 ################################################
def insert(self, node, val):
if not node: # 当前节点为None,就改变这个位置的值
node = BiTreeNode(val)
elif node.data > val: # 如果值改变了那就与左孩子建立联系,如果没改变就当说了句废话
node.lchild = self.insert(node.lchild, val) # 如果node.lchild有值就接着比,没有就落户了
node.lchild.parent = node
elif node.data < val: # 不考虑插入相同元素的情况
node.rchild = self.insert(node.rchild, val)
node.rchild.parent = node
return node
def insert_no_rec(self, val): # 非递归形式的插入
p = self.root
if not p: # 空树
self.root = BiTreeNode(val)
return
while 1:
if p.data > val:
if p.lchild: # 存在左孩子
p = p.lchild
else: # 左边没有节点,捏一个节点
p.lchild = BiTreeNode(val)
p.lchild.parent = p
return
elif p.data < val:
if p.rchild:
p = p.rchild
else:
p.rchild = BiTreeNode(val)
p.rchild.parent = p
return
############################################## 插入功能 ################################################
############################################## 查询功能 ################################################
def query(self, node, val): # 查询功能,递归版本
if not node:
return None
if node.data < val:
return self.query(node.rchild, val)
elif node.data > val:
return self.query(node.lchild, val)
else:
return node
def query_no_rec(self, val):
p = self.root
while p:
if p.data > val:
p = p.lchild
elif p.data < val:
p = p.rchild
else:
return p
############################################## 查询功能 ################################################
###################################### 遍历打印功能 #######################################
def pre_order(self, root): # 前序遍历树的节点,使用递归实现
if root:
print(root.data, end=',')
self.pre_order(root.lchild)
self.pre_order(root.rchild)
def in_order(self, root):
if root:
self.in_order(root.lchild)
print(root.data, end=',')
self.in_order(root.rchild)
def post_order(self, root):
if root:
self.post_order(root.lchild)
self.post_order(root.rchild)
print(root.data, end=',')
###################################### 遍历打印功能 #######################################
###################################### 删除功能 #######################################
def __remove_node_1(self, node): # 情况1: 删除的节点是叶子节点,两个下划线表示类内方法
if not node.parent: # node是根节点
self.root = None
elif node == node.parent.lchild: # node是它父节点的左孩子
node.parent.lchild = None
else: # node是它父节点的右孩子
node.parent.rchild = None
def __remove_node_21(self, node): # 情况2.1: 删除的节点不是叶子节点,且其只有左孩子
if not node.parent: # node是根节点
self.root = node.lchild
node.lchild.parent = None
elif node == node.parent.lchild: # node是其父节点的左孩子节点
node.parent.lchild = node.lchild
node.lchild.parent = node.parent
else: # node是其父节点的右孩子节点
node.parent.rchild = node.rchild
node.rchild.parent = node.parent
def __remove_node_22(self, node): # 情况2.2: 删除的节点非叶子节点,且其只有右孩子
if not node.parent:
self.root = node.rchild
node.rchild.parent = None
elif node == node.parent.lchild: # node是其父节点的左孩子节点
node.parent.lchild = node.rchild
node.rchild.parent = node.parent
else: # node是其父节点的右孩子节点
node.parent.rchild = node.rchild
node.rchild.parent = node.parent
def delete(self, val):
if self.root: # 不是空树
node = self.query_no_rec(val)
if not node:
return False # 没找到要删除的节点
if not node.lchild and not node.rchild: # 情况1:叶子节点
self.__remove_node_1(node)
elif not node.rchild: # 情况2.1:只有左孩子节点
self.__remove_node_21(node)
elif not node.lchild: # 情况2.2:只有右孩子节点
self.__remove_node_22(node)
else: # 情况3:有两个节点,找右孩子的最小节点
min_node = node.rchild
while min_node.lchild:
min_node = min_node.lchild
node.data = min_node.data
if min_node.rchild:
self.__remove_node_22(min_node)
else:
self.__remove_node_1(min_node)
###################################### 删除功能 #######################################
# tree = BST([4,6,7,9,2,1,3,5,8])
# tree.pre_order(tree.root)
# print('')
# tree.in_order(tree.root) # 升序的
# print('\n', tree.query_no_rec(4).data)
# print(tree.query_no_rec(11))
#
# tree.delete(4)
# tree.delete(1)
# tree.delete(8)
# tree.in_order(tree.root) | [
"noreply@github.com"
] | iisdd.noreply@github.com |
b40d9aae99b9bedfefe3e549932913b1fbbe044c | e01ab8185f34be16777e61aa7ce71f00e037dcf3 | /scripts/parallel_align_seqs_pynast.py | d4b23c42dd44163a509d00625ff5dedd70c4da5d | [] | no_license | zellett/qiime | caf6eae9e8346f7cdd2fed1f9c580a1777046709 | 395ae76e03ccf57272dc17a6d6555edb15ce4783 | refs/heads/master | 2020-05-20T06:03:22.867386 | 2012-12-09T19:08:07 | 2012-12-09T19:08:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,681 | py | #!/usr/bin/env python
# File created on 09 Feb 2010
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.5.0-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
__status__ = "Development"
import warnings
warnings.filterwarnings('ignore', 'Not using MPI as mpi4py not found')
from qiime.util import (parse_command_line_parameters,
get_options_lookup,
make_option,
load_qiime_config)
from qiime.align_seqs import pairwise_alignment_methods
from qiime.parallel.align_seqs import ParallelAlignSeqsPyNast
qiime_config = load_qiime_config()
options_lookup = get_options_lookup()
script_info={}
script_info['brief_description']="""Parallel sequence alignment using PyNAST"""
script_info['script_description']="""A wrapper for the align_seqs.py PyNAST option, intended to make use of multicore/multiprocessor environments to perform analyses in parallel."""
script_info['script_usage']=[]
script_info['script_usage'].append(("""Example""","""Align the input file (-i) against using PyNAST and write the output (-o) to $PWD/pynast_aligned_seqs/. ALWAYS SPECIFY ABSOLUTE FILE PATHS (absolute path represented here as $PWD, but will generally look something like /home/ubuntu/my_analysis/).""","""%prog -i $PWD/inseqs.fasta -o $PWD/pynast_aligned_seqs/"""))
script_info['output_description']="""This results in a multiple sequence alignment (FASTA-formatted)."""
script_info['required_options'] = [\
options_lookup['fasta_as_primary_input'],\
options_lookup['output_dir']
]
pairwise_alignment_method_choices = pairwise_alignment_methods.keys()
blast_db_default_help =\
qiime_config['pynast_template_alignment_blastdb'] or \
'created on-the-fly from template_alignment'
script_info['optional_options'] = [\
make_option('-a','--pairwise_alignment_method',\
type='choice',help='Method to use for pairwise alignments'+\
' [default: %default]',\
default='uclust',choices=pairwise_alignment_method_choices),\
make_option('-d','--blast_db',\
dest='blast_db',help='Database to blast against'+\
' [default: %s]' % blast_db_default_help,
default=qiime_config['pynast_template_alignment_blastdb']),\
make_option('-e','--min_length',\
type='int',help='Minimum sequence '+\
'length to include in alignment [default: 75% of the'+\
' median input sequence length]',\
default=-1),
make_option('-p','--min_percent_id',action='store',\
type='float',help='Minimum percent '+\
'sequence identity to closest blast hit to include sequence in'+\
' alignment [default: %default]',default=75.0),\
options_lookup['jobs_to_start'],
options_lookup['retain_temp_files'],
options_lookup['suppress_submit_jobs'],
options_lookup['poll_directly'],
options_lookup['cluster_jobs_fp'],
options_lookup['suppress_polling'],
options_lookup['job_prefix'],
options_lookup['seconds_to_sleep']
]
script_info['version'] = __version__
# pynast_template_alignment_fp is required only if it is not
# provided in qiime_config
if qiime_config['pynast_template_alignment_fp']:
script_info['optional_options'].append(make_option('-t','--template_fp',\
type='string',dest='template_fp',help='Filepath for '+\
'template against [default: %default]',
default=qiime_config['pynast_template_alignment_fp']))
else:
script_info['required_options'].append(make_option('-t','--template_fp',\
type='string',dest='template_fp',\
help='Filepath for template against',
default=qiime_config['pynast_template_alignment_fp']))
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
# create dict of command-line options
params = eval(str(opts))
parallel_runner = ParallelAlignSeqsPyNast(
cluster_jobs_fp=opts.cluster_jobs_fp,
jobs_to_start=opts.jobs_to_start,
retain_temp_files=opts.retain_temp_files,
suppress_polling=opts.suppress_polling,
seconds_to_sleep=opts.seconds_to_sleep)
parallel_runner(opts.input_fasta_fp,
opts.output_dir,
params,
job_prefix=opts.job_prefix,
poll_directly=opts.poll_directly,
suppress_submit_jobs=False)
if __name__ == "__main__":
main() | [
"gregcaporaso@gmail.com"
] | gregcaporaso@gmail.com |
322e397c0f0c080a5c48552626e6c1dd530072c3 | a83bafc38b514a0339a5991be15870551ac49681 | /test/test_raw_material.py | 408000cb4e9c5d50a4d0ba805ba5188e82f5dea7 | [] | no_license | bimdata/python-api-client | 4ec2f81e404ef88d3a7e4d08e18965b598c567a2 | c9b6ea0fbb4729b2a1c10522bdddfe08d944739d | refs/heads/master | 2023-08-17T13:38:43.198097 | 2023-08-09T12:48:12 | 2023-08-09T12:48:12 | 131,603,315 | 0 | 4 | null | 2022-10-10T15:21:26 | 2018-04-30T14:06:15 | Python | UTF-8 | Python | false | false | 900 | py | """
BIMData API
BIMData API is a tool to interact with your models stored on BIMData’s servers. Through the API, you can manage your projects, the clouds, upload your IFC files and manage them through endpoints. # noqa: E501
The version of the OpenAPI document: v1 (v1)
Contact: support@bimdata.io
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import bimdata_api_client
from bimdata_api_client.model.raw_material import RawMaterial
class TestRawMaterial(unittest.TestCase):
"""RawMaterial unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testRawMaterial(self):
"""Test RawMaterial"""
# FIXME: construct object with mandatory attributes with example values
# model = RawMaterial() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"infra@bimdata.io"
] | infra@bimdata.io |
b93e64133180e5336b6485e8319d6a56dcd396ef | b01f1b9bafc9c45125fff4c90f0dc5bca8707fe8 | /tests/test_fonts3.py | 76488d42dfc73fa0bb2f46163c86c4af2123e66a | [
"MIT"
] | permissive | bunkahle/PILasOPENCV | a573d0b42b00a22822a85e2260e83ea6f5f642ed | 832bae926614a16a5a3ae882a25576862aff4125 | refs/heads/master | 2023-04-06T19:39:02.163531 | 2022-03-09T16:04:04 | 2022-03-09T16:04:04 | 176,177,923 | 27 | 10 | MIT | 2023-03-19T12:25:54 | 2019-03-18T00:33:31 | Python | UTF-8 | Python | false | false | 711 | py | from __future__ import print_function
import PILasOPENCV as Image
import PILasOPENCV as ImageDraw
import PILasOPENCV as ImageFont
import cv2
# font = ImageFont.truetype("arial.ttf", 30)
size = 20
font = ImageFont.truetype("msgothic.ttc", 22+int(size/50), index=0, encoding="unic")
print(font)
im = Image.new("RGB", (512, 512), "grey")
draw = ImageDraw.Draw(im)
text = "Some text in arial"
draw.text((100, 250), text, font=font, fill=(0, 0, 0))
im = im.resize((256,256), Image.ANTIALIAS)
print(ImageFont.getsize(text, font))
mask = ImageFont.getmask(text, font)
print(type(mask))
cv2.imshow("mask", mask)
im.show()
im_numpy = im.getim()
print(type(im_numpy), im_numpy.shape, im_numpy.dtype) | [
"noreply@github.com"
] | bunkahle.noreply@github.com |
fe3ff6faf74a462febd26d25acda2a52115ffadf | 5ac348d455265b9733b8ae930e45998213f226ac | /AI/lab2/part2/PCA.py | 60def303b7c472146f5b655cbb87f5394aca32a9 | [] | no_license | jsw-zorro/USTC-Junior-Lab | e5eed0f2e1e9b1487d7554f8f8302f74cd4116d1 | 35c3f11b505de72b14e5ca9ea5188825302dcfd9 | refs/heads/master | 2020-04-10T22:42:11.432863 | 2018-07-04T06:09:13 | 2018-07-04T06:09:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,274 | py | # -*- coding: utf-8 -*
import numpy as np
import os
import math
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from scipy import misc
SAMPLE_NUM = 10
CLASS_NUM = 40
IMG_SHAPE = (112, 92)
scale = 0.5
k = 8
principal_percent = 0.8
def load_faceimg(path_dir, shrink_rate=0.5, train_rate=0.8):
sample_k = int(train_rate * SAMPLE_NUM)
train_m = int(train_rate * SAMPLE_NUM * CLASS_NUM)
test_m = int((1 - train_rate) * SAMPLE_NUM * CLASS_NUM) + 1
shape0 = int(IMG_SHAPE[0] * shrink_rate)
shape1 = int(IMG_SHAPE[1] * shrink_rate)
train_x = np.zeros((train_m, shape0 * shape1))
train_y = np.zeros(train_m).astype(np.int8)
test_x = np.zeros((test_m, shape0 * shape1))
test_y = np.zeros(test_m).astype(np.int8)
print train_x.shape, test_x.shape
for i in range(CLASS_NUM):
face_lable = i + 1
for j in range(SAMPLE_NUM):
filename = path_dir + '/s' + str(face_lable) + '/' + str(j + 1) + '.pgm'
img = misc.imresize(mpimg.imread(filename), shrink_rate).flatten().astype(np.float)
if j < sample_k:
train_x[i * sample_k + j, :] = img
train_y[i * sample_k + j] = face_lable
if j >= sample_k:
test_x[i * (10 - sample_k) + (j - sample_k), :] = img
test_y[i * (10 - sample_k) + (j - sample_k)] = face_lable
return train_x, train_y, test_x, test_y
# 0均值化
def zero_mean(train_x, test_x):
mean_x = train_x.mean(axis = 0).reshape(1, train_x.shape[1])
train_x = train_x - np.repeat(mean_x, train_x.shape[0], axis = 0)
test_x = test_x - np.repeat(mean_x, test_x.shape[0], axis=0)
return train_x, test_x
# PCA降维
def pca(train_x, test_x, threshold):
# step1.零均值化
train_x, test_x = zero_mean(train_x, test_x)
# step2.协方差矩阵
cov = np.cov(train_x, rowvar=0)
# step3.求特征值、特征向量并排序,以及贡献率对应的n值
eig_vals, eig_vecs = np.linalg.eig(cov)
n = threshold_trans(eig_vals, threshold)
eig = np.vstack((eig_vals, eig_vecs))
eig_vecs = np.delete(eig.T[np.lexsort(eig[::-1, :])].T[:, ::-1], 0, axis=0)
# step4.选择前n个特征向量作为基,降维
# n = int(eig_vecs.shape[1]*principal_percent)
eig_vecs = eig_vecs[:, 0:n]
train_x = np.dot(train_x, eig_vecs)
test_x = np.dot(test_x, eig_vecs)
return train_x, test_x, eig_vecs
def threshold_trans(values, ths):
all_values = sum(values)
sorted_values = np.sort(values)
sorted_values = sorted_values[-1::-1]
part_values = 0
n = 0
for value in sorted_values:
part_values += value
n += 1
if part_values >= all_values * ths:
return n
def predict(train_x, train_y, test_x, test_y):
# recognise via measuring educlidean distance in high dimentional space
count = 0
for i in range(test_x.shape[0]):
test_x1 = test_x[i, :].reshape((1, test_x.shape[1]))
sub = train_x - np.repeat(test_x1, train_x.shape[0], axis=0)
dis = np.linalg.norm(sub, axis=1)
fig = np.argmin(dis)
# print i, train_y[fig], test_y[i]
if train_y[fig] == test_y[i]:
count += 1
return count
def plot_face(img):
plt.figure('low dimension map')
r, c = (4, 10)
for i in range(r * c):
plt.subplot(r, c, i + 1)
x = int(math.sqrt(img.shape[1]))
plt.imshow(img[:, i].real.reshape(int(112*0.5), int(92*0.5)), cmap='gray')
plt.axis('off')
plt.show()
threshold = [0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.999, 0.999999]
# 载入数据集
print '[INFO]loading...'
train_xs, train_y, test_xs, test_y = load_faceimg(os.getcwd() + '/data')
# pca降维
print '[INFO]PCA...'
for ths in threshold:
train_x, test_x, eig_vecs = pca(train_xs, test_xs, ths)
print ths, train_x.shape
# 预测
count = predict(train_x, train_y, test_x, test_y)
correct_rate = count * 1.0 / test_x.shape[0]
print "Correct rate =", correct_rate * 100, "%"
if train_x.shape[1] > 40:
plot_face(eig_vecs) | [
"632679697@qq.com"
] | 632679697@qq.com |
139ef0f58f8798e9a04aff0ea852b8ecfd81d553 | 4fc11d739ce921ca2bd7fa4972c5bdaab620f714 | /backend/manage.py | 5cbe2d9cd42d0ea383489677a48668026adaae46 | [] | no_license | crowdbotics-apps/u-app-20291 | 4972a2d31a7d550821c1a677695839376f8fbd55 | 3b550f1374592938e75fe5f8f4a1b43d691a2055 | refs/heads/master | 2022-12-12T23:12:59.718116 | 2020-09-15T17:29:52 | 2020-09-15T17:29:52 | 295,799,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'u_app_20291.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
9486c9afd91cf41d204a2a553a2d8a52d5904654 | 0b0d3246d39974cb8faff7d269da2d539415afab | /problem_python/p643.py | f23a17357f6d483556d9499124308313657f5eea | [] | no_license | xionghhcs/leetcode | 972e7ae4ca56b7100223630b294b5a97ba5dd7e8 | 8bd43dcd995a9de0270b8cea2d9a48df17ffc08b | refs/heads/master | 2020-03-07T17:18:08.465559 | 2019-09-29T11:11:26 | 2019-09-29T11:11:26 | 127,607,564 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | class Solution(object):
def findMaxAverage(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: float
"""
tmp_sum = sum(nums[:4])
i = 0
ans = tmp_sum
for j in range(k, len(nums)):
tmp_sum = tmp_sum - nums[i] + nums[j]
if tmp_sum > ans:
ans = tmp_sum
i += 1
return float(ans) / k
| [
"xionghhcs@163.com"
] | xionghhcs@163.com |
8afd1ee4f786b09f6389370a832292037a5f5e00 | 7c0934affab1651d64114100c728aeff34853e73 | /tests/storage_adapter_tests/test_jsondb_adapter.py | 254d190c5e899b5f2bab47731d66df30c2112553 | [
"BSD-3-Clause"
] | permissive | kashyap32/ChatterBot | 04bc5839081851db7e0dd5d9fadb7b36718044a6 | a5486b66f25cfdfca3ed016cce4c1850e6f1cf07 | refs/heads/master | 2021-01-21T03:29:17.640369 | 2016-05-13T23:54:58 | 2016-05-14T00:13:35 | 58,814,919 | 1 | 0 | null | 2016-05-14T15:24:19 | 2016-05-14T15:24:19 | null | UTF-8 | Python | false | false | 11,101 | py | from unittest import TestCase
from chatterbot.adapters.storage import JsonDatabaseAdapter
from chatterbot.conversation import Statement, Response
class JsonAdapterTestCase(TestCase):
def setUp(self):
"""
Instantiate the adapter.
"""
from random import randint
# Generate a random name for the database
database_name = str(randint(0, 9000))
self.adapter = JsonDatabaseAdapter(database=database_name)
def tearDown(self):
"""
Remove the test database.
"""
self.adapter.drop()
class JsonDatabaseAdapterTestCase(JsonAdapterTestCase):
    """Exercise the core CRUD behavior of the JSON database adapter."""

    def test_count_returns_zero(self):
        """
        The count method should return a value of 0
        when nothing has been saved to the database.
        """
        self.assertEqual(self.adapter.count(), 0)
    def test_count_returns_value(self):
        """
        The count method should return a value of 1
        when one item has been saved to the database.
        """
        statement = Statement("Test statement")
        self.adapter.update(statement)
        self.assertEqual(self.adapter.count(), 1)
    def test_statement_not_found(self):
        """
        Test that None is returned by the find method
        when a matching statement is not found.
        """
        self.assertEqual(self.adapter.find("Non-existant"), None)
    def test_statement_found(self):
        """
        Test that a matching statement is returned
        when it exists in the database.
        """
        statement = Statement("New statement")
        self.adapter.update(statement)
        found_statement = self.adapter.find("New statement")
        self.assertNotEqual(found_statement, None)
        self.assertEqual(found_statement.text, statement.text)
    def test_update_adds_new_statement(self):
        """Updating with an unseen statement should insert it."""
        statement = Statement("New statement")
        self.adapter.update(statement)
        statement_found = self.adapter.find("New statement")
        self.assertNotEqual(statement_found, None)
        self.assertEqual(statement_found.text, statement.text)
    def test_update_modifies_existing_statement(self):
        """Updating an existing statement should persist its new responses."""
        statement = Statement("New statement")
        self.adapter.update(statement)
        # Check the initial values
        found_statement = self.adapter.find(statement.text)
        self.assertEqual(
            len(found_statement.in_response_to), 0
        )
        # Update the statement value
        statement.add_response(
            Statement("New response")
        )
        self.adapter.update(statement)
        # Check that the values have changed
        found_statement = self.adapter.find(statement.text)
        self.assertEqual(
            len(found_statement.in_response_to), 1
        )
    def test_get_random_returns_statement(self):
        """get_random should return a statement stored in the database."""
        statement = Statement("New statement")
        self.adapter.update(statement)
        random_statement = self.adapter.get_random()
        self.assertEqual(random_statement.text, statement.text)
    def test_find_returns_nested_responses(self):
        """Responses saved with a statement should be returned by find."""
        response_list = [
            Response("Yes"),
            Response("No")
        ]
        statement = Statement(
            "Do you like this?",
            in_response_to=response_list
        )
        self.adapter.update(statement)
        result = self.adapter.find(statement.text)
        self.assertIn("Yes", result.in_response_to)
        self.assertIn("No", result.in_response_to)
    def test_multiple_responses_added_on_update(self):
        """All responses on a statement should be saved on update."""
        statement = Statement(
            "You are welcome.",
            in_response_to=[
                Response("Thank you."),
                Response("Thanks.")
            ]
        )
        self.adapter.update(statement)
        result = self.adapter.find(statement.text)
        self.assertEqual(len(result.in_response_to), 2)
        self.assertIn(statement.in_response_to[0], result.in_response_to)
        self.assertIn(statement.in_response_to[1], result.in_response_to)
    def test_update_saves_statement_with_multiple_responses(self):
        """A statement saved with several responses should keep them all."""
        statement = Statement(
            "You are welcome.",
            in_response_to=[
                Response("Thank you."),
                Response("Thanks."),
            ]
        )
        self.adapter.update(statement)
        response = self.adapter.find(statement.text)
        self.assertEqual(len(response.in_response_to), 2)
    def test_getting_and_updating_statement(self):
        """Adding the same response twice should increment its occurrence count."""
        statement = Statement("Hi")
        self.adapter.update(statement)
        statement.add_response(Response("Hello"))
        statement.add_response(Response("Hello"))
        self.adapter.update(statement)
        response = self.adapter.find(statement.text)
        self.assertEqual(len(response.in_response_to), 1)
        self.assertEqual(response.in_response_to[0].occurrence, 2)
    def test_deserialize_responses(self):
        """deserialize_responses should turn each dict into a response object."""
        response_list = [
            {"text": "Test", "occurrence": 3},
            {"text": "Testing", "occurrence": 1},
        ]
        results = self.adapter.deserialize_responses(response_list)
        self.assertEqual(len(results), 2)
    def test_remove(self):
        """A removed statement should no longer be findable."""
        text = "Sometimes you have to run before you can walk."
        statement = Statement(text)
        self.adapter.update(statement)
        self.adapter.remove(statement.text)
        result = self.adapter.find(text)
        self.assertIsNone(result)
    def test_remove_response(self):
        """Removing a statement should also remove it from response lists."""
        text = "Sometimes you have to run before you can walk."
        statement = Statement(
            "A test flight is not recommended at this design phase.",
            in_response_to=[Response(text)]
        )
        self.adapter.update(statement)
        self.adapter.remove(statement.text)
        results = self.adapter.filter(in_response_to__contains=text)
        self.assertEqual(results, [])
class JsonDatabaseAdapterFilterTestCase(JsonAdapterTestCase):
    """Exercise the filter method of the JSON database adapter."""

    def setUp(self):
        """Create two related statements used as fixtures by most tests."""
        super(JsonDatabaseAdapterFilterTestCase, self).setUp()
        self.statement1 = Statement(
            "Testing...",
            in_response_to=[
                Response("Why are you counting?")
            ]
        )
        self.statement2 = Statement(
            "Testing one, two, three.",
            in_response_to=[
                Response("Testing...")
            ]
        )
    def test_filter_text_no_matches(self):
        """Filtering on unseen text should return no results."""
        self.adapter.update(self.statement1)
        results = self.adapter.filter(text="Howdy")
        self.assertEqual(len(results), 0)
    def test_filter_in_response_to_no_matches(self):
        """Filtering on an unseen response list should return no results."""
        self.adapter.update(self.statement1)
        results = self.adapter.filter(
            in_response_to=[Response("Maybe")]
        )
        self.assertEqual(len(results), 0)
    def test_filter_equal_results(self):
        """Filtering on an empty response list should match both statements."""
        statement1 = Statement(
            "Testing...",
            in_response_to=[]
        )
        statement2 = Statement(
            "Testing one, two, three.",
            in_response_to=[]
        )
        self.adapter.update(statement1)
        self.adapter.update(statement2)
        results = self.adapter.filter(in_response_to=[])
        self.assertEqual(len(results), 2)
        self.assertIn(statement1, results)
        self.assertIn(statement2, results)
    def test_filter_contains_result(self):
        """in_response_to__contains should match the statement with that response."""
        self.adapter.update(self.statement1)
        self.adapter.update(self.statement2)
        results = self.adapter.filter(
            in_response_to__contains="Why are you counting?"
        )
        self.assertEqual(len(results), 1)
        self.assertIn(self.statement1, results)
    def test_filter_contains_no_result(self):
        """in_response_to__contains with unseen text should return nothing."""
        self.adapter.update(self.statement1)
        results = self.adapter.filter(
            in_response_to__contains="How do you do?"
        )
        self.assertEqual(results, [])
    def test_filter_multiple_parameters(self):
        """Combining filter parameters should intersect the matches."""
        self.adapter.update(self.statement1)
        self.adapter.update(self.statement2)
        results = self.adapter.filter(
            text="Testing...",
            in_response_to__contains="Why are you counting?"
        )
        self.assertEqual(len(results), 1)
        self.assertIn(self.statement1, results)
    def test_filter_multiple_parameters_no_results(self):
        """Combined parameters that match nothing should return no results."""
        self.adapter.update(self.statement1)
        self.adapter.update(self.statement2)
        results = self.adapter.filter(
            text="Test",
            in_response_to__contains="Not an existing response."
        )
        self.assertEqual(len(results), 0)
    def test_filter_no_parameters(self):
        """
        If no parameters are passed to the filter,
        then all statements should be returned.
        """
        statement1 = Statement("Testing...")
        statement2 = Statement("Testing one, two, three.")
        self.adapter.update(statement1)
        self.adapter.update(statement2)
        results = self.adapter.filter()
        self.assertEqual(len(results), 2)
    def test_filter_returns_statement_with_multiple_responses(self):
        """A filtered statement should keep its full response list."""
        statement = Statement(
            "You are welcome.",
            in_response_to=[
                Response("Thanks."),
                Response("Thank you.")
            ]
        )
        self.adapter.update(statement)
        response = self.adapter.filter(
            in_response_to__contains="Thanks."
        )
        # Get the first response
        response = response[0]
        self.assertEqual(len(response.in_response_to), 2)
    def test_response_list_in_results(self):
        """
        If a statement with response values is found using
        the filter method, they should be returned as
        response objects.
        """
        statement = Statement(
            "The first is to help yourself, the second is to help others.",
            in_response_to=[
                Response("Why do people have two hands?")
            ]
        )
        self.adapter.update(statement)
        found = self.adapter.filter(text=statement.text)
        self.assertEqual(len(found[0].in_response_to), 1)
        self.assertEqual(type(found[0].in_response_to[0]), Response)
class ReadOnlyJsonDatabaseAdapterTestCase(JsonAdapterTestCase):
    """Verify that a read-only adapter never writes to the database."""

    def test_update_does_not_add_new_statement(self):
        """With read_only set, update must not insert a new statement."""
        self.adapter.read_only = True
        unsaved = Statement("New statement")
        self.adapter.update(unsaved)
        lookup = self.adapter.find("New statement")
        self.assertEqual(lookup, None)

    def test_update_does_not_modify_existing_statement(self):
        """With read_only set, update must not alter a stored statement."""
        original = Statement("New statement")
        self.adapter.update(original)

        # Flip to read-only, then attempt to attach a response.
        self.adapter.read_only = True
        original.add_response(
            Statement("New response")
        )
        self.adapter.update(original)

        lookup = self.adapter.find("New statement")
        self.assertEqual(lookup.text, original.text)
        self.assertEqual(
            len(lookup.in_response_to), 0
        )
| [
"gunthercx@gmail.com"
] | gunthercx@gmail.com |
55c34c0724af09f837aabbb9a2eccc295dfd9049 | 60b35d9219c3cafd5be4c176ceb9694cc7e3f0aa | /planner.py | f9e2b91526eaa6532fd6464c9d70361fca11a84d | [] | no_license | mikesuhan/canvas_automation | 3b201290e4df0401614ffd24ada7b6da2582818b | 8274352ce526a8c2c70e0e8a3428924f72c7797f | refs/heads/main | 2023-02-12T07:37:48.150138 | 2021-01-11T19:30:44 | 2021-01-11T19:30:44 | 326,797,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,749 | py | import datetime
from dateutil.parser import parse
def date_range(first_day=datetime.datetime(2021, 1, 11, 8, 30), last_day=datetime.datetime(2021, 5, 7, 8, 30)):
    """Return one datetime per day from first_day through last_day, ascending.

    The sequence is anchored at last_day: each entry is last_day minus a
    whole number of days, so every entry carries last_day's time of day.
    """
    span = (last_day - first_day).days
    return [last_day - datetime.timedelta(days=span - i) for i in range(span + 1)]
def session_range(dates, *times, holidays=('jan 18 2021',)):
    """
    Filter a range of dates down to (start, end) class-session datetimes.

    Arguments:
        dates: a list of datetime objects to consider
        *times: tuples of the day, start time, and optional end time of
            classes, e.g. ('Monday', '8am', '10am')

    Keyword Arguments:
        holidays: iterable of dates (strings or datetimes) to exclude from
            the output.  The default is MLK day inside the default
            date_range() semester.

    Returns:
        list of (start_at, end_at) tuples; end_at is None when a session
        tuple supplies no end time.
    """
    if holidays is None:
        holidays = []
    # Parse holiday strings once up front instead of re-parsing them for
    # every candidate date.
    holiday_dates = [parse(h) if isinstance(h, str) else h for h in holidays]
    sessions = []
    for date in dates:
        # Skip holidays.  The original comparison checked holiday.year
        # against itself (always true), which wrongly excluded the holiday's
        # month/day in *every* year; compare against the date's year instead.
        if any(h.year == date.year and h.month == date.month and h.day == date.day
               for h in holiday_dates):
            continue
        day = date.strftime("%a").lower()
        for session in times:
            session_day, session_times = session[0], session[1:]
            if session_day.lower().startswith(day):
                start_t = parse(session_times[0])
                start_at = date.replace(hour=start_t.hour, minute=start_t.minute)
                if len(session_times) > 1:
                    end_t = parse(session_times[1])
                    end_at = date.replace(hour=end_t.hour, minute=end_t.minute)
                else:
                    end_at = None
                sessions.append((start_at, end_at))
    return sessions
| [
"you@example.com"
] | you@example.com |
91cd2668c52a78e788bcdfe46bbb7e63ca3de71d | 395f93442d1d41ad228d62d4c15d197dbc1d1363 | /apps/user_operation/migrations/0004_auto_20181204_0942.py | fea67e4bcad34420f151c4ff625b83f50c1fd67b | [] | no_license | vevoly/ShopDjango | e0e310538eb4cdad0977f8ced1da6382a1441c67 | 8c25cf35797951c2a2d16933afedfa28689b597c | refs/heads/master | 2020-04-23T22:03:31.200141 | 2019-02-19T14:34:57 | 2019-02-19T14:34:57 | 171,489,038 | 0 | 0 | null | 2020-02-12T02:44:05 | 2019-02-19T14:33:04 | JavaScript | UTF-8 | Python | false | false | 1,108 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-12-04 09:42
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters the add_time field on three
    # user-operation models.
    # NOTE(review): the defaults below are literal timestamps frozen at the
    # time `makemigrations` ran — presumably the model used a call like
    # datetime.now() instead of passing the callable; verify the model
    # definition (the usual fix is default=django.utils.timezone.now).

    dependencies = [
        ('user_operation', '0003_auto_20181117_1121'),
    ]
    operations = [
        migrations.AlterField(
            model_name='useraddress',
            name='add_time',
            field=models.DateTimeField(default=datetime.datetime(2018, 12, 4, 9, 42, 7, 105424), help_text='添加时间', verbose_name='添加时间'),
        ),
        migrations.AlterField(
            model_name='userfav',
            name='add_time',
            field=models.DateTimeField(default=datetime.datetime(2018, 12, 4, 9, 42, 7, 103424), help_text='添加时间', verbose_name='添加时间'),
        ),
        migrations.AlterField(
            model_name='userleavingmessage',
            name='add_time',
            field=models.DateTimeField(default=datetime.datetime(2018, 12, 4, 9, 42, 7, 104424), help_text='添加时间', verbose_name='添加时间'),
        ),
    ]
| [
"jevoly@163.com"
] | jevoly@163.com |
6f1155fa56134bb787b2fc17e62b2b06bf1c3850 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_blitzes.py | e5dac4a2227faeea262af941d833107c53afb89e | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py |
from xai.brain.wordbase.verbs._blitz import _BLITZ
# class header
class _BLITZES(_BLITZ):
    """Wordbase entry for the inflected verb form "blitzes"."""

    def __init__(self):
        # Initialize the base word, then override the identifying fields.
        _BLITZ.__init__(self)
        self.name = "BLITZES"
        self.specie = 'verbs'
        self.basic = "blitz"
        self.jsondata = {}
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
63b1738112e6675083d3dacc3632b6ac68401436 | 7b556e8c35668a336e381ca30c61110408abf69e | /HSTB/kluster/gui/kluster_swathview.py | eb43c902d2381ecd9ff428217f97cdc828874b17 | [
"CC0-1.0"
] | permissive | OceanXplorer/kluster | 54775c3c4a93d9d51609248005271b1d7d7529c1 | bffddca5de7fd1a0eb8d5bf6b87252b84adc0636 | refs/heads/master | 2023-03-15T14:22:51.569255 | 2021-03-18T21:27:11 | 2021-03-18T21:27:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,447 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# vispy: testskip
# -----------------------------------------------------------------------------
# 2016, Scott Paine
# Distributed under the terms of the new BSD License.
# -----------------------------------------------------------------------------
"""
**********
Wiggly Bar
**********
Usage of VisPy to numerically simulate and view a simple physics model.
.. image:: http://i.imgur.com/ad0s9lB.png
This is a simple example of using VisPy to simulate a system with
two springs, a pivot, and a mass.
The system evolves in a nonlinear fashion, according to two equations:
.. image:: http://i.imgur.com/8reci4N.png
In these equations, the J term is the polar moment of inertia of the rod
given by:
.. image:: http://i.imgur.com/94cI1TL.png
The system has the option to update once every step using the
`Euler <https://en.wikipedia.org/wiki/Euler_method>`_ method
or a more stable third-order
`Runge-Kutta <https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods>`_
method. The instability of the Euler Method becomes apparent as the time step
is increased.
"""
from __future__ import division, print_function, absolute_import
from vispy import app, visuals
from vispy.visuals import transforms
from vispy.io import load_data_file
import sys
import numpy as np
import string
import logging
import traceback
# To switch between PyQt5 and PySide2 bindings just change the from import
from HSTB.kluster.gui.backends._qt import QtGui, QtCore, QtWidgets, Signal
logger = logging.getLogger(__name__)
# Update methods accepted by the simulation (compared case-insensitively).
VALID_METHODS = ['euler', 'runge-kutta']
# (label, minimum, maximum, spinbox type, initial value) rows; Paramlist
# unpacks these five fields to seed its props dict.
PARAMETERS = [('d1', 0.0, 10.0, 'double', 0.97),
              ('d2', 0.0, 10.0, 'double', 0.55),
              ('m', 0.01, 100.0, 'double', 2.0),
              ('M', 0.01, 100.0, 'double', 12.5),
              ('k1', 0.01, 75.0, 'double', 1.35),
              ('k2', 0.01, 75.0, 'double', 0.50),
              ('b', 1.0, 1000.0, 'double', 25.75),
              ('time step', 0.001, 1.0, 'double', 1 / 60),
              ('x', -0.25, 0.25, 'double', -0.01),
              ('x dot', -10.0, 10.0, 'double', -0.12),
              ('theta', -np.pi / 5, np.pi / 5, 'double', 0.005),
              ('theta dot', -np.pi / 2, np.pi / 2, 'double', 0.0),
              ('scale', 5, 500, 'int', 50),
              ('font size', 6.0, 128.0, 'double', 24.0)]
# Maps the UI-facing parameter labels above to the keyword names used by
# WigglyBar / Paramlist (e.g. 'M' -> 'big_m', 'time step' -> 'dt').
CONVERSION_DICT = {'d1': 'd1', 'd2': 'd2', 'm': 'little_m', 'M': 'big_m',
                   'k1': 'spring_k1', 'k2': 'spring_k2', 'b': 'b',
                   'x': 'x', 'x dot': 'x_dot', 'theta': 'theta',
                   'theta dot': 'theta_dot', 'scale': 'scale',
                   'time step': 'dt', 'font size': 'font_size'}
def make_spiral(num_points=100, num_turns=4, height=12, radius=2.0,
                xnot=None, ynot=None, znot=None):
    """
    Build the (x, y, z) coordinates of a helix climbing in +z.

    Parameters
    ----------
    num_points : int
        Number of segments to sample the helix over; the returned list has
        num_points + 1 entries (the start point plus one per segment).
    num_turns : int
        Number of full revolutions over the helix's height.
    height : float
        Total rise in z across the helix.
    radius : float
        Radius of the coils (the helix spans 2*radius across).
    xnot, ynot, znot : float, optional
        Starting coordinates; default to (radius, 0, -4). The starting
        angle is derived from (xnot, ynot), so the helix is centered on
        the z-axis at x=0, y=0.

    Returns
    -------
    coord_list : list of tuples
        (x, y, z) positions along the helix, starting point first.
    """
    z = -4 if znot is None else znot
    x = radius if xnot is None else xnot
    y = 0 if ynot is None else ynot
    # Starting angle implied by the initial (x, y) position.
    angle = np.arctan2(y, x)
    helix = [(x, y, z)]
    for _ in range(num_points):
        # Advance one segment: equal shares of the total rise and rotation.
        z += height / num_points
        angle += 2 * np.pi * num_turns / num_points
        helix.append((np.cos(angle) * radius, np.sin(angle) * radius, z))
    return helix
def make_spring(num_points=300, num_turns=4, height=12, radius=2.0,
                xnot=None, ynot=None, znot=None):
    """
    Generate a list of points corresponding to a spring.
    Parameters
    ----------
    num_points : int
        Number of points to map spring over. More points means a rounder
        spring.
    num_turns : int
        Number of coils in the spring
    height : float
        The height of the spring. Keep it in whatever units the rest of the
        spring is in.
    radius : float
        The radius of the coils. The spring will end up being
        2*radius wide.
    xnot : float
        Initial x-coordinate for the spring coordinates to start at.
    ynot : float
        Initial y-coordinate for the spring coordinates to start at.
    znot : float
        Initial z-coordinate for the spring coordinates to start at.
    Returns
    -------
    coord_list: list of tuples
        Coordinate list of (x, y, z) positions for the spring
    Notes
    -----
    Right now, this assumes the center is at x=0, y=0. Later, it might be
    good to add in stuff to change that.
    Right now, the length of the "ends" is 10% of the overall length, as
    well as a small "turn" that is length radius / 2. In the future, maybe
    there could be a kwarg to set the length of the sides of the spring.
    For now, 10% looks good.
    """
    coords_list = []
    # 10% of the points go to each straight end segment.
    init_pts = num_points // 10
    znot = 0 if znot is None else znot
    xnot = 0 if xnot is None else xnot
    ynot = 0 if ynot is None else ynot
    coords_list.append((xnot, ynot, znot))
    # Stage 1: straight lead-in along +z.
    for _ in range(init_pts):
        znot += height / num_points
        coords_list.append((xnot, ynot, znot))
    hold_z = znot
    # Stage 2: quarter-circle bend (length radius / 2) out to the coil radius.
    for i in range(init_pts // 2):
        small_theta = (i + 1) * np.pi / init_pts
        xnot = radius / 2 * (1 - np.cos(small_theta))
        znot = hold_z + radius / 2 * np.sin(small_theta)
        coords_list.append((xnot, ynot, znot))
    # Stage 3: the coiled body, delegated to make_spiral; its height is the
    # total minus the straight ends and the two bends.
    coords_list += make_spiral(num_points=num_points - 3 * init_pts,
                               num_turns=num_turns,
                               height=(
                                   height -
                                   (91 * height / num_points) -
                                   radius / 2
                               ),
                               radius=radius,
                               xnot=xnot,
                               ynot=ynot,
                               znot=znot)
    hold_z = coords_list[-1][-1]
    # Stage 4: mirror-image bend back toward the z-axis.
    for i in range(init_pts // 2):
        small_theta = np.pi / 2 - (i + 1) * np.pi / init_pts
        xnot = radius / 2 * (1 - np.cos(small_theta))
        znot = hold_z + radius / 2 * np.cos(small_theta)
        coords_list.append((xnot, ynot, znot))
    xnot = 0.0
    znot += height / num_points
    # Stage 5: straight lead-out, then pin the final point exactly at
    # (0, 0, height).
    for _ in range(init_pts):
        znot += height / num_points
        coords_list.append((xnot, ynot, znot))
    coords_list.append((0, 0, height))
    return coords_list
class WigglyBar(app.Canvas):
    """VisPy canvas that renders and time-steps the wiggly-bar system.

    The scene holds a pivoting rod, two springs, a sliding mass, an optional
    pivot marker, and two text readouts (elapsed time and update method).
    A timer drives the numerical integration (Euler or order-3 Runge-Kutta)
    and re-applies the visual transforms every frame.
    """

    def __init__(self, d1=None, d2=None, little_m=None, big_m=None,
                 spring_k1=None, spring_k2=None, b=None,
                 x=None, x_dot=None, theta=None, theta_dot=None,
                 px_len=None, scale=None, pivot=False, method='Euler', dt=None,
                 font_size=None):
        """
        Main VisPy Canvas for simulation of physical system.
        Parameters
        ----------
        d1 : float
            Length of rod (in meters) from pivot to upper spring.
        d2 : float
            Length of rod (in meters) from pivot to lower spring.
        little_m : float
            Mass of attached cube (in kilograms).
        big_m : float
            Mass of rod (in kilograms).
        spring_k1 : float
            Spring constant of lower spring (in N/m).
        spring_k2 : float
            Spring constant of upper spring (in N/m).
        b : float
            Coefficient of quadratic sliding friction (in kg/m).
        x : float
            Initial x-position of mass (in m).
        x_dot : float
            Initial x-velocity of mass (in m/s).
        theta : float
            Initial angle of rod, with respect to vertical (in radians).
        theta_dot : float
            Initial angular velocity of rod (in rad/s).
        px_len : int
            Length of the rod, in pixels.
        scale : int
            Scaling factor to change size of elements.
        pivot : bool
            Switch for showing/hiding pivot point.
        method : str
            Method to use for updating.
        dt : float
            Time step for simulation.
        font_size : float
            Size of font for text elements, in points.
        Notes
        -----
        As of right now, the only supported methods are "euler" or
        "runge-kutta". These correspond to an Euler method or an
        order 3 Runge-Kutta method for updating x, theta, x dot, and theta dot.
        """
        app.Canvas.__init__(self, title='Wiggly Bar', size=(800, 800),
                            create_native=False)
        # Some initialization constants that won't change
        self.standard_length = 0.97 + 0.55
        self.center = np.asarray((500, 450))
        self.visuals = []
        self._set_up_system(
            d1=d1, d2=d2, little_m=little_m, big_m=big_m,
            spring_k1=spring_k1, spring_k2=spring_k2, b=b,
            x=x, x_dot=x_dot, theta=theta, theta_dot=theta_dot,
            px_len=px_len, scale=scale, pivot=pivot, method=method,
            dt=dt, font_size=font_size
        )
        # Pixel offset of the pivot from the rod's center, for the current angle.
        piv_x_y_px = np.asarray((
            self.pivot_loc_px * np.sin(self.theta),
            -1 * self.pivot_loc_px * (np.cos(self.theta))
        ))
        # Make the spring points
        points = make_spring(height=self.px_len / 4, radius=self.px_len / 24)
        # Put up a text visual to display time info
        self.font_size = 24. if font_size is None else font_size
        self.text = visuals.TextVisual('0:00.00',
                                       color='white',
                                       pos=[50, 250, 0],
                                       anchor_x='left',
                                       anchor_y='bottom')
        self.text.font_size = self.font_size
        # Let's put in more text so we know what method is being used to
        # update this
        self.method_text = visuals.TextVisual(
            'Method: {}'.format(self.method),
            color='white',
            pos=[50, 250, 0],
            anchor_x='left',
            anchor_y='top'
        )
        self.method_text.font_size = 2 / 3 * self.font_size
        # Get the pivoting bar ready
        self.rod = visuals.BoxVisual(width=self.px_len / 40,
                                     height=self.px_len / 40,
                                     depth=self.px_len,
                                     color='white')
        self.rod.transform = transforms.MatrixTransform()
        self.rod.transform.scale(
            (self.scale, self.scale * self.rod_scale, 0.0001)
        )
        self.rod.transform.rotate(np.rad2deg(self.theta), (0, 0, 1))
        self.rod.transform.translate(self.center - piv_x_y_px)
        # Show the pivot point (optional)
        pivot_center = (self.center[0], self.center[1], -self.px_len / 75)
        self.center_point = visuals.SphereVisual(radius=self.px_len / 75,
                                                 color='red')
        self.center_point.transform = transforms.MatrixTransform()
        self.center_point.transform.scale((self.scale, self.scale, 0.0001))
        self.center_point.transform.translate(pivot_center)
        # Get the upper spring ready.
        self.spring_2 = visuals.TubeVisual(
            points, radius=self.px_len / 100, color=(0.5, 0.5, 1, 1)
        )
        self.spring_2.transform = transforms.MatrixTransform()
        self.spring_2.transform.rotate(90, (0, 1, 0))
        self.spring_2.transform.scale((self.scale, self.scale, 0.0001))
        self.spring_2.transform.translate(self.center + self.s2_loc)
        # Get the lower spring ready.
        self.spring_1 = visuals.TubeVisual(
            points, radius=self.px_len / 100, color=(0.5, 0.5, 1, 1)
        )
        self.spring_1.transform = transforms.MatrixTransform()
        self.spring_1.transform.rotate(90, (0, 1, 0))
        self.spring_1.transform.scale(
            (
                self.scale *
                (1.0 - (self.x * self.px_per_m) / (self.scale * self.px_len / 2)),
                self.scale,
                0.0001
            )
        )
        self.spring_1.transform.translate(self.center + self.s1_loc)
        # Finally, prepare the mass that is being moved
        self.mass = visuals.BoxVisual(
            width=self.px_len / 4, height=self.px_len / 8,
            depth=self.px_len / 4, color='white'
        )
        self.mass.transform = transforms.MatrixTransform()
        self.mass.transform.scale((self.scale, self.scale, 0.0001))
        self.mass.transform.translate(self.center + self.mass_loc)
        # Append all the visuals
        self.visuals.append(self.center_point)
        self.visuals.append(self.rod)
        self.visuals.append(self.spring_2)
        self.visuals.append(self.spring_1)
        self.visuals.append(self.mass)
        self.visuals.append(self.text)
        self.visuals.append(self.method_text)
        # Set up a timer to update the image and give a real-time rendering
        self._timer = app.Timer('auto', connect=self.on_timer, start=True)

    def on_draw(self, ev):
        # Stolen from previous - just clears the screen and redraws stuff.
        # The pivot marker is skipped unless show_pivot is enabled.
        self.context.set_clear_color((0, 0, 0, 1))
        self.context.set_viewport(0, 0, *self.physical_size)
        self.context.clear()
        for vis in self.visuals:
            if vis is self.center_point and not self.show_pivot:
                continue
            else:
                vis.draw()

    def on_resize(self, event):
        # Set canvas viewport and reconfigure visual transforms to match.
        vp = (0, 0, self.physical_size[0], self.physical_size[1])
        self.context.set_viewport(*vp)
        for vis in self.visuals:
            vis.transforms.configure(canvas=self, viewport=vp)

    def on_timer(self, ev):
        # One animation frame: advance the physics, then rebuild every
        # visual's transform from the new state.
        # Update x, theta, x_dot, theta_dot
        self.params_update(dt=self.dt, method=self.method)
        # Calculate change for the upper spring, relative to its starting point
        extra_term = self.theta - self.theta_not
        trig_junk = (
            np.sin(self.theta_not) * (np.cos(extra_term) - 1) +
            np.cos(self.theta_not) * np.sin(extra_term)
        )
        delta_x = self.d1 * self.px_per_m * trig_junk
        net_s2_scale = (1 - (delta_x / (self.scale * self.px_len / 4)))
        # Calculate change for the lower spring, relative to something
        # arbitrary so I didn't have horrors mathematically
        trig_junk_2 = np.sin(self.theta_not) - np.sin(self.theta)
        first_term = self.d2 * trig_junk_2
        top_term = (first_term - self.x) * self.px_per_m
        net_s1_scale = 1 + top_term / self.s1_l_not
        self.s1_loc[0] = -0.5 * (
            -self.x * self.px_per_m +
            self.s1_l_not +
            self.d2 * self.px_per_m * (np.sin(self.theta) + np.sin(self.theta_not))
        )
        self.s1_loc[0] -= 0.5 * net_s1_scale * self.s1_l_not
        # Calculate the new pivot location - this is important because the
        # rotation occurs about
        # the center of the rod, so it has to be offset appropriately
        piv_x_y_px = np.asarray((
            self.pivot_loc_px * np.sin(self.theta),
            -1 * self.pivot_loc_px * np.cos(self.theta)
        ))
        # Calculate the new mass x location, relative (again) to some
        # simple parameter where x=0
        self.mass_loc[0] = self.x_is_0 + self.x * self.px_per_m
        # Figure out how much time has passed
        millis_passed = int(100 * (self.t % 1))
        sec_passed = int(self.t % 60)
        min_passed = int(self.t // 60)
        # Apply the necessary transformations to the rod
        self.rod.transform.reset()
        self.rod.transform.scale(
            (self.scale, self.scale * self.rod_scale, 0.0001)
        )
        self.rod.transform.rotate(np.rad2deg(self.theta), (0, 0, 1))
        self.rod.transform.translate(self.center - piv_x_y_px)
        # Redraw and rescale the upper spring
        self.spring_2.transform.reset()
        self.spring_2.transform.rotate(90, (0, 1, 0))
        self.spring_2.transform.scale((net_s2_scale * self.scale,
                                       self.scale,
                                       0.0001))
        self.spring_2.transform.translate(self.center +
                                          self.s2_loc +
                                          np.asarray([delta_x, 0]))
        # Redraw and rescale the lower spring
        # (the hardest part to get, mathematically)
        self.spring_1.transform.reset()
        self.spring_1.transform.rotate(90, (0, 1, 0))
        self.spring_1.transform.scale((net_s1_scale * self.scale,
                                       self.scale,
                                       0.0001))
        self.spring_1.transform.translate(self.center +
                                          self.s1_loc)
        # Redraw and rescale the mass
        self.mass.transform.reset()
        self.mass.transform.scale((self.scale, self.scale, 0.0001))
        self.mass.transform.translate(self.center + self.mass_loc)
        # Update the timer with how long it's been
        self.text.text = '{:0>2d}:{:0>2d}.{:0>2d}'.format(min_passed,
                                                          sec_passed,
                                                          millis_passed)
        # Trigger all of the drawing and updating
        self.update()

    def params_update(self, dt, method='euler'):
        # Uses either Euler method or Runge-Kutta,
        # depending on your input to "method"
        if method.lower() == 'euler':
            self._euler_update(dt)
        elif method.lower() == 'runge-kutta':
            self._runge_kutta_update(dt)

    def _euler_update(self, dt):
        """Update system using Euler's method (equivalent to order 1
        Runge-Kutta Method).
        """
        # Calculate the second derivative of x
        x_dd_t1 = -self.b * self.x_dot * np.abs(self.x_dot)
        x_dd_t2 = -self.spring_k1 * (self.x + self.d2 * self.theta)
        x_dot_dot = (x_dd_t1 + x_dd_t2) / self.little_m
        # Calculate the second derivative of theta
        term1 = -self.spring_k1 * self.d2 * self.x
        term2 = (
            -self.theta *
            (self.spring_k1 * (self.d2 ** 2) + self.spring_k2 * (self.d1 ** 2))
        )
        theta_dot_dot = (term1 + term2) / self.j_term
        # Update everything appropriately
        self.t += dt
        self.x += dt * self.x_dot
        self.theta += dt * self.theta_dot
        self.x_dot += dt * x_dot_dot
        self.theta_dot += dt * theta_dot_dot

    def _runge_kutta_update(self, dt):
        """Update using order 3 Runge-Kutta Method.
        """
        # State vector layout: [x_dot, theta_dot, x, theta].
        info_vector = np.asarray(
            [self.x_dot, self.theta_dot, self.x, self.theta]
        ).copy()
        # k1: derivatives evaluated at the current state.
        t1a = -self.b * info_vector[0] * np.abs(info_vector[0])
        t1b = -self.spring_k1 * (info_vector[2] + self.d2 * info_vector[3])
        t2a = -self.spring_k1 * self.d2 * info_vector[2]
        t2b = -info_vector[3] * (
            self.spring_k1 * (self.d2 ** 2) + self.spring_k2 * (self.d1 ** 2)
        )
        k1 = [
            (t1a + t1b) / self.little_m,
            (t2a + t2b) / self.j_term,
            info_vector[0],
            info_vector[1]
        ]
        k1 = np.asarray(k1) * dt
        # k2: derivatives at the midpoint estimate.
        updated_est = info_vector + 0.5 * k1
        t1a = -self.b * updated_est[0] * np.abs(updated_est[0])
        t1b = -self.spring_k1 * (updated_est[2] + self.d2 * updated_est[3])
        t2a = -self.spring_k1 * self.d2 * updated_est[2]
        t2b = -updated_est[3] * (
            self.spring_k1 * (self.d2 ** 2) + self.spring_k2 * (self.d1 ** 2)
        )
        k2 = [
            (t1a + t1b) / self.little_m,
            (t2a + t2b) / self.j_term,
            updated_est[0],
            updated_est[1]
        ]
        k2 = np.asarray(k2) * dt
        # k3: derivatives at the extrapolated estimate (-k1 + 2*k2).
        updated_est = info_vector - k1 + 2 * k2
        t1a = -self.b * updated_est[0] * np.abs(updated_est[0])
        t1b = -self.spring_k1 * (updated_est[2] + self.d2 * updated_est[3])
        t2a = -self.spring_k1 * self.d2 * updated_est[2]
        t2b = -updated_est[3] * (
            self.spring_k1 * (self.d2 ** 2) + self.spring_k2 * (self.d1 ** 2)
        )
        k3 = [
            (t1a + t1b) / self.little_m,
            (t2a + t2b) / self.j_term,
            updated_est[0],
            updated_est[1]
        ]
        k3 = np.asarray(k3) * dt
        # Weighted combination (1/6)(k1 + 4*k2 + k3) completes the step.
        final_est = info_vector + (1 / 6) * (k1 + 4 * k2 + k3)
        self.x_dot, self.theta_dot, self.x, self.theta = final_est.copy()
        self.t += dt

    def reset_parms(self, d1=None, d2=None, little_m=None, big_m=None,
                    spring_k1=None, spring_k2=None, b=None,
                    x=None, x_dot=None, theta=None, theta_dot=None,
                    px_len=None, scale=None, pivot=False, method='Euler',
                    dt=None, font_size=None):
        """
        Reset system with a new set of paramters.
        Parameters
        ----------
        d1 : float
            Length of rod (in meters) from pivot to upper spring.
        d2 : float
            Length of rod (in meters) from pivot to lower spring.
        little_m : float
            Mass of attached cube (in kilograms).
        big_m : float
            Mass of rod (in kilograms).
        spring_k1 : float
            Spring constant of lower spring (in N/m).
        spring_k2 : float
            Spring constant of upper spring (in N/m).
        b : float
            Coefficient of quadratic sliding friction (in kg/m).
        x : float
            Initial x-position of mass (in m).
        x_dot : float
            Initial x-velocity of mass (in m/s).
        theta : float
            Initial angle of rod, with respect to vertical (in radians).
        theta_dot : float
            Initial angular velocity of rod (in rad/s).
        px_len : int
            Length of the rod, in pixels.
        scale : int
            Scaling factor to change size of elements.
        pivot : bool
            Switch for showing/hiding pivot point.
        method : str
            Method to use for updating.
        dt : float
            Time step for simulation.
        font_size : float
            Size of font for text elements, in points.
        Notes
        -----
        Since the time is reset, the system is reset as well by calling
        this method.
        """
        self._set_up_system(
            d1=d1, d2=d2, little_m=little_m, big_m=big_m,
            spring_k1=spring_k1, spring_k2=spring_k2, b=b,
            x=x, x_dot=x_dot, theta=theta, theta_dot=theta_dot,
            px_len=px_len, scale=scale, pivot=pivot, method=method,
            dt=dt, font_size=font_size
        )

    def _set_up_system(self, d1=None, d2=None, little_m=None, big_m=None,
                       spring_k1=None, spring_k2=None, b=None,
                       x=None, x_dot=None, theta=None, theta_dot=None,
                       px_len=None, scale=None, pivot=False, method='Euler',
                       dt=None, font_size=None):
        """Initialize constants for the system that will be used later.
        """
        # Fall back to 'Euler' for any unrecognized method string.
        self.method = (string.capwords(method, '-')
                       if method.lower() in VALID_METHODS else 'Euler')
        self.font_size = font_size
        try:
            self.method_text.text = 'Method: {}'.format(self.method)
            self.method_text.font_size = 2 / 3 * self.font_size
            self.text.font_size = self.font_size
        except AttributeError:
            # Running in __init__, so self.method_text isn't established yet.
            pass
        self.show_pivot = pivot
        # Initialize constants for the system
        self.t = 0
        self.dt = 1 / 60 if dt is None else dt
        self.d1 = 0.97 if d1 is None else d1
        self.d2 = 0.55 if d2 is None else d2
        self.little_m = 2.0 if little_m is None else little_m
        self.big_m = 12.5 if big_m is None else big_m
        self.spring_k1 = 1.35 if spring_k1 is None else spring_k1
        self.spring_k2 = 0.5 if spring_k2 is None else spring_k2
        self.b = 25.75 if b is None else b
        # Polar moment of inertia of the rod about the pivot.
        self.j_term = (
            (1 / 3) * self.big_m * (self.d1 ** 3 + self.d2 ** 3) /
            (self.d1 + self.d2)
        )
        self.x = -0.010 if x is None else x
        self.x_dot = -0.12 if x_dot is None else x_dot
        self.theta = 0.005 if theta is None else theta
        self.theta_dot = 0.0 if theta_dot is None else theta_dot
        self.theta_not = self.theta  # I'll need this later
        # Initialize constants for display
        self.px_len = 10 if px_len is None else px_len
        self.scale = 50 if scale is None else scale
        self.px_per_m = self.scale * self.px_len / (0.97 + 0.55)
        self.rod_scale = (self.d1 + self.d2) / self.standard_length
        # Set up stuff for establishing a pivot point to rotate about
        self.pivot_loc = (self.d2 - self.d1) / 2
        self.pivot_loc_px = self.pivot_loc * self.px_per_m
        # Set up positioning info for the springs and mass, as well as some
        # constants for use later
        # NOTE: Springs are not like boxes. Their center of rotation is at one
        # end of the spring, unlike the box where it is in the middle.
        # The location and scaling is set to reflect this. This means
        # there's a little bit of x- and y-translation needed to properly
        # center them.
        self.s2_loc = np.asarray(
            [self.d1 * self.px_per_m * np.sin(self.theta),
             -self.d1 * self.px_per_m * np.cos(
                 self.theta)]
        )
        self.s1_l_not = self.px_len / 4 * self.scale
        self.x_is_0 = (
            -self.d2 * self.px_per_m * np.sin(self.theta_not) -
            1.5 * self.s1_l_not
        )
        self.s1_loc = np.asarray(
            [self.x_is_0 + 0.5 * self.s1_l_not + self.x * self.px_per_m,
             self.d2 * self.px_per_m * np.cos(self.theta)]
        )
        self.mass_loc = np.asarray(
            [self.x_is_0 + self.x * self.px_per_m,
             self.d2 * self.px_per_m * np.cos(self.theta)]
        )
class Paramlist(object):
    """Container for simulation parameters.

    Based on methods from ../gloo/primitive_mesh_viewer_qt.
    """

    def __init__(self, parameters):
        """Store *parameters* and build the ``props`` value lookup.

        ``props`` starts with the two non-spinbox options ('pivot' and
        'method') and is then filled with the initial value of every
        listed parameter, keyed by its internal (converted) name.
        """
        self.parameters = parameters
        self.props = {'pivot': False, 'method': 'Euler'}
        for label, _min, _max, _type, initial in parameters:
            self.props[CONVERSION_DICT[label]] = initial
class SetupWidget(QtWidgets.QWidget):
    """Parameter panel: one labelled control per simulation parameter.

    Emits ``changed_parameter_sig`` with the full ``Paramlist`` whenever
    any control changes value.
    """
    # Fired with the complete parameter set on every control change.
    changed_parameter_sig = Signal(Paramlist)
    def __init__(self, parent=None):
        """Widget for holding all the parameter options in neat lists.
        Based on methods from ../gloo/primitive_mesh_viewer_qt.
        """
        super(SetupWidget, self).__init__(parent)
        # Create the parameter list from the default parameters given here
        self.param = Paramlist(PARAMETERS)
        # Checkbox for whether or not the pivot point is visible
        self.pivot_chk = QtWidgets.QCheckBox(u"Show pivot point")
        self.pivot_chk.setChecked(self.param.props['pivot'])
        self.pivot_chk.toggled.connect(self.update_parameters)
        # A drop-down menu for selecting which method to use for updating
        self.method_list = ['Euler', 'Runge-Kutta']
        self.method_options = QtWidgets.QComboBox()
        self.method_options.addItems(self.method_list)
        self.method_options.setCurrentIndex(
            self.method_list.index((self.param.props['method']))
        )
        self.method_options.currentIndexChanged.connect(
            self.update_parameters
        )
        # Separate the different parameters into groupboxes,
        # so there's a clean visual appearance
        self.parameter_groupbox = QtWidgets.QGroupBox(u"System Parameters")
        self.conditions_groupbox = QtWidgets.QGroupBox(u"Initial Conditions")
        self.display_groupbox = QtWidgets.QGroupBox(u"Display Parameters")
        self.groupbox_list = [self.parameter_groupbox,
                              self.conditions_groupbox,
                              self.display_groupbox]
        self.splitter = QtWidgets.QSplitter(QtCore.Qt.Vertical)
        # Get ready to create all the spinboxes with appropriate labels
        plist = []
        self.psets = []
        # important_positions is used to separate the
        # parameters into their appropriate groupboxes
        important_positions = [0, ]
        param_boxes_layout = [QtWidgets.QGridLayout(),
                              QtWidgets.QGridLayout(),
                              QtWidgets.QGridLayout()]
        for nameV, minV, maxV, typeV, iniV in self.param.parameters:
            # Create Labels for each element
            plist.append(QtWidgets.QLabel(nameV))
            if nameV == 'x' or nameV == 'scale':
                # 'x' is the start of the 'Initial Conditions' groupbox,
                # 'scale' is the start of the 'Display Parameters' groupbox
                important_positions.append(len(plist) - 1)
            # Create Spinboxes based on type - doubles get a DoubleSpinBox,
            # ints get regular SpinBox.
            # Step sizes are the same for every parameter except font size.
            if typeV == 'double':
                self.psets.append(QtWidgets.QDoubleSpinBox())
                self.psets[-1].setDecimals(3)
                if nameV == 'font size':
                    self.psets[-1].setSingleStep(1.0)
                else:
                    self.psets[-1].setSingleStep(0.01)
            elif typeV == 'int':
                self.psets.append(QtWidgets.QSpinBox())
            # Set min, max, and initial values
            self.psets[-1].setMaximum(maxV)
            self.psets[-1].setMinimum(minV)
            self.psets[-1].setValue(iniV)
        # pidx selects which groupbox layout the current row goes into; it
        # advances every time a boundary from important_positions is crossed.
        pidx = -1
        for pos in range(len(plist)):
            if pos in important_positions:
                pidx += 1
            param_boxes_layout[pidx].addWidget(plist[pos], pos + pidx, 0)
            param_boxes_layout[pidx].addWidget(self.psets[pos], pos + pidx, 1)
            self.psets[pos].valueChanged.connect(self.update_parameters)
        param_boxes_layout[0].addWidget(QtWidgets.QLabel('Method: '), 8, 0)
        param_boxes_layout[0].addWidget(self.method_options, 8, 1)
        param_boxes_layout[-1].addWidget(self.pivot_chk, 2, 0, 3, 0)
        for groupbox, layout in zip(self.groupbox_list, param_boxes_layout):
            groupbox.setLayout(layout)
        for groupbox in self.groupbox_list:
            self.splitter.addWidget(groupbox)
        vbox = QtWidgets.QVBoxLayout()
        hbox = QtWidgets.QHBoxLayout()
        hbox.addWidget(self.splitter)
        hbox.addStretch(5)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        self.setLayout(vbox)
    def update_parameters(self, option):
        """When the system parameters change, get the state and emit it."""
        # Pull every widget's current value back into the Paramlist, then
        # broadcast the whole thing so listeners can resync.
        self.param.props['pivot'] = self.pivot_chk.isChecked()
        self.param.props['method'] = self.method_list[
            self.method_options.currentIndex()
        ]
        keys = map(lambda x: x[0], self.param.parameters)
        for pos, nameV in enumerate(keys):
            self.param.props[CONVERSION_DICT[nameV]] = self.psets[pos].value()
        self.changed_parameter_sig.emit(self.param)
class MainWindow(QtWidgets.QMainWindow):
    def __init__(self, param=None):
        """Main Window for holding the Vispy Canvas and the parameter
        control menu.

        :param param: optional Paramlist used to seed the controls;
            falls back to the SetupWidget defaults when None.
        """
        QtWidgets.QMainWindow.__init__(self)
        self.resize(1067, 800)
        icon = load_data_file('wiggly_bar/spring.ico')
        self.setWindowIcon(QtGui.QIcon(icon))
        self.setWindowTitle('Nonlinear Physical Model Simulation')
        # Left pane: the parameter controls.
        self.parameter_object = SetupWidget(self)
        self.parameter_object.param = (param
                                       if param is not None else
                                       self.parameter_object.param)
        self.parameter_object.changed_parameter_sig.connect(self.update_view)
        # Right pane: the VisPy canvas driven by those parameters.
        self.view_box = WigglyBar(**self.parameter_object.param.props)
        self.view_box.create_native()
        self.view_box.native.setParent(self)
        splitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal)
        splitter.addWidget(self.parameter_object)
        splitter.addWidget(self.view_box.native)
        self.setCentralWidget(splitter)
    def update_view(self, param):
        """Update the VisPy canvas when the parameters change.
        """
        self.view_box.reset_parms(**param.props)
def uncaught_exceptions(ex_type, ex_value, ex_traceback):
    """``sys.excepthook`` replacement: log any uncaught exception."""
    formatted = ''.join(
        traceback.format_exception(ex_type, ex_value, ex_traceback)
    )
    logger.error('Uncaught Exception\n%s', formatted)
def main():
    """Entry point: install the excepthook, set up logging, run the app."""
    sys.excepthook = uncaught_exceptions
    logging.basicConfig(level=logging.INFO)
    logging.getLogger().setLevel(logging.INFO)
    # PySide2's QApplication takes no arguments; PyQt5 requires argv,
    # hence the TypeError fallback.
    try: # pyside2
        app = QtWidgets.QApplication()
    except TypeError: # pyqt5
        app = QtWidgets.QApplication([])
    win = MainWindow()
    win.show()
    app.exec_()
# Launch the GUI only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"eyou102@gmail.com"
] | eyou102@gmail.com |
319f8d28ab811b2e7eaf832c142ce5a9f1993d33 | 6766c01dee6c6330a62e14d5c036eedb60887228 | /book/admin.py | 04b64bbc505dc63b03a804135fa5da5558baf3c5 | [] | no_license | whu2017/easyreading | 5fbf299ab1d2e489e6dfd881a466852d646bbb52 | 71b2936345f9253648c046a68839c7164e506bfe | refs/heads/master | 2020-04-06T04:13:32.918077 | 2017-05-24T01:27:06 | 2017-05-24T01:27:06 | 83,019,406 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from book.models import Category, Book, Comment
class CategoryAdmin(admin.ModelAdmin):
    # Change list shows only the category name.
    list_display = ('name', )
class BookAdmin(admin.ModelAdmin):
    # Columns shown in the book change list, including trial settings
    # and both timestamps.
    list_display = ('category', 'title', 'author', 'price', 'score', 'total_chapter', 'allow_trial',
                    'trial_chapter', 'create_timestamp', 'update_timestamp')
class CommentAdmin(admin.ModelAdmin):
    # Columns shown in the comment change list.
    list_display = ('user', 'book', 'score', 'content', 'timestamp')
# Register each model with its customized ModelAdmin options.
admin.site.register(Category, CategoryAdmin)
admin.site.register(Book, BookAdmin)
admin.site.register(Comment, CommentAdmin)
| [
"doraemonext@gmail.com"
] | doraemonext@gmail.com |
9fa7197b8a44396a777f1f416ab3e8488903a9b1 | 5a1a695829a2d1dbf4daa0736f0fbd6feffc7e63 | /swexpert/1859(백만 장자).py | 88fa455f52e94d8e73ed650c5a4527801d43a941 | [] | no_license | juyi212/Algorithm_study | f5d263c5329c994a457bbe897e5e1405d2b1d67a | f225cc593a50b74686111f654f7133707a1d1310 | refs/heads/master | 2023-03-21T20:02:36.138688 | 2021-03-16T14:16:40 | 2021-03-16T14:16:40 | 325,008,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | import sys
# Redirect stdin to the local sample-input file, as the judge setup expects.
sys.stdin = open('input1.txt','r')

# Maximum-profit ("millionaire") problem: scan prices from the last day
# backwards while tracking the highest price still ahead.  Every day priced
# below that maximum is bought and (conceptually) sold at the maximum.
num_cases = int(input())
for case_no in range(1, num_cases + 1):
    day = int(input())
    prices = list(map(int, input().split()))
    best_future = prices[len(prices) - 1]
    profit = 0
    for idx in range(day - 2, -1, -1):
        if prices[idx] < best_future:
            profit += best_future - prices[idx]
        else:
            best_future = prices[idx]
    print('#{0} {1}'.format(case_no, profit))
| [
"dea8307@naver.com"
] | dea8307@naver.com |
13ad959a6218c2871702b4ef16bfccf686044504 | e1d6de1fb5ce02907df8fa4d4e17e61d98e8727d | /intro/searching.py | 7d3678bf41cb22f6d1c32d55870e4744d264fc59 | [] | no_license | neuroph12/nlpy | 3f3d1a8653a832d6230cb565428ee0c77ef7451d | 095976d144dacf07414bf7ee42b811eaa67326c1 | refs/heads/master | 2020-09-16T08:24:37.381353 | 2016-09-10T19:24:05 | 2016-09-10T19:24:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | # this line will show some book samples in NLTK.
# Importing nltk.book loads the sample texts (text1..text9, etc.) into
# this namespace and prints a listing of them.
from nltk.book import *
## concordance: every occurrence of a word with its surrounding context
# print('Sense and Sensibility by Jane Austen 1811')
# print(text2.concordance('affection'))
# print('text5: Chat Corpus')
print(text5.concordance('lol'))
## similar: words that appear in contexts similar to the given word
# print(text1.similar('monstrous'))
## common contexts shared by two or more words
# print(text2.common_contexts(["monstrous", "very"]))
## dispersion plot: positions of words throughout the text
# text4.dispersion_plot(['citizens', 'democracy', 'freedom', 'duties', 'America'])
## generate is not supported now?
# print(text3.generate())
## | [
"anderscui@gmail.com"
] | anderscui@gmail.com |
fd6feb2ed457231f5f56dceff0819d45e00509b8 | 343bdaddfc66c6316e2cee490e9cedf150e3a5b7 | /0001_0100/0094/0094.py | b366673455e6be0051362104cde337887818eb30 | [] | no_license | dm-alexi/acmp | af7f6b4484b78f5922f3b464406a0ba5dea0d738 | 3fa0016d132adfeab7937b3e8c9687a34642c93a | refs/heads/master | 2021-07-09T15:14:25.857086 | 2020-10-20T19:08:54 | 2020-10-20T19:08:54 | 201,908,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | with open("input.txt", "r") as f, open("output.txt", "w") as q:
n, m, k = (int(x) for x in f.read().split())
q.write("1" if n >= m else "NO" if n <= k else str((m - n - 1) // (n - k) + 2))
| [
"dm2.alexi@gmail.com"
] | dm2.alexi@gmail.com |
39d81f04162ffe643e220fbda57ad7cee54f091e | 873d9322f0d9296a0eda49bba65faba3a7ba62e3 | /kontrasto/templatetags/kontrasto_tags.py | 9918722e2f02363153ae4fafe2370029bd7c40a1 | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | nimasmi/kontrasto | d9910ca015f7d4263b7d9b829f3282936cfbb0b9 | 08fc0279e2b3e1da1a5cec769874572455fd0527 | refs/heads/main | 2023-04-19T21:18:47.677839 | 2021-05-15T00:42:50 | 2021-05-15T00:42:50 | 370,022,377 | 0 | 0 | MIT | 2021-05-23T10:28:35 | 2021-05-23T10:28:34 | null | UTF-8 | Python | false | false | 1,881 | py | from django import template
from kontrasto import wcag_2, wcag_3
# Library instance Django uses to discover the filters/tags defined below.
register = template.Library()
@register.filter(name="dominant_color")
def dominant_color(image):
    """Template filter: return the dominant colour of *image*."""
    color = image.get_dominant_color()
    return color
@register.filter(name="wcag_2_contrast")
def wcag_2_contrast(image, text_color: str) -> str:
    """WCAG 2 contrast between *text_color* and the image's dominant colour."""
    dominant = image.get_dominant_color()
    return wcag_2.wcag2_contrast(dominant, text_color)
@register.simple_tag(name="wcag_2_contrast_light_or_dark")
def wcag_2_contrast_light_or_dark(
    image, light_color: str, dark_color: str
) -> dict:
    """Pick the higher-contrast (WCAG 2) text colour against *image*.

    Note: the original annotation declared ``-> str`` but the function has
    always returned a dict; the annotation is corrected here.

    :param image: object exposing ``get_dominant_color()``.
    :param light_color: candidate light text colour (hex string).
    :param dark_color: candidate dark text colour (hex string).
    :return: dict with the winning text colour/theme plus the dominant
        background colour (opaque and semi-transparent) and its theme.
    """
    dominant = image.get_dominant_color()
    light_contrast = wcag_2.wcag2_contrast(dominant, light_color)
    dark_contrast = wcag_2.wcag2_contrast(dominant, dark_color)
    lighter = light_contrast > dark_contrast
    return {
        "text_color": light_color if lighter else dark_color,
        "text_theme": "light" if lighter else "dark",
        "bg_color": dominant,
        # 8-digit hex: dominant colour with "aa" alpha appended.
        "bg_color_transparent": f"{dominant}aa",
        "bg_theme": "dark" if lighter else "light",
    }
@register.filter(name="wcag_3_contrast")
def wcag_3_contrast(image, text_color: str) -> str:
    """APCA (WCAG 3) contrast between *text_color* and the dominant colour."""
    dominant = image.get_dominant_color()
    return wcag_3.apca_contrast(dominant, text_color)
@register.simple_tag(name="wcag_3_contrast_light_or_dark")
def wcag_3_contrast_light_or_dark(
    image, light_color: str, dark_color: str
) -> dict:
    """Pick the higher-contrast (WCAG 3 / APCA) text colour against *image*.

    Note: the original annotation declared ``-> str`` but the function has
    always returned a dict; the annotation is corrected here.

    :param image: object exposing ``get_dominant_color()``.
    :param light_color: candidate light text colour (hex string).
    :param dark_color: candidate dark text colour (hex string).
    :return: dict with the winning text colour/theme plus the dominant
        background colour (opaque and semi-transparent) and its theme.
    """
    dominant = image.get_dominant_color()
    light_contrast = wcag_3.format_contrast(
        wcag_3.apca_contrast(dominant, light_color)
    )
    dark_contrast = wcag_3.format_contrast(
        wcag_3.apca_contrast(dominant, dark_color)
    )
    # NOTE(review): the comparison happens after format_contrast(); if that
    # helper returns strings this is a lexicographic compare — confirm its
    # return type in kontrasto.wcag_3.
    lighter = light_contrast > dark_contrast
    return {
        "text_color": light_color if lighter else dark_color,
        "text_theme": "light" if lighter else "dark",
        "bg_color": dominant,
        # 8-digit hex: dominant colour with "aa" alpha appended.
        "bg_color_transparent": f"{dominant}aa",
        "bg_theme": "dark" if lighter else "light",
    }
| [
"thibaudcolas@gmail.com"
] | thibaudcolas@gmail.com |
065cde2487f798bbdd3629817a89aac06a72872c | 55647a80c8b412af9df0ba3f50595cc2f29c25e6 | /res/scripts/client/gui/Scaleform/daapi/view/meta/PremiumWindowMeta.py | 938001af0b0a21975d24c7e234953c27e7860c41 | [] | no_license | cnsuhao/WOT-0.9.17-CT | 0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb | d1f932d8cabaf8aa21708622e87f83c8d24d6451 | refs/heads/master | 2021-06-08T18:11:07.039293 | 2016-11-19T19:12:37 | 2016-11-19T19:12:37 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,076 | py | # 2016.11.19 19:51:28 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/PremiumWindowMeta.py
from gui.Scaleform.daapi.view.meta.SimpleWindowMeta import SimpleWindowMeta
class PremiumWindowMeta(SimpleWindowMeta):
    """
    DO NOT MODIFY!
    Generated with yaml.
    __author__ = 'yaml_processor'
    @extends SimpleWindowMeta
    """
    def onRateClick(self, rateId):
        # Stub handler: reports a missing override when invoked, so the
        # concrete view is expected to supply the real implementation.
        self._printOverrideError('onRateClick')
    def as_setHeaderS(self, prc, bonus1, bonus2):
        # Forward the header data to the Flash object, but only once the
        # DAAPI bridge has been initialized.
        if self._isDAAPIInited():
            return self.flashObject.as_setHeader(prc, bonus1, bonus2)
    def as_setRatesS(self, data):
        """
        :param data: Represented by PremiumWindowRatesVO (AS)
        """
        if self._isDAAPIInited():
            return self.flashObject.as_setRates(data)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\Scaleform\daapi\view\meta\PremiumWindowMeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:51:28 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
1ab1c26e10ab8717734ea8ad5224365ae174f7e4 | c4b8e1e09dedbccd37ca008ecaaca4438610bbaf | /google_or_tools/futoshiki_sat.py | 0eac27b21330dd3171807643a0076cc4144a12bc | [
"MIT"
] | permissive | hakank/hakank | 4806598b98cb36dd51b24b0ab688f52dadfe9626 | c337aaf8187f15dcdc4d5b09cd2ed0dbdb2e72c2 | refs/heads/master | 2023-08-15T00:21:52.750270 | 2023-07-27T16:21:40 | 2023-07-27T16:21:40 | 11,933,517 | 336 | 97 | MIT | 2023-07-27T11:19:42 | 2013-08-06T20:12:10 | JavaScript | UTF-8 | Python | false | false | 4,036 | py | # Copyright 2021 Hakan Kjellerstrand hakank@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Futoshiki problem in OR-tools CP-SAT Solver.
From http://en.wikipedia.org/wiki/Futoshiki
'''
The puzzle is played on a square grid, such as 5 x 5. The objective
is to place the numbers 1 to 5 (or whatever the dimensions are)
such that each row, and column contains each of the digits 1 to 5.
Some digits may be given at the start. In addition, inequality
constraints are also initially specifed between some of the squares,
such that one must be higher or lower than its neighbour. These
constraints must be honoured as the grid is filled out.
'''
Also see
http://www.guardian.co.uk/world/2006/sep/30/japan.estheraddley
This model is inspired by the Minion/Tailor
example futoshiki.eprime.
It's a port of my old CP model futoshiki.py
This model was created by Hakan Kjellerstrand (hakank@gmail.com)
Also see my other OR-tools models: http://www.hakank.org/or_tools/
"""
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import math, sys
# from cp_sat_utils import *
def main(values, lt):
  """Build and solve one futoshiki instance with CP-SAT.

  Args:
    values: size x size grid of clues; 0 marks an empty cell.
    lt: list of 1-based quadruples [i1, j1, i2, j2], each requiring
      cell (i1, j1) < cell (i2, j2).
  """
  model = cp.CpModel()

  #
  # data
  #
  size = len(values)
  RANGE = list(range(size))
  NUMQD = list(range(len(lt)))

  #
  # variables
  #
  field = {}
  for i in RANGE:
    for j in RANGE:
      field[i, j] = model.NewIntVar(1, size, "field[%i,%i]" % (i, j))

  #
  # constraints
  #

  # set initial values (the given clues)
  for row in RANGE:
    for col in RANGE:
      if values[row][col] > 0:
        model.Add(field[row, col] == values[row][col])

  # all rows have to be different
  for row in RANGE:
    model.AddAllDifferent([field[row, col] for col in RANGE])

  # all columns have to be different
  for col in RANGE:
    model.AddAllDifferent([field[row, col] for row in RANGE])

  # all < constraints are satisfied (input is 1-based, variables 0-based)
  for i in NUMQD:
    model.Add(
        field[lt[i][0] - 1, lt[i][1] - 1] < field[lt[i][2] - 1, lt[i][3] - 1])

  #
  # search and result
  #
  solver = cp.CpSolver()
  status = solver.Solve(model)

  # Accept FEASIBLE as well as OPTIMAL: for a satisfaction model the solver
  # may report a valid assignment without proving optimality (e.g. when the
  # search is interrupted); the previous OPTIMAL-only check would then
  # silently print no solution.
  if status in (cp.OPTIMAL, cp.FEASIBLE):
    for i in RANGE:
      for j in RANGE:
        print(solver.Value(field[i, j]), end=" ")
      print()
    print()

  print("NumConflicts:", solver.NumConflicts())
  print("NumBranches:", solver.NumBranches())
  print("WallTime:", solver.WallTime())
#
# Example from Tailor model futoshiki.param/futoshiki.param
# Solution:
# 5 1 3 2 4
# 1 4 2 5 3
# 2 3 1 4 5
# 3 5 4 1 2
# 4 2 5 3 1
#
# Futoshiki instance, by Andras Salamon
# specify the numbers in the grid (0 = empty cell)
#
values1 = [[0, 0, 3, 2, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0]]
# [i1,j1, i2,j2] requires that values[i1,j1] < values[i2,j2]
# Note: 1-based
lt1 = [[1, 2, 1, 1], [1, 4, 1, 5], [2, 3, 1, 3], [3, 3, 2, 3], [3, 4, 2, 4],
       [2, 5, 3, 5], [3, 2, 4, 2], [4, 4, 4, 3], [5, 2, 5, 1], [5, 4, 5, 3],
       [5, 5, 4, 5]]
#
# Example from http://en.wikipedia.org/wiki/Futoshiki
# Solution:
# 5 4 3 2 1
# 4 3 1 5 2
# 2 1 4 3 5
# 3 5 2 1 4
# 1 2 5 4 3
#
values2 = [[0, 0, 0, 0, 0], [4, 0, 0, 0, 2], [0, 0, 4, 0, 0], [0, 0, 0, 0, 4],
           [0, 0, 0, 0, 0]]
# Note: 1-based
lt2 = [[1, 2, 1, 1], [1, 4, 1, 3], [1, 5, 1, 4], [4, 4, 4, 5], [5, 1, 5, 2],
       [5, 2, 5, 3]]
# Solve both sample instances when run as a script.
if __name__ == "__main__":
  print("Problem 1")
  main(values1, lt1)
  print("\nProblem 2")
  main(values2, lt2)
| [
"hakank@gmail.com"
] | hakank@gmail.com |
58be27206cfb7d5fc2a4de4abf35d558fc019c02 | 483424524c70852cc043e0d77bf1b757a61d797a | /deepspeed/ops/sparse_attention/sparse_attention_utils.py | ccb0f940dff65839beac579f81c4dfb7e499e6bb | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | microsoft/DeepSpeed | 810f1af320020718d0794f5a97cde6f1d17af122 | 55d9964c59c0c6e23158b5789a5c36c28939a7b0 | refs/heads/master | 2023-09-06T07:40:52.145692 | 2023-09-05T23:51:23 | 2023-09-05T23:51:23 | 235,860,204 | 27,557 | 3,347 | Apache-2.0 | 2023-09-14T21:38:46 | 2020-01-23T18:35:18 | Python | UTF-8 | Python | false | false | 12,300 | py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from torch.nn import functional as F
from deepspeed.ops.sparse_attention import BertSparseSelfAttention, SparsityConfig
'''
This file contains few utility functions to handle adapting pretrained model with sparse self-attention module.
'''
class SparseAttentionUtils:
    """This class provides some utility functions that are used in integrating sparse attention into transformer models.
    Such utilities include extending position embeddings, replacing current self-attention layer with sparse attention, padding sequences to multiple of block size, etc.
    """

    @staticmethod
    def extend_position_embedding(model, max_position):
        """This function extends the position embedding weights of a model loaded from a checkpoint.
        It assumes the new max position is bigger than the original max length.
        Arguments:
            model: required: a transformer model
            max_position: required: an integer determining new position embedding size
        Return:
            model: updated model; in which position embedding weights have been extended based on new size
        """
        if hasattr(model, 'bert'):
            original_max_position = model.bert.embeddings.position_embeddings.weight.size(0)
            assert max_position > original_max_position
            extend_multiples = max(1, max_position // original_max_position)
            # Tile the learned table a whole number of times along the
            # position axis to cover the requested length.
            model.bert.embeddings.position_embeddings.weight.data = model.bert.embeddings.position_embeddings.weight.repeat(
                extend_multiples, 1)
        elif hasattr(model, 'roberta'):
            # RoBERTa has positions 0 & 1 reserved, so embedding size is max position + 2
            original_max_position, embed_size = model.roberta.embeddings.position_embeddings.weight.shape
            original_max_position -= 2
            extend_multiples = max(1, max_position // original_max_position)
            assert max_position > original_max_position
            max_position += 2
            extended_position_embedding = model.roberta.embeddings.position_embeddings.weight.new_empty(
                max_position, embed_size)
            # Copying starts right after the two reserved position slots.
            k = 2
            for i in range(extend_multiples):
                extended_position_embedding[k:(
                    k + original_max_position)] = model.roberta.embeddings.position_embeddings.weight[2:]
                k += original_max_position
            model.roberta.embeddings.position_embeddings.weight.data = extended_position_embedding
        else:
            raise ValueError(
                'Please extend \"extend_position_embedding\" function to support your model type. It currently only supports \"bert\" & \"roberta\"!'
            )
        model.config.max_position_embeddings = max_position
        print(f'Extended position embeddings to {original_max_position * extend_multiples}')
        return model

    @staticmethod
    def update_tokenizer_model_max_length(tokenizer, max_position):
        """This function updates the position embedding length of a tokenizer to a new max position.
        Arguments:
            tokenizer: required: a transformer tokenizer
            max_position: required: an integer determining new position embedding size
        Return:
            tokenizer: updated tokenizer; in which model maximum length has been extended based on new size
        """
        tokenizer.model_max_length = max_position
        # Mirror the new limit into init_kwargs as well.
        tokenizer.init_kwargs['model_max_length'] = max_position
        print(f'updated tokenizer model max imum length to {max_position}')
        return tokenizer

    @staticmethod
    def replace_model_self_attention_with_sparse_self_attention(
            model,
            max_position,
            # SparsityConfig parameters needs to be set accordingly
            sparsity_config=SparsityConfig(num_heads=4)):
        """This function replaces the self attention layers in model encoder with sparse self attention.
        It currently supports bert and roberta model and can be easily extended to any other models following similar steps here.
        For sparsityConfig, refer to the config class.
        Arguments:
            model: required: a transformer model
            max_position: required: an integer determining new position embedding size
            sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on SparsityConfig class
        Return:
            model: updated model; in which self attention layer has been replaced with DeepSpeed Sparse Self Attention layer.
        """
        if hasattr(model, 'bert'):
            model.config.max_position_embeddings = max_position
            model.replace_self_attention_layer_with_sparse_self_attention_layer(model.config, model.bert.encoder.layer,
                                                                                sparsity_config)
        elif hasattr(model, 'roberta'):
            # + 2 accounts for RoBERTa's two reserved position slots
            # (see extend_position_embedding above).
            model.config.max_position_embeddings = max_position + 2
            model.replace_self_attention_layer_with_sparse_self_attention_layer(model.config,
                                                                                model.roberta.encoder.layer,
                                                                                sparsity_config)
        else:
            raise ValueError(
                'Please extend \"update_model_self_attention_to_sparse_self_attention\" function to support \
                your model type. It currently only supports \"bert\" & \"roberta\"!')
        return model

    @staticmethod
    def replace_self_attention_layer_with_sparse_self_attention_layer(
            config,
            layers,
            # SparsityConfig parameters needs to be set accordingly
            sparsity_config=SparsityConfig(num_heads=4)):
        """This function replaces the self attention layers in attention layer with sparse self attention.
        For sparsityConfig, refer to the config class.
        Arguments:
            config: required: transformer model config
            layers: required: transformer model attention layers
            sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on SparsityConfig class
        Return:
            layers: updated attention layers; in which self attention layers have been replaced with DeepSpeed Sparse Self Attention layer.
        """
        for layer in layers:
            deepspeed_sparse_self_attn = BertSparseSelfAttention(config, sparsity_config)
            # Reuse the layer's existing query/key/value projections so the
            # already-trained weights carry over to the sparse module.
            deepspeed_sparse_self_attn.query = layer.attention.self.query
            deepspeed_sparse_self_attn.key = layer.attention.self.key
            deepspeed_sparse_self_attn.value = layer.attention.self.value
            layer.attention.self = deepspeed_sparse_self_attn
        return layers

    @staticmethod
    def pad_to_block_size(block_size, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds,
                          pad_token_id, model_embeddings):
        """This function pads input tokens and attention mask on sequence length dimension to be multiple of block size.
        This is a requirement for Sparse Transformer in which the self attention layer works on sequences of length multiple of block size.
        It needs to be called in your model, such as BertModel, right before you calculate the embedding outputs.
        Note)
        1- instead of passing your embedding layer to this function, you can simply add this function to your model. It can be more simplified if given attention_mask and/or token_type_ids are none.
        2- you need to call unpad function before returning your model output to unpad the encoder sequence output.
        Arguments:
            block_size: required: an integer determining the block size of sparsity config.
            pad_token_id: required: an integer determining the pad token from the model config; such as bert.config.pad_token_id.
            input_ids: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary
            attention_mask: a torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences.
            token_type_ids: a torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
            position_ids: a torch.LongTensor of shape [batch_size, sequence_length] with the indices of positions of each input sequence tokens in the position embeddings.
            inputs_embeds: an optional torch.FloatTensor of shape [batch_size, sequence_length, hidden_size] that contains embedded representation and can be passed instead of input_ids directly.
            model_embeddings: an optional object. If inputs_embeds are not none, this will be your model embeddings such as BertEmbeddings from your model such as BertModel. You can move this function inside your model and use self.embeddings instead of passing this parameter.
        Return:
            pad_len: an integer determining how much inputs have been padded to transfer sequence length dimension to multiple of block size.
            input_ids: if input_ids are not none padded input_ids otherwise none.
            attention_mask: if attention_mask is not none padded attention_mask otherwise none.
            token_type_ids: if token_type_ids are not none padded token_type_ids otherwise none.
            position_ids: if position_ids are not none padded position_ids otherwise none.
            inputs_embeds: if inputs_embeds are not none padded inputs_embeds otherwise none.
        """
        batch_size, seq_len = input_ids.shape if input_ids is not None else inputs_embeds.shape[:-1]
        # Distance from seq_len up to the next multiple of block_size
        # (0 when the sequence is already aligned).
        pad_len = (block_size - seq_len % block_size) % block_size
        if pad_len > 0:
            if inputs_embeds is not None:
                # Embed a block of pad tokens and append it along the
                # sequence dimension.
                pad_input_ids = inputs_embeds.new_full((batch_size, pad_len), pad_token_id, dtype=torch.long)
                pad_inputs_embeds = model_embeddings(pad_input_ids)
                inputs_embeds = torch.cat([inputs_embeds, pad_inputs_embeds], dim=-2)
            # may not be needed as input_ids are not used if inputs_embeds are given
            if input_ids is not None:
                input_ids = F.pad(input_ids, (0, pad_len), value=pad_token_id)
            if position_ids is not None:
                # pad position_id with pad_token_id
                position_ids = F.pad(position_ids, (0, pad_len), value=pad_token_id)
            # pad attention mask without attention on the padding tokens
            attention_mask = F.pad(attention_mask, (0, pad_len), value=False)
            # pad token_type_ids with token_type_id = 0
            token_type_ids = F.pad(token_type_ids, (0, pad_len), value=0)
        return pad_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds

    @staticmethod
    def unpad_sequence_output(pad_len, sequence_output):
        """This function unpads sequence output if inputs of the model were padded.
        This is a requirement for Sparse Transformer in which the self attention layer works on sequences of length multiple of block size.
        It needs to be called in your model, such as BertModel, right before you return the model outputs.
        Arguments:
            pad_len: required: an integer determining how much model inputs have been padded to transfer sequence length dimension to multiple of block size.
            sequence_output: required: sequence output of the encoder layer.
        Return:
            sequence_output: unpaded sequence output of the encoder layer.
        """
        if (pad_len > 0):
            # Drop the trailing pad positions added by pad_to_block_size.
            sequence_output = sequence_output[:, :-pad_len]
        return sequence_output
| [
"noreply@github.com"
] | microsoft.noreply@github.com |
2fce7a68118cfb2a297a7f558fcf02e1990f725a | 882c2b3c410b838372d43e431d1ccd6e02ba45f6 | /AlMgSiMC/cylinder_khachaturyan.py | 7025b331326e9df285f6023052ba55b49b3f0ad5 | [] | no_license | davidkleiven/GPAWTutorial | d46f7b8750172ba5ff36ccc27f97089cac94fd95 | 0bffc300df1d048142559855d3ccb9d0d8074d2e | refs/heads/master | 2021-06-08T05:44:42.784850 | 2021-02-25T10:23:28 | 2021-02-25T10:23:28 | 98,557,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,239 | py | import numpy as np
from apal import Khachaturyan
import matplotlib as mpl
mpl.rcParams.update({'font.size': 18, 'axes.unicode_minus': False, 'svg.fonttype': 'none'})
from matplotlib import pyplot as plt
# Cubic elastic tensor of Al in Voigt (6x6) notation.
# NOTE(review): units appear to be eV/Å^3 (C11 ≈ 0.63 ≈ 100 GPa) — confirm
# against the apal.Khachaturyan convention.
C_al = np.array([[0.62639459, 0.41086487, 0.41086487, 0, 0, 0],
                 [0.41086487, 0.62639459, 0.41086487, 0, 0, 0],
                 [0.41086487, 0.41086487, 0.62639459, 0, 0, 0],
                 [0, 0, 0, 0.42750351, 0, 0],
                 [0, 0, 0, 0, 0.42750351, 0],
                 [0, 0, 0, 0, 0, 0.42750351]])
# Edge length (in voxels) of the cubic simulation grid.
SIZE = 512
# 3x3 misfit (eigen)strain tensor of the precipitate.
MISFIT = np.array([[0.0440222, 0.00029263, 0.0008603],
                   [0.00029263, -0.0281846, 0.00029263],
                   [0.0008603, 0.00029263, 0.0440222]])
def strain_energy(radius, length):
    """Strain energy (meV/A^3) of a cylindrical precipitate of the given size."""
    from cylinder import create_cylinder
    model = Khachaturyan(elastic_tensor=C_al, misfit_strain=MISFIT)
    grid = create_cylinder(
        np.zeros((SIZE, SIZE, SIZE), dtype=np.int32), radius, length, SIZE
    )
    print("Created cylinder")
    energy_mev = model.strain_energy_voxels(grid) * 1000.0
    print("Strain energy: {} meV/A^3".format(energy_mev))
    return energy_mev
def strain_ellipsoid(a, b, c):
    """Strain energy (meV/A^3) of an ellipsoid with half-axes a, b, c."""
    from cylinder import create_ellipsoid
    model = Khachaturyan(elastic_tensor=C_al, misfit_strain=MISFIT)
    grid = create_ellipsoid(
        np.zeros((SIZE, SIZE, SIZE), dtype=np.int32), a, b, c, SIZE
    )
    print("Created ellipsoid")
    energy_mev = model.strain_energy_voxels(grid) * 1000.0
    print("Strain energy: {} meV/A^3 (a={},b={},c={})".format(energy_mev, a, b, c))
    return energy_mev
def calculate_all():
    """Sweep cylinder lengths at fixed radius and save the energies to CSV."""
    radius = 20
    rows = [[radius, length, strain_energy(radius, length)]
            for length in range(2, 200, 4)]
    outfile = "data/strain_energy_cylinder{}.csv".format(int(radius))
    np.savetxt(outfile, rows, delimiter=",",
               header="Radius (A), Length (A), Energy (meV/A^3)")
def calculate_ellipsoid():
    """Sweep one ellipsoid half-axis and save the energies to CSV.

    With ``flip_ba`` set, the swept value is passed as the x half-axis
    instead of y, and the output file gets a ``_flipped`` suffix.
    """
    a = c = 20
    flip_ba = True
    swept_values = list(range(2, 20, 4)) + list(range(20, 200, 20))
    rows = []
    for b in swept_values:
        args = (b, a, c) if flip_ba else (a, b, c)
        rows.append([a, b, c, strain_ellipsoid(*args)])
    if flip_ba:
        fname = "data/strain_energy_ellipsoid{}_flipped.csv".format(int(a))
    else:
        fname = "data/strain_energy_ellipsoid{}.csv".format(int(a))
    np.savetxt(fname, rows, delimiter=",", header="Half-axis x (A), Half-axis y (A), Half-axis z (A), Energy (meV/A^3)")
def save_voxels(radius, length):
    """Rasterize a cylinder and dump the voxel grid as raw uint8 bytes."""
    from cylinder import create_cylinder
    grid = create_cylinder(
        np.zeros((SIZE, SIZE, SIZE), dtype=np.int32), radius, length, SIZE
    )
    grid = np.array(grid, dtype=np.uint8)
    fname = "/work/sophus/cylinder_R{}_L{}.bin".format(int(radius), int(length))
    grid.tofile(fname)
    print("Voxels written to {}".format(fname))
def plot_strain_energy(fname):
    """Plot strain energy versus cylinder aspect ratio read from a CSV file."""
    table = np.loadtxt(fname, delimiter=",")
    aspect_ratio = table[:, 1] / table[:, 0]
    strain = table[:, 2]
    fig = plt.figure()
    axes = fig.add_subplot(1, 1, 1)
    axes.plot(aspect_ratio, strain, color="#5d5c61")
    axes.set_xlabel("Aspect ratio (L/R)")
    axes.set_ylabel(r"Strain energy (meV/\r{A}\$^3\$)")
    for side in ("right", "top"):
        axes.spines[side].set_visible(False)
    plt.show()
def plot_strain_energy_ellipsoids():
    """Overlay strain energy vs. aspect ratio for the normal and flipped sweeps."""
    normal = np.loadtxt("data/strain_energy_ellipsoid20.csv", delimiter=",")
    flipped = np.loadtxt("data/strain_energy_ellipsoid20_flipped.csv", delimiter=",")
    fig = plt.figure()
    axes = fig.add_subplot(1, 1, 1)
    axes.plot(normal[:, 1] / normal[:, 0], normal[:, 3],
              color="#5d5c61", marker="o", mfc="none")
    axes.plot(flipped[:, 1] / flipped[:, 0], flipped[:, 3],
              color="#557a95", marker="v", mfc="none")
    axes.set_xlabel("Aspect ratio (L/R)")
    axes.set_ylabel(r"Strain energy (meV/\r{A}\$^3\$)")
    for side in ("right", "top"):
        axes.spines[side].set_visible(False)
    plt.show()
# Entry point: uncomment exactly one of the routines below to run it.
#calculate_all()
#calculate_ellipsoid()
plot_strain_energy_ellipsoids()
#plot_strain_energy("data/strain_energy_cylinder20.csv")
#save_voxels(50, 400)
| [
"davidkleiven446@gmail.com"
] | davidkleiven446@gmail.com |
9e338203a2a4f179a30804f2de90d78601d2e927 | 2d4af29250dca8c72b74e190e74d92f1467120a0 | /TaobaoSdk/Request/HanoiFunctionSearchRequest.py | af0a66a03944adcd825a8c566195dce3de1fdbe1 | [] | no_license | maimiaolmc/TaobaoOpenPythonSDK | 2c671be93c40cf487c0d7d644479ba7e1043004c | d349aa8ed6229ce6d76a09f279a0896a0f8075b3 | refs/heads/master | 2020-04-06T03:52:46.585927 | 2014-06-09T08:58:27 | 2014-06-09T08:58:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,356 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 et:
## @brief 根据条件检索function。current_page:最大100页 page_size:10-30
# @author wuliang@maimiaotech.com
# @date 2013-09-22 16:52:39
# @version: 0.0.0
import os
import sys
import time
def __getCurrentPath():
return os.path.normpath(os.path.join(os.path.realpath(__file__), os.path.pardir))
__modulePath = os.path.join(__getCurrentPath(), os.path.pardir)
__modulePath = os.path.normpath(__modulePath)
if __modulePath not in sys.path:
sys.path.insert(0, __modulePath)
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">根据条件检索function。current_page:最大100页 page_size:10-30</SPAN>
# <UL>
# </UL>
class HanoiFunctionSearchRequest(object):
def __init__(self):
super(self.__class__, self).__init__()
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">获取API名称</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">str</SPAN>
# </LI>
# </UL>
self.method = "taobao.hanoi.function.search"
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">时间戳,如果不设置,发送请求时将使用当时的时间</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">int</SPAN>
# </LI>
# </UL>
self.timestamp = int(time.time())
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">分配给调用方的名称信息,内部统计使用</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">String</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Required</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">required</SPAN>
# </LI>
# </UL>
self.app_name = None
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">FunctionQuery的json格式</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">String</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Required</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">required</SPAN>
# </LI>
# </UL>
self.sdata = None
| [
"chenke@maimiaotech.com"
] | chenke@maimiaotech.com |
829bd5aca7467be68cffeb2aa4981ff4b6482b94 | 06a045819cf99c7059afde40dca12cf9d3eb5f81 | /pandas/tests/io/parser/dtypes/test_dtypes_basic.py | 915cc9a9a1f95dd1fee943e1acd3b3e6fed63067 | [
"BSD-3-Clause"
] | permissive | MarcoGorelli/pandas | b9882c6ac1e4bc753819b7bc7c8b567964efd275 | 86a4ee01c7899ef454d35b95cde11e9593921c9d | refs/heads/main | 2023-08-22T12:35:45.122152 | 2023-05-04T22:11:07 | 2023-05-04T22:11:07 | 164,618,359 | 4 | 1 | BSD-3-Clause | 2023-05-05T09:02:23 | 2019-01-08T09:55:54 | Python | UTF-8 | Python | false | false | 14,980 | py | """
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
from collections import defaultdict
from io import StringIO
import numpy as np
import pytest
from pandas.errors import ParserWarning
import pandas as pd
from pandas import (
DataFrame,
Timestamp,
)
import pandas._testing as tm
from pandas.core.arrays import (
ArrowStringArray,
IntegerArray,
StringArray,
)
@pytest.mark.parametrize("dtype", [str, object])
@pytest.mark.parametrize("check_orig", [True, False])
@pytest.mark.usefixtures("pyarrow_xfail")
def test_dtype_all_columns(all_parsers, dtype, check_orig):
    # see gh-3795, gh-6607
    """Round-trip a frame through CSV with one dtype applied to every column."""
    parser = all_parsers
    df = DataFrame(
        np.random.rand(5, 2).round(4),
        columns=list("AB"),
        index=["1A", "1B", "1C", "1D", "1E"],
    )
    with tm.ensure_clean("__passing_str_as_dtype__.csv") as path:
        df.to_csv(path)
        result = parser.read_csv(path, dtype=dtype, index_col=0)
        if check_orig:
            expected = df.copy()
            result = result.astype(float)
        else:
            expected = df.astype(str)
        tm.assert_frame_equal(result, expected)
def test_dtype_per_column(all_parsers):
parser = all_parsers
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
expected = DataFrame(
[[1, "2.5"], [2, "3.5"], [3, "4.5"], [4, "5.5"]], columns=["one", "two"]
)
expected["one"] = expected["one"].astype(np.float64)
expected["two"] = expected["two"].astype(object)
result = parser.read_csv(StringIO(data), dtype={"one": np.float64, 1: str})
tm.assert_frame_equal(result, expected)
@pytest.mark.usefixtures("pyarrow_xfail")
def test_invalid_dtype_per_column(all_parsers):
    """An unknown dtype string in the per-column mapping raises TypeError."""
    parser = all_parsers
    data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
    with pytest.raises(TypeError, match="data type [\"']foo[\"'] not understood"):
        parser.read_csv(StringIO(data), dtype={"one": "foo", 1: "int"})
@pytest.mark.usefixtures("pyarrow_xfail")
def test_raise_on_passed_int_dtype_with_nas(all_parsers):
    # see gh-2631
    """Requesting an integer dtype for a column containing NAs must raise."""
    parser = all_parsers
    data = """YEAR, DOY, a
2001,106380451,10
2001,,11
2001,106380451,67"""
    # The C and python engines word their errors differently.
    msg = (
        "Integer column has NA values"
        if parser.engine == "c"
        else "Unable to convert column DOY"
    )
    with pytest.raises(ValueError, match=msg):
        parser.read_csv(StringIO(data), dtype={"DOY": np.int64}, skipinitialspace=True)
@pytest.mark.usefixtures("pyarrow_xfail")
def test_dtype_with_converters(all_parsers):
    """When a converter and a dtype target the same column, the converter wins
    and a ParserWarning is emitted."""
    parser = all_parsers
    data = """a,b
1.1,2.2
1.2,2.3"""
    # Dtype spec ignored if converted specified.
    result = parser.read_csv_check_warnings(
        ParserWarning,
        "Both a converter and dtype were specified for column a "
        "- only the converter will be used.",
        StringIO(data),
        dtype={"a": "i8"},
        converters={"a": lambda x: str(x)},
    )
    expected = DataFrame({"a": ["1.1", "1.2"], "b": [2.2, 2.3]})
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "dtype", list(np.typecodes["AllInteger"] + np.typecodes["Float"])
)
def test_numeric_dtype(all_parsers, dtype):
    """Every numpy integer/float typecode is honoured as a whole-frame dtype."""
    data = "0\n1"
    parser = all_parsers
    expected = DataFrame([0, 1], dtype=dtype)
    result = parser.read_csv(StringIO(data), header=None, dtype=dtype)
    tm.assert_frame_equal(expected, result)
@pytest.mark.usefixtures("pyarrow_xfail")
def test_boolean_dtype(all_parsers):
    """All supported truthy/falsy/NA spellings map into the 'boolean' EA dtype."""
    parser = all_parsers
    data = "\n".join(
        [
            "a",
            "True",
            "TRUE",
            "true",
            "1",
            "1.0",
            "False",
            "FALSE",
            "false",
            "0",
            "0.0",
            "NaN",
            "nan",
            "NA",
            "null",
            "NULL",
        ]
    )
    result = parser.read_csv(StringIO(data), dtype="boolean")
    expected = DataFrame(
        {
            "a": pd.array(
                [
                    True,
                    True,
                    True,
                    True,
                    True,
                    False,
                    False,
                    False,
                    False,
                    False,
                    None,
                    None,
                    None,
                    None,
                    None,
                ],
                dtype="boolean",
            )
        }
    )
    tm.assert_frame_equal(result, expected)
@pytest.mark.usefixtures("pyarrow_xfail")
def test_delimiter_with_usecols_and_parse_dates(all_parsers):
    # GH#35873
    """Decimal-comma values must survive usecols + parse_dates (python engine)."""
    result = all_parsers.read_csv(
        StringIO('"dump","-9,1","-9,1",20101010'),
        engine="python",
        names=["col", "col1", "col2", "col3"],
        usecols=["col1", "col2", "col3"],
        parse_dates=["col3"],
        decimal=",",
    )
    expected = DataFrame(
        {"col1": [-9.1], "col2": [-9.1], "col3": [Timestamp("2010-10-10")]}
    )
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("thousands", ["_", None])
def test_decimal_and_exponential(
    request, python_parser_only, numeric_decimal, thousands
):
    # GH#31920
    """Decimal commas and exponent notation parse correctly (python engine)."""
    decimal_number_check(request, python_parser_only, numeric_decimal, thousands, None)
@pytest.mark.parametrize("thousands", ["_", None])
@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])
def test_1000_sep_decimal_float_precision(
    request, c_parser_only, numeric_decimal, float_precision, thousands
):
    """Thousands/decimal separators parse the same across every
    ``float_precision`` mode of the C engine, with and without padding."""
    # test decimal and thousand sep handling in across 'float_precision'
    # parsers
    decimal_number_check(
        request, c_parser_only, numeric_decimal, thousands, float_precision
    )
    # Re-run with surrounding whitespace to exercise the trimming path.
    text, value = numeric_decimal
    text = " " + text + " "
    if isinstance(value, str):  # the negative cases (parse as text)
        value = " " + value + " "
    decimal_number_check(
        request, c_parser_only, (text, value), thousands, float_precision
    )
def decimal_number_check(request, parser, numeric_decimal, thousands, float_precision):
    # GH#31920
    """Shared helper: parse one ``(text, expected)`` pair and compare the scalar.

    Marks the case xfail when no thousands separator is configured but the
    input text still contains one.
    """
    value = numeric_decimal[0]
    if thousands is None and value in ("1_,", "1_234,56", "1_234,56e0"):
        request.node.add_marker(
            pytest.mark.xfail(reason=f"thousands={thousands} and sep is in {value}")
        )
    df = parser.read_csv(
        StringIO(value),
        float_precision=float_precision,
        sep="|",
        thousands=thousands,
        decimal=",",
        header=None,
    )
    val = df.iloc[0, 0]
    assert val == numeric_decimal[1]
@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])
def test_skip_whitespace(c_parser_only, float_precision):
    """Stray spaces around tab-separated floats are ignored by the C parser."""
    DATA = """id\tnum\t
1\t1.2 \t
1\t 2.1\t
2\t 1\t
2\t 1.2 \t
"""
    df = c_parser_only.read_csv(
        StringIO(DATA),
        float_precision=float_precision,
        sep="\t",
        header=0,
        dtype={1: np.float64},
    )
    tm.assert_series_equal(df.iloc[:, 1], pd.Series([1.2, 2.1, 1.0, 1.2], name="num"))
@pytest.mark.usefixtures("pyarrow_xfail")
def test_true_values_cast_to_bool(all_parsers):
    # GH#34655
    """Custom true/false tokens combine with the nullable 'boolean' dtype."""
    text = """a,b
yes,xxx
no,yyy
1,zzz
0,aaa
"""
    parser = all_parsers
    result = parser.read_csv(
        StringIO(text),
        true_values=["yes"],
        false_values=["no"],
        dtype={"a": "boolean"},
    )
    expected = DataFrame(
        {"a": [True, False, True, False], "b": ["xxx", "yyy", "zzz", "aaa"]}
    )
    expected["a"] = expected["a"].astype("boolean")
    tm.assert_frame_equal(result, expected)
@pytest.mark.usefixtures("pyarrow_xfail")
@pytest.mark.parametrize("dtypes, exp_value", [({}, "1"), ({"a.1": "int64"}, 1)])
def test_dtype_mangle_dup_cols(all_parsers, dtypes, exp_value):
    # GH#35211
    """``dtype`` may target the mangled name (``a.1``) of a duplicated column,
    and the user-supplied mapping must not be mutated."""
    parser = all_parsers
    data = """a,a\n1,1"""
    dtype_dict = {"a": str, **dtypes}
    # GH#42462
    dtype_dict_copy = dtype_dict.copy()
    result = parser.read_csv(StringIO(data), dtype=dtype_dict)
    expected = DataFrame({"a": ["1"], "a.1": [exp_value]})
    assert dtype_dict == dtype_dict_copy, "dtype dict changed"
    tm.assert_frame_equal(result, expected)
@pytest.mark.usefixtures("pyarrow_xfail")
def test_dtype_mangle_dup_cols_single_dtype(all_parsers):
    # GH#42022
    """A single scalar dtype also applies to mangled duplicate columns."""
    parser = all_parsers
    data = """a,a\n1,1"""
    result = parser.read_csv(StringIO(data), dtype=str)
    expected = DataFrame({"a": ["1"], "a.1": ["1"]})
    tm.assert_frame_equal(result, expected)
@pytest.mark.usefixtures("pyarrow_xfail")
def test_dtype_multi_index(all_parsers):
    # GH 42446
    """dtype keys may be MultiIndex tuples when reading a two-row header."""
    parser = all_parsers
    data = "A,B,B\nX,Y,Z\n1,2,3"
    result = parser.read_csv(
        StringIO(data),
        header=list(range(2)),
        dtype={
            ("A", "X"): np.int32,
            ("B", "Y"): np.int32,
            ("B", "Z"): np.float32,
        },
    )
    expected = DataFrame(
        {
            ("A", "X"): np.int32([1]),
            ("B", "Y"): np.int32([2]),
            ("B", "Z"): np.float32([3]),
        }
    )
    tm.assert_frame_equal(result, expected)
def test_nullable_int_dtype(all_parsers, any_int_ea_dtype):
    # GH 25472
    """Every nullable integer EA dtype accepts missing values while parsing."""
    parser = all_parsers
    dtype = any_int_ea_dtype
    data = """a,b,c
,3,5
1,,6
2,4,"""
    expected = DataFrame(
        {
            "a": pd.array([pd.NA, 1, 2], dtype=dtype),
            "b": pd.array([3, pd.NA, 4], dtype=dtype),
            "c": pd.array([5, 6, pd.NA], dtype=dtype),
        }
    )
    actual = parser.read_csv(StringIO(data), dtype=dtype)
    tm.assert_frame_equal(actual, expected)
@pytest.mark.usefixtures("pyarrow_xfail")
@pytest.mark.parametrize("default", ["float", "float64"])
def test_dtypes_defaultdict(all_parsers, default):
    # GH#41574
    """A defaultdict supplies a fallback dtype for columns not listed."""
    data = """a,b
1,2
"""
    dtype = defaultdict(lambda: default, a="int64")
    parser = all_parsers
    result = parser.read_csv(StringIO(data), dtype=dtype)
    expected = DataFrame({"a": [1], "b": 2.0})
    tm.assert_frame_equal(result, expected)
@pytest.mark.usefixtures("pyarrow_xfail")
def test_dtypes_defaultdict_mangle_dup_cols(all_parsers):
    # GH#41574
    """defaultdict dtypes interact correctly with duplicate-column mangling:
    an explicit entry for ``b.1`` beats the defaultdict fallback."""
    data = """a,b,a,b,b.1
1,2,3,4,5
"""
    dtype = defaultdict(lambda: "float64", a="int64")
    dtype["b.1"] = "int64"
    parser = all_parsers
    result = parser.read_csv(StringIO(data), dtype=dtype)
    expected = DataFrame({"a": [1], "b": [2.0], "a.1": [3], "b.2": [4.0], "b.1": [5]})
    tm.assert_frame_equal(result, expected)
@pytest.mark.usefixtures("pyarrow_xfail")
def test_dtypes_defaultdict_invalid(all_parsers):
    # GH#41574
    """An invalid fallback dtype coming from a defaultdict raises TypeError."""
    data = """a,b
1,2
"""
    dtype = defaultdict(lambda: "invalid_dtype", a="int64")
    parser = all_parsers
    with pytest.raises(TypeError, match="not understood"):
        parser.read_csv(StringIO(data), dtype=dtype)
def test_dtype_backend(all_parsers):
    # GH#36712
    """``dtype_backend='numpy_nullable'`` infers masked EA dtypes per column."""
    parser = all_parsers
    data = """a,b,c,d,e,f,g,h,i,j
1,2.5,True,a,,,,,12-31-2019,
3,4.5,False,b,6,7.5,True,a,12-31-2019,
"""
    result = parser.read_csv(
        StringIO(data), dtype_backend="numpy_nullable", parse_dates=["i"]
    )
    expected = DataFrame(
        {
            "a": pd.Series([1, 3], dtype="Int64"),
            "b": pd.Series([2.5, 4.5], dtype="Float64"),
            "c": pd.Series([True, False], dtype="boolean"),
            "d": pd.Series(["a", "b"], dtype="string"),
            "e": pd.Series([pd.NA, 6], dtype="Int64"),
            "f": pd.Series([pd.NA, 7.5], dtype="Float64"),
            "g": pd.Series([pd.NA, True], dtype="boolean"),
            "h": pd.Series([pd.NA, "a"], dtype="string"),
            "i": pd.Series([Timestamp("2019-12-31")] * 2),
            "j": pd.Series([pd.NA, pd.NA], dtype="Int64"),
        }
    )
    tm.assert_frame_equal(result, expected)
def test_dtype_backend_and_dtype(all_parsers):
    # GH#36712
    """An explicit numpy dtype overrides the nullable backend inference."""
    parser = all_parsers
    data = """a,b
1,2.5
,
"""
    result = parser.read_csv(
        StringIO(data), dtype_backend="numpy_nullable", dtype="float64"
    )
    expected = DataFrame({"a": [1.0, np.nan], "b": [2.5, np.nan]})
    tm.assert_frame_equal(result, expected)
def test_dtype_backend_string(all_parsers, string_storage):
    # GH#36712
    """String columns honour the configured storage (python vs. pyarrow)."""
    pa = pytest.importorskip("pyarrow")
    with pd.option_context("mode.string_storage", string_storage):
        parser = all_parsers
        data = """a,b
a,x
b,
"""
        result = parser.read_csv(StringIO(data), dtype_backend="numpy_nullable")
        if string_storage == "python":
            expected = DataFrame(
                {
                    "a": StringArray(np.array(["a", "b"], dtype=np.object_)),
                    "b": StringArray(np.array(["x", pd.NA], dtype=np.object_)),
                }
            )
        else:
            expected = DataFrame(
                {
                    "a": ArrowStringArray(pa.array(["a", "b"])),
                    "b": ArrowStringArray(pa.array(["x", None])),
                }
            )
        tm.assert_frame_equal(result, expected)
def test_dtype_backend_ea_dtype_specified(all_parsers):
    # GH#491496
    # NOTE(review): the issue number above looks mistyped (six digits) —
    # confirm against the pandas issue tracker.
    """Explicitly requesting an EA dtype works alongside the nullable backend."""
    data = """a,b
1,2
"""
    parser = all_parsers
    result = parser.read_csv(
        StringIO(data), dtype="Int64", dtype_backend="numpy_nullable"
    )
    expected = DataFrame({"a": [1], "b": 2}, dtype="Int64")
    tm.assert_frame_equal(result, expected)
def test_dtype_backend_pyarrow(all_parsers, request):
    # GH#36712
    # NOTE(review): the ``request`` fixture is accepted but unused here —
    # presumably left over from an xfail marker; confirm before removing.
    """``dtype_backend='pyarrow'`` infers ArrowDtype-backed columns."""
    pa = pytest.importorskip("pyarrow")
    parser = all_parsers
    data = """a,b,c,d,e,f,g,h,i,j
1,2.5,True,a,,,,,12-31-2019,
3,4.5,False,b,6,7.5,True,a,12-31-2019,
"""
    result = parser.read_csv(StringIO(data), dtype_backend="pyarrow", parse_dates=["i"])
    expected = DataFrame(
        {
            "a": pd.Series([1, 3], dtype="int64[pyarrow]"),
            "b": pd.Series([2.5, 4.5], dtype="float64[pyarrow]"),
            "c": pd.Series([True, False], dtype="bool[pyarrow]"),
            "d": pd.Series(["a", "b"], dtype=pd.ArrowDtype(pa.string())),
            "e": pd.Series([pd.NA, 6], dtype="int64[pyarrow]"),
            "f": pd.Series([pd.NA, 7.5], dtype="float64[pyarrow]"),
            "g": pd.Series([pd.NA, True], dtype="bool[pyarrow]"),
            "h": pd.Series(
                [pd.NA, "a"],
                dtype=pd.ArrowDtype(pa.string()),
            ),
            "i": pd.Series([Timestamp("2019-12-31")] * 2),
            "j": pd.Series([pd.NA, pd.NA], dtype="null[pyarrow]"),
        }
    )
    tm.assert_frame_equal(result, expected)
def test_ea_int_avoid_overflow(all_parsers):
    # GH#32134
    """Values near the int64 limit parse into Int64 without overflowing."""
    parser = all_parsers
    data = """a,b
1,1
,1
1582218195625938945,1
"""
    result = parser.read_csv(StringIO(data), dtype={"a": "Int64"})
    expected = DataFrame(
        {
            "a": IntegerArray(
                np.array([1, 1, 1582218195625938945]), np.array([False, True, False])
            ),
            "b": 1,
        }
    )
    tm.assert_frame_equal(result, expected)
| [
"noreply@github.com"
] | MarcoGorelli.noreply@github.com |
9872ed3d75f6b59b33b85328df1ac8e4c7ea2837 | 214942f8cf694227d32077accd8aa379c26b4830 | /ooi_instrument_agent/utils.py | 2b2a23ad7e8bce1df9cd47a0b66c4ece8d784abe | [
"Apache-2.0"
] | permissive | oceanobservatories/ooi-instrument-agent | 76084b2c554f195983550aa67d3c86e66a39f525 | e22e4300079468bb99c543cbbf1cb5c8b4a96897 | refs/heads/master | 2021-01-21T14:58:10.847453 | 2016-06-21T18:15:19 | 2016-06-21T18:18:24 | 58,216,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,896 | py | import json
import logging
from flask import request
from werkzeug.exceptions import abort
from ooi_instrument_agent.client import ZmqDriverClient
DEFAULT_TIMEOUT = 90000
log = logging.getLogger(__name__)
def get_client(consul, driver_id):
    """
    Build a ZmqDriverClient for the driver registered under driver_id.
    Resolution goes through Consul; a missing driver aborts with 404
    (raised inside get_host_and_port).
    :param consul: Instance of consul.Consul
    :param driver_id: Reference designator of target driver
    :return: ZmqDriverClient for the resolved host/port
    """
    host, port = get_host_and_port(consul, driver_id)
    return ZmqDriverClient(host, port)
def get_host_and_port(consul, driver_id):
    """
    Resolve the host and port of the driver registered under driver_id.
    :param consul: Instance of consul.Consul
    :param driver_id: Reference designator of target driver
    :return: (host, port) if a passing instance exists, otherwise abort 404
    """
    located = get_service_host_and_port(consul, 'instrument_driver',
                                        tag=driver_id)
    if located is None:
        abort(404)
    return located
def get_service_host_and_port(consul, service_id, tag=None):
    """
    Return (host, port) of the first passing instance of a service.
    Queries Consul's health API for passing instances of service_id
    (optionally filtered by tag) and returns the first entry exposing
    both a node address and a service port.
    :param consul: Instance of consul.Consul
    :param service_id: service_id
    :param tag: tag
    :return: (host, port) if found, otherwise None
    """
    _, healthy = consul.health.service(service_id, tag=tag, passing=True)
    for entry in healthy:
        address = entry.get('Node', {}).get('Address')
        port = entry.get('Service', {}).get('Port')
        if address and port:
            return address, port
    return None
def list_drivers(consul):
    """
    Return the reference designators of all passing instrument drivers.
    Each healthy 'instrument_driver' service instance is tagged with the
    reference designator(s) it serves; this flattens those tags into one
    list.
    :param consul: Instance of consul.Consul
    :return: List of reference designators
    """
    _, healthy = consul.health.service('instrument_driver', passing=True)
    return [tag
            for entry in healthy
            for tag in entry.get('Service', {}).get('Tags', [])]
def get_port_agent(consul, driver_id):
    """
    Fetch the port agent endpoints for the specified driver from Consul.
    :param consul: Instance of consul.Consul
    :param driver_id: Reference designator of target driver
    :return: Dict mapping channel name to {'host': ..., 'port': ...};
             aborts 404 when no channel is registered
    """
    channels = (('data', 'port-agent'),
                ('command', 'command-port-agent'),
                ('sniff', 'sniff-port-agent'),
                ('da', 'da-port-agent'))
    agents = {}
    for name, service_id in channels:
        located = get_service_host_and_port(consul, service_id, tag=driver_id)
        if located is not None:
            agents[name] = {'host': located[0], 'port': located[1]}
    if not agents:
        abort(404)
    return agents
def get_from_request(name, default=None):
    """
    Extract the target parameter from a Flask request object. Handles URL
    query params, form data and JSON bodies; string values that parse as
    JSON are decoded before being returned.
    :param name: Target parameter
    :param default: Default value to return if not found
    :return: Extracted value if found, else default
    """
    def _coerce(raw):
        # Missing -> default; otherwise attempt to JSON-decode the raw
        # value, falling back to it unchanged when it is not valid JSON.
        if raw is None:
            return default
        try:
            return json.loads(raw)
        except (TypeError, ValueError):
            return raw
    if request.args:
        return _coerce(request.args.get(name))
    if request.form:
        return _coerce(request.form.get(name))
    if request.json:
        return request.json.get(name, default)
    return default
def get_timeout():
    """
    Read the 'timeout' parameter from the current request as an int.
    :return: requested timeout, or DEFAULT_TIMEOUT when absent or invalid
    """
    raw = get_from_request('timeout')
    try:
        return int(raw)
    except (TypeError, ValueError):
        return DEFAULT_TIMEOUT
| [
"petercable@gmail.com"
] | petercable@gmail.com |
1a3dcd0ed91952cbd51126d875f0e262109a6f94 | 6244c2efe590494e0870253269e269848f8debe4 | /BooleanNetworks/LEMScores/parseLEMscores_malaria_40hr.py | 272717bc1a9856b72ebf33574f87bff521033bb8 | [] | no_license | breecummins/BooleanNetworks | 53db4dc4e50d5d571344ed55b65efb66a1c4328d | 074409a6dd569b2f0ce3602e7dfda496db08cd01 | refs/heads/master | 2021-09-03T20:31:53.237492 | 2018-01-11T19:55:32 | 2018-01-11T19:55:32 | 117,146,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,021 | py | import parseLEMscores_yeast_mouse as PLS
import parseLEMscores_malaria_20hr as PLS20
from networkbuilder_yeast_mouse import createNetworkFile
import time
def parseLEMfile(bound=0,fname='/Users/bcummins/ProjectData/malaria/wrair2015_v2_fpkm-p1_s19_40hr_highest_ranked_genes/wrair2015_v2_fpkm-p1_s19_90tfs_top25_dljtk_lem_score_table.txt'):
    # returns the source, target, and type of regulation sorted by decreasing LEM score (also returned)
    """Parse a LEM score table into parallel edge lists.

    Only rows whose LEM score (column 6) exceeds ``bound`` are kept.
    Column 3 is expected to look like ``act(GENE)``/``rep(GENE)``: the
    part before '(' is the regulation type, the part inside is the
    source gene.  All four lists are returned sorted by decreasing score.
    """
    source=[]
    type_reg=[]
    target=[]
    lem_score=[]
    with open(fname,'r') as f:
        # Skip the 8-line file header.
        for _ in range(8):
            f.readline()
        for l in f.readlines():
            wordlist=l.split()
            lem = float(wordlist[5])
            if lem>bound:
                target.append(wordlist[0])
                lem_score.append(lem)
                # Split "type(source)" into its two pieces; drop trailing ')'.
                two_words=wordlist[2].split('(')
                type_reg.append(two_words[0])
                source.append(two_words[1][:-1])
    [lem_score,source,target,type_reg] = PLS.sort_by_list_in_reverse(lem_score,[source,target,type_reg])
    return source,target,type_reg,lem_score
def generateResult(threshold=0.1,frontname='malaria40hr_90TF_top25',makegraph=1,saveme=1,onlylargestnetwork=0,LEMfile='/Users/bcummins/ProjectData/malaria/wrair2015_v2_fpkm-p1_s19_40hr_highest_ranked_genes/wrair2015_v2_fpkm-p1_s19_90tfs_top25_dljtk_lem_score_table.txt',new_network_path='',new_network_date='',essential=True):
    """Full pipeline: parse LEM scores, build the regulatory graph, extract
    strongly connected components, and optionally plot and save the network.

    NOTE(review): ``new_network_date`` is accepted but never used — the file
    name uses today's date instead; confirm whether the parameter is stale.
    """
    print 'Parsing file...'
    source,target,type_reg,lem_score=parseLEMfile(threshold,LEMfile)
    # Only genes that appear as BOTH source and target can sit in a cycle.
    genes = sorted(set(source).intersection(target))
    # print genes
    print 'Making outedges...'
    outedges,regulation,LEM_scores=PLS20.makeOutedges(genes,source,target,type_reg,lem_score)
    # print outedges
    print 'Extracting strongly connected components...'
    grouped_scc_gene_inds=PLS20.strongConnectIndices(outedges)
    scc_genenames=[[genes[g] for g in G] for G in grouped_scc_gene_inds ]
    # print scc_genes
    if onlylargestnetwork:
        # Keep only the SCC with the most genes.
        L = [len(g) for g in grouped_scc_gene_inds]
        ind=L.index(max(L))
        grouped_scc_gene_inds = grouped_scc_gene_inds[ind]
        flat_scc_gene_inds = grouped_scc_gene_inds[:]
        scc_genenames = scc_genenames[ind]
        flat_scc_genenames = scc_genenames[:]
    else:
        # Flatten every SCC into a single gene list.
        flat_scc_gene_inds= [g for G in grouped_scc_gene_inds for g in G]
        flat_scc_genenames = [s for S in scc_genenames for s in S]
    outedges,regulation,LEM_scores=PLS20.pruneOutedges(flat_scc_gene_inds,outedges,regulation,LEM_scores)
    if makegraph:
        print 'Making graph for {} nodes and {} edges....'.format(len(flat_scc_gene_inds),len([o for oe in outedges for o in oe]))
        PLS.makeGraph(flat_scc_genenames,outedges,regulation,name='{}_graph_thresh{}.pdf'.format(frontname,str(threshold).replace('.','-')))
    if saveme:
        # File name encodes dimension, date, threshold and essentialness.
        createNetworkFile(flat_scc_genenames,outedges,regulation,new_network_path+'{}D_'.format(len(flat_scc_genenames))+time.strftime("%Y_%m_%d")+'_{}_T{}'.format(frontname,str(threshold).replace('.','-')) + '_essential'*essential +'.txt',[essential]*len(flat_scc_genenames))
if __name__ == "__main__":
    # Previous run: the 90-TF dataset over several thresholds.
    # frontname='malaria40hr_90TF_top25'
    # new_network_path = '/Users/bcummins/GIT/DSGRN/networks/'
    # LEMfile='/Users/bcummins/ProjectData/malaria/wrair2015_v2_fpkm-p1_s19_40hr_highest_ranked_genes/wrair2015_v2_fpkm-p1_s19_90tfs_top25_dljtk_lem_score_table.txt'
    # for threshold in [0.01, 0.0075, 0.005, 0.001]:
    #     generateResult(threshold,frontname,1,1,1,LEMfile,new_network_path,True)
    # Current run: 50-TF dataset, graph only (saveme=0), single threshold.
    frontname='malaria40hr_50TF_top25'
    new_network_path = '/Users/bcummins/GIT/DSGRN/networks/'
    LEMfile='/Users/bcummins/ProjectData/malaria/wrair2015_v2_fpkm-p1_s19_40hr_highest_ranked_genes/wrair2015_v2_fpkm-p1_s19_50tfs_top25_dljtk_lem_score_table.txt'
    makegraph=1
    saveme=0
    onlylargestnetwork=0
    essential=True
    for threshold in [0.02]:
        generateResult(threshold,frontname,makegraph,saveme,onlylargestnetwork,LEMfile,new_network_path,essential)
| [
"breecummins@gmail.com"
] | breecummins@gmail.com |
9a670955cc54404b943dfc93a7b7692e7f24ee44 | 00b5ad360284adc06f7e7ca9b2d1c2d3a0edd6f9 | /recycle/CRF-C-LR.py | 5571d01c9c38fafa50e8533efab0bdcfe00946ba | [] | no_license | ShenDezhou/CBLSTM | e09d36f609df2b34ace2ae8085d2232039838675 | b5ac4714f8ea14cf2bfd6ce6033eb697ef078686 | refs/heads/master | 2021-04-16T19:47:44.758194 | 2020-07-20T06:21:08 | 2020-07-20T06:21:08 | 249,381,106 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,201 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2019年5月17日
@author: Administrator
'''
from sklearn.feature_extraction.text import CountVectorizer
import os
import codecs
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import pkuseg
class Sentiment(object):
    """Hotel-review sentiment classifier (bag-of-words + logistic regression).

    ``load_model`` trains from labelled review files on disk and stores the
    fitted vectorizer, model and held-out accuracy as CLASS attributes, so
    all users of the class share one trained model.
    """
    # Shared trained state, populated by load_model().
    vectorizer=None
    log_model=None
    acc_score=None
    def __init__(self):
        pass
    @classmethod
    def load_model(cls_obj):
        """Read positive/negative review files, train and evaluate the model.

        Directory names are Chinese for "positive" and "negative".  Reviews
        are tokenised with pkuseg's 'web' model before vectorisation.
        Returns the class itself with vectorizer/log_model/acc_score set.
        """
        data = []
        data_labels = []
        # Positive reviews -> label 'pos'.
        for filename in os.listdir(u"./hotelcomment/正面"):
            if filename.endswith(".txt"):
                with codecs.open("./hotelcomment/正面/"+filename, 'r', encoding='utf-8') as f:
                    text = f.read()
                    data.append(text)
                    data_labels.append('pos')
                continue
            else:
                continue
        # Negative reviews -> label 'neg'.
        for filename in os.listdir(u"./hotelcomment/负面"):
            if filename.endswith(".txt"):
                with codecs.open(u"./hotelcomment/负面/"+filename, 'r', encoding='utf-8') as f:
                    text = f.read()
                    data.append(text)
                    data_labels.append('neg')
                continue
            else:
                continue
        print(len(data), len(data_labels))
        # Chinese word segmentation drives the CountVectorizer tokenisation.
        seg = pkuseg.pkuseg(model_name='web')
        cls_obj.vectorizer = CountVectorizer(
            analyzer = lambda text: seg.cut(text),
            lowercase = False,
        )
        features = cls_obj.vectorizer.fit_transform(
            data
        )
        features_nd = features.toarray()
        # Fixed random_state keeps the train/test split reproducible.
        X_train, X_test, y_train, y_test  = train_test_split(
                features_nd, 
                data_labels,
                train_size=0.80, 
                random_state=1234)
        cls_obj.log_model = LogisticRegression()
        cls_obj.log_model = cls_obj.log_model.fit(X=X_train, y=y_train)
        y_pred = cls_obj.log_model.predict(X_test)
        cls_obj.acc_score=accuracy_score(y_test, y_pred)
        return cls_obj
"bangtech@sina.com"
] | bangtech@sina.com |
f91f09dca1cd6719bb83aa81dbb34abf79e48761 | f0b75bd94f133a13f469f429a696f26be3be9862 | /week_4/.history/class_exercise1_20200217114534.py | 797ca5e3e1d3292edc31ea02837aa9efe73bbf2a | [] | no_license | dechavez4/Python_handin_assignments | 023350fabd212cdf2a4ee9cd301306dc5fd6bea0 | 82fd8c991e560c18ecb2152ea5a8fc35dfc3c608 | refs/heads/master | 2023-01-11T23:31:27.220757 | 2020-05-22T10:33:56 | 2020-05-22T10:33:56 | 237,179,899 | 0 | 0 | null | 2022-12-30T20:14:04 | 2020-01-30T09:30:16 | Python | UTF-8 | Python | false | false | 1,235 | py | import numpy as np
# 4x5 table of the values 10..29 used for the slicing exercises below.
a = np.arange(10,30).reshape(4,5)
#exercise 1 table
# Each name selects one highlighted region of the table.
yellow = a[0,0]
green = a[:3, 2]
teal = a[:, (1,3)]
blue = a[::2, 4]
red = a[0, 1:4]
#print('yellow= ', yellow, 'green= ', green, 'blue= ', blue, 'teal=', teal, 'red=', red)
#exercise 2 cube:
c = np.arange(0, 27).reshape((3, 3, 3)) # = (z, y, x)
slice1 = c[1, 1, :]
slice2 = c[:, 1 , 0 ]
slice3 = c[0, :, 2]
#print('slice1 = ', slice1, 'slice2 = ', slice2, 'slice3 = ', slice3)
#exercise 3 masking:
data = np.arange(1,101).reshape(10,10)
even = data[data % 2 == 0]
# np.where returns a tuple of index arrays, usable for fancy indexing.
sixOnly = np.where(data % 10 == 6)
six = data[sixOnly]
#print('even =', even, 'sixOnly', six)
#exercise 4 numpy and csv:
# Columns: year, district (bydel), age, nationality code (statkode), count.
filename = 'befkbhalderstatkode.csv'
bef_stats_df = np.genfromtxt(filename, delimiter=',', dtype=np.uint, skip_header=1)
dd = bef_stats_df
mask_year_2015 = dd[:, 0] == 2015
mask_german = dd[:,3] == 5180
# Age <= 0 selects infants; column 4 holds the person count.
german_children_mask = (mask_year_2015 & mask_german & (dd[:, 2] <= 0))
german_children = np.sum(dd[(german_children_mask)][:, 4])
#print(german_children)
def showNum(arr, bydel, alder, statkode, data=None):
    """Print (and return) the census rows matching the given filters.

    Fixes the original mask, which compared the nationality column
    (``dd[:, 3]``) against ``bydel``, used ``dd[:, 1] <= bydel`` instead of
    an equality test, and never used ``statkode`` at all.

    :param arr: year to select (column 0)
    :param bydel: district code to select (column 1)
    :param alder: maximum age to include (column 2)
    :param statkode: nationality code to select (column 3)
    :param data: optional table to filter; defaults to the module-level dd
    :return: the matching rows
    """
    if data is None:
        data = dd
    parts = ((data[:, 0] == arr) & (data[:, 1] == bydel)
             & (data[:, 2] <= alder) & (data[:, 3] == statkode))
    partsData = data[parts]
    print(partsData)
    return partsData
showNum(2015, 2, 0, 5180) | [
"chavezgamingv2@hotmail.com"
] | chavezgamingv2@hotmail.com |
e976cba53c1c34192f3fe8310a36c701a6966fc2 | fcdfe976c9ed60b18def889692a17dc18a8dd6d7 | /python/geometry/polygon/line_line_intersect.py | bb815c566278010dd4858fcbc86ec08f82250541 | [] | no_license | akihikoy/ay_test | 4907470889c9bda11cdc84e8231ef3156fda8bd7 | a24dfb720960bfedb94be3b4d147e37616e7f39a | refs/heads/master | 2023-09-02T19:24:47.832392 | 2023-08-27T06:45:20 | 2023-08-27T06:45:20 | 181,903,332 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 25 | py | ../line_line_intersect.py | [
"info@akihikoy.net"
] | info@akihikoy.net |
bf6d916adf0631e19932c2e5f3d01cddfc18a72e | ee409ec2e421bdac5988fcbe6592b05824b51d58 | /google-datacatalog-qlik-connector/tests/google/datacatalog_connectors/qlik/scrape/engine_api_dimensions_helper_test.py | da581d76cc0bbe1954ca4808e0652eabc188a810 | [
"Apache-2.0",
"Python-2.0"
] | permissive | GoogleCloudPlatform/datacatalog-connectors-bi | 7b11ed25856e83c8bd4b701dd836e0d20815caf7 | 58cc57e12632cbd1e237b3d6930e519333c51f4e | refs/heads/master | 2023-04-01T14:27:24.548547 | 2022-02-12T09:55:56 | 2022-02-12T09:55:56 | 259,464,922 | 34 | 18 | Apache-2.0 | 2022-02-12T09:55:57 | 2020-04-27T21:51:45 | Python | UTF-8 | Python | false | false | 6,322 | py | #!/usr/bin/python
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import unittest
from unittest import mock
from google.datacatalog_connectors.qlik.scrape import \
engine_api_dimensions_helper
from . import scrape_ops_mocks
class EngineAPIDimensionsHelperTest(unittest.TestCase):
__SCRAPE_PACKAGE = 'google.datacatalog_connectors.qlik.scrape'
__BASE_CLASS = f'{__SCRAPE_PACKAGE}.base_engine_api_helper' \
f'.BaseEngineAPIHelper'
__HELPER_CLASS = f'{__SCRAPE_PACKAGE}.engine_api_dimensions_helper' \
f'.EngineAPIDimensionsHelper'
    def setUp(self):
        """Build a helper pointed at a dummy server with a mocked auth cookie."""
        self.__helper = engine_api_dimensions_helper.EngineAPIDimensionsHelper(
            server_address='https://test-server', auth_cookie=mock.MagicMock())
@mock.patch(f'{__HELPER_CLASS}._EngineAPIDimensionsHelper__get_dimensions',
lambda *args: None)
@mock.patch(f'{__BASE_CLASS}._run_until_complete')
def test_get_dimensions_should_raise_unknown_exception(
self, mock_run_until_complete):
mock_run_until_complete.side_effect = Exception
self.assertRaises(Exception, self.__helper.get_dimensions, 'app_id')
@mock.patch(f'{__HELPER_CLASS}._EngineAPIDimensionsHelper__get_dimensions',
lambda *args: None)
@mock.patch(f'{__BASE_CLASS}._run_until_complete')
def test_get_dimensions_should_return_empty_list_on_timeout(
self, mock_run_until_complete):
mock_run_until_complete.side_effect = asyncio.TimeoutError
dimensions = self.__helper.get_dimensions('app-id')
self.assertEqual(0, len(dimensions))
# BaseEngineAPIHelper._hold_websocket_communication is purposefully not
# mocked in this test case in order to simulate a full send/reply scenario
# with replies representing an App with Dimensions. Maybe it's worth
# refactoring it in the future to mock that method, and the private async
# ones from EngineAPIDimensionsHelper as well, thus testing in a more
# granular way.
@mock.patch(f'{__BASE_CLASS}._generate_message_id')
@mock.patch(f'{__BASE_CLASS}._send_get_all_infos_message')
@mock.patch(f'{__BASE_CLASS}._BaseEngineAPIHelper__send_open_doc_message')
@mock.patch(f'{__BASE_CLASS}._connect_websocket',
new_callable=scrape_ops_mocks.AsyncContextManager)
def test_get_dimensions_should_return_list_on_success(
self, mock_websocket, mock_send_open_doc, mock_send_get_all_infos,
mock_generate_message_id):
mock_send_open_doc.return_value = asyncio.sleep(delay=0, result=1)
mock_send_get_all_infos.return_value = asyncio.sleep(delay=0, result=2)
mock_generate_message_id.side_effect = [3, 4]
websocket_ctx = mock_websocket.return_value.__enter__.return_value
websocket_ctx.set_itr_break(0.25)
websocket_ctx.set_data([
{
'id': 1,
'result': {
'qReturn': {
'qHandle': 1,
},
},
},
{
'id': 2,
'result': {
'qInfos': [{
'qId': 'dimension-id',
'qType': 'dimension'
}],
},
},
{
'id': 3,
'result': {
'qReturn': {
'qHandle': 2,
},
},
},
{
'id': 4,
'result': {
'qProp': [{
'qInfo': {
'qId': 'dimension-id',
},
}],
},
},
])
dimensions = self.__helper.get_dimensions('app-id')
self.assertEqual(1, len(dimensions))
self.assertEqual('dimension-id', dimensions[0].get('qInfo').get('qId'))
mock_send_open_doc.assert_called_once()
mock_send_get_all_infos.assert_called_once()
# BaseEngineAPIHelper._hold_websocket_communication is purposefully not
# mocked in this test case in order to simulate a full send/reply scenario
# with replies representing an App with no Dimensions. Maybe it's worth
# refactoring it in the future to mock that method, and the private async
# ones from EngineAPIDimensionsHelper as well, thus testing in a more
# granular way.
@mock.patch(f'{__BASE_CLASS}._send_get_all_infos_message')
@mock.patch(f'{__BASE_CLASS}._BaseEngineAPIHelper__send_open_doc_message')
@mock.patch(f'{__BASE_CLASS}._connect_websocket',
new_callable=scrape_ops_mocks.AsyncContextManager)
def test_get_dimensions_should_return_empty_list_on_none_available(
self, mock_websocket, mock_send_open_doc, mock_send_get_all_infos):
mock_send_open_doc.return_value = asyncio.sleep(delay=0, result=1)
mock_send_get_all_infos.return_value = asyncio.sleep(delay=0, result=2)
websocket_ctx = mock_websocket.return_value.__enter__.return_value
websocket_ctx.set_itr_break(0.25)
websocket_ctx.set_data([
{
'id': 1,
'result': {
'qReturn': {
'qHandle': 1,
},
},
},
{
'id': 2,
'result': {
'qInfos': [],
},
},
])
dimensions = self.__helper.get_dimensions('app-id')
self.assertEqual(0, len(dimensions))
mock_send_open_doc.assert_called_once()
mock_send_get_all_infos.assert_called_once()
| [
"noreply@github.com"
] | GoogleCloudPlatform.noreply@github.com |
6c7b94252cf23796c1c645176f35159465ceabce | 33cf73bf603ffe09ad763fca4103e979ed50a4bc | /service_api/cd/NightWorkSpider.py | 43e27efb13f5b1eaca5c928bbe078a11de05959a | [] | no_license | daddvted/excavat0r | f73d05670766d5f47ef5d7e443289851fc172906 | 8c2c56b6395bede4135fd859b1338831345054b6 | refs/heads/master | 2022-06-09T11:51:34.461893 | 2018-12-12T10:06:42 | 2018-12-12T10:06:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,876 | py | """
夜间施工查询
URL: http://www.cdcc.gov.cn/QualitySafeShow/NightWorkList.aspx
"""
import re
import time
import random
import requests
import lxml.html
import mysql.connector
from urllib.parse import urlencode
class NightWorkSpider(object):
    """Scrapes night-construction permit records from cdcc.gov.cn into MySQL.

    Flow: crawl() fetches page 1 (GET), crawl_step2() walks the remaining
    pages via ASP.NET postbacks, and final_crawl() fetches each detail page
    and stores one row per record via save2db().
    """

    # One of these is picked at random per request so the crawler looks
    # like different browsers.
    USER_AGENTS = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.36",
        "Mozilla/5.0 (Linux; U; Android 4.4.4; zh-cn; MI NOTE LTE Build/KTU84P) AppleWebKit/533.1 (KHTML, like Gecko)Version/4.0 MQQBrowser/5.4 TBS/025489 Mobile Safari/533.1 MicroMessenger/6.3.13.49_r4080b63.740 NetType/cmnet Language/zh_CN",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 9_2_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13D15 MicroMessenger/6.3.13 NetType/WIFI Language/zh_CN",
        "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Shuame; .NET4.0C; .NET4.0E)",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Maxthon/4.9.1.1000 Chrome/39.0.2146.0 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.36",
        "Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13",
        "Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)",
        "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
        "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)",
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)",
        "Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8",
        "Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)",
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)",
        "Googlebot/2.1 (http://www.googlebot.com/bot.html)",
        "Opera/9.20 (Windows NT 6.0; U; en)",
        "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)",
        "Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0",
    ]

    # MySQL connection settings (NOTE(review): credentials are hard-coded).
    config = {
        'user': 'root',
        'password': 'hello',
        'host': '192.168.86.86',
        'port': '3306',
        'database': 'service_cd',
        'raise_on_warnings': True,
    }

    # List page and base URL for relative detail-page links.
    URL = "http://www.cdcc.gov.cn/QualitySafeShow/NightWorkList.aspx"
    BASE_URL = "http://www.cdcc.gov.cn/QualitySafeShow/"

    def __init__(self):
        self.total_page = 0
        # Detail-page URLs collected while paging through the list.
        self.urls = []
        # ASP.NET postback state carried between page requests.
        self.__VIEWSTATE = ""
        self.__EVENTVALIDATION = ""
        self.__EVENTTARGET = ""
        self.cookie = ""
        # Crawl date stamp (YYYYMMDD) stored with each record.
        self.crawl_date = time.strftime('%Y%m%d', time.localtime())
        # Init mysql
        self.conn = mysql.connector.connect(**self.config)
        self.cursor = self.conn.cursor()

    def save2db(self, data):
        """Insert one scraped record (dict of column values) into `nightwork`."""
        template = "INSERT INTO nightwork(unit, project, part, start, end, addr, crawl_date) " \
                   "VALUES (%(unit)s, %(project)s, %(part)s, %(start)s, %(end)s, %(addr)s, %(crawl_date)s)"
        self.cursor.execute(template, data)
        self.conn.commit()

    # 1st crawl, get total
    def crawl(self):
        """Fetch page 1: collect its detail links, session cookie, page count,
        and the postback tokens needed by crawl_step2()."""
        print("crawling page 1")
        headers = {
            "User-Agent": random.choice(self.USER_AGENTS)
        }
        browser = requests.get(self.URL, headers=headers)
        if browser.status_code == 200:
            # Reuse the ASP.NET session for subsequent postbacks.
            session = browser.cookies.get("ASP.NET_SessionId")
            self.cookie = "ASP.NET_SessionId=" + session
            html = lxml.html.fromstring(browser.text)
            # Crawl urls of 1st page
            links = html.xpath('//table[@id="DgList"]/tr/td[2]/a')
            for link in links:
                self.urls.append(self.BASE_URL + str(link.attrib["href"]))
            page_div = html.xpath('//div[@id="Navigate_divPanel"]/span')
            if len(page_div):
                # Extract the total page count from the navigation text.
                tmp = str(page_div[0].text_content())
                match = re.findall(r'(\d+)', tmp)
                self.total_page = int(match[0])
                view_state_div = html.xpath('//input[@id="__VIEWSTATE"]')
                self.__VIEWSTATE = view_state_div[0].attrib["value"]
                event_valid_div = html.xpath('//input[@id="__EVENTVALIDATION"]')
                self.__EVENTVALIDATION = event_valid_div[0].attrib["value"]
                # "Next page" button target for the postback.
                self.__EVENTTARGET = "Navigate$btnNavNext"
                self.crawl_step2()
            # Only 1 page, start final_crawl()
            else:
                self.final_crawl()
        else:
            # Best-effort fallback: continue with whatever state we have.
            print("Error while crawling page 1")
            self.crawl_step2()

    def crawl_step2(self):
        """POST the 'next page' postback for pages 2..total_page, collecting
        detail links and refreshing the ASP.NET tokens each time."""
        for p in range(2, self.total_page + 1):
            data = {
                "__VIEWSTATE": self.__VIEWSTATE,
                "__EVENTVALIDATION": self.__EVENTVALIDATION,
                "__EVENTTARGET": self.__EVENTTARGET,
            }
            print("crawling page {}".format(p))
            headers = {
                "Content-Type": "application/x-www-form-urlencoded",
                "User-Agent": random.choice(self.USER_AGENTS),
                "Cookie": self.cookie
            }
            browser = requests.post(self.URL, headers=headers, data=urlencode(data))
            if browser.status_code == 200:
                html = lxml.html.fromstring(browser.text)
                # Tokens change on every response; store them for the next POST.
                view_state_div = html.xpath('//input[@id="__VIEWSTATE"]')
                self.__VIEWSTATE = view_state_div[0].attrib["value"]
                event_valid_div = html.xpath('//input[@id="__EVENTVALIDATION"]')
                self.__EVENTVALIDATION = event_valid_div[0].attrib["value"]
                self.__EVENTTARGET = "Navigate$btnNavNext"
                links = html.xpath('//table[@id="DgList"]/tr/td[2]/a')
                for link in links:
                    self.urls.append(self.BASE_URL + str(link.attrib["href"]))
                self.final_crawl()
            else:
                print("Error while crawling page {}".format(p))
                self.final_crawl()

    def final_crawl(self):
        """Fetch every collected detail page, parse its fields, and store
        one row per record via save2db()."""
        for url in self.urls:
            print("Crawling url: {}".format(url))
            headers = {
                "User-Agent": random.choice(self.USER_AGENTS)
            }
            browser = requests.get(url, headers=headers)
            if browser.status_code == 200:
                html = lxml.html.fromstring(browser.text)
                # Detail table: second cell of each row holds the value.
                tds = html.xpath('//table[@id="viewTable"]/tr/td[2]')
                data = {
                    "unit": str(tds[0].text_content()),
                    "project": str(tds[1].text_content()),
                    "part": str(tds[2].text_content()),
                    "start": str(tds[3].text_content()),
                    "end": str(tds[4].text_content()),
                    "addr": str(tds[5].text_content()),
                    "crawl_date": self.crawl_date
                }
                self.save2db(data)
            else:
                print("Error while crawling url: {}".format(url))
if __name__ == "__main__":
    # Run a full crawl, then release the database resources.
    spider = NightWorkSpider()
    spider.crawl()
    spider.cursor.close()
    spider.conn.close()
| [
"ski2per@163.com"
] | ski2per@163.com |
b7a15ee772f12d767de80ae61a8dfe0147385d56 | 2e00398c4b77ab6e1996dbbefa167e13a8ad40a9 | /users/apps.py | f4ebd07740e02a9ffa3ee350068584103561efaf | [] | no_license | cleliofavoccia/PurBeurre | d754b83ed28b1240447243f149080058a60ccdfb | e2b5a51fbd91412e68ddb1c3c785713c7988cc41 | refs/heads/main | 2023-03-20T11:06:32.466520 | 2021-03-12T16:02:22 | 2021-03-12T16:02:22 | 331,650,830 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | """Manage users app"""
from django.apps import AppConfig
class UserConfig(AppConfig):
    """Django application configuration for the ``users`` app."""
    name = 'users'
| [
"favoccia.c@live.fr"
] | favoccia.c@live.fr |
fc3a3a852a14c61bf200443577da2911fd89726f | d954e2f74d1186c8e35be8ea579656513d8d3b98 | /rllib/utils/metrics/learner_info.py | c3d0672ed9b43cee73cde9f6bbcb3ab1634805d6 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | vakker/ray | a865de214e60f9e62d61c03ae7ce55ad6030f84c | de238dd626a48a16c8b3cd006f3482db75f63a83 | refs/heads/master | 2023-01-23T22:30:44.839942 | 2022-10-23T01:05:48 | 2022-10-23T01:05:48 | 171,845,804 | 0 | 1 | Apache-2.0 | 2023-01-14T08:01:04 | 2019-02-21T09:54:36 | Python | UTF-8 | Python | false | false | 4,034 | py | from collections import defaultdict
import numpy as np
import tree # pip install dm_tree
from typing import Dict
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.utils.typing import PolicyID
# Instant metrics (keys for metrics.info).
LEARNER_INFO = "learner"
# By convention, metrics from optimizing the loss can be reported in the
# `grad_info` dict returned by learn_on_batch() / compute_grads() via this key.
LEARNER_STATS_KEY = "learner_stats"
@DeveloperAPI
class LearnerInfoBuilder:
    """Accumulates per-policy learn_on_batch results (possibly from several
    GPU towers and several SGD minibatches) and reduces them into one
    learner-info dict in finalize()."""

    def __init__(self, num_devices: int = 1):
        # Number of GPU towers whose "tower_N" sub-dicts must be reduced.
        self.num_devices = num_devices
        # Maps policy ID -> list of (tower-reduced) result dicts, one per
        # learn_on_batch call.
        self.results_all_towers = defaultdict(list)
        self.is_finalized = False

    def add_learn_on_batch_results(
        self,
        results: Dict,
        policy_id: PolicyID = DEFAULT_POLICY_ID,
    ) -> None:
        """Adds a policy.learn_on_(loaded)?_batch() result to this builder.

        Args:
            results: The results returned by Policy.learn_on_batch or
                Policy.learn_on_loaded_batch.
            policy_id: The policy's ID, whose learn_on_(loaded)_batch method
                returned `results`.
        """
        assert (
            not self.is_finalized
        ), "LearnerInfo already finalized! Cannot add more results."

        # No towers: Single CPU.
        if "tower_0" not in results:
            self.results_all_towers[policy_id].append(results)
        # Multi-GPU case:
        else:
            # Reduce the per-tower sub-dicts into a single structure first.
            self.results_all_towers[policy_id].append(
                tree.map_structure_with_path(
                    lambda p, *s: _all_tower_reduce(p, *s),
                    *(
                        results.pop("tower_{}".format(tower_num))
                        for tower_num in range(self.num_devices)
                    )
                )
            )
            # Merge any non-tower (already aggregated) keys on top.
            for k, v in results.items():
                if k == LEARNER_STATS_KEY:
                    for k1, v1 in results[k].items():
                        self.results_all_towers[policy_id][-1][LEARNER_STATS_KEY][
                            k1
                        ] = v1
                else:
                    self.results_all_towers[policy_id][-1][k] = v

    def add_learn_on_batch_results_multi_agent(
        self,
        all_policies_results: Dict,
    ) -> None:
        """Adds multiple policy.learn_on_(loaded)?_batch() results to this builder.

        Args:
            all_policies_results: The results returned by all Policy.learn_on_batch or
                Policy.learn_on_loaded_batch wrapped as a dict mapping policy ID to
                results.
        """
        # "batch_count" is bookkeeping, not a policy ID -- skip it.
        for pid, result in all_policies_results.items():
            if pid != "batch_count":
                self.add_learn_on_batch_results(result, policy_id=pid)

    def finalize(self):
        """Reduce all accumulated results into one {policy_id: stats} dict.

        After this call, no further results may be added.
        """
        self.is_finalized = True

        info = {}
        for policy_id, results_all_towers in self.results_all_towers.items():
            # Reduce mean across all minibatch SGD steps (axis=0 to keep
            # all shapes as-is).
            info[policy_id] = tree.map_structure_with_path(
                _all_tower_reduce, *results_all_towers
            )
        return info
def _all_tower_reduce(path, *tower_data):
"""Reduces stats across towers based on their stats-dict paths."""
# TD-errors: Need to stay per batch item in order to be able to update
# each item's weight in a prioritized replay buffer.
if len(path) == 1 and path[0] == "td_error":
return np.concatenate(tower_data, axis=0)
elif tower_data[0] is None:
return None
if isinstance(path[-1], str):
# Min stats: Reduce min.
if path[-1].startswith("min_"):
return np.nanmin(tower_data)
# Max stats: Reduce max.
elif path[-1].startswith("max_"):
return np.nanmax(tower_data)
if np.isnan(tower_data).all():
return np.nan
# Everything else: Reduce mean.
return np.nanmean(tower_data)
| [
"noreply@github.com"
] | vakker.noreply@github.com |
3e034c1f69961ac0240eb97d3fa99c041e1ea2e1 | c9803fb67b885214f138a805990d77cf4d714818 | /proof_of_work/deep_q/v0/deepqagentv0.py | 8b0e078ffc9d708f5a50856f8a53fc868448bd46 | [
"MIT"
] | permissive | michaelneuder/parkes_lab_fa19 | e68247ad5253d54f4d6074593a0e63fe61fcfc18 | 18d9f564e0df9c17ac5d54619ed869d778d4f6a4 | refs/heads/master | 2020-07-12T10:32:15.585380 | 2020-01-26T21:45:05 | 2020-01-26T21:45:05 | 204,792,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,374 | py | import copy
from environmentv0 import Environment
from keras.models import clone_model
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
import matplotlib.pyplot as plt
plt.style.use('seaborn-muted')
import numpy as np
import progressbar
import time
import util
np.random.seed(0)
class DeepQLearningAgent(object):
    """Deep Q-learning agent for a selfish-mining-style MDP.

    Keeps a value network and a periodically-synced target network, an
    experience-replay buffer, and epsilon-greedy exploration with a
    linearly decaying rate.
    """

    def __init__(self, discount, alpha, T, rho):
        # MDP
        self.alpha = alpha
        self.T = T
        self.rho = rho
        # Epsilon-greedy: starts fully random, decays linearly per action
        # down to min_exploration_rate.
        self.exploration_rate = 1
        self.exploration_decrease = float(1e-5)
        self.min_exploration_rate = 0.1
        # deep q
        self.learning_rate = 0.001
        self.value_model = util.createModel(self.learning_rate)
        self.target_model = clone_model(self.value_model)
        self.target_model.set_weights(self.value_model.get_weights())
        self.learning_update_count = 0
        self.max_learning_steps = int(4e4)
        # Experience-replay buffer (list of memory dicts, FIFO-bounded).
        self.memories = []
        self.training_memory_count = 32
        self.discount = discount
        self.update_target_frequency = 1000
        self.max_memory_count = 10000
        self.min_memory_count_learn = 1000
        # environment
        self.env = Environment(self.alpha, self.T)
        # visualization
        self.states_visited = np.zeros((self.T+1, self.T+1))
        self.steps_before_done = []
        self.last_50_steps = []
        self.snyc_points = []
        self.timing_between_updates = []
        self.net_training_time = []
        # timing
        self.last_target_net_clone = time.time()

    def chooseAction(self, current_state):
        """Epsilon-greedy action selection: 0-2 random, else argmax of the
        value net's Q-estimates for current_state."""
        # explore based on number of visits to that state.
        self.exploration_rate -= self.exploration_decrease
        current_explore_rate = self.exploration_rate
        if self.exploration_rate < self.min_exploration_rate:
            current_explore_rate = self.min_exploration_rate
        if np.random.uniform() < current_explore_rate:
            return np.random.randint(low=0, high=3)
        return np.argmax(self.value_model.predict(util.prepareInput(current_state)))

    def syncModels(self):
        """Copy the value network's weights into the target network."""
        self.target_model = clone_model(self.value_model)
        self.target_model.set_weights(self.value_model.get_weights())

    def learn(self, iterations=10000):
        """Run trials until max_learning_steps gradient updates have happened.

        NOTE(review): the `iterations` parameter is currently unused; the
        stopping criterion is self.max_learning_steps.
        """
        start_time = time.time()
        while self.learning_update_count < self.max_learning_steps:
            self.runTrial()
        print("total time {:.04f} s".format(time.time() - start_time))

    def runTrial(self):
        """Play one episode: act, store memories, train, and periodically
        sync/checkpoint the target network."""
        done = False
        self.env.reset()
        step_counter = 0
        while (not done) and (self.learning_update_count < self.max_learning_steps):
            step_counter += 1
            current_state = self.env.current_state
            self.states_visited[current_state] += 1
            # take action
            action = self.chooseAction(current_state)
            new_state, reward, done = self.env.takeAction(action)
            reward_value = util.evalReward(self.rho, reward)
            # creating a new memory
            memory = dict({
                'current_state' : current_state,
                'action' : action,
                'reward' : reward_value,
                'new_state' : new_state,
                'done' : done
            })
            self.memories.append(memory)
            # training network
            if len(self.memories) > self.min_memory_count_learn:
                start_training = time.time()
                self.trainNeuralNet()
                self.net_training_time.append(time.time() - start_training)
                self.learning_update_count += 1
            # keep memory list finite
            if len(self.memories) > self.max_memory_count:
                self.memories.pop(0)
            # update models
            if self.learning_update_count % self.update_target_frequency == 0:
                print('global step: {}. syncing models'.format(self.learning_update_count))
                update_time = time.time() - self.last_target_net_clone
                self.timing_between_updates.append(update_time)
                print('    last synced: {:.04f} s ago'.format(update_time))
                updates_remaining = (self.max_learning_steps - self.learning_update_count)/ self.update_target_frequency
                print('    eta: {:.02f} s'.format(updates_remaining * update_time))
                print('*'*30)
                self.syncModels()
                # Checkpoint the value net at every sync point.
                self.value_model.save('saved_models/value_net_iter{0:06d}.h5'.format(self.learning_update_count))
                self.snyc_points.append(self.learning_update_count)
                self.last_50_steps.append(np.mean(self.steps_before_done[-50:]))
                self.last_target_net_clone = time.time()
        self.steps_before_done.append(step_counter)

    def trainNeuralNet(self):
        """One SGD step on a random minibatch sampled from the replay buffer."""
        memory_subset_indeces = np.random.randint(low=0, high=len(self.memories), size=self.training_memory_count)
        memory_subset = [self.memories[i] for i in memory_subset_indeces]
        rewards = []
        current_states = []
        new_states = []
        actions = []
        dones = []
        for memory in memory_subset:
            rewards.append(memory['reward'])
            current_states.append(memory['current_state'])
            new_states.append(memory['new_state'])
            actions.append(memory['action'])
            dones.append(memory['done'])
        current_state_predictions = np.zeros((len(current_states), 3))
        new_states_prepped = util.prepareInputs(new_states)
        # NOTE(review): the target-network bootstrap below is commented out
        # and replaced by a constant [[1,1,1]] -- this also makes the loop
        # run for a single iteration instead of the whole minibatch.
        # Confirm whether this is a deliberate experiment.
        # new_state_predictions = self.target_model.predict(new_states_prepped)
        new_state_predictions = [[1,1,1]]
        for i in range(len(new_state_predictions)):
            total_reward = rewards[i]
            if not dones[i]:
                total_reward += self.discount * max(new_state_predictions[i])
            # clip
            if total_reward > 1:
                total_reward = 1
            elif total_reward < -1:
                total_reward = -1
            current_state_predictions[i][actions[i]] = total_reward
        # fitting the model --- this is the neural net training step
        self.value_model.fit(
            np.squeeze(np.asarray(current_states)),
            np.squeeze(np.asarray(current_state_predictions)),
            epochs=1,
            verbose=False)
def main():
    """Train one agent, then plot and save the diagnostic figures."""
    qlagent = DeepQLearningAgent(discount=0.99, alpha=0.45, T=9 , rho=0.6032638549804688)
    qlagent.learn(iterations=int(5000))
    print(qlagent.exploration_rate)
    plt.plot(qlagent.net_training_time)
    plt.show()
    # results
    analyzer = util.ResultsAnalyzer(
        qlagent.value_model, qlagent.states_visited, qlagent.steps_before_done,
        qlagent.last_50_steps, qlagent.snyc_points, qlagent.timing_between_updates)
    end_policy = analyzer.extractPolicy()
    analyzer.processPolicy(end_policy)
    analyzer.plotStatesVisited(save=True)
    analyzer.plotLogStatesVisited(save=True)
    analyzer.plotStepsCounter(save=True)
    analyzer.plotExploration(save=True)
    analyzer.plotLast50(save=True)
    analyzer.plotTimings(save=True)
if __name__ == "__main__":
    # Script entry point: run a full training + analysis pass.
    main()
"michael.neuder@gmail.com"
] | michael.neuder@gmail.com |
553c8bdce9310f714de89a953254547790cb5798 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_14/23.py | d377a8e795b0b42d56e3300bc9c3260586116e75 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | #!/usr/bin/env python
import gmpy,math
import sys
# Python 2 script: read the test-case count from stdin (f.next()).
f=sys.stdin
n=int(f.next())
class Case(object):
    """One Code Jam test case: find a triangle with vertices on the N x M
    grid whose area is A/2, by brute force over two corner points.

    Reads its input line from the module-level stdin handle `f`.
    """
    def __init__(self):
        self.res = "IMPOSSIBLE"
        N,M,A = map(int,f.next().split())
        # Max achievable doubled area is N*M; anything larger is impossible.
        if N*M < A:
            return
        # First vertex fixed at the origin; search the other two.
        for xb in range(N+1):
            for yb in range(M+1):
                for xc in range(yb,N+1):
                    for yc in range(xb,M+1):
                        # |cross product| equals twice the triangle area.
                        if abs(xb*yc - xc*yb) == A:
                            self.res = "%s %s %s %s %s %s"%(0,0,xb,yb,xc,yc)
                            return
    def run(self):
        pass
    def __str__(self):
        return str(self.res)
c=Case()
c.run()
print "Case #%s: %s"%(case,c)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
f264b6aefd6e4f3b76d8adff5912a5ebfda45ef3 | 3d89ff4093d989940e7d0e535343a748adb0a87f | /5690-ClosestDessertCost.py | 8389d0bb9e6e364114ec8116c04b9239786fe5fc | [] | no_license | Scott-Larsen/LeetCode | 129585bb3017fbb59c07c22f74afe4309b46c15d | f644afb34f15cd4e310026a00ccf4149ba8daf10 | refs/heads/main | 2021-06-22T23:02:06.515527 | 2021-06-12T16:30:31 | 2021-06-12T16:30:31 | 204,087,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,567 | py | # 5690. Closest Dessert Cost
# You would like to make dessert and are preparing to buy the ingredients. You have n ice cream base flavors and m types of toppings to choose from. You must follow these rules when making your dessert:
# There must be exactly one ice cream base.
# You can add one or more types of topping or have no toppings at all.
# There are at most two of each type of topping.
# You are given three inputs:
# baseCosts, an integer array of length n, where each baseCosts[i] represents the price of the ith ice cream base flavor.
# toppingCosts, an integer array of length m, where each toppingCosts[i] is the price of one of the ith topping.
# target, an integer representing your target price for dessert.
# You want to make a dessert with a total cost as close to target as possible.
# Return the closest possible cost of the dessert to target. If there are multiple, return the lower one.
class Solution:
    def closestCost(
        self, baseCosts: List[int], toppingCosts: List[int], target: int
    ) -> int:
        """Return the achievable dessert cost closest to target.

        A dessert is one base plus 0, 1, or 2 units of each topping; on a
        tie the lower cost wins.
        """
        # Build the set of every reachable total price.
        reachable = set(baseCosts)
        for topping in toppingCosts:
            for cost in tuple(reachable):
                reachable.update((cost + topping, cost + 2 * topping))

        if target in reachable:
            return target

        # Walk outward from target, preferring the lower price on ties.
        for delta in range(1, target + 1):
            if target - delta in reachable:
                return target - delta
            if target + delta in reachable:
                return target + delta

        # Nothing within [0, 2*target]; the cheapest base is the closest.
        return min(baseCosts)
"scott@scottlarsen.com"
] | scott@scottlarsen.com |
ff848fbbf9d48acf972b91af78b1a7f35fba2c83 | 53f3eb1730f94f89d9d9d3d80a4182360d4e4420 | /13/utils/scanners.py | 4ed34abbdb482bd8e2e43cd02a596e078e284442 | [
"MIT"
] | permissive | Magnificent-Big-J/advent-of-code-2017 | 964b1da28b4e4f4398a3562baa130d5fdd701e9a | b83a849752c9a045978a0ea5eceb409adbfca0f4 | refs/heads/master | 2021-09-01T06:59:10.604222 | 2017-12-23T14:52:22 | 2017-12-23T14:52:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | def load_scanners():
layers = {}
layer = 0
with open('input.txt') as f:
for line in f:
data = [int(i) for i in line.split(': ')]
while layer != data[0]:
layers[layer] = {'s': -1, 'd': -1, 'dir': None}
layer += 1
layers[data[0]] = {'s': 0, 'd': data[1], 'dir': 'down'}
layer += 1
return layers
def move_scanners(layers):
    """Advance every scanner one step, bouncing at the ends of its range.

    Mutates `layers` in place and returns it.
    """
    for layer in layers:
        scanner = layers[layer]
        if scanner['dir'] == 'down':
            if scanner['s'] < scanner['d'] - 1:
                scanner['s'] += 1
            else:
                # Hit the bottom: step back and reverse direction.
                scanner['s'] -= 1
                scanner['dir'] = 'up'
        elif scanner['dir'] == 'up':
            if scanner['s'] > 0:
                scanner['s'] -= 1
            else:
                # Hit the top: step back and reverse direction.
                scanner['s'] += 1
                scanner['dir'] = 'down'
    return layers
| [
"chris@chrxs.net"
] | chris@chrxs.net |
0a6db6f5367369ae8bb4340f78ad9fdd04f78a82 | 6a1975a11de163ce0e6a5f001df41758bea3686b | /1047. Remove All Adjacent Duplicates In String/Solution_栈.py | 5bd255435f7a6ac7e19295dca77315666a0668f4 | [] | no_license | Inpurple/Leetcode | 7f08e0e500d37913e9244f08ea8f603b3fc1ce88 | df2bcca72fd303100dbcd73d1dfae44467abbb44 | refs/heads/master | 2020-05-20T02:17:08.430557 | 2019-09-22T07:51:28 | 2019-09-22T07:51:28 | 185,327,908 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | class Solution(object):
def removeDuplicates(self, S):
"""
:type S: str
:rtype: str
"""
sta=[]
for i in S:
if sta and sta[-1]==i:
sta.pop()
else:
sta.append(i)
return ''.join(sta)
| [
"noreply@github.com"
] | Inpurple.noreply@github.com |
dcd366a00afd84b0b4dc0d78f57f34f918a3028d | 0fea8a6421fe5f5967f2202910022c2bfd277b4d | /164.生成一个随机的8位密码,要求4个字母和4个数字.py | c4b132ec248696b2fb5da097f6336c9a28df8e14 | [] | no_license | maohaoyang369/Python_exercise | 4dc10ec061aa0de2bcfe59c86be115e135fb3fab | 8fbee8854db76d09e2b1f9365ff55198ddabd595 | refs/heads/master | 2020-04-09T23:04:02.327118 | 2019-09-05T14:49:07 | 2019-09-05T14:49:07 | 160,646,057 | 0 | 2 | null | 2019-03-21T14:44:13 | 2018-12-06T08:50:19 | Python | UTF-8 | Python | false | false | 411 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# Generate a random 8-character password: 4 letters and 4 digits.
import random
import string
# Four digits, drawn with replacement.
spam_num = random.choices("0123456789", k=4)
print(spam_num)
# Four distinct ASCII letters, drawn without replacement.
spam_letters = random.sample(string.ascii_letters, 4)
print(spam_letters)
spam = spam_num+spam_letters
print(spam)
# random.shuffle() shuffles in place and returns None.
spam_num_letters = random.shuffle(spam)
print(spam)
secrity = "".join(spam)
print(secrity)
| [
"372713573@qq.com"
] | 372713573@qq.com |
07ed60f2ac262214e2aa84b74db7f7fd479050c3 | 5cc204e2ecb9a756127e7c71633a1edcdb3e989b | /pylmp/InKim/BGF_mergeBgf.py | 4e3d973e087898e5159c36b8879389f57020e8c7 | [] | no_license | hopefulp/sandbox | 1a1d518cf7b5e6bca2b2776be1cac3d27fc4bcf8 | 4d26767f287be6abc88dc74374003b04d509bebf | refs/heads/master | 2023-06-27T17:50:16.637851 | 2023-06-15T03:53:39 | 2023-06-15T03:53:39 | 218,209,112 | 1 | 0 | null | 2022-09-13T13:22:34 | 2019-10-29T05:14:02 | C++ | UTF-8 | Python | false | false | 3,415 | py | #!/opt/applic/epd/bin/python
import sys, re, string, getopt, optparse, math, time
from os import popen
# Command-line parsing (Python 2 script).
# NOTE(review): only bgf_file/mod_file/out_file are pre-initialized here,
# but the handlers below assign bgf1_file/bgf2_file -- omitting -b or -c
# makes the mergebgf() call fail with NameError. Confirm intended names.
option = ""; args = ""; bgf_file = ""; mod_file = ""; out_file = ""
usage = """
Usage: mergeBGF.py -b bgf1_file -c bgf2_file -o out_file
"""
options, args = getopt.getopt(sys.argv[1:], 'hb:c:o:', ['help','bgf1=','bgf2=','out='])
for option, value in options:
    if option in ('-h', '--help'):
        print usage; sys.exit(0)
    elif option in ('-b', '--bgf1'):
        bgf1_file = value
    elif option in ('-c', '--bgf2'):
        bgf2_file = value
    elif option in ('-o', '--out'):
        out_file = value
    elif option in (''):
        print usage; sys.exit(0)
#-----------------
# merge two bgf file
#
#_________________
def mergebgf(bgf1_file, bgf2_file, out_file):
    """Merge two BGF (Biograf) structure files into one.

    Atoms of file 2 are renumbered by the atom count of file 1, both
    HETATM sections and both CONECT sections are concatenated, and the
    result (header of file 1 + atoms + connectivity + END) is written to
    out_file.
    """
    print(options)
    # read bgf 1 and bgf 2
    f_bgf1_file = open(bgf1_file)
    f_bgf2_file = open(bgf2_file)
    f_out_file = open(out_file,'w')
    bgf1_atom_data = []; bgf2_atom_data = []; bgf1_conect_data = []; bgf2_conect_data = []
    n_atoms_1 = 0; n_atoms_2 = 0
    # Pass 1 over file 1: collect HETATM and CONECT records.
    while 1:
        line = f_bgf1_file.readline()
        if not line:
            break
        if 'HETATM' in line:
            n_atoms_1 += 1
            parse = re.split('\s*', line)
            bgf1_atom_data.append(parse)
        if 'FORMAT' in line:
            continue
        if 'CONECT' in line:
            parse = re.split('\s*', line)
            # Drop the trailing empty token produced by the split.
            parse = parse[:-1]
            bgf1_conect_data.append(parse)
    # Pass 1 over file 2: same collection.
    while 1:
        line = f_bgf2_file.readline()
        if not line:
            break
        if 'HETATM' in line:
            n_atoms_2 += 1
            parse = re.split('\s*', line)
            bgf2_atom_data.append(parse)
        if 'FORMAT' in line:
            continue
        if 'CONECT' in line:
            parse = re.split('\s*', line)
            parse = parse[:-1]
            bgf2_conect_data.append(parse)
    # add n_atom_1 to atom id of bgf 2
    #margin = int(math.ceil(n_atoms_1 / 10.0)*10)
    #print(margin)
    margin = n_atoms_1
    for atom in bgf2_atom_data:
        atom[1] = str(int(atom[1]) + margin)
    for conect in bgf2_conect_data:
        n_conect = len(conect)
        for i in xrange(1, n_conect):
            conect[i] = str(int(conect[i]) + margin)
    # merge the file sequentially: 1 -> 2
    f_bgf1_file.seek(0)
    f_bgf2_file.seek(0)
    # header
    # Copy file 1's header verbatim up to (excluding) its first HETATM.
    while 1:
        line = f_bgf1_file.readline()
        if not line:
            break
        if 'HETATM' in line:
            break
        f_out_file.write(line)
    # atom data of bgf1
    for item in bgf1_atom_data:
        # Columns 6-8 are x/y/z coordinates; column 12 is the charge.
        item[6] = float(item[6])
        item[7] = float(item[7])
        item[8] = float(item[8])
        item[12] = float(item[12])
        wline = '{0:>6} {1:>5} {2:<5} {3:3} {4:<1}{5:>5} {6:>10.5f}{7:>10.5f}{8:>10.5f} {9:<5}{10:3}{11:2} {12:>8.5f}'.format(*item)
        wline += '\n'
        f_out_file.write(wline)
    # atom data of bgf2
    for item in bgf2_atom_data:
        item[6] = float(item[6])
        item[7] = float(item[7])
        item[8] = float(item[8])
        item[12] = float(item[12])
        wline = '{0:>6} {1:>5} {2:<5} {3:3} {4:<1}{5:>5} {6:>10.5f}{7:>10.5f}{8:>10.5f} {9:<5}{10:3}{11:2} {12:>8.5f}'.format(*item)
        wline += '\n'
        f_out_file.write(wline)
    # Connectivity section: fixed-width 6-char fields per the FORMAT line.
    f_out_file.write('FORMAT CONECT (a6,12i6)\n')
    wline = ""
    for item in bgf1_conect_data:
        for i in xrange(0, len(item)):
            wline += '{0:>6}'.format(item[i])
        wline += '\n'
    f_out_file.write(wline)
    wline = ""
    for item in bgf2_conect_data:
        for i in xrange(0, len(item)):
            wline += '{0:>6}'.format(item[i])
        wline += '\n'
    f_out_file.write(wline)
    f_out_file.write("END\n")
    f_out_file.write("")
    f_out_file.close()
    #return 1
# main call: merge the two BGF files named on the command line.
mergebgf(bgf1_file, bgf2_file, out_file)
| [
"hopefulp@gmail.com"
] | hopefulp@gmail.com |
4b33c4af014c182b96c8f0f664c28eb3b5f7d2b0 | 50d6a01aac56215c166d5659196dbcbcbf48c5d2 | /mongo/src/conn.py | 6bbf8572d94f3e70610b34726d5e16f6696228d2 | [] | no_license | HackUPCCrew/MachineLearning | c66541709165382b3c1e15c5d51bc2b068f57948 | 7697dcdf73a8e0a24f8793118612cbbf25653153 | refs/heads/master | 2021-07-14T01:25:59.521438 | 2017-10-17T19:35:45 | 2017-10-17T19:35:45 | 106,882,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | #!/usr/bin/env python3
from pymongo import MongoClient
from pprint import pprint
# Connect to the remote MongoDB instance and authenticate against `admin`.
client = MongoClient("mongodb://34.224.70.221:8080")
db=client.admin
# Issue the serverStatus admin command and pretty-print the full reply.
serverStatusResult=db.command("serverStatus")
pprint(serverStatusResult)
| [
"krishnakalyan3@gmail.com"
] | krishnakalyan3@gmail.com |
5c9f74d4f9302e90ca39b1dd80dce303ed88f773 | aedd3aeadfb13eda4489d26ee3d9762598878936 | /leetcode/1281. 整数的各位积和之差.py | f67fa9eb3bba6026574b965d007dd6e0b0c201b1 | [] | no_license | AnJian2020/Leetcode | 657e8225c4d395e8764ef7c672d435bda40584c7 | cded97a52c422f98b55f2b3527a054d23541d5a4 | refs/heads/master | 2023-03-26T16:25:36.136647 | 2021-03-26T07:04:10 | 2021-03-26T07:04:10 | 283,940,538 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | class Solution:
def subtractProductAndSum(self, n: int) -> int:
numList=list(str(n))
sum=0
product=1
for item in range(len(numList)):
numList[item]=int(numList[item])
sum+=numList[item]
product*=numList[item]
result=product-sum
return result
if __name__ == "__main__":
print(Solution().subtractProductAndSum(4421)) | [
"xuhao2018@foxmail.com"
] | xuhao2018@foxmail.com |
9b4871a27d15086682164ca0e12198fdb16cab67 | a4344e89e7f467d8bfd3f000f8cced17e36bfd70 | /predict.py | 3a781070190775ab4d7ab85cabf0b6a3f4912cfa | [] | no_license | Schnei1811/InsectClassifier | 5b8d90e21dd23857af82aa26d048591bb70a2cf5 | b8c22a103b7f2099058f4994681a8b2babc147a2 | refs/heads/master | 2023-04-18T08:51:07.753666 | 2021-03-14T03:07:49 | 2021-03-14T03:07:49 | 347,531,957 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,363 | py | import argparse
import cv2
from glob import glob
from tqdm import tqdm
import numpy as np
import os
import torch, torchvision
import torch.nn as nn
from torchvision import models, transforms
import json
import csv
# Number of classes in the dataset
img_size = 224
class GlobalAvgPool2d(nn.Module):
def forward (self, x):
return torch.mean(x.view(x.size(0), x.size(1), -1), dim=2)
def initialize_model(arch, num_classes):
# Initialize these variables which will be set in this if statement. Each of these
# variables is model specific.
model_ft = None
if arch == "resnet":
""" Resnet101 """
model_ft = models.resnet101()
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
elif arch == "mobilenet":
""" Mobilenet """
model_ft = models.mobilenet_v2()
num_ftrs = model_ft.classifier[1].in_features
elif arch == "densenet":
""" Densenet """
model_ft = models.densenet201() #DenseNet201
num_ftrs = model_ft.classifier.in_features
else:
print(f"Unknown model name {arch}. Choose from resnet, mobilenet, or densenet")
quit()
model_ft.classifier = nn.Sequential(
GlobalAvgPool2d(), #Equivalent to GlobalAvgPooling in Keras
# nn.Linear(1920, 1024),
nn.Linear(num_ftrs, 1024),
nn.ReLU(),
nn.Linear(1024, 1024),
nn.ReLU(),
nn.Linear(1024, 512),
nn.ReLU(),
nn.Linear(512, num_classes))
return model_ft
class CustomDataset(torch.utils.data.Dataset):
def __init__(self, X_images, X_paths):
self.X_images = X_images
self.X_paths = X_paths
def __len__(self):
return len(self.X_images)
def __getitem__(self, idx):
sample = self.X_images[idx]
sample = sample.astype("float32") / 255.0
sample = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])(sample)
return (sample, self.X_paths[idx])
def buildImageAspectRatio(X_path):
img = cv2.imread(X_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
resize_x = int(img.shape[1] * img_size / max(img.shape))
resize_y = int(img.shape[0] * img_size / max(img.shape))
push_x = (img_size - resize_x) // 2
push_y = (img_size - resize_y) // 2
resized_img = cv2.resize(img, (resize_x, resize_y))
canvas = np.zeros((img_size, img_size, 3)).astype("uint8") + 255
canvas[push_y:resized_img.shape[0] + push_y, push_x:resized_img.shape[1] + push_x, :] = resized_img
return canvas
def createData(data_name, X_paths):
if not os.path.exists("Arrays_Batches"):
os.makedirs("Arrays_Batches")
if not os.path.exists("Arrays_Data"):
os.makedirs("Arrays_Data")
reset = True
data_batch = 0
for i, X_path in enumerate(tqdm(X_paths)):
if reset == True:
reset = False
X = np.expand_dims(buildImageAspectRatio(X_path), axis=0)
else:
X = np.vstack((X, np.expand_dims(buildImageAspectRatio(X_path), axis=0)))
if not i == 0 and i % 999 == 0:
reset = True
np.save(f"Arrays_Batches/{data_name}_Input_{data_batch}_{len(X)}.npy", X)
data_batch += 1
if i == len(X_paths) - 1:
np.save(f"Arrays_Batches/{data_name}_Input_{data_batch}_{len(X)}.npy", X)
data_batch += 1
data_paths = []
for batch in range(data_batch):
data_paths.append(glob(f'Arrays_Batches/{data_name}_Input_{batch}_*')[0])
for i, data_path in enumerate(tqdm(data_paths)):
data = np.load(data_path)
if i == 0:
X = data
else:
X = np.vstack((X, data))
np.save(f'Arrays_Data/{data_name}_Input_{len(X)}.npy', X)
def test_model(model, dataloader, device, num_to_class, report_csv):
model.eval()
preds_array = np.array([])
for inputs, paths in tqdm(dataloader):
inputs = inputs.to(device)
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
preds_cpu = preds.cpu().numpy()
preds_array = np.append(preds_array, preds_cpu)
for i, pred in enumerate(preds_cpu):
img_name = paths[i].split("/")[-1]
report_csv.append([img_name, num_to_class[pred]])
csv_path = f"pred.csv"
with open(csv_path, "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(report_csv)
def main(data_name, arch, model_name, batch_size):
report_csv = [["file_path", "prediction (Order_Family)"]]
with open(f"metadata/{data_name}_num_to_class.json") as f:
num_to_class = json.load(f)
num_to_class = {int(k):v for k,v in num_to_class.items()}
num_classes = len(num_to_class)
X_paths = glob("extracted/*")
input_file_path = f"Arrays_Data/{data_name}_Input_{len(X_paths)}.npy"
if not os.path.exists(input_file_path):
createData(data_name, X_paths)
X = np.load(input_file_path)
image_dataset = CustomDataset(X, X_paths)
dataloader = torch.utils.data.DataLoader(image_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
model_ft = initialize_model(arch, num_classes)
# Detect if we have a GPU available
# if torch.cuda.device_count() > 1:
# print("Let's use", torch.cuda.device_count(), "GPUs!")
model_ft = nn.DataParallel(model_ft)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model_ft = model_ft.to(device)
model_path = os.path.join("models", arch, model_name)
model_ft.load_state_dict(torch.load(model_path))
test_model(model_ft, dataloader, device, num_to_class, report_csv)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--data_name", default="Alus")
parser.add_argument("--arch", default="mobilenet") #densenet, resnet, mobilenet
parser.add_argument("--model_name", default="0_0.9765853658536585_450.pt")
parser.add_argument("--batch_size", default=32, type=int)
args = parser.parse_args()
main(args.data_name, args.arch, args.model_name, args.batch_size) | [
"stefan871@gmail.com"
] | stefan871@gmail.com |
36bd450f476d6d992245f98f6ee62e8f0459c471 | ae7ba9c83692cfcb39e95483d84610715930fe9e | /yubinbai/pcuva-problems/UVa 10082 - WERTYU/main.py | f4cf71c10e1e6bc76858ecb5779421a0e7b80c6f | [] | no_license | xenron/sandbox-github-clone | 364721769ea0784fb82827b07196eaa32190126b | 5eccdd8631f8bad78eb88bb89144972dbabc109c | refs/heads/master | 2022-05-01T21:18:43.101664 | 2016-09-12T12:38:32 | 2016-09-12T12:38:32 | 65,951,766 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,760 | py | '''
Created on Jun 18, 2013
@author: Yubin Bai
All rights reserved.
'''
import time
from multiprocessing.pool import Pool
parallelSolve = False
INF = 1 << 30
def solve(par):
r1 = '`1234567890-' + 'qwertyuiop[' + 'asdfghjhkl' + 'zxcvbnm,.'
r2 = '1234567890-=' + 'wertyuiop[]' + 'sdfghjhkl;' + 'xcvbnm,./'
d = {' ': ' '}
for k, v in zip(r2, r1):
d[k.upper()] = v.upper()
word = par
result = []
for c in word:
result.append(d[c])
return ''.join(result)
class Solver:
def getInput(self):
self.numOfTests = 1
self.input = []
word = self.fIn.readline().strip()
self.input.append((word))
def __init__(self):
self.fIn = open('input.txt')
self.fOut = open('output.txt', 'w')
self.results = []
def parallel(self):
self.getInput()
p = Pool(4)
millis1 = int(round(time.time() * 1000))
self.results = p.map(solve, self.input)
millis2 = int(round(time.time() * 1000))
print("Time in milliseconds: %d " % (millis2 - millis1))
self.makeOutput()
def sequential(self):
self.getInput()
millis1 = int(round(time.time() * 1000))
for i in self.input:
self.results.append(solve(i))
millis2 = int(round(time.time() * 1000))
print("Time in milliseconds: %d " % (millis2 - millis1))
self.makeOutput()
def makeOutput(self):
for test in range(self.numOfTests):
self.fOut.write("Case #%d: %s\n" % (test + 1, self.results[test]))
self.fIn.close()
self.fOut.close()
if __name__ == '__main__':
solver = Solver()
if parallelSolve:
solver.parallel()
else:
solver.sequential()
| [
"xenron@outlook.com"
] | xenron@outlook.com |
14da669856411f17a43c79936abfc07ed1dc2c1c | d9e0406c275417791024f97abc0600c96910633f | /question/migrations/0005_auto_20210510_1025.py | 549c6daf667227fa8927f506dd141be48c18de81 | [] | no_license | lesage20/vuejs | 46c75e7528ae6e9834f351ed4f814869fae417ac | da0522280dd1e6cf858c90758f38c4da963785a1 | refs/heads/main | 2023-04-19T12:20:54.672778 | 2021-05-12T08:42:34 | 2021-05-12T08:42:34 | 366,649,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | # Generated by Django 3.1.7 on 2021-05-10 10:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('question', '0004_question_titre'),
]
operations = [
migrations.AlterField(
model_name='question',
name='prop',
field=models.ManyToManyField(blank=True, null=True, related_name='question', to='question.Proposition'),
),
]
| [
"angezanou00@gmail.com"
] | angezanou00@gmail.com |
b6b1f4eed9917b484d8c00356460fcc6a66f2a3b | 04e080a00f37a3501c5060380d65c5a6cd669d90 | /thonnycontrib/m5stack/esp8266_api_stubs/uhashlib.py | 02e34e1294866de8ac040af1dc3d2d19287c2acc | [
"MIT"
] | permissive | thonny/thonny-m5stack | 473a2876e72b88d283d8b9d64189028ef7fea111 | a502579ad5e264342ae0bc2c554c78527053693b | refs/heads/master | 2020-04-20T14:57:15.605699 | 2019-11-18T22:28:36 | 2019-11-18T22:28:36 | 168,914,658 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py |
class sha1:
''
def digest():
pass
def update():
pass
class sha256:
''
def digest():
pass
def update():
pass
| [
"aivar.annamaa@gmail.com"
] | aivar.annamaa@gmail.com |
199dc7283ca9dfcf42146c61abe183d9955ad52c | 84dbd7dfdc2c63433b2088dd3fe711a07cf8b3b8 | /week13/day1/daily/phonedir/views.py | e34357f2c7c7e8e170399766febe68bd94355371 | [] | no_license | jfrance00/di-exercises | 623bebeddd3ff3ed062e1ad5097f15f7ed002362 | bbc97714c26b41ed76dfed35df5780e3aa482b5e | refs/heads/master | 2022-11-27T23:31:59.742231 | 2020-07-29T12:20:01 | 2020-07-29T12:20:01 | 257,882,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | import flask
import wtforms as wtf
from . import forms, models
from . import app, db
@app.route('/', methods=['GET', 'POST'])
def index():
form = forms.SearchNameOrNumber()
return flask.render_template('index.html', form=form)
@app.route('/add-input', methods=['GET', 'POST'])
def add_input():
form = forms.AddPersonToDatabase()
if flask.request.method == "POST":
id = form.id.data
name = form.name.data
phone = form.phone.data
email = form.email.data
address = form.address.data
entry = models.Person(id=id, name=name, phone=phone, email=email, address=address)
db.session.add(entry)
db.session.commit()
flask.flash(f'{name} added successfully')
return flask.render_template('add-input.html', form=form) | [
"jfrance00@gmail.com"
] | jfrance00@gmail.com |
f8eb219a525fd3d56b4f5fae1875cccf536032f1 | ee7ca0fed1620c3426fdfd22e5a82bba2a515983 | /dsn_purchase_order/models/purchase.py | 843a5a966c61fcaa6cf586e164ee2d06c78cc085 | [] | no_license | disna-sistemas/odoo | 318d0e38d9b43bea56978fe85fc72850d597f033 | 0826091462cc10c9edc3cc29ea59c417f8e66c33 | refs/heads/8.0 | 2022-03-08T19:01:21.162717 | 2022-02-15T13:06:26 | 2022-02-15T13:06:26 | 99,210,381 | 0 | 5 | null | 2019-07-24T08:49:58 | 2017-08-03T08:36:55 | Python | UTF-8 | Python | false | false | 5,912 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 OpenERP SA (<http://openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
from openerp import tools, _
class dsnPurchaseOrder(models.Model):
_inherit = "purchase.order"
_order = "date_order desc, name"
class dsnPurchaseOrderLine(models.Model):
_inherit = "purchase.order.line"
# _order = "date_planned desc, name"
_order = "id"
@api.multi
@api.onchange('product_id')
def dsn_warning_obsolete(self):
self.ensure_one()
res = {}
if self.product_id:
_obsolete = False
if self.product.state and self.product_id.state=='obsolete':
res = {'warning': {'title': _('Obsolete Product'), 'message': _(
'This product is obsolete')}}
return res
class dsnPurchasereport(models.Model):
_inherit = "purchase.report"
dsncat2_id = fields.Many2one(comodel_name='product.category',
string='Cat2',
readonly=True)
def init(self, cr):
tools.sql.drop_view_if_exists(cr, 'purchase_report')
cr.execute("""
create or replace view purchase_report as (
WITH currency_rate (currency_id, rate, date_start, date_end) AS (
SELECT r.currency_id, r.rate, r.name AS date_start,
(SELECT name FROM res_currency_rate r2
WHERE r2.name > r.name AND
r2.currency_id = r.currency_id
ORDER BY r2.name ASC
LIMIT 1) AS date_end
FROM res_currency_rate r
)
select
min(l.id) as id,
s.date_order as date,
l.state,
s.date_approve,
s.minimum_planned_date as expected_date,
s.dest_address_id,
s.pricelist_id,
s.validator,
spt.warehouse_id as picking_type_id,
s.partner_id as partner_id,
s.create_uid as user_id,
s.company_id as company_id,
l.product_id,
t.categ_id as category_id,
t.dsncat2_id,
t.uom_id as product_uom,
s.location_id as location_id,
sum(l.product_qty/u.factor*u2.factor) as quantity,
extract(epoch from age(s.date_approve,s.date_order))/(24*60*60)::decimal(16,2) as delay,
extract(epoch from age(l.date_planned,s.date_order))/(24*60*60)::decimal(16,2) as delay_pass,
count(*) as nbr,
sum(l.price_unit/cr.rate*l.product_qty)::decimal(16,2) as price_total,
avg(100.0 * (l.price_unit/cr.rate*l.product_qty) / NULLIF(ip.value_float*l.product_qty/u.factor*u2.factor, 0.0))::decimal(16,2) as negociation,
sum(ip.value_float*l.product_qty/u.factor*u2.factor)::decimal(16,2) as price_standard,
(sum(l.product_qty*l.price_unit/cr.rate)/NULLIF(sum(l.product_qty/u.factor*u2.factor),0.0))::decimal(16,2) as price_average
from purchase_order_line l
join purchase_order s on (l.order_id=s.id)
left join product_product p on (l.product_id=p.id)
left join product_template t on (p.product_tmpl_id=t.id)
LEFT JOIN ir_property ip ON (ip.name='standard_price' AND ip.res_id=CONCAT('product.template,',t.id) AND ip.company_id=s.company_id)
left join product_uom u on (u.id=l.product_uom)
left join product_uom u2 on (u2.id=t.uom_id)
left join stock_picking_type spt on (spt.id=s.picking_type_id)
join currency_rate cr on (cr.currency_id = s.currency_id and
cr.date_start <= coalesce(s.date_order, now()) and
(cr.date_end is null or cr.date_end > coalesce(s.date_order, now())))
group by
s.company_id,
s.create_uid,
s.partner_id,
u.factor,
s.location_id,
l.price_unit,
s.date_approve,
l.date_planned,
l.product_uom,
s.minimum_planned_date,
s.pricelist_id,
s.validator,
s.dest_address_id,
l.product_id,
t.categ_id,
t.dsncat2_id,
s.date_order,
l.state,
spt.warehouse_id,
u.uom_type,
u.category_id,
t.uom_id,
u.id,
u2.factor
)
""")
| [
"sistemas@disna.com"
] | sistemas@disna.com |
b93664d963b69f1fac7d321eef6d9a3d5390debd | ad357cfbec64afb8f4cc4043b212996768f9755c | /api/barriers/migrations/0038_auto_20200224_1622.py | 4bd3709007de448df0b1ecfc265d0b5a0b6953ae | [
"MIT"
] | permissive | uktrade/market-access-api | 6b4680e6455eb5c25480ccd3e3d9445654269f36 | 4da26d1be53843d22411577409d9489010bdda09 | refs/heads/master | 2023-08-30T14:47:10.373148 | 2023-08-29T13:58:08 | 2023-08-29T13:58:08 | 131,856,014 | 2 | 3 | MIT | 2023-09-14T08:04:42 | 2018-05-02T13:38:37 | Python | UTF-8 | Python | false | false | 968 | py | # Generated by Django 2.2.8 on 2020-02-24 16:22
from django.db import migrations
def populate_archived_reason(apps, schema_editor):
BarrierInstance = apps.get_model("barriers", "BarrierInstance")
BarrierInstance.objects.filter(
archived=True,
archived_reason__isnull=True,
).update(archived_reason="OTHER", archived_explanation="Archive reason unknown")
def unpopulate_archived_reason(apps, schema_editor):
BarrierInstance = apps.get_model("barriers", "BarrierInstance")
BarrierInstance.objects.filter(
archived=True,
archived_reason="OTHER",
archived_explanation="Archive reason unknown",
).update(
archived_reason=None,
archived_explanation=None,
)
class Migration(migrations.Migration):
dependencies = [
("barriers", "0037_auto_20200224_1552"),
]
operations = [
migrations.RunPython(populate_archived_reason, unpopulate_archived_reason),
]
| [
"noreply@github.com"
] | uktrade.noreply@github.com |
d38479e4f3d5d36e535a5c308876ea91eff7adfb | aac11cb909c13b0f24e90e18bca098d0f52c048d | /makewiki/settings.py | e58696ce2984e1f9b308be7eea134585c125558a | [
"MIT"
] | permissive | LukazDane/makewiki_v2 | 7add2002bc9c9813a66461305b56b3b92ffe3c36 | d71790c99951ed47d202e5a00d1eb7480b8552bd | refs/heads/master | 2022-04-30T00:08:06.716424 | 2020-01-08T01:11:26 | 2020-01-08T01:11:26 | 221,078,482 | 0 | 0 | MIT | 2022-04-22T22:44:16 | 2019-11-11T22:04:27 | Python | UTF-8 | Python | false | false | 4,100 | py | """
Django settings for makewiki project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1yct-t!2bnkgc7j59z+9cdd2k)@y+ftqor$!aya()3if^cnlo-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost', 'makewiki-lh.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'rest_framework',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'accounts.apps.AccountsConfig', # new
'wiki',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_currentuser.middleware.ThreadLocalUserMiddleware',
]
ROOT_URLCONF = 'makewiki.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(BASE_DIR, 'templates').replace('\\', '/'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'makewiki.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'wiki.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# wiki app settings
WIKI_PAGE_TITLE_MAX_LENGTH = 600
# Where to redirect during authentication
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/"
DEFAULT_LOGOUT_URL = '/'
# Required for Heroku
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# PROTIP:
# Need to override settings? Create a local_settings.py file
# in this directory, and add settings there.
try:
from makewiki.settings import *
except ImportError:
pass
| [
"deandrevidal@aol.com"
] | deandrevidal@aol.com |
51eb6471c303f10aa5f7d41241c0f542184c8c79 | 5d0e76e3c741adc120ce753bacda1e723550f7ac | /724. Find Pivot Index.py | d578d962c4b85d3f422ade4922f5502c890f4700 | [] | no_license | GoldF15h/LeetCode | d8d9d5dedca3cce59f068b94e2edf986424efdbf | 56fcbede20e12473eaf09c9d170c86fdfefe7f87 | refs/heads/main | 2023-08-25T12:31:08.436640 | 2021-10-20T04:36:23 | 2021-10-20T04:36:23 | 392,336,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | class Solution:
def pivotIndex(self, nums: List[int]) -> int:
right_sum = sum(nums)
left_sum = 0
prev = 0
for i in range(len(nums)) :
left_sum += prev
prev = nums[i]
right_sum -= nums[i]
if left_sum == right_sum :
return i
return -1 | [
"todsapon.singsunjit@gmail.com"
] | todsapon.singsunjit@gmail.com |
e6361dfa82714822273013df5ab2d96aacb6a6a4 | f366c19ce822a3e8f3cd5f670b25c6fa54322d0b | /python_udemy/introducao-python/iterando-strings-while.py | 7f9c554e14042ea18c66b656f267db6dbad27279 | [] | no_license | marcelomatz/py-studiesRepo | b83875a366010c9a60bc15d853fcf81c31cee260 | ce99014228f00d8c73cc548dd6c4d5fedc3f1b68 | refs/heads/main | 2023-09-05T00:03:47.712289 | 2021-06-15T09:43:27 | 2021-06-15T09:43:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | # iterar é passar por cada um dos elementos de uma string
# se tem índice é iterável
frase = 'o rato roeu a roupa do rei de roma'
tamanho_frase = len(frase)
contador = 0
nova_string = ''
while contador < tamanho_frase:
letra = frase[contador]
if letra == 'r':
nova_string += 'R'
else:
nova_string += letra
contador += 1
print(nova_string)
| [
"agenciahipster@gmail.com"
] | agenciahipster@gmail.com |
dc910c5e544db2555849a7d275f3d49ddc8c3178 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_9/nckkem001/question1.py | db30afcae4f531091f93b33fe973fe4a0f450d70 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,440 | py | """Program to analyse student marks from source file and determine which students
are advised to consult an advisor.
Kemeshan Naicker
11 May 2014"""
#Prompt user for name of source file.
file = input("Enter the marks filename:\n")
#Open file for processing
txtfile = open(file, "r")
#Read file into a string, and replace newline characters with spaces in order to
#read string into a list.
markslist = txtfile.read()
txtfile.close()
markslist = markslist.split("\n")
markslist = " ".join(markslist)
markslist = markslist.split(",")
markslist = " ".join(markslist)
#Read string into a list.
markslist = markslist.split()
marks = []
students = []
for i in range (0, len(markslist), 2):
students.append(markslist[i])
marks.append(eval(markslist[i+1]))
#Calculate standard deviation.
total = 0
N = len(marks)
for i in marks:
total += i
avrg = total/N
sdsum = 0
for i in marks:
sdsum += (i - avrg)**2
sd = (sdsum/N)**(1/2)
#Find students who are below one standard deviation of the mean and append them
#to a new list.
fail_list = []
for i in range(N):
if marks[i] < (avrg - sd):
fail_list.append(students[i])
#Print output.
print("The average is: {0:0.2f}".format(avrg))
print("The std deviation is: {0:0.2f}".format(sd))
if len(fail_list) > 0:
print("List of students who need to see an advisor:")
for i in fail_list:
print(i) | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
ddb61c4bdd3fe3a64465df7dd8e17bc6b3404d57 | 0a346b0601b32907902206a0e46ea55be64390b4 | /style_trans/neural_style.py | 8261b661b74a941d3560532e80ad4643e2310592 | [] | no_license | invoker4zoo/tensorflow_project | cf1f0fc2d7772a4da4004c1f0c14c5be8fcb909e | 847e511f8fa4634181b00055a01c75b9b46c5396 | refs/heads/master | 2020-03-11T18:06:43.514220 | 2018-04-19T08:53:42 | 2018-04-19T08:53:42 | 130,167,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,054 | py | # coding=utf-8
"""
@ license: Apache Licence
@ github: invoker4zoo
@ author: invoker/cc
@ wechat: whatshowlove
@ software: PyCharm
@ file: neural_style
@ time: 18-4-10
"""
import tensorflow as tf
import reader
import _reader
from preprocessing import preprocessing_factory
from nets import nets_factory
import losses
import time
import os
import utils
# tf.app.flags.DEFINE_float("CONTENT_WEIGHT", 5e0, "Weight for content features loss")
# tf.app.flags.DEFINE_float("STYLE_WEIGHT", 1e2, "Weight for style features loss")
# tf.app.flags.DEFINE_float("TV_WEIGHT", 1e-5, "Weight for total variation loss")
# tf.app.flags.DEFINE_string("VGG_MODEL", "pretrained/vgg_16.ckpt", "vgg model params path")
# tf.app.flags.DEFINE_list("CONTENT_LAYERS", ["vgg_16/conv3/conv3_3"],
# "Which VGG layer to extract content loss from")
# tf.app.flags.DEFINE_list("STYLE_LAYERS", ["vgg_16/conv1/conv1_2", "vgg_16/conv2/conv2_2",
# "vgg_16/conv3/conv3_3", "vgg_16/conv4/conv4_3"],
# "Which layers to extract style from")
# tf.app.flags.DEFINE_string("SUMMARY_PATH", "tensorboard", "Path to store Tensorboard summaries")
# tf.app.flags.DEFINE_string("STYLE_IMAGE", "img/picasso.jpg", "Styles to train")
# tf.app.flags.DEFINE_float("STYLE_SCALE", 1.0, "Scale styles. Higher extracts smaller features")
# tf.app.flags.DEFINE_float("LEARNING_RATE", 10., "Learning rate")
# tf.app.flags.DEFINE_string("CONTENT_IMAGE", "img/dancing.jpg", "Content image to use")
# tf.app.flags.DEFINE_boolean("RANDOM_INIT", True, "Start from random noise")
# tf.app.flags.DEFINE_integer("NUM_ITERATIONS", 1000, "Number of iterations")
# # reduce image size because of cpu training
# tf.app.flags.DEFINE_integer("IMAGE_SIZE", 256, "Size of output image")
#######################################################################
# ---------------------------------------------------------------------------
# Command-line flags (read throughout the module via the FLAGS object below).
# ---------------------------------------------------------------------------
# Loss network: which pretrained slim model computes the perceptual losses,
# where its checkpoint lives, and which variables to skip when restoring.
tf.app.flags.DEFINE_string("loss_model", 'vgg_16', "loss model name")
tf.app.flags.DEFINE_string("naming", 'test', "model_name")
tf.app.flags.DEFINE_string("loss_model_file", "pretrained/vgg_16.ckpt", "pretrained model")
tf.app.flags.DEFINE_string("checkpoint_exclude_scopes", "vgg_16/fc", "ignore variables")
# Relative weights of the content / style / total-variation loss terms.
tf.app.flags.DEFINE_float("content_weight", 5, "Weight for content features loss")
tf.app.flags.DEFINE_float("style_weight", 100, "Weight for style features loss")
tf.app.flags.DEFINE_float("tv_weight", 0.0, "Weight for total variation loss")
tf.app.flags.DEFINE_integer("image_size", 256, "Size of output image")
# Loss-network endpoints from which content / style statistics are extracted.
tf.app.flags.DEFINE_list("content_layers", ["vgg_16/conv3/conv3_3"],
                        "Which VGG layer to extract content loss from")
tf.app.flags.DEFINE_list("style_layers", ["vgg_16/conv1/conv1_2", "vgg_16/conv2/conv2_2",
                                        "vgg_16/conv3/conv3_3", "vgg_16/conv4/conv4_3"],
                        "Which layers to extract style from")
# Input images, optimisation hyper-parameters and output location.
tf.app.flags.DEFINE_string("model_path", 'models', "path to save model")
tf.app.flags.DEFINE_string("content_image", "img/dancing.jpg", "Content image to use")
tf.app.flags.DEFINE_string("style_image", "img/picasso.jpg", "Styles to train")
tf.app.flags.DEFINE_float("learning_rate", 10, "Learning rate")
tf.app.flags.DEFINE_integer("step", 100, "Number of iterations")

FLAGS = tf.app.flags.FLAGS
def total_variation_loss(layer):
    """Anisotropic total-variation loss over a NHWC feature tensor.

    Computes the mean squared difference between each pixel and its
    neighbour along the height and the width axes; minimising this
    encourages spatially smooth images.
    """
    dims = tf.shape(layer)
    height, width = dims[1], dims[2]
    # Difference between each row and the row below it.
    dy = tf.slice(layer, [0, 0, 0, 0], tf.stack([-1, height - 1, -1, -1])) \
        - tf.slice(layer, [0, 1, 0, 0], [-1, -1, -1, -1])
    # Difference between each column and the column to its right.
    dx = tf.slice(layer, [0, 0, 0, 0], tf.stack([-1, -1, width - 1, -1])) \
        - tf.slice(layer, [0, 0, 1, 0], [-1, -1, -1, -1])
    loss_x = tf.nn.l2_loss(dx) / tf.to_float(tf.size(dx))
    loss_y = tf.nn.l2_loss(dy) / tf.to_float(tf.size(dy))
    return loss_x + loss_y
# TODO: Okay to flatten all style images into one gram?
def gram(layer):
    """Return the normalised Gram matrix of a feature-map tensor.

    Batch and spatial positions are flattened into rows, giving a
    [num_filters, num_filters] matrix of channel correlations, divided
    by the total number of elements in ``layer``.
    """
    num_channels = tf.shape(layer)[3]
    flattened = tf.reshape(layer, tf.stack([-1, num_channels]))
    correlations = tf.matmul(flattened, flattened, transpose_a=True)
    return correlations / tf.to_float(tf.size(layer))
# TODO: Different style scales per image.
def get_style_features(style_paths, style_layers):
with tf.Graph().as_default() as g:
network_fn = nets_factory.get_network_fn(
FLAGS.loss_model,
num_classes=1,
is_training=False)
image_preprocessing_fn, image_unprocessing_fn = preprocessing_factory.get_preprocessing(
FLAGS.loss_model,
is_training=False)
image = tf.expand_dims(
reader.get_image(FLAGS.style_image, FLAGS.image_size, FLAGS.image_size, image_preprocessing_fn), 0)
# image = tf.expand_dims(
# _reader.get_image(FLAGS.content_image, FLAGS.image_size), 0)
_, endpoints = network_fn(image, spatial_squeeze=False)
features = []
for layer in style_layers:
features.append(gram(endpoints[layer]))
with tf.Session() as sess:
init_func = utils._get_init_fn(FLAGS)
init_func(sess)
return sess.run(features)
def get_content_features(content_path, content_layers):
    """Evaluate the content-layer activations of the content image.

    The content image is loaded, preprocessed for the loss network, pushed
    through the (frozen, pretrained) loss network, and the requested
    endpoint activations are evaluated once.

    Args:
        content_path: path of the content image to load.
        content_layers: names of loss-network endpoints whose activations
            are extracted.

    Returns:
        List of numpy arrays: one activation tensor per entry of
        ``content_layers``, followed by the preprocessed input image as the
        final element.
    """
    with tf.Graph().as_default():
        network_fn = nets_factory.get_network_fn(
            FLAGS.loss_model,
            num_classes=1,
            is_training=False)

        image_preprocessing_fn, image_unprocessing_fn = preprocessing_factory.get_preprocessing(
            FLAGS.loss_model,
            is_training=False)

        # Bug fix: the ``content_path`` argument was previously ignored and
        # FLAGS.content_image was always used; the argument is now honoured
        # (the existing caller passes FLAGS.content_image, so behaviour for
        # current callers is unchanged).
        image = tf.expand_dims(
            reader.get_image(content_path, FLAGS.image_size, FLAGS.image_size, image_preprocessing_fn), 0)
        _, endpoints = network_fn(image, spatial_squeeze=False)
        layers = [endpoints[layer] for layer in content_layers]

        with tf.Session() as sess:
            # Restore the pretrained loss-network weights before evaluating.
            init_func = utils._get_init_fn(FLAGS)
            init_func(sess)
            return sess.run(layers + [image])
def main(argv=None):
    """Optimize an image directly (Gatys-style transfer): the pixels of
    ``initial`` are the trainable variable; content, style and total
    variation losses are minimized with Adam and the result written to
    out.png."""
    # style_features_t = losses.get_style_features(FLAGS)
    # Make sure the training path exists.
    training_path = os.path.join(FLAGS.model_path, FLAGS.naming)
    if not(os.path.exists(training_path)):
        os.makedirs(training_path)

    """get features"""
    # Precomputed (constant) targets for the losses.
    style_features_t = get_style_features(FLAGS.style_image, FLAGS.style_layers)
    res = get_content_features(FLAGS.content_image, FLAGS.content_layers)
    content_features_t, image_t = res[:-1], res[-1]
    image = tf.constant(image_t)
    # NOTE(review): `random` is computed but never used -- a random init
    # was presumably tried and abandoned in favor of the content image.
    random = tf.random_normal(image_t.shape)
    # The optimized variable: starts from the content image's pixels.
    initial = tf.Variable(image)

    """Build Network"""
    network_fn = nets_factory.get_network_fn(
        FLAGS.loss_model,
        num_classes=1,
        is_training=True)
    image_preprocessing_fn, image_unprocessing_fn = preprocessing_factory.get_preprocessing(
        FLAGS.loss_model,
        is_training=False)
    preprocess_content_image = tf.expand_dims(
        reader.get_image(FLAGS.content_image, FLAGS.image_size, FLAGS.image_size, image_preprocessing_fn), 0)
    # preprocess_content_image = tf.expand_dims(
    #     _reader.get_image(FLAGS.content_image, FLAGS.image_size), 0)
    # preprocess_style_image = tf.expand_dims(
    #     reader.get_image(FLAGS.style_image, FLAGS.image_size, FLAGS.image_size, image_preprocessing_fn), 0)
    _, endpoints_dict = network_fn(preprocess_content_image, spatial_squeeze=False)

    """build loss"""
    # Content loss: mean squared activation difference per layer.
    content_loss = 0
    for content_features, layer in zip(content_features_t, FLAGS.content_layers):
        layer_size = tf.size(content_features)
        content_loss += tf.nn.l2_loss(endpoints_dict[layer] - content_features) / tf.to_float(layer_size)
    content_loss = FLAGS.content_weight * content_loss / len(FLAGS.content_layers)
    # Style loss: mean squared Gram-matrix difference per layer.
    style_loss = 0
    for style_gram, layer in zip(style_features_t, FLAGS.style_layers):
        layer_size = tf.size(style_gram)
        style_loss += tf.nn.l2_loss(gram(endpoints_dict[layer]) - style_gram) / tf.to_float(layer_size)
        # style_loss += (gram(endpoints_dict[layer]) - style_gram)
    style_loss = FLAGS.style_weight * style_loss
    tv_loss = FLAGS.tv_weight * total_variation_loss(initial)
    total_loss = content_loss + style_loss + tv_loss
    train_op = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(total_loss)
    # Undo mean-pixel preprocessing and clamp to uint8 before PNG encoding.
    output_image = tf.image.encode_png(tf.saturate_cast(tf.squeeze(initial) + reader.mean_pixel, tf.uint8))
    with tf.Session() as sess:
        # Restore the pretrained loss-network weights, then initialize the
        # remaining variables (the image variable and Adam slots).
        init_func = utils._get_init_fn(FLAGS)
        init_func(sess)
        sess.run(tf.global_variables_initializer())
        start_time = time.time()
        for step in range(FLAGS.step):
            _, loss_t, cl, sl = sess.run([train_op, total_loss, content_loss, style_loss])
            elapsed = time.time() - start_time
            start_time = time.time()
            print(step, elapsed, loss_t, cl, sl)
        image_t = sess.run(output_image)
        with open('out.png', 'wb') as f:
            f.write(image_t)
if __name__ == '__main__':
tf.app.run()
| [
"412214410@qq.com"
] | 412214410@qq.com |
70ddfb5469533e9612ff63f5df784bca6e0d927f | 27a31ec197f5603fe6fb438171a78bb381bf43b1 | /examples/cifar10_cnn.py | f296605e6cfb4b38459aefcfd189cdd36da0de7b | [
"MIT"
] | permissive | seba-1511/gsoc15-demo | 42152c335e6eb8e91479dee4ab0db5376ba55ec4 | 7fa542f33fdb39d73e2b11318c046ecf35fb9bcf | refs/heads/master | 2021-01-18T14:34:28.686048 | 2015-04-20T02:26:10 | 2015-04-20T02:26:10 | 33,458,769 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,114 | py | from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD, Adadelta, Adagrad
from keras.utils import np_utils, generic_utils
'''
Train a (fairly simple) deep CNN on the CIFAR10 small images dataset.
GPU run command:
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python cifar10_cnn.py
It gets down to 0.65 test logloss in 25 epochs, and down to 0.55 after 50 epochs.
(it's still underfitting at that point, though).
'''
# NOTE(review): Python 2 script against an early Keras API
# (Convolution2D(nb_filter, stack_size, rows, cols), model.train/test).
batch_size = 32
nb_classes = 10
nb_epoch = 25
data_augmentation = True

# the data, shuffled and split between tran and test sets
(X_train, y_train), (X_test, y_test) = cifar10.load_data(test_split=0.1)
print X_train.shape[0], 'train samples'
print X_test.shape[0], 'test samples'

# convert class vectors to binary class matrices (one-hot)
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

# Two conv/conv/pool/dropout stages followed by a dense classifier.
model = Sequential()
model.add(Convolution2D(32, 3, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(Convolution2D(32, 32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 32, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten(64*8*8))
model.add(Dense(64*8*8, 512, init='normal'))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(512, nb_classes, init='normal'))
model.add(Activation('softmax'))

# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)

if not data_augmentation:
    print "Not using data augmentation or normalization"
    # Simple [0, 1] scaling when augmentation is disabled.
    X_train = X_train.astype("float32")
    X_test = X_test.astype("float32")
    X_train /= 255
    X_test /= 255
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=10)
    score = model.evaluate(X_test, Y_test, batch_size=batch_size)
    print 'Test score:', score
else:
    print "Using real time data augmentation"
    # this will do preprocessing and realtime data augmentation
    datagen = ImageDataGenerator(
        featurewise_center=True,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=True,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=20,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.2,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.2,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images
    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    datagen.fit(X_train)
    for e in range(nb_epoch):
        print '-'*40
        print 'Epoch', e
        print '-'*40
        print "Training..."
        # batch train with realtime data augmentation
        progbar = generic_utils.Progbar(X_train.shape[0])
        for X_batch, Y_batch in datagen.flow(X_train, Y_train):
            loss = model.train(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[("train loss", loss)])
        print "Testing..."
        # test time!
        progbar = generic_utils.Progbar(X_test.shape[0])
        for X_batch, Y_batch in datagen.flow(X_test, Y_test):
            score = model.test(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[("test loss", score)])
| [
"seba-1511@hotmail.com"
] | seba-1511@hotmail.com |
b0343599369edefd5045f582b653e85406f9da25 | 5ce2e7ac259fa4482a9b5cb668346cbf14bc9a2d | /src/plt_roc.py | 223fee4ae7c5666b1d79a3ea41deda5ae39b1a20 | [] | no_license | Sapphirine/Analysis-on-Children-Learning-Performance | 708e65d1a0330fec6c873a5b0a96b9198b9fe7a4 | da522fc9019238c8cc332045b40541578ffc6ba0 | refs/heads/master | 2020-11-26T17:44:55.074527 | 2019-12-20T00:55:30 | 2019-12-20T00:55:30 | 229,163,210 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | import os
from src.model import model
def clear_temp():
    """Delete every generated .png from the three temp result folders.

    Each model (1-3) writes its ROC plots into static/temp/result_<i>;
    other files (e.g. placeholders) are left untouched.
    """
    for i in range(1, 4):
        # os.path.join instead of fragile manual "/"-concatenation.
        folder = os.path.join("static", "temp", "result_" + str(i))
        for filename in os.listdir(folder):
            if filename.endswith(".png"):
                os.remove(os.path.join(folder, filename))
def create_pic(test_num, names, model_name):
    """Generate a result picture for one model and return (name, score).

    :param test_num: data selector forwarded to model.predict
    :param names: dict mapping model_name -> numeric names already used,
        so the next free file name can be chosen
    :param model_name: string ending in the model number ('1', '2' or '3')
    :return: tuple of (chosen file name as str, prediction score)
    """
    # Next available file name: one past the largest existing, or '1'.
    if not names[model_name]:
        name = '1'
    else:
        name = str(max(names[model_name]) + 1)
    # The trailing digit of model_name selects both the predictor variant
    # and the output folder -- collapses the original copy-pasted
    # if/elif branches that differed only in this literal.
    model_num = int(model_name[-1])
    folder_name = 'result_' + model_name[-1]
    score = model.predict(test_num, model_num, folder_name, name)
    return name, score
| [
"noreply@github.com"
] | Sapphirine.noreply@github.com |
ee58f494d5908b5a89eb8df1330e2d4deba9875d | 7b102f9c8f2e3f9240090d1d67af50333a2ba98d | /gbd_2019/shared_code/central_comp/cod/codem/codem/data/query.py | 1475c6b410aa61cf609740f06c796d35e5881a06 | [] | no_license | Nermin-Ghith/ihme-modeling | 9c8ec56b249cb0c417361102724fef1e6e0bcebd | 746ea5fb76a9c049c37a8c15aa089c041a90a6d5 | refs/heads/main | 2023-04-13T00:26:55.363986 | 2020-10-28T19:51:51 | 2020-10-28T19:51:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,621 | py | """
Functions to query the causes of death database and format and process
all of the CODEm input data. Needs helper functions from the demographics, shared,
and covariates modules.
"""
import pandas as pd
import numpy as np
import sqlalchemy as sql
import logging
import pymysql
import re
import codem.data.queryStrings as QS
import codem.reference.db_connect as db_connect
from codem.data.demographics import get_mortality_data
from codem.data.shared import get_location_info, exclude_regions
from codem.data.covariates import get_all_covariates
logger = logging.getLogger(__name__)
def save_model_outliers(model_version_id, gbd_round_id, decomp_step_id, connection):
    """
    Call the cod.save_model_outliers stored procedure to persist this
    model version's outliers in the outlier_history table.

    Two stored-procedure error messages are treated as benign (no outliers
    exist, or outliers were already saved) and only logged; anything else
    is re-raised. The cursor is always closed and the connection committed.

    :param model_version_id: int
        model version ID
    :param gbd_round_id: int
        gbd round ID
    :param decomp_step_id: int
        decomposition step ID
    :param connection: str
        database that you wish to execute the stored procedure on
    :return: None
    """
    logger.info('Running outlier stored procedure.')
    creds = db_connect.read_creds()
    db = 'ADDRESS'.format(creds=creds, connection=connection)
    engine = sql.create_engine(db)
    # Raw connection needed: SQLAlchemy has no first-class callproc API.
    connect = engine.raw_connection()
    cursor = connect.cursor()
    try:
        cursor.callproc('cod.save_model_outliers', [
            float(model_version_id), float(gbd_round_id), float(decomp_step_id)
        ])
    except pymysql.err.InternalError as e:
        code, msg = e.args
        # "No outlier found" is expected for models without outliers.
        if re.search('No outlier found for this model version id', str(msg)):
            logger.info('There are no outliers for the model version ID {}'.format(model_version_id))
        else:
            # A repeat call is also benign -- the row already exists.
            if re.search('already exists in outlier_history table', str(msg)):
                logger.info('Model version ID {} already exists in the outlier_history table.'.format(model_version_id))
            else:
                raise e
    finally:
        cursor.close()
        connect.commit()
def copy_model_outliers(old_model_version_id, new_model_version_id, connection):
    """
    Call the cod.copy_outliers_by_model_version_id stored procedure to
    copy outliers from an old model version to a new one.

    Unlike save_model_outliers, no error message is treated as benign
    here: any stored-procedure failure is logged and re-raised. The
    cursor is always closed and the connection committed.

    :param old_model_version_id: int
        old "from" model version ID
    :param new_model_version_id: int
        new "to" model version ID
    :param connection: str
        database that you wish to execute the stored procedure on
    :return: None
    """
    logger.info('Running outlier stored copy procedure for old model versions..')
    creds = db_connect.read_creds()
    db = 'ADDRESS'.format(creds=creds, connection=connection)
    engine = sql.create_engine(db)
    # Raw connection needed: SQLAlchemy has no first-class callproc API.
    connect = engine.raw_connection()
    cursor = connect.cursor()
    try:
        cursor.callproc('cod.copy_outliers_by_model_version_id', [
            float(old_model_version_id), float(new_model_version_id)
        ])
    except pymysql.err.InternalError as e:
        logger.info("Hit an error with cod.copy_outliers_by_model_version_id.")
        raise e
    finally:
        cursor.close()
        connect.commit()
def exists_in_outlier_history(model_version_id, connection):
    """
    Count rows in cod.outlier_history for this model version.

    A truthy result means outliers were already saved or copied for this
    model version, so the caller should skip the stored procedures.

    :param model_version_id: (int) model version to look up
    :param connection: (str) database connection name
    :return: (int) row count -- 0 when the model version is absent
    """
    logger.info(f"Checking to make sure that {model_version_id} does not exist in the outlier history table.")
    call = f"SELECT COUNT(*) AS count FROM cod.outlier_history WHERE model_version_id = {model_version_id}"
    count = db_connect.query(call, connection=connection)['count'][0]
    if count:
        logger.info(f"The model version {model_version_id} already exists in the outlier history table.")
    return count
def get_cod_data(cause_id, sex, start_year, start_age, end_age,
                 location_set_version_id, refresh_id, outlier_decomp_step_id,
                 db_connection, model_version_id, gbd_round_id, outlier_model_version_id):
    """
    strings indicating model parameters -> Pandas Data Frame

    Given a list of model parameters will query from the COD database and
    return a pandas data frame. The data frame contains the base variables
    used in the CODEm process.

    Before querying, makes sure this model version has outliers in
    outlier_history: either saves active outliers for the current
    decomp step (when the model is its own outlier source) or copies
    outliers forward from older model versions. Skips the procedures
    entirely if rows already exist for this model version.
    """
    logger.info(f"Querying cod data for refresh {refresh_id} and decomp {outlier_decomp_step_id} outliers.")
    if not exists_in_outlier_history(
            model_version_id=model_version_id,
            connection=db_connection):
        # Model listed as its own outlier source -> save fresh outliers;
        # otherwise copy outliers from each listed source model version.
        if model_version_id in outlier_model_version_id:
            logger.info(f"Running the outlier stored procedure for decomp_step_id {outlier_decomp_step_id}")
            save_model_outliers(
                model_version_id=model_version_id,
                gbd_round_id=gbd_round_id,
                decomp_step_id=outlier_decomp_step_id,
                connection=db_connection
            )
        else:
            for out in outlier_model_version_id:
                logger.info(f"Running the outlier stored procedure to copy outliers from"
                            f"{out} to {model_version_id}")
                copy_model_outliers(
                    old_model_version_id=out,
                    new_model_version_id=model_version_id,
                    connection=db_connection
                )
    else:
        logger.warning("The outlier model version already exists in the table, therefore"
                       "we aren't copying it over.")
        pass
    logger.info(f"Querying cod data for refresh {refresh_id}.")
    call = QS.codQueryStr.format(c=cause_id, s=sex, sy=start_year, sa=start_age,
                                 ea=end_age, loc_set_id=location_set_version_id,
                                 rv=refresh_id,
                                 model_version_id=model_version_id)
    df = db_connect.query(call, db_connection)
    # Normalize the national flag to a strict 0/1 integer column.
    df['national'] = df['national'].map(lambda x: x == 1).astype(int)
    return df
def rbinom(n, p, size):
    """
    NaN-tolerant wrapper around ``np.random.binomial``.

    :param n: int > 0
        number of trials
    :param p: float, 0 < p < 1
        probability of success
    :param size: int > 0
        number of observations
    """
    # Propagate missingness: any NaN input yields an all-NaN draw vector
    # instead of the ValueError numpy would raise.
    if np.isnan(n) or np.isnan(p):
        return np.repeat(np.nan, size)
    return np.random.binomial(n=n, p=p, size=size)
def data_variance(df, response):
    """
    (data frame, string) -> array

    Given a data frame and a response type generates an estimate of the variance
    for that response based on sample size. A single array is returned where
    each observation has been sampled 100 times from a normal distribution to
    find the estimate.

    :param df: data frame with cf, sample_size, envelope, pop and the
        garbage-code variance columns (gc_var_lt_cf / gc_var_ln_rate)
    :param response: "lt_cf" or "ln_rate"; anything else raises RuntimeError
    :return: numpy array of standard deviations (NaN where undefined)
    """
    logger.info("Running data variance for response {}".format(response))
    # Invalid-operation warnings are expected (NaNs flow through); silence
    # them for the duration of this function and restore at the end.
    np.seterr(invalid='ignore')
    cf = df.cf.values
    n = df.sample_size.values
    if response == "lt_cf":
        gc_var = df.gc_var_lt_cf.values
    elif response == "ln_rate":
        gc_var = df.gc_var_ln_rate.values
    else:
        raise RuntimeError("Must specify lt_cf or ln_rate!")
    env = df.envelope.values
    pop = df["pop"].values
    # cf outside (0, 1) cannot be transformed -> treat as missing.
    cf[cf <= 0.00000001] = np.NaN
    cf[cf >= 1.] = np.NaN
    # Binomial standard deviation of the cause fraction.
    cf_sd = (cf * (1-cf) / n)**.5
    cf_sd[cf_sd > .5] = .5  # cap cf_sd
    # 100 normal draws per observation; rate draws are scaled by the
    # envelope-to-population ratio, lt_cf draws are not.
    f = lambda i: np.random.normal(cf[i], cf_sd[i], 100) * (env[i]/pop[i])
    if response == "lt_cf":
        f = lambda i: np.random.normal(cf[i], cf_sd[i], 100)
    draws = np.array(list(map(f, range(len(cf)))))
    # Non-positive draws are invalid under both transforms.
    draws[draws <= 0] = np.NaN
    if response == "lt_cf":
        draws = np.log(draws / (1 - draws))
    elif response == "ln_rate":
        draws = np.log(draws)
    # Variance across draws, ignoring the NaN entries.
    draws_masked = np.ma.masked_array(draws, np.isnan(draws))
    ss_var = np.array(draws_masked.var(axis=1))
    # Combine sampling variance with garbage-code redistribution variance.
    sd_final = (ss_var + gc_var) ** 0.5
    sd_final[sd_final == 0.] = np.NaN
    np.seterr(invalid='warn')
    return sd_final
def data_process(df):
    """
    Pandas data frame -> Pandas data frame

    Given a pandas data frame that was queried for CODEm returns a
    Pandas data frame that has columns added for mixed effect analysis and
    is re-indexed after removing countries with full sub-national data.
    """
    processed = df.copy()
    # Locations whose country rows are neither estimates nor standard
    # locations carry no usable national data -- drop them entirely.
    excluded = processed[
        (df.is_estimate == 0) & (df.standard_location == 0)].country_id.unique()
    processed = processed[~processed.location_id.isin(excluded)]
    processed = processed.replace([np.inf, -np.inf], np.nan)
    # Nested random-effect labels, built up level by level:
    # super_region:region -> +age / +country -> +subnational location.
    processed["region_nest"] = (
        processed.super_region.map(str) + ":" + processed.region.map(str))
    processed["age_nest"] = processed.region_nest + ":" + processed.age.map(str)
    processed["country_nest"] = (
        processed.region_nest + ":" + processed.country_id.map(str))
    processed["sub_nat_nest"] = (
        processed.country_nest + ":" + processed.location_id.map(str))
    # Sampling-variance estimates for both response transforms.
    processed["ln_rate_sd"] = data_variance(processed, "ln_rate")
    processed["lt_cf_sd"] = data_variance(processed, "lt_cf")
    processed.reset_index(drop=True, inplace=True)
    return processed
def get_codem_data(cause_id, sex, start_year, start_age, end_age, regions_exclude,
                   location_set_version_id, decomp_step_id, refresh_id, gbd_round, db_connection,
                   model_version_id, gbd_round_id,
                   env_run_id, pop_run_id, outlier_model_version_id, outlier_decomp_step_id,
                   standard_location_set_version_id):
    """
    Query and assemble the full CODEm modeling data set: cod data joined
    to mortality/population and location metadata, with the modeling
    responses (ln_rate, lt_cf) computed and nesting columns attached.

    :param cause_id: int
        cause_id to pull results from
    :param sex: int, 1 or 2
        sex_id to query
    :param start_year: int
        year of first data point
    :param start_age: int
        age of first data point
    :param end_age: int
        age of last data point
    :param regions_exclude: str
        str of regions to exclude
    :param location_set_version_id: int
        cod location version to use
    :param decomp_step_id: int
        integer 1-5 that indicates which step of the decomposition analysis (for pulling outliers)
    :param refresh_id: int
        refresh ID to use to pull cod.cv_data
    :param db_connection: str
        db connection string not including .ihme.forecasting.edu
    :param model_version_id: int
        model version of the CODEm model
    :param gbd_round_id: int
        GBD round ID
    :param gbd_round: int
        year round that we are working with
    :param pop_run_id: int
        run ID for get_population
    :param env_run_id: int
        run ID for get_envelope
    :param outlier_model_version_id: int
        which model version to use for outliers
    :param outlier_decomp_step_id: int
        which outliers to pull for those that are pulling active outliers
    :param standard_location_set_version_id: int
        standard location set version ID to use
    :return: data frame
        data frame with all model data
    """
    logger.info("Beginning full CoD query.")
    cod = get_cod_data(
        cause_id=cause_id,
        sex=sex,
        start_year=start_year,
        start_age=start_age,
        end_age=end_age,
        location_set_version_id=location_set_version_id,
        refresh_id=refresh_id,
        outlier_decomp_step_id=outlier_decomp_step_id,
        db_connection=db_connection,
        model_version_id=model_version_id,
        gbd_round_id=gbd_round_id,
        outlier_model_version_id=outlier_model_version_id
    )
    mort = get_mortality_data(
        sex=sex,
        start_year=start_year,
        start_age=start_age,
        end_age=end_age,
        location_set_version_id=location_set_version_id,
        gbd_round_id=gbd_round_id,
        gbd_round=gbd_round,
        decomp_step_id=decomp_step_id,
        db_connection=db_connection,
        pop_run_id=pop_run_id,
        env_run_id=env_run_id,
        standard_location_set_version_id=standard_location_set_version_id
    )
    loc = get_location_info(location_set_version_id,
                            standard_location_set_version_id=standard_location_set_version_id,
                            db_connection=db_connection)
    loc = exclude_regions(loc, regions_exclude=regions_exclude)
    # Right-merges keep the full demographic frame even where cod data
    # are missing (those rows get NaN responses below).
    mort_df = mort.merge(loc, how='right', on=['location_id'])
    cod_df = cod.merge(mort_df, how='right',
                       on=['location_id', 'age', 'sex', 'year'])
    # cf of exactly 0 or 1 cannot be transformed -> mark as missing.
    cod_df.loc[cod_df["cf"] == 1, "cf"] = np.NAN
    cod_df.loc[cod_df["cf"] == 0, "cf"] = np.NAN
    # Modeling responses: log death rate and logit cause fraction.
    cod_df['ln_rate'] = np.log(cod_df['cf'] * cod_df['envelope'] / cod_df['pop'])
    cod_df['lt_cf'] = np.log(cod_df['cf'].map(lambda x: x/(1.0-x)))
    df = data_process(cod_df)
    return df
def get_codem_input_data(model_parameters):
    """
    Given an integer which represents a valid model version ID, returns
    two pandas data frames. The first is the input data needed for
    running CODEm models and the second is a data frame of meta data
    needed for covariate selection.

    :param model_parameters: dictionary of model parameters
    :return: tuple of (input data frame, covariate value frame aligned on
        the same rows, covariate metadata/prior frame)
    """
    df = get_codem_data(
        cause_id=model_parameters["cause_id"],
        sex=model_parameters["sex_id"],
        start_year=model_parameters["start_year"],
        start_age=model_parameters["age_start"],
        end_age=model_parameters["age_end"],
        regions_exclude=model_parameters["locations_exclude"],
        location_set_version_id=model_parameters["location_set_version_id"],
        decomp_step_id=model_parameters["decomp_step_id"],
        refresh_id=model_parameters["refresh_id"],
        db_connection=model_parameters["db_connection"],
        gbd_round=model_parameters["gbd_round"],
        model_version_id=model_parameters["model_version_id"],
        gbd_round_id=model_parameters["gbd_round_id"],
        env_run_id=model_parameters["env_run_id"],
        pop_run_id=model_parameters["pop_run_id"],
        outlier_model_version_id=model_parameters["outlier_model_version_id"],
        outlier_decomp_step_id=model_parameters['outlier_decomp_step_id'],
        standard_location_set_version_id=model_parameters["standard_location_set_version_id"]
    )
    cov_df, priors = get_all_covariates(
        model_version_id=model_parameters["model_version_id"],
        sex=model_parameters["sex_id"],
        decomp_step_id=model_parameters["decomp_step_id"],
        gbd_round_id=model_parameters["gbd_round_id"],
        location_set_version_id=model_parameters["location_set_version_id"],
        db_connection=model_parameters["db_connection"],
        standard_location_set_version_id=model_parameters["standard_location_set_version_id"]
    )
    # Restrict to the requested year/age window.
    df = df[
        (df.year >= model_parameters["start_year"]) &
        (df.age >= model_parameters["age_start"]) &
        (df.age <= model_parameters["age_end"])
    ]
    # Attach covariates, then split them back out as a parallel frame
    # ordered by the covariate prior metadata.
    df2 = df.merge(cov_df, how="left", on=["location_id", "age", "sex", "year"])
    covs = df2[priors.name.values]
    # Deduplicate rows and keep covs aligned with the surviving index.
    df = df.drop_duplicates()
    covs = covs.loc[df.index]
    df.reset_index(drop=True, inplace=True)
    covs.reset_index(drop=True, inplace=True)
    # Downcast float64 columns to float32 to halve the memory footprint.
    columns = df.columns.values[df.dtypes.values == np.dtype('float64')]
    df[columns] = df[columns].astype('float32')
    return df, covs, priors
def adjust_input_data(df, covs):
    """
    Adjust the input data such that observations with missing covariates,
    or the envelope/population are equal to zero. Also change cf values of
    zero to NaN.

    :param df: DataFrame with cf, envelope, pop, ln_rate, lt_cf,
        ln_rate_sd and lt_cf_sd columns
    :param covs: DataFrame of covariate values aligned on df's index
    :return: tuple of (adjusted df, adjusted covs, concatenation of both)
    :raises RuntimeError: if any covariate is null or any envelope/pop <= 0
    """
    logging.getLogger(__name__).info("Adjusting input data.")
    adjust_df = df.copy()
    covariates = covs.copy()
    # Missing covariate values invalidate the whole observation.
    if covariates.isnull().values.any():
        raise RuntimeError("You have null covariates!")
    covariates.dropna(inplace=True)
    adjust_df.drop(np.setdiff1d(adjust_df.index.values,
                                covariates.index.values), inplace=True)
    # Remove observations where population or envelope is non-positive.
    zeroes = adjust_df[(adjust_df["envelope"] <= 0) | (adjust_df["pop"] <= 0)]
    if not zeroes.empty:
        raise RuntimeError("You have negative or 0 envelope/pops!")
    adjust_df = adjust_df[(adjust_df["envelope"] > 0) & (adjust_df["pop"] > 0)]
    covariates.drop(np.setdiff1d(covariates.index.values,
                                 adjust_df.index.values), inplace=True)
    # cf values at/near 0 or >= 1 cannot be transformed -> mark missing.
    # Uses np.nan (the np.NaN alias was removed in NumPy 2.0) and .loc
    # instead of chained indexing, which silently fails to assign under
    # pandas copy-on-write.
    adjust_df["cf"] = adjust_df["cf"].map(
        lambda x: np.nan if x <= 0.00000001 or x >= 1 else x)
    missing_sd = adjust_df["lt_cf_sd"].isnull() | adjust_df["ln_rate_sd"].isnull()
    adjust_df.loc[missing_sd, "cf"] = np.nan
    # Responses are undefined wherever cf is missing.
    null_cf = adjust_df["cf"].isnull()
    adjust_df.loc[null_cf, "ln_rate"] = np.nan
    adjust_df.loc[null_cf, "lt_cf"] = np.nan
    covariates.reset_index(drop=True, inplace=True)
    adjust_df.reset_index(drop=True, inplace=True)
    return adjust_df, covariates, pd.concat([adjust_df, covariates], axis=1)
| [
"cheth@uw.edu"
] | cheth@uw.edu |
75191d15b2725b2857af15a70d45006484ef4b0a | 8890ff61262ff98369464721c165d53aa9febe85 | /ironio/iron_worker.py | 95d9b6a3ab577d63f149cf7dd9502ca60702b892 | [
"Apache-2.0"
] | permissive | Mause/tumblr_conn | 09e91bb86e6310ac3f9b0be292967283990558ea | e0ac78947355e41a8432a2a3e12fb86fb28a4c72 | refs/heads/master | 2022-06-15T05:30:27.737676 | 2013-04-29T20:19:57 | 2013-04-29T20:19:57 | 9,258,639 | 0 | 0 | null | 2022-05-17T03:16:49 | 2013-04-06T10:52:39 | JavaScript | UTF-8 | Python | false | false | 16,518 | py | import os
import mimetypes
import zipfile
from dateutil.tz import *
from . import iron_core
try:
import json
except ImportError:
import simplejson as json
def file_exists(file):
    """Check whether *file* exists and can actually be opened for reading.

    Existence alone is not enough -- permission errors and dangling
    symlinks also count as "not available".
    """
    if not os.path.exists(file):
        return False
    try:
        # `with` closes the handle the original implementation leaked.
        with open(file):
            pass
    except IOError:
        return False
    return True
class Task:
    """A queued, scheduled, or completed IronWorker task, hydrated from an
    API response dict and/or keyword arguments."""

    # Server-side identifiers and bookkeeping (populated from API data).
    id = None
    project = None
    code_id = None
    code_history_id = None
    status = None
    code_name = None
    code_rev = None
    created_at = None
    updated_at = None
    start_time = None
    end_time = None
    duration = None
    timeout = 3600
    message = None
    delay = 0
    start_at = None
    end_at = None
    next_start = None
    last_run_time = None
    run_times = None
    run_count = None
    run_every = None
    percent = None
    payload = None
    priority = 0
    scheduled = False
    repeating = False

    # Attributes that arrive JSON-encoded and should be parsed.
    __json_attrs = ["payload"]
    # Attributes transmitted as RFC 3339 timestamp strings.
    __rfc3339_attrs = [
        "created_at", "updated_at", "start_at", "end_at", "next_start", "last_run_time"]
    __timestamp_attrs = ["start_time", "end_time"]
    # Presence of any of these marks the task as a scheduled task.
    __schedule_attrs = [
        "start_at", "end_at", "next_start", "last_run_time", "run_count", "run_every"]
    # Presence of any of these marks the schedule as repeating.
    __repeating_attrs = ["end_at", "next_start", "run_every"]
    # API field name -> attribute name translations.
    __aliases = {
        "project": "project_id",
        "msg": "message"
    }
    # API fields accepted but deliberately not stored.
    __ignore = ["message"]

    def __str__(self):
        if self.id is not None and self.scheduled:
            return "IronWorker Scheduled Task #%s" % self.id
        elif self.id is not None:
            return "IronWorker Task #%s" % self.id
        else:
            return "IronWorker Task"

    def __repr__(self):
        return "<%s>" % str(self)

    def __set(self, attr, value):
        # Normalize and store one attribute: parse RFC 3339 strings into
        # datetimes, flip the scheduled/repeating flags, decode JSON.
        if attr in self.__rfc3339_attrs:
            if isinstance(value, basestring):
                value = iron_core.IronClient.fromRfc3339(value)
        if attr in self.__schedule_attrs:
            self.scheduled = True
        if attr in self.__repeating_attrs:
            self.repeating = True
        if attr in self.__json_attrs:
            if isinstance(value, basestring):
                try:
                    value = json.loads(value)
                except:
                    # Unparseable payloads are kept as the raw string.
                    pass
        setattr(self, attr, value)

    def __init__(self, values=None, **kwargs):
        # Accept attributes either as a dict or as keyword arguments;
        # kwargs win over dict entries with the same key.
        if values is None:
            values = {}
        self.payload = {}
        # All public class-level attribute names are settable.
        attrs = [
            x
            for x in vars(self.__class__).keys()
            if not x.startswith("__")]
        for k in kwargs.keys():
            values[k] = kwargs[k]
        for prop in values.keys():
            if prop in attrs and prop not in self.__ignore:
                self.__set(prop, values[prop])
            elif prop in self.__aliases:
                self.__set(self.__aliases[prop], values[prop])
class CodePackage:
    """A worker code package: a named set of files (plus an executable
    entry point) that can be zipped and uploaded to IronWorker."""

    id = None
    project = None
    name = None
    runtime = None
    latest_checksum = None
    revision = None
    latest_history_id = None
    latest_change = None
    # Mapping of in-zip destination path -> local source path.
    files = None
    # In-zip path of the entry-point file.
    executable = None
    # Local path of the last zip built by zip().
    zip_path = None

    # Attributes transmitted as RFC 3339 timestamp strings.
    __rfc3339_attrs = ["latest_change"]
    # API field name -> attribute name translations.
    __aliases = {
        "project_id": "project",
        "rev": "revision",
        "exec": "executable"
    }

    def __str__(self):
        if self.name is not None:
            return "%s Code Package" % self.name
        elif self.id is not None:
            return "Code Package #%s" % self.id
        else:
            return "IronWorker Code Package"

    def __repr__(self):
        return "<%s>" % str(self)

    def __set(self, attr, value):
        # Parse RFC 3339 strings into datetimes before storing.
        if attr in self.__rfc3339_attrs:
            value = iron_core.IronClient.fromRfc3339(value)
        setattr(self, attr, value)

    def __init__(self, values=None, **kwargs):
        # Accept attributes either as a dict or as keyword arguments;
        # kwargs win over dict entries with the same key.
        if values is None:
            values = {}
        self.files = {}
        for k in kwargs.keys():
            values[k] = kwargs[k]
        attrs = [
            x
            for x in vars(self.__class__).keys()
            if not x.startswith("__")]
        for prop in values.keys():
            if prop in attrs:
                self.__set(prop, values[prop])
            elif prop in self.__aliases:
                self.__set(self.__aliases[prop], values[prop])

    def merge(self, target, ignoreRootDir=False):
        """Add a file, or every file under a directory, to the package.

        If the package then contains exactly one file, it becomes the
        executable.
        """
        if os.path.isfile(target):
            self.files[os.path.basename(target)] = target
        elif os.path.isdir(target):
            for dirname, dirnames, filenames in os.walk(target):
                for filename in filenames:
                    path = os.path.join(dirname, filename)
                    if ignoreRootDir:
                        # NOTE(review): str.lstrip strips *characters*, not a
                        # prefix -- paths starting with characters from
                        # `target` may be over-trimmed; verify intent.
                        ziploc = path.lstrip(target).lstrip("/")
                    else:
                        ziploc = path
                    self.files[ziploc] = path
        else:
            raise ValueError("'%s' is not a file or directory." % target)
        if len(self.files) == 1:
            for dest, loc in self.files.iteritems():
                self.executable = dest

    def merge_dependency(self, dep):
        """Import *dep* by name and bundle its package directory so the
        worker can import it remotely."""
        dependency = __import__(dep)
        location = os.path.dirname(dependency.__file__)
        # NOTE(review): rstrip strips *characters*, not a suffix -- same
        # caveat as merge(); works for typical site-packages layouts.
        parent = location.rstrip(os.path.basename(location))
        for dirname, dirnames, filenames in os.walk(location):
            for filename in filenames:
                path = os.path.join(dirname, filename)
                # Store paths relative to the package's parent directory.
                if path.startswith(parent):
                    newpath = path[len(parent):]
                else:
                    newpath = path
                ziploc = newpath.lstrip("/")
                self.files[ziploc] = path

    def zip(self, destination=None, overwrite=True):
        """Write the package's files into a zip archive.

        :param destination: zip file path; defaults to "<name>.zip"
        :param overwrite: when False, refuse to clobber an existing file
        :return: True if the archive exists afterwards
        :raises ValueError: if no destination can be derived or the
            destination exists and overwrite is False
        """
        if destination is None:
            if self.name is not None:
                destination = "%s.zip" % self.name
            else:
                raise ValueError("Package name or destination is required.")
        if file_exists(destination) and not overwrite:
            raise ValueError("Destination '%s' already exists." % destination)
        # Silently drop entries whose source files have disappeared;
        # iterate over a copy since we mutate self.files.
        filelist = self.files.copy()
        for dest, loc in filelist.items():
            if not file_exists(loc):
                del(self.files[dest])
        if len(self.files) > 0:
            z = zipfile.ZipFile(destination, "w")
            for dest, loc in self.files.items():
                z.write(loc, dest)
            z.close()
        self.zip_path = destination
        return file_exists(destination)
class IronWorker:
NAME = "iron_worker_python"
VERSION = "1.2.0"
def __init__(self, **kwargs):
"""Prepare a configured instance of the API wrapper and return it.
Keyword arguments are passed directly to iron_core_python; consult its
documentation for a full list and possible values."""
self.client = iron_core.IronClient(
name=IronWorker.NAME,
version=IronWorker.VERSION, product="iron_worker", **kwargs)
#############################################################
####################### CODE PACKAGES #######################
#############################################################
def codes(self):
packages = []
resp = self.client.get("codes")
raw_packages = resp["body"]["codes"]
for package in raw_packages:
packages.append(CodePackage(package))
return packages
def code(self, id):
if isinstance(id, CodePackage):
id = id.id
resp = self.client.get("codes/%s" % id)
raw_package = resp["body"]
return CodePackage(raw_package)
def postCode(self, code, zipFilename=None):
zip_loc = code.zip_path
if zipFilename is not None:
zip_loc = zipFilename
if zip_loc is None:
raise ValueError("Need to set the zip file to upload.")
if not file_exists(zip_loc):
raise ValueError("File doesn't exist: %s" % zip_loc)
if code.name is None:
raise ValueError("Code needs a name.")
if code.executable is None:
raise ValueError("Code's executable file needs to be set.")
if code.runtime is None:
code.runtime = "python"
file = open(zip_loc, "rb")
file_contents = file.read()
file.close()
data = [("data", json.dumps({
"name": code.name,
"runtime": code.runtime,
"file_name": code.executable
}))]
files = [("file", zip_loc, file_contents)]
content_type, body = IronWorker.encode_multipart_formdata(data, files)
headers = {
"Content-Type": content_type
}
resp = self.client.post(url="codes", body=body, headers=headers)
return CodePackage(resp["body"])
def upload(self, target, name=None, executable=None, overwrite=True):
if isinstance(target, CodePackage):
code = target
else:
code = CodePackage()
code.merge(target)
if name is not None:
code.name = name
if executable is not None:
code.executable = executable
if code.name is None:
raise ValueError("Need to set a name for the package.")
if code.executable is None:
raise ValueError("Need to set a file as the executable.")
clean_up = not file_exists("%s.zip" % code.name) or overwrite
if code.zip_path is None or not file_exists(code.zip_path):
code.zip(overwrite=overwrite)
result = self.postCode(code)
if clean_up:
os.remove(code.zip_path)
return result
def deleteCode(self, id):
if isinstance(id, CodePackage):
id = id.id
self.client.delete("codes/%s" % id)
return True
def revisions(self, id):
revisions = []
if isinstance(id, CodePackage):
id = id.id
resp = self.client.get("codes/%s/revisions" % id)
raw_revs = resp["body"]["revisions"]
for rev in raw_revs:
revisions.append(CodePackage(rev))
return revisions
def download(self, id, rev=None, destination=None):
if isinstance(id, CodePackage):
if rev is None and id.revision is not None:
rev = id.revision
id = id.id
url = "codes/%s/download" % id
if rev is not None:
url = "%s?revision=%s" % (url, rev)
resp = self.client.get(url)
dest = resp["resp"].getheader("Content-Disposition")
dest = dest.lstrip("filename=")
if destination is not None:
if os.path.isdir(destination):
dest = os.path.join(destination, dest)
else:
dest = destination
dup_dest = dest
iteration = 1
while file_exists(dup_dest) and destination is None:
iteration += 1
dup_dest = dest.rstrip(".zip") + " (" + str(iteration) + ").zip"
f = open(dup_dest, "wb")
f.write(resp["body"])
f.close()
return file_exists(dup_dest)
#############################################################
########################## TASKS ############################
#############################################################
def tasks(self, scheduled=False):
    """List queued tasks, or scheduled tasks when scheduled=True."""
    endpoint = "schedules" if scheduled else "tasks"
    resp = self.client.get(endpoint)
    raw_tasks = resp["body"][endpoint]
    return [Task(raw_task) for raw_task in raw_tasks]
def queue(self, task=None, tasks=None, retry=None, **kwargs):
    """Queue one or more tasks (or schedules) for execution.

    task: a single Task; built from **kwargs when omitted.
    tasks: a list of Tasks; defaults to [task].
    retry: optional retry policy forwarded to the HTTP client.

    Returns a single Task when one task was queued, else a list of Tasks.
    Raises ValueError if any task has no code_name.

    NOTE(review): the endpoint ("tasks" vs "schedules") is taken from the
    *last* task's scheduled flag — mixing scheduled and unscheduled tasks
    in one call looks unsupported; confirm before relying on it.
    """
    tasks_data = []
    if task is None:
        task = Task(**kwargs)
    if tasks is None:
        tasks = [task]
    for task in tasks:
        payload = task.payload
        # Payloads go over the wire as JSON strings; serialize anything
        # that isn't already a string. (basestring => Python 2 code.)
        if not isinstance(payload, basestring):
            payload = json.dumps(payload)
        if task.code_name is None:
            raise ValueError("task.code_name is required.")
        task_data = {
            "name": task.code_name,
            "code_name": task.code_name,
            "payload": payload,
            "priority": task.priority,
            "delay": task.delay
        }
        if not task.scheduled:
            type_str = "tasks"
            task_data["timeout"] = task.timeout
        else:
            type_str = "schedules"
            if task.run_every is not None:
                task_data["run_every"] = task.run_every
            if task.end_at is not None:
                # Naive datetimes are assumed to be local time before
                # RFC 3339 encoding.
                if task.end_at.tzinfo is None:
                    task.end_at = task.end_at.replace(tzinfo=tzlocal())
                task_data["end_at"] = iron_core.IronClient.toRfc3339(task.end_at)
            if task.run_times is not None:
                task_data["run_times"] = task.run_times
            if task.start_at is not None:
                if task.start_at.tzinfo is None:
                    task.start_at = task.start_at.replace(tzinfo=tzlocal())
                task_data["start_at"] = iron_core.IronClient.toRfc3339(task.start_at)
        tasks_data.append(task_data)
    data = json.dumps({type_str: tasks_data})
    headers = {"Content-Type": "application/json"}
    if retry is not None:
        resp = self.client.post(type_str, body=data, headers=headers, retry=retry)
    else:
        resp = self.client.post(type_str, body=data, headers=headers)
    tasks = resp["body"]
    # Mirror the input shape: a list in, a list out; one task in, one out.
    if len(tasks[type_str]) > 1:
        return [Task(task, scheduled=(type_str == "schedules"))
                for task in tasks[type_str]]
    else:
        return Task(tasks[type_str][0],
                    scheduled=(type_str == "schedules"))
def task(self, id, scheduled=False):
    """Fetch a single task (or schedule) by id or Task instance."""
    if isinstance(id, Task):
        scheduled = id.scheduled
        id = id.id
    endpoint = "schedules/%s" if scheduled else "tasks/%s"
    resp = self.client.get(endpoint % id)
    return Task(resp["body"])
def log(self, id):
    """Return the plain-text log of a (non-scheduled) task."""
    if isinstance(id, Task):
        if id.scheduled:
            raise ValueError("Cannot retrieve a scheduled task's log.")
        id = id.id
    resp = self.client.get("tasks/%s/log" % id,
                           headers={"Accept": "text/plain"})
    return resp["body"]
def cancel(self, id, scheduled=False):
    """Cancel a queued task, or a schedule when scheduled=True."""
    if isinstance(id, Task):
        scheduled = id.scheduled
        id = id.id
    template = "schedules/%s/cancel" if scheduled else "tasks/%s/cancel"
    self.client.post(template % id)
    return True
#############################################################
######################### HELPERS ###########################
#############################################################
@staticmethod
def encode_multipart_formdata(fields, files):
"""
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be
uploaded as files
Return (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
for (key, filename, value) in files:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
L.append('Content-Type: %s' % IronWorker.get_content_type(filename))
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, str(body)
@staticmethod
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
| [
"jack.thatch@gmail.com"
] | jack.thatch@gmail.com |
70184ab14338c2a28726a4e83e4412be6ed0cadb | 6df76f8a6fcdf444c3863e3788a2f4b2c539c22c | /django code/p41/p41/urls.py | 3a7f92a6e6a622c7d431852c4a84ab90185c7bcf | [] | no_license | basantbhandari/DjangoProjectsAsDocs | 068e4a704fade4a97e6c40353edb0a4299bd9678 | 594dbb560391eaf94bb6db6dc07702d127010b88 | refs/heads/master | 2022-12-18T22:33:23.902228 | 2020-09-22T13:11:01 | 2020-09-22T13:11:01 | 297,651,728 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | """p41 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include

urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
    # Delegate all enroll/ routes to the enroll app's URLconf.
    path('enroll/', include('enroll.urls'))
]
"36443209+basantbhandari@users.noreply.github.com"
] | 36443209+basantbhandari@users.noreply.github.com |
2435312c8826faf2062cab3201f18f92c06d0d66 | 1561c6b62982c33c2b9b028af1369832d7c190c3 | /synaptor/seg_utils/relabel.py | 91343bf2a92925a627505864733973550b225989 | [
"MIT"
] | permissive | nkemnitz/Synaptor | b60f33e51ed045e7cdaf18465af3d80edaca1cf3 | 40618786d5b762eb3877ecac49ff310f3e6f892d | refs/heads/master | 2020-05-06T15:43:38.584607 | 2019-04-07T17:31:57 | 2019-04-07T17:31:57 | 180,205,457 | 1 | 0 | NOASSERTION | 2019-04-08T18:05:03 | 2019-04-08T18:05:03 | null | UTF-8 | Python | false | false | 2,654 | py | import numpy as np
from . import describe
from . import _relabel
def relabel_data(d, mapping, copy=True):
"""
Relabel data according to a mapping dict.
Modify the entries of :param:d according to a :param:mapping dictionary.
If a value within :param:d doesn't match a key for :param:mapping,
leave it unchanged.
Args:
d (3darray): A data volume.
mapping (dict): A mapping from data values in d to new desired values.
copy (bool): Whether or not to perform relabeling in-place. Defaults
to True, which will create a new volume.
Returns:
3darray: A modified or newly created volume with the
desired modifications.
"""
if copy:
d = np.copy(d)
return _relabel.relabel_data(d, mapping)
def relabel_data_1N(d, copy=True):
"""
Relabel segment values from 1:N
Args:
d (3darray): A segmentation.
copy (bool): Whether or not to perform relabeling in-place. Defaults
to True, which will create a new volume.
Returns:
3darray: A modified or newly created volume with new segids.
"""
mapping = {v: i+1 for (i, v) in enumerate(describe.nonzero_unique_ids(d))}
return relabel_data(d, mapping, copy=copy)
def relabel_data_iterative(d, mapping):
"""
Python-based iterative relabeling
Remapping data according to an id mapping using an iterative strategy.
Best when only modifying a few ids. If a value within d doesn't match
a key for mapping, leave it unchanged.
Args:
d (3darray): A segmentation.
mapping (dict): A mapping from data values in d to new desired values.
Returns:
3darray: A new volume with the desired modifications.
"""
r = np.copy(d)
src_ids = set(np.unique(d))
mapping = dict(filter(lambda x: x[0] in src_ids, mapping.items()))
for (k, v) in mapping.items():
r[d == k] = v
return r
def relabel_data_lookup_arr(d, mapping):
"""
Python-based lookup array relabeling
Remapping data according to an id mapping using a lookup np array.
Best when modifying several ids at once and ids are approximately dense
within 1:max
Args:
d (3darray): A segmentation.
mapping (dict): A mapping from data values in d to new desired values.
Returns:
3darray: A new volume with the desired modifications.
"""
if len(mapping) == 0:
return d
map_keys = np.array(list(mapping.keys()))
map_vals = np.array(list(mapping.values()))
map_arr = np.arange(0, d.max()+1)
map_arr[map_keys] = map_vals
return map_arr[d]
| [
"nturner.stanford@gmail.com"
] | nturner.stanford@gmail.com |
2f847646f43a261924fc84f50fb8e1f46ebf1b26 | 5b01236940cb3b1bb2e987797a0e07868133a85b | /app/error.py | bd23e12d9065af85513cc0e57d88c9b0ff65e2c4 | [] | no_license | dicksonkariuki/Watchlist | 47cf68c45d1ecd810c986a12cb8934ab8453e09c | 2089a577ff6b8bf07d14232658ce9671b6ebb899 | refs/heads/master | 2020-08-08T19:03:24.766702 | 2019-10-17T07:28:38 | 2019-10-17T07:28:38 | 213,634,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | from flask import render_template
from app import app
@app.errorhandler(404)
def four_Ow_four(error):
"""
Function to render the 404 page
"""
return render_template ('fourOwfour.html'),404 | [
"dicksonkariuki4@gmail.com"
] | dicksonkariuki4@gmail.com |
d9b3012794241a6b430ddc7807eaaf0d74e8c56f | d8ea695288010f7496c8661bfc3a7675477dcba0 | /examples/raspberry_pi/relay.py | 01f0a68e794f40467c91592b842f2802038c96ef | [] | no_license | dabolau/demo | de9c593dabca26144ef8098c437369492797edd6 | 212f4c2ec6b49baef0ef5fcdee6f178fa21c5713 | refs/heads/master | 2021-01-17T16:09:48.381642 | 2018-10-08T10:12:45 | 2018-10-08T10:12:45 | 90,009,236 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | import RPi.GPIO as GPIO
import time
def relay(i=0):
    """Drive a relay on BOARD pin 40: i=1 closes it (power on), i=0 opens it.

    Holds the chosen state for 5 seconds, then releases the GPIO pin.
    """
    # Use physical (BOARD) pin numbering.
    GPIO.setmode(GPIO.BOARD)
    # Silence "channel already in use" warnings.
    GPIO.setwarnings(False)
    # Physical pin the relay's signal line is wired to.
    PIN = 40
    # Configure the pin for output.
    GPIO.setup(PIN, GPIO.OUT)
    # Switch state (0/1): 0 means off, 1 means on.
    INT = i
    # On (close the relay).
    if INT == 1:
        GPIO.output(PIN, GPIO.HIGH)  # drive the pin high
        print('power on')
    # Off (open the relay).
    if INT == 0:
        GPIO.output(PIN, GPIO.LOW)  # drive the pin low
        print('power off')
    # Hold the state for 5 seconds.
    time.sleep(5)
    # Release the pin.
    GPIO.cleanup()
if __name__ == '__main__':
    # Demo cycle: on, off, on (each call holds its state for 5 seconds).
    relay(1)  # on
    relay(0)  # off
    relay(1)  # on
| [
"dabolau@qq.com"
] | dabolau@qq.com |
c6eb011206a6832c3dd908dc6cb075ac850cb450 | 8f3336bbf7cd12485a4c52daa831b5d39749cf9b | /Python/binary-tree-right-side-view.py | c8b32518fe78924faa153d9767c0efaaf96c2cc5 | [] | no_license | black-shadows/LeetCode-Topicwise-Solutions | 9487de1f9a1da79558287b2bc2c6b28d3d27db07 | b1692583f7b710943ffb19b392b8bf64845b5d7a | refs/heads/master | 2022-05-30T22:16:38.536678 | 2022-05-18T09:18:32 | 2022-05-18T09:18:32 | 188,701,704 | 240 | 110 | null | 2020-05-08T13:04:36 | 2019-05-26T15:41:03 | C++ | UTF-8 | Python | false | false | 1,306 | py | # Time: O(n)
# Space: O(h)
class TreeNode(object):
    """Binary tree node with a value and left/right child links."""
    def __init__(self, x):
        self.val = x      # node value
        self.left = None  # left child (TreeNode or None)
        self.right = None # right child (TreeNode or None)
class Solution(object):
    # @param root, a tree node
    # @return a list of integers
    def rightSideView(self, root):
        """Values visible from the right side of the tree, top to bottom."""
        view = []
        self.rightSideViewDFS(root, 1, view)
        return view

    def rightSideViewDFS(self, node, depth, result):
        """Right-first DFS: record the first node reached at each depth."""
        if node is None:
            return
        # First visit at this depth is the rightmost node on the level.
        if depth == len(result) + 1:
            result.append(node.val)
        self.rightSideViewDFS(node.right, depth + 1, result)
        self.rightSideViewDFS(node.left, depth + 1, result)
# BFS solution
# Time: O(n)
# Space: O(n)
class Solution2(object):
    # @param root, a tree node
    # @return a list of integers
    def rightSideView(self, root):
        """BFS variant: take the last (rightmost) node of each level."""
        if root is None:
            return []
        result = []
        level = [root]
        while level:
            # Children are gathered left-to-right, so level[-1] is rightmost.
            result.append(level[-1].val)
            level = [child
                     for node in level
                     for child in (node.left, node.right)
                     if child]
        return result
| [
"noreply@github.com"
] | black-shadows.noreply@github.com |
f319c545c30418259e6008aa0ce2bf88c2d3a7bb | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nntyndal.py | bdbf9a039c90b7737706e72fb7e0f0869ab7c93f | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 43 | py | ii = [('CrokTPS.py', 1), ('WadeJEB.py', 1)] | [
"varunwachaspati@gmail.com"
] | varunwachaspati@gmail.com |
be894f7257499ff10f4f259ce795d3033f32e04e | b1f748d761751e89f62cf5b8a2b13adac5bf3a29 | /metermaster/urls.py | dcdb41d64262561d4761bce0daa60a73a5e3078d | [] | no_license | sangeeth-subramoniam/buildingmanagementheroku | 7b77be693fa73dbd2dff9c816bf50daf1e501029 | db26de549f7088d2ff80a303abeeaaa548d43e0b | refs/heads/master | 2023-07-08T13:46:06.384694 | 2021-08-10T06:50:14 | 2021-08-10T06:50:14 | 392,492,925 | 0 | 0 | null | 2021-08-04T02:46:57 | 2021-08-04T00:14:10 | Python | UTF-8 | Python | false | false | 409 | py | from django.urls import path,include
from . import views
app_name = 'metermaster'
urlpatterns = [
path('', views.home , name = "home"),
path('metermaster_update_form/<int:pk>', views.updatemeterForm , name = 'updateMeterForm'),
path('metermaster_delete_form/<int:pk>', views.deletemeterForm , name = 'deleteMeterForm'),
path('ajax/load-stores/', views.load_store, name='ajax_load_stores'),
]
| [
"s-sangeeth-k@sicis.co.jp"
] | s-sangeeth-k@sicis.co.jp |
1999c84509f04a543cf1c61c698ae75b971dd835 | f3ed1631f5cfb10ec3c03974a04f73e1e8dd5829 | /handofcats/middlewares/__init__.py | 2a4c39ee259addb6db2d9c2c2ba965c6c9a45062 | [] | no_license | tell-k/handofcats | 9839e20eb3731890a16dcb6d864b7fc13ee80032 | 135e9abac83db318a7b07337191a1d4f699f7ef2 | refs/heads/master | 2020-12-25T22:29:35.495296 | 2016-01-10T00:29:30 | 2016-01-10T00:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | # -*- coding:utf-8 -*-
from functools import wraps
class MiddlewareApplicator(object):
    """Wraps a function with a chain of middlewares.

    Each middleware is a callable fn(context, closure) (see middlewarefy);
    the chain is applied so that the first registered middleware is the
    outermost layer around the wrapped function.
    """
    def __init__(self, fns):
        # Normalize every plain fn(context, closure) into middleware form.
        self.middlewares = [middlewarefy(fn) for fn in fns]

    def register(self, fn):
        """Append a middleware; it becomes the innermost layer so far."""
        self.middlewares.append(middlewarefy(fn))

    def __call__(self, fn):
        """Decorate *fn* so calls run through the middleware chain."""
        def call(*args, **kwargs):
            # Pack the call into a context dict: positional args under
            # "_args", keyword names under "_keys", values at top level.
            context = {}
            context["_args"] = args
            context["_keys"] = list(kwargs.keys())
            context.update(kwargs)
            def create_result(context):
                # Innermost layer: unpack the (possibly mutated) context
                # and invoke the original function.
                args = context["_args"]
                kwargs = {k: context[k] for k in context["_keys"]}
                return fn(*args, **kwargs)
            closure = create_result
            # Wrap inside-out so middlewares[0] ends up outermost.
            for m in reversed(self.middlewares):
                closure = m(closure)
            return closure(context)
        return call
def middlewarefy(fn):
    """Turn fn(context, closure) into a middleware factory.

    The returned factory takes the next layer (closure) and yields a
    one-argument callable that runs fn with the context and that layer.
    """
    @wraps(fn)
    def middleware(closure):
        def layer(context):
            return fn(context, closure)
        return layer
    return middleware
from .verbosity_adjustment import middleware_verbosity_adjustment
DEFAULT_MIDDLEWARES = [
middleware_verbosity_adjustment,
]
| [
"podhmo+altair@beproud.jp"
] | podhmo+altair@beproud.jp |
3588b3df70f9fbd1b7167ef3bfa267d162441634 | a487691662edb19792007571fc084e68f180af0a | /2020/mapreduceInPython/mapper.py | d3bd22bddd279da6ca99da7d3585b8bd1619f2ba | [] | no_license | eiahb3838ya/PHBS_BigData_2019 | a74231817b1114079961c7d4dba8b7adc2794cad | 91b71d229188cf750e4acf093615bfba5e27ca96 | refs/heads/master | 2021-07-15T06:23:43.505842 | 2020-11-12T03:32:29 | 2020-11-12T03:32:29 | 225,119,161 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 4 19:31:24 2020
@author: eiahb
"""
import sys
from multiprocessing import Pool
import time
def main():
# 读入每行input
for line in sys.stdin:
aRecord = line.split(",")
stockTimeStamp = "{}_{}".format(aRecord[0], aRecord[1][:12])
# results = []
print("%s\t%s" % (stockTimeStamp,aRecord[2]))
if __name__ =="__main__":
tic = time.time()
main()
toc = time.time() - tic
| [
"eiahb3838ya@gmail.com"
] | eiahb3838ya@gmail.com |
c4a51811da26e90ea2d38213de6bbed6a36e762f | fd60c2370bf5fb2355c4b30a30ad5ce9c62bc10d | /orc/arp.py | 6e98cd52efe755a4ea35da057dff2e7af733f3f8 | [] | no_license | hecanjog/hcj.py | 08e43edf62330e1b9e0448edf549c7d18e2e9699 | e42538cd48499bb9e9c11321b2f9db56f15486b4 | refs/heads/master | 2021-01-21T04:54:46.693980 | 2020-01-24T02:03:06 | 2020-01-24T02:03:06 | 19,010,408 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,199 | py | from pippi import dsp
from pippi import tune
from hcj import fx
midi = {'pc': 3}
def play(ctl):
    """Generate one four-note arpeggio pass as rendered audio.

    ctl: controller object exposing 'param' (shared params) and 'midi'
         (MIDI device map) — presumably pippi's live-coding control
         interface; TODO confirm.
    Returns the rendered audio as a sound string (pippi convention).
    """
    param = ctl.get('param')
    # 'pc' MIDI controller (configured in the module-level `midi` map).
    lpd = ctl.get('midi').get('pc')
    lpd.setOffset(111)
    key = 'g'
    #bd = dsp.read('/home/hecanjog/sounds/drums/Tinyrim2.wav').data
    #bd = dsp.read('/home/hecanjog/sounds/drums/Jngletam.wav').data
    #bd = dsp.read('/home/hecanjog/sounds/drums/78oh.wav').data
    #bd = dsp.amp(bd, 1)
    #bd = dsp.transpose(bd, dsp.rand(0.65, 0.72) / 1)
    #bd = dsp.transpose(bd, dsp.rand(0.3, 0.32) / 1)
    # Two-degree chord in G, reversed, then rotated by a MIDI knob value.
    chord = tune.fromdegrees([1,8], root='g', octave=dsp.randint(0,2))
    chord.reverse()
    chord = dsp.rotate(chord, lpd.geti(4, low=0, high=len(chord)-1))
    #chord = dsp.randshuffle(chord)
    # Shared step counter persisted between calls via param.
    reps = param.get('reps', default=16)
    rep = param.get('rep', default=0)
    # NOTE: second assignment overrides the BPM-derived beat length.
    beat = dsp.bpm2frames(130) / 4
    beat = dsp.mstf(4100) / 32
    #length = beat
    out = ''
    for n in range(4):
        # Walk the chord with the step counter; sometimes jump octaves.
        freq = chord[int(rep) % len(chord)]
        if dsp.rand() > 0.5:
            freq *= 2**dsp.randint(0, lpd.geti(7, low=0, high=8, default=0))
        pw = lpd.get(8, low=0.1, high=1, default=1)
        #length = dsp.mstf(lpd.get(2, low=50, high=2500, default=500) * dsp.rand(0.5, 2))
        length = dsp.mstf(lpd.get(14, low=50, high=5000, default=500))
        # NOTE: only the final wavetable assignment (random breakpoints)
        # takes effect; the earlier ones are overridden experiments.
        wf = dsp.wavetable('tri', 512)
        wf = dsp.wavetable('impulse', 512)
        wf = dsp.wavetable('sine2pi', 512)
        wf = dsp.breakpoint([0] + [ dsp.rand(-1,1) for w in range(lpd.geti(15, low=4, high=200, default=4)) ] + [0], 512)
        win = dsp.wavetable('sine', 512)
        # Random modulation table and parameters (later lines override).
        mod = [ dsp.rand(0, 1) for m in range(512) ]
        modr = dsp.rand(0.01, 0.02)
        modr = lpd.get(16, low=0.01, high=1, default=1)
        modf = dsp.rand(0.5, 2)
        amp = lpd.get(6, low=0, high=2, default=0)
        amp = dsp.rand(0, 2)
        # Synthesize one grain, shape it, and append to the output.
        o = dsp.pulsar(freq, length, pw, wf, win, mod, modr, modf, amp)
        o = dsp.env(o, 'random')
        o = dsp.taper(o, dsp.mstf(10))
        o = dsp.pan(o, dsp.rand())
        rep = rep + 1
        out += o
    #out = dsp.mix([ dsp.fill(bd, dsp.flen(out), silence=True), out ])
    # Advance the shared step counter for the next call.
    param.set('rep', (rep + 1) % reps)
    return out
| [
"erik@hecanjog.com"
] | erik@hecanjog.com |
ba436d0fe6e4b79670c1531daca1fcb18e165398 | 48fcd5b9203c5f34dcad9483259c0f3d46f5d48b | /codeacademy-python3/base_exponent.py | 17a5f5046f4816dd6ad60e798125ea7c861562f1 | [] | no_license | ssaulrj/codes-python | 438dd691815d0a688d264928eb07187ba30c2138 | 04b75b001de60a5e202ad373f3379864753ce203 | refs/heads/master | 2022-11-17T11:40:18.883096 | 2020-07-06T00:57:58 | 2020-07-06T00:57:58 | 234,440,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | # Write your large_power function here:
def large_power(base, exponent):
if base**exponent > 5000:
return True
else:
return False
# Uncomment these function calls to test your large_power function:
print(large_power(2, 13))
# should print True
print(large_power(2, 12))
# should print False
| [
"noreply@github.com"
] | ssaulrj.noreply@github.com |
1032cddc93ebd229103c6e5a1e6c3da61571fa57 | 4c6cb7019d06a1c0c588bb98fb359a536c8ae8ea | /04-03/todolist/buy/models.py | 05fdbd3c8532d624dc018c762eb55337dd07c3ec | [] | no_license | hanifmisbah/tugas_bersama | 2be54f4b386a470b04ca29aa293246985b44707a | 4bd4e195b56090ca9256b9e319bb34b92a86d032 | refs/heads/master | 2022-12-19T03:15:33.085665 | 2020-09-10T09:07:56 | 2020-09-10T09:07:56 | 294,304,050 | 0 | 0 | null | 2020-09-10T04:37:28 | 2020-09-10T04:37:28 | null | UTF-8 | Python | false | false | 229 | py | from django.db import models
# Create your models here.
class Buy(models.Model):
    """Shopping-list entry: who buys which item, how many, at what price."""
    # NOTE(review): every field is a TextField; jmlh (quantity) and price
    # would normally be numeric fields — confirm before changing the schema.
    name = models.TextField(default='')   # buyer's name
    brg = models.TextField(default='')    # item ("barang")
    jmlh = models.TextField(default='')   # quantity ("jumlah")
    price = models.TextField(default='')  # item price
"hanifmisbah97@gmail.com"
] | hanifmisbah97@gmail.com |
d066a7f11637683077d5744bdde78bc3b66d42c3 | 53ba0b6f172abcade631ae1f52852c400302559e | /python_developer_tools/cv/utils/opencv_utils/视频操作/__init__.py | 86f7689b1178eccfb5e86f3d6a67938bf93a95a2 | [
"Apache-2.0"
] | permissive | sssssshf/python_developer_tools | f97c64ee0aa0a7e9d31d173192805771c83abb7f | 44d2e67a2e2495a12d6b32da12c76cf0010ac7ea | refs/heads/main | 2023-08-19T02:44:53.536200 | 2021-10-13T02:10:19 | 2021-10-13T02:10:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | # !/usr/bin/env python
# -- coding: utf-8 --
# @Author zengxiaohui
# Datatime:9/14/2021 4:06 PM
# @File:__init__.py
| [
"zengxh@chint.com"
] | zengxh@chint.com |
d6a4beafdd972f88c046bce2fb861e95ccfb9b20 | ea767918d1391d950714d3fafabf65330bade863 | /odin/bay/distributions/quantized.py | bf60b13f432ab1ab8ac1ca5af1b42c52e44d16aa | [
"MIT"
] | permissive | tirkarthi/odin-ai | f5bb33d02047025029891e1282b9bd389eb4eb07 | 7900bef82ad8801d0c73880330d5b24d9ff7cd06 | refs/heads/master | 2023-06-02T20:15:11.233665 | 2020-09-25T09:57:28 | 2020-09-25T09:57:28 | 298,744,248 | 0 | 0 | MIT | 2020-09-26T05:29:11 | 2020-09-26T05:29:10 | null | UTF-8 | Python | false | false | 1,797 | py | from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import exp as exp_bijector
from tensorflow_probability.python.distributions import (
NegativeBinomial, Normal, QuantizedDistribution, TransformedDistribution,
Uniform)
from tensorflow_probability.python.internal import dtype_util
__all__ = ["qUniform", "qNormal"]
class qNormal(QuantizedDistribution):
def __init__(self,
loc=0.,
scale=1.,
min_value=None,
max_value=None,
validate_args=False,
allow_nan_stats=True,
name="qNormal"):
super(qNormal,
self).__init__(distribution=Normal(loc=loc,
scale=scale,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats),
low=min_value,
high=max_value,
name=name)
class qUniform(QuantizedDistribution):
def __init__(self,
low=0.,
high=1.,
min_value=None,
max_value=None,
validate_args=False,
allow_nan_stats=True,
name="qUniform"):
super(qUniform,
self).__init__(distribution=Uniform(low=low,
high=high,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats),
low=min_value,
high=max_value,
name=name)
| [
"nickartin13@gmail.com"
] | nickartin13@gmail.com |
c6281301f2104fda3c8e84f6c963abd6f8f8925d | fb84fa89744e25a6842e5a22cc9aa35f17cb9c79 | /pyquant/marketdata/spot.py | 845f68291cb39b90413809921767447a73b176ad | [] | no_license | masa4u/pyquant-xmlrpc | dbcf92d257cb89d033f9c7811799126412bca9f8 | 54565f0e71fa819a69ba3e3b92a012dbf5a8046f | refs/heads/master | 2016-09-06T10:47:01.093006 | 2015-03-30T02:00:16 | 2015-03-30T02:00:16 | 30,795,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | from pyquant.marketdata.marketdata import MarketDataType
from pyquant.marketdata.single import MarketDataSingle
class MarketDataSpot(MarketDataSingle):
def __init__(self):
super(MarketDataSpot, self).__init__()
print MarketDataSpot().data_type
print MarketDataSpot().value
if __name__ == '__main__':
from pyquant.marketdata.libor import MarketDataLibor
from pyquant.marketdata.cmt import MarketDataCMT
from pyquant.marketdata.cms import MarketDataCMS
from pyquant.marketdata.curve import MarketDataCurve
if issubclass(MarketDataSpot, MarketDataSingle):
print 'yes'
single_data_list = [MarketDataSpot, MarketDataLibor, MarketDataCMT, MarketDataCurve]
for c in single_data_list:
print c.__name__, issubclass(c, MarketDataSingle)
| [
"masa4u@gmail.com"
] | masa4u@gmail.com |
37919b373e29fe85008749e2a8c1d126d697b1f8 | 6c677098c78b3f410019ac26f116cd8539949d35 | /utils/money.py | b1e72ef7c897fae87de3c441e35f2689b539d670 | [
"MIT"
] | permissive | Pythonian/bsawf | eb05dcf7eeb3fab10dad269f9018fc3aa56c967e | 3e422a81cfb1b157119473c20b94a9a01f8b9672 | refs/heads/master | 2023-05-27T20:32:25.965703 | 2022-03-16T14:57:26 | 2022-03-16T14:57:26 | 253,907,876 | 0 | 0 | MIT | 2023-05-02T20:53:12 | 2020-04-07T20:44:53 | Python | UTF-8 | Python | false | false | 375 | py | def cents_to_dollars(cents):
"""
Convert cents to dollars.
:param cents: Amount in cents
:type cents: int
:return: float
"""
return round(cents / 100.0, 2)
def dollars_to_cents(dollars):
    """
    Convert dollars to cents.

    :param dollars: Amount in dollars
    :type dollars: float
    :return: int
    """
    # BUG FIX: int() truncates toward zero, so binary float error made
    # e.g. int(19.99 * 100) == 1998. Round to the nearest cent instead.
    return int(round(dollars * 100))
| [
"prontomaster@gmail.com"
] | prontomaster@gmail.com |
9aaa01d7f2436c3f20c0bfbd0c9d297f697493b0 | 8f68d714d5ba6bd27e65238557c7daa17eb814a8 | /15-project/python_ipify/tests/__init__.py | 51fa4088f0abfa850215c3df3f51eb616df1292e | [
"MIT"
] | permissive | acckcc/python-training-course | 28834fe6ed8794f88a38f038fd64d9d224e685bb | 804e457a8e7ac9dee75a6adfe25b32edf7ba6024 | refs/heads/master | 2022-04-19T17:48:46.699806 | 2020-04-16T15:04:53 | 2020-04-16T15:04:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | # -*- coding: utf-8 -*-
"""Unit test package for python_ipify."""
| [
"zj0512@gmail.com"
] | zj0512@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.