blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
629b3a5257059913a7da306eec068efe1e380de4 | bd8bc7abe0f774f84d8275c43b2b8c223d757865 | /705_DesignHashSet/MyHashSet.py | 06a999a2cec78ea54e5bd553bcec061ffea7135f | [
"MIT"
] | permissive | excaliburnan/SolutionsOnLeetcodeForZZW | bde33ab9aebe9c80d9f16f9a62df72d269c5e187 | 64018a9ead8731ef98d48ab3bbd9d1dd6410c6e7 | refs/heads/master | 2023-04-07T03:00:06.315574 | 2021-04-21T02:12:39 | 2021-04-21T02:12:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | class Bucket:
def __init__(self):
self.buckt = []
def add(self, key):
if key not in self.buckt:
self.buckt.append(key)
def remove(self, key):
for i, k in enumerate(self.buckt):
if k == key:
del self.buckt[i]
    def contains(self, key):
        # Membership is a linear scan of the chain; chains are expected to
        # stay short because the caller spreads keys over many buckets.
        return key in self.buckt
class MyHashSet:
    """Integer hash set implemented with separate chaining over many buckets."""

    def __init__(self):
        """
        Initialize your data structure here.
        """
        # A prime bucket count spreads the keys more evenly.
        self.key_space = 2069
        self.hash_table = [Bucket() for _ in range(self.key_space)]

    def _bucket_for(self, key):
        """Return the bucket that owns *key*."""
        return self.hash_table[key % self.key_space]

    def add(self, key: int) -> None:
        self._bucket_for(key).add(key)

    def remove(self, key: int) -> None:
        self._bucket_for(key).remove(key)

    def contains(self, key: int) -> bool:
        """
        Returns true if this set contains the specified element
        """
        return self._bucket_for(key).contains(key)
# Your MyHashSet object will be instantiated and called as such:
# obj = MyHashSet()
# obj.add(key)
# obj.remove(key)
# param_3 = obj.contains(key)
| [
"noreply@github.com"
] | excaliburnan.noreply@github.com |
f008a629a26cf50be0ba05f2ff12cea28da03c6d | 9188d0d7ce9fc5fadf4d2593741894e1448f9326 | /indico/vendor/django_mail/__init__.py | 9cee3d734ac462e5c0da565826c3884569db9e34 | [
"MIT"
] | permissive | vaclavstepan/indico | b411410416acdfa50b0d374f89ec8208de00fb2f | 8ca1ac4d4a958f22f24580a790b3cb015570bdfb | refs/heads/master | 2023-07-21T04:42:03.031131 | 2021-09-01T09:54:17 | 2021-09-01T09:54:17 | 385,897,420 | 0 | 0 | MIT | 2021-07-16T13:07:32 | 2021-07-14T10:16:57 | null | UTF-8 | Python | false | false | 1,359 | py | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
# TODO: Move this whole package into a standalone pypi package, since it's
# useful in general for anyone who wants to send emails (without using django)
# The code in here is taken almost verbatim from `django.core.mail`,
# which is licensed under the three-clause BSD license and is originally
# available on the following URL:
# https://github.com/django/django/blob/stable/2.2.x/django/core/mail/__init__.py
# Credits of the original code go to the Django Software Foundation
# and their contributors.
"""
Tools for sending email.
"""
from flask import current_app
from .backends.base import BaseEmailBackend
from .module_loading_utils import import_string
__all__ = ['get_connection']
def get_connection(backend=None, fail_silently=False, **kwds) -> BaseEmailBackend:
    """Instantiate and return an email backend.

    When ``backend`` is None (the default), the dotted path is taken from
    the ``EMAIL_BACKEND`` config value.  ``fail_silently`` and any extra
    keyword arguments are forwarded to the backend constructor.
    """
    dotted_path = backend or current_app.config['EMAIL_BACKEND']
    backend_cls = import_string(dotted_path)
    return backend_cls(fail_silently=fail_silently, **kwds)
| [
"adrian.moennich@cern.ch"
] | adrian.moennich@cern.ch |
7a98bd64abdb3d07cde81ab64491ac9725dcb528 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02936/s782768449.py | 56eea4ff0a74433df558677289cc44ba5326f3a0 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | n, q = map(int, input().split())
# n (vertex count) and q (query count) were read above.
# Build the 1-indexed adjacency list; Counter[v] accumulates vertex v's value.
adj = [[] for _ in range(n + 1)]
Counter = [0] * (n + 1)
# Read the n-1 edges first, then the q point-add queries (input order matters).
for _ in range(n - 1):
    a, b = map(int, input().split())
    adj[a].append(b)
    adj[b].append(a)
for _ in range(q):
    p, x = map(int, input().split())
    Counter[p] += x
# Iterative DFS from vertex 1, pushing every vertex's running total down to
# each of its children exactly once.
parent_of = [-1] * (n + 1)
stack = [1]
while stack:
    v = stack.pop()
    for nxt in adj[v]:
        if parent_of[v] == nxt:
            continue
        parent_of[nxt] = v
        Counter[nxt] += Counter[v]
        stack.append(nxt)
print(*Counter[1:]) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
6f05788dedfb6545482ba4bd9b8acffa93ddfb1e | 29c476c037a05170ff2ddef8edd07014d3751614 | /0x03-python-data_structures/10-divisible_by_2.py | 787cefb10383f36ee67ca2a984d31d20a27f6684 | [] | no_license | hacheG/holbertonschool-higher_level_programming | a0aaddb30665833bd260766dac972b7f21dda8ea | 535b1ca229d7cf61124a128bb5725e5200c27fbc | refs/heads/master | 2020-07-22T23:09:27.486886 | 2020-02-13T19:41:34 | 2020-02-13T19:41:34 | 207,360,462 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | #!/usr/bin/python3
def divisible_by_2(my_list=[]):
    """Return a list of booleans: True where the matching element is even.

    Note: the mutable default argument is harmless here because my_list is
    never mutated.
    """
    return [element % 2 == 0 for element in my_list]
| [
"943@holbertonschool.com"
] | 943@holbertonschool.com |
a452e26672a218523c0d1f3f356856adc98f25b9 | b6553d9371a3612c992cfe0dba678cbc16c6812b | /a-concurrent/http_request.py | 36e4bf8acdab802ce4473dd7627fe7ccf2c97506 | [] | no_license | drgarcia1986/bev-py-concurrency | 071ef6f899c7c892eeb446c024b67bfa56d6a83c | 4159b9acb82ade59c9b7d1b5ae49d764fddf5430 | refs/heads/master | 2021-01-09T05:50:37.331265 | 2017-02-03T15:30:24 | 2017-02-03T15:30:24 | 80,840,883 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | import asyncio
import aiohttp
async def make_get(delay):
    """GET httpbin's /delay endpoint; return (delay, request_succeeded)."""
    # aiohttp.get() was removed in aiohttp 3.x; a ClientSession used as an
    # async context manager is the supported API, and it also guarantees the
    # connection is released (no manual close() required).
    async with aiohttp.ClientSession() as session:
        async with session.get('https://httpbin.org/delay/{}'.format(delay)) as response:
            return delay, response.status == 200
async def make_requests(*delays):
    """Run one make_get per delay concurrently; results keep the input order."""
    return await asyncio.gather(*(make_get(d) for d in delays))
if __name__ == "__main__":
    # asyncio.run() (Python 3.7+) creates, runs and closes the event loop,
    # replacing the deprecated get_event_loop()/run_until_complete() pattern
    # (the old code also never closed the loop).
    responses = asyncio.run(make_requests(5, 2, 3))
    for response in responses:
        print(response)
    print('Done')
| [
"drgarcia1986@gmail.com"
] | drgarcia1986@gmail.com |
907b45c3d962efdc0f0ec839c4a198f7fa84df2e | 7ac82627034f262d110112112bd3f7e430f3fd90 | /CodeEdgeDetection.py | 13accf650198daa9d37272e2f4a0fbf31bbe2ac1 | [] | no_license | saguileran/Codigos | bca7172e19aefb5ed9ec0720991cafff078278cc | f49a72f57de3769d06ff4a09df07e9d25d4dde29 | refs/heads/master | 2020-06-01T16:53:27.446944 | 2019-06-27T19:26:57 | 2019-06-27T19:26:57 | 190,854,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | import cv2
import numpy as np
from matplotlib import pyplot as plt
# loading image
# Alternative input image, kept for reference:
# img0 = cv2.imread('SanFrancisco.jpg',)
img0 = cv2.imread('Camera.jpg',)
# Convert to a single channel and suppress noise before differentiating.
gray = cv2.cvtColor(img0, cv2.COLOR_BGR2GRAY)
img = cv2.GaussianBlur(gray, (3, 3), 0)
# One second-derivative edge map (Laplacian) and two first-derivative maps
# (Sobel along x and along y), all computed on the blurred image.
panels = [
    ('Original', img),
    ('Laplacian', cv2.Laplacian(img, cv2.CV_64F)),
    ('Sobel X', cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)),
    ('Sobel Y', cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)),
]
for slot, (title, picture) in enumerate(panels, start=1):
    plt.subplot(2, 2, slot)
    plt.imshow(picture, cmap='gray')
    plt.title(title), plt.xticks([]), plt.yticks([])
plt.show()
# Adapted from: https://www.bogotobogo.com/python/OpenCV_Python/python_opencv3_Image_Gradient_Sobel_Laplacian_Derivatives_Edge_Detection.php
| [
"root@beaglebone.localdomain"
] | root@beaglebone.localdomain |
266c3267d5b874ecb5dda55196cfc42fc8c3ef76 | 29a78032c3b2fdd4722f6c054ab20a5a8cea627c | /studtpy/string2.py | ec6493272f2407e3dd9850cfac2e0d2a611c519b | [] | no_license | jungting20/pythonpro | 838ea188f846b6e1a90f1a7c429f02464b1b0927 | 455dd23132023cb472bab5e8d9ba4a881331db54 | refs/heads/master | 2021-06-27T16:20:54.768172 | 2017-09-16T08:38:19 | 2017-09-16T08:38:19 | 103,737,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | n = 42
f = 7.03
s = 'string cheese'
# Two sample dictionaries passed as positional arguments to str.format().
aa = {'n':42,'f':7.03,'s':'string cheese'}
bb = {'n':40,'f':6.03,'s':'cheese'}
# Remember this: each dictionary handed to .format() is a single object, so
# it occupies exactly one positional slot.
# The leading 0 / 1 in each placeholder is the position of the argument in
# the .format() call; the [key] part then indexes into that dictionary.
bb = '{0[n]:<10d} {0[f]:<10f} {0[s]:<10s} {1[n]} {1[f]} {1[s]}'.format(aa,bb)
# '0>2' right-aligns in a field of width 2, padding with zeros: 1 -> '01'.
cc = '{0:0>2d}'.format(1)
print(cc)
| [
"jungting20@gmail.com"
] | jungting20@gmail.com |
a1e5a47e98755a69e6e6ec284c49cc864ae633de | 7d2f933ed3c54e128ecaec3a771817c4260a8458 | /venv/Lib/site-packages/pandas/core/computation/pytables.py | 0787ffe9d2aba703a0a4433d1e1344dc72020c57 | [] | no_license | danielmoreira12/BAProject | c61dfb1d0521eb5a28eef9531a00e744bfb0e26a | 859f588305d826a35cc8f7d64c432f54a0a2e031 | refs/heads/master | 2021-01-02T07:17:39.267278 | 2020-02-25T22:27:43 | 2020-02-25T22:27:43 | 239,541,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,128 | py | """ manage PyTables query interface via Expressions """
import ast
import numpy as np
import pandas as pd
import pandas.core.common as com
from functools import partial
from pandas._libs.tslibs import Timedelta, Timestamp
from pandas.compat.chainmap import DeepChainMap
from pandas.core.computation import expr, ops, scope as _scope
from pandas.core.computation.common import _ensure_decoded
from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.ops import UndefinedVariableError, is_term
from pandas.core.dtypes.common import is_list_like
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
from typing import Any, Dict, Optional, Tuple
class PyTablesScope(_scope.Scope):
    """Name-resolution scope for pytables expressions that also carries the
    table's queryable columns."""

    __slots__ = ("queryables",)

    # column name -> column description, as supplied by the HDF table
    queryables: Dict[str, Any]

    def __init__(
        self,
        level: int,
        global_dict=None,
        local_dict=None,
        queryables: Optional[Dict[str, Any]] = None,
    ):
        # level + 1: presumably accounts for this constructor's own frame so
        # the base Scope resolves names relative to our caller -- see _scope.Scope
        super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict)
        self.queryables = queryables or dict()
class Term(ops.Term):
    """A term of a pytables expression: a column reference (str) on the left,
    or a name to be resolved on the right."""

    env: PyTablesScope

    def __new__(cls, name, env, side=None, encoding=None):
        # A non-string name can never refer to a queryable column, so it is
        # instantiated as Constant instead of Term.
        klass = Constant if not isinstance(name, str) else cls
        return object.__new__(klass)

    def __init__(self, name, env: PyTablesScope, side=None, encoding=None):
        super().__init__(name, env, side=side, encoding=encoding)

    def _resolve_name(self):
        # must be a queryables
        if self.side == "left":
            # Note: The behavior of __new__ ensures that self.name is a str here
            if self.name not in self.env.queryables:
                raise NameError(f"name {repr(self.name)} is not defined")
            return self.name

        # resolve the rhs (and allow it to be None)
        try:
            return self.env.resolve(self.name, is_local=False)
        except UndefinedVariableError:
            # an unbound name on the rhs falls back to its literal text
            return self.name

    # read-only property overwriting read/write property
    @property  # type: ignore
    def value(self):
        return self._value
class Constant(Term):
    """A literal value appearing in a pytables expression (never a column)."""

    def __init__(self, value, env: PyTablesScope, side=None, encoding=None):
        # internal invariant: Constants are only ever built with a PyTablesScope
        assert isinstance(env, PyTablesScope), type(env)
        super().__init__(value, env, side=side, encoding=encoding)

    def _resolve_name(self):
        # a constant resolves to itself rather than to anything in scope
        return self._name
class BinOp(ops.BinOp):
    """Binary operation of a pytables query.

    prune() specializes a generic BinOp tree into either ConditionBinOps
    (evaluated in-kernel by numexpr) or FilterBinOps (applied after reading).
    """

    # above this many values an equality expression becomes a filter instead
    _max_selectors = 31

    op: str
    queryables: Dict[str, Any]

    def __init__(self, op: str, lhs, rhs, queryables: Dict[str, Any], encoding):
        super().__init__(op, lhs, rhs)
        self.queryables = queryables
        self.encoding = encoding
        self.condition = None

    def _disallow_scalar_only_bool_ops(self):
        # scalar-only boolean ops are permitted for pytables (base class hook)
        pass

    def prune(self, klass):
        def pr(left, right):
            """ create and return a new specialized BinOp from myself """
            # one side already pruned away -> propagate the other side
            if left is None:
                return right
            elif right is None:
                return left

            k = klass
            if isinstance(left, ConditionBinOp):
                if isinstance(right, ConditionBinOp):
                    # two conditions combine into a joint condition
                    k = JointConditionBinOp
                elif isinstance(left, k):
                    return left
                elif isinstance(right, k):
                    return right

            elif isinstance(left, FilterBinOp):
                if isinstance(right, FilterBinOp):
                    # two filters combine into a joint filter
                    k = JointFilterBinOp
                elif isinstance(left, k):
                    return left
                elif isinstance(right, k):
                    return right

            return k(
                self.op, left, right, queryables=self.queryables, encoding=self.encoding
            ).evaluate()

        left, right = self.lhs, self.rhs

        # recurse into non-term operands; terms contribute their value directly
        if is_term(left) and is_term(right):
            res = pr(left.value, right.value)
        elif not is_term(left) and is_term(right):
            res = pr(left.prune(klass), right.value)
        elif is_term(left) and not is_term(right):
            res = pr(left.value, right.prune(klass))
        elif not (is_term(left) or is_term(right)):
            res = pr(left.prune(klass), right.prune(klass))

        return res

    def conform(self, rhs):
        """ inplace conform rhs """
        # ensure the rhs is a flat 1-D list-like of values
        if not is_list_like(rhs):
            rhs = [rhs]
        if isinstance(rhs, np.ndarray):
            rhs = rhs.ravel()
        return rhs

    @property
    def is_valid(self) -> bool:
        """ return True if this is a valid field """
        return self.lhs in self.queryables

    @property
    def is_in_table(self) -> bool:
        """ return True if this is a valid column name for generation (e.g. an
        actual column in the table) """
        return self.queryables.get(self.lhs) is not None

    @property
    def kind(self):
        """ the kind of my field """
        return getattr(self.queryables.get(self.lhs), "kind", None)

    @property
    def meta(self):
        """ the meta of my field """
        return getattr(self.queryables.get(self.lhs), "meta", None)

    @property
    def metadata(self):
        """ the metadata of my field """
        return getattr(self.queryables.get(self.lhs), "metadata", None)

    def generate(self, v) -> str:
        """ create and return the op string for this TermValue """
        val = v.tostring(self.encoding)
        return f"({self.lhs} {self.op} {val})"

    def convert_value(self, v) -> "TermValue":
        """ convert the expression that is in the term to something that is
        accepted by pytables """

        def stringify(value):
            if self.encoding is not None:
                encoder = partial(pprint_thing_encoded, encoding=self.encoding)
            else:
                encoder = pprint_thing
            return encoder(value)

        kind = _ensure_decoded(self.kind)
        meta = _ensure_decoded(self.meta)
        if kind == "datetime64" or kind == "datetime":
            if isinstance(v, (int, float)):
                v = stringify(v)
            v = _ensure_decoded(v)
            v = Timestamp(v)
            # pytables stores datetimes as UTC nanosecond integers
            if v.tz is not None:
                v = v.tz_convert("UTC")
            return TermValue(v, v.value, kind)
        elif kind == "timedelta64" or kind == "timedelta":
            v = Timedelta(v, unit="s").value
            return TermValue(int(v), v, kind)
        elif meta == "category":
            # categoricals are compared through their integer codes
            metadata = com.values_from_object(self.metadata)
            result = metadata.searchsorted(v, side="left")

            # result returns 0 if v is first element or if v is not in metadata
            # check that metadata contains v
            if not result and v not in metadata:
                result = -1
            return TermValue(result, result, "integer")
        elif kind == "integer":
            v = int(float(v))
            return TermValue(v, v, kind)
        elif kind == "float":
            v = float(v)
            return TermValue(v, v, kind)
        elif kind == "bool":
            if isinstance(v, str):
                # common "falsy" spellings map to False; everything else is True
                v = not v.strip().lower() in [
                    "false",
                    "f",
                    "no",
                    "n",
                    "none",
                    "0",
                    "[]",
                    "{}",
                    "",
                ]
            else:
                v = bool(v)
            return TermValue(v, v, kind)
        elif isinstance(v, str):
            # string quoting
            return TermValue(v, stringify(v), "string")
        else:
            raise TypeError(f"Cannot compare {v} of type {type(v)} to {kind} column")

    def convert_values(self):
        # base-class hook; conversion happens per-value in convert_value()
        pass
class FilterBinOp(BinOp):
    """Binary operation realized as a post-read filter: (column, op-callable,
    Index of values) applied to the axis after the table has been read."""

    filter: Optional[Tuple[Any, Any, pd.Index]] = None

    def __repr__(self) -> str:
        if self.filter is None:
            return "Filter: Not Initialized"
        return pprint_thing(f"[Filter : [{self.filter[0]}] -> [{self.filter[1]}]")

    def invert(self):
        """ invert the filter """
        if self.filter is not None:
            f = list(self.filter)
            f[1] = self.generate_filter_op(invert=True)
            self.filter = tuple(f)
        return self

    def format(self):
        """ return the actual filter format """
        return [self.filter]

    def evaluate(self):
        if not self.is_valid:
            raise ValueError(f"query term is not valid [{self}]")

        rhs = self.conform(self.rhs)
        values = list(rhs)

        if self.is_in_table:
            # if too many values to create the expression, use a filter instead
            if self.op in ["==", "!="] and len(values) > self._max_selectors:
                filter_op = self.generate_filter_op()
                self.filter = (self.lhs, filter_op, pd.Index(values))

                return self
            # small enough to be handled as an in-kernel condition instead
            return None

        # equality conditions
        if self.op in ["==", "!="]:
            filter_op = self.generate_filter_op()
            self.filter = (self.lhs, filter_op, pd.Index(values))

        else:
            raise TypeError(
                f"passing a filterable condition to a non-table indexer [{self}]"
            )

        return self

    def generate_filter_op(self, invert: bool = False):
        # != (or an inverted ==) excludes the values; otherwise include them
        if (self.op == "!=" and not invert) or (self.op == "==" and invert):
            return lambda axis, vals: ~axis.isin(vals)
        else:
            return lambda axis, vals: axis.isin(vals)
class JointFilterBinOp(FilterBinOp):
    """Conjunction of two filters; cannot be collapsed into a single filter."""

    def format(self):
        raise NotImplementedError("unable to collapse Joint Filters")

    def evaluate(self):
        # operands were already evaluated; nothing further to do
        return self
class ConditionBinOp(BinOp):
    """Binary operation realized as a numexpr condition string evaluated
    in-kernel by pytables."""

    def __repr__(self) -> str:
        return pprint_thing(f"[Condition : [{self.condition}]]")

    def invert(self):
        """ invert the condition """
        # if self.condition is not None:
        #    self.condition = "~(%s)" % self.condition
        # return self
        raise NotImplementedError(
            "cannot use an invert condition when passing to numexpr"
        )

    def format(self):
        """ return the actual ne format """
        return self.condition

    def evaluate(self):
        if not self.is_valid:
            raise ValueError(f"query term is not valid [{self}]")

        # convert values if we are in the table
        if not self.is_in_table:
            return None

        rhs = self.conform(self.rhs)
        values = [self.convert_value(v) for v in rhs]

        # equality conditions
        if self.op in ["==", "!="]:
            # too many values to create the expression?
            if len(values) <= self._max_selectors:
                vs = [self.generate(v) for v in values]
                self.condition = f"({' | '.join(vs)})"

            # use a filter after reading
            else:
                return None
        else:
            self.condition = self.generate(values[0])

        return self
class JointConditionBinOp(ConditionBinOp):
    """Two already-evaluated conditions combined with a boolean operator."""

    def evaluate(self):
        # both operands are ConditionBinOps whose .condition is already built
        self.condition = f"({self.lhs.condition} {self.op} {self.rhs.condition})"
        return self
class UnaryOp(ops.UnaryOp):
    """Unary operation in a pytables query; only inversion (~) is supported."""

    def prune(self, klass):
        if self.op != "~":
            raise NotImplementedError("UnaryOp only support invert type ops")

        operand = self.operand
        operand = operand.prune(klass)

        if operand is not None:
            # only invert an operand that actually pruned to the requested kind
            if issubclass(klass, ConditionBinOp):
                if operand.condition is not None:
                    return operand.invert()
            elif issubclass(klass, FilterBinOp):
                if operand.filter is not None:
                    return operand.invert()

        return None
class PyTablesExprVisitor(BaseExprVisitor):
    """AST visitor that translates a parsed query into pytables Term/BinOp
    objects instead of generic computation ops."""

    const_type = Constant
    term_type = Term

    def __init__(self, env, engine, parser, **kwargs):
        super().__init__(env, engine, parser)
        # install a visit_* handler for every binary operator; each handler
        # returns a factory building a pytables BinOp.  bin_op=bin_op binds
        # the loop variable early (avoids the late-binding-closure pitfall).
        for bin_op in self.binary_ops:
            bin_node = self.binary_op_nodes_map[bin_op]
            setattr(
                self,
                f"visit_{bin_node}",
                lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs),
            )

    def visit_UnaryOp(self, node, **kwargs):
        if isinstance(node.op, (ast.Not, ast.Invert)):
            return UnaryOp("~", self.visit(node.operand))
        elif isinstance(node.op, ast.USub):
            # fold unary minus into the constant's value
            return self.const_type(-self.visit(node.operand).value, self.env)
        elif isinstance(node.op, ast.UAdd):
            raise NotImplementedError("Unary addition not supported")
        # any other unary op implicitly returns None

    def visit_Index(self, node, **kwargs):
        return self.visit(node.value).value

    def visit_Assign(self, node, **kwargs):
        # 'a = b' in a query is treated as the comparison 'a == b'
        cmpr = ast.Compare(
            ops=[ast.Eq()], left=node.targets[0], comparators=[node.value]
        )
        return self.visit(cmpr)

    def visit_Subscript(self, node, **kwargs):
        # only allow simple subscripts
        value = self.visit(node.value)
        slobj = self.visit(node.slice)
        try:
            value = value.value
        except AttributeError:
            pass

        try:
            return self.const_type(value[slobj], self.env)
        except TypeError:
            raise ValueError(f"cannot subscript {repr(value)} with {repr(slobj)}")

    def visit_Attribute(self, node, **kwargs):
        attr = node.attr
        value = node.value

        ctx = type(node.ctx)
        if ctx == ast.Load:
            # resolve the value
            resolved = self.visit(value)

            # try to get the value to see if we are another expression
            try:
                resolved = resolved.value
            except (AttributeError):
                pass

            try:
                return self.term_type(getattr(resolved, attr), self.env)
            except AttributeError:
                # something like datetime.datetime where scope is overridden
                if isinstance(value, ast.Name) and value.id == attr:
                    return resolved

        raise ValueError(f"Invalid Attribute context {ctx.__name__}")

    def translate_In(self, op):
        # 'x in y' is handled as equality for pytables purposes
        return ast.Eq() if isinstance(op, ast.In) else op

    def _rewrite_membership_op(self, node, left, right):
        # no rewriting needed for pytables; return the pieces unchanged
        return self.visit(node.op), node.op, left, right
def _validate_where(w):
"""
Validate that the where statement is of the right type.
The type may either be String, Expr, or list-like of Exprs.
Parameters
----------
w : String term expression, Expr, or list-like of Exprs.
Returns
-------
where : The original where clause if the check was successful.
Raises
------
TypeError : An invalid data type was passed in for w (e.g. dict).
"""
if not (isinstance(w, (PyTablesExpr, str)) or is_list_like(w)):
raise TypeError(
"where must be passed as a string, PyTablesExpr, "
"or list-like of PyTablesExpr"
)
return w
class PyTablesExpr(expr.Expr):
    """
    Hold a pytables-like expression, comprised of possibly multiple 'terms'.

    Parameters
    ----------
    where : string term expression, PyTablesExpr, or list-like of PyTablesExprs
    queryables : a "kinds" map (dict of column name -> kind), or None if column
        is non-indexable
    encoding : an encoding that will encode the query terms

    Returns
    -------
    a PyTablesExpr object

    Examples
    --------
    'index>=date'
    "columns=['A', 'D']"
    'columns=A'
    'columns==A'
    "~(columns=['A','B'])"
    'index>df.index[3] & string="bar"'
    '(index>df.index[3] & index<=df.index[6]) | string="bar"'
    "ts>=Timestamp('2012-02-01')"
    "major_axis>=20130101"
    """

    _visitor: Optional[PyTablesExprVisitor]
    env: PyTablesScope

    def __init__(
        self,
        where,
        queryables: Optional[Dict[str, Any]] = None,
        encoding=None,
        scope_level: int = 0,
    ):
        where = _validate_where(where)

        self.encoding = encoding
        self.condition = None
        self.filter = None
        self.terms = None
        self._visitor = None

        # capture the environment if needed
        local_dict: DeepChainMap[Any, Any] = DeepChainMap()

        if isinstance(where, PyTablesExpr):
            # reuse the scope captured by the existing expression
            local_dict = where.env.scope
            _where = where.expr

        elif isinstance(where, (list, tuple)):
            # a list of clauses is AND-ed together into a single expression
            where = list(where)
            for idx, w in enumerate(where):
                if isinstance(w, PyTablesExpr):
                    local_dict = w.env.scope
                else:
                    w = _validate_where(w)
                    where[idx] = w
            _where = " & ".join((f"({w})" for w in com.flatten(where)))
        else:
            _where = where

        self.expr = _where
        self.env = PyTablesScope(scope_level + 1, local_dict=local_dict)

        # only parse when we actually have queryables and a string expression
        if queryables is not None and isinstance(self.expr, str):
            self.env.queryables.update(queryables)
            self._visitor = PyTablesExprVisitor(
                self.env,
                queryables=queryables,
                parser="pytables",
                engine="pytables",
                encoding=encoding,
            )
            self.terms = self.parse()

    def __repr__(self) -> str:
        if self.terms is not None:
            return pprint_thing(self.terms)
        return pprint_thing(self.expr)

    def evaluate(self):
        """ create and return the numexpr condition and filter """
        try:
            self.condition = self.terms.prune(ConditionBinOp)
        except AttributeError:
            raise ValueError(
                f"cannot process expression [{self.expr}], [{self}] "
                "is not a valid condition"
            )
        try:
            self.filter = self.terms.prune(FilterBinOp)
        except AttributeError:
            raise ValueError(
                f"cannot process expression [{self.expr}], [{self}] "
                "is not a valid filter"
            )

        return self.condition, self.filter
class TermValue:
    """Carrier for a term's raw value, its pytables-converted form and the
    kind tag used to render it into a query string."""

    def __init__(self, value, converted, kind: str):
        # internal invariant: kind is a plain str such as "string"/"float"
        assert isinstance(kind, str), kind
        self.value = value
        self.converted = converted
        self.kind = kind

    def tostring(self, encoding) -> str:
        """Render the converted value for embedding in a pytables query."""
        if self.kind == "string":
            # quote only when no encoding is in effect
            if encoding is None:
                return f'"{self.converted}"'
            return str(self.converted)
        if self.kind == "float":
            # repr() round-trips floats exactly
            return repr(self.converted)
        return str(self.converted)
def maybe_expression(s) -> bool:
    """Loosely check whether *s* could be a pytables expression: it must be a
    string containing at least one operator token."""
    if not isinstance(s, str):
        return False
    candidate_ops = PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + ("=",)
    for token in candidate_ops:
        if token in s:
            return True
    return False
| [
"danielmoreira12@github.com"
] | danielmoreira12@github.com |
6b30f92d6c8692c9de33540170070de26905643f | 4577d8169613b1620d70e3c2f50b6f36e6c46993 | /students/1815745/homework02/program02.py | 5dd1b627308412ffb7923fffd477fb45a221ef02 | [] | no_license | Fondamenti18/fondamenti-di-programmazione | cbaf31810a17b5bd2afaa430c4bf85d05b597bf0 | 031ec9761acb1a425fcc4a18b07884b45154516b | refs/heads/master | 2020-03-24T03:25:58.222060 | 2018-08-01T17:52:06 | 2018-08-01T17:52:06 | 142,419,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,805 | py | '''
Un file di compiti contiene informazioni su un insieme di compiti da eseguire.
Esistono due tipologie di compiti:
- compiti che possono essere eseguiti indipendentemente dagli altri.
- compiti da svolgere solo al termine di un compito preliminare.
I compiti del primo tipo sono codificati nel file mediante una linea che contiene
in sequenza le due sottostringhe "comp" ed "N" (senza virgolette) eventualmente inframmezzate,
precedute e/o seguite da spazi. "N" e' l'ID del compito (un numero positivo).
Compiti del secondo tipo sono codificati nel file mediante due linee di codice.
-- la prima linea, contiene in sequenza le due sottostringhe "comp" ed "N"
(senza virgolette) eventualmente inframmezzate,
precedute e/o seguite da spazi. "N" e' l'ID del compito (un numero positivo).
-- la seconda linea (immediatamente successiva nel file) contiene
in sequenza le due sottostringhe "sub" ed "M" (senza virgolette) eventualmente inframmezzate,
precedute e/o seguite da spazi. "M" e' l'ID del compito preliminare.
il seguente file di compiti contiene informazioni su 4 compiti (con identificativi 1,3,7 e 9).
I compiti con identificativi 1 e 9 possono essere svolti indipendentemente dagli altri mentre i compiti
con identificativo 3 e 7 hanno entrambi un compito preliminare.
comp 3
sub 9
comp1
comp 9
comp 7
sub3
Scrivere la funzione pianifica(fcompiti,insi,fout) che prende in input:
- il percorso di un file (fcompiti)
- un insieme di ID di compiti da cercare (insi)
- ed il percorso di un file (fout)
e che salva in formato JSON nel file fout un dizionario (risultato).
Il dizionario (risultato) dovra' contenere come chiavi gli identificativi (ID) dei compiti
presenti in fcompiti e richiesti nell'insieme insi.
Associata ad ogni ID x del dizionario deve esserci una lista contenente gli identificativi (ID) dei compiti
che bisogna eseguire prima di poter eseguire il compito x richiesto
(ovviamente la lista di un ID di un compito che non richie un compito preliminare risultera' vuota ).
Gli (ID) devono comparire nella lista nell'ordine di esecuzione corretto, dal primo fino a quello precedente a quello richiesto
(ovviamente il primo ID di una lista non vuota corripondera' sempre ad un compito che non richiede un compito preliminare).
Si puo' assumere che:
- se l' ID di un compito che richieda un compito preliminare e' presente in fcompiti
allora anche l'ID di quest'ultimo e' presente in fcompiti
- la sequenza da associare al compito ID del dizionario esiste sempre
- non esistono cicli (compiti che richiedono se' stessi anche indirettamente)
Ad esempio per il file di compiti fcompiti contenente:
comp 3
sub 9
comp1
comp 9
comp 7
sub3
al termine dell'esecuzione di pianifica(fcompiti,{'7','1','5'}, 'a.json')
il file 'a.json' deve contenere il seguente dizionario
{'7':['9','3'],'1':[]}
Per altri esempi vedere il file grade02.txt
AVVERTENZE:
non usare caratteri non ASCII, come le lettere accentate;
non usare moduli che non sono nella libreria standard.
NOTA: l'encoding del file e' 'utf-8'
ATTENZIONE: Se un test del grader non termina entro 10 secondi il punteggio di quel test e' zero.
'''
import json
def pianifica(fcompiti,insi,fout):
    """Parse the task file *fcompiti* and, for each task ID in *insi* that is
    present, dump to *fout* (JSON) a dict mapping the ID to the list of IDs of
    its prerequisite tasks."""
    with open(fcompiti, encoding = 'utf-8' , mode = 'rt') as f:
        diz = {}      # requested task ID -> list of prerequisite IDs (result)
        dizbis = {}   # every other task ID -> its direct prerequisite(s)
        lista_sub_bis = []
        vab = []
        l = 0         # current line number (1-based)
        for linea in f:
            l += 1
            if linea.find('comp') != -1:
                # strip() removes the character set {'c','o','m','p',' ','\n'};
                # digits survive, leaving the numeric ID as a string
                x = linea.strip('comp \n')
                k = 0
                h = 0
                for insieme in insi :
                    varID = insieme
                    if x == varID:
                        # a requested task: start collecting its prerequisites
                        lista_sub = []
                        diz[x] = lista_sub
                        h = l + 1   # a 'sub' line only counts on the very next line
                if h == 0:
                    # not requested, but a requested task may depend on it
                    lista_sub_bis = []
                    dizbis[x] = lista_sub_bis
                    k = l + 1
            if linea.find('sub') != -1:
                if l == h:
                    y = linea.strip('sub \n')
                    lista_sub.append(y)
                if k == l:
                    y = linea.strip('sub \n')
                    lista_sub_bis.append(y)
        # Expand the prerequisite chains: appending to va while iterating it
        # makes the inner loop also visit the freshly added IDs, so indirect
        # prerequisites get pulled in transitively.
        for chbis,vabis in dizbis.items():
            for valorebis in vabis:
                for ch,va in diz.items():
                    for valore in va:
                        if chbis == valore:
                            va.append(valorebis)
                            # NOTE(review): sorting the IDs as strings may not
                            # reproduce the required execution order described
                            # in the spec -- confirm against the grader.
                            vab = sorted(va)
                            diz[ch] = vab
    with open(fout, mode = 'w') as f:
        json.dump(diz,f)
| [
"a.sterbini@gmail.com"
] | a.sterbini@gmail.com |
bc5d19bd2accbef984fe12bf55bf5efc38843cdd | 0e5291f09c5117504447cc8df683ca1506b70560 | /netbox_client/models/virtual_chassis.py | 55a342402aba80391af4f170111c0cc764deb924 | [
"MIT"
] | permissive | nrfta/python-netbox-client | abd0192b79aab912325485bf4e17777a21953c9b | 68ba6dd4d7306513dc1ad38f3ac59122ba4f70a8 | refs/heads/master | 2022-11-13T16:29:02.264187 | 2020-07-05T18:06:42 | 2020-07-05T18:06:42 | 277,121,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,791 | py | # coding: utf-8
"""
NetBox API
API to access NetBox # noqa: E501
OpenAPI spec version: 2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class VirtualChassis(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'master': 'NestedDevice',
'domain': 'str',
'tags': 'list[str]',
'member_count': 'int'
}
attribute_map = {
'id': 'id',
'master': 'master',
'domain': 'domain',
'tags': 'tags',
'member_count': 'member_count'
}
    def __init__(self, id=None, master=None, domain=None, tags=None, member_count=None):  # noqa: E501
        """VirtualChassis - a model defined in Swagger"""  # noqa: E501
        self._id = None
        self._master = None
        self._domain = None
        self._tags = None
        self._member_count = None
        self.discriminator = None
        # optional attributes are only assigned when provided
        if id is not None:
            self.id = id
        # master is assigned unconditionally: its property setter raises
        # ValueError when the value is None (required field)
        self.master = master
        if domain is not None:
            self.domain = domain
        if tags is not None:
            self.tags = tags
        if member_count is not None:
            self.member_count = member_count
    @property
    def id(self):
        """Gets the id of this VirtualChassis.  # noqa: E501

        :return: The id of this VirtualChassis.  # noqa: E501
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this VirtualChassis.

        :param id: The id of this VirtualChassis.  # noqa: E501
        :type: int
        """
        # plain read/write accessor generated by swagger-codegen; no validation
        self._id = id
    @property
    def master(self):
        """Gets the master of this VirtualChassis.  # noqa: E501

        :return: The master of this VirtualChassis.  # noqa: E501
        :rtype: NestedDevice
        """
        return self._master

    @master.setter
    def master(self, master):
        """Sets the master of this VirtualChassis.

        :param master: The master of this VirtualChassis.  # noqa: E501
        :type: NestedDevice
        :raises ValueError: if *master* is None (required property)
        """
        # master is required: reject None explicitly
        if master is None:
            raise ValueError("Invalid value for `master`, must not be `None`")  # noqa: E501

        self._master = master
@property
def domain(self):
"""Gets the domain of this VirtualChassis. # noqa: E501
:return: The domain of this VirtualChassis. # noqa: E501
:rtype: str
"""
return self._domain
@domain.setter
def domain(self, domain):
"""Sets the domain of this VirtualChassis.
:param domain: The domain of this VirtualChassis. # noqa: E501
:type: str
"""
if domain is not None and len(domain) > 30:
raise ValueError("Invalid value for `domain`, length must be less than or equal to `30`") # noqa: E501
self._domain = domain
@property
def tags(self):
"""Gets the tags of this VirtualChassis. # noqa: E501
:return: The tags of this VirtualChassis. # noqa: E501
:rtype: list[str]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this VirtualChassis.
:param tags: The tags of this VirtualChassis. # noqa: E501
:type: list[str]
"""
self._tags = tags
@property
def member_count(self):
"""Gets the member_count of this VirtualChassis. # noqa: E501
:return: The member_count of this VirtualChassis. # noqa: E501
:rtype: int
"""
return self._member_count
@member_count.setter
def member_count(self, member_count):
"""Sets the member_count of this VirtualChassis.
:param member_count: The member_count of this VirtualChassis. # noqa: E501
:type: int
"""
self._member_count = member_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(VirtualChassis, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VirtualChassis):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"67791576+underline-bot@users.noreply.github.com"
] | 67791576+underline-bot@users.noreply.github.com |
2064f456c5a1818c85b08b9b443632e186ae9c5d | d8010e5d6abc2dff0abb4e695e74fb23b4f7d558 | /publishing/books/views.py | 5a144558fd6f3fac6f3640f5283f5929042b46fd | [
"MIT"
] | permissive | okoppe8/django-nested-inline-formsets-example | d17f4e1181925d132625e51453cb8c50107ade1c | c0b1abb32f6d09a7732d48e40ea61e21b258e959 | refs/heads/master | 2020-03-09T21:48:51.804513 | 2018-03-04T11:13:55 | 2018-03-04T11:13:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,938 | py | from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.views.generic import DetailView, FormView, ListView, TemplateView
from django.views.generic.detail import SingleObjectMixin
from .forms import PublisherBooksWithImagesFormset
from .models import Publisher, Book, BookImage
class HomeView(TemplateView):
    """Render the static site home page (no extra context)."""
    template_name = 'books/home.html'
class PublisherListView(ListView):
    """List all Publisher objects."""
    model = Publisher
    template_name = 'books/publisher_list.html'
class PublisherDetailView(DetailView):
    """Show a single Publisher, looked up by pk from the URL."""
    model = Publisher
    template_name = 'books/publisher_detail.html'
class PublisherUpdateView(SingleObjectMixin, FormView):
    """Edit a Publisher's books and their images through one nested formset.

    Combines ``SingleObjectMixin`` (to resolve the Publisher from the URL)
    with ``FormView`` (to drive the formset's display/validation cycle).
    """

    model = Publisher
    template_name = 'books/publisher_update.html'

    def get(self, request, *args, **kwargs):
        """Display the formset for the Publisher being edited."""
        self.object = self.get_object(queryset=Publisher.objects.all())
        return super().get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """Bind and validate the submitted formset for this Publisher."""
        self.object = self.get_object(queryset=Publisher.objects.all())
        return super().post(request, *args, **kwargs)

    def get_form(self, form_class=None):
        """Return the big formset-of-formsets bound to the current Publisher.

        ``form_class`` is accepted for interface compatibility but ignored —
        the formset class is fixed.
        """
        form_kwargs = self.get_form_kwargs()
        return PublisherBooksWithImagesFormset(instance=self.object, **form_kwargs)

    def form_valid(self, form):
        """Persist the formset, flash a success message, and redirect."""
        form.save()
        messages.add_message(
            self.request, messages.SUCCESS, 'Changes were saved.')
        return HttpResponseRedirect(self.get_success_url())

    def get_success_url(self):
        """Send the user back to this Publisher's detail page."""
        publisher_pk = self.object.pk
        return reverse('books:publisher_detail', kwargs={'pk': publisher_pk})
| [
"phil@gyford.com"
] | phil@gyford.com |
b7caeef897518daa65994562807bac7471c5cbf2 | 202180e6b7109e9058cce442054d6532c44c796d | /crm/migrations/0010_auto_20161127_1530.py | 1ffb531b34b3e42c78cc8c52836e43f171d5a024 | [
"Apache-2.0"
] | permissive | pkimber/old-crm-migrated-to-gitlab | 230d4eec0cfa794c90fff8c75154d98699820093 | 835e8ff3161404316b7da35cf61e3851763b37b9 | refs/heads/master | 2021-06-15T22:07:08.207855 | 2017-04-27T21:05:53 | 2017-04-27T21:05:53 | 12,544,468 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-27 15:30
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); applied migrations should
    # not be hand-edited. Adds soft-delete bookkeeping to the crm Ticket
    # model: date_deleted / deleted / user_deleted.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('crm', '0009_auto_20160215_0844'),
    ]
    operations = [
        # Timestamp of the soft-delete (null while the ticket is active).
        migrations.AddField(
            model_name='ticket',
            name='date_deleted',
            field=models.DateTimeField(blank=True, null=True),
        ),
        # Soft-delete flag; default False keeps all existing rows active.
        migrations.AddField(
            model_name='ticket',
            name='deleted',
            field=models.BooleanField(default=False),
        ),
        # User who performed the delete; related_name='+' disables the
        # reverse accessor on the user model.
        migrations.AddField(
            model_name='ticket',
            name='user_deleted',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"code@pkimber.net"
] | code@pkimber.net |
d80e11fcc0b33c2543565138e2a0162fa4c18653 | 8015f1c62a2cb4efd21aa8938336913bf8117868 | /bamap/ba0142.pngMap.py | a8a06e317152099d16dd372f1deee6c101d4566c | [] | no_license | GamerNoTitle/Beepers-and-OLED | 675b5e3c179df0f0e27b42bf594c43860d03b9af | afe1340e5394ae96bda5f9022a8a66824368091e | refs/heads/master | 2020-04-20T00:09:47.122471 | 2019-04-29T04:59:35 | 2019-04-29T04:59:35 | 168,515,579 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,468 | py | ba0142.pngMap = [
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111001111111000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000111111001111110000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000001111111111111111110000111111111111111111111111100000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111100000000000000000000',
'00000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111110000000000000000000',
'00000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111100000000000000000000',
'00000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111101000000000000000000',
'00000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111000000000000000000',
'00000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111111111111000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111111000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111111000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111111000000000000000000',
'00000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111000000000000000000',
'00000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111110111111000000000000000000',
'00000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111110000001000000000000000000',
'00000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111100000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111100000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111111110100000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111100000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111111000000000000000000',
'00000000000000000000000000000000000000000000000000000000000001011111111111111111111111111111111111111111111111100000000000000000',
'00000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111111111000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111110000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111110000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000001111100111111111111111111111111111111111100000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000001111000111111111111111111111111111111111110000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000001110000001111111111111111111111111111111110000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000001111000001111111111111111111111111111111100000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000011110000001111111111111111111111111111111100000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000001110000001111111111111111111111111111111100000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000001000000011111111111111111111111111111111000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000011011111111111111111111111111111110000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000001000011111111111111111111111111111111110000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111100000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111100000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111110000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111110000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111110100000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111110000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111100000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111100000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111100000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111010000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111111000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111110001000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111111111100000000000000',
'00000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111111111110000000000000',
'00000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111111111110000000000000',
'00000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111100111111111111111110000000000000',
'00000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111100011111111111111111000000000000',
'00000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111100001111111111111111000000000000',
'00000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111100001111111111111111110000000000',
'00000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111000001111111111111111110000000000',
'00000000000000000000000000000000000000000000000000000000000000101111111111111111111111111111110000011111111111111111111100000000',
'00000000000000000000000000000000000000000000000000000000000000101111111111111111111111111111111000011111111111111111111110000000',
'00000000000000000000000000000000000000000000000000000000000000101111111111111111111111111111110000001111111111111111111111100000',
'00000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111100000001111101111111111111111100000',
'00000000000000000000000000000000000000000000000000000000001011111111111111111111111111111111100000001111110100111111111111111000',
'00000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111000000111100001111111111111110000',
'00000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111000000111110000011111111111111100',
'00000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111000001111110001111111111111111110',
]
| [
"bili33@87ouo.top"
] | bili33@87ouo.top |
d88bc3b619dd0c947cd3561a3c6ff54d1970a192 | 779af874adf1647461981b0c36530cf9924f5f01 | /python3/dist-packages/UpdateManager/UpdateManagerVersion.py | d75b462722ca8685adf317e6dfadc5abbfc87df3 | [] | no_license | hitsuyo/Library_Python_3.5 | 8974b5de04cb7780b0a1a75da5cb5478873f08e7 | 374e3f9443e4d5cae862fd9d81db8b61030ae172 | refs/heads/master | 2022-11-05T23:46:47.188553 | 2018-01-04T19:29:05 | 2018-01-04T19:29:05 | 116,093,537 | 1 | 2 | null | 2022-10-26T03:07:06 | 2018-01-03T05:02:20 | Python | UTF-8 | Python | false | false | 21 | py | VERSION = '1:16.04.5' | [
"nguyentansang3417@gmail.com"
] | nguyentansang3417@gmail.com |
c26b0cd852fa2c4d413cab8c8391e0fee5551723 | 464338d9556cf9892a4647ab1b94dd9d2879274d | /ohealth/ohealth_insurance_plan/__init__.py | 7fde06bc5796d75e88f9e7335f1229285c7746ad | [] | no_license | linzheyuan/O-health | 5199637de8343508c5fe2d8e611a394cf39d1673 | eab3fc74ee7b878dbcc8234597de053de9d9e608 | refs/heads/master | 2021-01-24T21:36:14.068894 | 2013-07-07T12:34:28 | 2013-07-07T12:34:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,053 | py | # -*- coding: utf-8 -*-
#/#############################################################################
#
# HITSF
#
# Special Credit and Thanks to Thymbra Latinoamericana S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#/#############################################################################
import ohealth_insurance_plan
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"rizabisnis@gmail.com"
] | rizabisnis@gmail.com |
1672eb182095e59d0e9341177bf03cc900cbf11e | 4ae3b27a1d782ae43bc786c841cafb3ace212d55 | /django_test_proj/env_02/restful03/restful03/urls.py | 7a095534f5b9d3d66860e7f591a63ea43cc1412b | [] | no_license | bopopescu/Py_projects | c9084efa5aa02fd9ff6ed8ac5c7872fedcf53e32 | a2fe4f198e3ca4026cf2e3e429ac09707d5a19de | refs/heads/master | 2022-09-29T20:50:57.354678 | 2020-04-28T05:23:14 | 2020-04-28T05:23:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | """restful03 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url, include
# Project-level URL routing table.
urlpatterns = [
    path('admin/', admin.site.urls),
    # NOTE(review): the regex has no trailing slash or boundary, so it also
    # matches paths like /apifoo — presumably r'^api/' was intended; confirm
    # against api.urls before changing, as existing clients may rely on it.
    url(r'^api', include("api.urls"))
]
| [
"sunusd@yahoo.com"
] | sunusd@yahoo.com |
3b9878aecf89cf4a6e5d1ea51931dd7545238438 | 70fc2cc55f1030c08382bd80050afc62c48c29ad | /scripts/run_benchmark.py | b29f9942681644f5b369ad95b6ee30aef0d14647 | [] | no_license | haowenCS/transfer-hpo-framework | 4ecdd3cf204f685e74a2b561263c6f21308ab9db | 0f2cc036fac7a504e506ec0cc6bf914003933c44 | refs/heads/main | 2023-08-21T03:40:23.119136 | 2021-10-04T15:50:05 | 2021-10-04T15:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44,506 | py | import argparse
import copy
import json
import os
parser = argparse.ArgumentParser()
parser.add_argument(
'--benchmark',
choices=['alpine', 'quadratic', 'adaboost', 'svm', 'openml-svm', 'openml-xgb',
'openml-glmnet', 'nn'],
default='alpine',
)
parser.add_argument('--task', type=int)
parser.add_argument(
'--method',
choices=['gpmap', 'gcp', 'random', 'rgpe', 'ablr', 'tstr', 'taf', 'wac', 'rmogp',
'gcp+prior', 'klweighting'],
default='random',
)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n-init', type=int, default=3)
parser.add_argument('--output-file', type=str, default=None)
parser.add_argument('--iteration-multiplier', type=int, default=1)
parser.add_argument('--empirical-meta-configs', action='store_true')
parser.add_argument('--grid-meta-configs', action='store_true')
parser.add_argument('--learned-initial-design', choices=['None', 'unscaled', 'scaled', 'copula'],
default='None')
parser.add_argument('--search-space-pruning', choices=['None', 'complete', 'half'], default='None')
parser.add_argument('--percent-meta-tasks', default=1.0)
parser.add_argument('--percent-meta-data', default=1.0)
args, unknown = parser.parse_known_args()
output_file = args.output_file
if output_file is not None:
try:
with open(output_file, 'r') as fh:
json.load(fh)
print('Output file %s exists - shutting down.' % output_file)
exit(1)
except Exception as e:
print(e)
pass
from ConfigSpace import (
Configuration,
ConfigurationSpace,
UniformFloatHyperparameter,
UniformIntegerHyperparameter,
)
from rgpe.exploring_openml import SVM, XGBoost, GLMNET
import numpy as np
import pandas as pd
from smac.optimizer.acquisition import EI
from smac.optimizer.ei_optimization import FixedSet
from smac.scenario.scenario import Scenario
from smac.facade.roar_facade import ROAR
from smac.facade.smac_bo_facade import SMAC4BO
from smac.initial_design.latin_hypercube_design import LHDesign
import rgpe
import rgpe.test_functions
from rgpe.methods.noisy_ei import NoisyEI, ClosedFormNei
try:
os.makedirs(os.path.dirname(output_file))
except:
pass
kwargs = {}
assert len(unknown) % 2 == 0, (len(unknown), unknown)
for i in range(int(len(unknown) / 2)):
# Drop the initial "--"
key = unknown[i * 2][2:].replace('-', '_')
value = unknown[i * 2 + 1]
kwargs[key] = value
seed = args.seed
np.random.seed(seed)
benchmark_name = args.benchmark
method_name = args.method
empirical_meta_configs = args.empirical_meta_configs
grid_meta_configs = args.grid_meta_configs
if empirical_meta_configs and grid_meta_configs:
raise ValueError('Only one allowed at a time!')
learned_initial_design = args.learned_initial_design
# Use the same seed for each method benchmarked!
rng_initial_design = np.random.RandomState(seed)
search_space_pruning = args.search_space_pruning
# Set up the benchmark function
if benchmark_name in ['alpine']:
task = args.task
if benchmark_name == 'alpine':
benchmark = rgpe.test_functions.Alpine1D(rng=seed, task=task)
else:
raise ValueError()
data_by_task = benchmark.get_meta_data(fixed_grid=grid_meta_configs)
acquisition_function_maximizer = None
acquisition_function_maximizer_kwargs = None
initial_design = LHDesign
initial_design_kwargs = {'init_budget': args.n_init, 'rng': rng_initial_design}
initial_configurations = None
elif benchmark_name == 'quadratic':
task = args.task
if task is None:
raise TypeError('Task must not be None!')
benchmark = rgpe.test_functions.Quadratic(rng=seed, task=task)
data_by_task = benchmark.get_meta_data(fixed_grid=grid_meta_configs)
acquisition_function_maximizer = None
acquisition_function_maximizer_kwargs = None
initial_design = LHDesign
initial_design_kwargs = {'init_budget': args.n_init, 'rng': rng_initial_design}
initial_configurations = None
elif benchmark_name in ['openml-svm', 'openml-xgb', 'openml-glmnet']:
task = args.task
if task is None:
raise TypeError('Task must not be None!')
acquisition_function_maximizer = None
acquisition_function_maximizer_kwargs = None
initial_design = LHDesign
initial_design_kwargs = {'init_budget': args.n_init, 'rng': rng_initial_design}
initial_configurations = None
task_to_dataset_mapping = [
3, 31, 37, 44, 50, 151, 312, 333, 334, 335,
1036, 1038, 1043, 1046, 1049, 1050, 1063, 1067, 1068,
1120, 1176,
1461, 1462, 1464, 1467, 1471, 1479, 1480, 1485, 1486, 1487, 1489, 1494,
1504, 1510, 1570, 4134, 4534,
]
task_to_dataset_mapping = {i: task_to_dataset_mapping[i] for i in range(len(task_to_dataset_mapping))}
print('OpenML dataset ID', task_to_dataset_mapping[task])
if benchmark_name == 'openml-svm':
benchmark = SVM(dataset_id=task_to_dataset_mapping[task], rng=task_to_dataset_mapping[task])
elif benchmark_name == 'openml-xgb':
benchmark = XGBoost(dataset_id=task_to_dataset_mapping[task], rng=task_to_dataset_mapping[task])
elif benchmark_name == 'openml-glmnet':
benchmark = GLMNET(dataset_id=task_to_dataset_mapping[task], rng=task_to_dataset_mapping[task])
# Set up the meta-data to be used
data_by_task = dict()
rng = np.random.RandomState(seed)
for i, dataset_id in enumerate(task_to_dataset_mapping.values()):
if task == i:
continue
if benchmark_name == 'openml-svm':
meta_benchmark = SVM(dataset_id=dataset_id, rng=dataset_id)
elif benchmark_name == 'openml-xgb':
meta_benchmark = XGBoost(dataset_id=dataset_id, rng=dataset_id)
else:
meta_benchmark = GLMNET(dataset_id=dataset_id, rng=dataset_id)
num_data = len(meta_benchmark.configurations)
if grid_meta_configs:
cs = meta_benchmark.get_configuration_space()
cs.seed(seed)
configurations = cs.sample_configuration(50)
targets = np.array(
[
meta_benchmark.objective_function(config)['function_value']
for config in configurations
]
)
else:
choices = rng.choice(num_data, size=50, replace=False)
configurations = [meta_benchmark.configurations[choice] for choice in choices]
targets = np.array([meta_benchmark.targets[choice] for choice in choices])
data_by_task[i] = {'configurations': configurations, 'y': targets}
elif benchmark_name in ['adaboost', 'svm', 'nn']:
task = args.task
if task is None:
raise TypeError('Task must not be None!')
if benchmark_name == 'adaboost':
benchmark = rgpe.test_functions.AdaboostGrid(rng=seed, task=task)
n_params = 2
elif benchmark_name == 'svm':
benchmark = rgpe.test_functions.SVMGrid(rng=seed, task=task)
n_params = 6
elif benchmark_name == 'nn':
benchmark = rgpe.test_functions.NNGrid(rng=seed, task=task)
n_params = 7
else:
raise ValueError(benchmark_name)
data_by_task = benchmark.get_meta_data(fixed_grid=grid_meta_configs)
acquisition_function_maximizer = FixedSet
acquisition_function_maximizer_kwargs = {
'configurations': list(benchmark.data[task].keys())
}
initial_design = None
initial_design_kwargs = None
else:
raise ValueError(benchmark_name)
# Do custom changes to the number of function evaluations.
# The multiplier is used for random 2x, random 4x, etc.
# However, some benchmarks don't provide enough recorded configurations
# for this, so we cap the number of function evaluations here.
iteration_multiplier = args.iteration_multiplier
num_function_evals = benchmark.get_meta_information()['num_function_evals'] * iteration_multiplier
if benchmark_name == 'adaboost':
if num_function_evals > 108:
num_function_evals = 108
print('Clamping num function evals to %d' % num_function_evals)
if benchmark_name == 'svm':
if num_function_evals > 288:
num_function_evals = 288
print('Clamping num function evals to %d' % num_function_evals)
if benchmark_name == 'nn':
if num_function_evals > len(benchmark.data[task]):
num_function_evals = len(benchmark.data[task])
print('Clamping num function evals to %d' % num_function_evals)
if 'openml' in benchmark_name:
# TODO: the benchmark queries `config.get_array()` which relies on the ConfigSpace behind
# config. If we use search space pruning, this will of course be shrinked and we will no
# longer get the correct mapping. Therefore, we create a new configuration object with the
# correct configuration space. This function should actually live in the benchmark itself.
    def wrapper(config: Configuration, **kwargs) -> float:
        """SMAC target function for the OpenML benchmarks.

        Re-homes the configuration onto the benchmark's full configuration
        space before evaluating: with search-space pruning active, `config`
        belongs to a shrunken space, but the benchmark relies on
        `config.get_array()`, whose index layout depends on the owning
        ConfigurationSpace. Closes over the script-level `benchmark`.
        """
        values = config.get_dictionary()
        new_config = Configuration(benchmark.get_configuration_space(), values=values)
        return benchmark.objective_function(new_config)['function_value']
else:
    def wrapper(config: Configuration, **kwargs) -> float:
        """SMAC target function: evaluate `config` on the script-level
        `benchmark` and return the scalar loss to be minimized."""
        return benchmark.objective_function(config)['function_value']
# Disable SMAC using the pynisher to limit memory and time usage of subprocesses. If you're using
# this code for some real benchmarks, make sure to enable this again!!!
tae_kwargs = {'use_pynisher': False}
# Now load data from previous runs if possible
if empirical_meta_configs is True:
data_by_task_new = {}
meta_config_files_dir, _ = os.path.split(output_file)
for task_id in data_by_task:
if benchmark_name in ['openml-glmnet', 'openml-svm', 'openml-xgb']:
data_by_task_new[task_id] = {}
# TODO change this number if changing the number of seed!
seed = benchmark.rng.randint(15)
meta_config_file = os.path.join(meta_config_files_dir, '..', 'gpmap-10',
'%d_50_%d.configs' % (seed, task_id))
with open(meta_config_file) as fh:
metadata = json.load(fh)
configurations = []
targets = []
for config, target in metadata:
configurations.append(Configuration(
configuration_space=benchmark.get_configuration_space(), values=config)
)
targets.append(target)
targets = np.array(targets)
data_by_task_new[task_id] = {'configurations': configurations, 'y': targets}
data_by_task = data_by_task_new
print('Metadata available for tasks:', {key: len(data_by_task[key]['y']) for key in data_by_task})
# subsample data and/or number of meta-tasks
dropping_rng = np.random.RandomState(seed + 13475)
percent_meta_tasks = args.percent_meta_tasks
if percent_meta_tasks == 'rand':
percent_meta_tasks = dropping_rng.uniform(0.1, 1.0)
else:
percent_meta_tasks = float(percent_meta_tasks)
if percent_meta_tasks < 1:
actual_num_base_tasks = len(data_by_task)
keep_num_base_tasks = int(np.ceil(actual_num_base_tasks * percent_meta_tasks))
print('Percent meta tasks', percent_meta_tasks, 'keeping only', keep_num_base_tasks, 'tasks')
if keep_num_base_tasks < actual_num_base_tasks:
base_tasks_to_drop = dropping_rng.choice(
list(data_by_task.keys()),
replace=False,
size=actual_num_base_tasks - keep_num_base_tasks,
)
for base_task_to_drop in base_tasks_to_drop:
del data_by_task[base_task_to_drop]
percent_meta_data = args.percent_meta_data
if percent_meta_data == 'rand' or float(percent_meta_data) < 1:
for task_id in data_by_task:
if percent_meta_data == 'rand':
percent_meta_data_ = dropping_rng.uniform(0.1, 1.0)
else:
percent_meta_data_ = float(percent_meta_data)
num_configurations = len(data_by_task[task_id]['configurations'])
keep_num_configurations = int(np.ceil(num_configurations * percent_meta_data_))
print('Percent meta data', percent_meta_data, 'keeping only', keep_num_configurations,
'configurations for task', task_id)
if keep_num_configurations < num_configurations:
keep_data_mask = dropping_rng.choice(
num_configurations, replace=False, size=keep_num_configurations,
)
data_by_task[task_id] = {
'configurations': [
config
for i, config in enumerate(data_by_task[task_id]['configurations'])
if i in keep_data_mask
],
'y': np.array([
y
for i, y in enumerate(data_by_task[task_id]['y'])
if i in keep_data_mask
]),
}
# Conduct search psace pruning
if search_space_pruning is not 'None':
full_search_space = benchmark.get_configuration_space()
to_optimize = [True if isinstance(hp, (UniformIntegerHyperparameter,
UniformFloatHyperparameter)) else False
for hp in full_search_space.get_hyperparameters()]
# Section 4 of Perrone et al., 2019
if search_space_pruning == 'complete':
minima_by_dimension = [hp.upper if to_optimize[i] else None
for i, hp in enumerate(full_search_space.get_hyperparameters())]
maxima_by_dimension = [hp.lower if to_optimize[i] else None
for i, hp in enumerate(full_search_space.get_hyperparameters())]
print(minima_by_dimension, maxima_by_dimension, to_optimize)
for task_id, metadata in data_by_task.items():
argmin = np.argmin(metadata['y'])
best_config = metadata['configurations'][argmin]
for i, hp in enumerate(full_search_space.get_hyperparameters()):
if to_optimize[i]:
value = best_config[hp.name]
if value is None:
continue
if value < minima_by_dimension[i]:
minima_by_dimension[i] = value
if value > maxima_by_dimension[i]:
maxima_by_dimension[i] = value
# Section 5 of Perrone et al., 2019
elif search_space_pruning == 'half':
num_hyperparameters = len(full_search_space.get_hyperparameters())
num_tasks = len(data_by_task)
bounds = [(0, 1)] * (num_hyperparameters * 2)
optima = []
for task_id, metadata in data_by_task.items():
argmin = np.argmin(metadata['y'])
best_config = metadata['configurations'][argmin]
optima.append(best_config.get_array())
bounds.append((0, 1))
bounds.append((0, 1))
import scipy.optimize
optima = np.array(optima)
            def _optimizee(x, lambda_, return_n_violations=False):
                """Objective for the slack-based box-pruning optimisation.

                ``x`` packs, in order: the lower bounds ``l`` (one entry per
                hyperparameter), the upper bounds ``u``, and two slack vectors
                with one entry per meta-task.  Returns the penalised objective
                value, or -- when ``return_n_violations`` is True -- the number
                of tasks whose known optimum falls outside the candidate box
                ``[l, u]`` in at least one optimised dimension.

                Relies on the enclosing scope for ``num_hyperparameters``,
                ``num_tasks``, ``to_optimize`` and ``optima``.
                """
                # Round away numerical noise introduced by the optimiser.
                x = np.round(x, 12)
                l = x[: num_hyperparameters]
                u = x[num_hyperparameters: 2 * num_hyperparameters]
                slack_minus = x[num_hyperparameters * 2: num_hyperparameters * 2 + num_tasks]
                slack_plus = x[num_hyperparameters * 2 + num_tasks:]
                n_violations = 0
                # Count tasks whose best-known configuration lies outside the
                # box; NaN entries (inactive hyperparameters) never count.
                for t in range(num_tasks):
                    for i in range(num_hyperparameters):
                        if not to_optimize[i]:
                            continue
                        if np.isfinite(optima[t][i]) and (optima[t][i] < l[i] or optima[t][i] > u[i]):
                            n_violations += 1
                            break
                if return_n_violations:
                    return n_violations
                # NOTE(review): only the slack_minus sum is scaled by
                # 1/(2*num_tasks); confirm against the referenced paper whether
                # slack_plus should share that factor.
                rval = (
                    (lambda_ / 2 * np.power(np.linalg.norm(u - l, 2), 2))
                    + (1 / (2 * num_tasks) * np.sum(slack_minus) + np.sum(slack_plus))
                )
                return rval
minima_by_dimension = [0 for i in
range(len(full_search_space.get_hyperparameters()))]
maxima_by_dimension = [1 for i in
range(len(full_search_space.get_hyperparameters()))]
# The paper isn't specific about the values to use for lambda...
for lambda_ in [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1]:
init = []
while len(init) < 1:
cand = [0] * num_hyperparameters
cand.extend([1] * num_hyperparameters)
cand.extend([0] * (2 * num_tasks))
cand = np.array(cand)
if _optimizee(cand, lambda_) < 10e7:
init.append(cand)
init = np.array(init)
constraints = []
                class LowerConstraint:
                    """Callable constraint for dimension ``i`` and task ``t``:
                    the task's known optimum must lie above the candidate lower
                    bound, up to the task's slack variable.

                    Uses ``num_hyperparameters`` and ``optima`` from the
                    enclosing scope; the constraint value is <= 0 when
                    satisfied.
                    """
                    def __init__(self, i, t):
                        self.i = i
                        self.t = t
                    def __call__(self, x):
                        # l_i - slack_minus_t - optimum_{t,i}
                        rval = x[self.i] - x[num_hyperparameters * 2 + self.t] - optima[self.t, self.i]
                        # Non-finite optima (inactive hyperparameters) are
                        # treated as trivially satisfied.
                        return rval if np.isfinite(rval) else 0
                class UpperConstraint:
                    """Callable constraint for dimension ``i`` and task ``t``:
                    the task's known optimum must lie below the candidate upper
                    bound, up to the task's slack variable.

                    Uses ``num_hyperparameters``, ``num_tasks`` and ``optima``
                    from the enclosing scope; the constraint value is <= 0 when
                    satisfied.
                    """
                    def __init__(self, i, t):
                        self.i = i
                        self.t = t
                    def __call__(self, x):
                        # optimum_{t,i} - slack_plus_t - u_i
                        rval = optima[self.t, self.i] - x[num_hyperparameters * 2 + num_tasks + self.t] - x[num_hyperparameters + self.i]
                        # Non-finite optima (inactive hyperparameters) are
                        # treated as trivially satisfied.
                        return rval if np.isfinite(rval) else 0
for t in range(num_tasks):
for i in range(num_hyperparameters):
if not to_optimize[i]:
continue
constraints.append(scipy.optimize.NonlinearConstraint(
LowerConstraint(i, t), -np.inf, 0)
)
constraints.append(scipy.optimize.NonlinearConstraint(
UpperConstraint(i, t), -np.inf, 0)
)
res = scipy.optimize.minimize(
_optimizee, bounds=bounds, args=(lambda_, ),
x0=init[0],
tol=1e-12,
constraints=constraints,
)
print(res)
n_violations = _optimizee(res.x, lambda_, return_n_violations=True)
print('Number of violations', n_violations)
if n_violations > 25:
continue
else:
result = np.round(res.x, 12)
minima_by_dimension = [result[i] for i in
range(len(full_search_space.get_hyperparameters()))]
maxima_by_dimension = [result[num_hyperparameters + i] for i in
range(len(full_search_space.get_hyperparameters()))]
break
else:
raise ValueError(search_space_pruning)
print('Original configuration space')
print(benchmark.get_configuration_space())
print('Pruned configuration space')
configuration_space = ConfigurationSpace()
for i, hp in enumerate(full_search_space.get_hyperparameters()):
if to_optimize[i]:
if search_space_pruning == 'half':
tmp_config = full_search_space.get_default_configuration()
vector = tmp_config.get_array()
vector[i] = minima_by_dimension[i]
tmp_config = Configuration(full_search_space, vector=vector)
new_lower = tmp_config[hp.name]
vector[i] = maxima_by_dimension[i]
tmp_config = Configuration(full_search_space, vector=vector)
new_upper = tmp_config[hp.name]
else:
new_lower = minima_by_dimension[i]
new_upper = maxima_by_dimension[i]
if isinstance(hp, UniformFloatHyperparameter):
new_hp = UniformFloatHyperparameter(
name=hp.name,
lower=new_lower,
upper=new_upper,
log=hp.log,
)
elif isinstance(hp, UniformIntegerHyperparameter):
new_hp = UniformIntegerHyperparameter(
name=hp.name,
lower=new_lower,
upper=new_upper,
log=hp.log,
)
else:
raise ValueError(type(hp))
else:
new_hp = copy.deepcopy(hp)
configuration_space.add_hyperparameter(new_hp)
for condition in full_search_space.get_conditions():
hp1 = configuration_space.get_hyperparameter(condition.child.name)
hp2 = configuration_space.get_hyperparameter(condition.parent.name)
configuration_space.add_condition(type(condition)(hp1, hp2, condition.value))
print(configuration_space)
reduced_configuration_space = configuration_space
if benchmark_name in ['adaboost', 'svm', 'nn']:
fixed_set_configurations = []
for config in benchmark.data[task].keys():
try:
Configuration(reduced_configuration_space, config.get_dictionary())
fixed_set_configurations.append(config)
except:
continue
acquisition_function_maximizer_kwargs['configurations'] = fixed_set_configurations
print('Using only %d configurations' %
len(acquisition_function_maximizer_kwargs['configurations']))
configuration_space = benchmark.get_configuration_space()
else:
configuration_space = benchmark.get_configuration_space()
reduced_configuration_space = configuration_space
scenario = Scenario({
'run_obj': 'quality',
'runcount_limit': num_function_evals,
'cs': reduced_configuration_space,
'deterministic': True,
'output_dir': None,
})
# Mapping the latin hypercube initial design to the available points of the grid via a nearest
# neighbor method.
if benchmark_name in ['adaboost', 'svm', 'nn']:
import copy
import pyDOE
from sklearn.neighbors import NearestNeighbors
lhd = pyDOE.lhs(n=n_params, samples=args.n_init)
initial_configurations = []
vectors = []
conf_list = []
for config in benchmark.data[task].keys():
try:
new_config = Configuration(reduced_configuration_space, config.get_dictionary())
conf_list.append(config)
vectors.append(new_config.get_array())
except:
continue
vectors = np.array(vectors)
taken_indices = []
for design in lhd:
nbrs = NearestNeighbors(n_neighbors=len(vectors)).fit(vectors)
_, indices = nbrs.kneighbors([design])
for ind in indices.flatten():
if ind not in taken_indices:
taken_indices.append(ind)
initial_configurations.append(conf_list[ind])
break
assert len(initial_configurations) == args.n_init
print(initial_configurations)
# Now learn an initial design
if learned_initial_design in ['scaled', 'unscaled', 'copula']:
from smac.runhistory.runhistory2epm import RunHistory2EPM4Cost
from smac.runhistory.runhistory import RunHistory
from smac.tae.execute_ta_run import StatusType
from rgpe.utils import get_gaussian_process
from smac.epm.util_funcs import get_types
if learned_initial_design != 'copula':
print('Learned init with scaled/unscaled')
rh2epm = RunHistory2EPM4Cost(
scenario=scenario,
num_params=len(scenario.cs.get_hyperparameter_names()),
success_states=[StatusType.SUCCESS],
)
else:
print('Learned init with copula transform')
from rgpe.utils import copula_transform
            class CopulaRH2EPM(RunHistory2EPM4Cost):
                """Runhistory-to-EPM converter that applies the Copula (rank)
                transform to the observed costs before model fitting."""
                def transform_response_values(self, values: np.ndarray) -> np.ndarray:
                    return copula_transform(values)
rh2epm = CopulaRH2EPM(
scenario=scenario,
num_params=len(scenario.cs.get_hyperparameter_names()),
success_states=[StatusType.SUCCESS],
)
new_initial_design = []
minima = {}
maxima = {}
candidate_configurations = list()
candidate_set = set()
benchmark_class = benchmark.__class__
meta_models = {}
meta_models_rng = np.random.RandomState(seed)
for task_id, metadata in data_by_task.items():
if benchmark_name in ['openml-svm', 'openml-xgb', 'openml-glmnet']:
dataset_id = task_to_dataset_mapping[task_id]
meta_benchmark = benchmark_class(rng=seed, dataset_id=dataset_id)
else:
meta_benchmark = benchmark_class(task=task_id, rng=seed, load_all=False)
if learned_initial_design == 'scaled':
minima[task_id] = meta_benchmark.get_empirical_f_opt()
if hasattr(meta_benchmark, 'get_empirical_f_worst'):
maxima[task_id] = meta_benchmark.get_empirical_f_worst()
elif hasattr(meta_benchmark, 'get_empirical_f_max'):
maxima[task_id] = meta_benchmark.get_empirical_f_max()
else:
raise NotImplementedError()
rh = RunHistory()
for config, target in zip(metadata['configurations'], metadata['y']):
rh.add(config=config, cost=target, time=0, status=StatusType.SUCCESS)
types, bounds = get_types(benchmark.get_configuration_space(), None)
gp = get_gaussian_process(meta_benchmark.get_configuration_space(), rng=meta_models_rng,
bounds=bounds, types=types, kernel=None)
X, y = rh2epm.transform(rh)
gp.train(X, y)
meta_models[task_id] = gp
for config in metadata['configurations']:
if config not in candidate_set:
if benchmark_name in ['adaboost', 'svm', 'nn']:
if config not in acquisition_function_maximizer_kwargs['configurations']:
continue
else:
try:
Configuration(reduced_configuration_space, config.get_dictionary())
except Exception as e:
continue
candidate_configurations.append(config)
candidate_set.add(config)
print('Using %d candidates for the initial design' % len(candidate_configurations))
predicted_losses_cache = dict()
        def target_function(config, previous_losses):
            """Score ``config`` for greedy initial-design construction.

            For every meta-task, predicts the loss of ``config`` with that
            task's meta-model (cached in ``predicted_losses_cache``) and keeps
            the element-wise minimum with ``previous_losses`` (the best losses
            achieved by the configurations already selected).  Returns the mean
            of those per-task minima and the per-task list itself.
            """
            losses = []
            # NOTE(review): the values of data_by_task are metadata dicts, so
            # the name ``meta_benchmark`` is misleading; only ``task_id`` is
            # used in the loop body.
            for i, (task_id, meta_benchmark) in enumerate(data_by_task.items()):
                meta_model = meta_models[task_id]
                key = (config, task_id)
                if key in predicted_losses_cache:
                    loss_cfg = predicted_losses_cache[key]
                else:
                    loss_cfg, _ = meta_model.predict(config.get_array().reshape((1, -1)))
                    if learned_initial_design == 'scaled':
                        # Scale the prediction to [0, 1] using the task's
                        # empirical best/worst values.
                        minimum = minima[task_id]
                        diff = maxima[task_id] - minimum
                        diff = diff if diff > 0 else 1
                        loss_cfg = (loss_cfg - minimum) / diff
                    predicted_losses_cache[key] = loss_cfg
                # Keep the better (lower) of the cached best and this config.
                if loss_cfg < previous_losses[i]:
                    tmp_loss = loss_cfg
                else:
                    tmp_loss = previous_losses[i]
                losses.append(tmp_loss)
            return np.mean(losses), losses
current_loss_cache = [np.inf] * len(data_by_task)
for i in range(args.n_init):
losses = []
loss_cache = []
for j, candidate_config in enumerate(candidate_configurations):
loss, loss_cache_tmp = target_function(candidate_config, current_loss_cache)
losses.append(loss)
loss_cache.append(loss_cache_tmp)
min_loss = np.min(losses)
min_losses_indices = np.where(losses == min_loss)[0]
argmin = meta_models_rng.choice(min_losses_indices)
print(argmin, losses[argmin], len(losses), losses)
new_initial_design.append(candidate_configurations[argmin])
current_loss_cache = loss_cache[argmin]
del candidate_configurations[argmin]
initial_configurations = copy.deepcopy(new_initial_design)
initial_design = None
initial_design_kwargs = None
del meta_models
print('Learned initial design')
print(initial_configurations)
# Set up the optimizer
if method_name == 'random':
method = ROAR(
scenario=scenario,
rng=np.random.RandomState(seed),
tae_runner=wrapper,
tae_runner_kwargs=tae_kwargs,
initial_design=initial_design,
initial_design_kwargs=initial_design_kwargs,
initial_configurations=initial_configurations,
acquisition_function_optimizer=acquisition_function_maximizer,
acquisition_function_optimizer_kwargs=acquisition_function_maximizer_kwargs,
)
elif method_name == 'gpmap':
method = SMAC4BO(
model_type='gp',
scenario=scenario,
rng=np.random.RandomState(seed),
tae_runner=wrapper,
tae_runner_kwargs=tae_kwargs,
initial_design=initial_design,
initial_design_kwargs=initial_design_kwargs,
initial_configurations=initial_configurations,
acquisition_function_optimizer=acquisition_function_maximizer,
acquisition_function_optimizer_kwargs=acquisition_function_maximizer_kwargs,
)
elif method_name == 'gcp':
from rgpe.utils import copula_transform
from smac.runhistory.runhistory2epm import RunHistory2EPM4Cost
        class CopulaRH2EPM(RunHistory2EPM4Cost):
            """Runhistory-to-EPM converter for the Gaussian-Copula-process
            method: rank-transforms the observed costs before model fitting."""
            def transform_response_values(self, values: np.ndarray) -> np.ndarray:
                return copula_transform(values)
method = SMAC4BO(
model_type='gp',
scenario=scenario,
rng=np.random.RandomState(seed),
tae_runner=wrapper,
tae_runner_kwargs=tae_kwargs,
initial_design=initial_design,
initial_design_kwargs=initial_design_kwargs,
initial_configurations=initial_configurations,
acquisition_function_optimizer=acquisition_function_maximizer,
acquisition_function_optimizer_kwargs=acquisition_function_maximizer_kwargs,
runhistory2epm=CopulaRH2EPM,
)
elif method_name == 'ablr':
from rgpe.methods.ablr import ABLR
normalization = kwargs['normalization']
if normalization == 'mean/var':
use_copula_transform = False
elif normalization == 'Copula':
use_copula_transform = True
else:
raise ValueError(normalization)
print('ABLR', use_copula_transform)
method = SMAC4BO(
scenario=scenario,
rng=np.random.RandomState(seed),
tae_runner=wrapper,
tae_runner_kwargs=tae_kwargs,
initial_design=initial_design,
initial_design_kwargs=initial_design_kwargs,
initial_configurations=initial_configurations,
model=ABLR,
model_kwargs={'training_data': data_by_task, 'use_copula_transform': use_copula_transform},
acquisition_function_optimizer=acquisition_function_maximizer,
acquisition_function_optimizer_kwargs=acquisition_function_maximizer_kwargs,
)
elif method_name == 'gcp+prior':
from rgpe.methods.GCPplusPrior import GCPplusPrior, CustomEI
method = SMAC4BO(
scenario=scenario,
rng=np.random.RandomState(seed),
tae_runner=wrapper,
tae_runner_kwargs=tae_kwargs,
initial_design=initial_design,
initial_design_kwargs=initial_design_kwargs,
initial_configurations=initial_configurations,
model=GCPplusPrior,
model_kwargs={'training_data': data_by_task},
acquisition_function=CustomEI,
acquisition_function_optimizer=acquisition_function_maximizer,
acquisition_function_optimizer_kwargs=acquisition_function_maximizer_kwargs,
)
elif method_name == 'taf':
from rgpe.methods.taf import TAF
weighting_mode = kwargs['weighting_mode']
normalization = kwargs['normalization']
if weighting_mode == 'rgpe':
from rgpe.methods.rgpe import RGPE
model = RGPE
weight_dilution_strategy = kwargs['weight_dilution_strategy']
sampling_mode = kwargs['sampling_mode']
model_kwargs = {
'weight_dilution_strategy': weight_dilution_strategy,
'number_of_function_evaluations': num_function_evals,
'training_data': data_by_task,
'num_posterior_samples': 1000,
'sampling_mode': sampling_mode,
'normalization': normalization,
}
elif weighting_mode == 'tstr':
from rgpe.methods.tstr import TSTR
model = TSTR
weight_dilution_strategy = kwargs['weight_dilution_strategy']
bandwidth = float(kwargs['bandwidth'])
normalization = kwargs['normalization']
model_kwargs = {
'weight_dilution_strategy': weight_dilution_strategy,
'number_of_function_evaluations': num_function_evals,
'training_data': data_by_task,
'bandwidth': bandwidth,
'normalization': normalization,
}
else:
raise ValueError(weighting_mode)
print(model, normalization, weight_dilution_strategy)
method = SMAC4BO(
scenario=scenario,
rng=np.random.RandomState(seed),
tae_runner=wrapper,
tae_runner_kwargs=tae_kwargs,
initial_design=initial_design,
initial_design_kwargs=initial_design_kwargs,
initial_configurations=initial_configurations,
acquisition_function=TAF,
model=model,
model_kwargs=model_kwargs,
acquisition_function_optimizer=acquisition_function_maximizer,
acquisition_function_optimizer_kwargs=acquisition_function_maximizer_kwargs,
)
elif method_name == 'tstr':
from rgpe.methods.tstr import TSTR
from rgpe.utils import EI as EI4TSTR
if kwargs['acquisition_function_name'] == 'NoisyEI':
acquisition_function = NoisyEI
elif kwargs['acquisition_function_name'] == 'fullmodelEI':
acquisition_function = EI
elif kwargs['acquisition_function_name'] == 'EI':
acquisition_function = EI4TSTR
else:
raise ValueError(kwargs['acquisition_function_name'])
bandwidth = float(kwargs['bandwidth'])
variance_mode = kwargs['variance_mode']
normalization = kwargs['normalization']
weight_dilution_strategy = kwargs['weight_dilution_strategy']
method = SMAC4BO(
scenario=scenario,
rng=np.random.RandomState(seed),
tae_runner=wrapper,
tae_runner_kwargs=tae_kwargs,
initial_design=initial_design,
initial_design_kwargs=initial_design_kwargs,
initial_configurations=initial_configurations,
model=TSTR,
model_kwargs={
'training_data': data_by_task,
'bandwidth': bandwidth,
'variance_mode': variance_mode,
'normalization': normalization,
'weight_dilution_strategy': weight_dilution_strategy,
'number_of_function_evaluations': num_function_evals,
},
acquisition_function=acquisition_function,
acquisition_function_optimizer=acquisition_function_maximizer,
acquisition_function_optimizer_kwargs=acquisition_function_maximizer_kwargs,
)
elif method_name == 'wac':
from rgpe.methods.warmstarting_ac import WarmstartingAC
variance_mode = 'average'
acquisition_function = EI
method = SMAC4BO(
scenario=scenario,
rng=np.random.RandomState(seed),
tae_runner=wrapper,
tae_runner_kwargs=tae_kwargs,
initial_design=initial_design,
initial_design_kwargs=initial_design_kwargs,
initial_configurations=initial_configurations,
model=WarmstartingAC,
model_kwargs={'training_data': data_by_task, 'variance_mode': variance_mode},
acquisition_function=acquisition_function,
acquisition_function_optimizer=acquisition_function_maximizer,
acquisition_function_optimizer_kwargs=acquisition_function_maximizer_kwargs,
)
elif method_name == 'klweighting':
from rgpe.methods.kl_weighting import KLWeighting
eta = float(kwargs['eta'])
print('KLWeighting', eta)
method = SMAC4BO(
scenario=scenario,
rng=np.random.RandomState(seed),
tae_runner=wrapper,
tae_runner_kwargs=tae_kwargs,
initial_design=initial_design,
initial_design_kwargs=initial_design_kwargs,
initial_configurations=initial_configurations,
model=KLWeighting,
model_kwargs={'training_data': data_by_task, 'eta': eta},
acquisition_function_optimizer=acquisition_function_maximizer,
acquisition_function_optimizer_kwargs=acquisition_function_maximizer_kwargs,
)
elif method_name == 'rgpe':
from rgpe.methods.rgpe import RGPE
if kwargs['acquisition_function_name'] == 'EI':
from rgpe.utils import EI as EI4RGPE
acquisition_function = EI4RGPE
acquisition_function_kwargs = {}
elif kwargs['acquisition_function_name'] == 'fullmodelEI':
acquisition_function = EI
acquisition_function_kwargs = {}
elif kwargs['acquisition_function_name'] == 'CFNEI':
acquisition_function = ClosedFormNei
acquisition_function_kwargs = {}
else:
# This will fail if it is not an integer below
acquisition_function = NoisyEI
target_model_incumbent = kwargs['target_model_incumbent']
if target_model_incumbent == 'True':
target_model_incumbent = True
elif target_model_incumbent == 'False':
target_model_incumbent = False
else:
raise ValueError(target_model_incumbent)
acquisition_function_kwargs = {
'n_samples': int(kwargs['acquisition_function_name']),
'target_model_incumbent': target_model_incumbent,
}
num_posterior_samples = int(kwargs['num_posterior_samples'])
sampling_mode = kwargs['sampling_mode']
variance_mode = kwargs['variance_mode']
normalization = kwargs['normalization']
weight_dilution_strategy = kwargs['weight_dilution_strategy']
print(num_posterior_samples, acquisition_function, sampling_mode, variance_mode,
weight_dilution_strategy, normalization)
method = SMAC4BO(
scenario=scenario,
rng=np.random.RandomState(seed),
tae_runner=wrapper,
tae_runner_kwargs=tae_kwargs,
initial_design=initial_design,
initial_design_kwargs=initial_design_kwargs,
initial_configurations=initial_configurations,
model=RGPE,
model_kwargs={
'training_data': data_by_task,
'weight_dilution_strategy': weight_dilution_strategy,
'number_of_function_evaluations': num_function_evals,
'variance_mode': variance_mode,
'num_posterior_samples': num_posterior_samples,
'sampling_mode': sampling_mode,
'normalization': normalization,
},
acquisition_function_kwargs=acquisition_function_kwargs,
acquisition_function=acquisition_function,
acquisition_function_optimizer=acquisition_function_maximizer,
acquisition_function_optimizer_kwargs=acquisition_function_maximizer_kwargs,
)
elif method_name == 'rmogp':
from rgpe.methods.rmogp import MixtureOfGPs, NoisyMixtureOfGPs
from rgpe.methods.rgpe import RGPE
model = RGPE
weight_dilution_strategy = kwargs['weight_dilution_strategy']
sampling_mode = kwargs['sampling_mode']
num_posterior_samples = int(kwargs['num_posterior_samples'])
use_expectation = bool(kwargs['use_expectation'] == 'True')
use_global_incumbent = bool(kwargs['use_global_incumbent'] == 'True')
normalization = kwargs['normalization']
alpha = float(kwargs['alpha'])
acq_func = MixtureOfGPs
acq_func_kwargs = {
'use_expectation': use_expectation,
'use_global_incumbent': use_global_incumbent,
}
print('MOGP', num_posterior_samples, sampling_mode, acq_func, use_expectation,
use_global_incumbent, weight_dilution_strategy, alpha, normalization)
model_kwargs = {
'weight_dilution_strategy': weight_dilution_strategy,
'number_of_function_evaluations': num_function_evals,
'training_data': data_by_task,
'variance_mode': 'target',
'num_posterior_samples': num_posterior_samples,
'sampling_mode': sampling_mode,
'normalization': normalization,
'alpha': alpha,
}
method = SMAC4BO(
scenario=scenario,
rng=np.random.RandomState(seed),
tae_runner=wrapper,
tae_runner_kwargs=tae_kwargs,
initial_design=initial_design,
initial_design_kwargs=initial_design_kwargs,
initial_configurations=initial_configurations,
acquisition_function=acq_func,
acquisition_function_kwargs=acq_func_kwargs,
model=model,
model_kwargs=model_kwargs,
acquisition_function_optimizer=acquisition_function_maximizer,
acquisition_function_optimizer_kwargs=acquisition_function_maximizer_kwargs,
)
else:
raise ValueError(method_name)
# Reduce the local search for XGB to finish BO in a reasonable amount of time
if benchmark_name in ['openml-xgb']:
if method_name != 'random':
method.solver.epm_chooser.acq_optimizer.local_search.n_steps_plateau_walk = 1
method.solver.epm_chooser.acq_optimizer.n_sls_iterations = 5
# Disable random configurations form SMAC
method.solver.epm_chooser.random_configuration_chooser = None
# And now run the optimizer
method.optimize()
if hasattr(method.solver.epm_chooser.model, 'weights_over_time'):
weight_file = output_file
weight_file = weight_file.replace('.json', '.weights')
weights_over_time = method.solver.epm_chooser.model.weights_over_time
weights_over_time = [
[float(weight) for weight in weights]
for weights in weights_over_time
]
with open(weight_file, 'w') as fh:
json.dump(weights_over_time, fh, indent=4)
if hasattr(method.solver.epm_chooser.model, 'p_drop_over_time'):
p_drop_file = output_file
p_drop_file = p_drop_file.replace('.json', '.pdrop')
p_drop_over_time = method.solver.epm_chooser.model.p_drop_over_time
p_drop_over_time = [
[float(p_drop) for p_drop in drops]
for drops in p_drop_over_time
]
with open(p_drop_file, 'w') as fh:
json.dump(p_drop_over_time, fh, indent=4)
# Dump the evaluated configurations as meta-data for later runs
print(method_name, method_name == 'gpmap')
if method_name == 'gpmap':
rh = method.get_runhistory()
evaluated_configurations = []
for config in rh.config_ids:
cost = rh.get_cost(config)
print(cost)
evaluated_configurations.append([config.get_dictionary(), cost])
print(evaluated_configurations)
evaluated_configs_file = output_file
evaluated_configs_file = evaluated_configs_file.replace('.json', '.configs')
print(evaluated_configs_file)
with open(evaluated_configs_file, 'w') as fh:
json.dump(evaluated_configurations, fh)
# Now output the optimization trajectory (performance)
traj = method.get_trajectory()
trajectory = pd.Series([np.NaN] * (num_function_evals + 1))
for entry in traj:
trajectory[entry.ta_runs] = entry.train_perf
trajectory.fillna(inplace=True, method='ffill')
trajectory = trajectory.to_numpy()
best = benchmark.get_empirical_f_opt()
regret = trajectory[-1] - best
regret_trajectory = trajectory - best
if (hasattr(benchmark, 'get_empirical_f_worst') or hasattr(benchmark, 'get_empirical_f_max')) and\
benchmark_name not in ['alpine', 'quadratic']:
if hasattr(benchmark, 'get_empirical_f_worst'):
worst = benchmark.get_empirical_f_worst()
else:
worst = benchmark.get_empirical_f_max()
normalizer = worst - best
print('Normalization constants', normalizer, best, worst)
print('Unnormalized trajectory', trajectory)
print('Unnormalized regret trajectory', regret_trajectory)
regret = regret / normalizer
regret_trajectory = regret_trajectory / normalizer
print('Trajectory', regret_trajectory)
print('Final regret', regret)
if output_file is not None:
results_dict = {
'regret': float(regret),
'regret_trajectory': [float(v) for v in regret_trajectory],
'trajectory': [float(v) for v in trajectory],
}
with open(output_file, 'w') as fh:
json.dump(results_dict, fh, indent=4)
| [
"feurerm@informatik.uni-freiburg.de"
] | feurerm@informatik.uni-freiburg.de |
e8ee357fd8205f654efb9640ce7e9edc737f31a8 | 4dace99563190bbf2c931572dc0ed8cffdf581b3 | /ex25.py | 02d130bdf892cc432df6ca0ca577cc5209cfa301 | [] | no_license | AxelRigal/Exercice_debutant | e21a38f8e17e022195205a7076e2a2669a193b3a | 816f2195cbd5e9140ac316e460b4fcf98685d372 | refs/heads/master | 2022-12-23T03:51:47.759407 | 2020-10-06T14:57:12 | 2020-10-06T14:57:12 | 276,771,646 | 0 | 0 | null | 2020-07-13T22:03:45 | 2020-07-03T00:32:01 | Python | UTF-8 | Python | false | false | 120 | py | liste = [1, 1, 4, 3, 3, 2, 6, 7, 7, 9, 2]
# For each distinct value of ``liste`` in ascending order, compute
# i * (i + 1 % (i * 5)) and print the resulting list.
resultat = [i * (i + 1 % (i * 5)) for i in sorted(set(liste))]
print(resultat)
| [
"axel.rigal@gmail.com"
] | axel.rigal@gmail.com |
50dba4a8dc1c11bcde73b1533d03a5da993821bf | 2449715d42e89f51bd352b3f3a311ef41f3f04f2 | /exercise/news_test/muitlprocess_queue.py | 97d0eb41eb5347ae036aa4552a0c54a362fa11e7 | [] | no_license | SmallPuddingComing/PycharmProjects | d0d900c94691efe98d7d0e147f2968c28546f61c | 28965e9e2524cb26449a30a237665f404c5aab70 | refs/heads/master | 2021-01-10T15:47:21.968541 | 2016-05-03T11:23:27 | 2016-05-03T11:23:27 | 54,554,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,887 | py | #coding:utf8
'''
Created on 2016-4-1
@author : yr
website from : http://www.jb51.net/article/80115.htm
'''
import multiprocessing
import requests
from multiprocessing.process import Process
#IO密集型任务
#多个进程同时下载多个网页
#利用Queue+多进程
#由于是IO密集型,所以同样可以用threading
'''
1、初始化tasks,里面放着一系列的dest_url
2、同时开启4个进程向tasks中获取任务进行执行
3、处理结果贮存在一个result
'''
def main():
    """Wire the pipeline together: create the queues, spawn one worker
    process per CPU core, enqueue the download tasks and report results."""
    task_queue = multiprocessing.JoinableQueue()
    result_queue = multiprocessing.Queue()
    worker_count = multiprocessing.cpu_count()  # one process per CPU core
    create_process(task_queue, result_queue, worker_count)
    add_tasks(task_queue)
    parse(task_queue, result_queue)
def create_process(tasks, results, cpu_count):
    """Spawn ``cpu_count`` daemon worker processes consuming from ``tasks``
    and pushing downloaded pages onto ``results``."""
    for _ in range(cpu_count):
        worker = multiprocessing.Process(target=_worker, args=(tasks, results))
        # Daemon workers terminate automatically with the main process.
        worker.daemon = True
        worker.start()
def _download(task):
    '''Download the web page at URL ``task``.

    Returns the response body on HTTP 200; implicitly returns None on any
    other status code or on a connection failure.
    '''
    try:
        request = requests.get(task)
        if request.status_code == 200:
            return request.text
    except Exception as e:
        # Broad catch: any failure is reported and swallowed so the calling
        # worker loop keeps running.
        print ("connect the url is fail ,{0}".format(str(e)))
def _worker(tasks, results):
    """Worker-process loop: repeatedly take a URL from ``tasks``, download
    it and push the result onto ``results``.

    Runs forever; the process is started as a daemon, so it is terminated
    together with the main process.
    """
    while True:
        try:
            task = tasks.get()
            result = _download(task)
            results.put(result)
        finally:
            # NOTE(review): task_done() also runs if tasks.get() itself
            # raised, in which case no task was actually taken -- confirm
            # this is intended (it can unbalance JoinableQueue.join()).
            tasks.task_done()
def get_urls():
    """Return the list of URLs to download: the same test endpoint, ten
    times over."""
    return ["http://httpbin.org/get" for _ in range(10)]
def add_tasks(tasks):
    """Enqueue every URL returned by ``get_urls`` onto the task queue."""
    for target_url in get_urls():
        tasks.put(target_url)
def _parse(results):
    """Process one downloaded result (here: just print it).

    Uses the print *function* call form: the original ``print results`` was
    Python-2-only syntax, inconsistent with the rest of the file which
    already calls ``print(...)``.
    """
    print(results)
def parse(tasks, results):
    """Wait until every queued task was processed, then drain and print all
    results.

    A KeyboardInterrupt raised while waiting is reported and the results
    collected so far are still drained.
    """
    try:
        # Block until every task put on the queue has been marked done.
        tasks.join()
    except KeyboardInterrupt as e:
        print("tasks has been stopped ,{0}".format(str(e)))
    # BUG FIX: the original called ``_parse(results)`` -- printing the queue
    # object itself without consuming any item, so ``results.empty()`` never
    # became True and this loop never terminated.  Consume each result.
    while not results.empty():
        _parse(results.get())
# Script entry point.
if __name__ == '__main__':
    main()
| [
"1076643147@qq.com"
] | 1076643147@qq.com |
89418b88b36775cd5558bcb8e547933c0d213a39 | 8da91c26d423bacbeee1163ac7e969904c7e4338 | /pyvisdk/do/v_mware_dvs_config_spec.py | afb05925855d68f024da0b88a95e372d9f3d0e83 | [] | no_license | pexip/os-python-infi-pyvisdk | 5d8f3a3858cdd61fb76485574e74ae525cdc7e25 | 1aadea0afbc306d09f6ecb9af0e683dbbf961d20 | refs/heads/master | 2023-08-28T02:40:28.789786 | 2020-07-16T04:00:53 | 2020-07-16T04:00:53 | 10,032,240 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,374 | py |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def VMwareDVSConfigSpec(vim, *args, **kwargs):
    '''This class defines the VMware specific configuration for
    DistributedVirtualSwitch.

    Auto-generated factory (see the header above: "do not edit"): builds the
    SOAP object via the client factory and assigns the listed attributes,
    first from positional ``args`` (in required+optional order), then from
    keyword arguments, rejecting unknown names.
    '''
    obj = vim.client.factory.create('{urn:vim25}VMwareDVSConfigSpec')
    # do some validation checking...
    # NOTE(review): len(args) + len(kwargs) can never be negative, so this
    # check is dead code (generator artifact); it never raises.
    if (len(args) + len(kwargs)) < 0:
        raise IndexError('Expected at least 1 arguments got: %d' % len(args))
    required = [ ]
    optional = [ 'ipfixConfig', 'linkDiscoveryProtocolConfig', 'maxMtu', 'pvlanConfigSpec',
        'vspanConfigSpec', 'configVersion', 'contact', 'defaultPortConfig',
        'description', 'extensionKey', 'host', 'maxPorts', 'name',
        'numStandalonePorts', 'policy', 'switchIpAddress', 'uplinkPortgroup',
        'uplinkPortPolicy', 'vendorSpecificConfig', 'dynamicProperty', 'dynamicType' ]
    # Positional arguments fill attributes in declaration order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)
    # Keyword arguments must match a declared attribute name.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
    return obj
| [
"jmb@pexip.com"
] | jmb@pexip.com |
b57adcc39b9c6b4abe4965488f9b60417cd6389c | 5c8139f1e57e06c7eaf603bd8fe74d9f22620513 | /PartA/Py反转字符串内容2.py | b6b9af014070855a9b3a79797b620fdd1f3d974e | [] | no_license | madeibao/PythonAlgorithm | c8a11d298617d1abb12a72461665583c6a44f9d2 | b4c8a75e724a674812b8a38c0202485776445d89 | refs/heads/master | 2023-04-03T07:18:49.842063 | 2021-04-11T12:02:40 | 2021-04-11T12:02:40 | 325,269,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py |
class Solution():
    """LeetCode 344 -- reverse a list of characters in place."""

    def reverseString(self, s):
        """Reverse the list ``s`` in place and return it.

        Iterative two-pointer swap.  The original recursive helper recursed
        once per character pair, so inputs longer than roughly twice the
        interpreter recursion limit (~1000) raised RecursionError; the loop
        handles arbitrary lengths.  Empty and single-element lists are
        returned unchanged.
        """
        left, right = 0, len(s) - 1
        while left < right:
            s[left], s[right] = s[right], s[left]
            left += 1
            right -= 1
        return s
if __name__ == "__main__":
s = Solution()
print(s.reverseString(["h","e","l","l","o"]))
| [
"2901429479@qq.com"
] | 2901429479@qq.com |
d6b58de7a4483d2044cdc0624c57e6f6d3792fbf | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/335/usersdata/297/99568/submittedfiles/matriz1.py | 25872a5f7dbc94f97ca7250169563977b88328f5 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,475 | py | # -*- coding: utf-8 -*-
matriz=[]
m=int(input('digite o numero de linhas dejado: '))
n=int(input('digite o numero de colunas dejado: '))
for i in range(m):
linha= []
for j in range(n):
linha.append(int(input('digite o valor do indice da linha%d e da coluna %d : ' %((i+1),(j+1)))))
matriz.append(linha)
linhas_superiores=m
linhas_inferiores=0
colunas_da_esquerda=n
colunas_da_direita=0
for i in range(m):
for j in range(n):
if matriz[i][j]==1:
if i<linhas_superiores :
linhas_superiores=i
if i+1>linhas_inferiores:
linhas_inferiores=i+1
if j<colunas_da_esquerda :
colunas_da_esquerda=j
if j+1>colunas_da_direita:
colunas_da_direita=j+1
print(matriz)
'''
#professor se o sr ver esse codigo abaixo testa ele pfvr e ve onde esta o erro de logica dele,pq na parte q ele testa a corte direita,eu coloco pra ele apagar o termo matriz[i][j],com for i in range,mas ele n apga da posição indicada ele apaga do ultimo j
import numpy as np
matriz=[]
m=int(input('digite o numero de linhas da matriz que voceh deseja recortar: '))
n=int(input('digite o numero de colunas da matriz que voceh deseja recortar: '))
for i in range(0,m,1):
linha=[]
for j in range(0,n,1):
linha.append(int(input('digite o valor do elemento da linha%d e da coluna%d desejada: '%((i+1),(j+1)))))
matriz.append(linha)
linhaszeradas=0
linhaszeradas2=0
colunaszeradas=0
colunaszeradas2=0
#corte superior
for i in range(0,m-1,1) :
y=sum(matriz[i])
if y > 0 :
break
else :
linhaszeradas=linhaszeradas+1
if linhaszeradas>0 :
for i in range(0,linhaszeradas,1):
del matriz[i]
#corte inferior
for i in range(m-linhaszeradas-1,0,-1) :
r=int(sum(matriz[i]))
if r > 0 :
break
else :
linhaszeradas2=linhaszeradas2+1
if linhaszeradas2>0:
for i in range(m-1,m-linhaszeradas2-1,-1):
del matriz[i]
t=0
#corte direito
for i in range(0,m-linhaszeradas-linhaszeradas2,1):
for j in range(0,n,1) :
if i+1<m-linhaszeradas-linhaszeradas2 :
t=t+matriz[i][j]+matriz[i+1][j]
if t > 0 :
break
else :
colunaszeradas=colunaszeradas+1
if colunaszeradas > 0:
for i in range(0,m-linhaszeradas-linhaszeradas2,1):
for j in range(colunaszeradas-1,0,-1):
del matriz[i][j]
#corte esquerdo
f=0
for i in range(0,m-linhaszeradas-linhaszeradas2,1):
for j in range(n-colunaszeradas-1,0,-1) :
f=f+matriz[i][j]
if f > 0 :
break
else :
colunaszeradas2=colunaszeradas2+1
if colunaszeradas2>0 :
for i in range(0,m-linhaszeradas-linhaszeradas2,1):
for j in range(n-colunaszeradas-colunaszeradas2,n-colunaszeradas-colunaszeradas2-1,-1):
del matriz[i][j]
#saida
print(matriz)'''
'''matriz=[]
m=int(input('digite o numero de linhas da matriz que voceh deseja recortar: '))
n=int(input('digite o numero de colunas da matriz que voceh deseja recortar: '))
for i in range(0,m,1):
linha=[]
for j in range(0,n,1):
linha.append(int(input('digite o valor do elemento da linha %d desejada: '%(j+1))))
matriz.append(linha)
indice_superior=m-1
indice_inferior=0
indice_superior=0
indice_superior=n-1
for i in range(0,m,1):
encontrou_na_linha = False
for j in range(0,n,1):
if matriz[i][j]==1 :'''
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
b869b5aabce6c786603cfd965af2eccc2d2311c2 | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_170/run_cfg.py | 72c397a816bf725adf80827a259310277ce5fb80 | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
# Override the input source defined in base_cfg for this job: read five CMG
# tuple files of the DYJetsToLL Summer12 MC sample.  Event sorting and
# duplicate checking are disabled for speed, and the structured-PFJet
# collection is dropped on read (it is not needed downstream).
process.source = cms.Source("PoolSource",
    noEventSort = cms.untracked.bool(True),
    inputCommands = cms.untracked.vstring('keep *',
        'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
    duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
    fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1763.root',
        '/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1764.root',
        '/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1765.root',
        '/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1766.root',
        '/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1767.root')
    )
| [
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
f9def055c1faf6d80a0eb67ecc555853b788a02f | 77d834eb125fdc56c96af31cf74db5b741c8e94e | /api_v5/urls.py | 99a4f29e1b51566b40e869de61c5b80f161f01cd | [] | no_license | zhouf00/learn_rest_framework | 7c17124fcb08ce48f54f94201f2da29e41e9d867 | a292e38ee9ff475e43ce4612fbb6c074b4073f84 | refs/heads/master | 2022-10-12T12:05:07.618651 | 2020-06-11T02:40:45 | 2020-06-11T02:40:45 | 268,827,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | from django.conf.urls import url,include
from rest_framework import routers
from . import views
# DRF default router for this app's viewsets.
# NOTE(review): nothing is registered on the router and `router.urls` is never
# included in `urlpatterns` below -- presumably viewsets are meant to be
# registered here and the router URLs included; confirm the intended wiring.
router = routers.DefaultRouter()

urlpatterns = [
]
"49618748+zhouf00@users.noreply.github.com"
] | 49618748+zhouf00@users.noreply.github.com |
05e9150c6a508e13e2e38e2590747d16dad070dd | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-mpc/huaweicloudsdkmpc/v1/model/create_watermark_template_request.py | fe26b61a406957be1a4292565fe85e4762ba11f4 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,991 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateWatermarkTemplateRequest:
    """Request model for creating a watermark template (auto-generated SDK code).

    Attributes:
        openapi_types (dict): maps attribute name -> attribute type.
        attribute_map (dict): maps attribute name -> JSON key in the API definition.
    """

    # Attributes listed here are masked as "****" by to_dict(); empty for this model.
    sensitive_list = []

    openapi_types = {
        'body': 'WatermarkTemplate'
    }

    attribute_map = {
        'body': 'body'
    }

    def __init__(self, body=None):
        """CreateWatermarkTemplateRequest - a model defined in huaweicloud sdk

        :param body: optional WatermarkTemplate payload; only assigned when not None
        """
        
        self._body = None
        # Discriminator is used by generated polymorphic models; unused here.
        self.discriminator = None

        if body is not None:
            self.body = body

    @property
    def body(self):
        """Gets the body of this CreateWatermarkTemplateRequest.


        :return: The body of this CreateWatermarkTemplateRequest.
        :rtype: WatermarkTemplate
        """
        return self._body

    @body.setter
    def body(self, body):
        """Sets the body of this CreateWatermarkTemplateRequest.


        :param body: The body of this CreateWatermarkTemplateRequest.
        :type: WatermarkTemplate
        """
        self._body = body

    def to_dict(self):
        """Returns the model properties as a dict.

        Recursively converts nested models (anything exposing to_dict) inside
        lists and dicts; attributes in sensitive_list are masked as "****".
        """
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model (JSON)."""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 default encoding before serializing.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")

        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal (same type, same __dict__)."""
        if not isinstance(other, CreateWatermarkTemplateRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
3dface85d6a966e144d8e74a1ed487c73e9b9c72 | d23dab09b21553353ad85246ebafaea790f2afbd | /src/python/pants/backend/scala/lint/scalafmt/rules.py | 82f6198e1339413f45ec690c0b395020ca4addcc | [
"Apache-2.0"
] | permissive | asherf/pants | 00e8c64b7831f814bac3c4fa8c342d2237fef17d | c94d9e08f65e9baf3793dff0ec2c571d682f6b90 | refs/heads/master | 2023-05-28T14:45:35.325999 | 2023-01-18T15:16:07 | 2023-01-18T15:16:07 | 185,082,662 | 0 | 0 | Apache-2.0 | 2023-01-18T15:15:46 | 2019-05-05T21:09:43 | Python | UTF-8 | Python | false | false | 7,856 | py | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os.path
from collections import defaultdict
from dataclasses import dataclass
from typing import cast
from pants.backend.scala.lint.scalafmt.skip_field import SkipScalafmtField
from pants.backend.scala.lint.scalafmt.subsystem import ScalafmtSubsystem
from pants.backend.scala.target_types import ScalaSourceField
from pants.core.goals.fmt import FmtResult, FmtTargetsRequest, Partitions
from pants.core.goals.generate_lockfiles import GenerateToolLockfileSentinel
from pants.core.goals.tailor import group_by_dir
from pants.core.util_rules.partitions import Partition
from pants.engine.fs import Digest, DigestSubset, MergeDigests, PathGlobs, Snapshot
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.process import ProcessResult
from pants.engine.rules import collect_rules, rule
from pants.engine.target import FieldSet, Target
from pants.engine.unions import UnionRule
from pants.jvm.goals import lockfile
from pants.jvm.jdk_rules import InternalJdk, JvmProcess
from pants.jvm.resolve.coursier_fetch import ToolClasspath, ToolClasspathRequest
from pants.jvm.resolve.jvm_tool import GenerateJvmLockfileFromTool, GenerateJvmToolLockfileSentinel
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.strutil import pluralize
_SCALAFMT_CONF_FILENAME = ".scalafmt.conf"
@dataclass(frozen=True)
class ScalafmtFieldSet(FieldSet):
    """Field set selected by the scalafmt formatter: any target with Scala sources."""

    required_fields = (ScalaSourceField,)

    source: ScalaSourceField

    @classmethod
    def opt_out(cls, tgt: Target) -> bool:
        # Individual targets can opt out of formatting via the skip field.
        return tgt.get(SkipScalafmtField).value


class ScalafmtRequest(FmtTargetsRequest):
    """Fmt request wiring the scalafmt field set to its subsystem."""

    field_set_type = ScalafmtFieldSet
    tool_subsystem = ScalafmtSubsystem


class ScalafmtToolLockfileSentinel(GenerateJvmToolLockfileSentinel):
    """Sentinel used to generate the JVM lockfile for the scalafmt tool."""

    resolve_name = ScalafmtSubsystem.options_scope


@dataclass(frozen=True)
class GatherScalafmtConfigFilesRequest:
    """Request to locate the `.scalafmt.conf` governing each of these file paths."""

    filepaths: tuple[str, ...]


@dataclass(frozen=True)
class ScalafmtConfigFiles:
    """All discovered config files plus a mapping of source dir -> config file path."""

    snapshot: Snapshot
    source_dir_to_config_file: FrozenDict[str, str]


@dataclass(frozen=True)
class PartitionInfo:
    """Per-partition metadata: tool classpath plus the single config file in use."""

    classpath_entries: tuple[str, ...]
    config_snapshot: Snapshot
    extra_immutable_input_digests: FrozenDict[str, Digest]

    @property
    def description(self) -> str:
        # The partition is identified by its (single) config file path.
        return self.config_snapshot.files[0]
def find_nearest_ancestor_file(files: set[str], dir: str, config_file: str) -> str | None:
    """Walk upward from *dir* toward the root, returning the first path in *files*
    whose basename is *config_file*, or None when no ancestor contains one."""
    current = dir
    while True:
        candidate = os.path.join(current, config_file)
        if candidate in files:
            return candidate
        if current == "":
            # Reached the repo root without a match.
            return None
        current = os.path.dirname(current)
dir = os.path.dirname(dir)
@rule
async def gather_scalafmt_config_files(
    request: GatherScalafmtConfigFilesRequest,
) -> ScalafmtConfigFiles:
    """Gather scalafmt config files and identify which config files to use for each source
    directory."""
    source_dirs = frozenset(os.path.dirname(path) for path in request.filepaths)

    # Every source dir plus all of its ancestors (and "" for the repo root),
    # since a config file in any ancestor may govern the sources.
    source_dirs_with_ancestors = {"", *source_dirs}
    for source_dir in source_dirs:
        source_dir_parts = source_dir.split(os.path.sep)
        source_dir_parts.pop()
        while source_dir_parts:
            source_dirs_with_ancestors.add(os.path.sep.join(source_dir_parts))
            source_dir_parts.pop()

    # Glob for a `.scalafmt.conf` in every candidate directory in one pass.
    config_file_globs = [
        os.path.join(dir, _SCALAFMT_CONF_FILENAME) for dir in source_dirs_with_ancestors
    ]
    config_files_snapshot = await Get(Snapshot, PathGlobs(config_file_globs))
    config_files_set = set(config_files_snapshot.files)

    # Resolve each source dir to its nearest ancestor config; a missing config
    # is a hard error because scalafmt cannot run without one.
    source_dir_to_config_file: dict[str, str] = {}
    for source_dir in source_dirs:
        config_file = find_nearest_ancestor_file(
            config_files_set, source_dir, _SCALAFMT_CONF_FILENAME
        )
        if not config_file:
            raise ValueError(
                f"No scalafmt config file (`{_SCALAFMT_CONF_FILENAME}`) found for "
                f"source directory '{source_dir}'"
            )
        source_dir_to_config_file[source_dir] = config_file

    return ScalafmtConfigFiles(config_files_snapshot, FrozenDict(source_dir_to_config_file))
@rule
async def partition_scalafmt(
    request: ScalafmtRequest.PartitionRequest, tool: ScalafmtSubsystem
) -> Partitions[PartitionInfo]:
    """Group the files to format into one partition per governing `.scalafmt.conf`."""
    if tool.skip:
        return Partitions()

    # Relative path under which the tool classpath is mounted in the sandbox.
    toolcp_relpath = "__toolcp"

    filepaths = tuple(field_set.source.file_path for field_set in request.field_sets)

    # Resolve the scalafmt classpath and the relevant config files concurrently.
    lockfile_request = await Get(GenerateJvmLockfileFromTool, ScalafmtToolLockfileSentinel())
    tool_classpath, config_files = await MultiGet(
        Get(ToolClasspath, ToolClasspathRequest(lockfile=lockfile_request)),
        Get(
            ScalafmtConfigFiles,
            GatherScalafmtConfigFilesRequest(filepaths),
        ),
    )

    extra_immutable_input_digests = {
        toolcp_relpath: tool_classpath.digest,
    }

    # Partition the work by which source files share the same config file (regardless of directory).
    source_files_by_config_file: dict[str, set[str]] = defaultdict(set)

    for source_dir, files_in_source_dir in group_by_dir(filepaths).items():
        config_file = config_files.source_dir_to_config_file[source_dir]
        source_files_by_config_file[config_file].update(
            os.path.join(source_dir, name) for name in files_in_source_dir
        )

    # One single-file snapshot per config file, paired positionally with the
    # partitions built below.
    config_file_snapshots = await MultiGet(
        Get(Snapshot, DigestSubset(config_files.snapshot.digest, PathGlobs([config_file])))
        for config_file in source_files_by_config_file
    )

    return Partitions(
        Partition(
            tuple(files),
            PartitionInfo(
                classpath_entries=tuple(tool_classpath.classpath_entries(toolcp_relpath)),
                config_snapshot=config_snapshot,
                extra_immutable_input_digests=FrozenDict(extra_immutable_input_digests),
            ),
        )
        for files, config_snapshot in zip(
            source_files_by_config_file.values(), config_file_snapshots
        )
    )
@rule(desc="Format with scalafmt", level=LogLevel.DEBUG)
async def scalafmt_fmt(
    request: ScalafmtRequest.Batch, jdk: InternalJdk, tool: ScalafmtSubsystem
) -> FmtResult:
    """Run the scalafmt CLI over one partition's files with its single config file."""
    partition_info = cast(PartitionInfo, request.partition_metadata)
    # Sandbox inputs: the partition's config file plus the sources to format.
    merged_digest = await Get(
        Digest,
        MergeDigests([partition_info.config_snapshot.digest, request.snapshot.digest]),
    )

    result = await Get(
        ProcessResult,
        JvmProcess(
            jdk=jdk,
            argv=[
                "org.scalafmt.cli.Cli",
                f"--config={partition_info.config_snapshot.files[0]}",
                "--non-interactive",
                *request.files,
            ],
            classpath_entries=partition_info.classpath_entries,
            input_digest=merged_digest,
            # scalafmt rewrites files in place; capture them as outputs.
            output_files=request.files,
            extra_jvm_options=tool.jvm_options,
            extra_immutable_input_digests=partition_info.extra_immutable_input_digests,
            # extra_nailgun_keys=request.extra_immutable_input_digests,
            use_nailgun=False,
            description=f"Run `scalafmt` on {pluralize(len(request.files), 'file')}.",
            level=LogLevel.DEBUG,
        ),
    )

    return await FmtResult.create(request, result)
@rule
def generate_scalafmt_lockfile_request(
    _: ScalafmtToolLockfileSentinel, tool: ScalafmtSubsystem
) -> GenerateJvmLockfileFromTool:
    """Map the lockfile sentinel to a concrete lockfile request for the tool."""
    return GenerateJvmLockfileFromTool.create(tool)


def rules():
    """All rules and union registrations contributed by this backend module."""
    return [
        *collect_rules(),
        *lockfile.rules(),
        *ScalafmtRequest.rules(),
        UnionRule(GenerateToolLockfileSentinel, ScalafmtToolLockfileSentinel),
    ]
| [
"noreply@github.com"
] | asherf.noreply@github.com |
b89e16585be9d5a208e0711271a22f5d6e201515 | 886397f22f566025c268c7591ce1f91aa1413a86 | /Stack_and_Queue/002_geeksforgeeks_Next_Larger_Element/Solution.py | 8ffe729762e7b3fa8dec35455a1e065d75178d3c | [] | no_license | Keshav1506/competitive_programming | cd4323355c96a1368135bdfb6b24511bb0b11477 | f2621cd76822a922c49b60f32931f26cce1c571d | refs/heads/master | 2023-02-04T16:48:16.723296 | 2020-12-24T04:13:45 | 2020-12-24T04:13:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,468 | py | #
# Time : O(N); Space: O(1)
# @tag : Stack and Queue
# @by : Shaikat Majumdar
# @date: Aug 27, 2020
# **************************************************************************
# Description:
#
# Given an array A of size N having distinct elements, the task is to find the next greater element for each element of the array in order of their appearance in the array. If no such element exists, output -1
#
# Input:
# The first line of input contains a single integer T denoting the number of test cases.Then T test cases follow. Each test case consists of two lines. The first line contains an integer N denoting the size of the array. The Second line of each test case contains N space separated positive integers denoting the values/elements in the array A.
#
# Output:
# For each test case, print in a new line, the next greater element for each array element separated by space in order.
#
# Constraints:
# 1 <= T <= 100
# 1 <= N <= 107
# 1 <= Ai <= 1018
# Example:
# Input
# 2
# 4
# 1 3 2 4
# 4
# 4 3 2 1
# Output
# 3 4 4 -1
# -1 -1 -1 -1
#
# Explanation:
# Testcase1: In the array, the next larger element to 1 is 3 , 3 is 4 , 2 is 4
# and for 4 ? since it doesn't exist hence -1.
#
# **************************************************************************
# Source: https://practice.geeksforgeeks.org/problems/next-larger-element/0 (GeeksForGeeks - Next Larger Element)
#
#
from typing import List
import unittest
class Solution:
    def nextGreaterElements(self, nums: List[int]) -> List[int]:
        """For each element return the next strictly greater element to its
        right, or -1 when none exists.  Single left-to-right pass with a
        monotonic stack of indices; O(n) time, O(n) space."""
        answer = [-1] * len(nums)
        pending = []  # indices still waiting for their next greater element
        for idx, value in enumerate(nums):
            # Resolve every pending index whose value is smaller than `value`.
            while pending and nums[pending[-1]] < value:
                answer[pending.pop()] = value
            pending.append(idx)
        return answer
class Test(unittest.TestCase):
    """Unit tests for Solution.nextGreaterElements."""

    def setUp(self) -> None:
        pass

    def tearDown(self) -> None:
        pass

    def test_nextGreaterElements(self) -> None:
        solver = Solution()
        cases = (
            ([1, 3, 2, 4], [3, 4, 4, -1]),
            ([4, 3, 2, 1], [-1, -1, -1, -1]),
            ([1, 3, 3, 4], [3, 4, 4, -1]),
            ([1, 3, 3, 3], [3, -1, -1, -1]),
        )
        for nums, expected in cases:
            self.assertEqual(
                expected,
                solver.nextGreaterElements(nums),
                "Should return the next greater element for each element of the array in order of their appearance in the array otherwise -1",
            )


if __name__ == "__main__":
    unittest.main()
| [
"sm2774us@gmail.com"
] | sm2774us@gmail.com |
997edcc27f6dff73d8aad74d24578f3cf20b226d | 33feacc4ef80da09e6843c6b97469ad99b2215cb | /Dictionary/src/deldict.py | 430e54047f93a5a797e4bbc2eea892edc3b91ce0 | [] | no_license | DheerajJoshi/Python-tribble | cfbecbd4e33da38f514c5eee3d61ddd74408e071 | eb82eb04e299ceec39ad19b9fc07873e5479ab74 | refs/heads/master | 2021-05-30T07:59:17.362407 | 2015-10-25T13:33:19 | 2015-10-25T13:33:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | #!/usr/bin/python
dict1 = {'Name': 'Zara', 'Age': 7, 'Class': 'First'};
del dict1['Name']; # remove entry with key 'Name'
dict1.clear();
# remove all entries in dict1
del dict1 ;
# delete entire dictionary
print ("dict1['Age']: ", dict1['Age']);
print ("dict1['School']: ", dict1['School']);
#This will produce the following result. Note an exception raised, this is because after del dict dictionary does not
#exist any more: | [
"joshidj.12@gmail.com"
] | joshidj.12@gmail.com |
da311bd0dc542b41b6afccbf71b942ad15aa3c0a | cc5a3fa80d2ae90afc2626e4a82b9a927726dfa0 | /huaweicloud-sdk-waf/huaweicloudsdkwaf/v1/model/delete_certificate_request.py | 49280d442ebc8df1c3ca56dc98901b35b2864376 | [
"Apache-2.0"
] | permissive | Logan118/huaweicloud-sdk-python-v3 | eca15e9b08bdccef7122e40735d444ddc958efa8 | bb230c03bd00225b9f5780a56adce596e9456420 | refs/heads/master | 2023-07-17T14:57:50.799564 | 2021-08-25T10:40:43 | 2021-08-25T10:40:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,098 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DeleteCertificateRequest:
    """Request model for deleting a WAF certificate (auto-generated SDK code).

    Attributes:
        openapi_types (dict): maps attribute name -> attribute type.
        attribute_map (dict): maps attribute name -> JSON key in the API definition.
    """

    # Attributes listed here are masked as "****" by to_dict(); empty for this model.
    sensitive_list = []

    openapi_types = {
        'certificate_id': 'str'
    }

    attribute_map = {
        'certificate_id': 'certificate_id'
    }

    def __init__(self, certificate_id=None):
        """DeleteCertificateRequest - a model defined in huaweicloud sdk

        :param certificate_id: ID of the certificate to delete (required by the API)
        """
        
        self._certificate_id = None
        # Discriminator is used by generated polymorphic models; unused here.
        self.discriminator = None

        self.certificate_id = certificate_id

    @property
    def certificate_id(self):
        """Gets the certificate_id of this DeleteCertificateRequest.

        证书ID

        :return: The certificate_id of this DeleteCertificateRequest.
        :rtype: str
        """
        return self._certificate_id

    @certificate_id.setter
    def certificate_id(self, certificate_id):
        """Sets the certificate_id of this DeleteCertificateRequest.

        证书ID

        :param certificate_id: The certificate_id of this DeleteCertificateRequest.
        :type: str
        """
        self._certificate_id = certificate_id

    def to_dict(self):
        """Returns the model properties as a dict.

        Recursively converts nested models (anything exposing to_dict) inside
        lists and dicts; attributes in sensitive_list are masked as "****".
        """
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model (JSON)."""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 default encoding before serializing.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")

        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal (same type, same __dict__)."""
        if not isinstance(other, DeleteCertificateRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
3ebe149e847b53c61177f7563d3477880c98187a | 88be3911c7e73d4bf71b0482ee6d15f49030463a | /Func_Decorator/demo8_decorator.py | 0957bf20ac97a758b0acbd5f52d072b9f13b2289 | [] | no_license | skyaiolos/Python_KE | 85f879d1cb637debd2e3a0239d7c8d7bfb30c827 | 8cc42c8f4d1245de4b79af429f72a9ed2508bc1a | refs/heads/master | 2021-01-22T08:47:47.761982 | 2017-05-28T14:57:02 | 2017-05-28T14:57:02 | 92,634,507 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,463 | py | """
# Script Description:
http://www.cnblogs.com/rhcad/archive/2011/12/21/2295507.html
Python装饰器学习(九步入门)
"""
__author__ = "爱的彼岸(QQ:3124724)"
__copyright__ = "Copyright 2017,3124724@qq.com"
print("第八步:让装饰器带 类 参数")

print("------示例8: 装饰器带类参数")
'''示例8: 装饰器带类参数'''


# Demo class acting as a lock: only the static acquire/release methods are
# meant to be used; instantiation is not expected.
class locker:
    def __init__(self):
        print("locker.__init__() should be not called.")

    @staticmethod
    def acquire():
        print("locker.acquire() called. (这里是静态方法)")

    @staticmethod
    def release():
        print(" locker.release() called. (不需要对象实例化)")


def deco(cls):
    '''Decorator factory taking a class argument; cls must provide static
    acquire() and release() methods.'''
    def _deco(func):
        def __deco():
            # Acquire before the call, always release afterwards.
            print("before %s called [%s]." % (func.__name__, cls))
            cls.acquire()
            try:
                return func()
            finally:
                cls.release()
        return __deco
    return _deco


@deco(locker)
def my_func():
    print("my_func() called.")

my_func()
my_func()

# Expected output of the two calls above:
# before my_func called [<class '__main__.locker'>].
# locker.acquire() called. (static method)
# my_func() called.
# locker.release() called. (no instance needed)
# before my_func called [<class '__main__.locker'>].
# locker.acquire() called. (static method)
# my_func() called.
# locker.release() called. (no instance needed)
"skyaiolos@aliyun.com"
] | skyaiolos@aliyun.com |
d32415e83f4447be4139a778226ca0f0b28ff00f | 314245750f897949bc7867883d22b8ff1465fbe1 | /boostcamp/ex/dfs_bfs/1_solved.py | 8564949da94e717d888d487afad6e537ae1696c4 | [] | no_license | dongho108/CodingTestByPython | e608d70235cc6c6a27c71eea86ee28d1271d4d1d | 475b3665377a8f74944d7698e894ad3eafc49ad4 | refs/heads/master | 2023-05-24T15:01:56.563359 | 2021-07-01T14:23:20 | 2021-07-01T14:23:20 | 330,833,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | answer = 0
def dfs(n, total, numbers, target):
    """Recursively try adding/subtracting numbers[n:] onto the running total,
    counting (in the module-level `answer`) every full assignment that hits
    `target`."""
    global answer
    if n == len(numbers):
        if total == target:
            answer += 1
        return
    dfs(n + 1, total + numbers[n], numbers, target)
    dfs(n + 1, total - numbers[n], numbers, target)


def solution(numbers, target):
    """Count the sign assignments of `numbers` whose sum equals `target`.

    Bug fix: `answer` is now reset on every call -- previously repeated calls
    accumulated counts from earlier invocations.
    """
    global answer
    answer = 0
    # The first element contributes either +numbers[0] or -numbers[0].
    dfs(1, numbers[0], numbers, target)
    dfs(1, -numbers[0], numbers, target)
    return answer
"dongho108@naver.com"
] | dongho108@naver.com |
f3d7325e4106686dfd04fb4b95d0df987c6a83c6 | de213b73f703fb8f285bc8cf15e388cc2f98898f | /venv/bin/IveBeenEverywhere.py | 32dad69578024fb806c25252463ac155bd2da901 | [] | no_license | adampehrson/Kattis | 18de025a6a569a46c54cc85c996eec0b55c9f74b | a04922caa356f8113fe30a523f3a148d458a6132 | refs/heads/main | 2023-07-10T02:53:29.782854 | 2021-08-14T10:44:30 | 2021-08-14T10:44:30 | 395,948,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py |
# For each of x test cases, read y city names and print how many are distinct.
i = 0
x = int(input())
while i < x:
    y = int(input())
    # A set gives O(1) membership/insert; the previous list + `cities.count`
    # scan was O(n) per city (O(n^2) per test case).
    cities = set()
    e = 0
    while e < y:
        cities.add(input())
        e += 1
    print(len(cities))
    i += 1
"85373641+adampehrson@users.noreply.github.com"
] | 85373641+adampehrson@users.noreply.github.com |
1162591a036f543f84cd75cc9f65138a01a11000 | 8723f56398a7f969877709192922c053b0e20d56 | /Kayit/migrations/0002_auto_20191230_2010.py | 67c88c715ff83bd43a9649c5b886ab1c30655f68 | [] | no_license | vektorelpython/Python17Web | f215efd9be96062886f67e456c4d0735602b5f00 | f571a4157d2575d441f091f2450d5e24f4f3645d | refs/heads/master | 2022-04-04T20:03:02.941949 | 2020-01-13T16:16:15 | 2020-01-13T16:16:15 | 230,123,576 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | # Generated by Django 2.2.3 on 2019-12-30 18:10
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds `kayit_eden` FK and widens `adi`."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('Kayit', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='kayitmodel',
            name='kayit_eden',
            # NOTE(review): on_delete is the string 'CASCADE' instead of the
            # callable models.CASCADE that Django expects -- this will fail at
            # deletion time / system check; confirm and fix in the model too.
            field=models.ForeignKey(default=1, on_delete='CASCADE', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='kayitmodel',
            name='adi',
            field=models.CharField(max_length=200, verbose_name='Adı'),
        ),
    ]
| [
"ibrahim.ediz@gazi.edu.tr"
] | ibrahim.ediz@gazi.edu.tr |
2fcd4d8f4ade86e244c888b374d3a52a7905389e | 5b3c90d0426dd2adbe756e094c99f066925cda79 | /todoist_tracker/cli/base.py | eee2baa5c52b59f692be8f17b0816a697bee1379 | [
"MIT"
] | permissive | deanmalmgren/todoist-tracker | 5bd85e3903b0f5ca10b05c406535da36a92d7ab5 | f2576a6bf5a80873bc825b3d64a1dc6aed0a145b | refs/heads/master | 2021-01-12T05:02:53.769394 | 2017-01-14T20:08:57 | 2017-01-14T20:08:57 | 77,838,338 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,914 | py | import argparse
import json
import os
import shutil
from todoist import TodoistAPI
import gspread
from gspread.exceptions import WorksheetNotFound
from oauth2client.service_account import ServiceAccountCredentials
class BaseCommand(object):
    """Base class for CLI subcommands.

    Subclasses live in their own module (the module name becomes the command
    name), override `help_text`, and typically extend
    `add_command_line_options` and `execute`.
    """

    help_text = ''

    def __init__(self, subcommand_creator):
        """Register this command with the argparse subparsers object
        `subcommand_creator` and attach its command-line options."""

        # keep a local copy of the config file which is useful during
        # autocompletion
        self.config = None

        # set up the subcommand options
        self.subcommand_creator = subcommand_creator
        self.option_parser = self.subcommand_creator.add_parser(
            self.get_command_name(),
            help=self.help_text,
            description=self.help_text,
        )
        self.add_command_line_options()

    def get_command_name(self):
        """The command name defaults to the name of the module."""
        return self.__module__.rsplit('.', 1)[1]

    def add_command_line_options(self):
        """Add the options shared by every subcommand: credential files and
        a debug flag."""
        self.option_parser.add_argument(
            '--todoist',
            type=argparse.FileType('r'),
            metavar='JSONFILE',
            default='todoist.json',
            help='todoist credentials file in json format',
        )
        self.option_parser.add_argument(
            '--google',
            type=argparse.FileType('r'),
            metavar='JSONFILE',
            default='google.json',
            help='google credentials file in json format',
        )
        self.option_parser.add_argument(
            '--debug',
            action='store_true',
            help='log output on command line, NOT google spreadsheet'
        )

    def execute(self, todoist=None, google=None, debug=None, **kwargs):
        """Common execution workflows are handled here:
        authenticate to Todoist (with a fresh local sync cache) and to the
        Google spreadsheet named in the google credentials file."""

        # create an authenticated instance of the TodoistAPI. be sure to store
        # the cached data locally and to nuke the existing sync prior to
        # running. otherwise the number of outdated tasks grows
        credentials = json.load(todoist)
        credentials['cache'] = os.path.join(
            os.getcwd(), '.todoist-tracker-sync/'
        )
        if os.path.exists(credentials['cache']):
            shutil.rmtree(credentials['cache'])
        self.todoist_api = TodoistAPI(**credentials)

        # authenticate to google
        google_keys = json.load(google)
        credentials = ServiceAccountCredentials.from_json_keyfile_name(
            google.name,
            ['https://spreadsheets.google.com/feeds'],
        )
        gdrive = gspread.authorize(credentials)
        self.gdrive_workbook = gdrive.open_by_url(google_keys['workbook_url'])

    def get_or_create_worksheet(self, title, header):
        """Return the worksheet named `title`, creating it (with `header` as
        its first row) when it does not exist yet."""
        try:
            worksheet = self.gdrive_workbook.worksheet(title)
        except WorksheetNotFound:
            worksheet = self.gdrive_workbook.add_worksheet(title, 1, 26)
            worksheet.insert_row(header)
        return worksheet
| [
"dean.malmgren@datascopeanalytics.com"
] | dean.malmgren@datascopeanalytics.com |
226804966ea7030faa28c955e70c9aaf1b2c505e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/131/usersdata/231/45412/submittedfiles/al10.py | a573e8bb660a39123122360930bff18c017929eb | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | # -*- coding: utf-8 -*-
n=int(input('digite o número de termos:'))
numerador=2
denominador=1
produto=1
i=0
while (i)<=n:
produto=(produto*numerador)/denominador
if i%2==1:
numerador=numerador+2
else:
denominador=denomidor+2
i=i+1
print(produto)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
93de3c327880536cb8e6fadfa3bb218dd1f988d7 | 4adc1d1b8f9badefcd8c25c6e0e87c6545ccde2c | /OrcApi/Run/RunDefMod.py | 2f8dc08045cf79a3369be3dfb48b8252fd92eca1 | [] | no_license | orange21cn/OrcTestToolsKit | eb7b67e87a608fb52d7bdcb2b859fa588263c136 | 69b6a3c382a7043872db1282df4be9e413d297d6 | refs/heads/master | 2020-04-15T07:30:35.485214 | 2017-09-30T06:16:17 | 2017-09-30T06:16:17 | 68,078,991 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,612 | py | # coding=utf-8
import os
import re
from OrcLib.LibCommon import OrcString
from OrcLib import get_config
from OrcLib.LibLog import OrcLog
from OrcLib.LibNet import OrcResource
from OrcLib.LibNet import ResourceCheck
from RunData import RunData
class RunDefMod:
    """
    Run-list management.  Operates on directories under the configured RUN
    home; each directory is named ``[type]_[id]``.  A run directory containing
    a ``result.res`` file (an XML file) has already been executed.
    """

    def __init__(self):

        self.__config = get_config()
        self.__logger = OrcLog("resource.run.run_def.model")

        self.__resource_batch_def = OrcResource("BatchDef")
        self.__resource_case_def = OrcResource("CaseDef")

        self.__data = RunData()

        # Root directory holding all run folders; created on first use.
        self.__home = self.__config.get_option("RUN", "home")
        if not os.path.exists(self.__home):
            os.mkdir(self.__home)

    def usr_search(self, p_cond=None):
        """
        Search the run list.
        :param p_cond: optional filter, e.g. [run_flag="", run_def_type=""]
        :return: list of dicts: run folders (pid=None) followed by their
                 test entries (run_def_type="TEST")
        """
        run_list = os.listdir(self.__home)
        rtn = list()

        # Match each run folder against the condition
        for _item in run_list:

            _status = True
            _type, _id = _item.split("_")

            # Resolve the display flag: BATCH folders show batch_no,
            # CASE folders show case_path; fall back to the raw id.
            if "BATCH" == _type:
                _batch = self.__resource_batch_def.get(path=_id)

                # Check the resource call result
                ResourceCheck.result_status(_batch, u"查询计划信息", self.__logger)

                _flag = _id if not _batch.data else _batch.data["batch_no"]
            else:
                _case = self.__resource_case_def.get(path=_id)

                # Check the resource call result
                ResourceCheck.result_status(_case, u"查询计划信息", self.__logger)

                _flag = _id if not _case.data else _case.data["case_path"]

            # With a condition, filter; without one, keep everything
            if p_cond is not None:

                # Match flag (regex search)
                if "run_flag" in p_cond and not re.search(p_cond["run_flag"], _flag):
                    _status = False

                # Match type (exact)
                if "run_def_type" in p_cond and _type != p_cond["run_def_type"]:
                    _status = False

            if _status:

                # Append the run folder itself
                rtn.append(dict(id=_id, pid=None, run_def_type=_type, run_flag=_flag))

                # Append the test entries beneath the folder
                _test_list = os.listdir(os.path.join(self.__home, _item))
                rtn.extend(list(
                    dict(id="%s:%s" % (_id, test), pid=_id, run_def_type="TEST", run_flag=test)
                    for test in _test_list))

        return rtn

    def usr_add(self, p_data):
        """
        Create a run folder; when p_data["result"] is true, also create a
        timestamped result sub-folder with a default result file.
        :param p_data: {id, run_def_type, result}
        :return: the run id
                 (NOTE(review): docstring previously claimed bool -- the id
                 string is what is actually returned)
        """
        _type = p_data["run_def_type"]
        _id = p_data["id"]
        _result = p_data["result"] if "result" in p_data else False

        # Build the folder name: [type]_[id]
        folder_root = os.path.join(self.__home, "%s_%s" % (_type, _id))

        # Create the run folder if missing
        if not os.path.exists(folder_root):
            os.mkdir(folder_root)

        # Create the result file: try up to 100 sequence numbers
        # ([date][01..99], zero-padded below 10) until an unused one is found.
        if _result:
            for _index in range(100):

                _flag = _index + 1
                if 10 > _flag:
                    _flag = "%s%s" % (0, _flag)

                res_folder = os.path.join(folder_root, "%s%s" % (OrcString.get_data_str(), _flag))
                res_file = os.path.join(res_folder, "default.res")

                if os.path.exists(res_folder):
                    continue

                os.mkdir(res_folder)
                self.__data.save_list(_type, _id, res_file)
                break

        return _id

    def usr_delete(self, p_list):
        """
        Delete run folders (or single result entries addressed as "id:entry").
        :param p_list: list of ids / "id:entry" paths
        :type p_list: list
        :return: True
        """
        delete_list = list()
        # Map bare id -> actual "[type]_[id]" folder name on disk.
        folder_info = {_name.split('_')[1]: _name for _name in os.listdir(self.__home)}

        for _item in p_list:

            _path = _item.split(':')

            if _path[0] in folder_info:
                _path[0] = folder_info[_path[0]]

            # Join home + (folder, [entry...]) into the path to remove.
            del_folder = self.__home
            for _folder in _path:
                del_folder = os.path.join(del_folder, _folder)

            delete_list.append(del_folder)

        for _item in delete_list:
            if os.path.exists(_item):
                import shutil
                shutil.rmtree(_item)

        return True
| [
"orange21cn@126.com"
] | orange21cn@126.com |
3d54b8a8ff113386e8decf0364c938387f21328f | 45b4ff6a4e4804ff84847d56400e10cdb0d96186 | /python/test/test_facility_api.py | 45f0beb3033e2eea55022dd52157db63d76832e0 | [] | no_license | pranav/mbta-libraries | fabbc9305569a344e25fa1b281cba290f0fa3f13 | e793696addd94750f722f3132aadc8dfe00adef5 | refs/heads/master | 2021-08-22T11:15:26.014862 | 2017-11-30T03:12:52 | 2017-11-30T03:12:52 | 112,558,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | # coding: utf-8
"""
MBTA
MBTA service API. https://www.mbta.com
OpenAPI spec version: 3.0
Contact: developer@mbta.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.apis.facility_api import FacilityApi
class TestFacilityApi(unittest.TestCase):
    """Unit test stubs for FacilityApi (generated by swagger-codegen)."""

    def setUp(self):
        # Fresh API client instance before every test case.
        self.api = FacilityApi()

    def tearDown(self):
        pass

    def test_api_facility_controller_index(self):
        """Test case for api_facility_controller_index"""
        pass

    def test_api_facility_controller_show(self):
        """Test case for api_facility_controller_show"""
        pass


if __name__ == '__main__':
    unittest.main()
| [
"pgandhi@hubspot.com"
] | pgandhi@hubspot.com |
ed03d817b2745fe31e30d8bef403cb40adbead8c | 02bcd98063c2088e9ab6a266c7f7c57d0c06cd33 | /install.py | 9ec2a4b8a4f81785a93ba6c43892aee798a445c6 | [
"MIT"
] | permissive | kevinsegal/BabySploit | e99d2c89a041de9f0f2e6e3d4f0ce5bb015011f0 | 66bafc25e04e7512e8b87b161bd3b7201bb57b63 | refs/heads/master | 2020-04-07T04:27:18.870230 | 2018-11-18T03:34:38 | 2018-11-18T03:34:38 | 158,056,891 | 1 | 0 | MIT | 2018-11-18T06:35:20 | 2018-11-18T06:35:20 | null | UTF-8 | Python | false | false | 1,407 | py | import os, time, subprocess, sys
from sys import stdout
def Command_exe(msg, cmd):
    """Print a one-line progress status for *msg*, run *cmd* through the
    shell (all output discarded), then rewrite the line with the outcome."""
    status = "[STATUS] Processing"
    stdout.write(" " + msg + " %s" % status)
    stdout.flush()
    # exit code 0 means the shell command succeeded
    if subprocess.call(cmd + ' >/dev/null 2>&1', shell=True) == 0:
        status = "Complete [WARNING] "
    else:
        status = "Error [WARNING] "
    # '\r' rewinds to the start of the line so the status overwrites itself
    stdout.write("\r" + msg + "[STATUS] %s" % status)
def start():
    """Interactive installer: requires root, adds the Kali apt repo when
    missing, then installs the required system packages and virtualenv."""
    # apt operations below need root privileges
    if os.getuid() != 0:
        print("[ERROR] Install must be run as root.")
        print("Login as root (sudo) or try sudo python3 install.py")
        exit()
    print(" == BabySploit Installation ==")
    input("Press ENTER To Start Installation")
    # Read the apt sources as one string to search for the Kali repo URL
    with open("/etc/apt/sources.list", "r") as myfile:
        data = myfile.read().replace('\n', "")
    if "http://http.kali.org/kali" not in data:
        # NOTE(review): Command_exe returns None, so each print() below emits
        # a literal "None" after the status line — presumably only meant to
        # end the line with a newline; confirm intent before changing.
        print(Command_exe("["+time.strftime('%H:%M:%S')+"] Adding Repo To Sources... ",'apt-add-repository "deb http://http.kali.org/kali kali-rolling main non-free contrib"'))
    else:
        pass
    print(Command_exe("["+time.strftime('%H:%M:%S')+"] Installing Required Dependencies... ",'apt-get install exploitdb netcat nmap php7.0 perl'))
    print(Command_exe("["+time.strftime('%H:%M:%S')+"] Installing Virtual Environment... ",'pip3 install virtualenv'))
    print("Complete!")
    print("Please start virtualenv and run pip3 install -r requirements.txt!")
# Runs at import time: the script has no __main__ guard.
start()
| [
"maxlikescs@gmail.com"
] | maxlikescs@gmail.com |
07f73d058858e0f3315b38071815f60bc073c77a | f03bd5bd7873c5cc33b4ef5199f219539f3a340e | /CAAPR/CAAPR_AstroMagic/PTS/pts/modeling/plotting/preparation.py | 486c6eae84d41a5dfc524a86d527248f79a7be4c | [
"GPL-1.0-or-later",
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-philippe-de-muyter",
"MIT"
] | permissive | Stargrazer82301/CAAPR | 5f8a7033b16792f23abd5d07021b53b9228a5db4 | 62b2339beb2eb956565e1605d44d92f934361ad7 | refs/heads/master | 2022-08-29T02:53:33.658022 | 2022-08-05T19:06:46 | 2022-08-05T19:06:46 | 49,977,601 | 8 | 1 | MIT | 2022-08-05T19:06:47 | 2016-01-19T19:32:42 | Python | UTF-8 | Python | false | false | 22,385 | py | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.plotting.preparation Contains the PreparationPlotter class
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
from matplotlib import cm
# Import astronomical modules
from astropy.utils import lazyproperty
# Import the relevant PTS classes and modules
from .component import PlottingComponent
from ..preparation.component import PreparationComponent
from ...core.tools import filesystem as fs
from ...core.tools.logging import log
from ...magic.core.frame import Frame
from ...magic.core.io import get_frame_names
from ...magic.basics.mask import Mask, get_mask_names
from ...magic.basics.region import Region
from ...magic.plot.imagegrid import StandardImageGridPlotter
from ...core.plot.distribution import DistributionGridPlotter, DistributionPlotter
from ...core.basics.distribution import Distribution
from ...magic.plot.error import ErrorPlotter
# -----------------------------------------------------------------
class PreparationPlotter(PlottingComponent, PreparationComponent):

    """
    Plots diagnostics of the image preparation step: the prepared frames,
    their error maps, source/sky masks, sky annuli and sky apertures.
    """

    def __init__(self, config=None):

        """
        The constructor.
        :param config: configuration passed on to the PlottingComponent base
        """

        # Call the constructors of both base classes directly
        # (super(PlottingComponent, self).__init__(config) was unreliable here)
        PlottingComponent.__init__(self, config)
        PreparationComponent.__init__(self)

        # -- Attributes --

        # Features to plot (None means plot everything)
        self.features = None

        # The paths to the resulting FITS files
        self.result_paths = dict()

        # The paths to the sky directories
        self.sky_paths = dict()

        # The dictionary of prepared image frames
        self.images = dict()

        # The dictionaries of error frames
        self.errors = dict()
        self.poisson_errors = dict()
        self.calibration_errors = dict()
        self.sky_errors = dict()

        # The dictionary of sources masks
        self.sources_masks = dict()

        # The dictionary of sky masks
        self.sky_masks = dict()

        # The dictionary of sky values
        self.sky_values = dict()

        # The dictionary of sky annuli
        self.annuli = dict()

        # The dictionary of sky apertures
        self.apertures = dict()

    # -----------------------------------------------------------------

    def run(self, features=None):

        """
        Load all preparation products and make the requested plots.
        :param features: optional list of feature names to plot (None = all)
        :return:
        """

        # 1. Call the setup function
        self.setup(features)

        # 2. Load the prepared images
        self.load_images()

        # 3. Load the error frames
        self.load_errors()

        # 4. Load the source and sky masks
        self.load_masks()

        # 5. Load the sky values
        self.load_sky()

        # 6. Load the galaxy and sky annuli
        self.load_annuli()

        # 7. Load the sky apertures
        self.load_apertures()

        # 8. Plot
        self.plot()

    # -----------------------------------------------------------------

    def setup(self, features=None):

        """
        Locate the 'result.fits' file and 'sky' directory of each prepared image.
        :param features: optional list of feature names to plot
        :return:
        """

        # Call the setup function of the base class
        super(PreparationPlotter, self).setup()

        # Set features to plot
        self.features = features

        # Loop over all directories in the preparation directory
        for directory_path, directory_name in fs.directories_in_path(self.prep_path, returns=["path", "name"]):

            # Look for a file called 'result.fits'
            image_path = fs.join(directory_path, "result.fits")
            if not fs.is_file(image_path):
                log.warning("Prepared image could not be found for " + directory_name)
                continue

            # Add the image path to the dictionary
            self.result_paths[directory_name] = image_path

            # Look for the 'sky' directory
            sky_path = fs.join(directory_path, "sky")
            if not fs.is_directory(sky_path):
                log.warning("Sky directory is not present for " + directory_name)
                continue

            # Add the sky directory path to the dictionary
            self.sky_paths[directory_name] = sky_path

    # -----------------------------------------------------------------

    def load_images(self):

        """
        Load the prepared image frame of each result file into self.images.
        :return:
        """

        # Inform the user
        log.info("Loading the prepared images ...")

        # Loop over the image paths
        for label in self.result_paths:

            # Open the prepared image frame
            frame = Frame.from_file(self.result_paths[label])

            # Set the image name
            frame.name = label

            # Add the image to the dictionary
            self.images[label] = frame

    # -----------------------------------------------------------------

    def load_masks(self):

        """
        Load the sources masks and the sky masks.
        :return:
        """

        # Inform the user
        log.info("Loading the masks ...")

        # Load sources masks
        self.load_sources_masks()

        # Load sky masks
        self.load_sky_masks()

    # -----------------------------------------------------------------

    def load_sources_masks(self):

        """
        Load the 'sources' mask plane of each result file into self.sources_masks.
        :return:
        """

        # Inform the user
        log.info("Loading the sources masks ...")

        # Loop over the image paths
        for label in self.result_paths:

            # Check whether the sources mask is present in the FITS file
            if not "sources" in get_mask_names(self.result_paths[label]):
                log.warning("The sources mask is not present in the " + label + " image")
                # FIX: previously fell through and tried to load the missing
                # plane anyway (cf. load_sky_masks, which already skipped here)
                continue

            # Open the sources mask
            mask = Mask.from_file(self.result_paths[label], plane="sources")

            # Add the mask to the dictionary
            self.sources_masks[label] = mask

    # -----------------------------------------------------------------

    def load_sky_masks(self):

        """
        Load the 'sky' mask plane of each result file into self.sky_masks.
        :return:
        """

        # Inform the user
        log.info("Loading the sky masks ...")

        # Loop over the image paths
        for label in self.result_paths:

            # Check whether the sky mask is present in the FITS file
            if not "sky" in get_mask_names(self.result_paths[label]):
                log.warning("The sky mask is not present in the " + label + " image")
                continue

            # Open the sky mask
            mask = Mask.from_file(self.result_paths[label], plane="sky")

            # Add the sky mask to the dictionary
            self.sky_masks[label] = mask

    # -----------------------------------------------------------------

    def load_sky(self):

        """
        Load the (constant) estimated sky value of each image into self.sky_values.
        :return:
        """

        # Inform the user
        log.info("Loading the sky values ...")

        # Loop over the image paths
        for label in self.result_paths:

            # Open the sky frame
            sky = Frame.from_file(self.result_paths[label], plane="sky")

            # Get the sky value (assuming the sky frame is constant)
            value = sky[0,0]

            # Add the sky value to the dictionary
            self.sky_values[label] = value

    # -----------------------------------------------------------------

    def load_annuli(self):

        """
        Load the galaxy/sky annulus region of each image into self.annuli.
        :return:
        """

        # Inform the user
        log.info("Loading the galaxy and sky annuli ...")

        # Loop over the sky paths
        for label in self.sky_paths:

            # Look for the annulus region file
            region_path = fs.join(self.sky_paths[label], "sky.reg")
            if not fs.is_file(region_path):
                log.warning("The annulus region could not be found for " + label)
                continue

            # Open the annulus region
            region = Region.from_file(region_path).homogenized()

            # Add the region to the dictionary
            self.annuli[label] = region

    # -----------------------------------------------------------------

    def load_apertures(self):

        """
        Load the sky apertures frame of each image into self.apertures.
        :return:
        """

        # Inform the user
        log.info("Loading the sky apertures ...")

        # Loop over the sky paths
        for label in self.sky_paths:

            # Look for the apertures FITS file
            apertures_path = fs.join(self.sky_paths[label], "apertures.fits")
            if not fs.is_file(apertures_path):
                log.warning("The apertures image could not be found for " + label)
                continue

            # Open the apertures image
            apertures = Frame.from_file(apertures_path)

            # Add the apertures image to the dictionary
            self.apertures[label] = apertures

    # -----------------------------------------------------------------

    def load_errors(self):

        """
        Load all error planes (total, poisson, sky, calibration).
        :return:
        """

        # Inform the user
        log.info("Loading the error frames ...")

        # Load the total errors
        self.load_total_errors()

        # Load the poisson errors
        self.load_poisson_errors()

        # Load the sky errors
        self.load_sky_errors()

        # Load the calibration errors
        self.load_calibration_errors()

    # -----------------------------------------------------------------

    def load_total_errors(self):

        """
        Load the total 'errors' plane of each result file into self.errors.
        :return:
        """

        # NOTE(review): unlike the other loaders, this one does not check that
        # the 'errors' plane is present before opening it — confirm that every
        # prepared image is guaranteed to carry it.

        # Loop over the image paths
        for label in self.result_paths:

            # Open the errors frame
            frame = Frame.from_file(self.result_paths[label], plane="errors")

            # Set the image name
            frame.name = label

            # Add the error frame to the dictionary
            self.errors[label] = frame

    # -----------------------------------------------------------------

    def load_poisson_errors(self):

        """
        Load the optional 'poisson_errors' plane of each result file.
        :return:
        """

        # Loop over the image paths
        for label in self.result_paths:

            # The poisson plane is optional: skip silently when absent
            if not "poisson_errors" in get_frame_names(self.result_paths[label]): continue

            # Open the poisson errors frame
            errors = Frame.from_file(self.result_paths[label], plane="poisson_errors")

            # Add the error frame to the dictionary
            self.poisson_errors[label] = errors

    # -----------------------------------------------------------------

    def load_sky_errors(self):

        """
        Load the 'sky_errors' plane of each result file into self.sky_errors.
        :return:
        """

        # Loop over the image paths
        for label in self.result_paths:

            # Check if the sky_errors frame is present in the FITS file
            if not "sky_errors" in get_frame_names(self.result_paths[label]):
                log.warning("The sky_errors frame is not present in the " + label + " image")
                continue

            # Open the sky error frame
            errors = Frame.from_file(self.result_paths[label], plane="sky_errors")

            # Add the error frame to the dictionary
            self.sky_errors[label] = errors

    # -----------------------------------------------------------------

    def load_calibration_errors(self):

        """
        Load the 'calibration_errors' plane of each result file.
        :return:
        """

        # Loop over the image paths
        for label in self.result_paths:

            # Check if the calibration_errors frame is present in the FITS file
            if not "calibration_errors" in get_frame_names(self.result_paths[label]):
                log.warning("The calibration_errors frame is not present in the " + label + " image")
                continue

            # Open the calibration error frame
            errors = Frame.from_file(self.result_paths[label], plane="calibration_errors")

            # Add the error frame to the dictionary
            self.calibration_errors[label] = errors

    # -----------------------------------------------------------------

    def plot(self):

        """
        Make every plot whose feature name is requested (or all when
        self.features is None).
        :return:
        """

        # Inform the user
        log.info("Plotting ...")

        # Plot a grid of the prepared images
        if self.features is None or "images" in self.features: self.plot_images()

        # Plot the grid of images with the sources masks and sky annuli overlayed
        if self.features is None or "masks_annuli" in self.features: self.plot_masks_and_annuli()

        # Plot a grid of the apertures
        if self.features is None or "apertures" in self.features: self.plot_apertures()

        # Plot the sky values
        if self.features is None or "sky" in self.features: self.plot_sky()

        # Plot the distributions of the relative errors
        if self.features is None or "errors" in self.features: self.plot_errors()

    # -----------------------------------------------------------------

    def plot_images(self):

        """
        Plot a grid of the prepared images to 'preparation.pdf'.
        :return:
        """

        # Inform the user
        log.info("Plotting the images ...")

        # Create the image plotter
        plotter = StandardImageGridPlotter()

        # Add the images, ordered by wavelength
        for label in self.sorted_labels: plotter.add_image(self.images[label], label)

        # Determine the path to the plot file
        path = fs.join(self.plot_preparation_path, "preparation.pdf")

        plotter.vmin = 0.0
        plotter.set_title("Prepared images")

        # Make the plot
        plotter.run(path)

    # -----------------------------------------------------------------

    def plot_sky(self):

        """
        Plot, per image, the distribution of the (sky-added-back) pixel values
        against the distribution of the pixels used for the sky estimation.
        :return:
        """

        # Inform the user
        log.info("Plotting the sky values ...")

        # Create the distribution grid plotter
        plotter = DistributionGridPlotter()

        sky_path = fs.join(self.plot_preparation_path, "sky")
        if not fs.is_directory(sky_path): fs.create_directory(sky_path)

        # Loop over the different images
        for label in self.sorted_labels:

            not_nan = Mask.is_nan(self.images[label]).inverse()

            # Create the distribution from the image pixel values
            # (sky value is added back so both distributions share the scale)
            distribution = Distribution.from_values(self.images[label][not_nan].flatten() + self.sky_values[label])

            # Create an array of all the pixels used for estimating the sky
            notnan = Mask.is_nan(self.apertures[label]).inverse()
            sky_values = self.apertures[label][notnan]

            # Create the distribution of pixel values used for the sky estimation
            sky_distribution = Distribution.from_values(sky_values)

            # Add the distributions
            plotter.add_distribution(distribution, label)
            plotter.add_distribution(sky_distribution, label)

            # Plot separately
            distr_plotter = DistributionPlotter()
            distr_plotter.add_distribution(distribution, "image")
            distr_plotter.add_distribution(sky_distribution, "sky")
            distr_plotter.run(fs.join(sky_path, label + ".pdf"))

        # Determine the path to the plot file
        path = fs.join(self.plot_preparation_path, "sky_distribution.pdf")

        # Run the plotter
        plotter.run(path)

    # -----------------------------------------------------------------

    def plot_errors(self):

        """
        Plot the error diagnostics.
        :return:
        """

        # Inform the user
        log.info("Plotting the errors ...")

        # The histogram plots are deliberately disabled here; the methods are
        # kept below so they can be re-enabled.
        # Plot histograms of the absolute error values
        #self.plot_error_histograms_absolute()

        # Plot histograms of the relative error values
        #self.plot_error_histograms_relative()

        # Plot the relative errors of each pixel
        self.plot_errors_pixels()

    # -----------------------------------------------------------------

    def plot_error_histograms_absolute(self):

        """
        Plot, per image, the histogram of absolute error values next to the
        histogram of the image values themselves.
        :return:
        """

        # Inform the user
        log.info("Plotting the absolute error values in a histogram for each prepared image compared to the histogram of the actual image values ...")

        # Create the distribution grid plotter
        plotter = DistributionGridPlotter()

        absolute_errors_path = fs.join(self.plot_preparation_path, "absolute_errors")
        if not fs.is_directory(absolute_errors_path): fs.create_directory(absolute_errors_path)

        # Loop over the different images
        for label in self.sorted_labels:

            not_nan = Mask.is_nan(self.images[label]).inverse()

            # Create the distribution from the image pixel values
            distribution = Distribution.from_values(self.images[label][not_nan].flatten())

            not_nan = Mask.is_nan(self.errors[label]).inverse()

            # Create the distribution from the error values
            error_distribution = Distribution.from_values(self.errors[label][not_nan].flatten())

            # Add an entry to the distribution grid plotter
            plotter.add_distribution(distribution, label)
            plotter.add_distribution(error_distribution, label)

            # Plot separately
            distr_plotter = DistributionPlotter()
            distr_plotter.add_distribution(distribution, "image")
            distr_plotter.add_distribution(error_distribution, "absolute errors")
            distr_plotter.run(fs.join(absolute_errors_path, label + ".pdf"))

        # Determine the path to the plot file
        path = fs.join(self.plot_preparation_path, "absolute_errors.pdf")

        # Run the plotter
        plotter.run(path)

    # -----------------------------------------------------------------

    def plot_error_histograms_relative(self):

        """
        Plot, per image, the histogram of the relative error values.
        :return:
        """

        # Inform the user
        log.info("Plotting the relative error values in a histogram for each prepared image ...")

        # Create the distribution grid plotter
        plotter = DistributionGridPlotter()

        relative_errors_path = fs.join(self.plot_preparation_path, "relative_errors")
        if not fs.is_directory(relative_errors_path): fs.create_directory(relative_errors_path)

        # Loop over the different images
        for label in self.sorted_labels:

            # Calculate the relative errors
            rel_errors = self.errors[label] / self.images[label]

            # Create a distribution from the relative errors
            # NOTE(review): the other histogram methods mask NaNs and flatten
            # before Distribution.from_values — confirm whether that is
            # needed here as well.
            rel_error_distribution = Distribution.from_values(rel_errors)

            # Add the distribution to the plotter
            plotter.add_distribution(rel_error_distribution, label)

            # Plot separately
            rel_error_distribution.plot(title="relative errors", path=fs.join(relative_errors_path, label + ".pdf"))

        # Determine the path to the plot file
        path = fs.join(self.plot_preparation_path, "relative_errors.pdf")

        # Run the plotter
        plotter.run(path)

    # -----------------------------------------------------------------

    def plot_errors_pixels(self):

        """
        Plot the relative error of each pixel of each prepared image.
        :return:
        """

        # Inform the user
        log.info("Plotting the relative error for each pixel in each prepared image ...")

        # Create the ErrorPlotter instance
        plotter = ErrorPlotter()

        # NOTE(review): no images or error frames are handed to the plotter
        # before run() — this looks unfinished; verify against ErrorPlotter.

        # Determine the path to the plot file
        path = fs.join(self.plot_preparation_path, "errors_pixels.png")

        # Run the plotter
        plotter.run(path)

    # -----------------------------------------------------------------

    def plot_masks_and_annuli(self):

        """
        Plot the prepared images with the sources masks and sky annuli overlayed.
        :return:
        """

        # Inform the user
        log.info("Plotting the images with the sources masks and sky annuli overlayed ...")

        # Create the image plotter
        plotter = StandardImageGridPlotter()

        # Add the images
        for label in self.sorted_labels: plotter.add_image(self.images[label], label, mask=self.sources_masks[label], region=self.annuli[label])

        # Determine the path to the plot file
        path = fs.join(self.plot_preparation_path, "preparation_masks_annuli.pdf")

        plotter.vmin = 0.0
        plotter.set_title("Prepared images with sources masks and sky annuli")

        # Make the plot
        plotter.run(path)

    # -----------------------------------------------------------------

    def plot_apertures(self):

        """
        Plot the aperture frames with the sky annuli overlayed.
        :return:
        """

        # Inform the user
        log.info("Plotting the aperture frames with the sky annuli overlayed ...")

        # Create the image plotter
        plotter = StandardImageGridPlotter()

        # Add the images
        for label in self.sorted_labels: plotter.add_image(self.apertures[label], label, region=self.annuli[label])

        # Determine the path to the plot file
        path = fs.join(self.plot_preparation_path, "preparation_apertures.pdf")

        plotter.vmin = 0.0
        plotter.set_title("Aperture frames with sky annuli")

        # Make the plot
        plotter.run(path)

    # -----------------------------------------------------------------

    @lazyproperty
    def sorted_labels(self):

        """
        The image labels, sorted by ascending filter pivot wavelength.
        :return:
        """

        sorted_labels = sorted(self.images.keys(), key=lambda key: self.images[key].filter.pivotwavelength())
        return sorted_labels
# -----------------------------------------------------------------
| [
"cjrc88@gmail.com"
] | cjrc88@gmail.com |
29e7ca9976f3be06036fd6349285796e881773cd | 45df508e4c99f453ca114053a92deb65939f18c9 | /tfx/examples/custom_components/slack/example/taxi_pipeline_slack.py | 3dbce075cb89247de90ea067d0bc447f94df9942 | [
"Apache-2.0"
] | permissive | VonRosenchild/tfx | 604eaf9a3de3a45d4084b36a478011d9b7441fc1 | 1c670e92143c7856f67a866f721b8a9368ede385 | refs/heads/master | 2020-08-09T13:45:07.067267 | 2019-10-10T03:07:20 | 2019-10-10T03:07:48 | 214,100,022 | 1 | 0 | Apache-2.0 | 2019-10-10T06:06:11 | 2019-10-10T06:06:09 | null | UTF-8 | Python | false | false | 7,087 | py | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example pipeline to demonstrate custom TFX component.
This example consists of standard TFX components as well as a custom TFX
component requesting for manual review through Slack.
This example along with the custom `SlackComponent` will only serve as an
example and will not be supported by TFX team.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from slack_component.component import SlackComponent
from tfx.components.evaluator.component import Evaluator
from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen
from tfx.components.example_validator.component import ExampleValidator
from tfx.components.model_validator.component import ModelValidator
from tfx.components.pusher.component import Pusher
from tfx.components.schema_gen.component import SchemaGen
from tfx.components.statistics_gen.component import StatisticsGen
from tfx.components.trainer.component import Trainer
from tfx.components.transform.component import Transform
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_runner import BeamRunner
from tfx.proto import evaluator_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.utils.dsl_utils import csv_input
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data/simple')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_taxi_module_file = os.path.join(_taxi_root, 'taxi_utils_slack.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model/taxi_slack')
# Slack channel to push the model notifications to.
_slack_channel_id = 'my-channel-id'
# Slack token to set up connection.
_slack_token = os.environ['SLACK_BOT_TOKEN']
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_name = 'chicago_taxi_slack'
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
_metadata_db_root = os.path.join(_tfx_root, 'metadata', _pipeline_name)
_log_root = os.path.join(_tfx_root, 'logs')
# Airflow-specific configs; these will be passed directly to airflow
_airflow_config = {
'schedule_interval': None,
'start_date': datetime.datetime(2019, 1, 1),
}
def _create_pipeline():
"""Implements the chicago taxi pipeline with TFX."""
examples = csv_input(_data_root)
# Brings data into the pipeline or otherwise joins/converts training data.
example_gen = CsvExampleGen(input=examples)
# Computes statistics over data for visualization and example validation.
statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
# Generates schema based on statistics files.
infer_schema = SchemaGen(statistics=statistics_gen.outputs['statistics'])
# Performs anomaly detection based on statistics and data schema.
validate_stats = ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=infer_schema.outputs['schema'])
# Performs transformations and feature engineering in training and serving.
transform = Transform(
examples=example_gen.outputs['examples'],
schema=infer_schema.outputs['schema'],
module_file=_taxi_module_file)
# Uses user-provided Python function that implements a model using TF-Learn.
trainer = Trainer(
module_file=_taxi_module_file,
examples=transform.outputs['transformed_examples'],
schema=infer_schema.outputs['schema'],
transform_graph=transform.outputs['transform_graph'],
train_args=trainer_pb2.TrainArgs(num_steps=10000),
eval_args=trainer_pb2.EvalArgs(num_steps=5000))
# Uses TFMA to compute a evaluation statistics over features of a model.
model_analyzer = Evaluator(
examples=example_gen.outputs['examples'],
model_exports=trainer.outputs['model'],
feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[
evaluator_pb2.SingleSlicingSpec(
column_for_slicing=['trip_start_hour'])
]))
# Performs quality validation of a candidate model (compared to a baseline).
model_validator = ModelValidator(
examples=example_gen.outputs['examples'], model=trainer.outputs['model'])
# This custom component serves as a bridge between pipeline and human model
# reviewers to enable review-and-push workflow in model development cycle. It
# utilizes Slack API to send message to user-defined Slack channel with model
# URI info and wait for go / no-go decision from the same Slack channel:
# * To approve the model, users need to reply the thread sent out by the bot
# started by SlackComponent with 'lgtm' or 'approve'.
# * To reject the model, users need to reply the thread sent out by the bot
# started by SlackComponent with 'decline' or 'reject'.
slack_validator = SlackComponent(
model=trainer.outputs['model'],
model_blessing=model_validator.outputs['blessing'],
slack_token=_slack_token,
slack_channel_id=_slack_channel_id,
timeout_sec=3600,
)
# Checks whether the model passed the validation steps and pushes the model
# to a file destination if check passed.
pusher = Pusher(
model=trainer.outputs['model'],
model_blessing=slack_validator.outputs['slack_blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=_serving_model_dir)))
return pipeline.Pipeline(
pipeline_name=_pipeline_name,
pipeline_root=_pipeline_root,
components=[
example_gen, statistics_gen, infer_schema, validate_stats, transform,
trainer, model_analyzer, model_validator, slack_validator, pusher
],
enable_cache=True,
metadata_connection_config=metadata.sqlite_metadata_connection_config(
_metadata_db_root),
)
if __name__ == '__main__':
BeamRunner().run(_create_pipeline())
| [
"tensorflow-extended-team@google.com"
] | tensorflow-extended-team@google.com |
4f28e36150a4c5c9a4bf75957a1d0d02781ce721 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/contourcarpet/_db.py | bb09cf5f377e552d7947bdae4b474578ed21a179 | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 463 | py | import _plotly_utils.basevalidators
class DbValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the `contourcarpet.db` number property."""

    def __init__(self, plotly_name="db", parent_name="contourcarpet", **kwargs):
        # Callers may override these; otherwise fall back to the trace defaults.
        edit_type = kwargs.pop("edit_type", "calc")
        implied_edits = kwargs.pop("implied_edits", {"ytype": "scaled"})
        super(DbValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            **kwargs
        )
| [
"noreply@github.com"
] | hugovk.noreply@github.com |
a98d186aebdc18f7d4377a743b524e38f60cc783 | 1a59a9076c1e9f1eb98e24ff41a4c1c95e2b353e | /xcp2k/classes/_xc_functional4.py | ad5a17e378f40fa030a908f352016d1de833a6ac | [] | no_license | Roolthasiva/xcp2k | 66b2f30ebeae1a946b81f71d22f97ea4076e11dc | fc3b5885503c6f6dc549efeb4f89f61c8b6b8242 | refs/heads/master | 2022-12-23T06:03:14.033521 | 2020-10-07T08:01:48 | 2020-10-07T08:01:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,913 | py | from xcp2k.inputsection import InputSection
from xcp2k.classes._becke884 import _becke884
from xcp2k.classes._lyp_adiabatic4 import _lyp_adiabatic4
from xcp2k.classes._becke88_lr_adiabatic4 import _becke88_lr_adiabatic4
from xcp2k.classes._becke88_lr4 import _becke88_lr4
from xcp2k.classes._lyp4 import _lyp4
from xcp2k.classes._pade4 import _pade4
from xcp2k.classes._hcth4 import _hcth4
from xcp2k.classes._optx4 import _optx4
from xcp2k.classes._libxc4 import _libxc4
from xcp2k.classes._ke_libxc4 import _ke_libxc4
from xcp2k.classes._cs14 import _cs14
from xcp2k.classes._xgga4 import _xgga4
from xcp2k.classes._ke_gga4 import _ke_gga4
from xcp2k.classes._p86c4 import _p86c4
from xcp2k.classes._pw924 import _pw924
from xcp2k.classes._pz814 import _pz814
from xcp2k.classes._tfw4 import _tfw4
from xcp2k.classes._tf4 import _tf4
from xcp2k.classes._vwn4 import _vwn4
from xcp2k.classes._xalpha4 import _xalpha4
from xcp2k.classes._tpss4 import _tpss4
from xcp2k.classes._pbe4 import _pbe4
from xcp2k.classes._xwpbe4 import _xwpbe4
from xcp2k.classes._becke974 import _becke974
from xcp2k.classes._becke_roussel4 import _becke_roussel4
from xcp2k.classes._lda_hole_t_c_lr4 import _lda_hole_t_c_lr4
from xcp2k.classes._pbe_hole_t_c_lr4 import _pbe_hole_t_c_lr4
from xcp2k.classes._gv094 import _gv094
from xcp2k.classes._beef4 import _beef4
class _xc_functional4(InputSection):
    """CP2K input-section wrapper for the &XC_FUNCTIONAL block (auto-generated).

    Each attribute mirrors one XC_FUNCTIONAL subsection wrapper; LIBXC and
    KE_LIBXC may occur multiple times in the input and are therefore kept in
    lists, populated via the *_add helper methods below.
    """
    def __init__(self):
        InputSection.__init__(self)
        # Optional value placed on the section header line itself.
        self.Section_parameters = None
        # One wrapper instance per (non-repeatable) subsection.
        self.BECKE88 = _becke884()
        self.LYP_ADIABATIC = _lyp_adiabatic4()
        self.BECKE88_LR_ADIABATIC = _becke88_lr_adiabatic4()
        self.BECKE88_LR = _becke88_lr4()
        self.LYP = _lyp4()
        self.PADE = _pade4()
        self.HCTH = _hcth4()
        self.OPTX = _optx4()
        # Repeatable subsections: zero or more instances, appended on demand.
        self.LIBXC_list = []
        self.KE_LIBXC_list = []
        self.CS1 = _cs14()
        self.XGGA = _xgga4()
        self.KE_GGA = _ke_gga4()
        self.P86C = _p86c4()
        self.PW92 = _pw924()
        self.PZ81 = _pz814()
        self.TFW = _tfw4()
        self.TF = _tf4()
        self.VWN = _vwn4()
        self.XALPHA = _xalpha4()
        self.TPSS = _tpss4()
        self.PBE = _pbe4()
        self.XWPBE = _xwpbe4()
        self.BECKE97 = _becke974()
        self.BECKE_ROUSSEL = _becke_roussel4()
        self.LDA_HOLE_T_C_LR = _lda_hole_t_c_lr4()
        self.PBE_HOLE_T_C_LR = _pbe_hole_t_c_lr4()
        self.GV09 = _gv094()
        self.BEEF = _beef4()
        # Section keyword used when rendering this object back to CP2K input.
        self._name = "XC_FUNCTIONAL"
        # Maps attribute names to input-file subsection keywords.
        self._subsections = {'BECKE88': 'BECKE88', 'LYP_ADIABATIC': 'LYP_ADIABATIC', 'BECKE88_LR_ADIABATIC': 'BECKE88_LR_ADIABATIC', 'BECKE88_LR': 'BECKE88_LR', 'LYP': 'LYP', 'PADE': 'PADE', 'HCTH': 'HCTH', 'OPTX': 'OPTX', 'CS1': 'CS1', 'XGGA': 'XGGA', 'KE_GGA': 'KE_GGA', 'P86C': 'P86C', 'PW92': 'PW92', 'PZ81': 'PZ81', 'TFW': 'TFW', 'TF': 'TF', 'VWN': 'VWN', 'XALPHA': 'XALPHA', 'TPSS': 'TPSS', 'PBE': 'PBE', 'XWPBE': 'XWPBE', 'BECKE97': 'BECKE97', 'BECKE_ROUSSEL': 'BECKE_ROUSSEL', 'LDA_HOLE_T_C_LR': 'LDA_HOLE_T_C_LR', 'PBE_HOLE_T_C_LR': 'PBE_HOLE_T_C_LR', 'GV09': 'GV09', 'BEEF': 'BEEF'}
        # Repeatable subsections and the wrapper class used for each new entry.
        self._repeated_subsections = {'LIBXC': '_libxc4', 'KE_LIBXC': '_ke_libxc4'}
        self._attributes = ['Section_parameters', 'LIBXC_list', 'KE_LIBXC_list']
    def LIBXC_add(self, section_parameters=None):
        """Append a new repeatable LIBXC subsection and return it.

        If *section_parameters* is given it is stored on the new subsection
        (when the wrapper exposes a Section_parameters attribute).
        """
        new_section = _libxc4()
        if section_parameters is not None:
            if hasattr(new_section, 'Section_parameters'):
                new_section.Section_parameters = section_parameters
        self.LIBXC_list.append(new_section)
        return new_section
    def KE_LIBXC_add(self, section_parameters=None):
        """Append a new repeatable KE_LIBXC subsection and return it."""
        new_section = _ke_libxc4()
        if section_parameters is not None:
            if hasattr(new_section, 'Section_parameters'):
                new_section.Section_parameters = section_parameters
        self.KE_LIBXC_list.append(new_section)
        return new_section
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
a108251d6955ab18aefd892dba107082ec3cb923 | 612e5a48a75121b741650d345d58a682c0a81285 | /graph/graphic_connection.py | 19540bbd9258914c1ac4f6bf67f3fe13646fdac0 | [] | no_license | BelowzeroA/stochastic-learning | e89f9f459219279eb97c53401295ec5202e11b0b | 62242fd7ca4a63cd7c908032e97368985b1b97c5 | refs/heads/master | 2021-01-25T14:45:27.057441 | 2018-03-05T19:35:21 | 2018-03-05T19:35:21 | 123,724,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,441 | py | from graphics import Line, Point
from math import sqrt
from brain.connection import Connection
from brain.neuron import Neuron
class GraphicConnection(Connection):
    """Connection subclass that renders its pulse state on the brain window."""

    def __init__(self, brain, source, target: Neuron):
        super(GraphicConnection, self).__init__(brain, source, target)
        # Last pulse state that was rendered; used to redraw only on change.
        self.prev_pulsing = False

    def update(self, draw=True):
        """Advance the connection state and redraw when the pulse state flips."""
        super(GraphicConnection, self).update()
        changed = self.prev_pulsing != self.pulsing
        if changed and draw:
            self.draw()
        self.prev_pulsing = self.pulsing

    def draw(self):
        """Draw the connection line plus a short thick head segment at the target."""
        color = 'red' if self.pulsing else 'black'
        src = self.source
        dst = self.target

        # Main body of the connection.
        body = Line(src.location, dst.location)
        body.setWidth(1)
        body.setFill(color)
        body.setOutline(color)
        body.draw(self.brain.win)

        # Offset a point back from the target along the line direction so the
        # thick head segment ends exactly at the target.
        run = dst.location.x - src.location.x
        rise = dst.location.y - src.location.y
        slope = rise / run if run != 0 else rise
        slope = abs(slope)
        head_len = 20
        sx = -1 if run < 0 else 1
        sy = -1 if rise < 0 else 1
        off_x = -sx * head_len / sqrt(slope ** 2 + 1)
        off_y = -sy * slope * head_len / sqrt(slope ** 2 + 1)
        # sleep(1)
        head_start = Point(dst.location.x + off_x, dst.location.y + off_y)
        head = Line(head_start, dst.location)
        head.setWidth(3)
        head.setFill(color)
        head.draw(self.brain.win)
| [
"striver8"
] | striver8 |
20bf8a60d72585f0a48a322755aa4788d0275de3 | 16ac9158781d2616141433df9be4820e6d998e03 | /src/eavatar.ava/ava/runtime/config.py | a5ce582b9c006054037c6cd74a3020d2f5fcad62 | [] | no_license | pombredanne/ava-srv | 0a357fb39d0179db0c0d545eb23d707d25b0e446 | 8acef33502d4bc3089f610f0b4ee33e7a5e779ae | refs/heads/master | 2020-12-31T05:56:07.741625 | 2015-03-06T06:29:56 | 2015-03-06T06:29:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,882 | py | # -*- coding: utf-8 -*-
"""
Configuration file reading/writing.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import logging.config
import os.path
from ConfigParser import SafeConfigParser
from ava.runtime import environ
# Locations of the agent, logging and packages configuration files.
AGENT_CONF = os.path.join(environ.conf_dir(), b'agent.ini')
LOGGING_CONF = os.path.join(environ.conf_dir(), b'logging.ini')
PACKAGES_CONF = os.path.join(environ.conf_dir(), b'packages.ini')
# The default configuration file is located at the base directory.
# These runtime directory paths are handed to the parsers (and to
# logging.config.fileConfig) so INI values can interpolate them.
_defaults = dict(base_dir=environ.base_dir(),
                 conf_dir=environ.conf_dir(),
                 data_dir=environ.data_dir(),
                 pkgs_dir=environ.pkgs_dir(),
                 logs_dir=environ.logs_dir())
class ConfigFile(SafeConfigParser):
    """A SafeConfigParser bound to one file on disk."""

    def __init__(self, filename, defaults=_defaults):
        SafeConfigParser.__init__(self, defaults)
        # Remember an absolute path so load/save work regardless of cwd.
        self.filename = os.path.abspath(filename)

    def load(self):
        """(Re)read options from the backing file."""
        self.read(self.filename)

    def save(self):
        """Write the current options back to the backing file."""
        with open(self.filename, 'wb') as out:
            self.write(out)
_agent = None
def agent(file=AGENT_CONF):
    """Return the process-wide agent configuration, building it on first call."""
    global _agent
    if not _agent:
        _agent = ConfigFile(file)
        # Pre-create the sections the runtime expects to exist.
        for section in ('agent', 'webfront', 'data', 'extension'):
            _agent.add_section(section)
        # set defaults for various sections.
        _agent.set('webfront', 'listen_port', '5000')
        _agent.set('webfront', 'listen_addr', '127.0.0.1')
        # loads more options from file.
        _agent.load()
    return _agent
_packages = None
def packages(file=PACKAGES_CONF):
    """Return the process-wide packages configuration, building it on first call."""
    global _packages
    if not _packages:
        _packages = ConfigFile(file)
        _packages.load()
    return _packages
# configure logging
# _defaults supplies runtime directory values that the INI file may
# interpolate (standard fileConfig defaults behavior).
logging.config.fileConfig(LOGGING_CONF, defaults=_defaults)
| [
"sam@eavatar.com"
] | sam@eavatar.com |
d67b1d1eb636e8c25af967b331aac4f245fb3869 | 58e09fac582a76428819e167e42e60765d11bb11 | /space/lib/python3.7/encodings/cp037.py | 1c28037206a7ea44682ad9e60780db25d2233c50 | [] | no_license | shanthimadugundi/DB_Project | 25eb2a0e7504f81484ad11c0fa9e902b038c85b4 | b5ba55af1bcddde164cecc60d331d615dd477165 | refs/heads/master | 2020-04-27T05:14:56.107466 | 2019-03-06T05:31:23 | 2019-03-06T05:31:23 | 174,075,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | /Users/shanthimadugundi/anaconda3/lib/python3.7/encodings/cp037.py | [
"shanthimadugundi@Shanthis-MacBook-Pro.local"
] | shanthimadugundi@Shanthis-MacBook-Pro.local |
afae17498d04492c414161c081d29bd04a00c86e | 32233aeda342ff6e107496caaf3c9be322ab80b2 | /06 Brute-Force/6.1 sum + recursiveSum.py | a17932df866565d47efd0e67da2fd83ce2a96581 | [] | no_license | kwr0113/Algo_Python | 1258ed3b71f7826ec55f2ff9e46a6485df6039bf | b17ad19ccdb1bfe979618af8c98f04c67f38495f | refs/heads/master | 2023-08-19T03:52:16.385548 | 2021-09-23T12:22:05 | 2021-09-23T12:22:05 | 407,837,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | # 1부터 n까지의 합을 계산하는 반복 함수와 재귀 함수
def ssum(n):
    """Return 1 + 2 + ... + n, accumulated with an explicit loop."""
    total = 0
    for value in range(1, n + 1):
        total += value
    return total
def recursiveSum(n):
    """Return 1 + 2 + ... + n recursively.

    Fix: the original base case only handled n == 1, so any call with
    n <= 0 recursed without bound until RecursionError.  n <= 0 now
    returns 0, matching the iterative ssum().
    """
    if n <= 0:
        return 0
    if n == 1:
        return 1
    return n + recursiveSum(n-1)
# Demo: both implementations agree (each line prints 55).
print(ssum(10))
print(recursiveSum(10))
| [
"kwr0113@gmail.com"
] | kwr0113@gmail.com |
563f2484acad0c35b453f0173217d3702400dd48 | dcb8f9c5739b00838ffa6bb2f9850e0e6f80312c | /hw1/minh_tran_hw1/minh_tran_task1.py | dbf8c8e8cb6dfd452cc68795d420470fbd89d7b1 | [] | no_license | trademark152/Data_Mining_USC | d56c6b51b523c0a8548e94b638d3155fe189dd4e | 124c17b9c55c3880b4c24815bcf369339f198e2b | refs/heads/master | 2020-09-26T09:31:50.250840 | 2019-12-06T02:30:27 | 2019-12-06T02:30:27 | 226,227,472 | 0 | 6 | null | null | null | null | UTF-8 | Python | false | false | 4,845 | py | """
Task1: Data Exploration (3 points)
You will explore the dataset, user.json, containing review information for this task, and you need to write
a program to automatically answer the following questions:
"""
## LIBRARIES
import json
from pyspark import SparkContext
import sys
## TO RUN CODE
""""
spark-submit hw1/minh_tran_task1.py yelp_dataset/testUser.json outputTask1.txt
"""
## (A) Find the total number of users (0.5 point)
def taskA(taskInput):
    """Count every user record in the RDD of raw JSON lines."""
    users = taskInput.map(lambda line: json.loads(line)).map(lambda rec: (rec["user_id"], 1))
    # user_id is unique per record, so the record count is the user count
    # (no groupByKey needed).
    total = users.count()
    return [("total_users", total)]
## (B) Find the average number of written reviews of all users (0.5 point)
def taskB(taskInput):
    """Average the review_count field over all users."""
    counts = taskInput.map(lambda line: json.loads(line)).map(lambda rec: (rec["user_id"], rec["review_count"]))
    # total reviews over total users
    total_reviews = counts.map(lambda pair: pair[1]).sum()
    total_users = counts.map(lambda pair: pair[0]).count()
    return [("avg_reviews", total_reviews/total_users)]
## (C) Find the number of distinct user names (0.5 point)
def taskC(taskInput):
    """Count how many distinct values the name field takes."""
    names = taskInput.map(lambda line: json.loads(line)).map(lambda rec: rec["name"])
    # names repeat, so deduplicate before counting
    distinct_names = names.distinct().count()
    return [("distinct_usernames", distinct_names)]
## (D) Find the number of users that joined yelp in the year 2011 (0.5 point)
def taskD(taskInput):
    """Count users whose yelping_since date falls in 2011."""
    joined = taskInput.map(lambda line: json.loads(line)).map(lambda rec: (rec["user_id"], rec["yelping_since"]))
    # yelping_since starts with the 4-digit year
    joined_2011 = joined.filter(lambda pair: pair[1][:4] == "2011")
    return [("num_users", joined_2011.count())]
## (E) Find Top 10 popular names and the number of times they appear (user names that appear the most number of times) (0.5 point)
def taskE(taskInput):
    """Top 10 most common user names with their occurrence counts."""
    name_ones = taskInput.map(lambda line: json.loads(line)).map(lambda rec: (rec["name"], 1))
    # sum the 1s per name, then order by descending count with ties broken
    # by ascending name
    ranked = name_ones.reduceByKey(lambda a, b: a + b).sortBy(lambda pair: (-pair[1], pair[0]), ascending=True)
    return ranked.take(10)
## (F) Find Top 10 user ids who have written the most number of reviews (0.5 point)
def taskF(taskInput):
    """Top 10 user ids ranked by review_count (ties broken by name)."""
    reviews = taskInput.map(lambda line: json.loads(line)).map(lambda rec: (rec["user_id"], (rec["review_count"], rec["name"])))
    # sort by descending review count, then ascending name, and keep
    # only (user_id, review_count) pairs
    ranked = reviews.sortBy(lambda pair: (-pair[1][0], pair[1][1]), ascending=True)
    return ranked.map(lambda pair: (pair[0], pair[1][0])).take(10)
if __name__ == "__main__":
    # ensure number of inputs is 3: py file, input file, output file
    if len(sys.argv)!= 3:
        print("This script requires 2 input arguments to run inputFile outputFile")
        # break it
        sys.exit(1)
    # import input and output file path from shell
    inputFile = sys.argv[1]
    outputFile = sys.argv[2]
    # create a spark context object using all available cores
    #conf = SparkConf().setAppName("INF553_HW1_MT").setMaster("local*]")
    sc = SparkContext("local[*]")
    # to simplify output
    sc.setLogLevel("ERROR")
    # get input file and import into the SparkContext object
    # (persist: the RDD is reused by all six tasks below)
    task1Input = sc.textFile(inputFile).persist()
    # answering task 1
    tA = taskA(task1Input)
    tB = taskB(task1Input)
    tC = taskC(task1Input)
    tD = taskD(task1Input)
    tE = taskE(task1Input)
    tF = taskF(task1Input)
    # output results based on given ordering
    # initiate output
    task1Output = {}
    task1Output[tA[0][0]] = tA[0][1] # rhs indexing because answer is [('total_users', 4)]
    task1Output[tB[0][0]] = tB[0][1]
    task1Output[tC[0][0]] = tC[0][1]
    task1Output[tD[0][0]] = tD[0][1]
    task1Output["top10_popular_names"] = tE
    task1Output["top10_most_reviews"] = tF
    # write out json files
    jsonOutputFile = json.dumps(task1Output)
    with open(outputFile,"w") as fileOut:
fileOut.write(jsonOutputFile) | [
"trademark152@gmail.com"
] | trademark152@gmail.com |
f805b12d7737a2384464edefbddf64219b9fd22a | 926fe08bf24a8335f9cec827b651a7c75dc9c000 | /extract_from_node_results.py | b3a61703210b440e7db9725207b38bb96f2dbb59 | [] | no_license | andycasey/ges-idr4-abundances | 71d9cfa38f5909b21c98626fa75cdda8e100cd97 | c0a74f90b5e45a20ef4b3337692014a2fbbcbfa1 | refs/heads/master | 2021-01-22T09:42:53.145128 | 2015-10-14T22:39:00 | 2015-10-14T22:39:00 | 42,256,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,036 | py |
""" Extract OACT results for Li and place them in the line_abundances table """
__author__ = 'Andy Casey <arc@ast.cam.ac.uk>'
import logging
import numpy as np
import release
logger = logging.getLogger("ges")
# We may need to do this many times...
# Run parameters: database alias, whether to purge previously-ingested rows,
# the transition being ingested (Li, ion stage 1, 6707.8), the source node,
# the measurement-type flag, and a placeholder analysis code.
database, remove_existing = ("arc", True)
element, ion, wavelength = ("Li", 1, 6707.8)
node, measurement_type, code = ("OACT", "S", "Unknown")
ges = release.DataRelease(database)
def safe_float(s):
    """Coerce *s* to float, mapping unparsable values to NaN."""
    try:
        return float(s)
    except (TypeError, ValueError):
        return np.nan
def safe_int(s):
    """Coerce *s* to int, mapping unparsable values to 0."""
    try:
        return int(s)
    except (TypeError, ValueError):
        return 0
# Remove any existing rows in line_abundances for this element from this node?
if remove_existing:
    logger.info("Deleting existing {0} {1} line abundances from {2}".format(
        element, ion, node))
    ges.execute("""DELETE FROM line_abundances WHERE TRIM(node) = %s
        AND TRIM(element) = %s AND ion = %s""", (node, element, ion))
    ges.commit()
# Get all the details from node results.
node_results = ges.retrieve_table("""SELECT * FROM node_results
    WHERE TRIM(node) = %s""", (node, ))
N = len(node_results)
for i, node_result_row in enumerate(node_results):
    # Create the spectrum_filename_stub
    # (keep only the characters that agree across every filename, then
    # drop the trailing 5 characters)
    filenames = node_result_row["filename"].strip().split("|")
    spectrum_filename_stub = ("".join([filenames[0][j] \
        for j in range(max(map(len, filenames))) \
        if len(set([item[j] for item in filenames])) == 1]))[:-5]
    # Create the new row of data.
    #li1 | upper_combined_li1 | e_li1 | nn_li1 | enn_li1 | nl_li1
    # Prefer the plain upper-limit column; fall back to the combined one
    # when this table does not carry it.
    upper_column = "upper_{0}{1}".format(element.lower(), ion)
    if upper_column not in node_result_row.dtype.names:
        upper_column = "upper_combined_{0}{1}".format(element.lower(), ion)
    line_abundance_row = {
        "abundance_filename": "GES_iDR4_WG11_{0}.fits".format(node),
        "spectrum_filename_stub": spectrum_filename_stub,
        "node": node,
        "cname": node_result_row["cname"],
        "code": code,
        "object": node_result_row["object"],
        "element": element,
        "ion": ion,
        "wavelength": wavelength,
        "ew": np.nan,
        "e_ew": np.nan,
        "upper_ew": 0,
        "abundance": safe_float(node_result_row["{0}{1}".format(element.lower(), ion)]),
        "e_abundance": safe_float(node_result_row["e_{0}{1}".format(element.lower(), ion)]),
        "upper_abundance": safe_int(node_result_row[upper_column]),
        "measurement_type": measurement_type,
    }
    line_abundance_row["scaled_abundance"] = line_abundance_row["abundance"]
    logger.debug("Inserting row {0}/{1} {2}".format(i + 1, N,
        line_abundance_row.items()))
    # Parameterized insert: column list and %(name)s placeholders are built
    # from the same dict so they always stay in sync.
    ges.execute("""INSERT INTO line_abundances({0}) VALUES ({1})""".format(
        ", ".join(line_abundance_row.keys()),
        ", ".join(["%({})s".format(_) for _ in line_abundance_row.keys()])),
        line_abundance_row)
    ges.commit()
logger.info("Done")
| [
"andycasey@gmail.com"
] | andycasey@gmail.com |
7573a486fe8a2675af898eda3e9751590a91a632 | a9652251346d469d4e6da48ca4e44438f3b6b65d | /neural_decoding/kalman_neural_decoding.py | 190d0d6f668cf144cd31e2548f1dd6c172878bb6 | [] | no_license | weihhh/python3-practice | 201023e214881fb0c08b8dd10e86c47a621b23e5 | 7e2b40b7152ef5bf6fe68264c8b1c0bbbb16c5eb | refs/heads/master | 2021-01-11T02:40:05.087986 | 2018-02-05T15:24:35 | 2018-02-05T15:24:35 | 70,913,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,799 | py | #Import standard packages
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from scipy import io
from scipy import stats
import pickle
#Import metrics
from metrics import get_R2
from metrics import get_rho
#Import decoder functions
from decoders import KalmanFilterDecoder
# Load the raw data.
data_folder=''# path to the folder where the data file lives
with open(data_folder+'example_data_s1.pickle','rb') as f:
    neural_data,vels_binned=pickle.load(f)
# Preprocess the raw data.
lag=0 #What time bin of spikes should be used relative to the output
#(lag=-1 means use the spikes 1 bin before the output), i.e. the lag can be tuned by hand
X_kf=neural_data
# Format the output data.
# (Original note, translated:) For the Kalman filter we use position,
# velocity and acceleration as outputs.  Ultimately only the velocity fit
# accuracy matters for this dataset, but including all of them as related
# covariates improves performance.
'''
对于卡尔曼滤波,我们使用位置,速度,加速度作为输出
最终,我们只关注速度的拟合准确度(对于这个数据集)
但是把它们全部最为相关数据可以提高性能
'''
# Derive position by integrating velocity.
pos_binned=np.zeros(vels_binned.shape) #Initialize
pos_binned[0,:]=0 #Assume starting position is at [0,0]
# Position in each bin = previous position + velocity * bin length.
for i in range(pos_binned.shape[0]-1):
    pos_binned[i+1,0]=pos_binned[i,0]+vels_binned[i,0]*.05 #Note that .05 is the length of the time bin
    pos_binned[i+1,1]=pos_binned[i,1]+vels_binned[i,1]*.05
# Derive acceleration by differencing velocity.
temp=np.diff(vels_binned,axis=0) # axis 0 = time bins, axis 1 = the (x, y) velocity components
acc_binned=np.concatenate((temp,temp[-1:,:]),axis=0) # assume the last bin's acceleration equals the second-to-last's by duplicating the final row
# Final output covariates: concatenate position(2) + velocity(2) + acceleration(2) columns.
y_kf=np.concatenate((pos_binned,vels_binned,acc_binned),axis=1)
num_examples=X_kf.shape[0]# number of time bins
#Re-align data to take lag into account (handles the manually chosen lag above)
if lag<0:
    y_kf=y_kf[-lag:,:]
    X_kf=X_kf[0:num_examples+lag,:]
if lag>0:
    y_kf=y_kf[0:num_examples-lag,:]
    X_kf=X_kf[lag:num_examples,:]
# Fractions of the data assigned to the training/testing/validation sets.
training_range=[0, 0.7]
testing_range=[0.7, 0.85]
valid_range=[0.85,1]
# Number of time bins after the lag re-alignment.
num_examples_kf=X_kf.shape[0]
# Convert the fractional ranges into concrete bin-index arrays.
#Note that each range has a buffer of 1 bin at the beginning and end
#This makes it so that the different sets don't include overlapping data
training_set=np.arange(np.int(np.round(training_range[0]*num_examples_kf))+1,np.int(np.round(training_range[1]*num_examples_kf))-1)
testing_set=np.arange(np.int(np.round(testing_range[0]*num_examples_kf))+1,np.int(np.round(testing_range[1]*num_examples_kf))-1)
valid_set=np.arange(np.int(np.round(valid_range[0]*num_examples_kf))+1,np.int(np.round(valid_range[1]*num_examples_kf))-1)
# (Translated reviewer note:) a few bins at the boundaries are dropped by the
# +1/-1 above — presumably to keep adjacent, correlated bins out of
# different sets.
#Get training data
X_kf_train=X_kf[training_set,:]
y_kf_train=y_kf[training_set,:]
#Get testing data
X_kf_test=X_kf[testing_set,:]
y_kf_test=y_kf[testing_set,:]
#Get validation data
X_kf_valid=X_kf[valid_set,:]
y_kf_valid=y_kf[valid_set,:]
# Normalization (statistics come from the training set only).
#Z-score inputs
X_kf_train_mean=np.nanmean(X_kf_train,axis=0)
X_kf_train_std=np.nanstd(X_kf_train,axis=0)
X_kf_train=(X_kf_train-X_kf_train_mean)/X_kf_train_std
X_kf_test=(X_kf_test-X_kf_train_mean)/X_kf_train_std
X_kf_valid=(X_kf_valid-X_kf_train_mean)/X_kf_train_std
#Zero-center outputs
y_kf_train_mean=np.mean(y_kf_train,axis=0)
y_kf_train=y_kf_train-y_kf_train_mean
y_kf_test=y_kf_test-y_kf_train_mean
y_kf_valid=y_kf_valid-y_kf_train_mean
#Declare model
model_kf=KalmanFilterDecoder(C=1) #There is one optional parameter that is set to the default in this example (see ReadMe)
#Fit model
model_kf.fit(X_kf_train,y_kf_train)
#Get predictions
y_valid_predicted_kf=model_kf.predict(X_kf_valid,y_kf_valid)
#Get metrics of fit (see read me for more details on the differences between metrics)
#First I'll get the R^2
R2_kf=get_R2(y_kf_valid,y_valid_predicted_kf)
print('R2:',R2_kf[2:4]) #I'm just printing the R^2's of the 3rd and 4th entries that correspond to the velocities
#Next I'll get the rho^2 (the pearson correlation squared)
rho_kf=get_rho(y_kf_valid,y_valid_predicted_kf)
print('rho2:',rho_kf[2:4]**2) #I'm just printing the rho^2's of the 3rd and 4th entries that correspond to the velocities
#As an example, I plot an example 1000 values of the x velocity (column index 2), both true and predicted with the Kalman filter
#Note that I add back in the mean value, so that both true and predicted values are in the original coordinates
fig_x_kf=plt.figure()
plt.plot(y_kf_valid[1000:2000,2]+y_kf_train_mean[2],'b')
plt.plot(y_valid_predicted_kf[1000:2000,2]+y_kf_train_mean[2],'r')
plt.show()
#Save figure
# fig_x_kf.savefig('x_velocity_decoding.eps') | [
"wz591757596@163.com"
] | wz591757596@163.com |
b51213646b02a5741d23ff6c94d22ab6f7e52add | 6550dceb5b2d17dfedf94c4049f63e770c8d7712 | /ffprobe3/ffprobe.py | f9e29250daba998461d685c77f44409a45a53291 | [] | no_license | open-speech-org/openspeechcorpus.com | 03baed28e54f15ece8b8050c501e8df6e641ab44 | e2e612cacab2e0458a44f3729738c5816f57dc8f | refs/heads/master | 2022-12-23T12:32:33.501336 | 2020-11-25T03:27:52 | 2020-11-25T03:27:52 | 171,372,750 | 0 | 1 | null | 2022-12-08T03:28:10 | 2019-02-18T23:42:25 | Python | UTF-8 | Python | false | false | 7,783 | py | """
Python wrapper for ffprobe command line tool. ffprobe must exist in the path.
"""
import os
import sys
import pipes
import platform
import re
import subprocess
from ffprobe3.exceptions import FFProbeError
class FFProbe:
    """
    FFProbe wraps the ffprobe command and pulls the data into an object form::
        metadata=FFProbe('multimedia-file.mov')

    Raises IOError when the ffprobe executable cannot be run or when the
    media file does not exist.
    """
    def __init__(self, video_file):
        self.video_file = video_file
        # Verify the ffprobe executable is reachable before doing any work.
        try:
            with open(os.devnull, 'w') as tempf:
                subprocess.check_call(["ffprobe", "-h"], stdout=tempf, stderr=tempf)
        except (OSError, subprocess.CalledProcessError):
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed and misreported as a missing binary.
            raise IOError('ffprobe not found.')
        if os.path.isfile(video_file):
            # Windows takes the argument list as-is; elsewhere a single
            # shell-quoted command string is used (shell=True either way).
            if str(platform.system()) == 'Windows':
                cmd = ["ffprobe", "-show_streams", self.video_file]
            else:
                cmd = ["ffprobe -show_streams " + pipes.quote(self.video_file)]
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
            # Container-level fields mirrored from the key=value output.
            self.format = None
            self.created = None
            self.duration = None
            self.start = None
            self.bit_rate = None
            self.sample_rate = None
            self.bits_per_sample = None
            self.channels = None
            self.streams = []
            self.video = []
            self.audio = []
            data_lines = []
            # [STREAM] ... [/STREAM] sections on stdout become FFStream
            # objects; selected key=value pairs are also copied onto self.
            for a in iter(p.stdout.readline, b''):
                a = a.decode('UTF-8')
                if re.match(r'\[STREAM\]', a):
                    data_lines = []
                elif re.match(r'\[/STREAM\]', a):
                    self.streams.append(FFStream(data_lines))
                    data_lines = []
                else:
                    # Split on the first '=' only, so values that contain
                    # '=' themselves are not truncated.
                    kvPair = a.strip().split('=', 1)
                    if len(kvPair) > 1:
                        if kvPair[0] == "codec_name":
                            self.format = kvPair[1]
                        elif kvPair[0] == "created":
                            self.created = kvPair[1]
                        elif kvPair[0] == "duration":
                            self.duration = float(kvPair[1])
                        elif kvPair[0] == "bit_rate":
                            self.bit_rate = int(kvPair[1])
                        elif kvPair[0] == "sample_rate":
                            self.sample_rate = int(kvPair[1])
                        elif kvPair[0] == "bits_per_sample":
                            self.bits_per_sample = int(kvPair[1])
                        elif kvPair[0] == "channels":
                            self.channels = int(kvPair[1])
                    data_lines.append(a)
            # Some ffprobe builds emit the stream sections on stderr instead.
            for a in iter(p.stderr.readline, b''):
                a = a.decode('UTF-8')
                if re.match(r'\[STREAM\]', a):
                    data_lines = []
                elif re.match(r'\[/STREAM\]', a):
                    self.streams.append(FFStream(data_lines))
                    data_lines = []
                else:
                    data_lines.append(a)
            p.stdout.close()
            p.stderr.close()
            # Classify parsed streams for convenient access.
            for a in self.streams:
                if a.is_audio():
                    self.audio.append(a)
                if a.is_video():
                    self.video.append(a)
        else:
            raise IOError('No such media file ' + video_file)
class FFStream:
"""
An object representation of an individual stream in a multimedia file.
"""
def __init__(self, data_lines):
for a in data_lines:
kvPair = a.strip().split('=')
if len(kvPair) > 1 :
self.__dict__[kvPair[0]] = kvPair[1]
def is_audio(self):
"""
Is this stream labelled as an audio stream?
"""
val = False
if self.__dict__['codec_type']:
if str(self.__dict__['codec_type']) == 'audio':
val = True
return val
def is_video(self):
"""
Is the stream labelled as a video stream.
"""
val = False
if self.__dict__['codec_type']:
if self.__dict__['codec_type'] == 'video':
val = True
return val
def is_subtitle(self):
"""
Is the stream labelled as a subtitle stream.
"""
val = False
if self.__dict__['codec_type']:
if self.__dict__['codec_type'] == 'subtitle':
val = True
return val
def frame_size(self):
"""
Returns the pixel frame size as an integer tuple (width,height) if the stream is a video stream.
Returns None if it is not a video stream.
"""
size = None
if self.is_video():
width = self.__dict__['width']
height = self.__dict__['height']
if width and height:
try:
size = (int(width), int(height))
except ValueError:
raise FFProbeError("None integer size %s:%s" % (width, height))
return size
def pixel_format(self):
"""
Returns a string representing the pixel format of the video stream. e.g. yuv420p.
Returns none is it is not a video stream.
"""
f = None
if self.is_video():
if self.__dict__['pix_fmt']:
f = self.__dict__['pix_fmt']
return f
def frames(self):
"""
Returns the length of a video stream in frames. Returns 0 if not a video stream.
"""
frame_count = 0
if self.is_video() or self.is_audio():
if self.__dict__['nb_frames']:
try:
frame_count = int(self.__dict__['nb_frames'])
except ValueError:
raise FFProbeError('None integer frame count')
return frame_count
def duration_seconds(self):
"""
Returns the runtime duration of the video stream as a floating point number of seconds.
Returns 0.0 if not a video stream.
"""
duration = 0.0
if self.is_video() or self.is_audio():
if self.__dict__['duration']:
try:
duration = float(self.__dict__['duration'])
except ValueError:
raise FFProbeError('None numeric duration')
return duration
def language(self):
"""
Returns language tag of stream. e.g. eng
"""
lang = None
if self.__dict__['TAG:language']:
lang = self.__dict__['TAG:language']
return lang
def codec(self):
"""
Returns a string representation of the stream codec.
"""
codec_name = None
if self.__dict__['codec_name']:
codec_name = self.__dict__['codec_name']
return codec_name
def codec_description(self):
"""
Returns a long representation of the stream codec.
"""
codec_d = None
if self.__dict__['codec_long_name']:
codec_d = self.__dict__['codec_long_name']
return codec_d
def codec_tag(self):
"""
Returns a short representative tag of the stream codec.
"""
codec_t = None
if self.__dict__['codec_tag_string']:
codec_t = self.__dict__['codec_tag_string']
return codec_t
def bit_rate(self):
"""
Returns bit_rate as an integer in bps
"""
b = 0
if self.__dict__['bit_rate']:
try:
b = int(self.__dict__['bit_rate'])
except ValueError:
raise FFProbeError('None integer bit_rate')
return b
| [
"ma0@contraslash.com"
] | ma0@contraslash.com |
3fc014bbb8096316c01d9b5024ecbfb3f9fd4c45 | 5f1e12cf84d02bbc4220ed11758752fed7cfd6c7 | /samples/basic/executor/models/cisco-ios-xr/Cisco-IOS-XR-snmp-test-trap-act/nc-execute-xr-snmp-test-trap-act-416-ydk.py | 71a0f1f9882b8624945da7f63b058d9a6bf42c89 | [
"Apache-2.0"
] | permissive | eliwilliams/ydk-py-samples | 6c3b8063848c8718910c7255256f7d3aee456974 | 40aa500e7d7ad05e960fb1552c73dab3adbc08c7 | refs/heads/master | 2021-07-25T01:21:26.442018 | 2017-11-06T17:23:05 | 2017-11-06T17:23:05 | 109,724,992 | 0 | 0 | null | 2017-11-06T17:07:40 | 2017-11-06T17:07:39 | null | UTF-8 | Python | false | false | 2,677 | py | #!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Execute RPC for model Cisco-IOS-XR-snmp-test-trap-act.
usage: nc-execute-xr-snmp-test-trap-act-416-ydk.py [-h] [-v] device
positional arguments:
device NETCONF device (ssh://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import ExecutorService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_snmp_test_trap_act \
as xr_snmp_test_trap_act
import logging
if __name__ == "__main__":
    """Execute main program."""
    # Parse -v/--verbose flag and the positional NETCONF device URL.
    parser = ArgumentParser()
    parser.add_argument("-v", "--verbose", help="print debugging messages",
                        action="store_true")
    parser.add_argument("device",
                        help="NETCONF device (ssh://user:password@host:port)")
    args = parser.parse_args()
    device = urlparse(args.device)
    # log debug messages if verbose argument specified
    if args.verbose:
        logger = logging.getLogger("ydk")
        logger.setLevel(logging.INFO)
        handler = logging.StreamHandler()
        formatter = logging.Formatter(("%(asctime)s - %(name)s - "
                                      "%(levelname)s - %(message)s"))
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    # create NETCONF provider
    # Connection details come from the parsed device URL components.
    provider = NetconfServiceProvider(address=device.hostname,
                                      port=device.port,
                                      username=device.username,
                                      password=device.password,
                                      protocol=device.scheme)
    # create executor service
    executor = ExecutorService()
    entity_fru_fan_tray_oper_status_up_rpc = xr_snmp_test_trap_act.EntityFruFanTrayOperStatusUpRpc() # create object
    # execute RPC on NETCONF device
    executor.execute_rpc(provider, entity_fru_fan_tray_oper_status_up_rpc)
    exit()
    # End of script
| [
"saalvare@cisco.com"
] | saalvare@cisco.com |
3349e2469816e9fe64b77eb914dc2f2d778d5f7f | b3ab2979dd8638b244abdb2dcf8da26d45d7b730 | /test/test_pagination_response_permission_set_response_model.py | ce8a5fe159bc67f84afe67dff0d2b4773ad93151 | [] | no_license | CU-CommunityApps/ct-cloudcheckr-cmx-client | 4b3d9b82c5dfdaf24f8f443526868e971d8d1b15 | 18ac9fd4d6c4ae799c0d21745eaecd783da68c0c | refs/heads/main | 2023-03-03T19:53:57.685925 | 2021-02-09T13:05:07 | 2021-02-09T13:05:07 | 329,308,757 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,159 | py | # coding: utf-8
"""
CloudCheckr API
CloudCheckr API # noqa: E501
OpenAPI spec version: v1
Contact: support@cloudcheckr.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cloudcheckr_cmx_client
from cloudcheckr_cmx_client.models.pagination_response_permission_set_response_model import PaginationResponsePermissionSetResponseModel # noqa: E501
from cloudcheckr_cmx_client.rest import ApiException
class TestPaginationResponsePermissionSetResponseModel(unittest.TestCase):
    """PaginationResponsePermissionSetResponseModel unit test stubs"""
    # Auto-generated by swagger-codegen; the stub bodies are placeholders
    # to be filled in with real model construction and assertions.
    def setUp(self):
        # No fixtures required for this generated stub.
        pass
    def tearDown(self):
        pass
    def testPaginationResponsePermissionSetResponseModel(self):
        """Test PaginationResponsePermissionSetResponseModel"""
        # FIXME: construct object with mandatory attributes with example values
        # model = cloudcheckr_cmx_client.models.pagination_response_permission_set_response_model.PaginationResponsePermissionSetResponseModel() # noqa: E501
        pass
if __name__ == '__main__':
    unittest.main()
| [
"pea1@cornell.edu"
] | pea1@cornell.edu |
8b28f3977b55bf2426d6d91b6eebfd7d27177db7 | 9b32771b7d1513ee37bc62dd347675abcfc1bfc9 | /example_snippets/multimenus_snippets/NewSnippets/NumPy/Pretty printing/Formatting functions for specific dtypes/Set formatter for `int` type.py | 43cc777513adf98a89e3dab51b28036320290cf7 | [
"BSD-3-Clause"
] | permissive | listar0810/jupyterlab-snippets-multimenus | 44087ef1aeb030a3074862a337508b57d50072c6 | 477f51cfdbad7409eab45abe53cf774cd70f380c | refs/heads/master | 2022-12-12T18:19:25.221083 | 2020-09-08T01:11:01 | 2020-09-08T01:11:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | def format_int(x):
return 'int({0})'.format(x)
with printoptions(formatter={'int': format_int}):
print(np.random.randint(-3, 4, 10)) | [
"kptan86@gmail.com"
] | kptan86@gmail.com |
b7c426a31e6b5ddbc89e14288c66694f2ec8c368 | a60e81b51935fb53c0900fecdadba55d86110afe | /python/note/改善python程序的91个建议.py | 96231cef18a927a9f58d3e1b9e81c3ecd2cbf6bd | [] | no_license | FrankieZhen/Lookoop | fab6855f5660467f70dc5024d9aa38213ecf48a7 | 212f8b83d6ac22db1a777f980075d9e12ce521d2 | refs/heads/master | 2020-07-27T08:12:45.887814 | 2019-09-16T11:48:20 | 2019-09-16T11:48:20 | 209,021,915 | 1 | 0 | null | 2019-09-17T10:10:46 | 2019-09-17T10:10:46 | null | UTF-8 | Python | false | false | 1,535 | py | # coding=utf-8
# 2019-1-28
# 改善python程序的91个建议
# %占位符
value = {'name':'yauno', 'sex':'man'}
print('name %(name)s , sex %(sex)s' % value)
# str.format
# ' 与 "的区别
print('"test"')
print("\"test\"")
# 常量的管理
# 12: 不推荐使用type来进行检查
# isinstance(object, classoinfo)
print(isinstance('string', str))
# 13. 涉及除法运算时,尽量先将操作数转换为浮点类型再做运算
# 14. 警惕使用eval()的安全漏洞
# 17. unicode
# 在2.6之后可以使用 import_unicode_literals自动将定义的普通字符识别为Uicode字符串, 这样字符串的行为将保持和pythoh3一致
# 19. import
# (1) 命名空间的冲突
# (2) 循环嵌套导入问题: 不使用 from .. import ... 直接使用 import ...
# 21. ++i 与 i += 1
# 23. else
# (1)
def print_prime(n):
for i in range(n):
for j in range(2, i):
if i % j == 0:
break # 这里终止后不执行后面打印操作
else:
print("%s is prime." % i) # 内嵌for 循环正常执行完后执行打印操作
print_prime(10)
# (2)
try:
pass
except:
pass
else:
pass
finally:
pass
# 25. finally
def finally_test(a):
try:
print("\ntesting...")
if a <= 0:
raise ValueError("data can not be negative.")
else:
return a
except ValueError as e:
print("%s" % e)
finally:
print("end")
return -1
for i in range(-1, 2):
ret = finally_test(i) # 最后返回永远都是-1, 因为返回a之前要执行finall, 而finally直接就返回了-1
print("return value: %s" % ret) | [
"33798487+YangXiaoo@users.noreply.github.com"
] | 33798487+YangXiaoo@users.noreply.github.com |
e75de761a1fb602ca147a0559319f2c10eb8981b | 9dba8607dce414f9905700d7a4ac44668de5e1f1 | /ave_SR/PS_100_7_recto/calc_voladzExtr/directs.py | a1cd56e746b990c37d2ab7486afbdfc10eb2a5c6 | [] | no_license | anaiortega/XCmodels | c0463ffe38531578aee281456e88528882255cd7 | e9b8c2f996a21b8aa3314242f3cc12b0e391b5df | refs/heads/master | 2023-08-16T22:44:01.168775 | 2023-08-14T18:15:10 | 2023-08-14T18:15:10 | 141,140,177 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | dir_int_forces='../results_voladzExtr/internalForces/'
dir_checks='../results_voladzExtr/verifications/'
| [
"ana.Ortega.Ort@gmail.com"
] | ana.Ortega.Ort@gmail.com |
aca8dd05d90354b8c7b9f7084c40e115a9c3fb42 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_jinn.py | 5f214987ef38459b4a3fc545f79b4395f6eb6f08 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py |
#calss header
class _JINN():
def __init__(self,):
self.name = "JINN"
self.definitions = [u'in Arab and Muslim traditional stories, a magical spirit who may appear in the form of a human or an animal and can take control of a person ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
f34128088a213795dfa4a912f86cdfc5140eff13 | 929a816fc299959d0f8eb0dd51d064be2abd6b78 | /LintCode/ladder 08 memorized search/必修/683. Word Break III/solution.py | 11811522a165c1860914fd748163d4584a24de2f | [
"MIT"
] | permissive | vincent507cpu/Comprehensive-Algorithm-Solution | 27940da7bc0343921930a2eafbd649da93a5395d | 04e01e49622457f09af2e1133954f043c0c92cb9 | refs/heads/master | 2023-07-20T07:12:15.590313 | 2021-08-23T23:42:17 | 2021-08-23T23:42:17 | 258,644,691 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | class Solution:
"""
@param: : A string
@param: : A set of word
@return: the number of possible sentences.
"""
def wordBreak3(self, s, dict):
# Write your code here
if not s or not dict:
return 0
lower_dict = set()
for piece in dict:
lower_dict.add(piece.lower())
max_len = max([len(piece) for piece in dict])
return self.memo_search(s.lower(), lower_dict, 0, max_len, {})
def memo_search(self, s, dict, index, max_len, memo):
if index == len(s):
return 1
if index in memo:
return memo[index]
memo[index] = 0
for i in range(index, len(s)):
if i + 1 - index > max_len:
break
word = s[index:i + 1]
if word not in dict:
continue
memo[index] += self.memo_search(s, dict, i + 1, max_len, memo)
return memo[index] | [
"vincent507cpu@gmail.com"
] | vincent507cpu@gmail.com |
7911dfbc4c035eaa8f13d0d5a1931adb62c0bb1f | 72357e298521452cfa3d9ca960235e6ddf1dfe46 | /imsize.py | de19438293a2d32fb248522d0e2440e21c16b107 | [] | no_license | pydemo/project-and-sketch | 25b8fbdc1e85773b9aa150d8f63a0e7ced4c1a13 | 10397f3d40d117c15143ce3deb9bc8bf530c2269 | refs/heads/master | 2020-09-01T16:52:08.478915 | 2019-11-29T18:19:11 | 2019-11-29T18:19:11 | 219,009,137 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,819 | py | # -*- coding: utf-8 -*-
import struct
import imghdr
def test_jpeg(h, f):
# SOI APP2 + ICC_PROFILE
if h[0:4] == '\xff\xd8\xff\xe2' and h[6:17] == b'ICC_PROFILE':
print "A"
return 'jpeg'
# SOI APP14 + Adobe
if h[0:4] == '\xff\xd8\xff\xee' and h[6:11] == b'Adobe':
return 'jpeg'
# SOI DQT
if h[0:4] == '\xff\xd8\xff\xdb':
return 'jpeg'
imghdr.tests.append(test_jpeg)
def get_image_size(fname):
'''Determine the image type of fhandle and return its size.
from draco'''
with open(fname, 'rb', ) as fhandle:
head = fhandle.read(24)
if len(head) != 24:
return
what = imghdr.what(None, head)
if what == 'png':
check = struct.unpack('>i', head[4:8])[0]
if check != 0x0d0a1a0a:
return
width, height = struct.unpack('>ii', head[16:24])
elif what == 'gif':
width, height = struct.unpack('<HH', head[6:10])
elif what == 'jpeg':
try:
fhandle.seek(0) # Read 0xff next
size = 2
ftype = 0
while not 0xc0 <= ftype <= 0xcf or ftype in (0xc4, 0xc8, 0xcc):
fhandle.seek(size, 1)
byte = fhandle.read(1)
while ord(byte) == 0xff:
byte = fhandle.read(1)
ftype = ord(byte)
size = struct.unpack('>H', fhandle.read(2))[0] - 2
# We are at a SOFn block
fhandle.seek(1, 1) # Skip `precision' byte.
height, width = struct.unpack('>HH', fhandle.read(4))
except Exception: #IGNORE:W0703
return
else:
return
return width, height
get_image_size('test.JPG') | [
"olek.buzu@gmail.com"
] | olek.buzu@gmail.com |
d5c0f577677ecd87a61b9a3767430c83a25b4e9c | 64cdb9e8fdcde8a71a16ce17cd822441d9533936 | /_baekjoon/1507_궁금한 민호(플로이드워샬).py | 2413850d526ea506a6078831c86eb5db2427d6ec | [] | no_license | heecheol1508/algorithm-problem | fa42769f0f2f2300e4e463c5731e0246d7b7643c | 6849b355e15f8a538c9a071b0783d1789316d29d | refs/heads/main | 2023-07-20T23:46:07.037975 | 2021-08-31T12:47:33 | 2021-08-31T12:47:33 | 302,830,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 867 | py | import sys
sys.stdin = open('input.txt', 'r')
N = int(input())
board = [list(map(int, input().split())) for _ in range(N)]
visit = [[True] * N for _ in range(N)]
flag = True
for k in range(N):
for i in range(N):
if i != k:
for j in range(N):
if j != k and i != j:
if board[i][j] > board[i][k] + board[k][j]:
flag = False
break
elif board[i][j] == board[i][k] + board[k][j] and visit[i][j] is True:
visit[i][j] = False
if flag is False:
break
if flag is False:
break
if flag is False:
print(-1)
else:
result = 0
for i in range(N - 1):
for j in range(i + 1, N):
if visit[i][j] is True:
result += board[i][j]
print(result)
| [
"heecheol1508@gmail.com"
] | heecheol1508@gmail.com |
5f90fc208121793359d9af378f9e0dbd53d87fea | 297d045a587f354b96cf493dff9a2e719739715d | /pysimplehttp/scripts/ps_to_sq.py | 3f77d7130a785960f715c81420767267909b9502 | [
"MIT"
] | permissive | liaojack8/simplehttp | 4805aef2f72dae9e2ce7eeb3f801818b0c66af43 | 1dbdea11276bc21915fc133fd9893a738654c240 | refs/heads/master | 2022-11-29T06:03:33.027824 | 2020-07-18T14:58:04 | 2020-07-18T14:58:04 | 280,678,089 | 0 | 0 | MIT | 2020-07-18T14:55:08 | 2020-07-18T14:55:07 | null | UTF-8 | Python | false | false | 3,792 | py | #!/usr/bin/env python
"""
generic pubsub to simplequeue daemon that takes command line arguments:
--pubsub-url=<http://127.0.0.1:8090/sub>
(multiple) --simplequeue-url=<http://127.0.0.1:6000>
when multiple destination simplequeue arguments are specified, the daemon will
randomly choose one endpoint to write a message to
"""
import logging
import tornado.httpclient
import tornado.options
import sys
import urllib
import random
try:
import ujson as json
except ImportError:
import json
from pysimplehttp.pubsub_reader import PubsubReader
class PubsubToSimplequeue(PubsubReader):
def __init__(self, simplequeue_urls, filter_require, filter_exclude, **kwargs):
assert isinstance(simplequeue_urls, (list, tuple))
self.simplequeue_urls = simplequeue_urls
self.filter_require = dict([data.split('=', 1) for data in filter_require])
for key, value in self.filter_require.items():
logging.info("requiring json key=%s value=%s" % (key, value) )
self.filter_exclude = dict([data.split('=', 1) for data in filter_exclude])
for key, value in self.filter_exclude.items():
logging.info("excluding json key=%s value=%s" % (key, value) )
self.http = tornado.httpclient.AsyncHTTPClient()
super(PubsubToSimplequeue, self).__init__(**kwargs)
def http_fetch(self, url, params, callback, headers={}):
url += '?' + urllib.urlencode(params)
req = tornado.httpclient.HTTPRequest(url=url,
method='GET',
follow_redirects=False,
headers=headers,
user_agent='ps_to_sq')
self.http.fetch(req, callback=callback)
def _finish(self, response):
if response.code != 200:
logging.info(response)
def callback(self, data):
"""
handle a single pubsub message
"""
if not data or len(data) == 1:
return
assert isinstance(data, str)
if self.filter_require or self.filter_exclude:
try:
msg = json.loads(data)
except Exception:
logging.error('failed json.loads(%r)' % data)
return
for key, value in self.filter_require.items():
if msg.get(key) != value:
return
for key, value in self.filter_exclude.items():
if msg.get(key) == value:
return
endpoint = random.choice(self.simplequeue_urls) + '/put'
self.http_fetch(endpoint, dict(data=data), callback=self._finish)
if __name__ == "__main__":
tornado.options.define('pubsub_url', type=str, default="http://127.0.0.1:8080/sub?multipart=0", help="url for pubsub to read from")
tornado.options.define('simplequeue_url', type=str, multiple=True, help="(multiple) url(s) for simplequeue to write to")
tornado.options.define('filter_require', type=str, multiple=True, help="filter json message to require for key=value")
tornado.options.define('filter_exclude', type=str, multiple=True, help="filter json message to exclude for key=value")
tornado.options.parse_command_line()
if not tornado.options.options.pubsub_url:
sys.stderr.write('--pubsub-url requrired\n')
sys.exit(1)
if not tornado.options.options.simplequeue_url:
sys.stderr.write('--simplequeue-url requrired\n')
sys.exit(1)
reader = PubsubToSimplequeue(
simplequeue_urls=tornado.options.options.simplequeue_url,
filter_require=tornado.options.options.filter_require,
filter_exclude=tornado.options.options.filter_exclude,
pubsub_url=tornado.options.options.pubsub_url
)
reader.start()
| [
"jehiah@gmail.com"
] | jehiah@gmail.com |
b5c10e61e40776f6dadc1d4661b2e717404b230c | 2f6817fc8f6ddb48f5f88c913d8e40b672fc3dbf | /Mining/lec3-4.py | 6d0fc9279955b2a3c15ab6c0cd0159695c4fda73 | [] | no_license | cutz-j/TodayILearned | 320b5774de68a0f4f68fda28a6a8b980097d6ada | 429b24e063283a0d752ccdfbff455abd30ba3859 | refs/heads/master | 2020-03-23T17:34:51.389065 | 2018-11-24T08:49:41 | 2018-11-24T08:49:41 | 141,865,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,080 | py | from tkinter import *
from tkinter.simpledialog import *
from tkinter.filedialog import *
# 함수선언
def editFile(num):
if num == 1:
value = askinteger('제목', '설명-->', minvalue=1, maxvalue=255)
label1.configure(text=str(value))
def openFile():
fileName = askopenfilename(parent=window, filetypes=(("GIF 파일", "*.gif"), ("모든 파일", "*.*")))
label1.configure(text=fileName)
photo = PhotoImage(file = fileName)
pLabel.configure(image=photo)
pLabel.image = photo
# 변수선언
window = None
# main
window = Tk()
window.title("Memo")
window.geometry("700x700")
mainMenu = Menu(window)
window.config(menu=mainMenu)
label1 = Label(window)
label1.pack()
fileMenu = Menu(mainMenu)
mainMenu.add_cascade(label="파일(F)", menu=fileMenu)
fileMenu.add_command(label="새로만들기(N)")
fileMenu.add_command(label="열기(O)...", command=lambda : openFile())
fileMenu.add_command(label="저장(S)")
fileMenu.add_command(label="다른이름으로 저장(A)...")
fileMenu.add_separator()
fileMenu.add_command(label="페이지 설정(U)...")
fileMenu.add_command(label="인쇄(P)")
fileMenu.add_separator()
fileMenu.add_command(label="끝내기(X)")
fileMenu2 = Menu(mainMenu)
mainMenu.add_cascade(label="편집(E)", menu=fileMenu2)
fileMenu2.add_command(label="실행취소(U)")
fileMenu2.add_separator()
fileMenu2.add_command(label="잘라내기(T)")
fileMenu2.add_command(label="복사(C)", command=lambda : editFile(1))
fileMenu2.add_command(label="붙여넣기(P)", command=lambda : editFile(2))
fileMenu2.add_command(label="삭제(L)", command=lambda : editFile(3))
fileMenu2.add_separator()
fileMenu2.add_command(label="찾기(F)")
fileMenu2.add_command(label="다음 찾기(N)")
fileMenu2.add_command(label="바꾸기(R)")
fileMenu2.add_command(label="이동(G)")
fileMenu2.add_separator()
fileMenu2.add_command(label="모두 선택(A)")
fileMenu2.add_command(label="시간/날짜(D)")
# 빈 사진 준
photo = PhotoImage()
pLabel = Label(window, image=photo)
pLabel.pack(expand = 3, anchor = CENTER)
window.mainloop() | [
"cutz309@gmail.com"
] | cutz309@gmail.com |
dcf2ca8eb4d1386d15d7d71c29f0837616c7b8a3 | dc1d341789a19b0dd8b905538b080149e6cd13ed | /iwmiproject/migrations/0158_yieldplantlevel_crop.py | 252f49c5a654262d3b72ffd24869389f1cbd8651 | [] | no_license | pngimbwa/Data-Collection-Tool | 2629d31742edc311501bd25a2f9728ce7ac97d06 | 6081500d2a1dc2e30af908168cf83a46a6078a0f | refs/heads/master | 2021-01-20T00:01:45.770837 | 2017-04-22T12:31:07 | 2017-04-22T12:31:07 | 89,068,181 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('iwmiproject', '0157_yieldfarmlevel_crop'),
]
operations = [
migrations.AddField(
model_name='yieldplantlevel',
name='Crop',
field=models.ForeignKey(blank=True, null=True, verbose_name='Crop', to='iwmiproject.Crop'),
),
]
| [
"pngimbwa6@gmail.com"
] | pngimbwa6@gmail.com |
d5ecabadea34f17640fec743414710537061740d | 8f615c636420f969afaf54fc3bf318028ab5819e | /python_data_wrangling/sast_data_convert.py | d4eda0cff9d49bf38610981f7dd9d61993fbf6ac | [] | no_license | sheltowt/application_security_data_visualizations | a20eff2c8c93b7e03720b6f287b7a1a1f424b351 | f6950d2115d09e9f0e79313d802b9ac873944050 | refs/heads/master | 2022-10-30T04:14:38.495658 | 2020-06-19T16:35:10 | 2020-06-19T16:35:10 | 266,594,130 | 18 | 0 | null | 2020-06-15T18:55:04 | 2020-05-24T17:47:23 | HTML | UTF-8 | Python | false | false | 1,167 | py | import json
with open('../raw_data/appsecco_dvna.json') as json_file:
data = json.load(json_file)
modified_object = {}
modified_object["name"] = "DVNA sast scan"
modified_object["children"] = []
for result in data["runs"][0]["results"]:
new_result = {}
new_result["name"] = result["message"]["text"]
new_result["children"] = []
modified_object["children"].append(new_result)
for result in data["runs"][0]["results"]:
for mod_obj in modified_object["children"]:
if result["message"]["text"] == mod_obj["name"]:
new_child = {}
new_child["name"] = result["locations"][0]["physicalLocation"]["artifactLocation"]["uri"]
new_child["startLine"] = result["locations"][0]["physicalLocation"]["region"]["startLine"]
new_child["size"] = 1
mod_obj["children"].append(new_child)
unique_child_name = []
unique_children = []
for index, child in enumerate(modified_object["children"]):
if child["name"] in unique_child_name:
pass
else:
unique_child_name.append(child["name"])
unique_children.append(child)
modified_object["children"] = unique_children
with open('../public/dvna_sast.json', 'w') as outfile:
json.dump(modified_object, outfile) | [
"sheltowt@gmail.com"
] | sheltowt@gmail.com |
52c32b83c0116f75bd3a04d268912c811b5a0e60 | eeec2adfe1ca4e8cf5e7a0be9eaab2497df25861 | /erudit_catalog/checks.py | 0f45121f5d689aa777705d5ced3a2650f458c953 | [
"BSD-3-Clause"
] | permissive | fabiobatalha/erudit-ps-packtools-plugin | 3ecddab7835a25df44cbc00b228f241fae231155 | 29fabd087b8d8406b96d0b7296386f78da34aaeb | refs/heads/master | 2020-03-15T01:28:04.287126 | 2018-11-21T19:28:07 | 2018-11-21T19:28:07 | 131,894,325 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,849 | py | #coding: utf-8
from __future__ import unicode_literals
import logging
import itertools
import json
import plumber
from packtools.style_errors import StyleError
from packtools.catalogs import catalog
LOGGER = logging.getLogger(__name__)
with open(catalog.ISO3166_CODES) as f:
ISO3166_CODES_SET = set(json.load(f))
# --------------------------------
# Basic functionality
# --------------------------------
@plumber.filter
def setup(message):
"""Prepare the message to traverse the pipeline.
The input `message` is an `etree` instance. The pipeline will inspect
this etree and append the errors on an errors list. This errors list
is instantiated at this setup pipe.
"""
return message, []
@plumber.filter
def teardown(message):
"""Finalize the processing pipeline and return the errors list.
"""
_, err_list = message
return err_list
def StyleCheckingPipeline():
"""Factory for style checking pipelines.
"""
return plumber.Pipeline(setup, doctype, country_code, teardown)
@plumber.filter
def doctype(message):
"""Make sure the DOCTYPE declaration is present.
"""
et, err_list = message
if not et.docinfo.doctype:
err = StyleError()
err.message = "Missing DOCTYPE declaration."
err_list.append(err)
return message
@plumber.filter
def country_code(message):
"""Check country codes against iso3166 alpha-2 list.
"""
et, err_list = message
elements = et.findall('//*[@country]')
for elem in elements:
value = elem.attrib['country']
if value not in ISO3166_CODES_SET:
err = StyleError()
err.line = elem.sourceline
err.message = "Element '%s', attribute country: Invalid country code \"%s\"." % (elem.tag, value)
err_list.append(err)
return message
| [
"fabiobatalha@gmail.com"
] | fabiobatalha@gmail.com |
a3ce2a229b18dafa49e9ae81174f429d22c71cc6 | a1b7c1357181320b272ef4c72b70d22600a407c1 | /examples/test_get_locale_code.py | 978cee5e42bcf637ac20361309a79c5c225e4623 | [
"MIT"
] | permissive | BarryYBL/SeleniumBase | 5c96e21eaebd45e2f6ac26d5bd563b3ba300e6f6 | e3cb810331183fa003cea8af81057e4136dfd660 | refs/heads/master | 2022-12-04T11:34:20.134294 | 2020-08-28T05:45:24 | 2020-08-28T05:45:24 | 290,998,663 | 1 | 0 | MIT | 2020-08-28T08:52:44 | 2020-08-28T08:52:44 | null | UTF-8 | Python | false | false | 387 | py | from seleniumbase import BaseCase
class LocaleTestClass(BaseCase):
def test_get_locale_code(self):
self.open("data:,")
locale_code = self.get_locale_code()
message = '\nLocale Code = "%s"' % locale_code
print(message)
self.set_messenger_theme(
theme="flat", location="top_center")
self.post_message(message, duration=4)
| [
"mdmintz@gmail.com"
] | mdmintz@gmail.com |
204f1f4fafb9264a0cf66934ec779f4e94f7674c | a09740e643d6277ada23c82d8e87853a1cd1a9e5 | /oProto/omsql/wipdev/bycols_inupd.py | 68d0417689a3a5d61efb19227176666e68519131 | [
"Apache-2.0"
] | permissive | FuckBrains/omEngin | c5fb011887c8b272f9951df3880a879456f202e8 | b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195 | refs/heads/main | 2023-03-20T18:27:53.409976 | 2021-03-14T15:50:11 | 2021-03-14T15:50:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,740 | py | import pandas as pd
import numpy as np
import os
def drop_cols(df, col2drop = []):
if len(col2drop) > 0:
cols = df.columns.to_list()
ncols = []
for i in range(len(cols)):
match = 0
for j in range(len(col2drop)):
if cols[i] == col2drop[j]:
match = 1
if match == 0:
ncols.append(cols[i])
ndf = df[ncols]
return ndf
else:
return df
def qrybuilt(tbl, ndf, bycol, oncols = False):
dfx = drop_cols(ndf, bycol)
ncols = dfx.columns.to_list()
lsqry = []
for i in range(len(ndf)):
x = ''
y = ''
for j in range(len(bycol)):
x1 = str(bycol[j]) + "='" + str(ndf.loc[i, bycol[j]]) + "'"
if x == '':
x = x1
else:
x = x + " and " + x1
for n in range(len(ncols)):
if oncols == False:
a1 = str(ncols[n])
a2 = "'" + str(ndf.loc[i, ncols[n]]) + "'"
if y == '':
y = a1 + '=' + a2
else:
y = y + "," + a1 + '=' + a2
else:
a1 = str(ncols[n])
mat = 0
for j in range(len(oncols)):
if oncols[j] == a1:
mat = 1
break
if mat == 1:
a2 = "'" + str(ndf.loc[i, ncols[n]]) + "'"
if y == '':
y = a1 + '=' + a2
else:
y = y + "," + a1 + '=' + a2
qry = "update " + tbl + ' set ' + y + ' Where ' + x
lsqry.append(qry)
return lsqry
def CheckExist(conn , tbl, colname, values):
qry = "select * from " + tbl + " where " + colname + "='" + values + "'"
dfx = pd.read_sql(qry, conn)
rw = dfx.shape[0]
return rw
def get_key(my_dict, val):
for value, key in my_dict.items():
if value == val:
return key
def modstr(strval):
if isinstance(strval, str):
s1 = strval.replace("'","\\'")
s2 = s1.replace(":","\\:")
return s2
def insert_into_sql(tbl, tbl_property, lscol, lsval):
col = ''
val = ''
dic = tbl_property
if isinstance(lscol, list) and isinstance(lsval, list) and len(lscol) == len(lsval):
for i in range(len(lscol)):
valmod = ''
try:
if lsval[i] != '' and lsval[i] is not None:
dtype = get_key(dic,lscol[i])
if dtype == 'text' or dtype == 'varchar':
valmod = modstr(lsval[i])
else:
valmod = str(lsval[i])
if val == '':
col = lscol[i]
val = "'" + valmod + "'"
else:
col = col + ',' + lscol[i]
val = val + ',' + "'" + valmod + "'"
else:
pass
except:
pass
qry = "insert into " + tbl + " (" + col + ") values (" + val + ")"
return qry
else:
return ""
def prep_update(lscol,lsval):
hp = ''
stval = ''
if isinstance(lscol, list) and isinstance(lsval, list):
if len(lscol) == len(lsval):
for i in range(len(lscol)):
if lsval[i] is not None:
if isinstance(lsval[i],str):
xxx1 = lsval[i].replace("'","\\'")
stval = xxx1.replace(":","\\:")
else:
stval = str(lsval[i])
x = str(lscol[i]) + "='" + stval + "'"
if hp == '' and len(stval) > 0 :
hp = x
else:
if len(stval) > 0:
hp = hp + ',' + x
else:
pass
else:
pass
else:
print('num of col and value are not same')
return hp
elif isinstance(lscol, str) and isinstance(lsval, str):
hp = ""
comma = lsval.count(',')
invertcomma = lsval.count("'")
if invertcomma == (comma+1)*2:
x1 = lscol.split(',')
x2 = lsval.split(',')
print(x1,x2)
for i in range(len(x1)):
x = x1[i] + "=" + x2[i]
if hp == '':
hp = x
else:
hp = hp + ',' + x
if invertcomma <= 2:
x1 = lscol.split(',')
x2 = lsval.split(',')
for i in range(len(x1)):
x = str(x1[i]) + "='" + str(x2[i]) + "'"
if hp == '':
hp = x
else:
hp = hp + ',' + x
return hp
def UPIN(df, tbl, conn, bycols, oncols = False, operation = "and"):
cr = conn.cursor()
if isinstance(bycols, list):
xdf = None
bydf = df[bycols]
ndf = drop_cols(df, bycols)
if oncols:
xdf = ndf[oncols]
else:
xdf = ndf
fcols = xdf.columns.to_list()
fcols_pbycol = xdf.columns.to_list()
for n in range(len(bycols)):
fcols_pbycol.append(bycols[n])
dfup = df[fcols_pbycol]
x = ''
#print(fcols, fcols_pbycol, len(fcols), len(fcols_pbycol))
lsqry = []
for i in range(len(df)):
x = ''
for j in range(len(bycols)):
lss = bycols[j]
lsv = df.loc[i,lss]
st = str(lss) + "='" + str(lsv) + "'"
if x == '':
x = st
else:
x = x + " " + operation + " " + st
qr = "select * from " + tbl + " where " + x
dfx = pd.read_sql(qr, conn)
rw = dfx.shape[0]
ls = []
if rw != 0:
for n in range(len(fcols)):
ls.append(df.loc[i, fcols[n]])
qry = "update " + tbl + ' set ' + prep_update(fcols,ls) + ' where ' + x
else:
for n in range(len(fcols_pbycol)):
ax = df.loc[i, fcols_pbycol[n]]
ls.append(ax)
qry = "insert into " + tbl + ' ' + insert_into_sql(fcols_pbycol,ls)
cr.execute(qry)
lsqry.append(qry)
conn.commit()
print('update done for ', len(lsqry), ' rows ')
return lsqry
elif isinstance(bycols, str):
xdf = None
byc = df[bycols].values.tolist()
ndf = drop_cols(df, [bycols])
if oncols:
xdf = ndf[oncols]
else:
xdf = ndf
fcols = xdf.columns.to_list()
fcols_pbycol = xdf.columns.to_list()
fcols_pbycol.append(bycols)
lsqry = []
for i in range(len(byc)):
condval = byc[i]
rs = CheckExist(conn, tbl, bycols, condval)
ls = []
if rs != 0:
for c1 in xdf:
ls.append(xdf.loc[i,c1])
qry = "update " + tbl + ' set ' + prep_update(fcols,ls) + ' where ' + bycols + "='" + condval + "'"
else:
for c1 in ndf:
ls.append(ndf.loc[i,c1])
ls.append(condval)
qry = "insert into " + tbl + ' ' + insert_into_sql(fcols_pbycol,ls)
print(qry)
cr.execute(qry)
lsqry.append(qry)
conn.commit()
print('update done for ', len(lsqry), ' rows ')
return lsqry | [
"omi.kabirr@gmail.com"
] | omi.kabirr@gmail.com |
65ee2d6385a0bdfe37108f1dcac07c4caeedc45c | bf92a619b9b850678bb691915e45c39cd740fa63 | /examples/work/run_main.py | 1a7dae80a015feb1de2c673cd31a55474f62081c | [] | no_license | jrecuero/jc2cli | a045f1efa431f53351dfac968852fd82e8c963b6 | c97615828880021b3965756aed939e39bac949b6 | refs/heads/master | 2021-05-10T10:16:34.698398 | 2018-11-06T17:43:53 | 2018-11-06T17:43:53 | 118,377,662 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | from jc2cli.namespace import Handler
# MAIN = __import__('examples.work.main')
# __import__('examples.work.config')
# __import__('examples.work.execute')
class RunCli(object):
def __init__(self):
__import__('examples.work.main')
# __import__('examples.work.config')
# __import__('examples.work.execute')
handler = Handler()
handler.create_namespace('examples.work.main')
handler.switch_and_run_cli_for_namespace('examples.work.main', rprompt='<RUN>')
if __name__ == '__main__':
RunCli()
| [
"jose.recuero@gmail.com"
] | jose.recuero@gmail.com |
453e33e779ceba90beb4a31868d07efe7f5fd23e | aff88e0922ae5c75f18b624cb1c81c263d12f2af | /layout/Calc.py | 5adecf98c6435ee96b79364527e4a428e621c5fd | [] | no_license | TianJin85/Qtwindow | 44f42c8972382bcdbde7bc26a4a7f5121736e0aa | 3af712d8528d825cb3cecd6bc21c8f836232e775 | refs/heads/master | 2020-10-01T23:05:30.271773 | 2020-02-09T14:39:12 | 2020-02-09T14:39:12 | 227,642,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | # -*- encoding: utf-8 -*-
"""
@File : Calc.py
@Time : 2020/1/5 15:59
@Author : Tianjin
@Email : tianjincn@163.com
@Software: PyCharm
"""
'''
栅格布局,实现计算器UI
'''
import sys
from PyQt5.QtWidgets import *
class Calc(QWidget):
def __int__(self):
super(Calc, self).__int__()
self.setWindowTitle('栅格布局')
def initUI(self):
grid = QGridLayout()
self.setLayout(grid)
names = ['Cls', 'Back', '', 'Close',
'7', '8', '9', '/',
'4', '5', '6', '*',
'1', '2', '3', '-',
'0', '.', '=', '+']
positions = [(i, j) for i in range(5) for j in range(4)]
for position, name in zip(positions, names):
if name == '':
continue
button = QPushButton(name)
grid.addWidget(button, *position)
if __name__ == '__main__':
app = QApplication(sys.argv)
main = Calc()
main.initUI()
main.show()
sys.exit(app.exec_()) | [
"307440205@qq.com"
] | 307440205@qq.com |
ea7fe2a8309980eadbd5238e810707e3d19f9d55 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/intersectingDiscs_20200804190524.py | 450fd58ee08624f00420da84936bbe498750bb5c | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | def discs(A):
newArr = []
opendiscs = 0
intersections = 0
for i in range(len(A)):
newArr.append((i-A[i]))
newArr.sort()
i = 0
j = 0
while i < len(newArr) and j < len(A):
if i == len(newArr)- 1:
break
if newArr[i]<=A[j]:
opendiscs +=1
if opendiscs == 2:
intersections +=1
if
i+=1
elif newArr[i] > A[j]:
opendiscs -=1
j+=1
print('intersections',intersections)
discs([1,5,2,1,4,0]) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
6dd8c29a98d12f0bea1db2e80ccf811aded94176 | c92398a728817578850ecf508ec4197afe91a88f | /DemoYield Fun.py | b2e001740a146c35ba1011e91c7382282fbeecef | [] | no_license | HitanshuSoni/Python_practice | 4d0ec0378124da85e364a15a7b94ddbbfe2fc929 | 7a3d0977b218ef76f91517d88518b1c0b68b9528 | refs/heads/main | 2023-04-18T21:55:12.709161 | 2021-05-08T15:39:08 | 2021-05-08T15:39:08 | 365,550,407 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | def checkyield():
yield 1
yield 2
yield 3
'''return 1
return 2'''
for value in checkyield():
print(value)
| [
"hitanshusoni10@gmail.com"
] | hitanshusoni10@gmail.com |
f31f2e6b0b3bd7e6c5b2de59aad83d0d08c29089 | 077f29021738c3b577c7c3d9ef5851d76e93cbed | /demo/funs/passing_funs.py | a955b2a06551fd66872ba07965f05428360f0019 | [] | no_license | srikanthpragada/PYTHON_10_JULY_2020 | fb410d87260eb290ebcc5ac6a88b6d6b01ee15b5 | b7a586cbcd49934d36facb4dd748c54038838334 | refs/heads/master | 2022-12-05T09:05:33.192365 | 2020-08-26T14:27:09 | 2020-08-26T14:27:09 | 279,319,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | # func,value,value
def math_op(oper, n1, n2):
return oper(n1, n2)
def multiply(n1, n2):
return n1 * n2
def power(n1,n2):
return n1 ** n2
print(math_op(multiply, 10, 20))
print(math_op(power, 10, 20))
| [
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
4f765a9facddb729a6c6639b7da5a76717970f85 | e6713c7e72d6950c2e35c836ac88588bc673c19e | /auth_api/api.py | 2367c4cb93b42931b648e5b2e9820f2db88543c8 | [] | no_license | tanjibpa/scrumboard-with-drf | 905175069d065b7174f3485832e6c9e8bcb453da | 3d54b33f91a1719c4373677fe9efc7352b6ce53f | refs/heads/master | 2020-06-25T03:59:06.358201 | 2017-06-13T16:43:55 | 2017-06-13T16:43:55 | 94,235,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | from django.contrib.auth import authenticate, login, logout
from django.views.decorators.csrf import csrf_protect
from django.utils.decorators import method_decorator
from rest_framework import views, status
from rest_framework.response import Response
from .serializers import UserSerializer
class LoginView(views.APIView):
@method_decorator(csrf_protect)
def post(self, request):
user = authenticate(
request,
username=request.data.get('username'),
password=request.data.get('password')
)
if user is None or not user.is_active:
return Response({
'status': 'Unauthorized',
'message': 'Username or password is incorrect'
}, status=status.HTTP_401_UNAUTHORIZED)
login(request, user)
return Response(UserSerializer(user).data)
class LogoutView(views.APIView):
def get(self, request):
logout(request)
return Response({}, status=status.HTTP_204_NO_CONTENT) | [
"ikram.tanjib@gmail.com"
] | ikram.tanjib@gmail.com |
3e2c65de07c553b2bcc3d88da7d9142bd8231865 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/cv/image_classification/DeiT_ID1558_for_PyTorch/timm/data/parsers/parser_tfds.py | 01460f55212b8cc9ec49743582ba7c955e37c98f | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 13,386 | py | #
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
""" Dataset parser interface that wraps TFDS datasets
Wraps many (most?) TFDS image-classification datasets
from https://github.com/tensorflow/datasets
https://www.tensorflow.org/datasets/catalog/overview#image_classification
Hacked together by / Copyright 2020 Ross Wightman
"""
import os
import io
import math
import torch
import torch.distributed as dist
from PIL import Image
import torch.npu
import os
NPU_CALCULATE_DEVICE = 0
if os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):
NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
try:
import tensorflow as tf
tf.config.set_visible_devices([], 'GPU') # Hands off my GPU! (or pip install tensorflow-cpu)
import tensorflow_datasets as tfds
except ImportError as e:
print(e)
print("Please install tensorflow_datasets package `pip install tensorflow-datasets`.")
exit(1)
from .parser import Parser
MAX_TP_SIZE = 8 # maximum TF threadpool size, only doing jpeg decodes and queuing activities
SHUFFLE_SIZE = 16834 # samples to shuffle in DS queue
PREFETCH_SIZE = 4096 # samples to prefetch
def even_split_indices(split, n, num_samples):
partitions = [round(i * num_samples / n) for i in range(n + 1)]
return [f"{split}[{partitions[i]}:{partitions[i+1]}]" for i in range(n)]
class ParserTfds(Parser):
""" Wrap Tensorflow Datasets for use in PyTorch
There several things to be aware of:
* To prevent excessive samples being dropped per epoch w/ distributed training or multiplicity of
dataloader workers, the train iterator wraps to avoid returning partial batches that trigger drop_last
https://github.com/pytorch/pytorch/issues/33413
* With PyTorch IterableDatasets, each worker in each replica operates in isolation, the final batch
from each worker could be a different size. For training this is worked around by option above, for
validation extra samples are inserted iff distributed mode is enabled so that the batches being reduced
across replicas are of same size. This will slightly alter the results, distributed validation will not be
100% correct. This is similar to common handling in DistributedSampler for normal Datasets but a bit worse
since there are up to N * J extra samples with IterableDatasets.
* The sharding (splitting of dataset into TFRecord) files imposes limitations on the number of
replicas and dataloader workers you can use. For really small datasets that only contain a few shards
you may have to train non-distributed w/ 1-2 dataloader workers. This is likely not a huge concern as the
benefit of distributed training or fast dataloading should be much less for small datasets.
* This wrapper is currently configured to return individual, decompressed image samples from the TFDS
dataset. The augmentation (transforms) and batching is still done in PyTorch. It would be possible
to specify TF augmentation fn and return augmented batches w/ some modifications to other downstream
components.
"""
def __init__(self, root, name, split='train', shuffle=False, is_training=False, batch_size=None, repeats=0):
super().__init__()
self.root = root
self.split = split
self.shuffle = shuffle
self.is_training = is_training
if self.is_training:
assert batch_size is not None,\
"Must specify batch_size in training mode for reasonable behaviour w/ TFDS wrapper"
self.batch_size = batch_size
self.repeats = repeats
self.subsplit = None
self.builder = tfds.builder(name, data_dir=root)
# NOTE: please use tfds command line app to download & prepare datasets, I don't want to call
# download_and_prepare() by default here as it's caused issues generating unwanted paths.
self.num_samples = self.builder.info.splits[split].num_examples
self.ds = None # initialized lazily on each dataloader worker process
self.worker_info = None
self.dist_rank = 0
self.dist_num_replicas = 1
if dist.is_available() and dist.is_initialized() and dist.get_world_size() > 1:
self.dist_rank = dist.get_rank()
self.dist_num_replicas = dist.get_world_size()
def _lazy_init(self):
""" Lazily initialize the dataset.
This is necessary to init the Tensorflow dataset pipeline in the (dataloader) process that
will be using the dataset instance. The __init__ method is called on the main process,
this will be called in a dataloader worker process.
NOTE: There will be problems if you try to re-use this dataset across different loader/worker
instances once it has been initialized. Do not call any dataset methods that can call _lazy_init
before it is passed to dataloader.
"""
worker_info = torch.utils.data.get_worker_info()
# setup input context to split dataset across distributed processes
split = self.split
num_workers = 1
if worker_info is not None:
self.worker_info = worker_info
num_workers = worker_info.num_workers
global_num_workers = self.dist_num_replicas * num_workers
worker_id = worker_info.id
# FIXME I need to spend more time figuring out the best way to distribute/split data across
# combo of distributed replicas + dataloader worker processes
"""
InputContext will assign subset of underlying TFRecord files to each 'pipeline' if used.
My understanding is that using split, the underling TFRecord files will shuffle (shuffle_files=True)
between the splits each iteration, but that understanding could be wrong.
Possible split options include:
* InputContext for both distributed & worker processes (current)
* InputContext for distributed and sub-splits for worker processes
* sub-splits for both
"""
# split_size = self.num_samples // num_workers
# start = worker_id * split_size
# if worker_id == num_workers - 1:
# split = split + '[{}:]'.format(start)
# else:
# split = split + '[{}:{}]'.format(start, start + split_size)
if not self.is_training and '[' not in self.split:
# If not training, and split doesn't define a subsplit, manually split the dataset
# for more even samples / worker
self.subsplit = even_split_indices(self.split, global_num_workers, self.num_samples)[
self.dist_rank * num_workers + worker_id]
if self.subsplit is None:
input_context = tf.distribute.InputContext(
num_input_pipelines=self.dist_num_replicas * num_workers,
input_pipeline_id=self.dist_rank * num_workers + worker_id,
num_replicas_in_sync=self.dist_num_replicas # FIXME does this arg have any impact?
)
else:
input_context = None
read_config = tfds.ReadConfig(
shuffle_seed=42,
shuffle_reshuffle_each_iteration=True,
input_context=input_context)
ds = self.builder.as_dataset(
split=self.subsplit or self.split, shuffle_files=self.shuffle, read_config=read_config)
# avoid overloading threading w/ combo fo TF ds threads + PyTorch workers
ds.options().experimental_threading.private_threadpool_size = max(1, MAX_TP_SIZE // num_workers)
ds.options().experimental_threading.max_intra_op_parallelism = 1
if self.is_training or self.repeats > 1:
# to prevent excessive drop_last batch behaviour w/ IterableDatasets
# see warnings at https://pytorch.org/docs/stable/data.html#multi-process-data-loading
ds = ds.repeat() # allow wrap around and break iteration manually
if self.shuffle:
ds = ds.shuffle(min(self.num_samples // self._num_pipelines, SHUFFLE_SIZE), seed=0)
ds = ds.prefetch(min(self.num_samples // self._num_pipelines, PREFETCH_SIZE))
self.ds = tfds.as_numpy(ds)
def __iter__(self):
if self.ds is None:
self._lazy_init()
# compute a rounded up sample count that is used to:
# 1. make batches even cross workers & replicas in distributed validation.
# This adds extra samples and will slightly alter validation results.
# 2. determine loop ending condition in training w/ repeat enabled so that only full batch_size
# batches are produced (underlying tfds iter wraps around)
target_sample_count = math.ceil(max(1, self.repeats) * self.num_samples / self._num_pipelines)
if self.is_training:
# round up to nearest batch_size per worker-replica
target_sample_count = math.ceil(target_sample_count / self.batch_size) * self.batch_size
sample_count = 0
for sample in self.ds:
img = Image.fromarray(sample['image'], mode='RGB')
yield img, sample['label']
sample_count += 1
if self.is_training and sample_count >= target_sample_count:
# Need to break out of loop when repeat() is enabled for training w/ oversampling
# this results in extra samples per epoch but seems more desirable than dropping
# up to N*J batches per epoch (where N = num distributed processes, and J = num worker processes)
break
if not self.is_training and self.dist_num_replicas and 0 < sample_count < target_sample_count:
# Validation batch padding only done for distributed training where results are reduced across nodes.
# For single process case, it won't matter if workers return different batch sizes.
# FIXME if using input_context or % based subsplits, sample count can vary by more than +/- 1 and this
# approach is not optimal
yield img, sample['label'] # yield prev sample again
sample_count += 1
@property
def _num_workers(self):
return 1 if self.worker_info is None else self.worker_info.num_workers
@property
def _num_pipelines(self):
return self._num_workers * self.dist_num_replicas
def __len__(self):
# this is just an estimate and does not factor in extra samples added to pad batches based on
# complete worker & replica info (not available until init in dataloader).
return math.ceil(max(1, self.repeats) * self.num_samples / self.dist_num_replicas)
def _filename(self, index, basename=False, absolute=False):
assert False, "Not supported" # no random access to samples
def filenames(self, basename=False, absolute=False):
""" Return all filenames in dataset, overrides base"""
if self.ds is None:
self._lazy_init()
names = []
for sample in self.ds:
if len(names) > self.num_samples:
break # safety for ds.repeat() case
if 'file_name' in sample:
name = sample['file_name']
elif 'filename' in sample:
name = sample['filename']
elif 'id' in sample:
name = sample['id']
else:
assert False, "No supported name field present"
names.append(name)
return names
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
194e61356c8666faa3168ed6093752c9ba74d3fc | ebd9c249d446d809abc9a0f3e4593f34922a1b93 | /leetcode/235_lowest_common_ancestor_of_a_binary_search_tree.py | b20699956509fd6d35339669565185cc0d94e6c0 | [] | no_license | jaychsu/algorithm | ac7a9dc7366f58c635a68bc46bf1640d2f5ff16d | 91892fd64281d96b8a9d5c0d57b938c314ae71be | refs/heads/master | 2023-05-11T00:40:39.237813 | 2022-09-14T07:43:12 | 2022-09-14T07:43:12 | 106,277,156 | 143 | 39 | null | 2022-09-14T07:43:13 | 2017-10-09T11:51:48 | Python | UTF-8 | Python | false | false | 617 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
while root:
if root.val > p.val and root.val > q.val:
root = root.left
elif root.val < p.val and root.val < q.val:
root = root.right
else:
return root
| [
"hi@jaych.su"
] | hi@jaych.su |
bc6fdbc733f95c52d979d6784b735214a3e8dbc3 | 344e2956b4e2a30a8ef7532d951f96d995d1dd1e | /18_mmaction/lib/mmcv/tests/test_image/test_photometric.py | f2e86d450da174e0e5bc8d0b5e362df36a5b7ca6 | [
"Apache-2.0",
"LGPL-3.0-only",
"MIT",
"LicenseRef-scancode-proprietary-license",
"BSD-3-Clause",
"GPL-3.0-only"
] | permissive | karndeepsingh/Monk_Object_Detection | e64199705326e4cd65e4b29946cae210a4ef9649 | 425fa50a3236cb9097389646275da06bf9185f6b | refs/heads/master | 2022-12-22T18:26:53.933397 | 2020-09-28T12:49:50 | 2020-09-28T12:49:50 | 299,307,843 | 1 | 1 | Apache-2.0 | 2020-09-28T12:52:18 | 2020-09-28T12:52:17 | null | UTF-8 | Python | false | false | 3,235 | py | # Copyright (c) Open-MMLab. All rights reserved.
import os.path as osp
import cv2
import numpy as np
from numpy.testing import assert_array_equal
import mmcv
class TestPhotometric:
@classmethod
def setup_class(cls):
# the test img resolution is 400x300
cls.img_path = osp.join(osp.dirname(__file__), '../data/color.jpg')
cls.img = cv2.imread(cls.img_path)
cls.mean = np.array([123.675, 116.28, 103.53], dtype=np.float32)
cls.std = np.array([58.395, 57.12, 57.375], dtype=np.float32)
def test_imnormalize(self):
rgb_img = self.img[:, :, ::-1]
baseline = (rgb_img - self.mean) / self.std
img = mmcv.imnormalize(self.img, self.mean, self.std)
assert np.allclose(img, baseline)
assert id(img) != id(self.img)
img = mmcv.imnormalize(rgb_img, self.mean, self.std, to_rgb=False)
assert np.allclose(img, baseline)
assert id(img) != id(rgb_img)
def test_imnormalize_(self):
img_for_normalize = np.float32(self.img)
rgb_img_for_normalize = np.float32(self.img[:, :, ::-1])
baseline = (rgb_img_for_normalize - self.mean) / self.std
img = mmcv.imnormalize_(img_for_normalize, self.mean, self.std)
assert np.allclose(img_for_normalize, baseline)
assert id(img) == id(img_for_normalize)
img = mmcv.imnormalize_(
rgb_img_for_normalize, self.mean, self.std, to_rgb=False)
assert np.allclose(img, baseline)
assert id(img) == id(rgb_img_for_normalize)
def test_imdenormalize(self):
norm_img = (self.img[:, :, ::-1] - self.mean) / self.std
rgb_baseline = (norm_img * self.std + self.mean)
bgr_baseline = rgb_baseline[:, :, ::-1]
img = mmcv.imdenormalize(norm_img, self.mean, self.std)
assert np.allclose(img, bgr_baseline)
img = mmcv.imdenormalize(norm_img, self.mean, self.std, to_bgr=False)
assert np.allclose(img, rgb_baseline)
def test_iminvert(self):
img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
dtype=np.uint8)
img_r = np.array([[255, 127, 0], [254, 128, 1], [253, 126, 2]],
dtype=np.uint8)
assert_array_equal(mmcv.iminvert(img), img_r)
def test_solarize(self):
img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
dtype=np.uint8)
img_r = np.array([[0, 127, 0], [1, 127, 1], [2, 126, 2]],
dtype=np.uint8)
assert_array_equal(mmcv.solarize(img), img_r)
img_r = np.array([[0, 127, 0], [1, 128, 1], [2, 126, 2]],
dtype=np.uint8)
assert_array_equal(mmcv.solarize(img, 100), img_r)
def test_posterize(self):
img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
dtype=np.uint8)
img_r = np.array([[0, 128, 128], [0, 0, 128], [0, 128, 128]],
dtype=np.uint8)
assert_array_equal(mmcv.posterize(img, 1), img_r)
img_r = np.array([[0, 128, 224], [0, 96, 224], [0, 128, 224]],
dtype=np.uint8)
assert_array_equal(mmcv.posterize(img, 3), img_r)
| [
"abhishek4273@gmail.com"
] | abhishek4273@gmail.com |
22ed3addbe4bfa6066226c177ab15e87da4ccc4c | 4a4579254118db40fb008439d18ad8c573e8fc1a | /devel/lib/python2.7/dist-packages/jsk_gui_msgs/msg/_TouchEvent.py | e602a239330814d3e80962f75199c5cbbf48a8d4 | [] | no_license | amilearning/AD_mpc_ws | 86ff6ef9e61c6cc5aae6e12f20c2c875b1930d41 | 1fc2d385f281e00c16aff688948f7296e02cbd3a | refs/heads/master | 2023-06-24T13:54:59.759921 | 2021-07-16T01:08:52 | 2021-07-16T01:08:52 | 386,465,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,280 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from jsk_gui_msgs/TouchEvent.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class TouchEvent(genpy.Message):
_md5sum = "f074642ed1ad51ea5afc186cab8aaca1"
_type = "jsk_gui_msgs/TouchEvent"
_has_header = False # flag to mark the presence of a Header object
_full_text = """byte DOWN=0
byte UP=1
byte MOVE=2
byte state
float32 x
float32 y
float32 w
float32 h"""
# Pseudo-constants
DOWN = 0
UP = 1
MOVE = 2
__slots__ = ['state','x','y','w','h']
_slot_types = ['byte','float32','float32','float32','float32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
state,x,y,w,h
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(TouchEvent, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.state is None:
self.state = 0
if self.x is None:
self.x = 0.
if self.y is None:
self.y = 0.
if self.w is None:
self.w = 0.
if self.h is None:
self.h = 0.
else:
self.state = 0
self.x = 0.
self.y = 0.
self.w = 0.
self.h = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_b4f().pack(_x.state, _x.x, _x.y, _x.w, _x.h))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
_x = self
start = end
end += 17
(_x.state, _x.x, _x.y, _x.w, _x.h,) = _get_struct_b4f().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_b4f().pack(_x.state, _x.x, _x.y, _x.w, _x.h))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
_x = self
start = end
end += 17
(_x.state, _x.x, _x.y, _x.w, _x.h,) = _get_struct_b4f().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_b4f = None
def _get_struct_b4f():
global _struct_b4f
if _struct_b4f is None:
_struct_b4f = struct.Struct("<b4f")
return _struct_b4f
| [
"hojin.projects@gmail.com"
] | hojin.projects@gmail.com |
82c22f6bda3155da5a29ca1d146703656037eb57 | bfc25f1ad7bfe061b57cfab82aba9d0af1453491 | /data/external/repositories/141822/AXA_Telematics-master/Create_Ensemble/Combine_Solutions_with_sorting_Scott.py | feabcb82de2a11145b698fa1cca60e40f335e7b2 | [
"MIT"
] | permissive | Keesiu/meta-kaggle | 77d134620ebce530d183467202cf45639d9c6ff2 | 87de739aba2399fd31072ee81b391f9b7a63f540 | refs/heads/master | 2020-03-28T00:23:10.584151 | 2018-12-20T19:09:50 | 2018-12-20T19:09:50 | 147,406,338 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,317 | py | import csv
import os
import sys
import time
import string
import numpy as np
import matplotlib.pyplot as plt
from file_paths import *
def getkey(item):
brk = string.split(item[0],'_')
return brk[0]
def getkey_route(item):
brk = string.split(item[0],'_')
return int(brk[1])
def secondvalue(item):
return item[1]
def thirdvalue(item):
return item[2]
# read the input files
def read_results_file(file_name):
file_open = open(file_name,'rb')
file_csv = csv.reader(file_open)
results =[]
header = file_csv.next()
for row in file_csv:
if (len(row) ==2):
results.append( [ row[0], float(row[1]), 0.0 ] )
else:
results.append( [ row[0], float(row[1]), float(row[2]) ] )
file_open.close()
return header,results
# write the result file
def write_results_file(header,results,output_file_name):
file_open = open(output_file_name,'wb')
file_csv = csv.writer(file_open)
file_csv.writerow(header)
for row in results:
file_csv.writerow(row)
file_open.close()
return
# normalize the value range to go from zero to 1
def normalize_values(results):
max_value = -1000
min_value = 1000
# find the max and min
for row in results:
value = row[1]
max_value = max(max_value,value)
min_value = min(min_value,value)
# normalize to zero
for cnt in xrange(0,len(results)):
results[cnt][1] = results[cnt][1] - min_value
# normalize max_value to zero
max_value = max_value - min_value
# normalize to max of 100
for cnt in xrange(0,len(results)):
results[cnt][1] = results[cnt][1] / max_value * 100
return(results)
# keep the path solution if it is a 1, otherwise use the telematics solution
#@profile
def combine_results(path_solution, telematics_solution):
driver_list=[]
for driver in telematics_solution:
driver_list.append(driver[0])
combined_results = []
# these are the ordered path scores
for path_value in path_solution:
driver = path_value[0]
score = path_value[1]
combined_results.append([driver,score])
# add in the adjusted telematics scores
for telematics_value in telematics_solution:
driver = telematics_value[0]
score = telematics_value[1]
combined_results.append([driver,score])
combined_results = sorted(combined_results, key=getkey_route) # sort on the route id
combined_results = sorted(combined_results, key=getkey) # sort on the driver
return combined_results
# keep the path solution if it is a 1, otherwise use the telematics solution
def sort_path_matches(path_solution, telematics_solution):
sorted_path_results = []
telematics_only_results = []
driver_list=[]
for driver in telematics_solution:
driver_list.append(driver[0])
for cnt,path_value in enumerate(path_solution):
driver = path_value[0]
score = path_value[1]
if (driver == driver_list[cnt]): # if the telematics and path solution are sorted the same
telematics_score = telematics_solution[cnt][1]
else: # search for the telematics score
index1 = driver_list.index(driver)
telematics_score = telematics_solution[index1][1]
if (score >=1): # if the path score is above a 1, remember it
sorted_path_results.append([driver,score,telematics_score])
else: # otherwise use the telematics results
telematics_only_results.append([driver,telematics_score])
# this sorting will make a route that matches 2 other routes higher ranked than a route that matches 1 other route
# and a route that matches 3 higher than any of them. However if there are two route that match 1 other route
# for instance than their ranking will be driven by the telematics score
sorted_path_results = sorted(sorted_path_results , key=thirdvalue) # first sort by the telematics score
sorted_path_results = sorted(sorted_path_results , key=secondvalue) # then sort by the number of matches found
sorted_score = 100.
increment = .0001
for cnt in range(0,len(sorted_path_results)):
if (sorted_path_results[cnt][1] >=1):
sorted_path_results[cnt][1] = sorted_score # this ensures that no two path results have the same score
sorted_score += increment # the ones that would are instead sorted by the telematics method
# sort the telematics results on their score
telematics_only_results = sorted(telematics_only_results , key=secondvalue) # sort by telematics score
sorted_score = 0.
increment = .0001
for cnt in range(0,len(telematics_only_results)):
telematics_only_results[cnt][1] = sorted_score # enumerate on the telematics scores
sorted_score += increment
telematics_only_results = sorted(telematics_only_results , key=secondvalue)# first sort by the telematics score
return sorted_path_results, telematics_only_results
# call this re-sort and renormalize every so often so we don't run into numeric problems with
# trying to interpolate between numbers that are too close
def sort_telematics_values( telematics_only_results):
# sort the telematics results on their score
telematics_only_results = sorted(telematics_only_results , key=secondvalue) # sort by telematics score
sorted_score = 0.
increment = .0001
for cnt in range(0,len(telematics_only_results)):
telematics_only_results[cnt][1] = sorted_score # enumerate on the telematics scores
sorted_score += increment
return telematics_only_results
# adjust the scores in the telematics solution based on the adjust scores list
#@profile
def adjust_telematics_scores(telematics_solution, adjust_score):
driver_list=[]
driver_only_list = []
driver_num =0
for driver in telematics_solution:
driver_list.append(driver[0])
driver_id = int(string.split(driver[0],'_')[0])
if (driver_id != driver_num):
driver_num = driver_id
driver_only_list.append(driver_num)
num_telematics = len(telematics_solution)
for cnt in range(0,len(adjust_score)):
if (cnt%600 ==0):
print("number of adjustments made ",cnt," out of ",len(adjust_score))
driver= adjust_score[cnt][0] # see if the driver is in this list and not the path solution
driver_found = 0
try:
#driver_id = int(string.split(driver[0],'_')[0])
#route_id = int(string.split(driver[0],'_')[1])
#index0 = driver_only_list.index(driver_id)
#index1 = driver_id * 200 + (route_id -1)
#
#if (driver_list[index1] != driver): # if the matching didn't work
index1 = driver_list.index(driver)
driver_found=1
except:
driver_found = 0
pass
if (driver_found ==1):
if (adjust_score[cnt][1] > 0.0000001 or adjust_score[cnt][1] < -0.0000001): # move up or down a percentage
target_spot = index1 + int(num_telematics * adjust_score[cnt][1] / 100 )
elif (adjust_score[cnt][2] > 0): # move to a desired percentage
target_spot = int(num_telematics * adjust_score[cnt][2] / 100 )
else: # otherwise don't move
target_spot = index1
# see if we are at the top or bottom of the list, and also get the new score
if (target_spot < 0): # put it in the first spot
target_spot = 0
telematics_solution[index1][1] = telematics_solution[target_spot][1] - .0001
elif (target_spot > num_telematics-1): # put it in the last spot
target_spot = num_telematics-1
telematics_solution[index1][1] = telematics_solution[target_spot][1] + .0001
else: # take the average of the before and after spots
telematics_solution[index1][1] = (telematics_solution[target_spot-1][1] + telematics_solution[target_spot][1]) / 2.0
if (target_spot > index1):
moving_spot = telematics_solution[index1]
telematics_solution.insert(target_spot,moving_spot) # insert the new score into the right position
telematics_solution.pop(index1) # remove the old score
moving_driver = driver_list[index1]
driver_list.insert(target_spot, moving_driver)# insert the driver into the right position
driver_list.pop(index1) # remove the old driver
elif (target_spot < index1): # do it in the other order so the list indices don't get messed up.
moving_spot = telematics_solution[index1]
telematics_solution.pop(index1) # remove the old score
telematics_solution.insert(target_spot,moving_spot) # insert the new score into the right position
moving_driver = driver_list[index1]
driver_list.pop(index1) # remove the old driver
driver_list.insert(target_spot, moving_driver)# insert the driver into the right position
if (cnt%200 ==0):
# resort every so often to make sure errors don't propagate
telematics_solution = sort_telematics_values( telematics_solution)
driver_list=[]
for driver in telematics_solution:
driver_list.append(driver[0])
return telematics_solution
print("Doing Adjust telematics solution & combine with path solution")
header,path_solution = read_results_file('Current_Best_Path_Solution.csv')
header,telematics_solution = read_results_file('Telematics_results_combined.csv')
header2,adjust_score = read_results_file('Adjust_Score.csv')
path_solution = sorted(path_solution, key=getkey)
telematics_solution = sorted(telematics_solution, key=getkey)
adjust_score = sorted(adjust_score, key=getkey_route) # sort on the route id
adjust_score = sorted(adjust_score, key=getkey) # sort on the driver
print("here1*********")
path_solution, telematics_solution = sort_path_matches(path_solution,telematics_solution)
print("here3*********")
path_solution = sorted(path_solution, key=getkey_route) # sort on the route id
path_solution = sorted(path_solution, key=getkey) # sort on the driver
# adjust the scores in the telematics solution based on the adjust scores list
telematics_solution = adjust_telematics_scores(telematics_solution, adjust_score)
print("here4*********")
combined_results = combine_results(path_solution,telematics_solution)
print("here5*********")
#write_results_file(header,path_solution,'test_path.csv')
#write_results_file(header,telematics_solution,'test_telematics.csv')
write_results_file(header,combined_results,'combined_results.csv')
print("Finish Adjust telematics solution & combine with path solution") | [
"keesiu.wong@gmail.com"
] | keesiu.wong@gmail.com |
c9a1e1f387c7a2cb2ba7b89988bfb22731bdb725 | 68e5e2c9a7e9372f536edf3d99847067eb734e75 | /05-奠定项目基础-Model/typeidea/typeidea/comment/migrations/0001_initial.py | b025fc6f66017f5dd054dcd8948eed32546a206b | [] | no_license | gy0109/Django-enterprise-development-logs--huyang | f04d21df6d45f5d2f226760d35e38042f74a7ea8 | ab4505f8cdaf0c1f9e3635591cd74645a374a73f | refs/heads/master | 2020-05-17T05:24:51.602859 | 2019-05-08T03:42:13 | 2019-05-08T03:42:13 | 183,534,431 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,285 | py | # Generated by Django 2.1.7 on 2019-04-27 15:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the comment app: creates the ``Comment`` table."""

    initial = True

    dependencies = [
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nickname', models.CharField(max_length=50, verbose_name='昵称')),
                ('email', models.EmailField(max_length=254, verbose_name='邮箱')),
                ('website', models.URLField(verbose_name='网站')),
                ('content', models.CharField(max_length=50, verbose_name='内容')),
                # NOTE(review): string choice values with an integer default on a
                # PositiveIntegerField -- verify the model defines integer
                # constants (e.g. STATUS_NORMAL = 1) and regenerate if so.
                ('status', models.PositiveIntegerField(choices=[('STATUS_NORMAL', '正常'), ('STATUS_DELETE', '删除')], default=1, verbose_name='状态')),
                ('created_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                # FIX: on_delete must be a deletion strategy callable, not ``True``;
                # ``on_delete=True`` raises TypeError when a referenced Post is deleted.
                ('target', models.ForeignKey(on_delete=models.CASCADE, to='blog.Post', verbose_name='评论目标')),
            ],
            options={
                'verbose_name': '评论',
                'verbose_name_plural': '评论',
            },
        ),
    ]
| [
"1974326896@qq.com"
] | 1974326896@qq.com |
15a8de32a49edae93fb23dd1983e2c341bfda6a0 | 531a5c09ed774dca6f85f3c96827ff4d9f8fc3be | /AutotestWebD/apps/webportal/scripts/emailcopy.py | c6e55aa3e0f74b03e545142c9e259a3103d5155d | [
"MIT"
] | permissive | xiaochaom/sosotest | a98db41088d9411aa7d2723894f5bdc60bfbbd52 | a3a5ce67c3dc302cf4bca906496ec6ee26b42c33 | refs/heads/master | 2020-07-06T09:31:39.598616 | 2020-06-23T07:51:00 | 2020-06-23T07:51:00 | 202,971,957 | 0 | 0 | MIT | 2019-08-18T07:12:52 | 2019-08-18T07:12:51 | null | UTF-8 | Python | false | false | 935 | py | import django
import sys,os
# Bootstrap: compute the project root from this file's location so the script
# can be run standalone, then put the root and its first-level directories on
# sys.path before importing project modules.
rootpath = os.path.dirname(os.path.realpath(__file__)).replace("\\","/")
rootpath = rootpath.split("/apps")[0]
# print(rootpath)
syspath=sys.path
sys.path=[]
sys.path.append(rootpath) # use the project root as the absolute search root
# NOTE(review): ``rootpath+i`` has no '/' separator, producing entries like
# '<root>apps' instead of '<root>/apps' -- confirm this is intentional.
sys.path.extend([rootpath+i for i in os.listdir(rootpath) if i[0]!="."])# add every first-level directory under the project root to the python search path
sys.path.extend(syspath)
from apps.common.func.WebFunc import *
from apps.webportal.services.webPortalService import WebPortalService
from all_models.models import *
from apps.task.services.HTTP_taskService import HTTP_taskService
if __name__ == "__main__":
    # One-off data fix: copy each v1807 version-task's email list from the
    # TbTask row with the same taskId.
    versionTask = TbVersionTask.objects.filter(versionName="v1807")
    task = TbTask.objects.filter()
    for taskIndex in versionTask:
        # NOTE(review): ``[0]`` raises IndexError when no TbTask row matches
        # this taskId -- confirm every version task has a backing task row.
        taskIndex.emailList = task.filter(taskId=taskIndex.taskId)[0].emailList
        taskIndex.save()
        # tmp = taskIndex.taskId
    # print(te)
| [
"wangjilianglong@163.com"
] | wangjilianglong@163.com |
ac170695f08d24863f873bcc35ea080070054620 | 1b1b074cca3a8c9f5a6d0630cd40d56a1d8b7468 | /motorista/migrations/0006_auto_20170526_1645.py | 7814af59c9ad678721f1bcc2b9b4edb020d7c863 | [] | no_license | DiegoDigo/buScool | e392f62e3f43a0a892f14608447329d2b2d8b50a | fca579c0951dfedfabce79b52031804c9b6373ed | refs/heads/master | 2021-01-21T07:00:39.043740 | 2017-05-26T20:19:24 | 2017-05-26T20:19:24 | 91,592,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-26 19:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('motorista', '0005_motorista_descricao'),
]
operations = [
migrations.AddField(
model_name='motorista',
name='deficiente',
field=models.BooleanField(default=False, verbose_name='Aceita Deficiência'),
),
migrations.AddField(
model_name='veiculo',
name='capacidadeDeficiente',
field=models.PositiveIntegerField(blank=True, default=1, null=True),
),
]
| [
"di3g0d0ming05@gmail.com"
] | di3g0d0ming05@gmail.com |
29c3ad28b41ef7b1b7689b86a33f01448b53bf57 | ef42fa903820055b9b0a8b4ebb1863a16d386171 | /config/urls.py | 788b8ccbf55a2bf64fe274419f089478f6da357b | [] | no_license | sinjorjob/django-simple-capture-inquery-form | 2537c8e03bc2c0118f772b69a59866ffb34d7cac | 8bd2900a6bdf97b97ddca7b7240b42f478e14884 | refs/heads/master | 2023-07-02T14:40:43.840669 | 2021-08-10T21:24:24 | 2021-08-10T21:24:24 | 394,784,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | from django.contrib import admin
from django.urls import path, include
# URL routing table: Django admin, the captcha image/refresh endpoints used
# by the contact form, and the contact app mounted at the site root.
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin interface
    path('captcha/', include('captcha.urls')),  # django-simple-captcha endpoints
    path('', include('contact.urls')),  # contact form app handles the root URL
]
| [
"sinforjob@gmail.com"
] | sinforjob@gmail.com |
60d3de32df2546f2b7d1a59f47cd31ade136afe5 | 29a580900743a35c0d870c75b02decf3bfd24513 | /src/windows_sniffer_example.py | 6ef3b8a9447679386fb83b1fc2311d7238477496 | [] | no_license | rduvalwa5/PythonNetworking | 6695da9552beb62c3af0711a14c68e52fd412b12 | 52340292e4fbe0f628727838cabdf647c0a62e07 | refs/heads/master | 2021-01-19T17:47:31.589447 | 2019-07-11T06:02:25 | 2019-07-11T06:02:25 | 31,050,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,113 | py | '''
Created on Nov 26, 2018
https://docs.python.org/3.0/library/ssl.html
@author: rduvalwa2
OSXAir:src rduvalwa2$ ls
Asyncio echo_client.py simpleServer.py
PyNet echo_server.py windows_sniffer_example.py
PyProgramming_Chap5 simpleClient.py
OSXAir:src rduvalwa2$ sudo python windows_sniffer_example.py
Password:
Traceback (most recent call last):
File "windows_sniffer_example.py", line 23, in <module>
s.ioctl(socket.SIO_RCVALL,socket.RCVALL_ON)
AttributeError: '_socketobject' object has no attribute 'ioctl'
OSXAir:src rduvalwa2$
'''
import socket
import sys

# FIX: promiscuous-mode capture via socket.ioctl(SIO_RCVALL, ...) is a
# Windows-only API; the attribute does not exist elsewhere (the module
# docstring shows the resulting AttributeError on macOS), so fail with a
# clear message instead of crashing mid-script.
if not hasattr(socket, 'SIO_RCVALL'):
    sys.exit('This sniffer requires the Windows-only socket.ioctl/SIO_RCVALL API.')
# the public network interface
HOST = socket.gethostbyname(socket.gethostname())
# create a raw socket and bind it to the public interface
# (raw sockets require administrator privileges)
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP)
s.bind((HOST, 0))
# Include IP headers
s.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
# receive all packages
s.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)
# receive a package
print(s.recvfrom(65565))
# disabled promiscuous mode
s.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)
# FIX: release the raw socket explicitly instead of leaking it.
s.close()
"rduvalwa5@hotmail.com"
] | rduvalwa5@hotmail.com |
23a1ed94a24ef6a5903ca4baec53f1c87ce65dc4 | 2d0bada349646b801a69c542407279cc7bc25013 | /examples/waa/apps/adas_detection/build_flow/DPUCADF8H_u200/scripts/utility/readme_gen/readme_gen.py | c5ff1bc54c59bd1e660ee64f2fa3992ea34e611f | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause-Open-MPI",
"LicenseRef-scancode-free-unknown",
"Libtool-exception",
"GCC-exception-3.1",
"LicenseRef-scancode-mit-old-style",
"OFL-1.1",
"JSON",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"ICU",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-issl-2018",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-or-later",
"Zlib",
"BSD-Source-Code",
"ClArtistic",
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"CC-BY-4.0",
"FSFULLR",
"Minpack",
"Unlicense",
"BSL-1.0",
"NAIST-2003",
"Apache-2.0",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-public-domain",
"Libpng",
"Spencer-94",
"BSD-2-Clause",
"Intel",
"GPL-1.0-or-later",
"MPL-2.0"
] | permissive | Xilinx/Vitis-AI | 31e664f7adff0958bb7d149883ab9c231efb3541 | f74ddc6ed086ba949b791626638717e21505dba2 | refs/heads/master | 2023-08-31T02:44:51.029166 | 2023-07-27T06:50:28 | 2023-07-27T06:50:28 | 215,649,623 | 1,283 | 683 | Apache-2.0 | 2023-08-17T09:24:55 | 2019-10-16T21:41:54 | Python | UTF-8 | Python | false | false | 16,713 | py | #!/usr/bin/env python
from sys import argv
import json
import os
import subprocess
DSA = 'xilinx:vcu1525:dynamic'
VERSION = 'SDAccel 2018.2'
DEVICES = {
'xilinx:kcu1500:dynamic': {
'version': '5.0',
'name': 'Xilinx Kintex UltraScale KCU1500',
'nae': 'nx4'
},
'xilinx:vcu1525:dynamic': {
'version': '5.0',
'name': 'Xilinx Virtex UltraScale+ VCU1525',
'nae': 'nx5'
}
}
def header(target,data):
    """Write the README title (the example name), the underline banner,
    and the numbered table of contents."""
    target.write(data["example"])
    target.write("\n")
    target.write("======================\n\n")
    target.write("This README file contains the following sections:\n\n")
    # Table-of-contents entries, emitted verbatim (the typo in section 2 is
    # kept so the generated output is byte-for-byte unchanged).
    toc_entries = (
        "1. OVERVIEW\n",
        "2. HOW TO DOWLOAD THE REPOSITORY\n",
        "3. SOFTWARE TOOLS AND SYSTEM REQUIREMENTS\n",
        "4. DESIGN FILE HIERARCHY\n",
        "5. COMPILATION AND EXECUTION\n",
        "6. EXECUTION IN CLOUD ENVIRONMENTS\n",
        "7. SUPPORT\n",
        "8. LICENSE AND CONTRIBUTING TO THE REPOSITORY\n",
        "9. ACKNOWLEDGEMENTS\n\n\n",
    )
    for entry in toc_entries:
        target.write(entry)
    return
def download(target):
    """Write section 2: how to clone the SDAccel examples repository."""
    chunks = (
        "## 2. HOW TO DOWNLOAD THE REPOSITORY\n",
        "To get a local copy of the SDAccel example repository, clone this repository to the local system with the following command:\n",
        "```\n",
        "git clone https://github.com/Xilinx/SDAccel_Examples examples\n",
        "```\n",
        "where examples is the name of the directory where the repository will be stored on the local system.",
        "This command needs to be executed only once to retrieve the latest version of all SDAccel examples. The only required software is a local installation of git.\n\n",
    )
    target.write("".join(chunks))
    return
def overview(target,data):
    """Write section 1: the overview text plus the optional extra info,
    performance table, key-concept list and keyword list from *data*."""
    target.write("## 1. OVERVIEW\n")
    target.write('\n'.join(data["overview"]))
    target.write("\n\n")
    if 'more_info' in data:
        target.write('\n'.join(data["more_info"]))
        target.write("\n\n")
    if 'perf_fields' in data:
        # Three-column markdown table: header row, separator, one row per result.
        target.write("### PERFORMANCE\n")
        fields = data["perf_fields"]
        target.write("|".join([fields[0], fields[1], fields[2]]))
        target.write("\n")
        target.write("----|-----|-----\n")
        for result in data["performance"]:
            target.write("|".join([result["system"], result["constraint"], result["metric"]]))
            target.write("\n")
    if 'key_concepts' in data:
        # ", ".join reproduces the original separator-after-all-but-last logic.
        target.write("***KEY CONCEPTS:*** ")
        target.write(", ".join(data["key_concepts"]))
        target.write("\n\n")
    if 'keywords' in data:
        target.write("***KEYWORDS:*** ")
        target.write(", ".join(data["keywords"]))
        target.write("\n\n")
    return
def requirements(target,data):
    """Write section 3: the board/device/software support table.

    *data* may provide ``board`` (substring whitelist against the DEVICES
    keys) or ``nboard`` (blacklist) to select table rows; a truthy
    ``opencv`` key appends the OpenCV runtime-library note.
    """
    target.write("## 3. SOFTWARE AND SYSTEM REQUIREMENTS\n")
    target.write("Board | Device Name | Software Version\n")
    target.write("------|-------------|-----------------\n")
    boards = []
    if 'board' in data:
        board = data['board']
        boards = [word for word in DEVICES if word in board]
    else:
        nboard = []
        if 'nboard' in data:
            nboard = data['nboard']
        boards = [word for word in DEVICES if word not in nboard]
    for board in boards:
        target.write(DEVICES[board]['name'])
        target.write("|")
        target.write(board)
        target.write("|")
        # FIX: write the version string in one call; the original iterated the
        # string and wrote it character by character (same output, unclear intent).
        target.write(VERSION)
        target.write("\n")
    target.write("\n\n")
    target.write("*NOTE:* The board/device used for compilation can be changed by adding the DEVICES variable to the make command as shown below\n")
    target.write("```\n")
    target.write("make DEVICES=<device name>\n")
    target.write("```\n")
    target.write("where the *DEVICES* variable accepts either 1 device from the table above or a comma separated list of device names.\n\n")
    # FIX: 'opencv' is simply an optional key; use dict.get instead of the
    # bare ``except: pass`` that silently swallowed every error.
    if data.get('opencv'):
        target.write("***OpenCV for Example Applications***\n\n")
        target.write("This application requires OpenCV runtime libraries. If the host does not have OpenCV installed use the Xilinx included libraries with the following command:\n\n")
        target.write("```\n")
        target.write("export LD_LIBRARY_PATH=$XILINX_SDX/lnx64/tools/opencv/:$LD_LIBRARY_PATH\n")
        target.write("```\n")
    return
def hierarchy(target):
    """Write section 4: the design-file listing.

    Shells out to ``git ls-files``, so the generator must run from inside
    the example's git checkout with git on the PATH.
    """
    target.write("## 4. DESIGN FILE HIERARCHY\n")
    target.write("Application code is located in the src directory. ")
    target.write("Accelerator binary files will be compiled to the xclbin directory. ")
    target.write("The xclbin directory is required by the Makefile and its contents will be filled during compilation. A listing of all the files ")
    target.write("in this example is shown below\n\n")
    target.write("```\n")
    tree_cmd = ['git', 'ls-files']
    proc = subprocess.Popen(tree_cmd,stdout=subprocess.PIPE)
    # communicate() waits for git to exit and returns (stdout, stderr).
    output = proc.communicate()[0]
    target.write(output)
    target.write("```\n")
    target.write("\n")
    return
def compilation(target,data):
    """Write section 5: emulation and hardware compile/run instructions.

    ``data['xcl']`` selects the XCL_EMULATION_MODE value (present and falsy
    writes the legacy ``true``; absent or truthy writes ``<sw_emu|hw_emu>``);
    ``data['em_cmd']`` is the emulation launch command.
    """
    target.write("## 5. COMPILATION AND EXECUTION\n")
    target.write("### Compiling for Application Emulation\n")
    target.write("As part of the capabilities available to an application developer, SDAccel includes environments to test the correctness of an application at both a software functional level and a hardware emulated level.\n")
    target.write("These modes, which are named sw_emu and hw_emu, allow the developer to profile and evaluate the performance of a design before compiling for board execution.\n")
    target.write("It is recommended that all applications are executed in at least the sw_emu mode before being compiled and executed on an FPGA board.\n")
    target.write("```\n")
    target.write("make TARGETS=<sw_emu|hw_emu> all\n")
    target.write("```\n")
    target.write("where\n")
    target.write("```\n")
    target.write("\tsw_emu = software emulation\n")
    target.write("\thw_emu = hardware emulation\n")
    target.write("```\n")
    target.write("*NOTE:* The software emulation flow is a functional correctness check only. It does not estimate the performance of the application in hardware.\n")
    target.write("The hardware emulation flow is a cycle accurate simulation of the hardware generated for the application. As such, it is expected for this simulation to take a long time.\n")
    target.write("It is recommended that for this example the user skips running hardware emulation or modifies the example to work on a reduced data set.\n")
    target.write("### Executing Emulated Application \n")
    target.write("***Recommended Execution Flow for Example Applications in Emulation*** \n\n")
    target.write("The makefile for the application can directly executed the application with the following command:\n")
    target.write("```\n")
    target.write("make TARGETS=<sw_emu|hw_emu> check\n\n")
    target.write("```\n")
    target.write("where\n")
    target.write("```\n")
    target.write("\tsw_emu = software emulation\n")
    target.write("\thw_emu = hardware emulation\n")
    target.write("```\n")
    target.write("If the application has not been previously compiled, the check makefile rule will compile and execute the application in the emulation mode selected by the user.\n\n")
    target.write("***Alternative Execution Flow for Example Applications in Emulation*** \n\n")
    target.write("An emulated application can also be executed directly from the command line without using the check makefile rule as long as the user environment has been properly configured.\n")
    target.write("To manually configure the environment to run the application, set the following\n")
    target.write("```\n")
    target.write("export LD_LIBRARY_PATH=$XILINX_SDX/runtime/lib/x86_64/:$LD_LIBRARY_PATH\n")
    target.write("export XCL_EMULATION_MODE=")
    # FIX: 'xcl' is an optional key -- the original bare ``except`` around the
    # lookup swallowed every error, not just a missing key. ``dict.get`` with a
    # truthy default reproduces the old branching exactly.
    if not data.get('xcl', True):
        target.write('true')
    else:
        target.write('<sw_emu|hw_emu>')
    target.write('\n')
    target.write("emconfigutil --platform '" + DSA + "' --nd 1\n")
    target.write("```\n")
    target.write("Once the environment has been configured, the application can be executed by\n")
    target.write("```\n")
    target.write(data["em_cmd"])
    target.write("\n```\n")
    target.write("This is the same command executed by the check makefile rule\n")
    target.write("### Compiling for Application Execution in the FPGA Accelerator Card\n")
    target.write("The command to compile the application for execution on the FPGA acceleration board is\n")
    target.write("```\n")
    target.write("make all\n")
    target.write("```\n")
    target.write("The default target for the makefile is to compile for hardware. Therefore, setting the TARGETS option is not required.\n")
    target.write("*NOTE:* Compilation for application execution in hardware generates custom logic to implement the functionality of the kernels in an application.\n")
    target.write("It is typical for hardware compile times to range from 30 minutes to a couple of hours.\n\n")
def execution(target):
    """Write section 6: pointers to the cloud-execution guides."""
    target.write(
        "## 6. Execution in Cloud Environments\n"
        "FPGA acceleration boards have been deployed to the cloud. For information on how to execute the example within a specific cloud, take a look at the following guides.\n"
        "* [AWS F1 Application Execution on Xilinx Virtex UltraScale Devices]\n"
        "* [Nimbix Application Execution on Xilinx Kintex UltraScale Devices]\n"
        "\n"
    )
def nimbix(target):
    """Write the Nimbix execution instructions.

    NOTE(review): unlike the other section writers, this reads the
    module-level ``data`` dict instead of taking it as a parameter, so it
    only works after the driver has loaded ``data``; its call site below is
    currently commented out.
    """
    target.write("The developer instance hosting the SDAccel tools on Nimbix is not directly connected to an FPGA accelerator card.\n")
    target.write("FPGA Accelerator cards are available as part of the SDAccel Runtime application. There are several ways of executing an application on the available cards:\n\n")
    target.write("***Submit the application from the developer to runtime instances (recommended flow)***\n")
    target.write("* Create a credentials file for the runtime machine based on your Nimbix username and API key. For more information on how to obtain the API key, refer to ")
    target.write("[Nimbix Application Submission README][]. The credentials file ( ~/.nimbix_creds.json ) should look like\n")
    target.write("```\n")
    target.write("{\n")
    target.write("\t\"username\": \"<username>\",\n")
    target.write("\t\"api-key\": \"<apikey>\"\n")
    target.write("}\n")
    target.write("```\n\n")
    target.write("where the values for username and apikey have been set to the values from your Nimbix account.\n\n")
    target.write("*NOTE:* The home directory of a SDAccel developer instance is not persistent across sessions. Only files stored in the /data directory are kept between sessions.")
    target.write("It is recommended that a copy of the nimbix_creds.json file be stored in the /data directory and copied to the appropriate location in the home directory ")
    target.write("at the start of each development session.\n")
    target.write("* Launch the application\n")
    target.write("```\n")
    target.write("make check\n")
    target.write("```\n")
    target.write("***Launch the application from a remote system outside of the Nimbix environment***\n")
    target.write("* Follow the instructions in [Nimbix Application Submission README][]\n\n")
    target.write("* Use the following command to launch the application from the users terminal (on a system outside of the Nimbix environment)\n")
    target.write("```\n")
    target.write(data["hw_cmd"])
    target.write("\n")
    target.write("```\n\n")
    target.write("***Copy the application files from the Developer to Runtime instances on Nimbix***\n")
    target.write("* Copy the application *.exe file and xclbin directory to the /data directory\n")
    target.write("* Launch the application using the Nimbix web interface as described in [Nimbix Getting Started Guide][]\n")
    target.write("* Make sure that the application launch options in the Nimbix web interface reflect the applications command line syntax\n")
    target.write("```\n")
    target.write(data["em_cmd"])
    target.write("\n")
    target.write("```\n")
    return
def power(target):
    """Write the IBM POWER compile note (its heading reuses section number 6
    in the generated README; kept verbatim)."""
    for chunk in (
        "\n## 6. COMPILATION AND EXECUTION FOR IBM POWER SERVERS\n",
        "View the SuperVessel [Walkthrough Video][] to become familiar with the environment.\n\n",
        "Compile the application with the following command\n",
        "```\n",
        "make ARCH=POWER all\n",
        "```\n",
    ):
        target.write(chunk)
    return
def support(target):
    """Write section 7: support and documentation pointers."""
    target.write(
        "\n## 7. SUPPORT\n"
        "For more information about SDAccel check the [SDAccel User Guides][]\n\n"
        "For questions and to get help on this project or your own projects, visit the [SDAccel Forums][].\n\n"
        "To execute this example using the SDAccel GUI, follow the setup instructions in [SDAccel GUI README][]\n\n"
    )
    return
def license(target):
    """Write section 8: license and contribution pointers.

    NOTE: the name shadows the ``license`` builtin; kept because the
    module-level driver calls it under this name.
    """
    target.write(
        "\n## 8. LICENSE AND CONTRIBUTING TO THE REPOSITORY\n"
        "The source for this project is licensed under the [3-Clause BSD License][]\n\n"
        "To contribute to this project, follow the guidelines in the [Repository Contribution README][]\n"
    )
    return
def ack(target,data):
    """Write section 9: one markdown bullet link per entry of
    ``data['contributors']`` (each a dict with 'group' and 'url' keys)."""
    target.write("\n## 9. ACKNOWLEDGEMENTS\n")
    target.write("This example is written by developers at\n")
    for contributor in data["contributors"]:
        target.write("- [{0}]({1})\n".format(contributor["group"], contributor["url"]))
    target.write("\n")
    return
def dirTraversal(stop_file):
    """Walk up parent directories looking for *stop_file* and return how
    many levels up the search went (capped at 20 when never found).

    NOTE(review): the first probe is two levels up ('../../<stop_file>'), so
    a file exactly one level up is never detected -- kept as-is because
    relativeTree() compensates with a matching extra level.
    """
    # FIX: dropped the unused ``stop_search`` local.
    level_count = 1
    s = os.path.join('..', stop_file)
    while level_count < 20:
        s = os.path.join('..', s)
        if os.path.isfile(s):
            break
        level_count += 1
    return level_count
def relativeTree(levels):
    """Return a '../' chain that is *levels* + 1 segments long (the extra
    segment matches the level dirTraversal() skips)."""
    return '../' * (levels + 1)
def footer(target):
    """Write the markdown reference-link definitions used throughout the
    README; repository-relative targets are prefixed with the '../' chain
    needed to reach the repo root (located via LICENSE.txt)."""
    relativeLevels = dirTraversal("LICENSE.txt")
    root = relativeTree(relativeLevels)
    target.write("[3-Clause BSD License]: " + root + "LICENSE.txt\n")
    target.write("[SDAccel Forums]: https://forums.xilinx.com/t5/SDAccel/bd-p/SDx\n")
    target.write("[SDAccel User Guides]: http://www.xilinx.com/support/documentation-navigation/development-tools/software-development/sdaccel.html?resultsTablePreSelect=documenttype:SeeAll#documentation\n")
    target.write("[Nimbix Getting Started Guide]: http://www.xilinx.com/support/documentation/sw_manuals/xilinx2016_2/ug1240-sdaccel-nimbix-getting-started.pdf\n")
    target.write("[Walkthrough Video]: http://bcove.me/6pp0o482\n")
    target.write("[Nimbix Application Submission README]: " + root + "utility/nimbix/README.md\n")
    target.write("[Repository Contribution README]: " + root + "CONTRIBUTING.md\n")
    target.write("[SDaccel GUI README]: " + root + "GUIREADME.md\n")
    target.write("[AWS F1 Application Execution on Xilinx Virtex UltraScale Devices]: https://github.com/aws/aws-fpga/blob/master/SDAccel/README.md\n")
    target.write("[Nimbix Application Execution on Xilinx Kintex UltraScale Devices]: " + root + "utility/nimbix/README.md\n")
    return
# Get the argument from the description
script, desc_file = argv
# load the description file
print "SDAccel README File Genarator"
print "Opening the description file '%s'" % desc_file
desc = open(desc_file,'r')
# load the json data from the file
print "Parsing the description file"
data = json.load(desc)
desc.close()
assert("OpenCL" in data['runtime'])
print "Generating the README for %s" % data["example"]
target = open("README.md","w")
header(target,data)
overview(target,data)
download(target)
requirements(target,data)
hierarchy(target)
compilation(target,data)
execution(target)
#nimbix(target)
#power(target)
support(target)
license(target)
ack(target,data)
footer(target)
target.close
| [
"hanxue.lee@xilinx.com"
] | hanxue.lee@xilinx.com |
808f3448e73d4b265567ef20529e5295ec1bb6ac | 286a49d0360ee2eb718dd9a496be88555cef3227 | /二叉树/__init__.py | 0a783c22d3adee2845fb651ca69d0ca320a3ce51 | [] | no_license | NaiveteYaYa/data-structrue | 0618ab6bb7accc99c40e39a3ca60bbc0a9723c2f | a376863c1a8e007efafd5c1ed84929a80321b1b9 | refs/heads/master | 2023-07-02T03:15:33.523855 | 2021-08-14T02:02:07 | 2021-08-14T02:02:07 | 395,857,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | # -*- coding: utf-8 -*-
# @Time : 2020/4/22 12:45
# @Author : WuxieYaYa
from .full_binary_tree import *
| [
"jgm247878528@.qq.com"
] | jgm247878528@.qq.com |
efac4b3cf729f3ef140268b15ed0ff26865674c9 | f71deab2aabb43128d42d6a9e7d8ccd74740c7dd | /binance/handlers/handlers.py | a39b12896cc0df084fd6dc9cb4df788b42a1e9b1 | [] | no_license | kp-forks/python-binance-sdk | a1d3740d39f6b7b03bf7dc2ba81170de71967020 | 7e1962fe28226c69a5789c2a6f9eba9552f7b051 | refs/heads/master | 2023-03-26T22:43:04.951187 | 2021-03-25T07:33:46 | 2021-03-25T07:33:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,609 | py | import traceback
from datetime import datetime
import sys
from typing import TextIO
from binance.common.constants import (
STREAM_TYPE_MAP,
STREAM_OHLC_MAP
)
from binance.common.types import (
DictPayload,
ListPayload
)
from .base import Handler
class HandlerExceptionHandlerBase(Handler):
    """Handler that logs exceptions raised while processing stream payloads."""
    def receive(
        _,  # the instance slot; named ``_`` because the handler is stateless
        e: Exception,
        file: TextIO = sys.stderr
    ):
        """
        Print current datetime and error call stacks
        Args:
            e (Exception): the error
            file (:obj:`TextIO`, optional): output target of the printer, defaults to `sys.stderr`
        Returns:
            Exception: the error itself
        """
        print(f'[{datetime.now()}] ', end='', file=file)
        # print_exc() formats the exception currently being handled, so this
        # relies on being invoked from inside the active ``except`` context.
        traceback.print_exc(file=file)
        return e
# Column maps translate the single-letter payload keys into readable column
# names: shared trade fields first, then the per-stream specializations.
BASE_TRADE_COLUMNS_MAP = {
    **STREAM_TYPE_MAP,
    'E': 'event_time',
    's': 'symbol',
    'p': 'price',
    'q': 'quantity',
    'T': 'trade_time',
    'm': 'is_maker'
}

TRADE_COLUMNS_MAP = {
    **BASE_TRADE_COLUMNS_MAP,
    't': 'trade_id',
    'b': 'buyer_order_id',
    'a': 'seller_order_id'
}

# Column order is the map's key view (insertion order of the dict above).
TRADE_COLUMNS = TRADE_COLUMNS_MAP.keys()
class TradeHandlerBase(Handler):
    """Base handler for trade payloads (columns per TRADE_COLUMNS_MAP)."""
    COLUMNS_MAP = TRADE_COLUMNS_MAP
    COLUMNS = TRADE_COLUMNS
# Aggregated-trade payloads: shared trade fields plus the aggregate ids.
AGG_TRADE_COLUMNS_MAP = {
    **BASE_TRADE_COLUMNS_MAP,
    'a': 'agg_trade_id',
    'f': 'first_trade_id',
    'l': 'last_trade_id',
}

# FIX: mirror every other *_COLUMNS definition, which binds the key view; the
# original bound the map object itself (iterating a dict yields keys anyway,
# so this is backward compatible, but the inconsistency was accidental).
AGG_TRADE_COLUMNS = AGG_TRADE_COLUMNS_MAP.keys()
class AggTradeHandlerBase(Handler):
    """Base handler for aggregated-trade payloads (see AGG_TRADE_COLUMNS_MAP)."""
    COLUMNS_MAP = AGG_TRADE_COLUMNS_MAP
    COLUMNS = AGG_TRADE_COLUMNS
# Kline/candlestick columns: open/close times, OHLC prices (via
# STREAM_OHLC_MAP) and the volume / trade-count fields.
KLINE_COLUMNS_MAP = {
    **STREAM_TYPE_MAP,
    'E': 'event_time',
    't': 'open_time',
    'T': 'close_time',
    's': 'symbol',
    'i': 'interval',
    'f': 'first_trade_id',
    'L': 'last_trade_id',
    **STREAM_OHLC_MAP,
    'x': 'is_closed',
    'v': 'volume',
    'q': 'quote_volume',
    'V': 'taker_volume',
    'Q': 'taker_quote_volume',
    'n': 'total_trades'
}

KLINE_COLUMNS = KLINE_COLUMNS_MAP.keys()
class KlineHandlerBase(Handler):
    """Base handler for kline payloads; flattens the nested 'k' dict."""
    COLUMNS_MAP = KLINE_COLUMNS_MAP
    COLUMNS = KLINE_COLUMNS

    def _receive(self, payload: DictPayload):
        """The payload of kline has unnecessary hierarchy,
        so just flatten it.
        """
        # NOTE: copies the event time into the nested dict in place, i.e. the
        # caller's payload['k'] is mutated.
        k = payload['k']
        k['E'] = payload['E']
        return super()._receive(k)
# Mini-ticker columns: symbol, OHLC prices and the two volume fields.
MINI_TICKER_COLUMNS_MAP = {
    **STREAM_TYPE_MAP,
    'E': 'event_time',
    's': 'symbol',
    **STREAM_OHLC_MAP,
    'v': 'volume',
    'q': 'quote_volume',
}

MINI_TICKER_COLUMNS = MINI_TICKER_COLUMNS_MAP.keys()
class MiniTickerHandlerBase(Handler):
    """Base handler for mini-ticker payloads (see MINI_TICKER_COLUMNS_MAP)."""
    COLUMNS_MAP = MINI_TICKER_COLUMNS_MAP
    COLUMNS = MINI_TICKER_COLUMNS
# Full-ticker columns: the mini-ticker fields plus price statistics,
# best-bid data, the statistics window and trade-id range.
TICKER_COLUMNS_MAP = {
    **MINI_TICKER_COLUMNS_MAP,
    'p': 'price',
    'P': 'percent',
    'w': 'weighted_average_price',
    'x': 'first_trade_price',
    'Q': 'last_quantity',
    'b': 'best_bid_price',
    'B': 'best_bid_quantity',
    'O': 'stat_open_time',
    'C': 'stat_close_time',
    'F': 'first_trade_id',
    'L': 'last_trade_id',
    'n': 'total_trades'
}

TICKER_COLUMNS = TICKER_COLUMNS_MAP.keys()
class TickerHandlerBase(Handler):
    """Base handler for full-ticker payloads (see TICKER_COLUMNS_MAP)."""
    COLUMNS_MAP = TICKER_COLUMNS_MAP
    COLUMNS = TICKER_COLUMNS
class AllMarketMiniTickersHandlerBase(Handler):
    """Handler for the all-market mini-ticker stream (list payload)."""
    COLUMNS_MAP = MINI_TICKER_COLUMNS_MAP
    COLUMNS = MINI_TICKER_COLUMNS

    def _receive(self, payload: ListPayload):
        # Second argument's meaning is defined by Handler._receive --
        # presumably suppresses per-dict keying for list payloads; TODO confirm.
        return super()._receive(
            payload, None)
class AllMarketTickersHandlerBase(Handler):
    """Handler for the all-market full-ticker stream (list payload)."""
    COLUMNS_MAP = TICKER_COLUMNS_MAP
    COLUMNS = TICKER_COLUMNS

    def _receive(self, payload: ListPayload):
        # Same list-payload convention as AllMarketMiniTickersHandlerBase:
        # the None second argument's semantics come from Handler._receive.
        return super()._receive(
            payload, None)
| [
"i+github@kael.me"
] | i+github@kael.me |
bca560451b0408d76f387dc12b62152b768ac6ba | 3db40bfa5c9e686293aa7f0540aa392be2e99a3b | /__init__.py | 7e361a5cb12d48e18e7f290b9678075d4a2cd44a | [] | no_license | OpenVoiceOS/tskill-ocp-cps | 0926a21f653dfc151ecd1e87f34dfaa95d3157f2 | 216fd7096f090e20ff1dc30846f61de66d8e616f | refs/heads/master | 2023-09-03T15:06:04.577257 | 2021-10-21T16:53:30 | 2021-10-21T16:53:30 | 419,805,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,151 | py | from mycroft import intent_file_handler
from mycroft.skills.common_play_skill import CommonPlaySkill, CPSMatchLevel
import random
from mycroft.util.parse import match_one
track_dict = {
'bomb jack': 'http://remix.kwed.org/files/RKOfiles/Chronblom%20-%20Bomb%20Jack%20subtune%206%20(violin%20version).mp3',
'druid': 'http://remix.kwed.org/files/RKOfiles/Revel%20Craft%20-%20Druid.mp3',
'crazy comets': 'http://remix.kwed.org/files/RKOfiles/Makke%20-%20Crazy%20Comets%20(Komet%20Non-Stop).mp3',
'boulder dash': 'http://remix.kwed.org/files/RKOfiles/Mahoney%20-%20BoulderDash%20(Commodore%2069%20mix).mp3',
'garfield': 'http://remix.kwed.org/files/RKOfiles/Reyn%20Ouwehand%20-%20Garfield.mp3'
}
class Test(CommonPlaySkill):
"""
say "test audio service play/pause/resume/queue/stop"
-> confirm direct usage of audio service is routed to OCP
say "play crazy comets"
-> verify a track from this skill can be played if selected directly
(if needed remove other ocp skills)
-> verify the track from this skill is in search results and can be
played (select it from playlist, if needed install other ocp skills)
"""
@intent_file_handler("play.intent")
def handle_play_intent(self, message):
uri = track_dict[random.choice(list(track_dict.keys()))]
self.audioservice.play(uri)
@intent_file_handler("queue.intent")
def handle_queue_intent(self, message):
self.audioservice.queue(list(track_dict.values()))
@intent_file_handler("stop.intent")
def handle_stop_intent(self, message):
self.audioservice.stop()
@intent_file_handler("pause.intent")
def handle_pause_intent(self, message):
self.audioservice.pause()
@intent_file_handler("resume.intent")
def handle_resume_intent(self, message):
self.audioservice.resume()
@intent_file_handler("prev.intent")
def handle_prev_intent(self, message):
self.audioservice.prev()
@intent_file_handler("next.intent")
def handle_next_intent(self, message):
self.audioservice.next()
def CPS_match_query_phrase(self, phrase):
""" This method responds wether the skill can play the input phrase.
The method is invoked by the PlayBackControlSkill.
Returns: tuple (matched phrase(str),
match level(CPSMatchLevel),
optional data(dict))
or None if no match was found.
"""
# Get match and confidence
match, confidence = match_one(phrase, track_dict)
# If the confidence is high enough return a match
if confidence > 0.5:
return (match, CPSMatchLevel.TITLE, {"track": match})
# Otherwise return None
else:
return None
def CPS_start(self, phrase, data):
""" Starts playback.
Called by the playback control skill to start playback if the
skill is selected (has the best match level)
"""
url = data['track']
self.audioservice.play(url)
def create_skill():
return Test()
| [
"jarbasai@mailfence.com"
] | jarbasai@mailfence.com |
8a626e8d7d80f256f5efeb0b52ebc5927bc653a7 | c6d389f085c683f33cc0d0ab6497b3f042f7c905 | /vector.py | 3ee0da0ab4351f22b2d671407b21f41f284307f2 | [] | no_license | irhadSaric/computer-geometry | 0d23fbafbedb18b22df30cc8071f4103237eef2d | 25a73c756472896c316d685ca6792c8c94f31361 | refs/heads/master | 2020-04-04T08:01:38.501815 | 2019-02-26T20:05:08 | 2019-02-26T20:05:08 | 155,768,457 | 0 | 0 | null | 2019-02-26T20:10:33 | 2018-11-01T19:56:17 | Python | UTF-8 | Python | false | false | 3,160 | py | from Point import *
class Vector:
    """A directed segment from ``head`` to ``tail`` (both ``Point``s), with a
    mutable ``currentPosition`` y-coordinate suggesting sweep-line usage.

    NOTE(review): ``__le__`` is byte-identical to ``__lt__`` (equal vectors
    are never <=), and ``__gt__`` lacks the equal-head tie-break that
    ``__lt__`` applies, so the ordering is not internally consistent. Left
    untouched because sweep-line code may depend on the exact ordering.
    """
    def __init__(self, head: 'Point', tail: 'Point'):
        self.head = head
        self.tail = tail
        # Sweep position starts at the head's y-coordinate.
        self.currentPosition = head.y

    def changeCurrentPosition(self, value):
        """Move the recorded sweep position to *value*."""
        self.currentPosition = value

    def __lt__(self, other: 'Vector'):
        # Equal heads: break the tie on the tail, preferring the higher tail
        # (larger y, then larger x).
        if self.head.x == other.head.x and self.head.y == other.head.y:
            return self.tail.y > other.tail.y or (self.tail.y == other.tail.y and self.tail.x > other.tail.x)
        # Otherwise order by head: smaller y first, then smaller x.
        return self.head.y < other.head.y or (self.head.y == other.head.y and self.head.x < other.head.x)

    def __le__(self, other):
        # NOTE(review): identical to __lt__ -- returns False for equal vectors.
        if self.head.x == other.head.x and self.head.y == other.head.y:
            return self.tail.y > other.tail.y or (self.tail.y == other.tail.y and self.tail.x > other.tail.x)
        return self.head.y < other.head.y or (self.head.y == other.head.y and self.head.x < other.head.x)

    def __gt__(self, other: 'Vector'):
        # NOTE(review): no equal-head tie-break here, unlike __lt__.
        return self.head.y > other.head.y or (self.head.y == other.head.y and self.head.x > other.head.x)

    def __repr__(self):
        return '({}, {})'.format(self.head, self.tail)

    def magnitude(self):
        """Segment length (delegates to Point.euclidean_distance)."""
        return self.head.euclidean_distance(self.tail)

    def dot(self, v: 'Vector') -> float:
        """Dot product of the two segments' direction vectors (tail - head)."""
        prod_x = (self.tail.x - self.head.x) * (v.tail.x - v.head.x)
        prod_y = (self.tail.y - self.head.y) * (v.tail.y - v.head.y)
        return prod_x + prod_y

    @staticmethod
    def do_intersect(s_1: 'Vector', s_2: 'Vector') -> bool:
        """Return True if segments *s_1* and *s_2* intersect (touching counts).

        Classic orientation test: the segments cross when the endpoints of
        each straddle the other's supporting line; collinear overlap is
        checked separately with Point.between.
        """
        # orientation of the (s_1.tail, s_1.head, s_2.tail) triangle
        s_1_orientation_tail = Point.orientation(s_1.tail, s_1.head, s_2.tail)
        # orientation of the (s_1.tail, s_1.head, s_2.head) triangle
        s_1_orientation_head = Point.orientation(s_1.tail, s_1.head, s_2.head)
        # orientation of the (s_2.tail, s_2.head, s_1.tail) triangle
        s_2_orientation_tail = Point.orientation(s_2.tail, s_2.head, s_1.tail)
        # orientation of the (s_2.tail, s_2.head, s_1.head) triangle
        s_2_orientation_head = Point.orientation(s_2.tail, s_2.head, s_1.head)

        # general case
        if s_1_orientation_tail != s_1_orientation_head and s_2_orientation_tail != s_2_orientation_head:
            return True

        # collinear case
        if s_1_orientation_tail == 0 and s_1_orientation_head == 0 and s_2_orientation_tail == 0 and s_2_orientation_head == 0:
            if s_1.tail.between(s_2.head, s_2.tail) or s_1.head.between(s_2.head, s_2.tail) \
                    or s_2.tail.between(s_1.head, s_1.tail) or s_2.head.between(s_1.head, s_1.tail):
                return True

        return False

    @staticmethod
    def point_of_intersection(s_1: 'Vector', s_2: 'Vector') -> Point:
        """Intersection point of the two segments' supporting lines.

        NOTE(review): divides by ``c`` (the cross product of the direction
        vectors); ``c == 0`` for parallel/collinear segments raises
        ZeroDivisionError, so callers should rule that case out first.
        """
        x12 = s_1.head.x - s_1.tail.x
        x34 = s_2.head.x - s_2.tail.x
        y12 = s_1.head.y - s_1.tail.y
        y34 = s_2.head.y - s_2.tail.y

        c = x12 * y34 - y12 * x34

        a = s_1.head.x * s_1.tail.y - s_1.head.y * s_1.tail.x
        b = s_2.head.x * s_2.tail.y - s_2.head.y * s_2.tail.x

        x = (a * x34 - b * x12) / c
        y = (a * y34 - b * y12) / c

        return Point(x, y)
| [
"irhad.saric@hotmail.com"
] | irhad.saric@hotmail.com |
12670c04398ed5572e2b9d26b7ba81c93a37eea4 | b125f9a750a519c9c7a5ed66adb8530e0237367b | /loop/LoopPractice.py | ae419e05d2a6e216cbccf54d37fdad4281e03600 | [] | no_license | isisisisisitch/geekPython | 4e49fe19b4cca9891056f33464518272265e3dab | 635e246dca7a221d87a3b3c5b07d1e177527498f | refs/heads/master | 2021-05-27T01:29:09.755225 | 2021-01-18T23:18:46 | 2021-01-18T23:18:46 | 254,200,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | #一个小球从10米的高空落下,每次落地跳回原来的一半,再落下,求第10次时落地的高度
# A ball is dropped from 10 m and rebounds to half its previous height on
# every bounce; print the height reached after bounces 2 through 10.
height = 10
for bounce in range(2, 11):
    height = height / 2
    print(bounce, height)
"dallucus@gmail.com"
] | dallucus@gmail.com |
f9815346cd1953430a86b298cf50c513fed4f963 | 5330918e825f8d373d3907962ba28215182389c3 | /RecoTracker/RingESSource/python/RingESSourceTIFTIBTOB_cff.py | 007f2f6354f449c09d356f06e54b30988dd8d96e | [] | no_license | perrozzi/cmg-cmssw | 31103a7179222c7aa94f65e83d090a5cf2748e27 | 1f4cfd936da3a6ca78f25959a41620925c4907ca | refs/heads/CMG_PAT_V5_18_from-CMSSW_5_3_22 | 2021-01-16T23:15:58.556441 | 2017-05-11T22:43:15 | 2017-05-11T22:43:15 | 13,272,641 | 1 | 0 | null | 2017-05-11T22:43:16 | 2013-10-02T14:05:21 | C++ | UTF-8 | Python | false | false | 349 | py | import FWCore.ParameterSet.Config as cms
# geometry
# tracker geometry
# tracker numbering
import copy
from RecoTracker.RingESSource.RingESSource_cfi import *
# rings esproducer
# Clone the default rings ESSource configuration and point it at the
# TIF TIB+TOB ring geometry data file; ComponentName is the label under
# which other modules look this producer up.
ringsTIFTIBTOB = copy.deepcopy(rings)
ringsTIFTIBTOB.InputFileName = 'RecoTracker/RingESSource/data/rings_tiftibtob-0004.dat'
ringsTIFTIBTOB.ComponentName = 'TIFTIBTOB'
| [
"sha1-197b93d87bf2e1eb4349df76c6ec25fd8f1f348e@cern.ch"
] | sha1-197b93d87bf2e1eb4349df76c6ec25fd8f1f348e@cern.ch |
41134729528c1d14ae99ed8d555dec4c20966af9 | cfb6923223bd5b2cad56ece404f74fbb6889837b | /TAPI_RI/funcs_TapiNotification/context_NotifsubscriptionUuid_NotificationNotification_UuidAdditionalinfoImpl.py | a0dbbf54c5e02c9551ac997190cb2f5fc644be56 | [
"Apache-2.0"
] | permissive | XingZhao-CATR/Snowmass-ONFOpenTransport | 27206bd84ff8d9ea2ec7b8ee25a9085b9c96af6d | c5807944bb1333a8ed83d6beea3e55922d006495 | refs/heads/develop | 2021-01-13T16:59:27.016238 | 2016-12-21T13:19:17 | 2016-12-21T13:19:17 | 77,099,371 | 1 | 0 | null | 2016-12-22T01:29:16 | 2016-12-22T01:29:16 | null | UTF-8 | Python | false | false | 690 | py | import os.path, sys
sys.path.append(os.path.join('/'.join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])))
import backend.backend as be
class Context_NotifsubscriptionUuid_NotificationNotification_UuidAdditionalinfoImpl:
    # Backend accessor for one notification's additionalInfo inside one
    # notification subscription.  (Python 2 code: note the print statement.)
    @classmethod
    def get(cls, uuid, notification_uuid):
        # Return the additionalInfo for (subscription uuid, notification uuid);
        # raises KeyError naming the missing identifier otherwise.
        print 'handling get'
        if uuid in be.Context._notifSubscription:
            if notification_uuid in be.Context._notifSubscription[uuid]._notification:
                return be.Context._notifSubscription[uuid]._notification[notification_uuid].additionalInfo
            else:
                # NOTE(review): raises the literal parameter name, not the
                # offending value — confirm that is intended.
                raise KeyError('notification_uuid')
        else:
            raise KeyError('uuid')
| [
"ricard.vilalta@cttc.es"
] | ricard.vilalta@cttc.es |
3526b5be57d277f2ad79e0df9304069a43370768 | 9fe60c108003f6b40efa69f6481f3cd2f2a08b2a | /rcsb/ccmodels/search/ChemCompModelAssemble.py | 610b6abe4d55f477c854e40b7097a7d84c4edcf7 | [
"Apache-2.0"
] | permissive | rcsb/py-rcsb_ccmodels | 0407e3ecaec51d7469acbbaa7677d2a864ca441c | 92094b8246b5cef3548bbe585a3e2b28972e5702 | refs/heads/master | 2023-06-23T05:52:52.932499 | 2023-06-20T15:04:23 | 2023-06-20T15:04:23 | 321,114,174 | 0 | 0 | Apache-2.0 | 2023-06-20T15:04:24 | 2020-12-13T16:55:17 | Python | UTF-8 | Python | false | false | 12,899 | py | ##
# File: ChemCompModelAssemble.py
# Author: J. Westbrook
# Date: 4-Feb-2021
# Version: 0.001
#
# Updated:
##
"""
Assemble model files and adjust audit records for the chemical component model workflow.
Models identifiers and audit creation dates are reconciled with the audit details of the
prior public model collection.
"""
__docformat__ = "restructuredtext en"
__author__ = "John Westbrook"
__email__ = "john.westbrook@rcsb.org"
__license__ = "Apache 2.0"
import datetime
import logging
import os
import time
from collections import defaultdict
from operator import itemgetter
from rcsb.ccmodels.search.ChemCompModelBuild import ChemCompModelBuild
from rcsb.ccmodels.search.CODModelBuild import CODModelBuild
from rcsb.ccmodels.search import __version__
from rcsb.utils.chemref.ChemCompModelProvider import ChemCompModelProvider
from rcsb.utils.io.MarshalUtil import MarshalUtil
logger = logging.getLogger(__name__)
class ChemCompModelAssemble(object):
    """Assemble CCDC- and COD-derived chemical component model files into a
    single mmCIF file, reconciling model identifiers and audit dates with the
    prior public model collection.

    (A verbatim duplicate definition of ``__makePublicModelId`` at the end of
    the original class has been removed.)
    """

    def __init__(self, cachePath, prefix=None, urlTarget=None):
        """
        Args:
            cachePath (str): top-level cache directory for builds and indices
            prefix (str, optional): file-name prefix passed to the build workflows
            urlTarget (str, optional): alternate source for the prior public
                model collection (see __getAuditDetails)
        """
        self.__cachePath = cachePath
        self.__prefix = prefix
        self.__urlTarget = urlTarget
        #
        self.__ccdcmb = ChemCompModelBuild(cachePath=self.__cachePath, prefix=self.__prefix)
        self.__codmb = CODModelBuild(cachePath=self.__cachePath, prefix=self.__prefix)
        # self.__startTime = time.time()
        logger.info("Starting assemble (%s) at %s", __version__, time.strftime("%Y %m %d %H:%M:%S", time.localtime()))

    def assemble(self, maxRFactor=10.0):
        """Concatenate models into a dated file in the model directory subject
        to the R value constraint.
        Relabel the models sequentially for each parent chemical component.

        Args:
            maxRFactor (float, optional): limiting R-value. Defaults to 10.0.

        Returns:
            (int): number of models assembled
        """
        dataContainerL = []
        mU = MarshalUtil(workPath=self.__cachePath)
        # combine CCDC and COD model build index files
        modelIndexD = self.__ccdcmb.fetchModelIndex()
        codD = self.__codmb.fetchModelIndex()
        for pId, mDL in codD.items():
            if pId in modelIndexD:
                modelIndexD[pId] += codD[pId]
            else:
                modelIndexD[pId] = codD[pId]
        #
        modelIndexD = self.__addPriorMatchDetails(modelIndexD)
        modelIndexD = self.__updateVariantDetails(modelIndexD)
        priorMapD = {}
        for _, mDL in modelIndexD.items():
            # Sort so prior-public models, then standard variants, then best
            # (lowest) R-factor come first; fall back when rFactor is absent.
            try:
                mDLS = sorted(mDL, key=itemgetter("priorModelId", "variantType", "rFactor"), reverse=False)
            except Exception:
                mDLS = sorted(mDL, key=itemgetter("priorModelId", "variantType"), reverse=False)
            numStd = 0
            matchIdD = {}
            for mD in mDLS:
                isStd = False
                if mD["variantType"].startswith("A"):
                    numStd += 1
                    isStd = True
                #
                if "rFactor" in mD and mD["rFactor"] and mD["rFactor"] > maxRFactor:
                    logger.info("Skipping model %s isStd (%r) rValue (%r)", mD["modelId"], isStd, mD["rFactor"])
                    continue
                if numStd and not isStd:
                    logger.info("Skipping model %s isStd (%r) numStd (%d)", mD["modelId"], isStd, numStd)
                    continue
                #
                # Exclude duplicate matches in priority order ...
                if mD["matchId"] in matchIdD:
                    logger.info("Skipping duplicate matchId %r in %r", mD["matchId"], mD["modelId"])
                    continue
                #
                matchIdD[mD["matchId"]] = True
                cL = mU.doImport(mD["modelPath"], fmt="mmcif")
                logger.debug("Read %d from %s", len(cL), mD["modelPath"])
                dataContainerL.extend(cL)
                if not mD["priorModelId"].startswith("Z"):
                    priorMapD[mD["modelId"]] = (mD["priorModelId"], mD["priorMatchDate"])
        #
        logger.debug("priorMapD %r", priorMapD)
        fn = "chem_comp_models-%s.cif" % self.__getToday()
        assembleModelPath = os.path.join(self.__ccdcmb.getModelDirFilePath(), fn)
        # -- relabel: keep prior public ids, number the rest sequentially
        parentModelCountD = defaultdict(int)
        priorIdLD = {}
        for dataContainer in dataContainerL:
            tModelId = dataContainer.getName()
            tId = self.__parseId(tModelId)[0]
            pId = tId.split("|")[0]
            if tModelId in priorMapD:
                pCount = self.__parseId(priorMapD[tModelId][0])[1]
                priorIdLD.setdefault(pId, []).append(pCount)
                self.__replaceModelId(dataContainer, tModelId, priorMapD[tModelId][0])
                self.__updateAuditDate(dataContainer, priorMapD[tModelId][1])
                parentModelCountD[pId] = sorted(priorIdLD[pId])[-1]
                logger.debug("%s current model %r prior model %r count %d", pId, tModelId, priorMapD[tModelId][0], parentModelCountD[pId])
            else:
                parentModelCountD[pId] += 1
                pModelId = self.__makePublicModelId(pId, parentModelCountD[pId])
                self.__replaceModelId(dataContainer, tModelId, pModelId)
        ok = mU.doExport(assembleModelPath, dataContainerL, fmt="mmcif")
        logger.info("Assembled %d models status %r", len(dataContainerL), ok)
        self.__checkAssembledModels(assembleModelPath)
        return len(dataContainerL)

    def __parseId(self, modelId):
        """Parse the input model identifier and return component/PRD id and model count.

        Args:
            modelId (str): model identifier for chemical component or BIRD

        Returns:
            (str, int): component/bird id, model count — (None, None) on failure
        """
        mId = None
        mCount = None
        try:
            ff = modelId.split("_")
            if len(ff) == 3:
                mId = ff[1]
                mCount = int(ff[2])
            elif len(ff) == 4:
                mId = "_".join(ff[1:3])
                mCount = int(ff[3])
        except Exception as e:
            logger.exception("Failing for %r with %s", modelId, str(e))
        #
        return (mId, mCount)

    def __getAuditDetails(self):
        """Fetch the audit details from the current model dictionary file.

        Returns:
            (dict): {ccId: [{"model_id": modelId, "db_name": dbName, "db_code": dbCode, "audit_list": aL}]
                    where aL = [{"audit_date": auditDate, "action_type": auditAction}]
        """
        if self.__urlTarget:
            ccm = ChemCompModelProvider(cachePath=self.__cachePath, useCache=False, urlTarget=self.__urlTarget)
        else:
            ccm = ChemCompModelProvider(cachePath=self.__cachePath, useCache=False)
        rD = ccm.getAuditDetails()
        return rD

    def __addPriorMatchDetails(self, modelIndexD):
        """Mark every current match with the prior public model id and audit
        date when the same CSD match was previously released; unmatched
        entries receive the sentinel 'Znone' (sorts after real ids)."""
        priorMatchLD = self.__getAuditDetails()
        for pId, mDL in modelIndexD.items():
            for mD in mDL:
                mD["priorModelId"] = "Znone"
            if pId in priorMatchLD:
                logger.debug("priorMatchLD %r", priorMatchLD[pId])
                numMatch = 0
                for priorMatch in priorMatchLD[pId]:
                    priorModelId = priorMatch["model_id"]
                    priorDbName = priorMatch["db_name"]
                    priorDbCode = priorMatch["db_code"]
                    priorDate = priorMatch["audit_list"][-1]["audit_date"]
                    # compare with current matches
                    for mD in mDL:
                        curDbName = "CSD"
                        # curDbName = mD["matchDb"]
                        curDbCode = mD["matchId"]
                        if priorDbName == curDbName and curDbCode == priorDbCode:
                            numMatch += 1
                            mD["priorModelId"] = priorModelId
                            mD["priorMatchDate"] = priorDate
                if numMatch:
                    logger.info("%s has prior matches (%d)", pId, numMatch)
        return modelIndexD

    def __updateVariantDetails(self, modelIndexD):
        """Assign the default variant type 'Anone' to models lacking one so
        variant sorting is well defined."""
        for _, mDL in modelIndexD.items():
            for mD in mDL:
                if not mD["variantType"]:
                    mD["variantType"] = "Anone"
        return modelIndexD

    def __makePublicModelId(self, parentId, modelNum=1):
        """Compose a public model identifier, e.g. M_ABC_00001."""
        modelId = "M_" + parentId + "_%05d" % modelNum
        return modelId

    def __checkAssembledModels(self, assembleModelPath):
        """Consistency-check the assembled file: unique datablock names,
        id/datablock agreement, and per-parent category row counts (models
        flagged heavy_atoms_only are excluded from the row-count check)."""
        catNameL = [
            "pdbx_chem_comp_model",
            "pdbx_chem_comp_model_atom",
            "pdbx_chem_comp_model_bond",
            "pdbx_chem_comp_model_descriptor",
            "pdbx_chem_comp_model_reference",
            "pdbx_chem_comp_model_feature",
            "pdbx_chem_comp_model_audit",
        ]
        mU = MarshalUtil(workPath=self.__cachePath)
        dataContainerL = mU.doImport(assembleModelPath, fmt="mmcif")
        logger.info("Read %d data containers", len(dataContainerL))
        rD = {}
        cnD = {}
        for dataContainer in dataContainerL:
            nm = dataContainer.getName()
            logger.debug("datacontainer %r", nm)
            if nm in cnD:
                logger.info("Duplicate container id %r", nm)
            cnD[nm] = True
            #
            pId = self.__parseId(nm)[0]
            cObj = dataContainer.getObj("pdbx_chem_comp_model")
            modelId = cObj.getValue("id", 0)
            if modelId != nm:
                logger.error("modelId %r datablock %r", modelId, nm)
            #
            tD = {}
            for catName in catNameL:
                cObj = dataContainer.getObj(catName)
                nRows = cObj.getRowCount()
                tD[catName] = nRows
            cObj = dataContainer.getObj("pdbx_chem_comp_model_feature")
            skip = False
            for ii in range(cObj.getRowCount()):
                fN = cObj.getValue("feature_name", ii)
                fV = cObj.getValue("feature_value", ii)
                if fN == "heavy_atoms_only" and fV == "Y":
                    skip = True
                    break
            if not skip:
                rD.setdefault(pId, []).append(tD)
        #
        for pId, tDL in rD.items():
            for catName in catNameL:
                minV = 100000
                maxV = -1
                for tD in tDL:
                    minV = min(minV, tD[catName])
                    maxV = max(maxV, tD[catName])
                if maxV - minV > 2 and catName not in ["pdbx_chem_comp_model_feature"]:
                    logger.error("%s %s row count inconsistency %d %d", pId, catName, minV, maxV)

    def __replaceModelId(self, dataContainer, oldModelId, newModelId):
        """Update all instances of the model id in the input container with the
        input replacement value.

        Args:
            dataContainer (obj): input container
            oldModelId (str): current model id value
            newModelId (str): replacement model id value
        """
        updateL = [
            ("pdbx_chem_comp_model", "id"),
            ("pdbx_chem_comp_model_atom", "model_id"),
            ("pdbx_chem_comp_model_bond", "model_id"),
            ("pdbx_chem_comp_model_descriptor", "model_id"),
            ("pdbx_chem_comp_model_reference", "model_id"),
            ("pdbx_chem_comp_model_feature", "model_id"),
            ("pdbx_chem_comp_model_audit", "model_id"),
        ]
        tup = ()
        try:
            dataContainer.setName(newModelId)
            for tup in updateL:
                cObj = dataContainer.getObj(tup[0])
                cObj.replaceValue(oldModelId, newModelId, tup[1])
        except Exception as e:
            logger.exception("Failing for cat %r %r and %r with %s", tup, oldModelId, newModelId, str(e))
        return dataContainer

    def __updateAuditDate(self, dataContainer, auditDate):
        """Set the date of the last audit record in the input container.

        Args:
            dataContainer (obj): input container
            auditDate (str): original audit date
        """
        try:
            cObj = dataContainer.getObj("pdbx_chem_comp_model_audit")
            ii = cObj.getRowCount()
            cObj.setValue(auditDate, "date", ii - 1)
        except Exception as e:
            logger.exception("Failing for %r with %s", auditDate, str(e))
        return dataContainer

    def __getToday(self):
        """Return a CIF style date-timestamp value for current local time -"""
        today = datetime.datetime.today()
        # format ="%Y-%m-%d:%H:%M"
        fmt = "%Y-%m-%d"
        return str(today.strftime(fmt))
| [
"john.westbrook@rcsb.org"
] | john.westbrook@rcsb.org |
b2dc3ba20396023b2dfbc243d1019fe2d64a8aed | e987cd566edc75997f9b02377514d4f3a0dba12c | /sys/src/Python/glue/GlueDoc.py | d232ba9d1930437ecefe845450be4cea57ffe4bc | [] | no_license | 7u83/maxdb-buildtools | f942adff2cd55d0a046b6ef3e18f6645b011a26e | ce9a56943f6195d6755e983035aa96cbe95e6cb2 | refs/heads/master | 2020-05-04T18:23:30.849371 | 2015-02-15T19:25:49 | 2015-02-15T19:25:49 | 30,428,297 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,400 | py |
#
# ========== licence begin LGPL
# Copyright (C) 2002 SAP AG
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ========== licence end
#
class GlueDoc:
    """Base class for glue documentation nodes.

    A node carries a Python-level name, a documentation string and an
    optional language restriction consulted by :meth:`supportFor`.
    """

    isMethod = None

    def __init__(self, name, doc, language=None):
        self.pyname = name
        self.doc = doc
        self.language = language

    def writeGlue(self, glue):
        """Hook for emitting glue output; the base node writes nothing."""
        pass

    def methods(self):
        """A plain documentation node exposes no methods."""
        return []

    def isRealConstructor(self):
        """Only real constructor nodes answer a true value here."""
        return None

    def supportFor(self, key, value):
        """Return a true value when this node matches the (key, value) filter.

        Any key other than 'language' always matches; a 'language' filter
        matches when no language is set or when it equals *value*.
        """
        if key != 'language':
            return 1
        if self.language is None:
            return 1
        return self.language == value

    def isDocumented(self):
        """Base nodes are always considered documented."""
        return 1
class GlueExample (GlueDoc):
    """Documentation node holding an inline example."""
    language = None
    def __init__ (self, name, doc, code = None):
        self.pyname = name
        self.doc = doc
        # Fix: the original default ``code = []`` was a shared mutable
        # default; each instance now gets its own fresh list.
        self.code = [] if code is None else code
    def getCode (self):
        # Fix: the original ``return code`` referenced an undefined global
        # (NameError); the example's code is held on the instance.
        return self.code
class GlueImportExample (GlueExample):
    """Example whose code is read from an external file at access time."""
    def __init__ (self, name, comment, fname):
        GlueExample.__init__ (self, name, comment)
        self.fname = fname
    def getCode (self):
        # Fix: the original caught ``IOException``, a name that does not
        # exist in Python — any read failure raised NameError instead of
        # falling back to the denial message.
        try:
            data = open (self.fname, "r").read ()
        except IOError:
            data = "Access to '%s' has been denied" % self.fname
        return data
class GlueExternExample (GlueExample):
    # Example kept outside the document: only the file name is recorded.
    def __init__ (self, name, comment, fname):
        GlueExample.__init__ (self, name, comment)
        self.fname = fname
    def getCode (self):
        # External examples deliberately contribute no inline code.
        return None
class GlueDirectory (GlueDoc):
    # Documentation node grouping a list of sub-items (empty doc string).
    def __init__ (self, name, language, items):
        GlueDoc.__init__ (self, name, '', language)
        self.items = items
    def methods (self):
        # NOTE(review): returns [] and ignores self.items — confirm whether
        # directory items were meant to be surfaced here.
        return []
| [
"7u83@mail.ru"
] | 7u83@mail.ru |
461e3e4738dde29dad72d1244aac00aa59a41a84 | 4ba29d0e50d0af604231834b099faa35f2cb369f | /task.py | 8134f928af0feb5ec687a03e666609b86200749f | [] | no_license | boxabhi/task | 8a10b90c429c3e3bdd1d86a5a7e8bfb97653b1ec | 242b7c325821941a95962bdfce384bb4519861fe | refs/heads/main | 2023-04-14T11:13:13.381423 | 2021-04-27T05:48:41 | 2021-04-27T05:48:41 | 361,995,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py |
def checkout_time_for_customers(customers, cashregisters: int):
    """Return the total time until every customer has checked out.

    Args:
        customers: per-customer checkout times, in queue order (assumed to be
            positive numbers).
        cashregisters: number of parallel tills.

    Each till that frees up takes the next customer in the queue — the same
    earliest-available-till policy the original simulated minute by minute.
    Improvements over the original:
      * the caller's ``customers`` list is no longer mutated (it was emptied
        via ``pop(0)``),
      * an empty queue returns 0 instead of raising ValueError (``max([])``),
      * runs in O(n log k) instead of O(total elapsed time).
    """
    import heapq

    if not customers:
        # Robustness fix: nothing to do — previously max([]) raised for k >= 2.
        return 0
    if cashregisters < 1:
        # Degenerate input: no tills; matches the original's behaviour (0).
        return 0
    if cashregisters == 1:
        # A single till simply serves everyone back to back.
        return sum(customers)
    if len(customers) <= cashregisters:
        # Everyone starts immediately; the slowest customer decides.
        return max(customers)
    # Min-heap of till finish times: seed with the first k customers, then
    # always hand the next customer to the till that frees up first.
    tills = list(customers[:cashregisters])
    heapq.heapify(tills)
    for time_needed in customers[cashregisters:]:
        heapq.heappush(tills, heapq.heappop(tills) + time_needed)
    return max(tills)
# Demo: with a single till the total time is the plain sum (5+1+3 = 9).
print( checkout_time_for_customers([5, 1, 3], 1))
def check_string(string_list):
    """Return True when every character of the second string in *string_list*
    occurs (case-insensitively) in the first string, else False."""
    reference, candidate = string_list
    seen = {ch.lower() for ch in reference}
    return all(ch.lower() in seen for ch in candidate)
# Demo runs: expected output True, False, True.
print(check_string(["hello", "Hello"]))
print(check_string(["hello", "hey"]))
print(check_string(["Alien", "line"]))
| [
"abhijeetg40@gmail.com"
] | abhijeetg40@gmail.com |
e5006a48e87f2df9244a3b3122b5a6df5ffdd88a | 8c96b3a657cfb1cd360b469dac564af58a946199 | /repo/migrations/0001_initial.py | 9a2ccef5ac67f632900fb51ac16ffd866142fa31 | [] | no_license | nc415/ub40 | db3de90533c2adcb997f1ffa57a3099b1905a331 | a3642aad555d355dc8bfd29a3a34bfd6e7507a43 | refs/heads/master | 2021-05-10T18:49:52.746856 | 2018-01-19T14:28:38 | 2018-01-19T14:28:38 | 118,134,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2018-01-15 07:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django 1.9 schema migration — do not edit field
    # definitions by hand; create a follow-up migration instead.

    # First migration of this app: creates the initial tables.
    initial = True

    dependencies = [
    ]

    operations = [
        # Business-unit table with an auto primary key and a name.
        migrations.CreateModel(
            name='BU',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('BU_Name', models.CharField(max_length=128)),
            ],
        ),
        # Company table: name, optional region, slug and creation timestamp.
        migrations.CreateModel(
            name='Company',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Company_Name', models.CharField(max_length=128)),
                ('Company_Region', models.CharField(blank=True, max_length=128)),
                ('pageid', models.SlugField(blank=True)),
                ('created_at', models.DateTimeField(auto_now_add=True, null=True)),
            ],
        ),
    ]
| [
"njcollins@live.co.uk"
] | njcollins@live.co.uk |
52b7a8983058473eafef32a6b44cae2203355232 | 052b287892e5e224df4a12f86a26efbbb31e0e2e | /canaimagnulinux/web/policy/utils.py | adab4ec0d8fc5cfb15e7c416dfcdf46ecbb06cdf | [] | no_license | soullessdead/canaimagnulinux.web.policy | 6deb5771777f91f3e117c8960406942a81593a39 | b9adffb0247633c9d89e87bb974bd764e1ae4abd | refs/heads/master | 2021-01-18T12:41:36.024673 | 2014-07-02T23:43:29 | 2014-07-02T23:43:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,292 | py | # -*- coding: utf-8 -*-
import transaction
import os, sys, re, string
from sets import Set
from StringIO import StringIO
from time import gmtime, strftime
from zLOG import LOG, INFO
from zExceptions import BadRequest
from App.config import getConfiguration
from Products.CMFCore.utils import getToolByName
from Products.CMFCore.DirectoryView import addDirectoryViews
from Globals import package_home
from canaimagnulinux.web.policy.fixes import fix
from canaimagnulinux.web.policy import GLOBALS
# Policy applied when an imported object id already exists in the portal
# root; must be one of ALLOWED_IMPORT_POLICY.
IMPORT_POLICY = "backup"
# Shorthand for os.path used throughout this module.
osp = os.path
ALLOWED_IMPORT_POLICY = ["only_new", "backup", "overwrite"]
# Progress messages written to the in-memory import log (import_out).
INTRO_TO_INSTANCE = "< Started copying object files from Product import directory to Instance one."
SUMMARY_TO_INSTANCE = "> Finished copying."
INTRO_TO_ROOT = "< Started import %s file[s] with '%s' policy."
SUMMARY_TO_ROOT = "> Finished importing."
INTRO_CLEAN = "< Started cleaning Instance import directory."
SUMMARY_CLEAN = "> Finished cleaning."
# Matches Zope's "id already in use" error text and captures the clashing id.
CREXP_INVALID_ID = re.compile('^The id \"(.*?)\" is invalid - it is already in use.$', re.DOTALL|re.IGNORECASE|re.MULTILINE)
# supporting qPSD-0.5.3 version
CSS_BASE_IDS_QPSD053 = ['id','expression','enabled','cookable','media','rel','title','rendering']
def checkIfImport():
    """ Return if perform importing, based on checking
    *zexp files in <SkinProduct>/import directory.

    Returns 1 when at least one *.zexp file is waiting, else 0.
    """
    # NOTE(review): instance_ipath is unpacked but unused here.
    instance_ipath, product_ipath = getImportedPathes()
    product_ilist = [i for i in os.listdir(product_ipath) \
                     if osp.isfile(osp.join(product_ipath,i)) and i.endswith('.zexp')]
    if product_ilist:
        return 1
    return 0
def getImportedPathes():
    """ Return Plone instance and Skin product import pathes.

    Returns [instance_import_path, product_import_path]; raises BadRequest
    (Python 2 raise syntax) when either directory is missing.
    """
    # Based on instance path, construct import pathes
    cfg = getConfiguration()
    instance_ipath = osp.join(cfg.instancehome, "import")
    product_ipath = osp.join(package_home(GLOBALS), "import")
    # Check presence of Product import directory
    if not osp.isdir(product_ipath):
        raise BadRequest, "Skin Product's import directory '%s' - does not exist or is'nt direcory" % product_ipath
    # Check presence of Instance import directory
    if not osp.isdir(instance_ipath):
        raise BadRequest, "Instance import directory '%s' - does not exist or isn't direcory" % instance_ipath
    return [instance_ipath, product_ipath]
def copyFile(src_dir, dst_dir, f_name):
    """ Copy file from src_dir to dst_dir under original name.

    Binary copy; failures are logged to the in-memory import log rather
    than raised.
    """
    try:
        src_file = open(osp.join(src_dir, f_name),"rb")
        dst_file = open(osp.join(dst_dir, f_name),"wb")
        dst_file.write(src_file.read())
        dst_file.close()
        src_file.close()
    except Exception, e:
        msg = "!!! In copying files from < %s > dir to < %s > dir exception occur. Details: %s." % (src_dir,dst_dir, str(e))
        print >> import_out, msg
        LOG('performImportToPortal',INFO,'copyFile', msg)
def moveToTemp(same_instance_files, instance_ipath, temp_dir_path):
    """ Move samenamed files from Instanse's dir to temp dir."""
    # Copy-then-delete: files are duplicated into the freshly created
    # back_[date] directory before the originals are removed.
    os.mkdir(temp_dir_path) # Create temp back_[date] dir
    try:
        [copyFile(instance_ipath, temp_dir_path, f_name) for f_name in same_instance_files]
        [os.remove(osp.join(instance_ipath, f_name)) for f_name in same_instance_files]
    except Exception, e:
        msg = "!!! Exception occur during moving files from Instance's dir to temp dir. Detaile:%s." % str(e)
        print >> import_out, msg
        LOG('performImportToPortal',INFO,'moveToTemp', msg)
def copyToInstanceImport():
    """ Perform copying imported files from <SkinProduct>/import dir
    to Plone's instance import dir.

    Returns [instance_ipath, product_ipath, temp_dir_path, product_ilist];
    same-named files already present in the instance dir are first moved
    aside into a back_[date] directory.
    """
    print >> import_out, INTRO_TO_INSTANCE
    instance_ipath, product_ipath = getImportedPathes()
    # Compose temp dir back_[date] dir path in Instance import directory
    temp_dir_id = "back_%s" % strftime("%Y%m%d%H%M%S", gmtime())
    temp_dir_path = osp.join(instance_ipath, temp_dir_id)
    # Get *.zexp files from Skin Product's import dir and Plone's instance import dir files
    product_ilist = [i for i in os.listdir(product_ipath) \
                     if osp.isfile(osp.join(product_ipath,i)) and i.endswith('.zexp')]
    instance_ilist = [i for i in os.listdir(instance_ipath) \
                      if osp.isfile(osp.join(instance_ipath,i)) and i.endswith('.zexp')]
    # Check for presence samenamed files in Instance and Product import directories.
    same_instance_files = [f_name for f_name in instance_ilist if f_name in product_ilist]
    if same_instance_files:
        moveToTemp(same_instance_files, instance_ipath, temp_dir_path)
    # Copy all *zexp files from Product's import dir to Instance's import dir
    [copyFile(product_ipath, instance_ipath, f_name) for f_name in product_ilist]
    print >> import_out, SUMMARY_TO_INSTANCE
    return [instance_ipath, product_ipath, temp_dir_path, product_ilist]
def importObject(portal, file_name):
    """ Work around old Zope bug in importing."""
    try:
        portal.manage_importObject(file_name)
    except:
        # NOTE(review): bare except — on ANY failure the connection is
        # re-bound and the import retried once; a second failure propagates.
        portal._p_jar = portal.Destination()._p_jar
        portal.manage_importObject(file_name)
def makeBackUp(portal, portal_objects, temp_dir_path, obj_id):
    """ Perfom backup same named portal objects in temp folder.

    Moves the clashing portal-root object obj_id into a Large Plone Folder
    whose id is derived from temp_dir_path, creating that folder on demand.
    """
    # Get id of temp folder-object
    durty_path,temp_id = osp.split(temp_dir_path)
    if not temp_id:
        # Path ended with a separator: split once more to get the last component.
        durty_path,temp_id = osp.split(durty_path)
    # Get temp folder-object
    if temp_id not in portal_objects:
        portal.invokeFactory('Large Plone Folder', id=temp_id)
        print >> import_out, "! Created '%s' backup directory with same-ids " \
                             "objects from portal root." % temp_id
    temp_dir = getattr(portal, temp_id)
    # Move object with same id to temp folder-object
    #get_transaction().commit(1)
    transaction.savepoint()
    obj = portal.manage_cutObjects(ids=[obj_id])
    temp_dir.manage_pasteObjects(obj)
    print >> import_out, "! '%s' Object moved from portal root to '%s' backup directory." % (obj_id, temp_id)
def performImport(portal, temp_dir_path, file_name):
    """ Importing an object to portal.

    On an "id already in use" error the module-level IMPORT_POLICY decides:
    only_new skips, backup moves the old object aside first, overwrite
    deletes it; other errors fall through to the old-Zope retry path.
    """
    portal_objects = portal.objectIds()
    try:
        portal.manage_importObject(file_name)
    except Exception, e:
        msg = str(e)
        is_invalid_id = CREXP_INVALID_ID.match(msg)
        if is_invalid_id:
            obj_id = is_invalid_id.group(1)
            if IMPORT_POLICY == "only_new":
                msg = "! Object with '%s' id was not importing because it's already exist " \
                      "in portal root." % obj_id
                print >> import_out, msg
            elif IMPORT_POLICY == "backup":
                makeBackUp(portal, portal_objects, temp_dir_path, obj_id)
                importObject(portal, file_name)
            elif IMPORT_POLICY == "overwrite":
                portal.manage_delObjects(ids=[obj_id])
                importObject(portal, file_name)
        else:
            # work around old Zope bug in importing
            portal._p_jar = portal.Destination()._p_jar
            portal.manage_importObject(file_name)
def importToPortalRoot(portal, product_file_names, temp_dir_path):
    """ Import all objects from *zexp files to portal root (based on IMPORT_POLICY)."""
    if not IMPORT_POLICY in ALLOWED_IMPORT_POLICY:
        raise Exception("%s - wrong import policy, must be one of the %s" \
                        % (IMPORT_POLICY, ALLOWED_IMPORT_POLICY) )
    print >> import_out, INTRO_TO_ROOT % (product_file_names, IMPORT_POLICY)
    for file_name in product_file_names:
        try:
            # Temporary allow implicitly adding Large Plone Folder
            types_tool = getToolByName(portal, 'portal_types')
            lpf_fti = types_tool['Large Plone Folder']
            lpf_global_setting = lpf_fti.global_allow
            lpf_fti.global_allow = 1
            try:
                performImport(portal, temp_dir_path, file_name)
            finally:
                # Always restore the original global_allow setting.
                lpf_fti.global_allow = lpf_global_setting
        except Exception, error:
            # One bad file must not abort the whole batch: log and continue.
            msg = '!!! Under "%s" policy importing exception occur: %s.' % (IMPORT_POLICY, str(error))
            print >> import_out, msg
            LOG('performImportToPortal',INFO,'importToPortalRoot', msg)
    print >> import_out, SUMMARY_TO_ROOT
def cleanInstanceImport(instance_ipath, product_file_names, temp_dir_path):
    """ Cleaning Plone's import dir.

    Deletes the copied *.zexp files and restores any files that were moved
    into the back_[date] directory, removing that directory afterwards.
    """
    print >> import_out, INTRO_CLEAN
    # Erase all copied *zexp files from Instance's import dir
    for f_name in product_file_names:
        f_path = osp.join(instance_ipath, f_name)
        if osp.exists(f_path) and osp.isfile(f_path):
            os.remove(f_path)
        else:
            msg = '! "%s" file was not deleted from "%s" import directory.' %\
                  (f_name, osp.join(instance_ipath))
            print >> import_out, msg
            LOG('performImportToPortal',INFO,'cleanInstanceImport', msg)
    # Move all files from temp back_[date] dir to Instance's import dir
    if osp.exists(temp_dir_path) and osp.isdir(temp_dir_path):
        f_names = os.listdir(temp_dir_path)
        try:
            [copyFile(temp_dir_path, instance_ipath, f_name) for f_name in f_names]
            [os.remove(osp.join(temp_dir_path, f_name)) for f_name in f_names]
            # Erase temp back_[date] dir
            os.rmdir(temp_dir_path)
        except Exception, e:
            msg = "!!! In moving files from temp dir to Instance's import dir exception occur."
            print >> import_out, msg
            LOG('performImportToPortal',INFO,'moveFromTempToImport', msg)
    print >> import_out, SUMMARY_CLEAN
def fixImportingIssues(portal, beforeimporting_objects):
    ''' Fix defects of importing process: reindexing, other'''
    # Apply fix() to every object the import added to the portal root,
    # skipping the back_[date] backup folders created by makeBackUp.
    afterimporting_objects = portal.objectItems()
    diff_objects = list(Set(afterimporting_objects)-Set(beforeimporting_objects))
    for id, ob in diff_objects:
        if id.startswith('back_'):
            continue
        fix(ob)
def performImportToPortal(portal):
    """ Import objects from Skin Product to Portal root.

    Returns the accumulated log text.  The module-global `import_out`
    StringIO is created here and removed again so the helper functions
    above can write progress messages to it.
    """
    globals()['import_out'] = StringIO()
    instance_ipath, product_ipath, temp_dir_path, product_file_names = copyToInstanceImport()
    if product_file_names:
        beforeimporting_objects = portal.objectItems()
        importToPortalRoot(portal, product_file_names, temp_dir_path)
        fixImportingIssues(portal, beforeimporting_objects)
        cleanInstanceImport(instance_ipath, product_file_names, temp_dir_path)
    else:
        print >> import_out, "!!! Failure importing: there is no file for importing to be found."
    result = import_out.getvalue()
    del globals()['import_out']
    return result
####################
def importZEXPs(context):
    '''GenericSetup import step: import bundled *.zexp files into the portal.'''
    # Only run for this profile: the marker file must be present.
    if context.readDataFile("canaimagnulinux.web.policy_various.txt") is None:
        return
    portal = context.getSite()
    if checkIfImport():
        performImportToPortal(portal)
def createCollage(context, title):
    """Create a Collage in the given context, with an id derived from the title.
    """
    # NOTE(review): `idnormalizer` is not imported anywhere in this module,
    # so this line raises NameError as written — confirm the missing
    # plone.i18n normalizer import.
    id = idnormalizer.normalize(title, 'es')
    if not hasattr(context, id):
        context.invokeFactory('Collage', id=id, title=title)
"leonardocaballero@gmail.com"
] | leonardocaballero@gmail.com |
def factorialbaap(f, p):
    """Print (and return) the largest exponent e such that p**e divides f!.

    Rewrite of the original brute-force version, which computed f! and then
    probed p**1 .. p**999 with fragile flag logic.  Fixes:
      * the original printed 0 whenever the probe loop never overshot f!
        (flag stayed 0) even though a valid exponent had been found;
      * p == 0 raised ZeroDivisionError (``n % 0``); it now prints 0;
      * the exponent is now also returned so callers can use the value
        (the original implicitly returned None; printing is preserved).
    p values of 1 or less print/return 0, as the original did for p == 1.
    """
    # f! computed exactly with Python's arbitrary-precision integers.
    factorial = 1
    for i in range(f, 0, -1):
        factorial *= i
    exponent = 0
    if p > 1:
        # Strip factors of p one at a time: counts the exact multiplicity.
        while factorial % p == 0:
            factorial //= p
            exponent += 1
    print(exponent)
    return exponent
# Driver: read the number of test cases, then one "f p" pair per line.
n=int(input())
for i in range(0,n,1):
    x,y =[int(i) for i in input().strip().split(' ')]
    factorialbaap(x,y)
| [
"noreply@github.com"
] | sbd2309.noreply@github.com |
c264903b770885106ba842f139ebd7276582f48c | c3082eb2adc43b311dd3c9ff16fd3ed9df85f266 | /python/examples/iterators/iterator.py | af8ec00738a77944aec8db10c66ae4989a13a74a | [] | no_license | szabgab/slides | 78818c7138331b3ba9e221c81da3678a46efe9b3 | 63bba06678554db737602f2fbcd6510c36037e8a | refs/heads/main | 2023-08-31T07:13:51.536711 | 2023-08-29T13:17:59 | 2023-08-29T13:17:59 | 122,212,527 | 87 | 69 | null | 2023-05-19T06:55:11 | 2018-02-20T14:57:03 | Python | UTF-8 | Python | false | false | 237 | py | class Counter():
    def __init__(self):
        # Last value handed out; the first __next__ call returns 1.
        self.count = 0
    def __iter__(self):
        # The counter is its own iterator.
        return self
def __next__(self):
self.count += 1
return self.count
# Consume the infinite counter until the value exceeds 10.
for c in Counter():
    print(c)
    if c > 10:
        break
| [
"gabor@szabgab.com"
] | gabor@szabgab.com |
3cf933e63768f3782c4288670a9dbd91e6322762 | e267c91f23055397201c3d9c23d7583b269d51b8 | /backend/pugorugh/tests/test_models.py | 8dc2afd1f43216ec105938d91567b1737188eab6 | [] | no_license | mcintoshsg/pug_or_ugh_v1 | 8678213b4b4ea09a70f369aa08002ff4a8194a29 | 3e735cd840ffc5a85497eab48518800f0757d9f3 | refs/heads/master | 2020-03-19T15:26:41.152968 | 2018-06-14T01:30:49 | 2018-06-14T01:30:49 | 136,670,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,032 | py | from django.contrib.auth.models import User
from django.test import TestCase
from pugorugh.models import Dog, UserDog, UserPref
# 2. Test get all dogs
# 3. Test get single dog
# 4. Test delete single dog
# 5. Test update single dog
# 6. Test create user preferences
# 7. Test get user preferences
# 8. Test update user preferences
# 9. Test update new user preferences - updates all dogs that match the user's preferences
# 10. Test validiators - bad entries
# 11. Test get all liked dogs
# 12. Test get all unliked dogs
# 13. Test get all undecided dogs
# 14. Test iterate through next like or disliked or undecided
# 15. Test new user creation - token creates
# 16. Test the URLS
# Create a base test case providing shared model fixtures for the test classes below
class BaseTestCase(TestCase):
    """Shared fixtures: two Dog rows used by every model test case below."""

    def setUp(self):
        """Create one medium male dog and one large female dog."""
        self.dog_1 = Dog.objects.create(
            name='dog_1',
            image_filename='1.jpg',
            breed='mutt',
            age=12,
            gender='m',
            size='m',
        )
        self.dog_2 = Dog.objects.create(
            name='dog_2',
            image_filename='2.jpg',
            breed='mutt',
            age=48,
            gender='f',
            size='l',
        )

    def tearDown(self):
        # No explicit cleanup needed; kept for symmetry with setUp.
        pass
class UserModelTestCase(BaseTestCase):
    """Test cases for Django's built-in User model."""

    @staticmethod
    def create_test_users(count=2):
        """Create `count` users named user_0, user_1, ...

        Uses create_user() rather than objects.create() so the password is
        properly hashed instead of being stored as plain text.
        """
        for i in range(count):
            User.objects.create_user(
                username='user_{}'.format(i),
                email='test_{}@example.com'.format(i),
                password='password'
            )

    def test_create_user(self):
        """Two users exist and the stored (hashed) password verifies."""
        self.create_test_users()
        self.assertEqual(User.objects.count(), 2)
        # check_password() compares against the hash; comparing the raw
        # .password field to 'password' only worked because the old code
        # bypassed hashing entirely.
        self.assertTrue(User.objects.get(id=1).check_password('password'))
class DogModelTests(BaseTestCase):
    """Tests for the Dog model."""

    def test_dog_creation(self):
        """Each dog created in setUp can be fetched back by name."""
        self.assertEqual(Dog.objects.get(name="dog_1"), self.dog_1)
        self.assertEqual(Dog.objects.get(name="dog_2"), self.dog_2)
class UserDogModelTests(BaseTestCase):
    """Tests for the UserDog association model."""

    def create_user_dogs(self):
        """Create two users and an undecided UserDog row for every
        (user, dog) pair -- four rows in total."""
        UserModelTestCase.create_test_users(2)
        self.user_1 = User.objects.get(id=1)
        self.user_2 = User.objects.get(id=2)
        # Same creation order as before: user_1 with both dogs, then user_2.
        for user in (self.user_1, self.user_2):
            for dog in (self.dog_1, self.dog_2):
                UserDog.objects.create(user=user, dog=dog, status='u')

    def test_user_dog_creation(self):
        """Four rows exist; the first belongs to user_1 with status 'u'."""
        self.create_user_dogs()
        self.assertEqual(UserDog.objects.count(), 4)
        first = UserDog.objects.get(id=1)
        self.assertEqual(first.user, self.user_1)
        self.assertEqual(first.status, 'u')
class UserPrefModelTests(BaseTestCase):
    """Tests for the UserPref model."""

    def create_user_prefs(self):
        """Create one user and one UserPref row with age/gender/size filters."""
        UserModelTestCase.create_test_users(1)
        self.user_1 = User.objects.get(id=1)
        UserPref.objects.create(user=self.user_1,
                                age='b,y',
                                gender='m,f',
                                size='l,xl'
                                )

    # Renamed from test_user_dog_creation: the old name (and its docstring)
    # was copy-pasted from UserDogModelTests and mislabelled this test in
    # runner output.
    def test_user_pref_creation(self):
        """One preference row exists with the expected owner and gender."""
        self.create_user_prefs()
        self.assertEqual(UserPref.objects.count(), 1)
        self.assertEqual(UserPref.objects.get(id=1).user, self.user_1)
        self.assertEqual(UserPref.objects.get(id=1).gender, 'm,f')
| [
"s.g.mcintosh@gmail.com"
] | s.g.mcintosh@gmail.com |
edd7051f3b24b7ae5b7bbd28ade6fb8b9621ccf9 | e5eec1428da1d24d3e9b86f5723c51cd2ca636cd | /DFS-BFS/백준/골드/신기한 소수_dfs.py | 118ff0683648aeac2cd16c566aebdc946eedd153 | [] | no_license | jamwomsoo/Algorithm_prac | 3c36c381f59277721517d331a8f1640399d80c1d | 8393f3cc2f950214c47f3cf0b2c1271791f115d0 | refs/heads/master | 2023-06-09T06:49:14.739255 | 2021-06-18T06:41:01 | 2021-06-18T06:41:01 | 325,227,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py |
# Number of digits of the "amazing primes" to search for (every prefix of
# the printed numbers must itself be prime).
n = int(input())
# The four single-digit primes: the only valid leading digits of an answer.
prime = [2,3,5,7]
def isprime(num):
    """Return True if num is a prime number (num < 2 is not prime)."""
    if num < 2:
        return False
    # Trial division only up to sqrt(num): any factor pair of num has one
    # member <= sqrt(num), so scanning all the way to num (as before) is
    # O(n) work for the same answer.
    for i in range(2, int(num ** 0.5) + 1):
        if num % i == 0:
            return False
    return True
def dfs(first, num):
    """Print every extension of `first` by `num` more digits in which every
    decimal prefix is prime.

    Args:
        first: current prefix value (already prime when called from the driver).
        num: digits still to append; when it reaches 0 the number is printed.
    """
    if num == 0:
        print(first)
        # Without this return the original kept recursing with negative `num`;
        # it never printed again but wasted work exploring longer primes.
        return
    # Only odd appended digits can keep the new prefix prime (an even last
    # digit makes it divisible by 2); 5 is filtered out by isprime() itself.
    for digit in range(1, 10, 2):
        candidate = first * 10 + digit
        if isprime(candidate):
            dfs(candidate, num - 1)
# Start one depth-first search from each single-digit prime; n - 1 digits
# remain to be appended.
for start in prime:
    dfs(start, n - 1)
| [
"41579282+jamwomsoo@users.noreply.github.com"
] | 41579282+jamwomsoo@users.noreply.github.com |
fda5619a7e5ab87fb558f09dcbc1753b0164f43d | 1f177b5e7bdaca49076c6ff806f5e2be9a86e834 | /database/orm/models.py | d75da518cbf97101ca5fe3648930f63c92200bf2 | [] | no_license | silverlyjoo/TIL | 9e19ba407a9dc82c231e66e352f1c7783e767782 | 98a139770a6d19598d787674bcf20d2fe744ced0 | refs/heads/master | 2021-08-17T02:10:35.101212 | 2019-08-26T08:21:32 | 2019-08-26T08:21:32 | 162,099,046 | 6 | 1 | null | 2021-06-10T21:20:36 | 2018-12-17T08:32:39 | Jupyter Notebook | UTF-8 | Python | false | false | 393 | py | from flask_sqlalchemy import SQLAlchemy
# Module-level Flask-SQLAlchemy extension object; presumably bound to the
# Flask app elsewhere via db.init_app(app) -- TODO confirm against the app
# factory, since no app is passed here.
db = SQLAlchemy()
# Define the table as a declarative model
class User(db.Model):
    """Declarative SQLAlchemy model for a user account (table 'users')."""
    __tablename__ = 'users'
    # Surrogate integer primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Login name: required and unique across all users.
    username = db.Column(db.String(80), unique=True, nullable=False)
    # Contact e-mail: required and unique across all users.
    email = db.Column(db.String(120), unique=True, nullable=False)
    def __repr__(self):
        # Debug representation, e.g. <user 'alice'>.
        return f"<user '{self.username}'>"
| [
"silverlyjoo@gmail.com"
] | silverlyjoo@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.