text stringlengths 8 6.05M |
|---|
class KeyError(Exception):
    """Raised when no valid Domainr API key is provided.

    NOTE(review): this shadows the builtin ``KeyError``. Renaming would
    break existing callers, so the name is kept, but ``except KeyError``
    in importing code will catch this class instead of the builtin.
    """
    message = "In order to query against Domainr you will need to provide valid Domainr key."

    def __init__(self, error_code=None, http_code=None):
        # Initialize the base Exception with the fixed message; the
        # previous `self.message = self.message` self-assignment was a no-op
        # (the class attribute already provides `message`) and is removed.
        super().__init__(self.message)
        self.error_code = error_code  # Domainr-specific error code, if any
        self.http_code = http_code    # HTTP status of the failed request
class DomainError(Exception):
    """Raised when an invalid domain name is supplied."""
    message = "Invalid domain name provided"

    def __init__(self, error_code=None, http_code=None):
        # The former `self.message = self.message` line was a no-op
        # (the class attribute already supplies `message`); removed.
        super().__init__(self.message)
        self.error_code = error_code  # Domainr-specific error code, if any
        self.http_code = http_code    # HTTP status of the failed request
class RequestTypeError(Exception):
    """Raised when an invalid Domainr request type is supplied."""
    message = "Invalid request type provided"

    def __init__(self, error_code=None, http_code=None):
        # The former `self.message = self.message` line was a no-op
        # (the class attribute already supplies `message`); removed.
        super().__init__(self.message)
        self.error_code = error_code  # Domainr-specific error code, if any
        self.http_code = http_code    # HTTP status of the failed request
class ResponseError(Exception):
    """Raised when Domainr returns an error response.

    Unlike the sibling errors, the message is supplied by the caller
    (taken from the API response) rather than fixed on the class.
    """

    def __init__(self, message='', error_code=None, http_code=None):
        super().__init__(message)
        # Mirror the message onto the instance so callers can read it
        # the same way as on the fixed-message error classes.
        self.message = message
        self.error_code = error_code
        self.http_code = http_code
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Gusseppe Bravo <gbravor@uni.pe>
# License: BSD 3 clause
"""
En esta clase se define que problema se va a solucionar.
Sea de clasificacion, regression, clustering. Ademas se debe
dar una idea de los posibles algoritmos que pueden ser usados.
"""
from pyspark.sql import SQLContext
from pyspark.sql.functions import col, rand, randn, when
#from pyspark import SparkContext, SparkConf#version 1.62
#try:
# from pyspark.ml.linalg import Vectors#Version 2
#except ImportError:
# from pyspark.mllib.linalg import Vectors#Version 1.62
class Define:
    """Defines the problem to be solved (classification, regression or
    clustering) and gathers basic metadata that hints at candidate
    algorithms.

    Results of the pipeline are stored on the instance:
    ``data`` (cleaned DataFrame), ``X`` (features), ``y`` (response),
    ``n_features`` and ``samples``.
    """

    def __init__(self,
                 spark_session,
                 data_path=None,
                 df=None,
                 header=None,
                 response='class',
                 num_features=None,
                 cat_features=None,
                 problem_type='classification'):
        # Spark entry point used for reading CSV data.
        self.spark_session = spark_session
        self.data_path = data_path
        self.df = df
        self.header = header
        # Name of the target/label column.
        self.response = response
        self.metadata = dict()
        self.problem_type = problem_type
        # Default algorithm suggestion; refined elsewhere.
        self.infer_algorithm = 'LogisticR'
        self.n_features = None
        self.num_features = num_features
        self.cat_features = cat_features
        self.samples = None
        self.size = None
        self.data = None
        self.X = None
        self.y = None

    def pipeline(self):
        """Run the definition steps in order and return self for chaining."""
        # Plain loop instead of a side-effect comprehension.
        for step in (self.read, self.description):
            step()
        return self

    def read(self):
        """Read the dataset from the given DataFrame or CSV path and split
        it into features (``self.X``) and response (``self.y``).

        Returns
        -------
        None — results are stored on ``self.data`` / ``self.X`` / ``self.y``.
        """
        try:
            if self.df is not None:
                # Bug fix: dropna() was previously applied twice; once suffices.
                self.data = self.df.dropna()
                self.X = self.data.drop(self.response)
                self.y = self.data.select(self.response)
            elif self.data_path is not None and self.response is not None:
                df = self.spark_session.read\
                    .format("csv")\
                    .option("header", "true")\
                    .option("mode", "DROPMALFORMED")\
                    .option("inferSchema", "true")\
                    .csv(self.data_path)
                self.data = df.dropna()
                self.X = self.data.drop(self.response)
                self.y = self.data.select(self.response)
        except Exception as e:
            # Best-effort: report and leave attributes unset.
            print("Error reading | ", e)

    def description(self):
        """Record basic dataset statistics (feature count, sample count)."""
        self.n_features = len(self.X.columns)
        self.samples = self.data.count()
|
from django.contrib import admin
from packages.models import (
ItemsList,
Item,
PackageSettings,
MonthBudgetAmount,
) # , UploadKey, UploadKeyList
# Register your models here.
# Register the package models with the default admin site so they can be
# managed through Django's admin UI. register() accepts a list of model
# classes; the commented entries are intentionally excluded.
admin.site.register(
    [
        ItemsList,
        Item,
        PackageSettings,
        MonthBudgetAmount,
        # UploadKey,
        # UploadKeyList
    ]
)
|
import json
import socket
# UDP IP address and port of the RPC server.
UDP_IP = "127.0.0.1"  # loopback — server assumed to run on the same host
UDP_PORT = 5005
class RPCClient:
    """Wrap a local function so that calling the wrapper sends an RPC
    request over UDP and returns the server's decoded text reply."""

    def __init__(self, func):
        # One datagram socket per wrapper, plus the wrapped function.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.tempFunc = func

    def __call__(self, *args, **kwargs):
        # NOTE(review): kwargs are accepted but never transmitted — confirm
        # against the server protocol before relying on keyword arguments.
        payload = json.dumps({"name": self.tempFunc.__name__, "args": args})
        # Fire the request at the server and block for a single reply.
        self.sock.sendto(payload.encode(), (UDP_IP, UDP_PORT))
        reply, _addr = self.sock.recvfrom(1024)
        return reply.decode()
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Import the functions mirrored on the RPC server.
    from mycode import abc, double_int, say_hello, rpc_test, doggo_test, favorite_number, half_float, bye_professor

    # Build one RPC proxy per function.
    rpc_abc = RPCClient(abc)
    rpc_double = RPCClient(double_int)
    rpc_hello = RPCClient(say_hello)
    rpc_test_fn = RPCClient(rpc_test)
    rpc_doggo = RPCClient(doggo_test)
    rpc_favorite = RPCClient(favorite_number)
    rpc_half = RPCClient(half_float)
    rpc_bye = RPCClient(bye_professor)

    # Invoke each proxy (same order as before) and echo the replies.
    print(rpc_abc(5, 'AAA'))
    print(rpc_double(5.0))
    print(rpc_hello())
    print(rpc_doggo('WOOF!'))
    print(rpc_favorite(11))
    print(rpc_favorite(713))
    print(rpc_test_fn())
    print(rpc_half(13.0))
    print(rpc_bye())
|
from turtle import *
# Open the drawing window and create a turtle pen.
screen = Screen()
pen = Turtle()

# Draw a square: four 100-unit sides, turning 90 degrees left each time.
for _ in range(4):
    pen.forward(100)
    pen.left(90)
|
import sublime
import posixpath
from collections import OrderedDict
import os
from abc import ABCMeta, abstractmethod
from ._compat.pathlib import Path
from ._util.glob import get_glob_matcher
from ._compat.typing import List, Optional, Tuple, Iterable, Union
__all__ = ['ResourcePath']
def _abs_parts(path: Path) -> Tuple[str, ...]:
return (path.drive, path.root) + path.parts[1:]
def _file_relative_to(path: Path, base: Path) -> Optional[Tuple[str, ...]]:
    """
    Like Path.relative_to, except:
    - Both paths must be relative.
    - `base` must be a single Path object.
    - The error message is blank.
    - Only a tuple of parts is returned.
    Surprisingly, this is much, much faster.
    """
    child_parts = _abs_parts(path)
    base_parts = _abs_parts(base)
    n = len(base_parts)
    # NOTE(review): `_flavour.casefold_parts` is a private pathlib API that
    # was removed in Python 3.12 — confirm the supported interpreter range.
    cf = path._flavour.casefold_parts  # type: ignore
    # Case-insensitive prefix comparison (matters on case-insensitive
    # filesystems); mismatch means `path` is not under `base`.
    if cf(child_parts[:n]) != cf(base_parts):
        return None
    # Remaining components of `path` below `base`.
    return child_parts[n:]
class ResourceRoot(metaclass=ABCMeta):
    """
    Represents a directory containing packages.

    Maps between Sublime resource paths (e.g. 'Packages/...') and the
    filesystem directory that backs them. Subclasses decide how a package
    name maps to a file (plain directory vs. .sublime-package archive).
    """
    def __init__(self, root: object, path: Union[Path, str]) -> None:
        # `root` is coerced to a ResourcePath (e.g. 'Packages', 'Cache');
        # `path` is the filesystem directory that backs this root.
        self.resource_root = ResourcePath(root)
        self.file_root = Path(path)
    def resource_to_file_path(self, resource_path: object) -> Path:
        """
        Given a :class:`ResourcePath`,
        return the corresponding :class:`Path` within this resource root.
        :raise ValueError: if the :class:`ResourcePath` is not within this resource root.
        """
        resource_path = ResourcePath(resource_path)
        # relative_to raises ValueError when resource_path is outside this root.
        parts = resource_path.relative_to(self.resource_root)
        if parts == ():
            # The resource path *is* the root itself.
            return self.file_root
        else:
            return self._package_file_path(*parts)
    def file_to_resource_path(self, file_path: Union[Path, str]) -> Optional['ResourcePath']:
        """
        Given an absolute :class:`Path`,
        return the corresponging :class:`ResourcePath` within this resource root,
        or ``None`` if there is no such :class:`ResourcePath`.
        :raise ValueError: if the :class:`Path` is relative.
        """
        file_path = wrap_path(file_path)
        if not file_path.is_absolute():
            raise ValueError("Cannot convert a relative file path to a resource path.")
        parts = _file_relative_to(file_path, self.file_root)
        if parts is None:
            # Not under this root's backing directory.
            return None
        elif parts == ():
            return self.resource_root
        else:
            return self._package_resource_path(*parts)
    @abstractmethod
    def _package_file_path(
        self,
        package: str,
        *parts: str
    ) -> Path:  # pragma: no cover
        """
        Given a package name and zero or more path segments,
        return the corresponding :class:`Path` within this resource root.
        """
        ...
    @abstractmethod
    def _package_resource_path(
        self,
        package: str,
        *parts: str
    ) -> 'ResourcePath':  # pragma: no cover
        """
        Given a package name and zero or more path segments,
        return the corresponding :class:`ResourcePath` within this resource root.
        """
        ...
class DirectoryResourceRoot(ResourceRoot):
    """
    Represents a directory containing unzipped package directories.

    Unzipped packages mirror the resource tree directly on disk, so both
    mappings are plain joins under the respective root.
    """

    def _package_file_path(self, *parts: str) -> Path:
        result = self.file_root
        for part in parts:
            result = result / part
        return result

    def _package_resource_path(self, *parts: str) -> 'ResourcePath':
        result = self.resource_root
        for part in parts:
            result = result / part
        return result
class InstalledResourceRoot(ResourceRoot):
    """
    Represents a directory containing zipped sublime-package files.

    On disk a package lives in '<name>.sublime-package'; as a resource it
    appears under 'Packages/<name>/...' without the archive suffix.
    """

    def _package_file_path(self, package: str, *rest: str) -> Path:
        # This is not currently called because there are no installed-only roots.
        archive_name = package + '.sublime-package'
        return self.file_root.joinpath(archive_name, *rest)

    def _package_resource_path(self, package: str, *rest: str) -> 'ResourcePath':
        base = self.resource_root / package
        return base.remove_suffix('.sublime-package').joinpath(*rest)
def wrap_path(p: Union[str, Path]) -> Path:
    """Coerce *p* to a Path, passing existing Path objects through unchanged."""
    return p if isinstance(p, Path) else Path(p)
# Cached list of resource roots; built lazily on first use (sublime API
# functions are not safe to call at import time).
_ROOTS = None  # type: Optional[List[ResourceRoot]]
def get_roots() -> List[ResourceRoot]:
    """Return (and lazily build) the list of known resource roots."""
    global _ROOTS
    if _ROOTS is None:
        _ROOTS = [
            DirectoryResourceRoot('Cache', sublime.cache_path()),
            DirectoryResourceRoot('Packages', sublime.packages_path()),
            InstalledResourceRoot('Packages', sublime.installed_packages_path()),
            # Default packages shipped next to the Sublime Text executable.
            InstalledResourceRoot('Packages', Path(sublime.executable_path()).parent / 'Packages'),
        ]
    return _ROOTS
class ResourcePath():
    """
    A pathlib-inspired representation of a Sublime Text resource path.
    Resource paths are similar to filesystem paths in many ways,
    yet different in other ways.
    Many features of :class:`pathlib.Path` objects
    are not implemented by :class:`ResourcePath`,
    and other features may have differerent interpretations.
    A resource path consists of one or more parts
    separated by forward slashes (regardless of platform).
    The first part is the root.
    At the present time, the only roots that Sublime uses are
    ``'Packages'`` and ``'Caches'``.
    Resource paths are always absolute;
    dots in resource paths have no special meaning.
    :class:`ResourcePath` objects are immutable and hashable.
    The forward slash operator is a shorthand for :meth:`joinpath`.
    The string representation of a :class:`ResourcePath`
    is the raw resource path in the form that Sublime Text uses.
    Some methods accept glob patterns as arguments.
    Glob patterns are interpreted as in pathlib.
    Recursive globs (**) are always allowed, even in :meth:`match`.
    Leading slashes are not matched literally.
    A pattern with a leading slash must match the entire path
    and not merely a suffix of the path.
    .. versionadded:: 1.2
    """
    @classmethod
    def glob_resources(cls, pattern: str) -> List['ResourcePath']:
        """
        Find all resources that match the given pattern
        and return them as :class:`ResourcePath` objects.
        """
        match = get_glob_matcher(pattern)
        # find_resources('') lists every known resource; filter locally.
        return [
            cls(path) for path in sublime.find_resources('')
            if match(path)
        ]
    @classmethod
    def from_file_path(cls, file_path: Union[Path, str]) -> 'ResourcePath':
        """
        Return a :class:`ResourcePath` corresponding to the given file path.
        If the file path corresponds to a resource inside an installed package,
        then return the path to that resource.
        :raise ValueError: if the given file path does not correspond to any resource path.
        :raise ValueError: if the given file path is relative.
        .. code-block:: python
            >>> ResourcePath.from_file_path(
                os.path.join(sublime.packages_path(), 'My Package', 'foo.py')
            )
            ResourcePath("Packages/My Package/foo.py")
            >>> ResourcePath.from_file_path(
                os.path.join(
                    sublime.installed_packages_path(),
                    'My Package.sublime-package',
                    'foo.py'
                )
            )
            ResourcePath("Packages/My Package/foo.py")
        """
        file_path = wrap_path(file_path)
        # Ask each root in turn; the first non-None answer wins.
        candidates = (root.file_to_resource_path(file_path) for root in get_roots())
        path = next(filter(None, candidates), None)
        if path:
            return path
        else:
            raise ValueError(
                "Path {!r} does not correspond to any resource path.".format(file_path)
            )
    def __init__(self, *pathsegments: object):
        """
        Construct a :class:`ResourcePath` object with the given parts.
        :raise ValueError: if the resulting path would be empty.
        """
        first, *rest = pathsegments
        if isinstance(first, ResourcePath):
            # Reuse the existing path's parts and append the rest.
            self._parts = first.parts + self._parse_segments(rest)
        else:
            self._parts = self._parse_segments(pathsegments)
        if self._parts == ():
            raise ValueError("Empty path.")
    def _parse_segments(self, pathsegments: Iterable[object]) -> Tuple[str, ...]:
        # Normalize each non-empty segment (collapsing '//' and './')
        # and split on '/' to get the individual parts.
        return tuple(
            part
            for segment in pathsegments if segment
            for part in posixpath.normpath(str(segment)).split('/')
        )
    def __hash__(self) -> int:
        return hash(self.parts)
    def __repr__(self) -> str:
        return "{}({!r})".format(self.__class__.__name__, str(self))
    def __str__(self) -> str:
        return '/'.join(self.parts)
    def __eq__(self, other: object) -> bool:
        return isinstance(other, ResourcePath) and self._parts == other.parts
    def __truediv__(self, other: object) -> 'ResourcePath':
        return self.joinpath(other)
    @property
    def parts(self) -> Tuple[str, ...]:
        """
        A tuple giving access to the path’s various components.
        """
        return self._parts
    @property
    def parent(self) -> 'ResourcePath':
        """
        The logical parent of the path. A root path is its own parent.
        """
        if len(self._parts) == 1:
            return self
        else:
            return self.__class__(*self._parts[:-1])
    @property
    def parents(self) -> Tuple['ResourcePath', ...]:
        """
        An immutable sequence providing access to the path's logical ancestors.
        """
        parent = self.parent
        if self == parent:
            # Reached the root; recursion terminates here.
            return ()
        else:
            return (parent,) + parent.parents
    @property
    def name(self) -> str:
        """
        A string representing the final path component.
        """
        return self._parts[-1]
    @property
    def suffix(self) -> str:
        """
        The final component's last suffix, if any.
        """
        name = self.name
        i = name.rfind('.')
        # A dot at position 0 (hidden file) or at the very end is not a suffix.
        if 0 < i < len(name) - 1:
            return name[i:]
        else:
            return ''
    @property
    def suffixes(self) -> List[str]:
        """
        A list of the final component's suffixes, if any.
        """
        name = self.name
        if name.endswith('.'):
            return []
        # Strip leading dots so hidden-file prefixes don't count as suffixes.
        name = name.lstrip('.')
        return ['.' + suffix for suffix in name.split('.')[1:]]
    @property
    def stem(self) -> str:
        """
        The final path component, minus its last suffix.
        """
        name = self.name
        i = name.rfind('.')
        if 0 < i < len(name) - 1:
            return name[:i]
        else:
            return name
    @property
    def root(self) -> str:
        """
        The first path component (usually ``'Packages'`` or ``'Cache'``).
        """
        return self._parts[0]
    @property
    def package(self) -> Optional[str]:
        """
        The name of the package the path is within,
        or ``None`` if the path is a root path.
        """
        if len(self._parts) >= 2:
            return self._parts[1]
        else:
            return None
    def match(self, pattern: str) -> bool:
        """
        Return ``True`` if this path matches the given glob pattern,
        or ``False`` otherwise.
        :raise ValueError: if `pattern` is invalid.
        """
        match = get_glob_matcher(pattern)
        return match(str(self))
    def joinpath(self, *other: object) -> 'ResourcePath':
        """
        Combine this path with all of the given strings.
        """
        return self.__class__(self, *other)
    def relative_to(self, *other: object) -> Tuple[str, ...]:
        """
        Compute a tuple `parts` of path components such that ``self == other.joinpath(*parts)``.
        `other` will be converted to a :class:`ResourcePath`.
        :raise ValueError: if this path is not a descendant of `other`.
        .. versionadded:: 1.3
        """
        other_path = ResourcePath(*other)
        other_len = len(other_path.parts)
        # Prefix match on parts (case-sensitive, unlike filesystem paths).
        if other_path.parts == self._parts[:other_len]:
            return self._parts[other_len:]
        else:
            raise ValueError("{!s} does not start with {!s}".format(self, other_path))
    def with_name(self, name: str) -> 'ResourcePath':
        """
        Return a new path with the name changed.
        """
        if len(self._parts) == 1:
            # Replacing the name of a root path yields a new root.
            return self.__class__(name)
        else:
            return self.parent / name
    def add_suffix(self, suffix: str) -> 'ResourcePath':
        """
        Return a new path with the suffix added.
        .. versionadded:: 1.3
        """
        return self.with_name(self.name + suffix)
    def remove_suffix(
        self, suffix: Optional[str] = None, *, must_remove: bool = True
    ) -> 'ResourcePath':
        """
        Return a new path with the suffix removed.
        If `suffix` is ``None`` (the default), then ``self.suffix`` will be removed.
        If `suffix` is a string, then only that suffix will be removed.
        Otherwise, if `suffix` is iterable,
        then the longest possible item in `suffix` will be removed.
        :raise ValueError: if `must_remove` is ``True`` (the default)
        and no suffix can be removed.
        .. versionadded:: 1.3
        """
        new_name = None
        if suffix is None:
            if self.suffix:
                new_name = self.stem
        else:
            if isinstance(suffix, str):
                suffixes = [suffix]
            else:
                # Try longer suffixes first so the longest match wins.
                suffixes = sorted(suffix, key=len, reverse=True)
            old_name = self.name
            # NOTE(review): rfind locates the suffix anywhere after index 0,
            # not only at the end of the name — confirm that truncating at an
            # interior match is the intended behavior.
            new_name = next((
                old_name[:i]
                for s in suffixes
                for i in (old_name.rfind(s),)
                if i > 0
            ), None)
        if new_name is not None:
            return self.with_name(new_name)
        elif must_remove:
            raise ValueError('Cannot remove suffix {!r} from {!r}.'.format(suffix, self))
        else:
            return self
    def with_suffix(self, suffix: str) -> 'ResourcePath':
        """
        Return a new path with the suffix changed.
        If the original path doesn’t have a suffix, the new suffix is appended
        instead. If the new suffix is an empty string, the original suffix is
        removed.
        Equivalent to ``self.remove_suffix(must_remove=False).add_suffix(suffix)``.
        """
        return self.with_name(self.stem + suffix)
    def file_path(self) -> Path:
        """
        Return a :class:`Path` object representing a filesystem path
        inside one of Sublime's data directories.
        Even if there is a resource at this path,
        there may not be a file at that filesystem path.
        The resource could be in a default package or an installed package.
        :raise ValueError: if the path's root is not used by Sublime.
        """
        # Try each root; resource_to_file_path raises ValueError when the
        # path is outside that root, so keep going until one accepts it.
        for root in get_roots():
            try:
                return root.resource_to_file_path(self)
            except ValueError:
                continue
        raise ValueError("Can't find a filesystem path for {!r}.".format(self.root)) from None
    def exists(self) -> bool:
        """
        Return ``True`` if there is a resource at this path,
        or ``False`` otherwise.
        The resource system does not keep track of directories.
        Even if a path does not point to a resource,
        there may be resources beneath that path.
        """
        return str(self) in sublime.find_resources(self.name)
    def read_text(self) -> str:
        """
        Load the resource at this path and return it as text.
        :raise FileNotFoundError: if there is no resource at this path.
        :raise UnicodeDecodeError: if the resource cannot be decoded as UTF-8.
        """
        try:
            return sublime.load_resource(str(self))
        except IOError as err:
            # Translate Sublime's generic IOError into the standard error type.
            raise FileNotFoundError(str(self)) from err
    def read_bytes(self) -> bytes:
        """
        Load the resource at this path and return it as bytes.
        :raise FileNotFoundError: if there is no resource at this path.
        """
        try:
            return sublime.load_binary_resource(str(self))
        except IOError as err:
            raise FileNotFoundError(str(self)) from err
    def glob(self, pattern: str) -> List['ResourcePath']:
        """
        Glob the given pattern at this path, returning all matching resources.
        :raise ValueError: if `pattern` is invalid.
        """
        # NOTE(review): the conditional covers the whole concatenation; since
        # a ResourcePath can never have empty parts, `base` is always
        # '/<path>/' in practice.
        base = '/' + str(self) + '/' if self._parts else ''
        return ResourcePath.glob_resources(base + pattern)
    def rglob(self, pattern: str) -> List['ResourcePath']:
        """
        Shorthand for ``path.glob('**/' + pattern)``.
        :raise ValueError: if `pattern` is invalid.
        :raise NotImplementedError: if `pattern` begins with a slash.
        """
        if pattern.startswith('/'):
            raise NotImplementedError("Non-relative patterns are unsupported")
        return self.glob('**/' + pattern)
    def children(self) -> List['ResourcePath']:
        """
        Return a list of paths that are direct children of this path
        and point to a resource at or beneath that path.
        """
        depth = len(self._parts)
        # OrderedDict.fromkeys deduplicates the next-level names while
        # preserving the order Sublime returned them in.
        return [
            self / next_part
            for next_part in OrderedDict.fromkeys(
                resource.parts[depth]
                for resource in self.glob('**')
            )
        ]
    def copy(self, target: object, exist_ok: bool = True) -> None:
        """
        Copy this resource to the given `target`.
        `target` should be a string representing a filesystem path
        or a value convertible to string.
        If `target` exists and is a file,
        and `exist_ok` is ``True`` (the default),
        it will be silently replaced.
        :raise FileNotFoundError: if there is no resource at this path.
        :raise IsADirectoryError: if `target` is a directory.
        :raise FileExistsError: if `target` is a file and `exist_ok` is ``False``.
        .. versionadded:: 1.3
        """
        # 'x' mode raises FileExistsError if the target already exists.
        if exist_ok:
            mode = 'w'
        else:
            mode = 'x'
        data = self.read_bytes()
        with open(str(target), mode + 'b') as file:
            file.write(data)
    def copytree(self, target: Union[Path, str], exist_ok: bool = False) -> None:
        """
        Copy all resources beneath this path into a directory tree rooted at `target`.
        All missing parent directories of `target` will be created.
        If `exist_ok` is ``False`` (the default),
        then `target` must not already exist.
        If `exist_ok` is ``True``,
        then existing files under `target` will be overwritten.
        :raise FileExistsError: if `target` already exists and `exist_ok` is ``False``.
        .. versionadded:: 1.3
        """
        target = wrap_path(target)
        os.makedirs(str(target), exist_ok=exist_ok)
        for resource in self.rglob('*'):
            # Mirror the resource's relative position under `target`.
            file_path = target.joinpath(*resource.relative_to(self))
            os.makedirs(str(file_path.parent), exist_ok=True)
            resource.copy(file_path)
|
from functools import partial
import itertools
import posixpath
import threading
try:
from twitter.common import log
except ImportError:
import logging as log
from twitter.common.concurrent import Future
from .group_base import (
Capture,
GroupBase,
GroupInterface,
Membership,
set_different)
from kazoo.client import KazooClient
from kazoo.protocol.states import (
EventType,
KazooState,
KeeperState)
import kazoo.security as ksec
import kazoo.exceptions as ke
# TODO(wickman) Put this in twitter.common somewhere?
# TODO(wickman) Put this in twitter.common somewhere?
def partition(items, predicate=bool):
    """Split *items* into ``(falsy, truthy)`` lists according to *predicate*.

    Each item is tested exactly once. Replaces the previous
    itertools.tee-based implementation, which buffered the whole sequence
    and iterated it twice for the same result.
    """
    falsy, truthy = [], []
    for item in items:
        (truthy if predicate(item) else falsy).append(item)
    return (falsy, truthy)
class KazooGroup(GroupBase, GroupInterface):
  """
  An implementation of GroupInterface against Kazoo.

  All ZooKeeper operations are issued asynchronously; on a recoverable
  disconnect, the pending operation is re-queued via _once() to retry on
  the next CONNECTED transition.
  """
  # Kazoo exceptions that indicate a recoverable connection problem
  # (the operation should be retried once reconnected).
  DISCONNECT_EXCEPTIONS = (ke.ConnectionLoss, ke.OperationTimeoutError, ke.SessionExpiredError)
  @classmethod
  def translate_acl(cls, acl):
    """Convert an Acl-like dict (perms/scheme/id keys) to a kazoo ACL."""
    if not isinstance(acl, dict) or any(key not in acl for key in ('perms', 'scheme', 'id')):
      raise TypeError('Expected acl to be Acl-like, got %s' % type(acl))
    return ksec.ACL(acl['perms'], ksec.Id(acl['scheme'], acl['id']))
  @classmethod
  def translate_acl_list(cls, acls):
    """Convert a list of Acl-like dicts (or kazoo ACLs) to kazoo ACLs.

    ``None`` is passed through unchanged (meaning: use default ACLs).
    """
    if acls is None:
      return acls
    try:
      acls = list(acls)
    except (ValueError, TypeError):
      raise TypeError('ACLs should be a list, got %s' % type(acls))
    if all(isinstance(acl, ksec.ACL) for acl in acls):
      # Already translated; use as-is.
      return acls
    else:
      return [cls.translate_acl(acl) for acl in acls]
  def __init__(self, zk, path, acl=None):
    if not isinstance(zk, KazooClient):
      raise TypeError('KazooGroup must be initialized with a KazooClient')
    self._zk = zk
    # Last observed kazoo connection state, updated by __state_listener.
    self.__state = zk.state
    # Queue of (state, callback) pairs waiting for a state transition.
    self.__listener_queue = []
    self.__queue_lock = threading.Lock()
    self._zk.add_listener(self.__state_listener)
    self._path = '/' + '/'.join(filter(None, path.split('/')))  # normalize path
    # Maps Membership -> Future resolving to the member's blob.
    self._members = {}
    self._member_lock = threading.Lock()
    self._acl = self.translate_acl_list(acl)
  def __state_listener(self, state):
    """Process appropriate callbacks on any kazoo state transition."""
    with self.__queue_lock:
      self.__state = state
      # Pull out the callbacks registered for this state; keep the rest queued.
      self.__listener_queue, triggered = partition(self.__listener_queue,
                                                   lambda element: element[0] == state)
    for _, callback in triggered:
      callback()
  def _once(self, keeper_state, callback):
    """Ensure a callback is called once we reach the given state: either
    immediately, if currently in that state, or on the next transition to
    that state."""
    invoke = False
    with self.__queue_lock:
      if self.__state != keeper_state:
        self.__listener_queue.append((keeper_state, callback))
      else:
        invoke = True
    # Invoke outside the lock to avoid deadlocks with __state_listener.
    if invoke:
      callback()
  # NOTE(review): __on_state is not defined anywhere in this class — these
  # two helpers would raise AttributeError if called; they appear to be
  # leftovers from a refactor to _once(). Confirm before relying on them.
  def __on_connected(self, callback):
    return self.__on_state(callback, KazooState.CONNECTED)
  def __on_expired(self, callback):
    return self.__on_state(callback, KazooState.LOST)
  def info(self, member, callback=None):
    """Fetch (asynchronously) the blob stored at a member's znode.

    Returns via `capture`: the blob content, or Membership.error() when
    the node is gone or an unexpected error occurs.
    """
    if member == Membership.error():
      raise self.InvalidMemberError('Cannot get info on error member!')
    capture = Capture(callback)
    def do_info():
      # `path` is bound below (in the `if dispatch:` branch) before this
      # closure is ever invoked.
      self._zk.get_async(path).rawlink(info_completion)
    with self._member_lock:
      member_future = self._members.setdefault(member, Future())
    member_future.add_done_callback(lambda future: capture.set(future.result()))
    dispatch = False
    with self._member_lock:
      if not member_future.done() and not member_future.running():
        try:
          # Only one caller wins the right to dispatch the actual fetch.
          dispatch = member_future.set_running_or_notify_cancel()
        except:
          pass
    def info_completion(result):
      try:
        content, stat = result.get()
      except self.DISCONNECT_EXCEPTIONS:
        # Retry the fetch after we reconnect.
        self._once(KazooState.CONNECTED, do_info)
        return
      except ke.NoNodeException:
        future = self._members.pop(member, Future())
        future.set_result(Membership.error())
        return
      except ke.KazooException as e:
        log.warning('Unexpected Kazoo result in info: (%s)%s' % (type(e), e))
        future = self._members.pop(member, Future())
        future.set_result(Membership.error())
        return
      self._members[member].set_result(content)
    if dispatch:
      path = posixpath.join(self._path, self.id_to_znode(member.id))
      do_info()
    return capture()
  def join(self, blob, callback=None, expire_callback=None):
    """Join the group by creating an ephemeral sequence znode holding *blob*.

    `callback` receives the resulting Membership; `expire_callback`, if
    given, fires when the created znode is deleted or the session is lost.
    """
    membership_capture = Capture(callback)
    expiry_capture = Capture(expire_callback)
    def do_join():
      self._zk.create_async(
          path=posixpath.join(self._path, self.MEMBER_PREFIX),
          value=blob,
          acl=self._acl,
          sequence=True,
          ephemeral=True,
          makepath=True
      ).rawlink(acreate_completion)
    def do_exists(path):
      # Watch our own znode so we can signal expiry when it disappears.
      self._zk.exists_async(path, watch=exists_watch).rawlink(partial(exists_completion, path))
    def exists_watch(event):
      if event.type == EventType.DELETED:
        expiry_capture.set()
    def expire_notifier():
      # Session loss also counts as membership expiry.
      self._once(KazooState.LOST, expiry_capture.set)
    def exists_completion(path, result):
      try:
        if result.get() is None:
          # The znode vanished between creation and the exists check.
          expiry_capture.set()
      except self.DISCONNECT_EXCEPTIONS:
        self._once(KazooState.CONNECTED, partial(do_exists, path))
    def acreate_completion(result):
      try:
        path = result.get()
      except self.DISCONNECT_EXCEPTIONS:
        self._once(KazooState.CONNECTED, do_join)
        return
      except ke.KazooException as e:
        log.warning('Unexpected Kazoo result in join: (%s)%s' % (type(e), e))
        membership = Membership.error()
      else:
        created_id = self.znode_to_id(path)
        membership = Membership(created_id)
        with self._member_lock:
          # We already know our own blob; resolve the future immediately.
          result_future = self._members.get(membership, Future())
          result_future.set_result(blob)
          self._members[membership] = result_future
        if expire_callback:
          self._once(KazooState.CONNECTED, expire_notifier)
          do_exists(path)
      membership_capture.set(membership)
    do_join()
    return membership_capture()
  def cancel(self, member, callback=None):
    """Leave the group by deleting the member's znode.

    `callback` receives True on success (including already-deleted),
    False on unexpected errors.
    """
    capture = Capture(callback)
    def do_cancel():
      self._zk.delete_async(posixpath.join(self._path, self.id_to_znode(member.id))).rawlink(
          adelete_completion)
    def adelete_completion(result):
      try:
        success = result.get()
      except self.DISCONNECT_EXCEPTIONS:
        self._once(KazooState.CONNECTED, do_cancel)
        return
      except ke.NoNodeError:
        # Already gone — treat as a successful cancel.
        success = True
      except ke.KazooException as e:
        log.warning('Unexpected Kazoo result in cancel: (%s)%s' % (type(e), e))
        success = False
      # NOTE(review): _members is keyed by Membership elsewhere in this
      # class, but popped by member.id here — confirm this ever matches.
      future = self._members.pop(member.id, Future())
      future.set_result(Membership.error())
      capture.set(success)
    do_cancel()
    return capture()
  def monitor(self, membership=frozenset(), callback=None):
    """Watch the group and fire `callback` once the member set differs
    from *membership*."""
    capture = Capture(callback)
    def wait_exists():
      # The group znode does not exist yet; watch for its creation.
      self._zk.exists_async(self._path, exists_watch).rawlink(exists_completion)
    def exists_watch(event):
      if event.state == KeeperState.EXPIRED_SESSION:
        wait_exists()
        return
      if event.type == EventType.CREATED:
        do_monitor()
      elif event.type == EventType.DELETED:
        wait_exists()
    def exists_completion(result):
      try:
        stat = result.get()
      except self.DISCONNECT_EXCEPTIONS:
        self._once(KazooState.CONNECTED, wait_exists)
        return
      except ke.NoNodeError:
        wait_exists()
        return
      except ke.KazooException as e:
        log.warning('Unexpected exists_completion result: (%s)%s' % (type(e), e))
        return
      if stat:
        do_monitor()
    def do_monitor():
      self._zk.get_children_async(self._path, get_watch).rawlink(get_completion)
    def get_watch(event):
      if event.state == KeeperState.EXPIRED_SESSION:
        wait_exists()
        return
      if event.state != KeeperState.CONNECTED:
        return
      if event.type == EventType.DELETED:
        wait_exists()
        return
      if event.type != EventType.CHILD:
        return
      if set_different(capture, membership, self._members):
        # The capture fired; stop re-arming the watch.
        return
      do_monitor()
    def get_completion(result):
      try:
        children = result.get()
      except self.DISCONNECT_EXCEPTIONS:
        self._once(KazooState.CONNECTED, do_monitor)
        return
      except ke.NoNodeError:
        wait_exists()
        return
      except ke.KazooException as e:
        log.warning('Unexpected get_completion result: (%s)%s' % (type(e), e))
        capture.set(set([Membership.error()]))
        return
      self._update_children(children)
      set_different(capture, membership, self._members)
    do_monitor()
    return capture()
  def list(self):
    """Synchronously list current members, blocking through disconnects."""
    wait_event = threading.Event()
    while True:
      wait_event.clear()
      try:
        try:
          return sorted(Membership(self.znode_to_id(znode))
                        for znode in self._zk.get_children(self._path)
                        if self.znode_owned(znode))
        except ke.NoNodeException:
          # No group znode means no members.
          return []
      except self.DISCONNECT_EXCEPTIONS:
        # Block until reconnected, then retry the listing.
        self._once(KazooState.CONNECTED, wait_event.set)
        wait_event.wait()
class ActiveKazooGroup(KazooGroup):
  """A KazooGroup that keeps a standing watch on the member list.

  monitor() requests are queued and satisfied by the single background
  watch loop started in __init__, instead of each monitor() arming its
  own watch as in the base class.
  """
  def __init__(self, *args, **kwargs):
    super(ActiveKazooGroup, self).__init__(*args, **kwargs)
    # Pending (membership, capture) pairs waiting for a differing member set.
    self._monitor_queue = []
    self._monitor_members()
  def monitor(self, membership=frozenset(), callback=None):
    capture = Capture(callback)
    # If the current member set already differs, the capture fires
    # immediately; otherwise queue it for the background watcher.
    if not set_different(capture, membership, self._members):
      self._monitor_queue.append((membership, capture))
    return capture()
  def _monitor_members(self):
    """Start the perpetual child-watch loop on the group znode."""
    def wait_exists():
      # The group znode does not exist yet; watch for its creation.
      self._zk.exists_async(self._path, exists_watch).rawlink(exists_completion)
    def exists_watch(event):
      if event.state == KeeperState.EXPIRED_SESSION:
        wait_exists()
        return
      if event.type == EventType.CREATED:
        do_monitor()
      elif event.type == EventType.DELETED:
        wait_exists()
    def exists_completion(result):
      try:
        stat = result.get()
      except self.DISCONNECT_EXCEPTIONS:
        self._once(KazooState.CONNECTED, wait_exists)
        return
      except ke.NoNodeError:
        wait_exists()
        return
      except ke.KazooException as e:
        log.warning('Unexpected exists_completion result: (%s)%s' % (type(e), e))
        return
      if stat:
        do_monitor()
    def do_monitor():
      self._zk.get_children_async(self._path, get_watch).rawlink(get_completion)
    def get_watch(event):
      if event.state == KeeperState.EXPIRED_SESSION:
        wait_exists()
        return
      if event.state != KeeperState.CONNECTED:
        return
      if event.type == EventType.DELETED:
        wait_exists()
        return
      # Re-arm the watch on every child change to keep it standing.
      do_monitor()
    def get_completion(result):
      try:
        children = result.get()
      except self.DISCONNECT_EXCEPTIONS:
        self._once(KazooState.CONNECTED, do_monitor)
        return
      except ke.NoNodeError:
        wait_exists()
        return
      except ke.KazooException as e:
        log.warning('Unexpected get_completion result: (%s)%s' % (type(e), e))
        return
      children = [child for child in children if self.znode_owned(child)]
      _, new = self._update_children(children)
      for child in new:
        # Prefetch each new member's blob; the result is discarded here
        # (it lands in self._members via info()).
        def devnull(*args, **kw): pass
        self.info(child, callback=devnull)
      # Drain the queue, firing captures whose expected membership differs
      # from the current member set; re-queue the rest.
      monitor_queue = self._monitor_queue[:]
      self._monitor_queue = []
      members = set(Membership(self.znode_to_id(child)) for child in children)
      for membership, capture in monitor_queue:
        if set(membership) != members:
          capture.set(members)
        else:
          self._monitor_queue.append((membership, capture))
    do_monitor()
|
#socket udp client
import socket
target_ip = "127.0.0.1"
port = 12345
s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
s.connect((target_ip,port))
while 1:
cmd = input("please input cmd")
cmd = cmd.encode(encoding="UTF-8")
s.send(cmd)
s.close()
|
"""
Arturo Alquicira
DPWP
Mad Lib
"""
"""
Global Variables
"""
name = raw_input("Your name: ")
hometown = raw_input("Your hometown: ")
noun = raw_input("Noun: ")
your_age = raw_input("Your age: ")
random_number = raw_input("Random number: ")
lucky_number = raw_input("Your lucky number: ")
"""
Float - year of birth
"""
def year_of_birth(age, this_year):
    """Return the birth year implied by being *age* years old in *this_year*."""
    return this_year - age
actual_year = 2014  # NOTE(review): hard-coded "current" year
# your_age comes from the user prompt above; convert to int before the math.
year = year_of_birth(int(your_age), actual_year)
"""
Array - Pizzas
"""
pizzas = ["pepperoni", "meat lovers", "vegetarian"]
"""
Dictionary
"""
languages = dict()
languages = {"spanish":"hola", "english":"hello", "french":"bonjour"}
|
"""
File: profiler.py
Defines a class for profiling sort algorithms.
A Profiler object tracks the list, the number of comparisons
and exchanges, and the running time. The profiler can also
print a traced and can create a list of unique or duplicate
numbers.
Example use:
from profiler import Profiler
from algorithms import selectionSort
p = Profiler()
p.test(selectionSort, size=15,com=True,exch=True, trace=True)
"""
import time
import random
class Profiler(object):
    """Profiles a sort algorithm: tracks the list, the number of comparisons
    and exchanges, and the elapsed running time."""

    def test(self, function, lyst=None, size=10, unique=True,
             comp=True, exch=True, trace=False):
        """
        function: the algorithm being profiled
        lyst: allows the caller to use her list
        size: the size of the list, 10 by default
        unique: if True, list contains unique integers
        comp: if True, count comparisons
        exch: if True, count exchanges
        trace: if True, print the list after each exchange
        Run the function with the given attributes and print
        its profile results.
        """
        self._comp = comp
        self._exch = exch
        self._trace = trace
        if lyst is not None:
            # Bug fix: the original discarded the caller's list and built a
            # range instead; use the provided list as documented.
            self._lyst = lyst
        elif unique:
            # list(...) is required on Python 3: range objects are immutable
            # and cannot be shuffled in place.
            self._lyst = list(range(1, size + 1))
            random.shuffle(self._lyst)
        else:
            # Bug fix: the duplicate-values path was merged into the unique
            # branch in the original; it belongs in its own else branch.
            self._lyst = []
            for count in range(size):
                self._lyst.append(random.randint(1, size))
        self._exchCount = 0
        self._cmpCount = 0
        self._startClock()
        function(self._lyst, self)
        self._stopClock()
        print(self)

    def exchange(self):
        """Counts exchange if on."""
        if self._exch:
            self._exchCount += 1
        # Bug fix: the original had a stray `self._trace:` line here,
        # which is a syntax error.
        if self._trace:
            print(self._lyst)

    def comparison(self):
        """Counts comparisons if on."""
        if self._comp:
            self._cmpCount += 1

    def _startClock(self):
        """Recording the starting time."""
        self._start = time.time()

    def _stopClock(self):
        """Stops the clock and computes the elapsed time
        in seconds, to the nearest millisecond."""
        self._elapsedTime = round(time.time() - self._start, 3)

    def __str__(self):
        """Returns the profiling results as a string."""
        result = "Problemsize:"
        result += str(len(self._lyst)) + "\n"
        result += "Elapsed time:"
        result += str(self._elapsedTime) + "\n"
        if self._comp:
            result += "comparisons:"
            result += str(self._cmpCount) + "\n"
        if self._exch:
            result += "Exchanges: "
            result += str(self._exchCount) + "\n"
        return result
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 5 18:04:59 2021
@author: delizhu
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
#cerebro = bt.Cerebro(**kwargs)#创建Cerebro框架
#cerebro.addstrategy(MyStrategy,mypara1,mypara2)#增添交易策略
#
###增添其他元素
##.addwriter
##.addanalyzer
##.addobserver
#
###改变broker
#cerebro.broker = broker
#
##接受通知
#cerebro.notify_store
#
##运行Cerebro
#result = cerebro.run(**kwargs)
#cerebro.plot()
import datetime
import backtrader as bt
class StrategyClass(bt.Strategy):
    """Simple moving-average strategy: buy while the close sits above the
    15-bar SMA, sell otherwise."""

    def __init__(self):
        # Indicators must be created in the strategy's __init__
        # (bt.ind.SMA lives in indicators/sma.py).
        self.sma = bt.ind.SMA(period=15)

    def next(self):
        # Called once per bar: compare the closing price with the SMA.
        # The two branches are exhaustive, so an if/else is equivalent to
        # the original pair of independent if statements.
        if self.data.close > self.sma:
            self.buy()
        else:
            self.sell()
# Wire up the backtest: data feed, strategy, starting cash, then run and plot.
cerebro=bt.Cerebro()
#datapath="/Users/delizhu/Desktop/Quant trading System/Tre10y.xls"
# CSV column mapping for the 10-year treasury file; openinterest=-1 marks
# the column as absent.
data = bt.feeds.GenericCSVData(dataname = 'Tre10y.csv' ,
                               fromdate = datetime.datetime(2018, 1, 1),
                               todate = datetime.datetime(2020, 3, 20),
                               nullvalue=0.0,
                               dtformat=('%Y-%m-%d'),
                               datetime=0,
                               high=3,
                               low=4,
                               open=1,
                               close=2,
                               volume=5,
                               openinterest=-1)
cerebro.adddata(data)
cerebro.addstrategy(StrategyClass)
cerebro.broker.set_cash(200000)  # starting capital
cerebro.run(maxcpu=1)
cerebro.plot()
|
# coding=utf-8
import scrapy
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy_spider.items import DoubanItem
from scrapy.spiders import CrawlSpider, Rule
class DoubanSpider(CrawlSpider):
    """Crawls Douban's comic-book tag listing and yields one item per book."""
    name = "douban_manhua"
    allowed_domains = ['book.douban.com']
    start_urls = ['https://book.douban.com/tag/%E6%BC%AB%E7%94%BB']
    # Follow the paginator's "next" link on tag listing pages.
    # NOTE(review): SgmlLinkExtractor (scrapy.contrib) was removed in modern
    # Scrapy releases — confirm the pinned Scrapy version still ships it.
    rules = [
        Rule(SgmlLinkExtractor(allow=('/tag/%E6%BC%AB%E7%94%B'),
            restrict_xpaths=("//div[@class='paginator']/span[@class='next']/a")),
            callback='parse_item',
            follow=True)
    ]
    def parse_item(self,response):
        # One item per book "info" block on the listing page.
        item = DoubanItem()
        for sel in response.xpath("//div[@class='info']"):
            item['title'] = sel.xpath('.//h2/a/@title').extract_first()
            item['link'] = sel.xpath('.//h2/a/@href').extract_first()
            item['info'] = sel.xpath(".//div[@class='pub']/text()").extract_first()
            item['desc'] = sel.xpath(".//p/text()").extract_first()
            yield item
|
class No():
    """Graph node with an (x, y) position, a visited flag and adjacency
    bookkeeping (ligacoes/nos)."""

    def __init__(self, *args, **kwargs):
        # Position comes from keyword arguments only; missing keys -> None.
        self.x = kwargs.get("x")
        self.y = kwargs.get("y")
        self.visitado = False
        self.qtdLigacoes = 0
        self.ligacoes = []
        self.nos = []

    def getPosicao(self):
        """Return the node position as an [x, y] list."""
        return [self.x, self.y]

    def isVisitado(self):
        """Return whether this node was already visited."""
        return self.visitado

    def setVisitado(self):
        """Mark this node as visited (one-way flag)."""
        self.visitado = True
|
import numpy as np
import pandas as pd
class CrdRbd:
def __init__(self, data):
self.SST = None,
self.MSST = None,
self.SSt = None
self.MSSt = None
self.MSSE = None
self.MSSe = None
self.n = None
self.N = None
self.k = None
self.b = None
self.data = data
print("""
T SQR = TOTAL SQUARED,
T MEAN = TOTAL MEAN
G = GRAND TOTAL
""")
def data_manipulation(self, in_data, prob):
if prob == 'crd':
rep = 'T'
else:
rep = 'B'
sum_of_Yi = pd.DataFrame(np.sum(in_data).T, columns=[rep])
mean_of_Yi = pd.DataFrame(np.mean(in_data).T, columns=['mean'])
sqr_of_Yi = pd.DataFrame(np.square(np.sum(in_data)).T, columns=['{}^2'.format(rep)])
t_sqr_of_Yi = np.sum(np.square(np.sum(in_data)))
total = np.sum(np.sum(in_data))
total_mean = total / in_data.size
total_squared = np.sum(np.sum(np.square(in_data)))
total_sqr = total ** 2
print("N : {}".format(in_data.size))
print("G : {}".format(total))
print("G^2 : {}".format(total_sqr))
print("T SQR : {}".format(total_squared))
print("T MEAN : {}".format(total_mean))
new_table = sum_of_Yi.join(mean_of_Yi).join(sqr_of_Yi)
print("\n")
print(new_table.T)
val = {
'G': total,
'N': in_data.size,
'total_squared': total_squared,
'sqr_of_Yi': sqr_of_Yi,
't_sqr_of_Yi': t_sqr_of_Yi,
'G^2': total_sqr,
}
return val
def anova(self, test, sse, sst_, df):
if len(test) == 1:
SSt = test[0]
t_D_F = df[0]
E_D_F = df[1]
T_D_F = df[2]
elif len(test) == 2:
SSt = test[0]
SSb = test[1]
t_D_F = df[0]
b_D_F = df[1]
E_D_F = df[2]
T_D_F = df[3]
pre_table = [[SSt,
t_D_F,
SSt / t_D_F,
(SSt / t_D_F) / (sse / E_D_F),
],
[sst_,
T_D_F,
sst_ / T_D_F,
'-'
]
]
if len(test) == 1:
pre_table.insert(1, [sse,
E_D_F,
sse / E_D_F,
'-'
])
if len(test) > 1:
index = ['treatment', 'block', 'error', 'total']
else:
index = ['treatment', 'error', 'total']
if len(test) > 1:
pre_table.insert(1, [SSb,
b_D_F,
SSb / b_D_F,
(SSb / b_D_F) / (sse / E_D_F),
])
pre_table.insert(2, [sse,
E_D_F,
sse / E_D_F,
'-'
])
table = pd.DataFrame(np.array(
pre_table
), index=index, columns=['SS', 'DF', 'MSS', 'Variance ration (F)'])
print("\n")
print(table)
def crd(self):
print("----treatments-----")
print("k : {} ".format(self.data.shape[0]))
n = self.data.shape[1]
var = self.data.T
calc = self.data_manipulation(var, 'crd')
SST = calc['total_squared'] - calc['G^2'] / calc['N']
T_D_F = calc['N'] - 1
sum_ys = np.sum(calc['t_sqr_of_Yi'])
SSt = sum_ys / n - calc['G^2'] / calc['N']
t_D_F = self.data.shape[0] - 1
SSe = calc['total_squared'] - sum_ys / n
E_D_F = self.data.shape[0] * (n - 1)
print("\n")
print('--- ANOVA TABLE ---')
self.anova([SSt], SSe, SST, [t_D_F, E_D_F, T_D_F])
def rbd(self):
print("\n-----with blocks------")
print("b : {} ".format(self.data.shape[1]))
b = self.data.shape[1]
k = self.data.shape[0]
var = self.data.T
calc = self.data_manipulation(var, 'crd')
sum_ys = np.sum(calc['t_sqr_of_Yi'])
SSt = sum_ys / b - calc['G^2'] / (b * k)
t_D_F = k - 1
calc = self.data_manipulation(self.data, 'rbd')
SST = calc['total_squared'] - calc['G^2'] / (b * k)
T_D_F = b * k - 1
sum_ys = np.sum(calc['t_sqr_of_Yi'])
SSb = sum_ys / k - calc['G^2'] / (b * k)
b_D_F = b - 1
SSe = SST - (SSt + SSb)
E_D_F = (b - 1) * (k - 1)
print("\n")
print('--- ANOVA TABLE ---')
self.anova([SSt, SSb], SSe, SST, [t_D_F, b_D_F, E_D_F, T_D_F])
|
# Generated by Django 2.1.4 on 2020-12-20 02:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Creates the unmanaged dashboard_* tables and flips every remaining
    forensics model to managed=False — all of these tables are owned by an
    external system, so Django must not create or alter them."""

    dependencies = [
        ('forensics', '0002_auto_20201214_2107'),
    ]

    # Every model below gets the identical AlterModelOptions operation;
    # the repetitive entries are generated from this single list (same
    # operations, same order as the hand-written original).
    _UNMANAGED_MODELS = [
        'actionsperformed', 'actiontypes', 'agency', 'agencypersonnel',
        'alcohol', 'ammo', 'arrest', 'blacklistmapping', 'canceledrequest',
        'case', 'caseagency', 'caseoffense', 'caseperson', 'covidlightvalues',
        'covidpatient', 'covidpositivebyweek', 'covidrejected', 'covidsample',
        'covidtest', 'cssurequestextension', 'dashboardapp',
        'dfsfburequestextension', 'eventlog', 'evidence', 'evidencemapping',
        'evidencetransfer', 'fbuevidenceextension', 'feuammo',
        'feuresultextension', 'firearms', 'forensicunit',
        'foresightservicemapping', 'foresightservices', 'globalevidencetype',
        'labdepartment', 'labpersonnel', 'latentserviceextension',
        'lfuresultextension', 'location', 'narcoticidentification', 'offense',
        'offensemapping', 'osticketstats', 'personnelteam', 'phlsample',
        'porterleeforesight', 'property', 'request', 'requestcaseoffense',
        'requestevidence', 'requestperson', 'result', 'savedqueries',
        'service', 'stacssample', 'stacsspecimen', 'team', 'toxanalytes',
        'toxconfirmation', 'toxscreen', 'vehicle',
    ]

    operations = [
        migrations.CreateModel(
            name='Caseraw',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('case_id', models.CharField(max_length=30, unique=True)),
                ('case_details', models.TextField(blank=True, null=True)),
            ],
            options={
                'db_table': 'dashboard_caseraw',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='Dashboard',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
            options={
                'db_table': 'dashboard_dashboard',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='DashboardAppevidencetype',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('evidence_type', models.CharField(max_length=50)),
                ('evidence_image', models.CharField(blank=True, max_length=200, null=True)),
                ('evidence_group', models.CharField(blank=True, max_length=30, null=True)),
                ('list_location', models.IntegerField(blank=True, null=True)),
                ('app_name', models.CharField(blank=True, max_length=30, null=True)),
            ],
            options={
                'db_table': 'dashboard_appevidencetype',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='Dashboardpermissions',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
            options={
                'db_table': 'dashboard_dashboardpermissions',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='Evidenceraw',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('case_id', models.CharField(max_length=30)),
                ('evidence_details', models.TextField(blank=True, null=True)),
            ],
            options={
                'db_table': 'dashboard_evidenceraw',
                'managed': False,
            },
        ),
    ] + [
        migrations.AlterModelOptions(name=model_name, options={'managed': False})
        for model_name in _UNMANAGED_MODELS
    ]
|
from PIL import Image
from io import BytesIO
import base64
def crop_faces(img_file, faces_data):
    """Crop every detected face out of img_file.

    faces_data: detection payload; faces_data["images"][0]["faces"] holds
    dicts whose "face_location" box has top/left/width/height.
    Returns a list of cropped PIL images, or {"status": "NO_FACE"} when the
    payload contains no images (callers must handle both return shapes).
    """
    images = faces_data["images"]
    if not images:
        return {"status": "NO_FACE"}
    source = Image.open(img_file)
    crops = []
    for face in images[0]["faces"]:
        box = face["face_location"]
        left, top = box["left"], box["top"]
        right = left + box["width"]
        bottom = top + box["height"]
        crops.append(source.crop((left, top, right, bottom)))
    return crops
def transform_image_to_base64_string(image):
    """Encode raw image bytes as an ASCII base64 string."""
    encoded = base64.b64encode(image)
    return encoded.decode('ascii')
def transform_pil_image_to_image(image):
    """Serialize a PIL image to JPEG bytes.

    The image is converted to RGB first so source formats with an alpha
    channel can still be written as JPEG.
    """
    out = BytesIO()
    image.convert("RGB").save(out, format="JPEG")
    return out.getvalue()
|
import socket
# Minimal single-client TCP echo server: prints each received message and
# echoes it back, until the client sends ":q".
host = '127.0.0.1'
port = 5000
s = socket.socket()
s.bind((host, port))
s.listen(1)  # accept at most one pending connection
c, addr = s.accept()
print("Connection From:" + str(addr))
data = c.recv(1024).decode('utf-8')
while(data != ":q"):
    print(str(addr) +" says: " + data)
    response = "Echo back - " + data
    c.send(response.encode('utf-8'))
    data = c.recv(1024).decode('utf-8')
c.close()
print(str(addr) + " closed connection.")
from django.urls import path
from . import views_student
# Student attendance-day routes: list / add / change / remove days, view a
# single day, and add/remove attendance marks for a day.
urlpatterns = [
    path('', views_student.student_day_all, name='student_day_all'),
    path('add/', views_student.StudentDayAdd.as_view(), name='student_day_add'),
    path('change/<int:pk>/', views_student.student_day_change, name='student_day_change'),
    path('remove/<int:pk>/', views_student.student_day_remove, name='student_day_remove'),
    path('<int:pk>/', views_student.student_day_single, name='student_day_single'),
    path('mark/add/<int:pk_day>/', views_student.student_attendance_add, name='student_attendance_add'),
    path('mark/remove/<int:pk>/', views_student.student_attendance_remove, name='student_attendance_remove'),
]
|
class Solution:
    def uniquePathsWithObstacles(self, obstacleGrid):
        """
        :type obstacleGrid: List[List[int]]  (1 marks an obstacle)
        :rtype: int  number of monotone right/down paths from top-left to
                bottom-right that avoid obstacles
        """
        rows = len(obstacleGrid)
        cols = len(obstacleGrid[0])
        # dp[i][j] = number of paths that reach cell (i, j).
        dp = [[0] * cols for _ in range(rows)]
        for i in range(rows):
            for j in range(cols):
                if obstacleGrid[i][j] == 1:
                    dp[i][j] = 0  # blocked cells contribute nothing
                elif i == 0 and j == 0:
                    dp[i][j] = 1  # the start cell itself
                else:
                    above = dp[i - 1][j] if i > 0 else 0
                    left = dp[i][j - 1] if j > 0 else 0
                    dp[i][j] = above + left
        return dp[-1][-1]
# Manual smoke test: a 1x3 row blocked in the middle has no path (prints 0).
print(Solution().uniquePathsWithObstacles([
    [0,1,0],
    ]
))
from django.http import HttpResponse
from .models import Pawn
from .models import SplendorGame
from .models import SplendorGameState
from .models import SplendorPlayerState
import decimal
def index(request):
    """Trivial smoke-test view."""
    return HttpResponse("Hello, world")
def read(request):
    """Return the most recent Pawn's position as "x,y", with permissive
    CORS headers so a browser client on another origin can poll it."""
    # NOTE(review): Pawn.objects.last() is None when the table is empty,
    # which would raise AttributeError here — confirm intended.
    pawn = Pawn.objects.last()
    response = HttpResponse(str(pawn.xpos) + ',' + str(pawn.ypos))
    response["Access-Control-Allow-Origin"] = "*"
    response["Access-Control-Allow-Methods"] = "GET, OPTIONS"
    response["Access-Control-Max-Age"] = "1000"
    response["Access-Control-Allow-Headers"] = "X-Requested-With, Content-Type"
    return response
def pasha(request):
    """Easter-egg/test endpoint."""
    return HttpResponse("you still found pasha's url")
def write(request):
    """Save a new Pawn at the x/y position given in the query string.

    NOTE(review): a GET with side effects, and missing xpos/ypos raises
    (500) — confirm callers always send both parameters.
    """
    myxpos = request.GET['xpos']
    myypos = request.GET['ypos']
    p = Pawn(xpos=decimal.Decimal(myxpos), ypos=decimal.Decimal(myypos))
    p.save()
    return HttpResponse("you saved, go you")
def splendor_write(request):
    """Return the latest SplendorGameState for the game named in the
    'game' query parameter (despite the name, this only reads)."""
    game_name = request.GET['game']
    game = SplendorGame.objects.get(name=game_name)
    game_state = SplendorGameState.objects.filter(game=game).last()
    return HttpResponse(game_state)
|
import datetime
import math
import logging
import os
import stat
import pandas as pd
import paramiko
import sqlite3
import yaml
from itertools import chain
from multiprocessing import Process, Queue
from paramiko.client import SSHClient
from tqdm import tqdm
def list_sftp_epns(host, user, key_path, data_path):
    """Return the directory entries under data_path on the SFTP host.

    Uses key-based auth; unknown host keys are auto-accepted
    (AutoAddPolicy), which trusts the network — fine only for internal hosts.
    """
    with SSHClient() as ssh:
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(hostname=host, username=user, key_filename=key_path)
        with ssh.open_sftp() as sftp:
            sftp.chdir(data_path)
            return sftp.listdir()
def is_dir(sftp_attr):
    """Return True when the SFTP attribute entry describes a directory."""
    mode = sftp_attr.st_mode
    return stat.S_ISDIR(mode)
def list_all_files(sftp_client, path):
    """Recursively yield the full remote path of every file under `path`.

    NOTE(review): listdir() and listdir_attr() are two separate server
    round-trips; this assumes the directory does not change between them so
    names and attrs stay aligned — confirm acceptable for this archive.
    """
    sftp_client.chdir(path)
    names = sftp_client.listdir()
    attrs = sftp_client.listdir_attr()
    for name, attr in zip(names, attrs):
        if is_dir(attr):
            # Recurse into subdirectories.
            for x in list_all_files(sftp_client, os.path.join(path, name)):
                yield x
        else:
            yield os.path.join(path, name)
def create_file(sftp, epn, path):
    """Build one (epn, path, size, mtime) row for a remote file.

    If the file vanished between listing and stat, the error is logged and
    the row falls back to size 0 with the current timestamp.
    """
    try:
        stats = sftp.lstat(path)
        size = stats.st_size
        mtime = datetime.datetime.fromtimestamp(stats.st_mtime)
    except FileNotFoundError as fno:
        logging.error("Could not calculate stats on: {}\n{}".format(path, fno))
        size = 0
        mtime = datetime.datetime.now()
    return epn, path, size, mtime
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
class Worker(object):
    """Bundle of per-worker handles: a display name, the progress queue,
    the Process object and the sqlite db path that worker writes to."""

    def __init__(self, name, queue, process, db):
        self.name = name
        self.queue = queue
        self.process = process
        self.db = db
def epn_worker(i, queue, db_path, epns, hostname, user, key_path):
    """Worker process: walks each EPN directory over SFTP and records
    (epn, path, size, mtime) rows into this worker's own sqlite database.

    i: worker index (names the per-worker error log)
    queue: multiprocessing queue used to report progress — the 1-based
           count of EPNs finished so far
    db_path: path of the sqlite database this worker owns
    epns: list of EPN directory names under /data to scan
    """
    # Bug fix: the original used format(1), so every worker wrote to
    # worker1_errors.log; use the worker index instead.
    logging.basicConfig(filename="worker{}_errors.log".format(i), level=logging.ERROR)
    with SSHClient() as ssh:
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(hostname=hostname, username=user, key_filename=key_path)
        with ssh.open_sftp() as sftp:
            conn = sqlite3.connect(db_path)
            conn.execute('''create table files
            (epn text,
            path text unique,
            size bigint,
            modified datetime)''')
            for i, epn in enumerate(epns):
                # Lazily stat every file below this EPN and bulk-insert.
                files = (create_file(sftp, epn, pth) for pth in
                         list_all_files(sftp, os.path.join('/data', epn)))
                with conn:
                    conn.executemany("insert into files(epn, path, size, modified) values (?, ?, ?, ?)", files)
                queue.put(i + 1)
            conn.close()
def process_epns(epns, hostname, user, key_path, processes=4):
    """Fan the EPN list out across `processes` worker processes, each
    writing its own filesN.db, and display overall progress with tqdm."""
    count = 0
    workers = []
    with tqdm(total=len(epns)) as pbar:
        # Split the EPNs into one roughly equal chunk per worker.
        for i, eps in enumerate(chunks(epns, int(math.ceil(len(epns) / processes)))):
            q = Queue()
            db_path = "files{}.db".format(i)
            worker = Process(target=epn_worker, args=(i, q, db_path, eps, hostname, user, key_path))
            worker.daemon = True
            worker.start()
            workers.append(Worker("Worker {}".format(i + 1), q, worker, db_path))
        # NOTE(review): tqdm's update() expects an INCREMENT but receives the
        # cumulative total here, and queue.get() blocks — a stalled worker
        # stalls the whole progress loop. Confirm before relying on the bar.
        while count < len(epns):
            count = 0
            for worker in workers:
                count += worker.queue.get()
            pbar.update(count)
        for worker in workers:
            worker.process.join()
if __name__ == '__main__':
    # Load connection settings; a missing or malformed config.yml aborts.
    try:
        config_file = open("config.yml", "r")
    except IOError:
        print('Config file not found')
        exit(1)
    else:
        try:
            cfg = yaml.load(config_file, Loader=yaml.FullLoader)
            epns_path = cfg["epns_path"]
            hostname = cfg["hostname"]
            user = cfg["user"]
            key_path = cfg["key_path"]
        except yaml.YAMLError as e:
            print(e)
            exit(1)
        print("Config loaded: ", cfg)
        # Concatenate every EPN spreadsheet into one frame.
        epns_names = os.listdir(epns_path)
        epns = {name: pd.read_excel(os.path.join(epns_path, name)) for name in epns_names}
        epns = pd.concat(epns).reset_index()
        # (sic: "Firt name" kept as-is — downstream consumers may rely on it)
        epns.columns = ["File name", "File index", "EPN", "Start", "End", "Title", "Firt name", "Last name", "Email"]
        sftp_epn_list = list_sftp_epns(hostname, user, key_path, 'data')
        # Bug fix: print() does no %-formatting, so the original printed a
        # literal "%s" followed by the value; apply the % operator explicitly.
        print("sftp epn list length %s" % len(sftp_epn_list))
        # Only scan EPNs present both in the spreadsheets and on the server.
        matched_epns = set(epns['EPN'].tolist()).intersection(set(sftp_epn_list))
        print("matched epn list length %s" % len(matched_epns))
        process_epns(list(matched_epns), hostname, user, key_path)
|
from pyquery import PyQuery as pq
import requests
# Fetch one forum thread-listing page and print the text of the post list.
url="http://nanabt.com/index.php?c=thread&fid=28"
req=requests.get(url)
html=req.text
doc = pq(html)
print(doc('.thread_posts_list').text())
# Create an empty dict (commonly defined before a loop).
dict1 = {}
# # Create a plain dict
# stu_grade = {'stu1001': 95, 'stu1002': 80, 'stu1003': 75}
# Create a nested dict.
stu_info = { 'stu1001': {'name': 'Jack', 'gender': 'male', 'age': 26},
'stu1002': {'name': 'Tom', 'gender': 'male', 'age': 25},
'stu1003': {'name': 'Lucy', 'gender': 'female', 'age': 25}}
# dict() factory function: accepts an iterable of key/value pairs.
dict2 = dict((('name','Jack'), ['gender','male'], ['age', 26]))
print(dict2)
# Built-in fromkeys(): first argument is an iterable of keys, the second is
# one shared value for all of them.
dict_1 = {}.fromkeys('xyz', 100)
dict_2 = dict.fromkeys(['x', 'y', 'z'], [12,23])
dict_3 = {}.fromkeys(('x', 'y', 'z'), '234')
dict_4 = dict.fromkeys({'age':20, 'gender': 'male'},100) # note: only the dict's KEYS are used
dict_5 = dict.fromkeys('xyz')
print(dict_1) # {'y': 100, 'z': 100, 'x': 100}
print(dict_2) # {'y': [12, 23], 'z': [12, 23], 'x': [12, 23]}
print(dict_3) # {'y': '234', 'z': '234', 'x': '234'}
print(dict_4) # {'gender': 100, 'age': 100}
print(dict_5) # {'y': None, 'z': None, 'x': None}
# Modify elements.
dict_1['x'] = 200
# Gotcha: fromkeys gave all keys ONE shared list, so this mutates every value.
dict_2['x'][0] = 34
dict_1['w'] = 1000
dict_2['w'] = [100, 100]
dict_2['w'][0] = 99
print(''.center(50,'-'))
print(dict_1)
print(dict_2)
# Create a plain dict.
stu_grade = {'stu1001': 95, 'stu1002': 80, 'stu1003': 75}
# Add an element.
stu_grade['stu1004'] = 100
# Modify an element.
stu_grade['stu1004'] = 89
# Deleting: pop(), del, or popitem() (removes an arbitrary item).
# Lookup:
# `in` is the standard membership test — True if the key exists.
print('stu1001' in stu_grade)
# get() returns None when the key is missing, otherwise the value.
print(None == stu_grade.get('stu1005'))
# Subscript lookup: returns the value, or raises KeyError if missing.
print(stu_grade['stu1003']) # 75
# print(stu_grade['stu1005']) #出错
#
# print(stu_grade.values())
# print(type(stu_grade.values()))
# print(stu_grade.keys())
# print(type(stu_grade.keys()))
# setdefault方法,如果该 key - value 存在,则不变,否则增加成员, 并且返回真正的value
# stu_grade.setdefault('stu1006', 98) # {'stu1001': 95, 'stu1006': 98, 'stu1004': 89, 'stu1002': 80, 'stu1003': 75}
# bb.txt = stu_grade.setdefault('stu1003', 98) # {'stu1004': 89, 'stu1003': 75, 'stu1002': 80, 'stu1001': 95}
# print(stu_grade)
# print(bb.txt)
#
# print('----------')
# print(enumerate(stu_grade)) # 得到地址
#
# # dict.items
# print(stu_grade.items())
#
# for key in stu_grade:
# print(key)
#
# for key, value in stu_grade.items(): # 需要先转换为列表,然后取出key和value,
# print('%s: %s' %(key, value))
#
# for index, key in enumerate(stu_grade):
# print('%s: %s' % (index, key))
# a.update(b)方法,相当于 复制+替换
#
# print('---------')
# print(dict_1)
#
# # 增加元素
# stu_info['stu1004'] = {'name': 'Katty', 'gender': 'famale', 'age': 24}
# stu_grade['stu1004'] = 90
# # 修改元素
# stu_info['stu1001']['age'] = 27
# stu_grade['stu1001'] = 89
#
# # 删除元素,方法有pop(), del, popitem()
# # stu_info.pop('stu1004') # pop 方法
# # stu_grade.pop('stu1004')
#
# # del stu_info['stu1004']
# # del stu_grade['stu1004']
#
# # stu_info.popitem() # popitem()方法 随机删
# # stu_grade.popitem()
#
# print(stu_info)
# print(stu_grade)
#
#
#
# dict1 = { '2':2, '1':23, '1':24, '1':[123, 23]}
# print(dict1) |
# How to make a conditional branch that intentionally does nothing?
poket = ['paper', 'money', 'cellphone']
if 'money' in poket:
    pass # `pass` is a no-op placeholder (it does NOT skip ahead like C's continue)
else:
    print('카드를 꺼내라')
from app.database.cache import get_from_cache, add_to_cache
from app.database.database import execute_query
def get_funding_by_state(year):
    """Return per-state funding rows for `year`, adding abstinence /
    comprehensive spending-rate fields, with a query-keyed cache in front
    of the database."""
    # SECURITY NOTE(review): get_query interpolates `year` directly into the
    # SQL text — ensure `year` is validated before it reaches this function.
    query = get_query(year)
    data = get_from_cache(query)
    print("DB Query: " + query)
    if data is None:
        # Cache miss: hit the database and memoize the result.
        data = execute_query(query)
        add_to_cache(query, data)
    for d in data:
        if d['total'] != 0:
            d['abstinence rate'] = round(d['abstinence only'] / d['total'], 3)
            d['comprehensive rate'] = round(d['comprehensive sex education'] / d['total'], 3)
        else:
            # Avoid division by zero for states with no recorded funding.
            d['abstinence rate'] = 0
            d['comprehensive rate'] = 0
    return data
def get_query(year):
    """Return the SQL selecting all funding/birth-rate rows for `year`.

    Security fix: the query is built by f-string interpolation, so `year`
    is coerced to int first — non-numeric input now raises ValueError
    instead of being injected into the SQL text.
    """
    year = int(year)
    return f'''
    SELECT *
    FROM FundingAndBirthRatesPerState
    WHERE "year" = {year}
    ORDER BY "fips";
    '''
|
# Adjacency list of the example graph.
graph = {
    '5': ['3', '7'],
    '3': ['2', '4'],
    '7': ['8'],
    '2': [],
    '4': ['8'],
    '8': [],
}
visited = []  # vertices seen so far, in discovery order
queue = []    # FIFO frontier for the traversal
def dfs(visited, graph, node):
    """Breadth-first traversal from `node` (the name `dfs` is historical):
    prints each vertex in BFS order and records it in `visited`."""
    visited.append(node)
    queue.append(node)
    while queue:
        current = queue.pop(0)
        print(current, end= " ")
        for neighbour in graph[current]:
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)
print("Following is the breadth-first search")
dfs(visited, graph, '5')
|
# Twitch IRC connection settings.
HOST = "irc.twitch.tv"
PORT = 6667
NICK = "supermegacoolbot"
# SECURITY NOTE(review): hard-coded OAuth token committed to source — this
# credential should be revoked and loaded from the environment instead.
PASS = "oauth:avvelogoxcp40nh58p05ku8ky3b88z"
WORDSPATH = "badwords.txt"
|
from Configurables import LoKi__Hybrid__DTFDict
from Configurables import TupleToolKinematic
from Configurables import TupleToolMCTruth
from Configurables import TupleToolDecayTreeFitter
from Configurables import LoKi__Hybrid__DictOfFunctors
from Configurables import LoKi__Hybrid__Dict2Tuple
from Configurables import CheckPV
from Configurables import TupleToolTISTOS
from Configurables import TupleToolRecoStats
from Configurables import FilterDesktop
from Configurables import TupleToolGeometry
from DecayTreeTuple.Configuration import *
from Configurables import CombineParticles
from Configurables import TupleToolStripping
from Configurables import TupleToolANNPID
from Configurables import MCDecayTreeTuple
from Configurables import DaVinci
from Configurables import TupleToolEventInfo
from Configurables import TupleToolTrackInfo
from Configurables import TupleToolPid
from Configurables import TupleToolMCBackgroundInfo
from Configurables import TupleToolPropertime
from Configurables import LoKi__VoidFilter
from Configurables import TupleToolPrimaries
from Configurables import LoKi__Hybrid__TupleTool
from Configurables import GaudiSequencer
# --- MC ntuple setup for [Lambda_b0 -> (Lambda_c+ -> p+ K- pi+) pi-]CC ---
# NOTE(review): this is auto-generated-looking DaVinci/Gaudi configuration.
# Gaudi Configurables are singletons keyed by name, so re-instantiating a
# class with the same name string below retrieves/updates one shared
# instance rather than creating a new object.
# Gate the sequence on the no-PID kaon container being non-empty.
LoKi__VoidFilter('SelFilterPhys_StdAllNoPIDsKaons_Particles',
                 Code = "\n 0<CONTAINS('Phys/StdAllNoPIDsKaons/Particles',True)\n ")
# Pre-declare (configure by name) the per-tuple tool instances; they are
# attached to the tuple with addTupleTool() further down.
TupleToolPid('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolPid')
TupleToolANNPID('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolANNPID')
TupleToolPropertime('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolPropertime')
TupleToolPrimaries('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolPrimaries')
TupleToolTrackInfo('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolTrackInfo')
TupleToolEventInfo('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolEventInfo')
TupleToolKinematic('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolKinematic')
TupleToolGeometry('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolGeometry')
TupleToolMCTruth('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolMCTruth')
TupleToolRecoStats('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolRecoStats')
TupleToolMCBackgroundInfo('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolMCBackgroundInfo')
# Record the Beauty2Charm stripping-line decisions relevant to Lb -> Lc/Xic h.
TupleToolStripping('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolStripping',
                   TriggerList = ['StrippingLb2LcPiLc2PKPiBeauty2CharmDecision', 'StrippingLb2XicPiXic2PKPiBeauty2CharmDecision', 'StrippingLb2LcKLc2PKPiBeauty2CharmDecision', 'StrippingLb2XicKXic2PKPiBeauty2CharmDecision'])
# The tuple itself: reads candidates selected by the MC-matched selection
# below; '^' in the decay descriptor marks which particles get branches.
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple',
               Inputs = ['Phys/X_b0To_X_cTopKpi_bachelorPi_MCSel/Particles'],
               ToolList = [],
               Decay = '[Lambda_b0 -> ^(Lambda_c+ -> ^p+ ^K- ^pi+) ^pi-]CC')
# Attach the pre-declared tools to the tuple.
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').addTupleTool(TupleToolPrimaries('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolPrimaries'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').addTupleTool(TupleToolTrackInfo('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolTrackInfo'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').addTupleTool(TupleToolPid('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolPid'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').addTupleTool(TupleToolGeometry('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolGeometry'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').addTupleTool(TupleToolStripping('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolStripping'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').addTupleTool(TupleToolANNPID('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolANNPID'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').addTupleTool(TupleToolMCBackgroundInfo('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolMCBackgroundInfo'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').addTupleTool(TupleToolMCTruth('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolMCTruth'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').addTupleTool(TupleToolRecoStats('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolRecoStats'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').addTupleTool(TupleToolPropertime('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolPropertime'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').addTupleTool(TupleToolKinematic('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolKinematic'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').addTupleTool(TupleToolEventInfo('X_b0To_X_cTopKpi_bachelorPi_MCTuple.TupleToolEventInfo'))
# Named branches: each key becomes a branch prefix, each value selects the
# corresponding particle in the decay chain via the '^' marker.
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').addBranches({'X_b0': '[Lambda_b0 -> (Lambda_c+ -> p+ K- pi+) pi-]CC', 'X_c': '[Lambda_b0 -> ^(Lambda_c+ -> p+ K- pi+) pi-]CC', 'K': '[Lambda_b0 -> (Lambda_c+ -> p+ ^K- pi+) pi-]CC', 'p': '[Lambda_b0 -> (Lambda_c+ -> ^p+ K- pi+) pi-]CC', 'bachelorPi': '[Lambda_b0 -> (Lambda_c+ -> p+ K- pi+) ^pi-]CC', 'pi': '[Lambda_b0 -> (Lambda_c+ -> p+ K- ^pi+) pi-]CC'})
# --- DecayTreeFitter (DTF) refit variants for the MC tuple's X_b0 branch ---
# Naming scheme, as visible in the parameters below:
#   Vtx / NoVtx    -> constrainToOriginVertex True / False (the 'Vtx'
#                     DTFDict variants omit the flag and rely on the tool
#                     default -- TODO confirm the default is True)
#   NoMass / XcMass / XbMass / BothMass
#                  -> which of Lambda_c+ / Lambda_b0 appear in
#                     daughtersToConstrain (mass constraints applied)
# Each LoKi variant is a DictOfFunctors (the refit quantities), wrapped in
# a DTFDict (applies the fit), exposed via a Dict2Tuple (writes branches).
# The *_Xc_mKK functors recompute a two-body mass assigning one child the
# charged-kaon mass hypothesis (493.677 MeV is the K+- mass); the WMASS
# variants recompute the Lc mass under alternative daughter hypotheses.
LoKi__Hybrid__DictOfFunctors('Xb_lokituple_Vtx_NoMass_MCdict',
                             Variables = {'Vtx_NoMass_Xc_mppipi': 'CHILD(WMASS("p+", "pi-", "pi+"), 1)', 'Vtx_NoMass_Xc_m12': 'CHILD(M12, 1)', 'Vtx_NoMass_Xc_mKKpi': 'CHILD(WMASS("K+", "K-", "pi+"), 1)', 'Vtx_NoMass_Xc_m13': 'CHILD(M13, 1)', 'Vtx_NoMass_Xc_m23': 'CHILD(M23, 1)', 'Vtx_NoMass_Xc_mpKK': 'CHILD(WMASS("p+", "K-", "K+"), 1)', 'Vtx_NoMass_Xc_mKK': 'CHILD((((493.677**2 + CHILD(P2, 1))**.5 + CHILD(E, 2))**2 - (CHILD(PX, 1) + CHILD(PX, 2))**2 - (CHILD(PY, 1) + CHILD(PY, 2))**2 - (CHILD(PZ, 1) + CHILD(PZ, 2))**2)**.5, 1)'})
LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_NoMass_MCDTF',
                      Source = 'LoKi::Hybrid::DictOfFunctors/Xb_lokituple_Vtx_NoMass_MCdict')
LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_NoMass_MCDTF').addTool(LoKi__Hybrid__DictOfFunctors('Xb_lokituple_Vtx_NoMass_MCdict'))
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_NoMass_MC',
                         Source = 'LoKi::Hybrid::DTFDict/Xb_lokituple_Vtx_NoMass_MCDTF',
                         NumVar = 7)
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_NoMass_MC').addTool(LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_NoMass_MCDTF'))
# Trigger TIS/TOS decisions recorded for the Lb candidate (L0/HLT1/HLT2).
TupleToolTISTOS('X_b0To_X_cTopKpi_bachelorPi_MCTuple.X_b0_TupleToolTISTOS',
                Verbose = True,
                VerboseHlt1 = True,
                VerboseHlt2 = True,
                TriggerList = ['L0MuonDecision', 'L0DiMuonDecision', 'L0HadronDecision', 'L0MuonHighDecision', 'L0ElectronDecision', 'L0PhotonDecision', 'Hlt1SingleHadronDecision', 'Hlt1DiHadronDecision', 'Hlt1TrackAllL0Decision', 'Hlt1TrackMuonDecision', 'Hlt2Topo2BodySimpleDecision', 'Hlt2Topo3BodySimpleDecision', 'Hlt2Topo4BodySimpleDecision', 'Hlt2Topo2BodyBBDTDecision', 'Hlt2Topo3BodyBBDTDecision', 'Hlt2Topo4BodyBBDTDecision', 'Hlt2TopoMu2BodyBBDTDecision', 'Hlt2TopoMu3BodyBBDTDecision', 'Hlt2TopoMu4BodyBBDTDecision', 'Hlt2TopoE2BodyBBDTDecision', 'Hlt2TopoE3BodyBBDTDecision', 'Hlt2TopoE4BodyBBDTDecision', 'Hlt2TopoRad2BodyBBDTDecision', 'Hlt2TopoRad2plus1BodyBBDTDecision', 'Hlt2IncPhiDecision', 'Hlt2IncPhiSidebandsDecision'],
                VerboseL0 = True)
LoKi__Hybrid__DictOfFunctors('Xb_lokituple_NoVtx_XcMass_MCdict',
                             Variables = {'NoVtx_XcMass_Xc_mpKK': 'CHILD(WMASS("p+", "K-", "K+"), 1)', 'NoVtx_XcMass_Xc_mppipi': 'CHILD(WMASS("p+", "pi-", "pi+"), 1)', 'NoVtx_XcMass_Xc_mKK': 'CHILD((((493.677**2 + CHILD(P2, 1))**.5 + CHILD(E, 2))**2 - (CHILD(PX, 1) + CHILD(PX, 2))**2 - (CHILD(PY, 1) + CHILD(PY, 2))**2 - (CHILD(PZ, 1) + CHILD(PZ, 2))**2)**.5, 1)', 'NoVtx_XcMass_Xc_m12': 'CHILD(M12, 1)', 'NoVtx_XcMass_Xc_m23': 'CHILD(M23, 1)', 'NoVtx_XcMass_Xc_m13': 'CHILD(M13, 1)', 'NoVtx_XcMass_Xc_mKKpi': 'CHILD(WMASS("K+", "K-", "pi+"), 1)'})
LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_XcMass_MCDTF',
                      Source = 'LoKi::Hybrid::DictOfFunctors/Xb_lokituple_NoVtx_XcMass_MCdict',
                      daughtersToConstrain = ['Lambda_c+'],
                      constrainToOriginVertex = False)
LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_XcMass_MCDTF').addTool(LoKi__Hybrid__DictOfFunctors('Xb_lokituple_NoVtx_XcMass_MCdict'))
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_XcMass_MC',
                         Source = 'LoKi::Hybrid::DTFDict/Xb_lokituple_NoVtx_XcMass_MCDTF',
                         NumVar = 7)
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_XcMass_MC').addTool(LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_XcMass_MCDTF'))
LoKi__Hybrid__DictOfFunctors('Xb_lokituple_NoVtx_NoMass_MCdict',
                             Variables = {'NoVtx_NoMass_Xc_m23': 'CHILD(M23, 1)', 'NoVtx_NoMass_Xc_mppipi': 'CHILD(WMASS("p+", "pi-", "pi+"), 1)', 'NoVtx_NoMass_Xc_m13': 'CHILD(M13, 1)', 'NoVtx_NoMass_Xc_m12': 'CHILD(M12, 1)', 'NoVtx_NoMass_Xc_mKKpi': 'CHILD(WMASS("K+", "K-", "pi+"), 1)', 'NoVtx_NoMass_Xc_mKK': 'CHILD((((493.677**2 + CHILD(P2, 1))**.5 + CHILD(E, 2))**2 - (CHILD(PX, 1) + CHILD(PX, 2))**2 - (CHILD(PY, 1) + CHILD(PY, 2))**2 - (CHILD(PZ, 1) + CHILD(PZ, 2))**2)**.5, 1)', 'NoVtx_NoMass_Xc_mpKK': 'CHILD(WMASS("p+", "K-", "K+"), 1)'})
LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_NoMass_MCDTF',
                      Source = 'LoKi::Hybrid::DictOfFunctors/Xb_lokituple_NoVtx_NoMass_MCdict',
                      constrainToOriginVertex = False)
LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_NoMass_MCDTF').addTool(LoKi__Hybrid__DictOfFunctors('Xb_lokituple_NoVtx_NoMass_MCdict'))
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_NoMass_MC',
                         Source = 'LoKi::Hybrid::DTFDict/Xb_lokituple_NoVtx_NoMass_MCDTF',
                         NumVar = 7)
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_NoMass_MC').addTool(LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_NoMass_MCDTF'))
# Plain TupleToolDecayTreeFitter variants (no LoKi dict): same Vtx/Mass
# naming convention as above.
TupleToolDecayTreeFitter('Vtx_BothMass_MC',
                         daughtersToConstrain = ['Lambda_b0', 'Lambda_c+'],
                         Verbose = True,
                         constrainToOriginVertex = True)
LoKi__Hybrid__DictOfFunctors('Xb_lokituple_NoVtx_BothMass_MCdict',
                             Variables = {'NoVtx_BothMass_Xc_m23': 'CHILD(M23, 1)', 'NoVtx_BothMass_Xc_mKK': 'CHILD((((493.677**2 + CHILD(P2, 1))**.5 + CHILD(E, 2))**2 - (CHILD(PX, 1) + CHILD(PX, 2))**2 - (CHILD(PY, 1) + CHILD(PY, 2))**2 - (CHILD(PZ, 1) + CHILD(PZ, 2))**2)**.5, 1)', 'NoVtx_BothMass_Xc_mppipi': 'CHILD(WMASS("p+", "pi-", "pi+"), 1)', 'NoVtx_BothMass_Xc_mKKpi': 'CHILD(WMASS("K+", "K-", "pi+"), 1)', 'NoVtx_BothMass_Xc_mpKK': 'CHILD(WMASS("p+", "K-", "K+"), 1)', 'NoVtx_BothMass_Xc_m13': 'CHILD(M13, 1)', 'NoVtx_BothMass_Xc_m12': 'CHILD(M12, 1)'})
LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_BothMass_MCDTF',
                      Source = 'LoKi::Hybrid::DictOfFunctors/Xb_lokituple_NoVtx_BothMass_MCdict',
                      daughtersToConstrain = ['Lambda_b0', 'Lambda_c+'],
                      constrainToOriginVertex = False)
LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_BothMass_MCDTF').addTool(LoKi__Hybrid__DictOfFunctors('Xb_lokituple_NoVtx_BothMass_MCdict'))
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_BothMass_MC',
                         Source = 'LoKi::Hybrid::DTFDict/Xb_lokituple_NoVtx_BothMass_MCDTF',
                         NumVar = 7)
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_BothMass_MC').addTool(LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_BothMass_MCDTF'))
LoKi__Hybrid__DictOfFunctors('Xb_lokituple_NoVtx_XbMass_MCdict',
                             Variables = {'NoVtx_XbMass_Xc_m12': 'CHILD(M12, 1)', 'NoVtx_XbMass_Xc_mpKK': 'CHILD(WMASS("p+", "K-", "K+"), 1)', 'NoVtx_XbMass_Xc_mppipi': 'CHILD(WMASS("p+", "pi-", "pi+"), 1)', 'NoVtx_XbMass_Xc_m13': 'CHILD(M13, 1)', 'NoVtx_XbMass_Xc_m23': 'CHILD(M23, 1)', 'NoVtx_XbMass_Xc_mKKpi': 'CHILD(WMASS("K+", "K-", "pi+"), 1)', 'NoVtx_XbMass_Xc_mKK': 'CHILD((((493.677**2 + CHILD(P2, 1))**.5 + CHILD(E, 2))**2 - (CHILD(PX, 1) + CHILD(PX, 2))**2 - (CHILD(PY, 1) + CHILD(PY, 2))**2 - (CHILD(PZ, 1) + CHILD(PZ, 2))**2)**.5, 1)'})
LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_XbMass_MCDTF',
                      Source = 'LoKi::Hybrid::DictOfFunctors/Xb_lokituple_NoVtx_XbMass_MCdict',
                      daughtersToConstrain = ['Lambda_b0'],
                      constrainToOriginVertex = False)
LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_XbMass_MCDTF').addTool(LoKi__Hybrid__DictOfFunctors('Xb_lokituple_NoVtx_XbMass_MCdict'))
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_XbMass_MC',
                         Source = 'LoKi::Hybrid::DTFDict/Xb_lokituple_NoVtx_XbMass_MCDTF',
                         NumVar = 7)
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_XbMass_MC').addTool(LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_XbMass_MCDTF'))
LoKi__Hybrid__DictOfFunctors('Xb_lokituple_Vtx_BothMass_MCdict',
                             Variables = {'Vtx_BothMass_Xc_mppipi': 'CHILD(WMASS("p+", "pi-", "pi+"), 1)', 'Vtx_BothMass_Xc_m12': 'CHILD(M12, 1)', 'Vtx_BothMass_Xc_m13': 'CHILD(M13, 1)', 'Vtx_BothMass_Xc_mpKK': 'CHILD(WMASS("p+", "K-", "K+"), 1)', 'Vtx_BothMass_Xc_m23': 'CHILD(M23, 1)', 'Vtx_BothMass_Xc_mKKpi': 'CHILD(WMASS("K+", "K-", "pi+"), 1)', 'Vtx_BothMass_Xc_mKK': 'CHILD((((493.677**2 + CHILD(P2, 1))**.5 + CHILD(E, 2))**2 - (CHILD(PX, 1) + CHILD(PX, 2))**2 - (CHILD(PY, 1) + CHILD(PY, 2))**2 - (CHILD(PZ, 1) + CHILD(PZ, 2))**2)**.5, 1)'})
LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_BothMass_MCDTF',
                      Source = 'LoKi::Hybrid::DictOfFunctors/Xb_lokituple_Vtx_BothMass_MCdict',
                      daughtersToConstrain = ['Lambda_b0', 'Lambda_c+'])
LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_BothMass_MCDTF').addTool(LoKi__Hybrid__DictOfFunctors('Xb_lokituple_Vtx_BothMass_MCdict'))
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_BothMass_MC',
                         Source = 'LoKi::Hybrid::DTFDict/Xb_lokituple_Vtx_BothMass_MCDTF',
                         NumVar = 7)
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_BothMass_MC').addTool(LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_BothMass_MCDTF'))
TupleToolDecayTreeFitter('NoVtx_BothMass_MC',
                         daughtersToConstrain = ['Lambda_b0', 'Lambda_c+'],
                         Verbose = True)
# Truth-matching flags for the Lb candidate under the different LoKi arrow
# conventions: direct (->), with photons (=>), with non-decays (-x>/=x>),
# and ignoring intermediate resonances (-->/==>, plus their variants).
LoKi__Hybrid__TupleTool('X_b0To_X_cTopKpi_bachelorPi_MCTuple.X_b0_LoKi__Hybrid__TupleTool',
                        Preambulo = ['from LoKiPhysMC.decorators import *', 'from LoKiPhysMC.functions import mcMatch'],
                        Variables = {'mcMatch_ignoreResWNonDecay': "switch(mcMatch('[Lambda_b0 --x> (Lambda_c+ --x> p+ K- pi+) pi-]CC'), 1, 0)", 'mcMatch_ignoreResWPhotons': "switch(mcMatch('[Lambda_b0 ==> (Lambda_c+ ==> p+ K- pi+) pi-]CC'), 1, 0)", 'mcMatch_directWNonDecayWPhotons': "switch(mcMatch('[Lambda_b0 =x> (Lambda_c+ =x> p+ K- pi+) pi-]CC'), 1, 0)", 'mcMatch_directWNonDecay': "switch(mcMatch('[Lambda_b0 -x> (Lambda_c+ -x> p+ K- pi+) pi-]CC'), 1, 0)", 'mcMatch_ignoreRes': "switch(mcMatch('[Lambda_b0 --> (Lambda_c+ --> p+ K- pi+) pi-]CC'), 1, 0)", 'mcMatch_direct': "switch(mcMatch('[Lambda_b0 -> (Lambda_c+ -> p+ K- pi+) pi-]CC'), 1, 0)", 'mcMatch_directWPhotons': "switch(mcMatch('[Lambda_b0 => (Lambda_c+ => p+ K- pi+) pi-]CC'), 1, 0)", 'mcMatch_ignoreResWNonDecayWPhotons': "switch(mcMatch('[Lambda_b0 ==x> (Lambda_c+ ==x> p+ K- pi+) pi-]CC'), 1, 0)"})
TupleToolDecayTreeFitter('Vtx_NoMass_MC',
                         Verbose = True,
                         constrainToOriginVertex = True)
LoKi__Hybrid__DictOfFunctors('Xb_lokituple_Vtx_XcMass_MCdict',
                             Variables = {'Vtx_XcMass_Xc_mKK': 'CHILD((((493.677**2 + CHILD(P2, 1))**.5 + CHILD(E, 2))**2 - (CHILD(PX, 1) + CHILD(PX, 2))**2 - (CHILD(PY, 1) + CHILD(PY, 2))**2 - (CHILD(PZ, 1) + CHILD(PZ, 2))**2)**.5, 1)', 'Vtx_XcMass_Xc_m23': 'CHILD(M23, 1)', 'Vtx_XcMass_Xc_mpKK': 'CHILD(WMASS("p+", "K-", "K+"), 1)', 'Vtx_XcMass_Xc_m12': 'CHILD(M12, 1)', 'Vtx_XcMass_Xc_m13': 'CHILD(M13, 1)', 'Vtx_XcMass_Xc_mKKpi': 'CHILD(WMASS("K+", "K-", "pi+"), 1)', 'Vtx_XcMass_Xc_mppipi': 'CHILD(WMASS("p+", "pi-", "pi+"), 1)'})
LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_XcMass_MCDTF',
                      Source = 'LoKi::Hybrid::DictOfFunctors/Xb_lokituple_Vtx_XcMass_MCdict',
                      daughtersToConstrain = ['Lambda_c+'])
LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_XcMass_MCDTF').addTool(LoKi__Hybrid__DictOfFunctors('Xb_lokituple_Vtx_XcMass_MCdict'))
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_XcMass_MC',
                         Source = 'LoKi::Hybrid::DTFDict/Xb_lokituple_Vtx_XcMass_MCDTF',
                         NumVar = 7)
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_XcMass_MC').addTool(LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_XcMass_MCDTF'))
TupleToolDecayTreeFitter('Vtx_XbMass_MC',
                         daughtersToConstrain = ['Lambda_b0'],
                         Verbose = True,
                         constrainToOriginVertex = True)
LoKi__Hybrid__DictOfFunctors('Xb_lokituple_Vtx_XbMass_MCdict',
                             Variables = {'Vtx_XbMass_Xc_mKKpi': 'CHILD(WMASS("K+", "K-", "pi+"), 1)', 'Vtx_XbMass_Xc_mKK': 'CHILD((((493.677**2 + CHILD(P2, 1))**.5 + CHILD(E, 2))**2 - (CHILD(PX, 1) + CHILD(PX, 2))**2 - (CHILD(PY, 1) + CHILD(PY, 2))**2 - (CHILD(PZ, 1) + CHILD(PZ, 2))**2)**.5, 1)', 'Vtx_XbMass_Xc_m13': 'CHILD(M13, 1)', 'Vtx_XbMass_Xc_m12': 'CHILD(M12, 1)', 'Vtx_XbMass_Xc_m23': 'CHILD(M23, 1)', 'Vtx_XbMass_Xc_mppipi': 'CHILD(WMASS("p+", "pi-", "pi+"), 1)', 'Vtx_XbMass_Xc_mpKK': 'CHILD(WMASS("p+", "K-", "K+"), 1)'})
LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_XbMass_MCDTF',
                      Source = 'LoKi::Hybrid::DictOfFunctors/Xb_lokituple_Vtx_XbMass_MCdict',
                      daughtersToConstrain = ['Lambda_b0'])
LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_XbMass_MCDTF').addTool(LoKi__Hybrid__DictOfFunctors('Xb_lokituple_Vtx_XbMass_MCdict'))
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_XbMass_MC',
                         Source = 'LoKi::Hybrid::DTFDict/Xb_lokituple_Vtx_XbMass_MCDTF',
                         NumVar = 7)
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_XbMass_MC').addTool(LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_XbMass_MCDTF'))
TupleToolDecayTreeFitter('Vtx_XcMass_MC',
                         daughtersToConstrain = ['Lambda_c+'],
                         Verbose = True,
                         constrainToOriginVertex = True)
TupleToolDecayTreeFitter('NoVtx_XcMass_MC',
                         daughtersToConstrain = ['Lambda_c+'],
                         Verbose = True)
TupleToolDecayTreeFitter('NoVtx_NoMass_MC',
                         Verbose = True)
TupleToolDecayTreeFitter('NoVtx_XbMass_MC',
                         daughtersToConstrain = ['Lambda_b0'],
                         Verbose = True)
# Reset the X_b0 branch's tool list, then attach every DTF variant plus
# the TISTOS and truth-match tools configured above.
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').X_b0.ToolList = []
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').X_b0.addTupleTool(LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_NoMass_MC'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').X_b0.addTupleTool(TupleToolTISTOS('X_b0To_X_cTopKpi_bachelorPi_MCTuple.X_b0_TupleToolTISTOS'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').X_b0.addTupleTool(TupleToolDecayTreeFitter('Vtx_XcMass_MC'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').X_b0.addTupleTool(LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_XbMass_MC'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').X_b0.addTupleTool(LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_XcMass_MC'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').X_b0.addTupleTool(TupleToolDecayTreeFitter('NoVtx_BothMass_MC'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').X_b0.addTupleTool(LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_XcMass_MC'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').X_b0.addTupleTool(LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_BothMass_MC'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').X_b0.addTupleTool(LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_BothMass_MC'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').X_b0.addTupleTool(LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_XbMass_MC'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').X_b0.addTupleTool(LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_NoMass_MC'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').X_b0.addTupleTool(LoKi__Hybrid__TupleTool('X_b0To_X_cTopKpi_bachelorPi_MCTuple.X_b0_LoKi__Hybrid__TupleTool'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').X_b0.addTupleTool(TupleToolDecayTreeFitter('NoVtx_XbMass_MC'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').X_b0.addTupleTool(TupleToolDecayTreeFitter('Vtx_NoMass_MC'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').X_b0.addTupleTool(TupleToolDecayTreeFitter('NoVtx_NoMass_MC'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').X_b0.addTupleTool(TupleToolDecayTreeFitter('Vtx_BothMass_MC'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').X_b0.addTupleTool(TupleToolDecayTreeFitter('NoVtx_XcMass_MC'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple').X_b0.addTupleTool(TupleToolDecayTreeFitter('Vtx_XbMass_MC'))
# --- MC truth-matched selection chain feeding the MC tuple ---
# Gate on the no-PID proton container being non-empty.
LoKi__VoidFilter('SelFilterPhys_StdAllNoPIDsProtons_Particles',
                 Code = "\n 0<CONTAINS('Phys/StdAllNoPIDsProtons/Particles',True)\n ")
# Track filters: keep only no-PID tracks that truth-match (mcMatch) the
# stated true species.
FilterDesktop('Kminus_MCSel',
              Preambulo = ['from LoKiPhysMC.decorators import *', 'from LoKiPhysMC.functions import mcMatch'],
              Inputs = ['Phys/StdAllNoPIDsKaons/Particles'],
              Code = "mcMatch('[K-]CC')",
              Output = 'Phys/Kminus_MCSel/Particles')
FilterDesktop('pplus_MCSel',
              Preambulo = ['from LoKiPhysMC.decorators import *', 'from LoKiPhysMC.functions import mcMatch'],
              Inputs = ['Phys/StdAllNoPIDsProtons/Particles'],
              Code = "mcMatch('[p+]CC')",
              Output = 'Phys/pplus_MCSel/Particles')
FilterDesktop('piplus_MCSel',
              Preambulo = ['from LoKiPhysMC.decorators import *', 'from LoKiPhysMC.functions import mcMatch'],
              Inputs = ['Phys/StdAllNoPIDsPions/Particles'],
              Code = "mcMatch('[pi+]CC')",
              Output = 'Phys/piplus_MCSel/Particles')
# Build Lb -> Lc pi from the truth-matched Lc candidates and bachelor
# pions; the MotherCut requires the full chain to truth-match.
CombineParticles('X_b0To_X_cTopKpi_bachelorPi_MCSel',
                 Preambulo = ['from LoKiPhysMC.decorators import *', 'from LoKiPhysMC.functions import mcMatch'],
                 Inputs = ['Phys/Lambda_cplusTopplusKminuspiplus_MCSel/Particles', 'Phys/piminus_MCSel/Particles'],
                 MotherCut = "mcMatch('[Lambda_b0 ==> (Lambda_c+ ==> p+ K- pi+) pi-]CC')",
                 DecayDescriptors = ['[Lambda_b0 -> Lambda_c+ pi-]cc'],
                 Output = 'Phys/X_b0To_X_cTopKpi_bachelorPi_MCSel/Particles')
# Build Lc -> p K pi from the truth-matched tracks above.
CombineParticles('Lambda_cplusTopplusKminuspiplus_MCSel',
                 Preambulo = ['from LoKiPhysMC.decorators import *', 'from LoKiPhysMC.functions import mcMatch'],
                 Inputs = ['Phys/pplus_MCSel/Particles', 'Phys/Kminus_MCSel/Particles', 'Phys/piplus_MCSel/Particles'],
                 MotherCut = "mcMatch('[Lambda_c+ ==> p+ K- pi+]CC')",
                 DecayDescriptors = ['[Lambda_c+ -> p+ K- pi+]cc'],
                 Output = 'Phys/Lambda_cplusTopplusKminuspiplus_MCSel/Particles')
FilterDesktop('piminus_MCSel',
              Preambulo = ['from LoKiPhysMC.decorators import *', 'from LoKiPhysMC.functions import mcMatch'],
              Inputs = ['Phys/StdAllNoPIDsPions/Particles'],
              Code = "mcMatch('[pi-]CC')",
              Output = 'Phys/piminus_MCSel/Particles')
LoKi__VoidFilter('SelFilterPhys_StdAllNoPIDsPions_Particles',
                 Code = "\n 0<CONTAINS('Phys/StdAllNoPIDsPions/Particles',True)\n ")
# Require at least one reconstructed primary vertex.
CheckPV('CheckPV')
# Sequencer: order matters -- each filter/combiner appears before the
# algorithm that consumes its output, ending with the tuple.
GaudiSequencer('X_b0To_X_cTopKpi_bachelorPi_MCSeq',
               Members = [CheckPV('CheckPV'), LoKi__VoidFilter('SelFilterPhys_StdAllNoPIDsProtons_Particles'), FilterDesktop('pplus_MCSel'), LoKi__VoidFilter('SelFilterPhys_StdAllNoPIDsKaons_Particles'), FilterDesktop('Kminus_MCSel'), LoKi__VoidFilter('SelFilterPhys_StdAllNoPIDsPions_Particles'), FilterDesktop('piplus_MCSel'), CombineParticles('Lambda_cplusTopplusKminuspiplus_MCSel'), FilterDesktop('piminus_MCSel'), CombineParticles('X_b0To_X_cTopKpi_bachelorPi_MCSel'), DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCTuple')])
# Generator-level tuple over all produced decays (no reconstruction
# requirement) -- used e.g. for efficiency denominators.
MCDecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCAll',
                 Decay = '[Lambda_b0 ==> ^(Lambda_c+ ==> ^p+ ^K- ^pi+) ^pi-]CC')
# --- Reconstructed-data tuple: mirrors the MC tuple above but reads the
# Lb2LcPi stripping-line output instead of the truth-matched selection. ---
# Pre-declare the per-tuple tool instances (attached below).
TupleToolKinematic('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolKinematic')
TupleToolRecoStats('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolRecoStats')
TupleToolEventInfo('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolEventInfo')
TupleToolMCTruth('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolMCTruth')
TupleToolPrimaries('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolPrimaries')
TupleToolMCBackgroundInfo('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolMCBackgroundInfo')
TupleToolGeometry('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolGeometry')
TupleToolStripping('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolStripping',
                   TriggerList = ['StrippingLb2LcPiLc2PKPiBeauty2CharmDecision', 'StrippingLb2XicPiXic2PKPiBeauty2CharmDecision', 'StrippingLb2LcKLc2PKPiBeauty2CharmDecision', 'StrippingLb2XicKXic2PKPiBeauty2CharmDecision'])
TupleToolPropertime('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolPropertime')
TupleToolPid('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolPid')
TupleToolTrackInfo('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolTrackInfo')
TupleToolANNPID('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolANNPID')
# ReFitPVs = True: primary vertices are refit per candidate (with the
# candidate's tracks excluded) before the geometry/DTF quantities.
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple',
               ReFitPVs = True,
               ToolList = [],
               Decay = '[Lambda_b0 -> ^(Lambda_c+ -> ^p+ ^K- ^pi+) ^pi-]CC',
               Inputs = ['/Event/AllStreams/Phys/Lb2LcPiLc2PKPiBeauty2CharmLine/Particles'])
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').addTupleTool(TupleToolPrimaries('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolPrimaries'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').addTupleTool(TupleToolTrackInfo('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolTrackInfo'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').addTupleTool(TupleToolPid('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolPid'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').addTupleTool(TupleToolGeometry('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolGeometry'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').addTupleTool(TupleToolStripping('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolStripping'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').addTupleTool(TupleToolANNPID('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolANNPID'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').addTupleTool(TupleToolMCBackgroundInfo('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolMCBackgroundInfo'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').addTupleTool(TupleToolMCTruth('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolMCTruth'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').addTupleTool(TupleToolRecoStats('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolRecoStats'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').addTupleTool(TupleToolPropertime('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolPropertime'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').addTupleTool(TupleToolKinematic('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolKinematic'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').addTupleTool(TupleToolEventInfo('X_b0To_X_cTopKpi_bachelorPiTuple.TupleToolEventInfo'))
# Same branch naming as the MC tuple.
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').addBranches({'X_b0': '[Lambda_b0 -> (Lambda_c+ -> p+ K- pi+) pi-]CC', 'X_c': '[Lambda_b0 -> ^(Lambda_c+ -> p+ K- pi+) pi-]CC', 'K': '[Lambda_b0 -> (Lambda_c+ -> p+ ^K- pi+) pi-]CC', 'p': '[Lambda_b0 -> (Lambda_c+ -> ^p+ K- pi+) pi-]CC', 'bachelorPi': '[Lambda_b0 -> (Lambda_c+ -> p+ K- pi+) ^pi-]CC', 'pi': '[Lambda_b0 -> (Lambda_c+ -> p+ K- ^pi+) pi-]CC'})
# Truth-matching flags under the different LoKi arrow conventions (see
# the MC tuple's equivalent tool).
LoKi__Hybrid__TupleTool('X_b0To_X_cTopKpi_bachelorPiTuple.X_b0_LoKi__Hybrid__TupleTool',
                        Preambulo = ['from LoKiPhysMC.decorators import *', 'from LoKiPhysMC.functions import mcMatch'],
                        Variables = {'mcMatch_ignoreResWNonDecay': "switch(mcMatch('[Lambda_b0 --x> (Lambda_c+ --x> p+ K- pi+) pi-]CC'), 1, 0)", 'mcMatch_ignoreResWPhotons': "switch(mcMatch('[Lambda_b0 ==> (Lambda_c+ ==> p+ K- pi+) pi-]CC'), 1, 0)", 'mcMatch_directWNonDecayWPhotons': "switch(mcMatch('[Lambda_b0 =x> (Lambda_c+ =x> p+ K- pi+) pi-]CC'), 1, 0)", 'mcMatch_directWNonDecay': "switch(mcMatch('[Lambda_b0 -x> (Lambda_c+ -x> p+ K- pi+) pi-]CC'), 1, 0)", 'mcMatch_ignoreRes': "switch(mcMatch('[Lambda_b0 --> (Lambda_c+ --> p+ K- pi+) pi-]CC'), 1, 0)", 'mcMatch_direct': "switch(mcMatch('[Lambda_b0 -> (Lambda_c+ -> p+ K- pi+) pi-]CC'), 1, 0)", 'mcMatch_directWPhotons': "switch(mcMatch('[Lambda_b0 => (Lambda_c+ => p+ K- pi+) pi-]CC'), 1, 0)", 'mcMatch_ignoreResWNonDecayWPhotons': "switch(mcMatch('[Lambda_b0 ==x> (Lambda_c+ ==x> p+ K- pi+) pi-]CC'), 1, 0)"})
# --- DecayTreeFitter refit variants for the data tuple (no '_MC' suffix);
# same Vtx/NoVtx + NoMass/XcMass/XbMass/BothMass scheme as the MC set
# above.  NOTE(review): the corresponding addTupleTool calls attaching
# these to the data tuple's X_b0 branch are presumably beyond this chunk.
LoKi__Hybrid__DictOfFunctors('Xb_lokituple_Vtx_XcMassdict',
                             Variables = {'Vtx_XcMass_Xc_mKK': 'CHILD((((493.677**2 + CHILD(P2, 1))**.5 + CHILD(E, 2))**2 - (CHILD(PX, 1) + CHILD(PX, 2))**2 - (CHILD(PY, 1) + CHILD(PY, 2))**2 - (CHILD(PZ, 1) + CHILD(PZ, 2))**2)**.5, 1)', 'Vtx_XcMass_Xc_m23': 'CHILD(M23, 1)', 'Vtx_XcMass_Xc_mpKK': 'CHILD(WMASS("p+", "K-", "K+"), 1)', 'Vtx_XcMass_Xc_m12': 'CHILD(M12, 1)', 'Vtx_XcMass_Xc_m13': 'CHILD(M13, 1)', 'Vtx_XcMass_Xc_mKKpi': 'CHILD(WMASS("K+", "K-", "pi+"), 1)', 'Vtx_XcMass_Xc_mppipi': 'CHILD(WMASS("p+", "pi-", "pi+"), 1)'})
LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_XcMassDTF',
                      Source = 'LoKi::Hybrid::DictOfFunctors/Xb_lokituple_Vtx_XcMassdict',
                      daughtersToConstrain = ['Lambda_c+'])
LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_XcMassDTF').addTool(LoKi__Hybrid__DictOfFunctors('Xb_lokituple_Vtx_XcMassdict'))
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_XcMass',
                         Source = 'LoKi::Hybrid::DTFDict/Xb_lokituple_Vtx_XcMassDTF',
                         NumVar = 7)
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_XcMass').addTool(LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_XcMassDTF'))
TupleToolDecayTreeFitter('Vtx_XbMass',
                         daughtersToConstrain = ['Lambda_b0'],
                         Verbose = True,
                         constrainToOriginVertex = True)
# Trigger TIS/TOS decisions for the data tuple's Lb candidate.
TupleToolTISTOS('X_b0To_X_cTopKpi_bachelorPiTuple.X_b0_TupleToolTISTOS',
                Verbose = True,
                VerboseHlt1 = True,
                VerboseHlt2 = True,
                TriggerList = ['L0MuonDecision', 'L0DiMuonDecision', 'L0HadronDecision', 'L0MuonHighDecision', 'L0ElectronDecision', 'L0PhotonDecision', 'Hlt1SingleHadronDecision', 'Hlt1DiHadronDecision', 'Hlt1TrackAllL0Decision', 'Hlt1TrackMuonDecision', 'Hlt2Topo2BodySimpleDecision', 'Hlt2Topo3BodySimpleDecision', 'Hlt2Topo4BodySimpleDecision', 'Hlt2Topo2BodyBBDTDecision', 'Hlt2Topo3BodyBBDTDecision', 'Hlt2Topo4BodyBBDTDecision', 'Hlt2TopoMu2BodyBBDTDecision', 'Hlt2TopoMu3BodyBBDTDecision', 'Hlt2TopoMu4BodyBBDTDecision', 'Hlt2TopoE2BodyBBDTDecision', 'Hlt2TopoE3BodyBBDTDecision', 'Hlt2TopoE4BodyBBDTDecision', 'Hlt2TopoRad2BodyBBDTDecision', 'Hlt2TopoRad2plus1BodyBBDTDecision', 'Hlt2IncPhiDecision', 'Hlt2IncPhiSidebandsDecision'],
                VerboseL0 = True)
LoKi__Hybrid__DictOfFunctors('Xb_lokituple_Vtx_XbMassdict',
                             Variables = {'Vtx_XbMass_Xc_mKKpi': 'CHILD(WMASS("K+", "K-", "pi+"), 1)', 'Vtx_XbMass_Xc_mKK': 'CHILD((((493.677**2 + CHILD(P2, 1))**.5 + CHILD(E, 2))**2 - (CHILD(PX, 1) + CHILD(PX, 2))**2 - (CHILD(PY, 1) + CHILD(PY, 2))**2 - (CHILD(PZ, 1) + CHILD(PZ, 2))**2)**.5, 1)', 'Vtx_XbMass_Xc_m13': 'CHILD(M13, 1)', 'Vtx_XbMass_Xc_m12': 'CHILD(M12, 1)', 'Vtx_XbMass_Xc_m23': 'CHILD(M23, 1)', 'Vtx_XbMass_Xc_mppipi': 'CHILD(WMASS("p+", "pi-", "pi+"), 1)', 'Vtx_XbMass_Xc_mpKK': 'CHILD(WMASS("p+", "K-", "K+"), 1)'})
LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_XbMassDTF',
                      Source = 'LoKi::Hybrid::DictOfFunctors/Xb_lokituple_Vtx_XbMassdict',
                      daughtersToConstrain = ['Lambda_b0'])
LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_XbMassDTF').addTool(LoKi__Hybrid__DictOfFunctors('Xb_lokituple_Vtx_XbMassdict'))
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_XbMass',
                         Source = 'LoKi::Hybrid::DTFDict/Xb_lokituple_Vtx_XbMassDTF',
                         NumVar = 7)
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_XbMass').addTool(LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_XbMassDTF'))
LoKi__Hybrid__DictOfFunctors('Xb_lokituple_NoVtx_BothMassdict',
                             Variables = {'NoVtx_BothMass_Xc_m23': 'CHILD(M23, 1)', 'NoVtx_BothMass_Xc_mKK': 'CHILD((((493.677**2 + CHILD(P2, 1))**.5 + CHILD(E, 2))**2 - (CHILD(PX, 1) + CHILD(PX, 2))**2 - (CHILD(PY, 1) + CHILD(PY, 2))**2 - (CHILD(PZ, 1) + CHILD(PZ, 2))**2)**.5, 1)', 'NoVtx_BothMass_Xc_mppipi': 'CHILD(WMASS("p+", "pi-", "pi+"), 1)', 'NoVtx_BothMass_Xc_mKKpi': 'CHILD(WMASS("K+", "K-", "pi+"), 1)', 'NoVtx_BothMass_Xc_mpKK': 'CHILD(WMASS("p+", "K-", "K+"), 1)', 'NoVtx_BothMass_Xc_m13': 'CHILD(M13, 1)', 'NoVtx_BothMass_Xc_m12': 'CHILD(M12, 1)'})
LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_BothMassDTF',
                      Source = 'LoKi::Hybrid::DictOfFunctors/Xb_lokituple_NoVtx_BothMassdict',
                      daughtersToConstrain = ['Lambda_b0', 'Lambda_c+'],
                      constrainToOriginVertex = False)
LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_BothMassDTF').addTool(LoKi__Hybrid__DictOfFunctors('Xb_lokituple_NoVtx_BothMassdict'))
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_BothMass',
                         Source = 'LoKi::Hybrid::DTFDict/Xb_lokituple_NoVtx_BothMassDTF',
                         NumVar = 7)
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_BothMass').addTool(LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_BothMassDTF'))
LoKi__Hybrid__DictOfFunctors('Xb_lokituple_NoVtx_NoMassdict',
                             Variables = {'NoVtx_NoMass_Xc_m23': 'CHILD(M23, 1)', 'NoVtx_NoMass_Xc_mppipi': 'CHILD(WMASS("p+", "pi-", "pi+"), 1)', 'NoVtx_NoMass_Xc_m13': 'CHILD(M13, 1)', 'NoVtx_NoMass_Xc_m12': 'CHILD(M12, 1)', 'NoVtx_NoMass_Xc_mKKpi': 'CHILD(WMASS("K+", "K-", "pi+"), 1)', 'NoVtx_NoMass_Xc_mKK': 'CHILD((((493.677**2 + CHILD(P2, 1))**.5 + CHILD(E, 2))**2 - (CHILD(PX, 1) + CHILD(PX, 2))**2 - (CHILD(PY, 1) + CHILD(PY, 2))**2 - (CHILD(PZ, 1) + CHILD(PZ, 2))**2)**.5, 1)', 'NoVtx_NoMass_Xc_mpKK': 'CHILD(WMASS("p+", "K-", "K+"), 1)'})
LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_NoMassDTF',
                      Source = 'LoKi::Hybrid::DictOfFunctors/Xb_lokituple_NoVtx_NoMassdict',
                      constrainToOriginVertex = False)
LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_NoMassDTF').addTool(LoKi__Hybrid__DictOfFunctors('Xb_lokituple_NoVtx_NoMassdict'))
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_NoMass',
                         Source = 'LoKi::Hybrid::DTFDict/Xb_lokituple_NoVtx_NoMassDTF',
                         NumVar = 7)
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_NoMass').addTool(LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_NoMassDTF'))
# Plain TupleToolDecayTreeFitter variants for the data tuple.
TupleToolDecayTreeFitter('Vtx_BothMass',
                         daughtersToConstrain = ['Lambda_b0', 'Lambda_c+'],
                         Verbose = True,
                         constrainToOriginVertex = True)
TupleToolDecayTreeFitter('NoVtx_XcMass',
                         daughtersToConstrain = ['Lambda_c+'],
                         Verbose = True)
TupleToolDecayTreeFitter('NoVtx_BothMass',
                         daughtersToConstrain = ['Lambda_b0', 'Lambda_c+'],
                         Verbose = True)
TupleToolDecayTreeFitter('NoVtx_NoMass',
                         Verbose = True)
LoKi__Hybrid__DictOfFunctors('Xb_lokituple_Vtx_BothMassdict',
                             Variables = {'Vtx_BothMass_Xc_mppipi': 'CHILD(WMASS("p+", "pi-", "pi+"), 1)', 'Vtx_BothMass_Xc_m12': 'CHILD(M12, 1)', 'Vtx_BothMass_Xc_m13': 'CHILD(M13, 1)', 'Vtx_BothMass_Xc_mpKK': 'CHILD(WMASS("p+", "K-", "K+"), 1)', 'Vtx_BothMass_Xc_m23': 'CHILD(M23, 1)', 'Vtx_BothMass_Xc_mKKpi': 'CHILD(WMASS("K+", "K-", "pi+"), 1)', 'Vtx_BothMass_Xc_mKK': 'CHILD((((493.677**2 + CHILD(P2, 1))**.5 + CHILD(E, 2))**2 - (CHILD(PX, 1) + CHILD(PX, 2))**2 - (CHILD(PY, 1) + CHILD(PY, 2))**2 - (CHILD(PZ, 1) + CHILD(PZ, 2))**2)**.5, 1)'})
LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_BothMassDTF',
                      Source = 'LoKi::Hybrid::DictOfFunctors/Xb_lokituple_Vtx_BothMassdict',
                      daughtersToConstrain = ['Lambda_b0', 'Lambda_c+'])
LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_BothMassDTF').addTool(LoKi__Hybrid__DictOfFunctors('Xb_lokituple_Vtx_BothMassdict'))
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_BothMass',
                         Source = 'LoKi::Hybrid::DTFDict/Xb_lokituple_Vtx_BothMassDTF',
                         NumVar = 7)
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_BothMass').addTool(LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_BothMassDTF'))
LoKi__Hybrid__DictOfFunctors('Xb_lokituple_Vtx_NoMassdict',
                             Variables = {'Vtx_NoMass_Xc_mppipi': 'CHILD(WMASS("p+", "pi-", "pi+"), 1)', 'Vtx_NoMass_Xc_m12': 'CHILD(M12, 1)', 'Vtx_NoMass_Xc_mKKpi': 'CHILD(WMASS("K+", "K-", "pi+"), 1)', 'Vtx_NoMass_Xc_m13': 'CHILD(M13, 1)', 'Vtx_NoMass_Xc_m23': 'CHILD(M23, 1)', 'Vtx_NoMass_Xc_mpKK': 'CHILD(WMASS("p+", "K-", "K+"), 1)', 'Vtx_NoMass_Xc_mKK': 'CHILD((((493.677**2 + CHILD(P2, 1))**.5 + CHILD(E, 2))**2 - (CHILD(PX, 1) + CHILD(PX, 2))**2 - (CHILD(PY, 1) + CHILD(PY, 2))**2 - (CHILD(PZ, 1) + CHILD(PZ, 2))**2)**.5, 1)'})
LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_NoMassDTF',
                      Source = 'LoKi::Hybrid::DictOfFunctors/Xb_lokituple_Vtx_NoMassdict')
LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_NoMassDTF').addTool(LoKi__Hybrid__DictOfFunctors('Xb_lokituple_Vtx_NoMassdict'))
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_NoMass',
                         Source = 'LoKi::Hybrid::DTFDict/Xb_lokituple_Vtx_NoMassDTF',
                         NumVar = 7)
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_NoMass').addTool(LoKi__Hybrid__DTFDict('Xb_lokituple_Vtx_NoMassDTF'))
TupleToolDecayTreeFitter('NoVtx_XbMass',
                         daughtersToConstrain = ['Lambda_b0'],
                         Verbose = True)
TupleToolDecayTreeFitter('Vtx_XcMass',
                         daughtersToConstrain = ['Lambda_c+'],
                         Verbose = True,
                         constrainToOriginVertex = True)
LoKi__Hybrid__DictOfFunctors('Xb_lokituple_NoVtx_XbMassdict',
                             Variables = {'NoVtx_XbMass_Xc_m12': 'CHILD(M12, 1)', 'NoVtx_XbMass_Xc_mpKK': 'CHILD(WMASS("p+", "K-", "K+"), 1)', 'NoVtx_XbMass_Xc_mppipi': 'CHILD(WMASS("p+", "pi-", "pi+"), 1)', 'NoVtx_XbMass_Xc_m13': 'CHILD(M13, 1)', 'NoVtx_XbMass_Xc_m23': 'CHILD(M23, 1)', 'NoVtx_XbMass_Xc_mKKpi': 'CHILD(WMASS("K+", "K-", "pi+"), 1)', 'NoVtx_XbMass_Xc_mKK': 'CHILD((((493.677**2 + CHILD(P2, 1))**.5 + CHILD(E, 2))**2 - (CHILD(PX, 1) + CHILD(PX, 2))**2 - (CHILD(PY, 1) + CHILD(PY, 2))**2 - (CHILD(PZ, 1) + CHILD(PZ, 2))**2)**.5, 1)'})
LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_XbMassDTF',
Source = 'LoKi::Hybrid::DictOfFunctors/Xb_lokituple_NoVtx_XbMassdict',
daughtersToConstrain = ['Lambda_b0'],
constrainToOriginVertex = False)
LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_XbMassDTF').addTool(LoKi__Hybrid__DictOfFunctors('Xb_lokituple_NoVtx_XbMassdict'))
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_XbMass',
Source = 'LoKi::Hybrid::DTFDict/Xb_lokituple_NoVtx_XbMassDTF',
NumVar = 7)
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_XbMass').addTool(LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_XbMassDTF'))
LoKi__Hybrid__DictOfFunctors('Xb_lokituple_NoVtx_XcMassdict',
Variables = {'NoVtx_XcMass_Xc_mpKK': 'CHILD(WMASS("p+", "K-", "K+"), 1)', 'NoVtx_XcMass_Xc_mppipi': 'CHILD(WMASS("p+", "pi-", "pi+"), 1)', 'NoVtx_XcMass_Xc_mKK': 'CHILD((((493.677**2 + CHILD(P2, 1))**.5 + CHILD(E, 2))**2 - (CHILD(PX, 1) + CHILD(PX, 2))**2 - (CHILD(PY, 1) + CHILD(PY, 2))**2 - (CHILD(PZ, 1) + CHILD(PZ, 2))**2)**.5, 1)', 'NoVtx_XcMass_Xc_m12': 'CHILD(M12, 1)', 'NoVtx_XcMass_Xc_m23': 'CHILD(M23, 1)', 'NoVtx_XcMass_Xc_m13': 'CHILD(M13, 1)', 'NoVtx_XcMass_Xc_mKKpi': 'CHILD(WMASS("K+", "K-", "pi+"), 1)'})
LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_XcMassDTF',
Source = 'LoKi::Hybrid::DictOfFunctors/Xb_lokituple_NoVtx_XcMassdict',
daughtersToConstrain = ['Lambda_c+'],
constrainToOriginVertex = False)
LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_XcMassDTF').addTool(LoKi__Hybrid__DictOfFunctors('Xb_lokituple_NoVtx_XcMassdict'))
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_XcMass',
Source = 'LoKi::Hybrid::DTFDict/Xb_lokituple_NoVtx_XcMassDTF',
NumVar = 7)
LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_XcMass').addTool(LoKi__Hybrid__DTFDict('Xb_lokituple_NoVtx_XcMassDTF'))
TupleToolDecayTreeFitter('Vtx_NoMass',
Verbose = True,
constrainToOriginVertex = True)
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').X_b0.ToolList = []
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').X_b0.addTupleTool(TupleToolDecayTreeFitter('Vtx_XcMass'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').X_b0.addTupleTool(TupleToolTISTOS('X_b0To_X_cTopKpi_bachelorPiTuple.X_b0_TupleToolTISTOS'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').X_b0.addTupleTool(LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_NoMass'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').X_b0.addTupleTool(TupleToolDecayTreeFitter('Vtx_BothMass'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').X_b0.addTupleTool(TupleToolDecayTreeFitter('Vtx_XbMass'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').X_b0.addTupleTool(LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_XcMass'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').X_b0.addTupleTool(LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_XcMass'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').X_b0.addTupleTool(LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_NoMass'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').X_b0.addTupleTool(TupleToolDecayTreeFitter('Vtx_NoMass'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').X_b0.addTupleTool(LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_XbMass'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').X_b0.addTupleTool(LoKi__Hybrid__TupleTool('X_b0To_X_cTopKpi_bachelorPiTuple.X_b0_LoKi__Hybrid__TupleTool'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').X_b0.addTupleTool(LoKi__Hybrid__Dict2Tuple('Xb_lokituple_Vtx_BothMass'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').X_b0.addTupleTool(LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_BothMass'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').X_b0.addTupleTool(LoKi__Hybrid__Dict2Tuple('Xb_lokituple_NoVtx_XbMass'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').X_b0.addTupleTool(TupleToolDecayTreeFitter('NoVtx_XcMass'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').X_b0.addTupleTool(TupleToolDecayTreeFitter('NoVtx_NoMass'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').X_b0.addTupleTool(TupleToolDecayTreeFitter('NoVtx_XbMass'))
DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple').X_b0.addTupleTool(TupleToolDecayTreeFitter('NoVtx_BothMass'))
CheckPV('CheckPV')
GaudiSequencer('StrippingLb2LcPiLc2PKPiBeauty2Charm-Sequence',
Members = [CheckPV('CheckPV'), DecayTreeTuple('X_b0To_X_cTopKpi_bachelorPiTuple')])
DaVinci('DaVinci',
DataType = '2012',
TupleFile = 'DVTuples.root',
UserAlgorithms = [GaudiSequencer('StrippingLb2LcPiLc2PKPiBeauty2Charm-Sequence'), GaudiSequencer('X_b0To_X_cTopKpi_bachelorPi_MCSeq'), MCDecayTreeTuple('X_b0To_X_cTopKpi_bachelorPi_MCAll')],
CondDBtag = 'sim-20130522-1-vc-mu100',
HistogramFile = 'DVHistos.root',
Lumi = True,
Simulation = True,
DDDBtag = 'dddb-20130929-1')
dv = DaVinci('DaVinci')
|
"""
For CLI usage
"""
import argparse
from .Interpretor import Interpretor
DESCRIPTION = "PyCalc"


def parse_args():
    """Parse and return the PyCalc command-line arguments."""
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    # One-shot expression evaluation.
    parser.add_argument('-c', '--cmd', type=str,
                        help='a command to interpret put result in stdout')
    # Alternate location for the persisted session/save file.
    parser.add_argument('-f', '--file', type=str,
                        help='a specific path to a save file.')
    # Repeatable flag: each -v bumps the verbosity level by one.
    parser.add_argument('-v', '--verbose', help='verbose mode',
                        action='count', default=0)
    return parser.parse_args()
if __name__ == '__main__':
    # CLI entry point: evaluate the -c/--cmd expression (when given) and print
    # the result.  The -f/--file and -v/--verbose options are parsed but not
    # acted upon here — presumably consumed elsewhere; TODO confirm.
    parse = parse_args()
    it = Interpretor()
    if parse.cmd:
        print(it.interpret(parse.cmd))
|
# Package façade: re-export the two public classes so callers can do
# `from <package> import Rule, Domain`.
from .domain import Domain
from .rule import Rule

__all__ = ["Rule", "Domain"]
|
import numpy as np
__author__ = 'Yuji Ikeda'
class EOS(object):
    """Abstract equation-of-state interface.

    Parameter vector convention (shared by all subclasses):
        p[0] = E_0   equilibrium energy
        p[1] = B_0   bulk modulus at equilibrium
        p[2] = B'_0  pressure derivative of the bulk modulus
        p[3] = V_0   equilibrium volume
    """

    @staticmethod
    def ev(volume, *p):
        """Energy E(V); must be provided by a concrete EOS."""
        raise NotImplementedError

    @staticmethod
    def pv(volume, *p):
        """Pressure P(V); must be provided by a concrete EOS."""
        raise NotImplementedError

    @staticmethod
    def bv(volume, *p):
        """Bulk modulus B(V); must be provided by a concrete EOS."""
        raise NotImplementedError
class EOSVinet(EOS):
    """Vinet equation of state.

    A. Otero-de-la-Roza and V. Luaña, Comput. Phys. Commun. 182 , 1708 (2011).
    """
    name = 'Vinet'

    @staticmethod
    def ev(volume, *p):
        """Energy E(V) of the Vinet model."""
        eta = np.cbrt(volume / p[3])   # (V / V0)^(1/3)
        xi = 3.0 / 2 * (p[2] - 1)
        w = xi * (1 - eta)             # shared sub-expression of all three curves
        return p[0] + 9 * p[1] * p[3] / (xi ** 2) * (1 + (w - 1) * np.exp(w))

    @staticmethod
    def pv(volume, *p):
        """Pressure P(V) of the Vinet model."""
        eta = np.cbrt(volume / p[3])
        xi = 3.0 / 2 * (p[2] - 1)
        s = 1 - eta
        return 3 * p[1] / (eta ** 2) * s * np.exp(xi * s)

    @staticmethod
    def bv(volume, *p):
        """Bulk modulus B(V) of the Vinet model."""
        eta = np.cbrt(volume / p[3])
        xi = 3.0 / 2 * (p[2] - 1)
        s = 1 - eta
        return p[1] * ((2 - eta) / (eta ** 2) + xi * s / eta) * np.exp(xi * s)
class EOSBM2(EOS):
    """Second-order Birch-Murnaghan equation of state."""
    name = 'BM2'

    @staticmethod
    def ev(volume, *p):
        """Energy E(V): quadratic in the Eulerian finite strain f."""
        compression = (p[3] / volume)
        strain = (compression ** (2. / 3.) - 1.) * 0.5
        prefactor = 9. / 2. * p[1] * p[3]
        return p[0] + prefactor * strain ** 2

    @staticmethod
    def pv(volume, *p):
        """Pressure P(V)."""
        compression = (p[3] / volume)
        strain = (compression ** (2. / 3.) - 1.) * 0.5
        return 3.0 * p[1] * strain * (2.0 * strain + 1.0) ** (5.0 / 2.0)

    @staticmethod
    def bv(volume, *p):
        """Bulk modulus B(V)."""
        compression = (p[3] / volume)
        strain = (compression ** (2. / 3.) - 1.) * 0.5
        return p[1] * (7.0 * strain + 1.0) * (2.0 * strain + 1.0) ** (5.0 / 2.0)
class EOSBM3(EOS):
    """Third-order Birch-Murnaghan EOS: BM2 plus the (B'_0 - 4) correction term."""
    name = 'BM3'

    @staticmethod
    def ev(volume, *p):
        """Energy E(V) = E_BM2(V) + c3 * f^3."""
        compression = (p[3] / volume)
        strain = (compression ** (2. / 3.) - 1.) * 0.5
        c3 = 9. / 2. * p[1] * p[3] * (p[2] - 4.0)
        return EOSBM2.ev(volume, *p) + c3 * strain ** 3

    @staticmethod
    def pv(volume, *p):
        """Pressure P(V) with the third-order correction."""
        compression = (p[3] / volume)
        strain = (compression ** (2. / 3.) - 1.) * 0.5
        c3 = 9. / 2. * p[1] * p[3] * (p[2] - 4.0)
        return EOSBM2.pv(volume, *p) + c3 * strain ** 2 * compression ** (5. / 3.)

    @staticmethod
    def bv(volume, *p):
        """Bulk modulus B(V) with the third-order correction."""
        compression = (p[3] / volume)
        strain = (compression ** (2. / 3.) - 1.) * 0.5
        c3 = 9. / 2. * p[1] * p[3] * (p[2] - 4.0)
        return EOSBM2.bv(volume, *p) + c3 / 3. * strain * (9. * strain + 2.) * compression ** (5. / 3.)
class EOSMurnaghan(EOS):
    """Murnaghan EOS: bulk modulus assumed linear in pressure."""
    name = 'Murnaghan'

    @staticmethod
    def ev(volume, *p):
        """Energy E(V)."""
        ratio = (p[3] / volume) ** p[2]
        bp_minus_one = p[2] - 1.0
        return p[0] + p[1] * volume / p[2] * (ratio / bp_minus_one + 1) - p[1] * p[3] / bp_minus_one

    @staticmethod
    def pv(volume, *p):
        """Pressure P(V)."""
        ratio = (p[3] / volume) ** p[2]
        return p[1] / p[2] * (ratio - 1.0)

    @staticmethod
    def bv(volume, *p):
        # Defining property of the model: B(V) = B_0 + B'_0 * P(V).
        return p[1] + p[2] * EOSMurnaghan.pv(volume, *p)
class EOSFactory(object):
    """Factory resolving a concrete EOS implementation by name."""

    def __init__(self, name: str):
        self._name = name

    def create(self) -> EOS:
        """Instantiate the EOS selected at construction.

        Raises ValueError for an unknown name.
        """
        registry = {
            'Vinet': EOSVinet,
            'BM2': EOSBM2,
            'BM3': EOSBM3,
            'Murnaghan': EOSMurnaghan,
        }
        eos_class = registry.get(self._name)
        if eos_class is None:
            raise ValueError(self._name)
        return eos_class()
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 31 16:32:44 2016
@author: nmvenuti
Modeling grid search
"""
#Import packages
import pandas as pd
import numpy as np
import glob
import os
from sklearn.preprocessing import StandardScaler
from sklearn import svm
from sklearn.ensemble import RandomForestRegressor
import time
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
#from sknn import mlp
# Wall-clock start of the run (value is recorded but not printed in this file).
startTime=time.time()
################################
#####Import and clean data######
################################
def addRank(signalDF):
    """Attach 'groupName' and its a-priori 'rank' to every row of signalDF.

    The group name is the prefix of 'groupId' before the first underscore.
    NOTE: the merge is an inner join, so rows whose group is not in the
    ranking table are dropped (same as the original behaviour).
    """
    group_ranks = pd.DataFrame(
        {'groupName': ['WBC', 'PastorAnderson', 'NaumanKhan', 'DorothyDay',
                       'JohnPiper', 'Shepherd', 'Rabbinic', 'Unitarian',
                       'MehrBaba'],
         'rank': [1, 2, 3, 4, 4, 4, 6, 7, 8]})
    signalDF['groupName'] = signalDF['groupId'].map(lambda x: x.split('_')[0])
    signalDF = signalDF.merge(group_ranks, on='groupName')
    return(signalDF)
#Define data filepath
rawPath='./github/nmvenuti/DSI_Religion/pythonOutput/run1/cleanedOutput'

#Get raw files
# Walk the output tree and collect every per-run masterOutput.csv.
rawFileList=[]
for dirpath, dirnames, filenames in os.walk(rawPath):
    for filename in [f for f in filenames ]:
        if 'masterOutput.csv' in filename:
            rawFileList.append(os.path.join(dirpath, filename))

#Create list of lists with coco,cv,netAng,45,SC
# The run parameters are encoded as the numeric path components of each file.
cleanFileList=[[int(x) for x in y.replace('/','_').split('_') if x.isdigit()]+[y] for y in rawFileList]

#Convert to dataframe
fileDF=pd.DataFrame(cleanFileList,columns=['coco','cv','netAng','SC','filepath'])

#Check for incomplete runs and complete failed runs
#incomplete
# A parameter combination is expected to appear 3 times; report those with fewer.
fileDF['id']=fileDF['coco'].map(str)+'_'+fileDF['cv'].map(str)+'_'+fileDF['SC'].map(str)+'_'+fileDF['netAng'].map(str)
print(fileDF['id'].value_counts()[fileDF['id'].value_counts()<3])
#complete fails
# Enumerate the full parameter grid and list combinations with no output at all.
neededCuts=[str(coco)+'_'+str(cv)+'_'+str(sw)+'_'+str(ang) for coco in [2,3,4,5,6] for cv in [2,3,4,5,6] for sw in [0,10,20,30] for ang in [30,45,60,75]
            if str(coco)+'_'+str(cv)+'_'+str(sw)+'_'+str(ang) not in set(fileDF['id']) ]
for cut in neededCuts:
    print(cut.split('_'))

# Fit a random forest and an SVR per parameter cut; collect accuracy and MAE.
resultsList=[]
failedFiles=[]
for iteration in range(len(cleanFileList)):
    #only pull files with startcount ==0
    if cleanFileList[iteration][3]==0:
        try:
            #Get data frame for each cut
            signalDF=pd.read_csv(cleanFileList[iteration][4])
            signalDF=addRank(signalDF)
            #Set up modeling parameters
            xList=['perPos','perNeg','perPosDoc','perNegDoc','judgementFrac','judgementCount','avgSD', 'avgEVC']
            yList=['rank']
            signalDF=signalDF[signalDF['files']>5]
            signalDF=signalDF.dropna()
            #Set up test train splits
            # Split is encoded in the groupId suffix ('train'/'test').
            trainIndex=[x for x in signalDF['groupId'] if 'train' in x]
            testIndex=[x for x in signalDF['groupId'] if 'test' in x]
            signalTrainDF=signalDF[signalDF['groupId'].isin(trainIndex)]
            signalTestDF=signalDF[signalDF['groupId'].isin(testIndex)]
            yActual=signalTestDF['rank'].tolist()
            #Random Forest Regressor
            rfModel=RandomForestRegressor(n_estimators=10,max_depth=10,
                                          min_samples_split=1, max_features='auto',
                                          random_state=0,n_jobs=-1)
            rfModel.fit(signalTrainDF[xList],signalTrainDF[yList])
            #Predict New Data
            yPred=rfModel.predict(signalTestDF[xList])
            #Get accuracy
            # "Accuracy" here = fraction of predictions within 1 rank of truth.
            rfAccuracy=float(len([i for i in range(len(yPred)) if abs(yActual[i]-yPred[i])<1])/float(len(yPred)))
            rfMAE=np.mean(np.abs(yActual-yPred))
            #Perform same analysis with scaled data
            #Scale the data
            sc = StandardScaler()
            sc=sc.fit(signalTrainDF[xList])
            signalStdTrainDF= pd.DataFrame(sc.transform(signalTrainDF[xList]),columns=xList)
            signalStdTestDF = pd.DataFrame(sc.transform(signalTestDF[xList]),columns=xList)
            signalSVR=svm.SVR(C=3,epsilon=0.1,kernel='rbf',max_iter=100000)
            signalSVR.fit(signalStdTrainDF[xList],signalTrainDF[yList])
            #Predict New Data
            yPred=signalSVR.predict(signalStdTestDF[xList])
            #Get accuracy
            svmAccuracy=float(len([i for i in range(len(yPred)) if abs(yActual[i]-yPred[i])<1])/float(len(yPred)))
            svmMAE=np.mean(np.abs(yActual-yPred))
            resultsList.append(['_'.join(map(str,cleanFileList[iteration][0:4]))]+cleanFileList[iteration][0:4]+[rfAccuracy,rfMAE,svmAccuracy,svmMAE])
        except:
            # NOTE(review): bare except silently swallows *all* errors for a cut
            # (including KeyboardInterrupt); only the file name is recorded.
            print(cleanFileList[iteration][4]+' failed')
            failedFiles.append(cleanFileList[iteration][4])
print(failedFiles)
resultsDF=pd.DataFrame(resultsList,columns=['id','cocowindow','cvWindow','netAngle','startCount','rfAccuracy','rfMae','svmAccuracy','svmMae'])
resultsDF.to_csv(rawPath+'-summaryOutput-full.csv')
#Summarize data
#summaryDF=resultsDF.groupby(['id']).agg({'rfAccuracy':{'meanRF':'mean','minRF':'min','maxRF':'max'},
#'svmAccuracy':{'meanSVM':'mean','minSVM':'min','maxSVM':'max'}})
#summaryDF.to_csv(rawPath+'summaryOutput.csv')
#summaryDF.reset_index(inplace=True)
#summaryDF=pd.DataFrame(np.array(summaryDF))
#summaryDF.columns=['id','meanRF','minRF','maxRF','meanSVM','minRF','meanSVM']
#summaryDF.describe()
#summaryDF['cocoWindow']=summaryDF['id'].map(lambda x: int(x.split('_')[0]))
#summaryDF['cvWindow']=summaryDF['id'].map(lambda x: int(x.split('_')[1]))
#summaryDF['startCount']=summaryDF['id'].map(lambda x: int(x.split('_')[2]))
#summaryDF['netAngle']=summaryDF['id'].map(lambda x: int(x.split('_')[3]))
#summaryDF.drop('id',inplace=True,axis=1)
#Plot accuracy versus different parameters
ax=sns.pairplot(resultsDF.drop('id', axis=1))
|
from typing import List
from ndb_adapter.search_report import *
from ndb_adapter.statistics import Statistics
class SearchResult(object):
    """Base class for search result"""

    def __init__(self):
        """Default constructor"""
        self._count = 0    # number of matched structures
        self._report = []  # list of per-structure report objects

    def get_count(self) -> int:
        """Gets search results count

        :return: search results count
        :rtype: int
        """
        return self._count

    def set_count(self, count: int) -> None:
        """Sets result count

        :param count: value to set as count
        :type count: int
        :return: None
        """
        self._count = count

    def get_report(self) -> list:
        """Gets search results report

        :return: list of search results reports
        :rtype: list
        """
        return self._report

    def set_report(self, report: list) -> None:
        """Sets result report list

        :param report: list to be set as report
        :type report: list
        :return: None
        """
        self._report = report

    # Property access for report list and result count.
    report = property(get_report, set_report, doc="Gets result report")
    count = property(get_count, set_count, doc="Gets result count")

    def __str__(self):
        return f"Count: {self._count}, Report:" + str([str(x) for x in self._report])
class SimpleResult(SearchResult):
    """Class for simple search result"""

    def __init__(self):
        """Default constructor"""
        super().__init__()

    def get_report(self) -> List[SimpleReport]:
        """Gets search results report list

        :return: list of simple search reports
        :rtype: List[SimpleReport]
        """
        return self._report

    def download(self, download_type: DownloadType = DownloadType.Pdb,
                 save: bool = False, target_dir: str = '') -> List[str]:
        """Download PDB files from NDB

        :param download_type: files download type (default value is DownloadType.PDB)
        :type download_type: DownloadType
        :param target_dir: where to save file (default value is current dir)
        :type target_dir: str
        :param save: tells if files should be saved or not (default value = False)
        :type save: bool
        :return: list of strings or None
        :rtype: List[str]
        """
        downloaded = []
        for entry in self.get_report():
            fetched = ''
            try:
                fetched = entry.download(download_type, save, target_dir)
            except FileNotFoundError:
                # Server has no file for this structure; record '' instead.
                print("No file with pdb_id: " + entry.pdb_id)
            except AttributeError:
                print("Structure has not pdb_id and ndb_id in report")
            downloaded.append(fetched)
        return downloaded
class AdvancedResult(SearchResult):
    """Class for advanced search result"""

    def __init__(self):
        """Default constructor"""
        super().__init__()
        self._statistics = Statistics()  # aggregate statistics of the search

    def get_report(self) -> List[AdvancedReport]:
        """Gets advanced search results report list. You should annotate return type depending on ReportType.

        :return: list of advanced search reports
        :rtype: List[AdvancedReport]
        """
        return self._report

    def download(self, download_type: DownloadType = DownloadType.Pdb,
                 save: bool = False, target_dir: str = '') -> List[str]:
        """Download PDB files from NDB

        :param download_type: files download type (default value is DownloadType.PDB)
        :type download_type: DownloadType
        :param target_dir: where to save file (default value is current dir)
        :type target_dir: str
        :param save: tells if files should be saved or not (default value = False)
        :type save: bool
        :return: list of strings or None
        :rtype: List[str]
        """
        try:
            downloaded = []
            for entry in self._report:
                fetched = ''
                try:
                    fetched = entry.download(download_type, save, target_dir)
                except FileNotFoundError:
                    print("No file with id: " + entry.pdb_id)
                except AttributeError:
                    print("Structure has not pdb_id in report")
                downloaded.append(fetched)
            return downloaded
        except (NotImplementedError, KeyError):
            # Some report types expose no download mechanism at all.
            print("This report type doesn't support download")
            return []

    def get_statistics(self) -> Statistics:
        """Get statistics of advanced search

        :return: statistics of advanced search
        :rtype Statistics
        """
        return self._statistics

    def set_statistics(self, report: list) -> None:
        """Sets statistic

        :param report: report list to be parse as statistic
        :type report: list
        :return: None
        """
        self._statistics.set_report(report)

    # Property access for the aggregated statistics.
    statistics = property(get_statistics, set_statistics, doc="Statistics of advanced search")

    def __str__(self):
        return (f"Count: {self._count}, Report:"
                + str([str(x) for x in self._report])
                + "Statistics: " + str(self._statistics))
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class TextLSTM(nn.Module):
    """Bidirectional LSTM text classifier: embedding -> LSTM -> linear head."""

    def __init__(self, args):
        # Initialise the parent nn.Module before registering any submodules.
        super(TextLSTM, self).__init__()
        if args.static:
            # Pre-trained embedding matrix; optionally frozen.
            self.embedding = nn.Embedding.from_pretrained(args.vectors, freeze = not args.fineTuneWordEm)
        else:
            self.embedding = nn.Embedding(args.vocab_size, args.embeddingDim)
        self.lstm = nn.LSTM(input_size = args.embeddingDim,
                            hidden_size = args.hidden_size,
                            num_layers = args.num_layers,
                            bias = True,
                            batch_first = False,  # inputs must be (seq, batch, feat)
                            dropout = args.dropout,
                            bidirectional = True)  # num_directions == 2
        # The head consumes a single direction's final hidden state.
        self.fc = nn.Linear(args.hidden_size, args.classNum)

    def forward(self, x):
        # x: (batch, seq_len) token indices.
        x = self.embedding(x)
        # x: (batch, seq_len, embedding_dim); permute to (seq_len, batch, dim)
        # because the LSTM was constructed with batch_first=False.
        output, (h_n, c_n) = self.lstm(x.permute(1, 0, 2))  # h_0/c_0 default to zeros
        # h_n: (num_layers * num_directions, batch, hidden_size), ordered
        # layer0-fwd, layer0-bwd, layer1-fwd, ...  Use h_n[-1] — the LAST
        # layer's backward-direction state.  The previous code used h_n[1],
        # which is the FIRST layer's backward state and only coincides with
        # h_n[-1] when num_layers == 1; with more layers it fed an
        # intermediate-layer state to the classifier.
        x = self.fc(h_n[-1])
        # x: (batch, classNum) raw logits.
        return x

# TODO:
# - add gradient clipping
# - support variable-length sequences (packed/padded RNN input)
|
from __future__ import print_function
import sys
import codecs

# Wrap stdin with a Shift-JIS decoder and echo the fully decoded text to
# stdout.  NOTE(review): assumes the incoming byte stream really is SJIS;
# decoding errors will raise — confirm with the producer of the input.
_stdin = codecs.getreader('sjis')(sys.stdin)
print(_stdin.read())
|
if __name__ == "__main__":
    # Each person's lucky number; dict literal preserves insertion order.
    lucky_numbers = {
        'Gerard': 1,
        'Angelo': 9,
        'Lisa': 5,
        'Robert': 3,
        'Kjell': 7
    }
    # Print one "<name> is <number>" line per entry.
    for name, number in lucky_numbers.items():
        print(f"{name} is {number}")
|
# -*- coding: utf-8 -*-
import scrapy
from ..items import ShortWordLink
class IndonesiaRestaurantAddressSpider(scrapy.Spider):
    """Collects Indonesian location names from TripAdvisor restaurant pages."""

    name = 'indonesia_restaurant_address'
    # allowed_domains must contain bare domain names, not full URLs:
    # Scrapy's offsite filtering matches on the hostname, so the previous
    # URL-with-path entry could not match any request.
    allowed_domains = ['www.tripadvisor.co.id']
    start_urls = [
        'http://www.tripadvisor.co.id/Restaurants-g294225-Indonesia.html#LOCATION_LIST/',
        # 'https://www.tripadvisor.co.id/Restaurants-g294225-oa{}-Indonesia.html#LOCATION_LIST'.format(i) for i in
        # range(20, 20 * 29, 20)
    ]

    def parse(self, response):
        """Extract location names from the listing grid and append them to the city file."""
        # address_links = response.xpath('//*[@id="BROAD_GRID"]/div/div/div/div/div[1]/div[2]/a/@href').extract()
        # NOTE(review): address_links is extracted but unused — kept only to
        # support the commented-out ShortWordLink item flow below.
        address_links = response.xpath('//*[@id="LOCATION_LIST"]/ul/li/a/@href').extract()
        # address_links = ['https://www.tripadvisor.co.id' + item for item in address_links]
        # for link in address_links:
        #     item = ShortWordLink()
        #     item['url'] = link
        #     yield item
        # address = response.xpath('//*[@id="LOCATION_LIST"]/ul/li/a/text()').extract()
        # address = [item.split('di')[-1].strip() for item in address]
        # self.save(address)
        # Anchor text looks like "Restoran di <city>"; keep the part after 'di'.
        address = response.xpath('//*[@id="BROAD_GRID"]/div/div/div/div/div[1]/div[2]/a/text()').extract()
        address = [item.split('di')[-1].strip() for item in address]
        self.save(address)

    def save(self, result):
        """Append the extracted names, one per line, to the city list file."""
        with open(r'C:\Users\Administrator\Desktop\indonesia_temp\indonesia_city.txt', 'a',
                  encoding='utf-8') as f:
            # Trailing newline added: without it, the first entry of the next
            # append used to be glued onto the last entry of this one.
            f.write('\n'.join(result) + '\n')
|
"""This is 'our_toplogy' for this project
Three directly connected switches plus a host and three servers for each switch.
Adding the 'topos' dict with a key/value pair to generate 'our_toplogy'
enables one to pass in '--topo=our_topology' from the command line.
"""
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.util import dumpNodeConnections
from mininet.log import setLogLevel
import time
import threading
class MyTopo( Topo ):
    """Triangle of three switches; each switch serves one host and three servers."""

    def __init__( self ):
        # Initializing topology
        Topo.__init__( self )

        # Adding hosts and servers: four endpoints per switch, in the same
        # order as before (h<i>, s<i>a, s<i>b, s<i>c).
        endpoint_names = ['h1', 's1a', 's1b', 's1c',
                          'h2', 's2a', 's2b', 's2c',
                          'h3', 's3a', 's3b', 's3c']
        endpoints = [self.addHost(n) for n in endpoint_names]

        # Adding switches
        switches = [self.addSwitch('switch%d' % i) for i in (1, 2, 3)]

        # Adding links: each switch links its four endpoints, then connects to
        # the next switch, closing the ring (switch3 back to switch1).
        for idx, sw in enumerate(switches):
            for endpoint in endpoints[idx * 4:(idx + 1) * 4]:
                self.addLink(endpoint, sw)
            self.addLink(sw, switches[(idx + 1) % 3])
# Mininet looks this dict up when launched with '--topo=our_topology'.
topos = { 'our_topology': ( lambda: MyTopo() ) }

# Commented-out smoke test: periodically builds the topology and pings all
# hosts.  Left disabled.
#def pin_conn():
#    threading.Timer(10.0,pin_conn).start()
#    top = MyTopo()
#    net=Mininet(top)
#    net.start()
#    net.pingAll()
#    net.stop()
#
#pin_conn()
#
|
import turtle as tu

# Pen starts down at the origin, so this first goto draws nothing new.
tu.goto(0, 0)
# Five-pointed star {5/2}: turning 180-36 = 144 degrees after each of 5 edges.
for i in range(5):
    tu.forward(100)
    tu.left(180-36)
# Move up without drawing, then draw an eleven-pointed star {11/5}:
# the per-vertex turn is 180 - 360/22 = 163.64 degrees over 11 edges.
tu.penup()
tu.goto(0, 200)
tu.pendown()
for i in range(11):
    tu.forward(100)
    tu.left(180-360/22)
|
def test_sort_topics(client):
    """Create two topics, open the sort ('Sortieren') view, check both appear."""
    client.login_admin().follow()
    # Create "Topic 1" under /topics/themen.
    page = client.get('/topics/themen')
    page = page.click('Thema')
    page.form['title'] = "Topic 1"
    page = page.form.submit().follow()
    # Create "Topic 2" the same way.
    page = client.get('/topics/themen')
    page = page.click('Thema')
    page.form['title'] = "Topic 2"
    page = page.form.submit().follow()
    # The sort view should list both topics.
    page = page.click('Sortieren')
    page = page.follow()
    assert "Topic 1" in page
    assert "Topic 2" in page
def get_select_option_id_by_text(select_form, search_text):
    """Return the id of the single <select> option whose text contains search_text.

    Returns None (printing a diagnostic) when zero or several options match,
    so callers can distinguish a unique hit from an ambiguous/missing one.
    """
    found = []
    for option in select_form.options:
        # each option is a tuple (id, bool, select text)
        if search_text in option[2]:
            found.append(option[0])  # append page id
    if len(found) == 1:
        return found[0]
    if not found:
        # Previously this case fell into the "multiple ids" message with an
        # empty list, which was misleading when nothing matched at all.
        print(f'Found no id with {search_text}')
        return None
    print(f'Found multiple ids with {search_text}: {found}')
    return None
def test_move_topics(client):
    """Exercise moving topics: nesting, moving to root, and two invalid targets."""
    client.login_admin().follow()
    # Create "Topic 1".
    page = client.get('/topics/themen')
    page = page.click('Thema')
    page.form['title'] = "Topic 1"
    page = page.form.submit().follow()
    assert page.status_code == 200
    # Create "Topic 2".
    page = client.get('/topics/themen')
    page = page.click('Thema')
    page.form['title'] = "Topic 2"
    page = page.form.submit().follow()
    assert page.status_code == 200
    page = page.click('Verschieben')  # move topic 2 under topic 1
    parent_id = get_select_option_id_by_text(page.form['parent_id'], 'Topic 1')
    page.form['parent_id'].select(parent_id)
    page = page.form.submit().follow()
    assert page.status_code == 200
    assert client.get('/topics/themen/topic-1/topic-2')
    # move page topic-1 to 'root' including subpage
    page = client.get('/topics/themen/topic-1')
    page = page.click('Verschieben')
    page.form['parent_id'].select('root')
    page = page.form.submit().follow()
    print(page.request.url)
    assert client.get('/topics/topic-1')
    assert client.get('/topics/topic-1/topic-2')
    # test moving topic to itself (which is invalid)
    page = client.get('/topics/topic-1/topic-2')
    page = page.click('Verschieben')
    parent_id = get_select_option_id_by_text(page.form['parent_id'], 'Topic 2')
    page.form['parent_id'].select(parent_id)
    page = page.form.submit()
    assert page.pyquery('.alert')
    assert page.pyquery('.error')
    assert 'Ungültiger Zielort gewählt' in page
    # test moving topic to a child (which is invalid)
    page = client.get('/topics/topic-1')
    page = page.click('Verschieben')
    parent_id = get_select_option_id_by_text(page.form['parent_id'], 'Topic 2')
    page.form['parent_id'].select(parent_id)
    page = page.form.submit()
    assert page.pyquery('.alert')
    assert page.pyquery('.error')
    assert 'Ungültiger Zielort gewählt' in page
def test_contact_info_visible(client):
    """Contact info shows by default, disappears when hidden, reappears when unhidden."""
    client.login_admin().follow()
    page = client.get('/topics/themen')
    page = page.click('Bearbeiten')
    page.form['contact'] = "Test contact info"
    page = page.form.submit().follow()
    assert "Test contact info" in page
    # Hiding the contact removes it from the rendered page.
    page = page.click('Bearbeiten')
    page.form['hide_contact'] = True
    page = page.form.submit().follow()
    assert "Test contact info" not in page
    # Unhiding restores it.
    page = page.click('Bearbeiten')
    page.form['hide_contact'] = False
    page = page.form.submit().follow()
    assert "Test contact info" in page
def test_view_page_as_member(client):
    """A member-access page is visible to admins and members but not anonymous users."""
    admin = client
    client.login_admin()
    # Create a page restricted to members.
    new_page = admin.get('/topics/organisation').click('Thema')
    new_page.form['title'] = "Test"
    new_page.form['access'] = 'member'
    page = new_page.form.submit().follow()
    page_url = '/topics/organisation/test'
    # Test if admin can see page
    admin.get(page_url)
    page = admin.get('/topics/organisation')
    assert 'Test' in page
    # Test if a member can see the page
    member = client.spawn()
    member.login_member()
    member.get(page_url)
    page = member.get('/topics/organisation')
    assert 'Test' in page
    # Test if a visitor can not see the page
    anon = client.spawn()
    anon.get(page_url, status=403)
    page = anon.get('/topics/organisation')
    assert 'Test' not in page
|
# Jaemin Lee (aka, J911)
# 2019
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """CNN binary classifier: two conv+pool stages followed by a 6-layer MLP head."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 30, kernel_size=10)
        self.conv2 = nn.Conv2d(30, 100, kernel_size=10)
        self.mp1 = nn.MaxPool2d(6)  # aggressive 6x downsampling of the raw image
        self.mp2 = nn.MaxPool2d(2)
        # NOTE(review): fc1 is enormous (105800 x 5000 weights, ~2 GB) and the
        # flattened size hard-codes one specific input resolution — TODO confirm
        # the expected image size with the data pipeline.
        self.fc1 = nn.Linear(105800, 5000)
        self.fc2 = nn.Linear(5000, 1000)
        self.fc3 = nn.Linear(1000, 500)
        self.fc4 = nn.Linear(500, 50)
        self.fc5 = nn.Linear(50, 10)
        self.fc6 = nn.Linear(10, 2)

    def forward(self, x):
        # Convert byte images to float.  x.float() keeps the tensor on its
        # current device; the previous x.type('torch.FloatTensor') silently
        # forced a CPU tensor and would break CUDA inputs.
        x = x.float()  # torch.ByteTensor -> float
        in_size = x.size(0)
        x = self.mp1(x)  # shrink spatial dims 6x before any convolution
        x = F.relu(self.mp2(self.conv1(x)))
        x = F.relu(self.mp2(self.conv2(x)))
        x = x.view(in_size, -1)  # flatten per sample
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = F.relu(self.fc4(x))
        x = F.relu(self.fc5(x))
        x = self.fc6(x)  # raw logits; pair with CrossEntropyLoss
        return x
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Nombre: QLineEdit.py
# Autor: Miguel Andres Garcia Niño
# Creado: 22 de Mayo 2018
# Modificado: 22 de Mayo 2018
# Copyright: (c) 2018 by Miguel Andres Garcia Niño, 2018
# License: Apache License 2.0
# ----------------------------------------------------------------------------
__versión__ = "1.0"
"""
El módulo *QLineEdit* permite probar varias funciones públicas de un QLineEdit
y las señales que este widget emite.
"""
from PyQt5.QtGui import QIcon, QRegExpValidator, QFont
from PyQt5.QtCore import Qt, QRegExp
from PyQt5.QtWidgets import QApplication, QMainWindow, QLineEdit
# =================== CLASE ventanaPrincipal =======================
# =================== MAIN WINDOW CLASS =======================
class ventanaPrincipal(QMainWindow):
    """Demo window showcasing QLineEdit public functions and signals.

    Most configuration calls are left commented out on purpose: uncomment
    individual lines to try out each QLineEdit feature.
    """

    def __init__(self, parent=None):
        super(ventanaPrincipal, self).__init__(parent)
        self.setWindowTitle("QLineEdit en PyQt5 por: ANDRES NIÑO")
        self.setWindowIcon(QIcon("icono.png"))
        # Fixed-size window with only a close button.
        self.setWindowFlags(Qt.WindowCloseButtonHint | Qt.MSWindowsFixedSizeDialogHint)
        self.setFixedSize(400, 400)
        self.initUI()

    def initUI(self):
        # =================== QLINEEDIT WIDGET =====================
        self.lineEdit = QLineEdit(self)
        # ================== PUBLIC FUNCTIONS ==================
        self.lineEdit.setGeometry(20, 20, 360, 24)
        self.lineEdit.setText("Andres Niño")
        # Uncomment any of the following to exercise the corresponding API:
        # self.lineEdit.setAlignment(Qt.AlignLeft)
        # self.lineEdit.setClearButtonEnabled(True)
        # self.lineEdit.setCursorPosition(6)
        # self.lineEdit.home(True)
        # self.lineEdit.end(True)
        # self.lineEdit.setEchoMode(QLineEdit.Password)
        # self.lineEdit.setFrame(False)
        # self.lineEdit.setMaxLength(2)
        # self.lineEdit.setPlaceholderText("Andres Niño")
        # self.lineEdit.setReadOnly(True)
        # self.lineEdit.setSelection(3, 2)
        # self.lineEdit.selectAll()
        # self.lineEdit.deselect()
        # self.lineEdit.setTextMargins(10, 0, 6, 1)
        # self.lineEdit.setInputMask(">AAAAA-AAAAA-AAAAA-AAAAA-AAAAA;#")
        # self.lineEdit.setValidator(QRegExpValidator(QRegExp("[0-9]+")))
        # print(self.lineEdit.text())
        # Capitalized 10pt font for the line edit's contents.
        fuente = QFont()
        fuente.setPointSize(10)
        fuente.setCapitalization(QFont.Capitalize)
        self.lineEdit.setFont(fuente)
        # ======================= SIGNALS ========================
        # self.lineEdit.returnPressed.connect(lambda: print("Se presiono la tecla Enter..."))
        # self.lineEdit.textChanged.connect(lambda: print("El texto cambio..."))
        # self.lineEdit.textEdited.connect(lambda: print("El texto cambio..."))
# ================================================================
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the demo window,
    # and hand control to the Qt event loop until the window closes.
    import sys
    aplicacion = QApplication(sys.argv)
    ventana = ventanaPrincipal()
    ventana.show()
    sys.exit(aplicacion.exec_())
|
from PIL import Image
# Halve an image's dimensions and save it as "<name>_half_compressed.jpg".
user_input = input("Which photo do you want to downsize?: ")
im = Image.open(user_input)
width, height = im.size
scaledown = 0.5  # keep 50% of each side
new_width = int(round(width * scaledown))
new_height = int(round(height * scaledown))
# Image.LANCZOS is the modern name for ANTIALIAS (which was removed in Pillow 10).
im = im.resize((new_width, new_height), Image.LANCZOS)
# rsplit on the last '.' handles extensions of any length, unlike the old [:-4] slice.
im.save(user_input.rsplit('.', 1)[0] + "_half_compressed.jpg")
|
import ini_files.ini as ini
# объявление 2х функций для switch-case
class _switch(object):
    # Poor-man's switch/case helper: calling _switch(v) records v on the CLASS
    # (shared state) and returns True, enabling the `while _switch(x): if _case(...)`
    # idiom used in choice_thing().
    value = None
    def __new__(class_, value):
        # NOTE(review): deliberately returns True instead of an instance — the
        # class is used purely as a holder for the current switch value.
        class_.value = value
        return True
def _case(*args):
    """Return True when any candidate equals the value stored by the last _switch call."""
    return _switch.value in args
def choice_thing():
    """Interactively choose which connection settings to use.

    Offers three options: keep the current DEFAULT section, copy an existing
    saved connection into DEFAULT, or enter a brand-new connection.
    """
    PATH = "connections.ini"
    print("Текущие настройки:")
    print(*ini.get_config_parameters(PATH, "DEFAULT"))
    print("\nКакие настройки подключения использовать:")
    try:
        answer = int(input("1-использовать текущие\n"
                           "2-установить другие\n"
                           "3-ввести новые\n"))
    except ValueError:
        # Non-numeric input falls through to the "invalid choice" branch below.
        answer = 0
    # `while _switch(...)` + `_case(...)` emulates a switch/case statement.
    while _switch(answer):
        if _case(1):
            print("используем текущие настройки", end='')
            break
        if _case(2):
            print("Выберите нужное соединение")
            connection_name = ini.get_connections_list(PATH)
            try:
                answer = int(input("Укажите номер соединения\n"))
                # 1-based user input indexes into the saved connection list.
                parameters = ini.get_config_parameters(PATH, connection_name[answer - 1])
                ini.update_default_section(PATH, parameters)
                print("Обновили default значения")
            except IndexError:
                print("Нет сохраненных соединений, либо указали неверный номер")
            break
        if _case(3):
            print("Ввод нового соединения")
            ini.create_new_section(PATH)
            break
        print("Указали некорректный вариант")
        break
|
from subprocess import PIPE, run
def getOnline(app, xen_cfg, ext=0):
    """Return the servers reported online by the app's getServer.sh script.

    Each stdout line is one server name. With a truthy *ext* the raw names
    are returned; otherwise each name gets a '.cfg' suffix. *xen_cfg* is
    unused here but kept for signature parity with getOffline().
    """
    command = ['sudo', '/bin/bash', app['path'] + '/bin/getServer.sh']
    result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True)
    names = result.stdout.splitlines()
    if ext:
        return list(names)
    return [name + '.cfg' for name in names]
def getOffline(app, xen_cfg):
    """Return the entries of *xen_cfg* that are not currently online."""
    online = getOnline(app, xen_cfg)
    return [cfg for cfg in xen_cfg if cfg not in online]
|
from __future__ import division
import numpy as np
# Code Jam-style solver: for each case, read P forbidden prefixes over an
# alphabet of size 2 and count the 2^N strings not starting with any of them
# (inclusion via a minimal, prefix-free set of prefixes).
output = open("./A-large-practice.out", 'w+')
with open('A-large-practice.in') as fp:
    T = int(fp.readline())
    cur_rd = 1
    while cur_rd <= T:
        key = 'Case #' + str(int(cur_rd)) + ': '
        cur_rd += 1
        NP = fp.readline().strip('\n')
        [N, P] = [int(n) for n in NP.split()]
        count = 0
        suffix = []
        while count < P:
            count = count + 1
            print('Count:\t', count)
            cur = fp.readline().strip('\n')
            flag = 1
            if len(suffix) == 0:
                suffix.append(cur)
                print('Add:\t', cur)
            else:
                # Iterate over a snapshot: the previous version removed from
                # `suffix` while iterating it, which silently skips elements
                # and can leave redundant prefixes in the set.
                for suf in list(suffix):
                    if cur.startswith(suf):
                        # An existing shorter prefix already covers `cur`.
                        flag = 0
                        break
                    elif suf.startswith(cur):
                        # `cur` is shorter: it supersedes this stored prefix.
                        suffix.remove(suf)
                        print('Rem:\t', suf)
                # `flag is 1` compared identity, not value; == is correct.
                if flag == 1:
                    suffix.append(cur)
                    print('Add:\t', cur)
        # Each prefix of length L rules out 2^(N-L) strings; prefixes are
        # now pairwise non-overlapping, so plain subtraction is exact.
        total = pow(2, N)
        for suf in suffix:
            total -= pow(2, N - len(suf))
        total_str = str(total)
        print(key + total_str)
        print('{0}'.format(key + total_str), file=output)
# Close the output file explicitly (it was previously left open).
output.close()
|
import cv2
import numpy as np
from PIL import Image
# 5x5 sample matrix used to exercise the LTP functions below.
tab = np.array([[9,1,4,2,6],[7,8,9,2,7],[6,6,5,3,3],[8,1,4,7,1],[4,6,2,1,3]])
#Fonction pour afficher le code binaire pour chaque pixel en appliquant LTP
def Binary(mat):
    """Collect the 8 neighbours of a 3x3 block, clockwise from the top-left corner.

    The centre cell mat[1][1] is intentionally excluded.
    """
    ring = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0), (1, 0)]
    return [mat[r][c] for r, c in ring]
#Fonction pour convertir du binaire au dicimal
def BinToDec(mat):
    """Convert a list of bits (most-significant bit first) to its decimal value.

    Fix: the previous version reversed the caller's list IN PLACE as a side
    effect; this version leaves the input untouched.
    """
    decimal = 0
    for bit in mat:
        decimal = decimal * 2 + bit
    return decimal
#Fontion qui affiche les resultats de LTP
def LTP(img):
    """Local Ternary Pattern: for each interior pixel, build a 3x3 code matrix
    (+1 above centre+t, -1 below centre-t, 0 otherwise) and return the list of
    8-neighbour codes extracted by Binary()."""
    t = 5
    codes = []
    for i in range(1, img.shape[0] - 1):
        for j in range(1, img.shape[1] - 1):
            centre = img[i, j]
            mat = np.zeros((3, 3), int)
            for a, x in enumerate(range(i - 1, i + 2)):
                for b, y in enumerate(range(j - 1, j + 2)):
                    if img[x, y] > centre + t:
                        mat[a, b] = 1
                    elif img[x, y] < centre - t:
                        mat[a, b] = -1
            codes.append(Binary(mat))
    return codes
# Fonction qui calcule Upper patter l'histograme des nombre positifs les 1
def LTPUpperPattern(img):
    """Upper LTP pattern: binary codes keeping only the positive responses
    (1 where neighbour > centre + t, 0 elsewhere)."""
    t = 5
    upper_codes = []
    for i in range(1, img.shape[0] - 1):
        for j in range(1, img.shape[1] - 1):
            centre = img[i, j]
            mat = np.zeros((3, 3), int)
            for a, x in enumerate(range(i - 1, i + 2)):
                for b, y in enumerate(range(j - 1, j + 2)):
                    if img[x, y] > centre + t:
                        mat[a, b] = 1
            upper_codes.append(Binary(mat))
    return upper_codes
# Fonction qui calcule et affiche les lower patterns l'histograme des nombres negatives -1
def LTPLowerPattern(img):
    """Lower LTP pattern: binary codes keeping only the negative responses
    (1 where neighbour < centre - t, 0 elsewhere).

    Prints the list of codes and returns None, matching the original behaviour.
    """
    t = 5
    lower_codes = []
    for i in range(1, img.shape[0] - 1):
        for j in range(1, img.shape[1] - 1):
            centre = img[i, j]
            mat = np.zeros((3, 3), int)
            for a, x in enumerate(range(i - 1, i + 2)):
                for b, y in enumerate(range(j - 1, j + 2)):
                    if img[x, y] < centre - t:
                        mat[a, b] = 1
            lower_codes.append(Binary(mat))
    print(lower_codes)
# Show the raw LTP ternary codes for the sample matrix.
print(LTP(tab))
# Show the upper-pattern (positive responses) codes.
print(LTPUpperPattern(tab))
# Show the lower-pattern (negative responses) codes; this function prints
# internally and returns None, so this line also prints "None".
print(LTPLowerPattern(tab))
# Generated by Django 2.2 on 2020-09-10 10:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds Categorie and Team models and drops
    Review.review_published. Do not hand-edit generated operations."""

    dependencies = [
        ('main', '0003_auto_20200607_1428'),
    ]
    operations = [
        migrations.CreateModel(
            name='Categorie',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('category_title', models.CharField(max_length=200)),
                ('category_content', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Team',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('team_title', models.CharField(max_length=200)),
                ('team_content', models.TextField()),
            ],
        ),
        migrations.RemoveField(
            model_name='review',
            name='review_published',
        ),
    ]
|
# This console application allows you to add two numbers together
# Exceptions and try excepts add resiliency to your code
# If all inputs are correct and everything is how it is supposed to be (Happy Path Execution)
# Exceptions are for when code does not follow happy path execution
# Prompt for two numbers and print their sum; ValueError from float() is the
# only expected failure (non-numeric input).
print("Awesome adding app")
in1 = input("Enter your first number")
in2 = input("Enter your second number")
try:
    num1 = float(in1)
    num2 = float(in2)
    total = num1 + num2  # renamed from `sum`, which shadowed the builtin
    print("The sum is {}".format(total))
except ValueError:
    print("That was an invalid number :[")
finally:
    # Runs on both the success and the error path.
    print("I print at the end")
|
#Kullanıcıdan aldığınız bir sayının mükemmel olup olmadığını bulmaya çalışın.
#Bir sayının kendi hariç bölenlerinin toplamı kendine eşitse bu sayıya "mükemmel sayı" denir.
#Örnek olarak, 6 mükemmel bir sayıdır. (1 + 2 + 3 = 6)
# Perfect-number check: a number equal to the sum of its proper divisors.
sayı = int(input("Bir Sayı Giriniz:"))
# Sum every divisor strictly below the number itself.
toplam = sum(i for i in range(1, sayı) if sayı % i == 0)
if toplam == sayı:
    print("Mükemmel sayıdır..")
else:
    print("Mükemmel sayı değildir..")
|
from django.contrib import admin
from .models import *
from candidates.models import Candidato, Experiencia
# Register your models here.
@admin.register(Competencia)
class CompetenciaAdmin(admin.ModelAdmin):
    """Admin list view for competencies: description and status columns."""
    list_display = (
        'description',
        'estado'
    )
@admin.register(Idioma)
class IdiomaAdmin(admin.ModelAdmin):
    """Admin list view for languages: name and status columns."""
    list_display = (
        'nombre',
        'estado'
    )
@admin.register(Capacitacion)
class CapacitacionAdmin(admin.ModelAdmin):
    """Admin list view for trainings: description, level, dates, institution, status."""
    list_display = (
        'descripcion',
        'nivel',
        'fecha_desde',
        'fecha_hasta',
        'institucion',
        'estado'
    )
@admin.register(Puesto)
class PuestoAdmin(admin.ModelAdmin):
    """Admin list view for positions: name, risk level, salary band, status."""
    list_display = (
        'nombre',
        'nivel_riesgo',
        'nivel_minimo_salario',
        'nivel_maximo_salario',
        'estado',
    )
@admin.register(Experiencia)
class ExperienciaAdmin(admin.ModelAdmin):
    """Admin list view for work experience (model imported from the candidates app)."""
    list_display = (
        'empresa',
        'puesto_ocupado',
        'fecha_desde',
        'fecha_hasta',
        'salario'
    )
@admin.register(Empleado)
class EmpleadoAdmin(admin.ModelAdmin):
    """Admin list view for employees: id card, name, hire date, department, salary, status."""
    list_display = (
        'cedula',
        'nombre',
        'fecha_ingreso',
        'departamento',
        'salario_mensual',
        'estado'
    )
@admin.register(Candidato)
class CandidatoAdmin(admin.ModelAdmin):
    """Admin list view for candidates (model imported from the candidates app)."""
    list_display = (
        'cedula',
        'nombre',
        'departamento',
        'puesto',
        'recomendado_por'
    )
|
from myhdl import *
def ram(dout, din, addr, we, clk, depth=256):
    """Synchronous-write / combinational-read RAM model (myhdl).

    Writes din into mem[addr] on the rising clock edge when we is set;
    dout continuously reflects mem[addr]. Each memory word is 8 bits wide.
    Returns the (write, read) generators for conversion/simulation.
    """
    mem = [Signal(intbv(0)[8:]) for i in range(depth)]
    @always(clk.posedge)
    def write():
        if we:
            mem[addr].next = din
    @always_comb
    def read():
        dout.next = mem[addr]
    return write, read
# Module-level signals used to elaborate/convert the ram instance below.
dout = Signal(intbv(0)[16:])  # NOTE(review): 16-bit bus, but mem words are 8-bit — confirm widths
dout_v = Signal(intbv(0)[8:])
din = Signal(intbv(0)[16:])   # NOTE(review): 16-bit data into 8-bit cells — confirm intent
addr = Signal(intbv(0)[7:])   # 7-bit address (128 locations) vs default depth=256 — confirm
we = Signal(bool(0))
clk = Signal(bool(0))
def main():
    """Convert the ram model to Verilog (as module 'ram_1') and to VHDL."""
    toVerilog.name = 'ram_1'
    toVerilog(ram, dout, din, addr, we, clk)
    toVHDL(ram, dout, din, addr, we, clk)
if __name__ == '__main__':
    main()
|
"""
Author: JiaHui (Jeffrey) Lu
Student ID: 25944800
"""
import numpy as np
def function1(x):
    """f(x) = x^3 - 2x - 5."""
    return np.power(x, 3) - 2 * x - 5
def function1_d(x):
    """Derivative of function1: f'(x) = 3x^2 - 2."""
    return 3 * np.power(x, 2) - 2
def function2(x):
    """f(x) = e^(-x) - x."""
    return np.exp(-x) - x
def function2_d(x):
    """Derivative of function2: f'(x) = -e^(-x) - 1."""
    return -np.exp(-x) - 1
def function3(x):
    """f(x) = x*sin(x) - 1."""
    return x * np.sin(x) - 1
def function3_d(x):
    """Derivative of function3: f'(x) = x*cos(x) + sin(x)."""
    return x * np.cos(x) + np.sin(x)
def function4(x):
    """f(x) = x^3 - 3x^2 + 3x - 1, i.e. (x - 1)^3 (triple root at x = 1)."""
    return np.power(x, 3) - 3 * np.power(x, 2) + 3 * x - 1
def function4_d(x):
    """Derivative of function4: f'(x) = 3x^2 - 6x + 3."""
    return 3 * np.power(x, 2) - 6 * x + 3
def newton(fun, fun_d, i=0):
    """Newton-Raphson iteration for a root of *fun*, starting from *i*.

    *fun_d* is the derivative. Stops when successive iterates differ by
    less than 1e-9. Diverges or divides by zero for a bad starting point.
    """
    x = i
    while True:
        nxt = x - fun(x) / fun_d(x)
        if np.abs(nxt - x) < 0.000000001:
            return nxt
        x = nxt
def secant(fun, i1=0, i2=0.01):
    """Secant-method root finding for *fun* from the two seeds *i1*, *i2*.

    Stops when successive iterates differ by less than 1e-9; divides by
    zero when fun takes equal values at the two current points.
    """
    cur, prev = i1, i2
    while True:
        nxt = cur - (fun(cur) * (cur - prev)) / (fun(cur) - fun(prev))
        if np.abs(nxt - cur) < 0.000000001:
            return nxt
        prev, cur = cur, nxt
# Demo: find a root of each test function with both methods.
if __name__ == "__main__":
    print("Newton's Method: ")
    print("The root for function 1 is: ", newton(function1, function1_d))
    print("The root for function 2 is: ", newton(function2, function2_d))
    # This gives divide by zero error unless the starting position is initialized correctly
    print("The root for function 3 is: ", newton(function3, function3_d, 0.5))
    print("The root for function 4 is: ", newton(function4, function4_d))
    print("Secant Method: ")
    print("The root for function 1 is: ", secant(function1))
    print("The root for function 2 is: ", secant(function2))
    # This gives divide by zero error unless the starting position is initialized correctly
    print("The root for function 3 is: ", secant(function3))
    print("The root for function 4 is: ", secant(function4))
import re
# NOTE(review): the variable name `string` shadows the stdlib `string` module.
string = "hello Long 86 bal .. dfsjldf ,: \n hello"
#string = re.findall(r"[\w']+", string)
# Strip every non-word character ([\W] = anything outside [a-zA-Z0-9_]).
string = re.sub(r"\W",'',string)
"""
The "re" module is the regular expression module. The r character signals we are not ignoring special characters.
In this line, we are substituting everything that is not a word character ([\W]) with an empty string.
Note that [\w] is the set of word characters and [\W] is the set of non-word characters. See https://docs.python.org/3/library/re.html
for more info.
"""
print(string)
import numpy as np
import struct
import numpy as np
import struct
# Demonstrate byte ordering with struct: '>' = big-endian, '<' = little-endian.
# (Removed the unused constants and C-style semicolons of the old version.)
print()
a = 5
print(struct.pack('>i', a))
print(struct.pack('<i', a))
a = -5
print(struct.pack('>i', a))
print(struct.pack('<i', a))
var = 4.33
print(struct.pack('>d', var))
print(struct.pack('<d', var))
|
import re,os
from urllib import request
from bs4 import BeautifulSoup
def open_url(url):
    """Fetch *url* with a desktop-browser User-Agent and return the raw body bytes."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
    }
    req = request.Request(url, headers=headers)
    return request.urlopen(req).read()
def get_img(html):
    """Download every <img> inside the 'chartcontainer' element into the cwd."""
    soup = BeautifulSoup(html.decode('utf-8'), features='lxml')
    container = soup.find(class_='chartcontainer')
    for image_tag in container.find_all('img'):
        src = image_tag['src']
        filename = src.split('/')[-1]
        print(filename)  # progress trace
        with open(filename, 'wb') as handle:
            handle.write(open_url(src))
def main():
    """Create the download folder, cd into it and fetch all chart images."""
    folder = '北京空气质量报告爬虫存图'
    try:
        os.mkdir(folder)
    except FileExistsError:
        # Only an already-existing folder is expected here; the previous bare
        # except also swallowed permission and path errors, hiding real failures.
        print('文件夹已存在')
    os.chdir(folder)
    url = 'http://www.beijing-air.com/'
    get_img(open_url(url))
    return
if __name__ == '__main__':
    main()
|
from datetime import datetime
from elasticsearch_dsl import DocType, Date, Nested, Boolean, \
analyzer, InnerDoc, Completion, Keyword, Text, Index, FacetedSearch, TermsFacet
from elasticsearch_dsl.query import MultiMatch, Match
from flask import current_app as app
from ..models import User
class BookmarkWorkSearch(InnerDoc):
    """Nested Elasticsearch document describing the work a bookmark points at."""
    title = Text()
    username = Text()
    user_id = Text()

    def create_from_json(self, work_json):
        """Populate the fields from a work JSON dict and return self.

        Returning self fixes save_from_json(), which previously received
        None (this method had no return statement) and crashed on .save().
        """
        self.title = work_json['title']
        self.username = work_json['username']
        self.user_id = work_json['user_id']
        self.meta.id = work_json['work_id']
        return self

    def save_from_json(self, work_json):
        """Ensure the index mapping exists, then build and persist a document."""
        BookmarkWorkSearch.init()
        bookmark_work = self.create_from_json(work_json)
        bookmark_work.save()
class BookmarkSearch(DocType):
    """Elasticsearch document for a bookmark, with its work nested inside."""
    curator_title = Text()
    created_at = Date()
    updated_on = Date()
    rating = Text()
    description = Text()
    user_id = Text()
    work = Nested(BookmarkWorkSearch)
    # Both Meta and Index are given for compatibility across elasticsearch_dsl
    # versions (DocType/Meta is the legacy spelling, Index the newer one).
    class Meta:
        index = 'bookmark'
    class Index:
        name = 'bookmark'
    def save(self, ** kwargs):
        # NOTE(review): created_at is reset on EVERY save, including updates —
        # confirm whether it should only be set on first creation.
        self.created_at = datetime.now()
        return super().save(** kwargs)
    def create_from_json(self, bookmark_json):
        """Populate the document from a bookmark JSON dict and persist it.

        'description' and 'rating' are optional keys; the nested work entry
        is built from bookmark_json['work'].
        """
        BookmarkSearch.init()
        self.curator_title=bookmark_json['curator_title']
        if "description" in bookmark_json:
            self.description=bookmark_json['description']
        if "rating" in bookmark_json:
            self.rating=bookmark_json['rating']
        self.user_id=bookmark_json['user_id']
        self.meta.id = bookmark_json['id']
        bookmark_work_search = BookmarkWorkSearch()
        # NOTE(review): create_from_json returns None here, so the populated
        # object relied on is bookmark_work_search itself (mutated in place).
        bookmark_work_search.create_from_json(bookmark_json['work'])
        self.work.append(
            bookmark_work_search)
        self.save()
|
from django.conf.urls import url
from . import views
# URL namespace: reverse these as 'games:index', 'games:detail', etc.
app_name = 'games'
urlpatterns = [
    # /games/
    url(r'^$', views.IndexView.as_view(), name='index'),
    # /games/<game_id>/
    url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),
    # /games/add/
    url(r'^add/$', views.AddView.as_view(), name='add'),
    # /games/<game_id>/delete
    url(r'^(?P<pk>[0-9]+)/delete/$', views.DeleteView.as_view(), name='delete'),
    # /games/<game_id>/update
    url(r'^(?P<pk>[0-9]+)/update/$', views.UpdateView.as_view(), name='update'),
]
|
from django.shortcuts import render
# Create your views here.
def employee_list(request):
    """Placeholder view: not implemented yet, returns None."""
    return None
def employee_form(request):
    """Placeholder view: not implemented yet, returns None."""
    return None
def employee_delete(request):
    """Placeholder view: not implemented yet, returns None."""
    return None
#!/bin/python3
"""
Place parentheses in a list of integers and math ops to maximize value
Ops are add, sub, mult
"""
import operator
import re
# Map operator characters to their binary functions.
ops_table = { '+': operator.add, '-': operator.sub, '*': operator.mul }
def main():
    """Read an expression like "5-8+7*4-8+9" from stdin and print its maximum value."""
    expression = input()
    # Raw strings: '\d' in a plain literal is an invalid escape sequence
    # (a warning/error in modern Python); r'...' is the correct regex spelling.
    data = [int(i) for i in re.findall(r'\d+', expression)]
    ops = [ops_table[c] for c in re.findall(r'[+*-]', expression)]
    print(get_maximum_value(data, ops))
def get_maximum_value(data, ops):
    """Maximise an arithmetic expression by choosing where to parenthesise.

    data  -- the operand values, in order
    ops   -- binary callables between consecutive operands (len(data) - 1)

    Classic interval DP: lo[i][j]/hi[i][j] hold the min/max value obtainable
    from operands i..j; both extremes are tracked because subtraction and
    multiplication can turn a minimum into a maximum.
    """
    n = len(data)
    lo = [[0] * n for _ in range(n)]
    hi = [[0] * n for _ in range(n)]
    for idx, value in enumerate(data):
        lo[idx][idx] = value
        hi[idx][idx] = value
    for span in range(1, n):
        for i in range(n - span):
            j = i + span
            candidates = []
            for k in range(i, j):
                op = ops[k]
                candidates += [
                    op(hi[i][k], hi[k + 1][j]),
                    op(lo[i][k], hi[k + 1][j]),
                    op(hi[i][k], lo[k + 1][j]),
                    op(lo[i][k], lo[k + 1][j]),
                ]
            lo[i][j] = min(candidates)
            hi[i][j] = max(candidates)
    return hi[0][-1]
def make_array(data):
    """Return an n x n table of zeros with *data* placed on the diagonal."""
    n = len(data)
    grid = [[0] * n for _ in range(n)]
    for idx, value in enumerate(data):
        grid[idx][idx] = value
    return grid
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
|
from random import randint
from django.http import JsonResponse
from django.shortcuts import render
from client.settings import SECRET_KEY, BASE_DIR
from main.models import User, Plate
def encode(password):
    """Return the SHA-224 hex digest of *password* salted with SECRET_KEY."""
    from hashlib import sha224
    salted = (SECRET_KEY + password).encode('utf-8')
    return sha224(salted).hexdigest()
def sign_in(request):
    """Authenticate from POSTed login/password.

    On success, ensures the user has a token (creating one if missing),
    stores it in the session and returns it; returns None on any failure.
    """
    password = encode(request.POST.get('password'))
    try:
        user = User.objects.get(login=request.POST.get('login'),
                                password=password)
        if not user.token:
            # Token is derived from the user id plus a small random salt.
            user.token = encode("{0}{1}{2}".format(str(user.id), '-', str(randint(0, 1023))))
            user.save()
        request.session['token'] = user.token
        return user.token
    except:
        # NOTE(review): bare except hides unrelated errors; narrowing to
        # User.DoesNotExist would be safer — confirm nothing relies on this.
        return None
def update_token(request):
    """Rotate the token of the user identified by the session's current token.

    Returns True on success, False when the session has no token or no
    matching user exists.
    """
    try:
        user = User.objects.get(token=request.session['token'])
        user.token = encode("{0}{1}{2}".format(str(user.id), '-', str(randint(0, 1023))))
        user.save()
        request.session['token'] = user.token
        return True
    except:
        # NOTE(review): bare except — KeyError (no session token) and
        # User.DoesNotExist are the expected failures; others are hidden too.
        return False
def render_authorization_page(request):
    """Render the login page.

    NOTE(review): the template is addressed by absolute filesystem path; the
    usual Django pattern is a template name resolved via TEMPLATES dirs.
    """
    return render(request,
                  BASE_DIR + '/main/templates/authorization.html')
|
# Global list searched (and sorted in place) by bsearch below.
l = [1, 2, 5, 13, 2, 27, 100, 34, 44, 6, 34];
def bsearch(item):
    """Binary search for *item* in the global list l (sorted in place first).

    Returns the matching element, or None when *item* is absent.
    """
    l.sort()
    low = 0
    high = len(l) - 1
    while low <= high:
        # Floor division keeps the midpoint an integer under Python 3's true
        # division as well (the old `/` would make `mid` a float there).
        mid = (low + high) // 2
        if l[mid] == item:
            return l[mid]
        elif l[mid] < item:
            low = mid + 1
        else:
            high = mid - 1
    return None
# Python 2 print statements; under Python 3 these would need print() calls.
# Missing items (30, 177) print None.
print bsearch(30)
print bsearch(177)
print bsearch(1)
print bsearch(34)
|
import pprint
import reprlib
def _format_repr_exception(exc, obj):
exc_name = type(exc).__name__
try:
exc_info = str(exc)
except Exception:
exc_info = "unknown"
return '<[{}("{}") raised in repr()] {} object at 0x{:x}>'.format(
exc_name, exc_info, obj.__class__.__name__, id(obj)
)
def _ellipsize(s, maxsize):
if len(s) > maxsize:
i = max(0, (maxsize - 3) // 2)
j = max(0, maxsize - 3 - i)
return s[:i] + "..." + s[len(s) - j :]
return s
class SafeRepr(reprlib.Repr):
    """reprlib.Repr subclass that caps the output size of repr() and embeds
    information on any exception raised during the call."""

    def __init__(self, maxsize):
        super().__init__()
        # Cap both string reprs and the overall result at the same size.
        self.maxstring = maxsize
        self.maxsize = maxsize

    def repr(self, x):
        """Size-limited repr that never propagates exceptions from x's __repr__."""
        try:
            result = super().repr(x)
        except Exception as exc:
            result = _format_repr_exception(exc, x)
        return _ellipsize(result, self.maxsize)

    def repr_instance(self, x, level):
        """Same guard for arbitrary instances (reprlib's fallback hook)."""
        try:
            result = repr(x)
        except Exception as exc:
            result = _format_repr_exception(exc, x)
        return _ellipsize(result, self.maxsize)
def safeformat(obj):
    """return a pretty printed string for the given object.
    Failing __repr__ functions of user instances will be represented
    with a short exception info.
    """
    try:
        return pprint.pformat(obj)
    except Exception as exc:
        # Fall back to a description of the failure instead of raising.
        return _format_repr_exception(exc, obj)
def saferepr(obj, maxsize=240):
    """return a size-limited safe repr-string for the given object.
    Failing __repr__ functions of user instances will be represented
    with a short exception info and 'saferepr' generally takes
    care to never raise exceptions itself. This function is a wrapper
    around the Repr/reprlib functionality of the standard 2.6 lib.
    """
    # Convenience wrapper: one-shot SafeRepr with the requested cap.
    return SafeRepr(maxsize).repr(obj)
|
# -*- coding: utf-8 -*-
import nysol._nysolshell_core as n_core
from nysol.mcmd.nysollib.core import NysolMOD_CORE
from nysol.mcmd.nysollib import nysolutil as nutil
class Nysol_Readcsv(NysolMOD_CORE): # TODO: make `i` mandatory? error handling?
    """nysol mcmd wrapper for the 'readcsv' command.

    Accepts exactly one positional argument: an input file name (str) or a
    list of file names (joined with commas); it is passed as keyword 'i'.
    """
    # Parameter keyword lists for 'readcsv' as reported by the native core.
    _kwd ,_inkwd,_outkwd = n_core.getparalist("readcsv",3)
    def __init__(self,*args, **kw_args) :
        # NOTE(review): errors are reported via print and an early return,
        # leaving a half-initialised object — raising would be safer.
        if len(args) != 1 :
            print("arge only one")
            return None
        if isinstance(args[0],str) :
            kw_args["i"] = args[0]
        elif isinstance(args[0],list) :
            # Multiple input files are passed as a comma-separated string.
            kw_args["i"] = ','.join(args[0])
        else :
            print("unsuport type")
            return None
        super(Nysol_Readcsv,self).__init__("readcsv",nutil.args2dict((),kw_args,Nysol_Readcsv._kwd))
|
class Solution(object):
    """Remove from a sorted linked list every value that appears more than once
    (LeetCode 82 semantics: duplicated values vanish entirely)."""

    def deleteDuplicates(self, head):
        """Return the head of the filtered list; None for an empty input."""
        if head is None:
            return
        # Dummy node in front of head simplifies removing the first run.
        sentinel = ListNode(None)
        sentinel.next = head
        prev = sentinel
        while prev.next is not None:
            first = prev.next
            # Advance past every node sharing first's value.
            scout = first.next
            while scout is not None and scout.val == first.val:
                scout = scout.next
            if first.next != scout:
                # first's value was duplicated: unlink the whole run.
                prev.next = scout
            else:
                prev = first
        return sentinel.next
|
import sys
import random
def egcd(b, n):
    """Extended Euclidean algorithm.

    Returns (g, x, y) with g = gcd(b, n) and b*x + n*y = g.
    """
    x0, x1 = 1, 0
    y0, y1 = 0, 1
    while n != 0:
        q = b // n
        b, n = n, b % n
        x0, x1 = x1, x0 - q * x1
        y0, y1 = y1, y0 - q * y1
    return (b, x0, y0)
def getPrimes(maximum):
    """Return all primes p with 3 <= p < maximum - 1."""
    return [i for i in range(3, maximum - 1) if isPrime(i)]
def isPrime(number):
    """Trial-division primality test.

    Fixes: values below 2 (0, 1, negatives) are now correctly reported as
    non-prime (the old loop returned True for them), and divisors are only
    tried up to sqrt(number) instead of number - 1.
    """
    if number < 2:
        return False
    for i in range(2, int(number ** 0.5) + 1):
        if number % i == 0:
            return False
    return True
# Toy RSA key-generation demo: takes two primes p, q on the command line.
p = int(sys.argv[1])
q = int(sys.argv[2])
if isPrime(p) and isPrime(q):
    n = p*q
    print("n : "+str(n))
    # Euler's totient phi(n) for distinct primes p, q.
    indicatrice_euler = (p-1)*(q-1)
    print("indicatrice_euler : "+str(indicatrice_euler))
    # Pick a random prime as the public exponent candidate.
    list_of_primes = getPrimes(indicatrice_euler)
    c = random.choice(list_of_primes)
    print("c : "+str(c))
    # d is the modular inverse coefficient from the extended Euclid identity.
    # NOTE(review): d may come out negative here; a real key would reduce it
    # modulo the totient — confirm intended usage.
    (_, d, k) = egcd(c, indicatrice_euler)
    print("d : "+str(d))
    print("k : "+str(k))
    print("cd mod(indicatrice_euler) : "+str((c*d)%indicatrice_euler))
    print("cd + k(indicatrice_euler) : "+str(c*d+k*indicatrice_euler))
class CustomPaginationMixin(object):
    """DRF-style pagination helpers for views that define `pagination_class`,
    `request` and serve list responses."""

    @property
    def paginator(self):
        """The paginator instance associated with the view, or `None`.

        Instantiated lazily on first access and cached on the instance.
        """
        if not hasattr(self, '_paginator'):
            cls = self.pagination_class
            self._paginator = None if cls is None else cls()
        return self._paginator

    def paginate_queryset(self, queryset):
        """Return a single page of results, or `None` if pagination is disabled."""
        paginator = self.paginator
        if paginator is None:
            return None
        return paginator.paginate_queryset(
            queryset, self.request, view=self)

    def get_paginated_response(self, data):
        """Return a paginated style `Response` object for the given output data."""
        assert self.paginator is not None
        return self.paginator.get_paginated_response(data)
|
# encoding: utf-8
from web.ext.acl import when
from ..templates.admin.admintemplate import page as _page
from ..templates.requests import requeststemplate
# Resource is only reachable for authenticated sessions.
@when(when.matches(True, 'session.authenticated', True))
class Logout:
    """Logout resource: GET deauthenticates the current session."""
    __dispatch__ = 'resource'
    def __init__(self, context, name, *arg, **args):
        self._name = name
        self._ctx = context
        self.queries = context.queries
    def get(self, *arg, **args):
        # NOTE(review): the context is passed to its own deauthenticate —
        # confirm this matches the framework's expected call signature.
        self._ctx.deauthenticate(self._ctx)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib
import json
import os
import requests
import datetime
#import twilio.twiml
from flask import Flask
from flask import jsonify
from flask import url_for
from flask import request
from flask import make_response
from flask_ask import Ask, request, session, question, statement, audio, delegate, context
from twilio.rest import Client
# Flask app should start in global layout
app = Flask(__name__)
ask = Ask(app, "/")
#Helper function for Dialog Delegate
def get_dialog_state():
    """Return the Alexa dialog state from the session (callers check 'COMPLETED')."""
    return session['dialogState']
#Launch skill messages
@ask.launch
def launched():
    """Skill launch: require account linking, then greet (personalised if possible)."""
    if session.user.accessToken == None:
        return statement('To start using the Optus skill, please use the companion app to authenticate on Amazon') \
            .link_account_card()
    else:
        accesstoken = session.user.accessToken
        userdetails = get_user_info(accesstoken)
        # NOTE(review): get_user_info returns False (not None) on failure, so
        # this `is None` check never takes the generic branch — confirm.
        if userdetails is None:
            return question('Hello. Welcome to the Optus skill on Amazon Alexa. You can check your account balance, your data usage and get answers to questions on relocation request, learn what to do if your phone has no signal or how to enable internet on your phone. What do you want to do today')
        else:
            # Personalised greeting uses the first word of the profile name.
            return question('Hello '+userdetails['name'].split(' ')[0]+'! Welcome to the Optus skill on Amazon Alexa. You can check your account balance, your data usage and get answers to questions on relocation request, learn what to do if your phone has no signal or how to enable internet on your phone. What do you want to do today')
#Get account balance
@ask.intent("accountbalance")
def Accountbalance():
    """Speak the account balance/number/due date and SMS the same via Twilio."""
    if session.user.accessToken == None:
        return statement('To start using this skill, please use the companion app to authenticate on Amazon') \
            .link_account_card()
    else:
        accesstoken = session.user.accessToken
        print(accesstoken)
        balance, accnumber, duedate = getbalance(accesstoken)
        print(balance, accnumber, duedate)
        client = Client(os.environ.get('TWILIO_ACCOUNT_SID'), os.environ.get('TWILIO_AUTH_TOKEN'))
        # NOTE(review): sender and destination numbers are hard-coded — demo only?
        client.messages.create(from_='+14696467609',
                    to='+61421183854',
                    body='Your balance is ' + balance + '. Your account number is ' + accnumber + ' and bill is due on ' + duedate)
        return statement('<speak> Your account balance is ' + balance + '. Your account number is <say-as interpret-as="digits"> ' + accnumber + '</say-as> and your bill due date is ' + duedate + '. We have sent you an SMS with the details to your mobile number. </speak>')
        #.simple_card('Channel', speech)
#Get data usage status
@ask.intent("datausage")
def DataUsage():
    """Speak the monthly data usage and SMS the same details via Twilio."""
    if session.user.accessToken == None:
        return statement('To start using this skill, please use the companion app to authenticate on Amazon') \
            .link_account_card()
    else:
        accesstoken = session.user.accessToken
        print(accesstoken)
        consumedpercent, datacap, remainingdata, effectivedate = getusage(accesstoken)
        print(consumedpercent, datacap, remainingdata, effectivedate)
        client = Client(os.environ.get('TWILIO_ACCOUNT_SID'), os.environ.get('TWILIO_AUTH_TOKEN'))
        # NOTE(review): sender and destination numbers are hard-coded — demo only?
        client.messages.create(from_='+14696467609',
                    to='+61421183854',
                    body='You have used ' + consumedpercent + ' of your monthly limit of ' + datacap + ' data and have ' + remainingdata + ' left until ' + effectivedate)
        return statement('You have used ' + consumedpercent + ' of your monthly limit of ' + datacap + ' data and have ' + remainingdata + ' left until ' + effectivedate + '. You can get more details about your data breakdown in Myoptus app or login to your account at www.optus.com.au')
        #.simple_card('Channel', speech)
#Submit relocate request
@ask.intent("relocaterequest")
def RelocateRequest():
    """Present relocation options and offer to start a live chat."""
    if session.user.accessToken == None:
        return statement('To start using this skill, please use the companion app to authenticate on Amazon') \
            .link_account_card()
    else:
        #delegate dialog to Alexa until all parameters are set
        dialog_state = get_dialog_state()
        print(dialog_state)
        if dialog_state != "COMPLETED":
            return delegate(speech=None)
        accesstoken = session.user.accessToken
        print(accesstoken)
        # Remember the offered intent so AMAZON.YesIntent knows what "yes" means.
        session.attributes['intent_name'] = "relocaterequest"
        return question('<speak> You can now apply for a relocation request online or chat with our customer service or else call us at <say-as interpret-as="telephone"> 1300555241 </say-as> <break time="1500ms"/> Would you like to chat with our live chat team now? </speak>').reprompt('I did not get that. Would you like to initiate a chat now?')
        #.simple_card('Channel', speech)
#Customer FAQ
@ask.intent("faq")
def Faqtopic(faqtopic):
    """Answer canned FAQs matched by slot phrase: top-up, no-signal, internet setup.

    NOTE(review): list_1 contains 'recharge my phone' twice and list_3 contains
    'set up my phone for the internet' twice — harmless for membership tests.
    """
    #delegate dialog to Alexa until all parameters are set
    dialog_state = get_dialog_state()
    print(dialog_state)
    if dialog_state != "COMPLETED":
        return delegate(speech=None)
    slotvalue = faqtopic
    print(slotvalue)
    list_1 = ['top up my phone', 'topup my phone', 'topup', 'top up', 'recharge', 'recharge my phone', 'top of my phone', 'top pop my phone', 'recharge my phone', 'recharge my balance']
    list_2 = ['no signal', 'signal issues', 'low signal']
    list_3 = ['setup my phone for the internet', 'set up my phone for the internet', 'setup my phone to the internet', 'set up my phone for the internet']
    if slotvalue in list_1:
        return statement('There are various options for recharging your Prepaid Mobile or Mobile Broadband service. The My Optus app is the easiest way to top up or recharge. You can also store your credit card details to make your next recharge even easier. The My Optus app is available on Android and Apple smartphones. Recharge over the phone with a credit card by calling 555 from your Prepaid Mobile. Dont worry if you have run out of credit as calls to 555 are free. AutoRecharge is the simplest way to stay on top of your Prepaid plan. Just choose up the amount you want to recharge and the frequency of how often you would like the recharge to be applied. We will take care of the rest.')
    elif slotvalue in list_2:
        return statement('To check for a problem with the Optus Mobile Network & service in your area, see our coverage maps at www.optus.com.au/about/network/service-status. Once you have selected the Outage tab and entered an address or location at the top of the map, you will be presented with all the towers in the searched area. Red coloured tower or towers indicate there is a problem or outage, Orange coloured tower indicate we are performing maintenance or upgrades and Green coloured tower indicate that everything is running okay.')
    elif slotvalue in list_3:
        return statement('To set up, configure or troubleshoot your new device, check out our animated guides. They are all located at www.devicehelp.optus.com.au for selected devices and are are simple to follow online.')
    else:
        return statement('Kindly try our FAQ page at www.optus.com.au for answers to your queries')
#Yes Intent
@ask.intent('AMAZON.YesIntent')
def yes_intent():
    """Follow-up 'yes': if a relocation chat was offered, SMS the live-chat link."""
    intent_name = session.attributes['intent_name']
    print(intent_name)
    if intent_name == "relocaterequest":
        client = Client(os.environ.get('TWILIO_ACCOUNT_SID'), os.environ.get('TWILIO_AUTH_TOKEN'))
        # NOTE(review): sender and destination numbers are hard-coded — demo only?
        client.messages.create(from_='+14696467609',
                    to='+61421183854',
                    body='Please click on this link http://yesopt.us/chat2us to initiate a livechat with our live chat team.')
        return statement('We have sent a SMS with the link to initiate a live chat to your mobile number. Please click on that link to chat with us')
    else:
        return statement('You can ask Alexa about your Optus account balance, data usage or for information about Optus services')
#No Intent
@ask.intent('AMAZON.NoIntent')
def no_intent():
    """Follow-up 'no': point the user at the online form and phone line instead."""
    return statement('<speak> Please visit www.offer.optus.com.au/relocation# to submit your relocation request online or call us at <say-as interpret-as="telephone"> 1300555241 </say-as> to speak to our customer service team. </speak>')
#Stop Intent
@ask.intent('AMAZON.StopIntent')
def stop():
    """Standard stop handler: end the session with a goodbye."""
    return statement("Goodbye")
#Cancel Intent
@ask.intent('AMAZON.CancelIntent')
def cancel():
    """Standard cancel handler: end the session with a goodbye."""
    return statement("Goodbye")
#End Session Intent
@ask.session_ended
def session_ended():
    """Acknowledge SessionEndedRequest with an empty JSON body and HTTP 200."""
    return "{}", 200
#Helper function for balance, account number and due date
def getbalance(accesstoken):
    """Demo stub: return hard-coded (balance, account number, due date)."""
    return ('Thirteen dollars', '34567654', '06/20/2018')
#Helper function for consumedpercent, datacap, remainingdata, effectivedate
def getusage(accesstoken):
    """Demo stub: return hard-coded (consumed %, data cap, remaining, reset date)."""
    return ('43%', '6 GB', '3.42 GB', '06/28/2018')
#Helper function for user information
def get_user_info(accesstoken):
    """Fetch the Amazon user profile for *accesstoken*.

    Returns the profile dict on success, or None on failure. Fix: the old
    version returned False, which made launched()'s `userdetails is None`
    check always fail and sent every failure down the personalised branch.
    """
    amazonProfileURL = 'https://api.amazon.com/user/profile?access_token='
    r = requests.get(url=amazonProfileURL + accesstoken)
    if r.status_code == 200:
        return r.json()
    return None
# Local/dev entry point; honours the PORT env var (default 5000).
if __name__ == '__main__':
    port = int(os.getenv('PORT', 5000))
    print ('Starting app on port %d' % port)
    app.run(debug=False, port=port, host='0.0.0.0')
|
from typing import Callable
# Realize a function which takes a function as an arguments and finds a point where it takes the values zéro
# bisection(f) returns an x where f(x)=0
# the bisection takes three arguments:
# the function f (function)
# a: the start of the interval of study
# b: the end of the interval of study
# N.B : The method is applicable for numerically solving the equation function(x) = 0 for the real variable x, where f is a continuous function defined on an interval [a, b] and where function(a) and function(b) have opposite signs.
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of *function* on [a, b] by bisection.

    Requires function(a) and function(b) to have opposite signs (or one
    endpoint to be an exact root). Halves the bracket until it is narrower
    than 1e-7 and returns its midpoint.

    Raises:
        ValueError: when function(a) and function(b) have the same sign.
    """
    start: float = a
    end: float = b
    if function(a) == 0:
        return a
    # BUG FIX: this previously compared function(b) against b instead of 0,
    # so an exact root at the right endpoint was not detected.
    if function(b) == 0:
        return b
    if function(a) * function(b) > 0:
        raise ValueError("bisection method does not work in this case scenario.")
    while end - start > 10**(-7):
        m: float = (start + end) / 2
        # Keep the half-interval where the sign change (the root) lies.
        if function(start) * function(m) <= 0:
            end = m
        else:
            start = m
    return (start + end) / 2
# Example usage.
a = 0
b = 1
# NOTE(review): f(x) = x^2 + 1/16 is strictly positive on [0, 1], so f(a) and
# f(b) share a sign and bisection() raises ValueError here — confirm intent.
f = lambda x : x*x + 1/16
b_point = bisection(f, a, b)
print('bisection point: ', b_point)
|
from filters import Filter
from glob import glob
import cv2
import matplotlib.pyplot as plt
import numpy as np
from moviepy.editor import VideoFileClip
def main():
    """Run the detection filter on frame 113 of the clip and show the diagnostic view."""
    # NOTE(review): `filter` shadows the builtin of the same name.
    filter = Filter(model_file="model.p",scaler_file="scaler.p")
    clip = VideoFileClip("project_video_short3.mp4")
    cnt = 0
    stop_frame_num = 113
    for img in clip.iter_frames():
        cnt += 1
        if (cnt == stop_frame_num):
            # Drop an alpha channel if present: the pipeline expects RGB.
            if img.shape[2] == 4:
                img = img[:, :, :3]
            # NOTE(review): 'pipepine' looks like a typo for 'pipeline' in the
            # Filter API — confirm against filters.py before renaming.
            ret = filter.pipepine(img)
            plt.figure(figsize=(16, 10))
            plt.imshow(filter.diagScreen)
            plt.subplots_adjust(left=0.03, bottom=0.03, right=1, top=1)
            plt.show()
if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
from django.db.models import Prefetch
from dicts.models import ProfessionalArea, City
from pages.models import PartnersPage
from partners import models
from snippets.views import BaseTemplateView
class PartnersView(BaseTemplateView):
    """Partners index page."""
    template_name = 'partners/partners_index.html'

    def get_context_data(self, **kwargs):
        """Extend the template context with the partners-page singleton, the
        published partners and the city/professional-area filter querysets."""
        kwargs = super(PartnersView, self).get_context_data(**kwargs)
        published_partners = models.Partner.objects.published()
        # Prefetch published professional areas onto each partner.
        areas_prefetch = Prefetch(
            'professional_areas',
            queryset=ProfessionalArea.objects.published(),
            to_attr='professional_areas_cache'
        )
        partners = (
            published_partners
            .prefetch_related(areas_prefetch)
            .select_related('city')
            .order_by('ordering', 'city__ordering', 'title')
        )
        # Only offer filter values that actually occur among the partners.
        cities = (
            City.objects.published()
            .order_by('ordering', 'title')
            .filter(partners__in=published_partners)
            .distinct()
        )
        professional_areas = (
            ProfessionalArea.objects.published()
            .order_by('ordering', 'title')
            .filter(partners__in=published_partners)
            .distinct()
        )
        kwargs.update(
            cities=cities,
            partners=partners,
            partners_page=PartnersPage.get_solo(),
            professional_areas=professional_areas
        )
        return kwargs
|
from setuptools import setup

# Fix: read the long description through a context manager so the file
# handle is closed deterministically (the original leaked an open file).
with open('README.rst') as readme:
    long_description = readme.read()

# Packaging metadata for the pylobby distributed chat system.
setup(name='pylobby',
      version='0.1.0',
      description='Distributed chat system',
      long_description=long_description,
      author='Marin Atanasov Nikolov',
      author_email='dnaeon@gmail.com',
      license='BSD',
      url='https://github.com/dnaeon/pylobby',
      download_url='https://github.com/dnaeon/pylobby/releases',
      packages=[
          'pylobby',
          'pylobby.client',
          'pylobby.server',
      ],
      install_requires=[
          'pyzmq >= 14.4.1',
      ]
      )
|
# Copyright 2020, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import contextlib
import typing
import urllib.parse
from http import HTTPStatus
import aiohttp
import aiohttp.test_utils
import yarl
import opentelemetry.ext.aiohttp_client
from opentelemetry.test.test_base import TestBase
from opentelemetry.trace.status import StatusCanonicalCode
class TestAioHttpIntegration(TestBase):
    """Integration tests for the aiohttp client instrumentation.

    Each test sends real HTTP requests to a local aiohttp test server and
    asserts on the spans captured by the in-memory exporter.
    """
    # Show full diffs on assertion failures.
    maxDiff = None
    def assert_spans(self, spans):
        """Assert finished spans equal ``spans``, given as a list of
        (name, (status code, description), attributes) tuples."""
        self.assertEqual(
            [
                (
                    span.name,
                    (span.status.canonical_code, span.status.description),
                    dict(span.attributes),
                )
                for span in self.memory_exporter.get_finished_spans()
            ],
            spans,
        )
    def test_url_path_span_name(self):
        """The default span name is the URL path ("/" when the path is empty)."""
        for url, expected in (
            (
                yarl.URL("http://hostname.local:1234/some/path?query=params"),
                "/some/path",
            ),
            (yarl.URL("http://hostname.local:1234"), "/"),
        ):
            with self.subTest(url=url):
                params = aiohttp.TraceRequestStartParams("METHOD", url, {})
                actual = opentelemetry.ext.aiohttp_client.url_path_span_name(
                    params
                )
                self.assertEqual(actual, expected)
                self.assertIsInstance(actual, str)
    @staticmethod
    def _http_request(
        trace_config,
        url: str,
        method: str = "GET",
        status_code: int = HTTPStatus.OK,
        request_handler: typing.Callable = None,
        **kwargs
    ) -> typing.Tuple[str, int]:
        """Helper to start an aiohttp test server and send an actual HTTP request to it."""
        async def do_request():
            async def default_handler(request):
                # The instrumentation must have injected the context header.
                assert "traceparent" in request.headers
                return aiohttp.web.Response(status=int(status_code))
            handler = request_handler or default_handler
            app = aiohttp.web.Application()
            parsed_url = urllib.parse.urlparse(url)
            app.add_routes([aiohttp.web.get(parsed_url.path, handler)])
            app.add_routes([aiohttp.web.post(parsed_url.path, handler)])
            app.add_routes([aiohttp.web.patch(parsed_url.path, handler)])
            # Client errors (timeouts, redirect loops) are expected by some
            # tests; the span assertions happen afterwards.
            with contextlib.suppress(aiohttp.ClientError):
                async with aiohttp.test_utils.TestServer(app) as server:
                    netloc = (server.host, server.port)
                    async with aiohttp.test_utils.TestClient(
                        server, trace_configs=[trace_config]
                    ) as client:
                        await client.start_server()
                        await client.request(
                            method, url, trace_request_ctx={}, **kwargs
                        )
            return netloc
        loop = asyncio.get_event_loop()
        return loop.run_until_complete(do_request())
    def test_status_codes(self):
        """HTTP status codes map to the expected span status codes."""
        for status_code, span_status in (
            (HTTPStatus.OK, StatusCanonicalCode.OK),
            (HTTPStatus.TEMPORARY_REDIRECT, StatusCanonicalCode.OK),
            (HTTPStatus.SERVICE_UNAVAILABLE, StatusCanonicalCode.UNAVAILABLE),
            (
                HTTPStatus.GATEWAY_TIMEOUT,
                StatusCanonicalCode.DEADLINE_EXCEEDED,
            ),
        ):
            with self.subTest(status_code=status_code):
                host, port = self._http_request(
                    trace_config=opentelemetry.ext.aiohttp_client.create_trace_config(),
                    url="/test-path?query=param#foobar",
                    status_code=status_code,
                )
                self.assert_spans(
                    [
                        (
                            "GET",
                            (span_status, None),
                            {
                                "component": "http",
                                "http.method": "GET",
                                "http.url": "http://{}:{}/test-path?query=param#foobar".format(
                                    host, port
                                ),
                                "http.status_code": int(status_code),
                                "http.status_text": status_code.phrase,
                            },
                        )
                    ]
                )
                self.memory_exporter.clear()
    def test_span_name_option(self):
        """span_name may be a fixed string or a callable of the request params."""
        for span_name, method, path, expected in (
            ("static", "POST", "/static-span-name", "static"),
            (
                lambda params: "{} - {}".format(
                    params.method, params.url.path
                ),
                "PATCH",
                "/some/path",
                "PATCH - /some/path",
            ),
        ):
            with self.subTest(span_name=span_name, method=method, path=path):
                host, port = self._http_request(
                    trace_config=opentelemetry.ext.aiohttp_client.create_trace_config(
                        span_name=span_name
                    ),
                    method=method,
                    url=path,
                    status_code=HTTPStatus.OK,
                )
                self.assert_spans(
                    [
                        (
                            expected,
                            (StatusCanonicalCode.OK, None),
                            {
                                "component": "http",
                                "http.method": method,
                                "http.url": "http://{}:{}{}".format(
                                    host, port, path
                                ),
                                "http.status_code": int(HTTPStatus.OK),
                                "http.status_text": HTTPStatus.OK.phrase,
                            },
                        )
                    ]
                )
                self.memory_exporter.clear()
    def test_url_filter_option(self):
        """A url_filter can rewrite the URL recorded in span attributes."""
        # Strips all query params from URL before adding as a span attribute.
        def strip_query_params(url: yarl.URL) -> str:
            return str(url.with_query(None))
        host, port = self._http_request(
            trace_config=opentelemetry.ext.aiohttp_client.create_trace_config(
                url_filter=strip_query_params
            ),
            url="/some/path?query=param&other=param2",
            status_code=HTTPStatus.OK,
        )
        self.assert_spans(
            [
                (
                    "GET",
                    (StatusCanonicalCode.OK, None),
                    {
                        "component": "http",
                        "http.method": "GET",
                        "http.url": "http://{}:{}/some/path".format(
                            host, port
                        ),
                        "http.status_code": int(HTTPStatus.OK),
                        "http.status_text": HTTPStatus.OK.phrase,
                    },
                )
            ]
        )
    def test_connection_errors(self):
        """Connection failures still produce a span with an error status."""
        trace_configs = [
            opentelemetry.ext.aiohttp_client.create_trace_config()
        ]
        for url, expected_status in (
            ("http://this-is-unknown.local/", StatusCanonicalCode.UNKNOWN),
            ("http://127.0.0.1:1/", StatusCanonicalCode.UNAVAILABLE),
        ):
            with self.subTest(expected_status=expected_status):
                async def do_request(url):
                    async with aiohttp.ClientSession(
                        trace_configs=trace_configs
                    ) as session:
                        async with session.get(url):
                            pass
                loop = asyncio.get_event_loop()
                with self.assertRaises(aiohttp.ClientConnectorError):
                    loop.run_until_complete(do_request(url))
                self.assert_spans(
                    [
                        (
                            "GET",
                            (expected_status, None),
                            {
                                "component": "http",
                                "http.method": "GET",
                                "http.url": url,
                            },
                        )
                    ]
                )
                self.memory_exporter.clear()
    def test_timeout(self):
        """A read timeout is reported as DEADLINE_EXCEEDED."""
        async def request_handler(request):
            # Sleep longer than the client's sock_read timeout below.
            await asyncio.sleep(1)
            assert "traceparent" in request.headers
            return aiohttp.web.Response()
        host, port = self._http_request(
            trace_config=opentelemetry.ext.aiohttp_client.create_trace_config(),
            url="/test_timeout",
            request_handler=request_handler,
            timeout=aiohttp.ClientTimeout(sock_read=0.01),
        )
        self.assert_spans(
            [
                (
                    "GET",
                    (StatusCanonicalCode.DEADLINE_EXCEEDED, None),
                    {
                        "component": "http",
                        "http.method": "GET",
                        "http.url": "http://{}:{}/test_timeout".format(
                            host, port
                        ),
                    },
                )
            ]
        )
    def test_too_many_redirects(self):
        """Exceeding max_redirects is reported as DEADLINE_EXCEEDED."""
        async def request_handler(request):
            # Create a redirect loop.
            location = request.url
            assert "traceparent" in request.headers
            raise aiohttp.web.HTTPFound(location=location)
        host, port = self._http_request(
            trace_config=opentelemetry.ext.aiohttp_client.create_trace_config(),
            url="/test_too_many_redirects",
            request_handler=request_handler,
            max_redirects=2,
        )
        self.assert_spans(
            [
                (
                    "GET",
                    (StatusCanonicalCode.DEADLINE_EXCEEDED, None),
                    {
                        "component": "http",
                        "http.method": "GET",
                        "http.url": "http://{}:{}/test_too_many_redirects".format(
                            host, port
                        ),
                    },
                )
            ]
        )
|
from django.conf import settings
from django.conf.urls import include, static, url
from django.contrib import admin
# Root URL configuration: allauth account pages, the admin site, and the
# workmate app mounted at the site root.
urlpatterns = [
    url(r'^accounts/', include('allauth.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^', include('workmate.urls')),
]
# Serve user-uploaded media through Django only during development.
if settings.DEBUG:
    urlpatterns += static.static(
        settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
from using_decorators import show_args
@show_args
def mapRoots(start, number):
    '''
    Print the square roots of the integers from start to number (inclusive),
    computed lazily with map().
    '''
    print('Using Map')
    roots = map(lambda x: x**0.5, range(start,number+1))
    for root in roots:
        print (root)
def compRoots(start, number):
    """Print the square roots of the integers from start to number
    (inclusive), computed with a list comprehension."""
    print('Using List comprehension')
    for value in [n ** 0.5 for n in range(start, number + 1)]:
        print(value)
def genRoots(start, number):
    """Print the square roots of the integers from start to number
    (inclusive), computed lazily with a generator expression."""
    print('Using Generator comprehension')
    lazy_roots = (n ** 0.5 for n in range(start, number + 1))
    for value in lazy_roots:
        print(value)
if __name__ == '__main__':
    # number = int(input('Enter number : '))
    # Demo run: print the roots of 0..12 with each of the three approaches.
    start = 0
    number = 12
    mapRoots(start, number)
    compRoots(start, number)
    genRoots(start, number)
|
import os
import time
import traceback
from pprint import pprint
import pandas as pd
import pandas_ta as ta
from binance.spot import Spot
from binance.websocket.spot.websocket_client import SpotWebsocketClient
from dotenv import load_dotenv
import matplotlib.pyplot as plt
TEST_NET = True
load_dotenv()
class TradingBot:
    """Minimal Binance spot trading bot (testnet-capable).

    NOTE(review): this places real MARKET orders when pointed at the main
    server; credentials are read from environment variables via dotenv.
    """
    def __init__(self, testnet):
        # testnet: connect to the Binance testnet when True.
        self.connectToBinance(testnet)
        pprint(self.getAccountInfo())
    def connectToBinance(self, testnet):
        """Create the REST and websocket clients and store them on self."""
        client = Spot()
        if testnet:
            client = Spot(base_url='https://testnet.binance.vision', key=os.getenv('TESTNET_API_KEY'), secret=os.getenv('TESTNET_API_SECRET'))
            ws_client = SpotWebsocketClient(stream_url='wss://testnet.binance.vision/ws')
            # ws_client.start()
            print('Connected to the TestNet server')
        else:
            client = Spot(key=os.getenv('API_KEY'), secret=os.getenv('API_SECRET'))#
            ws_client = SpotWebsocketClient()
            # ws_client.start()
            print('Connected to the main server')
        self.client = client
        self.ws_client = ws_client
    def getAccountInfo(self):
        """Return the raw account payload from the Binance REST API."""
        return self.client.account()
    def getData(self, symbol, interval, limit=None):
        """Return recent klines as a float OHLCV DataFrame indexed by time."""
        frame = pd.DataFrame(self.client.klines(symbol.upper(), interval, limit=limit))
        # Keep only the first six kline fields: time + OHLCV.
        frame = frame.iloc[:,:6]
        frame.columns = ['Time', 'Open', 'High', 'Low', 'Close', 'Volume']
        frame = frame.set_index('Time')
        frame.index = pd.to_datetime(frame.index, unit='ms')
        frame = frame.astype(float)
        return frame
    def calculateRsi(self, symbol, interval, time_period):
        """Return the RSI series (pandas_ta) of the close prices."""
        df = self.getData(symbol, interval)
        rsi_df = df.ta.rsi(close='Close', length=time_period)
        return rsi_df
    def strategyTest(self, symbol, quantity, entried=False):
        '''
        Buy if asset falls by more than 0.2% within the last 30mins
        Sell if asset rises by more than 0.15% or falls further by 0.15%
        '''
        df = self.getData(symbol, '1m', 30)
        # Cumulative return over the last 30 one-minute opens.
        cumulative_returns = (df.Open.pct_change() + 1).cumprod() - 1
        try:
            if not entried:
                # NOTE(review): the docstring says buy when the asset FALLS by
                # more than 0.2%, but this buys when the cumulative return is
                # ABOVE -0.2% — confirm the intended comparison direction.
                if cumulative_returns[-1] > -0.002:
                    order = self.client.new_order(
                        symbol=symbol,
                        side='BUY',
                        type='MARKET',
                        quantity=quantity
                    )
                    # pprint(order)
                    print(f'Bought {float(order["fills"][0]["qty"])}{order["fills"][0]["commissionAsset"]} at {float(order["fills"][0]["price"])}')
                    pprint(self.getAccountInfo())
                    entried = True
                else:
                    print('No Trade has been executed')
            if entried:
                # NOTE(review): if this method is called with entried=True,
                # ``order`` is unbound below (NameError) — confirm callers.
                while True:
                    df = self.getData(symbol, '1m', 30)
                    since_buy = df.loc[df.index > pd.to_datetime(order['transactTime'], unit='ms')]
                    if len(since_buy) > 0:
                        since_buy_return = (since_buy.Open.pct_change() + 1).cumprod() - 1
                        # Exit on +0.15% gain or a further 0.15% drop.
                        if since_buy_return[-1] > 0.0015 or since_buy_return[-1] < -0.0015:
                            order = self.client.new_order(
                                symbol=symbol,
                                side='SELL',
                                type='MARKET',
                                quantity=quantity
                            )
                            # pprint(order)
                            print(f'Sold {float(order["fills"][0]["qty"])}{order["fills"][0]["commissionAsset"]} at {float(order["fills"][0]["price"])}')
                            pprint(self.getAccountInfo())
                            break
        except KeyboardInterrupt:
            # Manual abort: flatten the position before exiting.
            order = self.client.new_order(
                symbol=symbol,
                side='SELL',
                type='MARKET',
                quantity=quantity
            )
            print(f'Sold {float(order["fills"][0]["qty"])}{order["fills"][0]["commissionAsset"]} at {float(order["fills"][0]["price"])}')
            pprint(self.getAccountInfo())
    def strategyOne(self):
        '''
        BUY: If current price is above 200 MA and 14 (4h) RSI < 45 and 7 (4h) RSI < 40
        SELL: If 14 (4h) RSI > 65 and 7 (4h) RSI > 70 or price decreases by 1.5%
        '''
        # NOTE(review): not implemented yet — body is only this docstring.
if __name__ == "__main__":
    # Instantiate against the testnet (see TEST_NET above) and print an RSI.
    tradingBot = TradingBot(TEST_NET)
    # tradingBot.strategyTest('BTCUSDT', 0.01)
    print(tradingBot.calculateRsi('BTCUSDT', '4h', 10))
import requests
from tqdm import tqdm
import os

# Security fix: the original hard-coded a GitHub personal access token in
# source control.  Credentials are now read from the environment.
GITHUB_USER = os.getenv("GITHUB_USER", "username")
GITHUB_TOKEN = os.getenv("GITHUB_TOKEN", "")

# Each input line is "owner_repo"; the GitHub API wants "owner/repo"
# (only the first underscore separates owner from repo).
to_download = []
with open("need_num_releases.txt") as input_file:
    for line in input_file:
        to_download.append(line.rstrip().replace("_", "/", 1))

# Fix: the output file is now closed via a context manager, and the loop
# iterates the list directly instead of indexing with range(len(...)).
with open("project_to_num_releases_second_round.txt", "w+") as output:
    for element in tqdm(to_download):
        url = "https://api.github.com/repos/" + element + "/releases"
        response = requests.get(url, auth=(GITHUB_USER, GITHUB_TOKEN))
        if 200 <= response.status_code <= 299:
            data = response.json()
            print(data)
            # One tab-separated record per project: name and release count.
            output.write(element + "\t" + str(len(data)) + "\n")
        else:
            # Log failing URLs so they can be retried.
            print(url)
import logging
import string
from torch.utils.data import Dataset, DataLoader
import unicodedata
from typing import List
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
logging.basicConfig(level=logging.INFO)

# Special token ids: 0 pads batches, 1 marks end-of-sentence.
PAD = 0
EOS = 1
# Character vocabulary: letters, punctuation, digits and the space.
LETTRES = string.ascii_letters + string.punctuation + string.digits + ' '
# Character codes start at 2 so that PAD and EOS keep ids 0 and 1.
id2lettre = {offset + 2: char for offset, char in enumerate(LETTRES)}
id2lettre[PAD] = ''
id2lettre[EOS] = 'END'
# Inverse mapping: character -> integer code.
lettre2id = {char: code for code, char in id2lettre.items()}

def normalize(s):
    """Strip accents (NFD decomposition) and drop characters outside LETTRES."""
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(char for char in decomposed if char in LETTRES)

def string2code(s):
    """Encode a string as a list of integer character codes."""
    return [lettre2id[char] for char in normalize(s)]

def code2string(t):
    """Decode a list (or tensor) of integer codes back into a string."""
    if type(t) != list:
        t = t.tolist()
    return ''.join(id2lettre[code] for code in t)
class TextDataset(Dataset):
    """Dataset of encoded sentences (text split on '.'), each ending in EOS."""
    def __init__(self, text: str, *, maxsent=None, maxlen=None):
        # maxsent/maxlen are accepted for API compatibility but unused.
        encode = lambda raw: torch.LongTensor(string2code(raw.strip()) + [EOS])
        self.sentences = [encode(raw) for raw in text.split('.')]
    def __len__(self):
        return len(self.sentences)
    def __getitem__(self, i):
        item = self.sentences[i]
        return item, len(item)
def collate_fn(samples: List[List[int]]):
    """Pad a batch of (sentence tensor, length) pairs into one LongTensor.

    Returns a (max_length x batch) tensor; shorter sentences are
    right-padded with zeros (PAD).
    """
    sentences, lengths = zip(*samples)
    batch = torch.zeros(max(lengths), len(samples)).long()  # seq_len x batch
    for col, (sentence, length) in enumerate(zip(sentences, lengths)):
        batch[:length, col] = sentence
    return batch
class LSTM(nn.Module):
    """Hand-written character-level LSTM unrolled over a sequence.

    NOTE(review): the cell state ``self.c`` is (re)initialised in forward()
    and mutated by one_step(); one_step() must not be called before forward()
    has set self.c for the current batch.
    """
    def __init__(self,dico_size,emb_size,latent_size,device='cpu'):
        super(LSTM,self).__init__()
        self.embedding = nn.Embedding(dico_size,emb_size)
        # Forget, input, output and candidate-cell gates.
        self.gate_f = nn.Linear(emb_size+latent_size, latent_size)
        self.gate_i = nn.Linear(emb_size+latent_size, latent_size)
        self.gate_o = nn.Linear(emb_size+latent_size, latent_size)
        self.gate_c = nn.Linear(emb_size+latent_size, latent_size)
        self.act = nn.Sigmoid()
        self.decoder = nn.Linear(latent_size, dico_size)
        self.device = device
    def one_step(self,embedded,h):
        """Advance the cell by one timestep; updates self.c, returns new h."""
        # embedded : b x e
        # h : b x latent size
        # out : b x latent size
        entry = torch.cat((embedded,h),dim=1)
        f = self.act(self.gate_f(entry))
        i = self.act(self.gate_i(entry))
        self.c = f*self.c + i*torch.tanh(self.gate_c(entry))
        o = self.act(self.gate_o(entry))
        out = o*torch.tanh(self.c)
        return out
    def forward(self,x,h):
        """Unroll over the sequence; returns all hidden states."""
        # x : l x b
        # h : b x latent_size
        # out : l x b x latent_size
        l = []
        embedded = self.embedding(x)
        # Fresh cell state for every forward pass.
        self.c = torch.zeros(h.shape).to(self.device)
        # self.Ct = torch.zeros((x.shape[0],self.D_hidden)).to(device)
        for i in range(x.shape[0]):
            h = self.one_step(embedded[i],h)
            l.append(h)
        return torch.stack(l)
    def decode(self, h):
        """Project a hidden state (b x latent) to vocabulary logits."""
        # h : b x latent size
        return self.decoder(h) # b x dico size
class GRU(nn.Module):
    """Hand-written character-level GRU unrolled over a sequence.

    Bug fix: ``one_step`` called ``self.act_gate``, which was never defined
    (the sigmoid is stored as ``self.act``), so every forward pass raised
    AttributeError.
    """
    def __init__(self, dico_size, emb_size, latent_size):
        super(GRU, self).__init__()
        self.embedding = nn.Embedding(dico_size, emb_size)
        self.latent_size = latent_size
        # Update (z), reset (r) and candidate (h) gates.
        self.gate_z = nn.Linear(emb_size + latent_size, latent_size)
        self.gate_r = nn.Linear(emb_size + latent_size, latent_size)
        self.gate_h = nn.Linear(emb_size + latent_size, latent_size)
        self.linear = nn.Linear(latent_size, dico_size)
        self.act = nn.Sigmoid()
        self.decoder = nn.Linear(latent_size, dico_size)
    def one_step(self, embedded, h):
        """Advance by one timestep: embedded (b x e), h (b x latent) -> new h."""
        entry = torch.cat((embedded, h), dim=1)
        z = self.act(self.gate_z(entry))
        r = self.act(self.gate_r(entry))
        # Standard GRU interpolation between the old state and the candidate.
        return (1-z)*h + z*torch.tanh(self.gate_h(torch.cat((embedded, h*r), dim=1)))
    def forward(self, x, h):
        """Unroll over x (l x b); returns all hidden states (l x b x latent)."""
        states = []
        embedded = self.embedding(x)
        for i in range(x.shape[0]):
            h = self.one_step(embedded[i], h)
            states.append(h)
        return torch.stack(states)
    def decode(self, h):
        """Project a hidden state (b x latent) to vocabulary logits (b x dico)."""
        return self.decoder(h)
def train(model,iterator,optimizer,criterion,device,clip=1):
    """Run one epoch of next-character training; return the mean step loss.

    NOTE(review): reads the module-level globals ``batch_size`` and
    ``latent_size`` to shape the initial hidden state — they must match the
    DataLoader's batch size and the model's hidden size.
    """
    model.train()
    epoch_loss = 0
    for x in iterator:
        batch_loss = 0
        h = torch.zeros(batch_size, latent_size).to(device)
        x = x.to(device)
        # Feed all characters except the last; predict each following one.
        latents = model.forward(x[:-1],h)
        for i in range(x.shape[0]-1):
            yhat = model.decode(latents[i])
            loss = criterion(yhat,x[i+1])
            batch_loss += loss
            epoch_loss += loss.item()
        batch_loss /= (x.shape[0]-1)
        # print(batch_loss)
        optimizer.zero_grad()
        batch_loss.backward()
        # Clip gradients to mitigate exploding gradients in the unrolled RNN.
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
    return epoch_loss/len(iterator)
def generate(model, random_gen=True, eos=EOS, start="", maxlen=200, device='cpu'):
    """Generate a sentence from ``model`` one character at a time.

    :param random_gen: sample from the softmax if True, else greedy argmax.
    :param start: optional prompt; a random uppercase letter when empty.

    Bug fix: when a non-empty ``start`` was supplied, ``sentence`` was never
    initialised and the function crashed with NameError; the prompt is now
    used as the beginning of the generated sentence.
    NOTE(review): reads the module-level ``latent_size`` global — it must
    match the model's hidden size.
    """
    with torch.no_grad():
        model.to(device)
        if start == "":
            # Codes 28..53 are the uppercase letters in id2lettre.
            start = id2lettre[np.random.choice(range(28, 54))]
        sentence = start
        x = torch.LongTensor(string2code(sentence)).view(-1, 1).to(device)
        h = torch.zeros(1, latent_size).to(device)
        # Warm up the hidden state on the prompt.
        h = model.forward(x, h)[-1]  # 1 x latent_size
        softmax = nn.Softmax(dim=1)
        for i in range(maxlen):
            yhat = model.decode(h)
            if random_gen:
                probas = softmax(yhat).cpu().view(-1).detach().numpy()
                ind = torch.tensor(np.random.choice(len(probas), p=probas)).view(1).to(device)
            else:
                ind = yhat.argmax().view(1).to(device)
            if ind == eos:
                break
            sentence += code2string(ind)
            h = model.one_step(model.embedding(ind), h)
        return sentence
def generate_beam(model,emb_size,k=3,start="",argmax=False,max_len=200,device='cpu'):
    """Beam-search generation with k beams; returns k candidate sentences.

    :param argmax: expand each beam with its top-k logits if True, otherwise
        sample k successors from the softmax.
    NOTE(review): reads the module-level ``latent_size`` global; the final
    split on "|" looks like a sentence-terminator convention — confirm.
    """
    with torch.no_grad():
        model.to(device)
        model.eval()
        softmax = nn.Softmax(dim=1)
        # init entries
        if start=="":
            # Codes 28..53 are the uppercase letters in id2lettre.
            start = id2lettre[np.random.choice(range(28,54))]
        x = torch.LongTensor(string2code(start)).unsqueeze(1).to(device) # 1 x 1
        h = torch.zeros(1,latent_size).to(device) # 1 x latent_size
        h = model.forward(x, h)[-1] # 1 x latent_size
        #decoding and tracking
        yhat = model.decode(h) # 1 x dico_size
        indices = yhat.argsort(descending=True)[0,:k] # k
        # Beam scores are accumulated log-probabilities.
        k_probas = torch.log(softmax(yhat)[0,indices]).unsqueeze(1) # k x 1
        h = torch.cat(k*[h]) # k x latent_size
        sentence_id = indices.unsqueeze(0) # 1 x k
        for i in range(max_len):
            h = model.one_step(model.embedding(sentence_id[-1]),h) # k x latent_size
            yhat = model.decode(h) # k x dico_size
            if argmax:
                kxk_indices = yhat.argsort(descending=True)[:,:k].reshape(-1) # k*k
            else:
                kxk_indices = torch.zeros(k,k)
                probas = softmax(yhat)
                kxk_indices = torch.multinomial(probas,num_samples=k)
                kxk_indices = kxk_indices.reshape(-1) # k x k
            arange = torch.arange(k).repeat_interleave(k) # k*k
            # Score all k*k expansions, then keep the k best beams.
            kxk_probas = (k_probas + torch.log(softmax(yhat)[arange,kxk_indices]).reshape(k,k)).reshape(k*k) # k x k
            indices = kxk_probas.argsort(descending=True)[:k] # k
            k_probas = kxk_probas[indices] # k
            new_h = h.permute(1,0).repeat_interleave(k,dim=1).permute(1,0)
            h = new_h[indices]
            sentence_id = sentence_id.repeat_interleave(k,dim=1)[:,indices]
            sentence_id = torch.cat((sentence_id,kxk_indices[indices].unsqueeze(0)))
        sentence = []
        for i in range(k):
            sentence.append((start + code2string(sentence_id[:,i])).split("|")[0])
        return sentence
''' dataset '''
# Train a character-level language model on the Trump speeches corpus.
with open ("trump_full_speech.txt","r") as f:
    text = f.read()
dico_size = len(id2lettre)
''' iterator '''
batch_size = 128
train_iterator = DataLoader(TextDataset(text), collate_fn=collate_fn, batch_size=batch_size, shuffle=True, drop_last=True)
''' hyperparameters '''
emb_size = 64
latent_size = 32
lr = 1e-3
''' model '''
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = LSTM(dico_size, emb_size, latent_size, device).to(device)
''' objective '''
# PAD positions are excluded from the loss.
optimizer = optim.Adam(model.parameters(),lr=lr)
criterion = nn.CrossEntropyLoss(ignore_index=PAD)
nb_epochs = 5
print("Training ...")
# After each epoch, print the loss and a few beam-search samples.
for epoch in range(nb_epochs):
    train_loss = train(model,train_iterator,optimizer,criterion,device,clip=1)
    print(f'Epoch {epoch+1:02}')
    print(f'\tTrain Loss : {train_loss:.3f}')
    sentences = generate_beam(model,emb_size,max_len=100,device=device)
    for i in range(len(sentences)):
        print('\t' + sentences[i])
|
#using recursion to implement power and factorial functions
def power(num, pwr):
    """Return num raised to the non-negative integer power pwr, recursively."""
    # Base case: anything to the power 0 is 1.
    return 1 if pwr == 0 else num * power(num, pwr - 1)
def factorial(num):
    """Return num! for a non-negative integer num, computed recursively."""
    return 1 if num == 0 else num * factorial(num - 1)
# Demo: exercise both recursive helpers.
print ("{} to the power of {} is equal to {}".format(2, 3, power(2,3)))
print ("The factorial of {} is {}".format(5, factorial(5)))
|
from django.http import HttpResponse
from django.shortcuts import render
import mysql.connector
def homepage(request):
    """Render the home page ('key1' is sample template context)."""
    return render(request, 'home.html', {'key1':'value1'})
def selecting(request):
    """Render the selection page."""
    return render(request, 'selecting.html')
def count(request):
    """Count the words submitted in the 'fulltext' GET parameter and render
    the total plus per-word frequencies."""
    fulltext = request.GET['fulltext']
    print("request_print:")
    print(request)
    words = fulltext.split()
    frequencies = dict()
    for token in words:
        frequencies[token] = frequencies.get(token, 0) + 1
    context = {
        'fulltext': fulltext,
        'wordcnt': len(words),
        'worddict': frequencies,
        'items': frequencies.items(),
    }
    return render(request, 'count.html', context)
def about(request):
    """Render the about page."""
    return render(request, 'about.html')
def sign_up(request):
    """Render the sign-up form page."""
    return render(request, 'sign_up.html')
def new_game(request):
    """Render the new-game page."""
    return render(request, 'new_game.html')
def signup_complete(request):
    """Persist the sign-up form fields into the MySQL ``game01`` table and
    render a confirmation page.

    Security fix: the original interpolated raw request values directly into
    the SQL string (SQL injection).  Column names are now checked against a
    whitelist and values are passed as bound query parameters.

    NOTE(review): database credentials are hard-coded here — move them to
    settings / environment variables.
    """
    cnx = mysql.connector.connect(host='106.14.189.3', port='3306', user='root',
                                  passwd='L18_jhk123qwe', database='signup')
    cursor = cnx.cursor()
    # Only these form fields may be written; anything else is ignored.
    allowed_columns = ('name', 'age', 'gender')
    columns = []
    values = []
    for field in request.GET:
        if field in allowed_columns:
            columns.append(field)
            values.append(request.GET[field])
    if columns:
        # Column names come from the whitelist above; values are bound.
        placeholders = ', '.join(['%s'] * len(values))
        sql = "insert into game01(" + ', '.join(columns) + ") VALUES(" + placeholders + ");"
        cursor.execute(sql, values)
        cnx.commit()
    cursor.close()
    cnx.close()
    return render(request, 'signup_complete.html')
|
import spacy
import textacy
nlp = spacy.load('en')
import re
import json
from pprint import pprint
def match_id_pattern(text):
    """Return True if ``text`` ends with a GO id line, e.g. 'id: GO:0000001'."""
    return re.search('id: GO:[0-9]*$', text) is not None
def match_def_pattern(text):
    """Return True if ``text`` starts with 'def' (a definition line)."""
    return re.search('^def:*', text) is not None
# Parse go.obo line by line: map each GO term id line to its definition line.
res_dict = {}
key = None
val = None
with open('go.obo', 'r') as f:
    l = f.readline()
    while l:
        l = l.strip('\n')
        txt = l.strip()
        if match_id_pattern(txt):
            key = txt
        if match_def_pattern(txt):
            val = txt
            # NOTE(review): if a def line ever precedes the first id line,
            # this stores it under key=None — confirm go.obo ordering.
            res_dict[key] = val
        l = f.readline()
# Round-trip the mapping through JSON on disk.
temp_json = 'temp_data.json'
with open(temp_json, 'w') as file:
    file.write(json.dumps(res_dict))
with open(temp_json) as tmp_file:
    data_dict = json.loads(tmp_file.read())
# Substrings to strip from definition strings during cleanup.
stop_words = ['OBSOLETE', 'def', '"']
def process_key(key_txt):
    """Return the text after the last ':' (the bare GO numeric id)."""
    *_, tail = key_txt.split(':')
    return tail
def process_val(val_txt):
    """Remove every stop-word substring from ``val_txt`` and return it.

    Fixes: dropped the needless ``global`` declaration (the list is only
    read, never rebound) and the bare ``try/except: pass`` that silently
    swallowed every error (str.replace cannot fail on string inputs).
    """
    for s in stop_words:
        val_txt = val_txt.replace(s, '')
    return val_txt
def main_1():
    """Clean the keys and values of data_dict and dump the result to
    temp_data_1.json."""
    cleaned = {process_key(k): process_val(v) for k, v in data_dict.items()}
    temp_json_1 = 'temp_data_1.json'
    with open(temp_json_1, 'w') as file:
        file.write(json.dumps(cleaned))
def tokenize_text(txt):
    """Normalise ``txt`` with textacy (strip punctuation, URLs, numbers and
    currency symbols) and return its lowercased tokens longer than 2 chars.

    Fix: removed a large block of commented-out lemmatisation code that was
    dead weight.
    """
    txt = textacy.preprocess.remove_punct(txt, marks=';,:[]()-+.=<>')
    txt = textacy.preprocess.replace_urls(txt, replace_with=' ')
    txt = textacy.preprocess.replace_numbers(txt, replace_with = ' ')
    txt = textacy.preprocess.replace_currency_symbols(txt, replace_with=None)
    txt = textacy.preprocess.normalize_whitespace(txt)
    res = []
    doc = textacy.Doc(txt, lang='en')
    for s in textacy.extract.words(doc, exclude_pos=None, min_freq=1):
        if len(str(s)) > 2 :
            res.append(str(s).lower())
    return res
def main_2():
    """Tokenise every definition in temp_data_1.json and write the result to
    temp_data_2.json."""
    temp_json_1 = 'temp_data_1.json'
    with open(temp_json_1) as tmp_file:
        definitions = json.loads(tmp_file.read())
    tokenized = {go_id: tokenize_text(definition)
                 for go_id, definition in definitions.items()}
    temp_json_2 = 'temp_data_2.json'
    with open(temp_json_2, 'w') as file:
        file.write(json.dumps(tokenized))
# -----------#
# Run the two cleaning passes in order (main_2 consumes main_1's output).
main_1()
main_2()
import pygame
from Settings import maze as maze_settings
import MazeFunctions
from Color import Color
# Font size used for drawing cell-coordinate labels.
# Fix: removed a module-level ``global`` statement, which is a no-op
# (``global`` only has an effect inside a function body).
coordinate_text_size = 10
class Cell:
    """
    One cell of a maze; knows how to draw itself (and its walls) with pygame.

    :param tuple coordinate: square = (row, column), circle = (ring, element in ring), hexagon = (ring, element in ring) triangle = (row, element in row)
    :param tuple position: on-screen pixel position used when drawing text/signs
    :param tuple graph_position: pixel position of this cell's node in the maze graph view
    :param int index: cell index in cell_list of a maze
    :param list of tuples walls: list of coordinate pairs of wall endpoints
    :param list of booleans walls_bool: tuple of walls, if they exists or not
    :param list of tuples border_points: endpoints points of walls
    :param boolean visible: if a cell is discovered or not, used in discovery of a maze
    """
    def __init__(self, coordinate, index, walls_bool):
        # Geometry attributes are filled in later by the maze builder.
        self.coordinate = coordinate
        self.position = None
        self.graph_position = None
        self.index = index
        self.walls = None # coordinate pairs of walls
        self.walls_bool = walls_bool # tuple of walls, if they exists or not
        self.border_points = None # points of walls
        self.visible = None # used in discovery of a maze
    def color_graph(self, screen, color, size=maze_settings.graph_cell_size):
        """
        Draw this cell's node in the graph view as a filled circle.

        :param screen: pygame surface to draw on
        :param color: fill color
        :param size: circle radius
        """
        pygame.draw.circle(screen, color, self.graph_position, size)
    def color(self, screen, color, graph_bool=False, line_color=Color.black, coordinate_text_bool=False):
        """
        Fill the cell and draw its walls (existing walls in line_color,
        missing walls erased in the fill color).

        :param screen: pygame surface to draw on
        :param color: fill color for the cell polygon
        :param graph_bool: also draw the cell's graph node
        :param line_color: color for existing walls and border points
        :param coordinate_text_bool: overlay the cell coordinate as text
        """
        pygame.draw.polygon(screen, color, self.border_points)
        l = len(self.walls)
        for i in range(0, l):
            if self.walls_bool[i]:
                pygame.draw.line(screen, line_color, self.walls[i][0], self.walls[i][1])
            else:
                # Erase the absent wall by drawing it in the fill color.
                pygame.draw.line(screen, color, self.walls[i][0], self.walls[i][1])
        for point in self.border_points:
            pygame.draw.line(screen, line_color, point, point)
        if coordinate_text_bool:
            self.text_display(screen, str(self.coordinate), coordinate_text_size, Color.black, color)
        if graph_bool:
            # A white node would be invisible on a white background.
            if color == Color.white:
                color = Color.black
            self.color_graph(screen, color, maze_settings.graph_cell_size)
    def color_border_points(self, screen, line_color):
        """Draw the cell's corner points as single-pixel lines."""
        for point in self.border_points:
            pygame.draw.line(screen, line_color, point, point)
    def color_grid(self, screen, color, graph_bool=False, line_color=Color.black, coordinate_text_bool=False,
                   walls_bool=False):
        """
        Fill the cell and draw its walls from an explicit walls_bool override
        (all walls are drawn when no override is given).

        :param walls_bool: optional per-wall booleans overriding self.walls_bool
        :param screen: pygame surface to draw on
        :param color: fill color for the cell polygon
        :param graph_bool: also draw the cell's graph node
        :param line_color: color for existing walls and border points
        :param coordinate_text_bool: overlay the cell coordinate as text
        """
        pygame.draw.polygon(screen, color, self.border_points)
        l = len(self.walls)
        for i in range(0, l):
            if bool(walls_bool):
                if walls_bool[i]:
                    pygame.draw.line(screen, line_color, self.walls[i][0], self.walls[i][1])
                else:
                    pygame.draw.line(screen, color, self.walls[i][0], self.walls[i][1])
            else:
                pygame.draw.line(screen, line_color, self.walls[i][0], self.walls[i][1])
        self.color_border_points(screen, line_color)
        if coordinate_text_bool:
            self.text_display(screen, str(self.coordinate), coordinate_text_size, Color.black, color)
        if graph_bool:
            if color == Color.white:
                color = Color.black
            self.color_graph(screen, color, maze_settings.graph_cell_size)
    def text_display(self, screen, text, text_size, text_color=Color.black, background_color=Color.white):
        """
        displays text to a specific cell
        :param screen: pygame surface to draw on
        :param text: string to render
        :param text_size: font size
        :param text_color: foreground color
        :param background_color: background fill behind the text
        """
        MazeFunctions.text_display(screen, self.position[0], self.position[1], text, text_size, text_color,
                                   background_color)
    def draw_sign(self, screen, color, sign_size=0):
        """
        Draw a filled circular marker at the cell's position.

        :param screen: pygame surface to draw on
        :param color: marker color
        :param sign_size: marker radius
        """
        pygame.draw.circle(screen, color, self.position, sign_size)
|
#!/usr/bin/env python
from setuptools import setup, find_packages
# Packaging metadata; the package code lives under src/ (src-layout) and two
# command-line scripts are installed from scripts/.
setup(
    name="preproc_chip_like_data",
    version="0.1.0",
    author="Hanbin Lu",
    author_email="lhb032@gmail.com",
    license="LICENSE",
    packages=find_packages(where="src"),
    package_dir={"": "src"},
    scripts=["scripts/sra_chip_to_bw.py", "scripts/tagdir_to_bw.py"],
)
|
from django.urls import path
from . import views
# Todo app routes: list all tasks, and delete or edit a task by primary key.
urlpatterns = [
    path('', views.allTodos, name='alltodos'),
    path('delete_task/<int:pk>', views.deleteTodos, name='deletetodos'),
    path('edit_task/<int:pk>', views.editTodos, name='edittodos'),
]
import n2
import numpy as np
from pathlib import Path
import tqdm
class KNNClassifier(object):
    """Builds an approximate-nearest-neighbour (HNSW) index over fingerprint
    vectors with n2, for later kNN-graph construction."""
    def __init__(self, fingerprint_kind, dimension, verbose):
        self.fingerprint_kind = fingerprint_kind  # used to name the index file
        self.dimension = dimension  # fingerprint vector length
        self.verbose = verbose
    def build_ann_index(self, fingerprints, nthreads=1, overWrite=False):
        """WARNING: set threads correctly! I set it to 1 so you don't run out of memory.
        This builds an approximate nearest neighbors index, used to build a kNN graph.
        n2 is a good choice because it is fast and also allows streaming upload. Further,
        it outperforms many other libraries according to ann_benchmarks. n2 is awesome.
        It does not, however, offer dice, jaccard, or tanimoto. In practice cosine works fine.

        Fixes: the save path now reuses the same Path object used for the
        existence check (it was previously rebuilt by string concatenation),
        and a dead ``else: pass`` branch was removed."""
        index_file = Path("../processed_data/"+self.fingerprint_kind+'_n2_index.hnsw')
        if index_file.is_file() and not overWrite:
            raise Exception('Index file exists already. Set `overWrite` to true to re-write it')
        # Sparse inputs (e.g. scipy matrices) are densified before upload.
        if not isinstance(fingerprints, np.ndarray):
            if self.verbose:
                print('converting to numpy')
            fingerprints = fingerprints.toarray()
        if self.verbose:
            print('adding vector data to n2')
        index = n2.HnswIndex(self.dimension, "angular")
        for fp in tqdm.tqdm(fingerprints, smoothing=0):
            index.add_data(fp)
        if self.verbose:
            print(f'building index with {nthreads}')
        index.build(n_threads=nthreads)
        index.save(str(index_file))
|
#!/usr/local/bin/python3
def checkout(score):
    """Print the suggested darts finish(es) for ``score``.

    Scans checkouts_file, where each line is "<score> <dart> <dart> ...",
    and prints the combination for every line whose score matches.

    Fixes: the file is now closed via a context manager, the local variable
    no longer shadows the function's own name, and blank lines in the file
    no longer crash the lookup.
    """
    with open("checkouts_file", "r") as finishes:
        for line in finishes:
            parts = line.split()
            if parts and int(parts[0]) == score:
                print('Get these to checkout:\t', ', '.join(parts[1:]))
def game_121(type):
    """Play '121': try to check out the target score within 3 throws (9 darts),
    finishing on a double; on success the target increases by one.

    NOTE(review): the game restarts by calling itself recursively rather than
    looping, so a long session keeps growing the call stack; the parameter
    name ``type`` shadows the builtin.
    """
    print('\nThe aim of this game is to checkout', type, 'in 9 darts.\nIf successful, increase the target by 1 and keep going!!!')
    score = type
    goes = 0
    while score > 0:
        print('\t')
        checkout(score)
        print('You need to get', score)
        print('Number of goes left =', 3 - goes)
        dart = int(input('What did you score?\t'))
        score = score - dart
        goes += 1
        # Bust: overshooting, or leaving 1 (impossible to finish on a double).
        if score < 0 or score == 1:
            print('You have gone bust! Start again!!')
            game_121(type)
        if score == 0 and goes <= 3:
            double = input('Did you checkout with a double? (y/n)\t')
            if double == 'y':
                print('Congratulations, you can now attempt to get', type + 1)
                next_level = input('Do you want to continue play on? (y/n)')
                if next_level == 'y':
                    type += 1
                    game_121(type)
                if next_level == 'n':
                    print('All Done, Come play again soon!!')
                    exit()
            elif double == 'n':
                print('You have to finish on a double! Start Again!')
                game_121(type)
        if goes >= 3 and score != 0:
            print('\nYou didnt get it this time, keep going!!')
            game_121(type)
# Script entry point: start at the traditional 121 target. Guarded so the
# module can be imported (e.g. for testing) without launching the game.
if __name__ == '__main__':
    type = 121
    game_121(type)
|
'''
Solution:
1. Perform DP as there are repeated subproblems.
Recursive equation:
1. If string is empty => pattern depends on the truth value of 2 indices before.
2. If s[i] == p[j] or p[j] == '.' => isMatch(s, p, i-1, j-1)
3. If p[j] == '*':
if isMatch(s, p, i, j-2) == False:
if p[j-1] == '.' or p[j-1] == s[i] => isMatch(s, p, i-1, j)
4. Else, isMatch(...) = False
Time Complexity: O(m x n) for both approaches
Space Complexity: O(m x n) for approach - 1 and O(m) for approach - 2
--- Passed all testcases successfully on leetcode for both the solutions.
'''
class RegExDP_I:
    """Regular-expression matching supporting '.' and '*' via a full 2-D DP.

    Fix: the original class was named ``RegExDP-I`` — a hyphen is not legal
    in a Python identifier, so the file could not even be parsed. Renamed
    to ``RegExDP_I`` (no caller could have referenced the broken name).
    """

    def isMatch(self, s: str, p: str) -> bool:
        """Return True iff pattern *p* matches the entire string *s*.

        dpMatches[r][c] is True  <=>  s[:r] matches p[:c].
        Time O(m*n), space O(m*n).
        """
        m = len(p)
        n = len(s)
        dpMatches = [[False for i in range(m+1)] for j in range(n+1)]
        dpMatches[0][0] = True  # empty pattern matches empty string
        # First row: empty string vs. pattern — only "x*" pairs can vanish.
        for c in range(1, m+1):
            if p[c-1] == '*':
                dpMatches[0][c] = dpMatches[0][c-2]
        # Fill the DP table.
        for r in range(1, n+1):
            for c in range(1, m+1):
                if (p[c-1] == '.' or s[r-1] == p[c-1]):
                    # Literal or wildcard single-character match.
                    dpMatches[r][c] = dpMatches[r-1][c-1]
                elif (p[c-1] == '*'):
                    # Zero occurrences of the starred element...
                    dpMatches[r][c] = dpMatches[r][c-2]
                    if (p[c-2] == '.' or s[r-1] == p[c-2]):
                        # ...or one more occurrence.
                        dpMatches[r][c] = dpMatches[r][c] or dpMatches[r-1][c]
                else:
                    dpMatches[r][c] = False
        return dpMatches[n][m]
class RegExDP_II:
    """Same regex DP as RegExDP_I but keeping only two rows (O(m) space).

    Fix: the original class was named ``RegExDP-II`` — a hyphen is not a
    valid Python identifier (SyntaxError). Renamed to ``RegExDP_II``.
    """

    def isMatch(self, s: str, p: str) -> bool:
        """Return True iff pattern *p* (with '.'/'*') matches all of *s*.

        The entire procedure mirrors the 2-D version, but only the previous
        row (dpMatches[0]) and the row under construction (dpMatches[1])
        are retained. Time O(m*n), space O(m).
        """
        m = len(p)
        n = len(s)
        dpMatches = [[False for i in range(m+1)] for j in range(2)]
        dpMatches[0][0] = True  # empty pattern vs. empty string
        # Row 0: empty string — only "x*" pairs can match nothing.
        for c in range(1, m+1):
            if p[c-1] == '*':
                dpMatches[0][c] = dpMatches[0][c-2]
        for r in range(1, n+1):
            # Start the new row; column 0 (empty pattern) is always False here.
            dpMatches[1] = [False]
            for c in range(1, m+1):
                if (p[c-1] == '.' or s[r-1] == p[c-1]):
                    dpMatches[1].append(dpMatches[0][c-1])
                elif (p[c-1] == '*'):
                    # Zero occurrences, then possibly one more.
                    dpMatches[1].append(dpMatches[1][c-2])
                    if (p[c-2] == '.' or s[r-1] == p[c-2]):
                        dpMatches[1][c] = dpMatches[1][c] or dpMatches[0][c]
                else:
                    dpMatches[1].append(False)
            dpMatches[0] = dpMatches[1]
        return dpMatches[0][m]
# Minimal ttk calculator: two entry fields and +, -, *, / buttons whose
# results are printed to stdout; each operation clears the inputs.
from tkinter import *
from tkinter import ttk

root=Tk()
##login=PhotoImage(file='source.gif')
##resize=login.subsample(10,10)
en1=ttk.Entry(root,width=30)  # first operand
en1.pack()
en2=ttk.Entry(root,width=30)  # second operand
en2.pack()
def plus(x, y):
    """Print the sum of the two operands and clear both entry fields."""
    result = x + y
    print(result)
    for entry in (en1, en2):
        entry.delete(0, END)
def minus(x, y):
    """Print the difference x - y and clear both entry fields."""
    result = x - y
    print(result)
    for entry in (en1, en2):
        entry.delete(0, END)
def times(x, y):
    """Print the product of the two operands and clear both entry fields."""
    result = x * y
    print(result)
    for entry in (en1, en2):
        entry.delete(0, END)
def division(x, y):
    """Print x / y and clear both entry fields.

    NOTE: raises ZeroDivisionError when y == 0, exactly as the original did.
    """
    result = x / y
    print(result)
    for entry in (en1, en2):
        entry.delete(0, END)
# Buttons: the operands are read and int-converted at click time.
pls=ttk.Button(root,text='+')
pls.pack()
pls.config(command=lambda:plus(int(en1.get()),int(en2.get())))
mins=ttk.Button(root,text='-')
mins.pack()
mins.config(command=lambda:minus(int(en1.get()),int(en2.get())))
tims=ttk.Button(root,text='*')
tims.pack()
tims.config(command=lambda:times(int(en1.get()),int(en2.get())))
divide=ttk.Button(root,text='/')
divide.pack()
divide.config(command=lambda:division(int(en1.get()),int(en2.get())))
##def buttonpress(event):
##    print('clicked')
##pls.bind('<ButtonPress>',buttonpress)
# Per-button ttk styles (colors/fonts).
# NOTE(review): no root.mainloop() call appears in this file — confirm the
# window is actually started elsewhere (or in an interactive session).
style=ttk.Style()
##print(style.theme_names())
style.theme_use('winnative')
style.configure("TButton" , foreground='red')
style.configure("info.TButton" ,background='blue' , font=('Arial',18,'bold'))
pls.configure(style='info.TButton')
style.configure("div.TButton" ,background='green' , font=('Arial',18,'bold'))
divide.configure(style='div.TButton')
style.configure("tims.TButton" ,background='black' , font=('Arial',18,'bold'))
tims.configure(style='tims.TButton')
style.configure("mins.TButton" ,background='grey' , font=('Arial',18,'bold'))
mins.configure(style='mins.TButton')
'''
Created on 19/05/2015
@author: Juandoso
'''
import pandas as pd
import os, csv
from pandas.core.frame import DataFrame
# Working directory for the West Nile Virus prediction data.
data_dir = 'F:/WestNileVirusPrediction/data/'

#Add up duplicated rows
def duplicates():
    """Merge duplicate rows of train.csv and write train3.csv.

    Python 2 only: uses reader.next(), print statements and 'rb'/'wb' csv
    file modes. Rows whose first five fields match have their last two
    numeric columns summed; rows with a positive last column are appended
    extra times (oversampling).

    NOTE(review): in the duplicate branch the sums are written into `row`
    but `row0` is not advanced, so the merged `row` is never appended —
    verify this is the intended behaviour.
    """
    rread = csv.reader(open(os.path.join(data_dir,'train.csv'), 'rb'))
    header = rread.next()
    print header
    row0 = rread.next()
    unique_rows = [row0]
    for row in rread:
        if row0[:5] == row[:5] :
            wn = int(row[-1]) + int(row0[-1])
            #row[-1] = 0 if wn == 0 else 1
            row[-1] = int(row[-1]) + int(row0[-1])
            row[-2] = int(row[-2]) + int(row0[-2])
        else:
            unique_rows.append(row0)
            if int(row0[-1]) > 0:
                # Oversample: one extra copy per unit of the last column.
                for i in range(int(row0[-1])):
                    unique_rows.append(row0)
            row0 = row
    writer = csv.writer(open(os.path.join(data_dir,'train3.csv'), 'wb'))
    writer.writerow(header)
    writer.writerows(unique_rows)
from datetime import datetime, timedelta

t = "2007-05-30"  # NOTE(review): appears unused below — confirm before removing
weather = pd.read_csv(os.path.join(data_dir,'weather.csv'), header=0)
weather = weather.interpolate()  # fill missing numeric values by interpolation
def lookup_last_week_weather(look_str, weatherDF, weather_station=1):
    """Return the weather rows for the 35 days up to and including *look_str*.

    Rows are collected newest-first for the given station. (The name says
    "last week" but the window is 35 days — kept for compatibility.)

    Fix: the original built the result with repeated DataFrame.append,
    which was deprecated and removed in pandas 2.0; this version collects
    the per-day slices and concatenates them once with pd.concat.

    :param look_str: date string in "%Y-%m-%d" format.
    :param weatherDF: DataFrame with at least 'Date' and 'Station' columns.
    :param weather_station: station id to filter on (default 1).
    """
    now = datetime.strptime(look_str, "%Y-%m-%d")
    frames = []
    for i in range(35):
        day = (now - timedelta(days=i)).strftime("%Y-%m-%d")
        frames.append(weatherDF[(weatherDF.Date == day) & (weatherDF.Station == weather_station)])
    return pd.concat(frames)
def weather_data(look_str, weatherDF):
    """Return a one-row DataFrame of per-feature means over the weather
    window preceding *look_str* (see lookup_last_week_weather), with a
    'Date' column set to *look_str*.

    Fix: the original also computed per-feature max/min Series (via the
    deprecated empty ``pd.Series()`` constructor) but never used them —
    only the averages were returned; that dead code is removed here.
    """
    features = ["Tmax","Tmin","Tavg","DewPoint", "WetBulb", "Heat","Cool","SnowFall", "PrecipTotal", "ResultSpeed"]
    weather_week = lookup_last_week_weather(look_str, weatherDF)[features]
    # Column-wise averages over the window, transposed into a single row.
    datapoints = weather_week.mean(0)
    result = DataFrame(datapoints).T
    result["Date"] = look_str
    return result
# Build a per-date table of windowed weather averages and save it.
weather_avg = DataFrame()
dates = weather["Date"]
for d in dates:
    row = weather_data(d, weather)
    # NOTE(review): DataFrame.append was removed in pandas 2.0 — this loop
    # needs pd.concat on newer pandas versions.
    weather_avg= weather_avg.append(row, ignore_index=True)
weather_avg.to_csv(os.path.join(data_dir,'weather_info_averages5.csv'), index=False)
# duplicates()
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt

# Plot coarse/fine grid points read from Fortran unit files (fort.100-103)
# together with a reference circle whose center and radius come from fort.99.
plt.clf()
fig = plt.gcf()
fig.set_size_inches(10.0, 10.0)
dat1 = np.loadtxt('fort.100')
dat2 = np.loadtxt('fort.101')
dat3 = np.loadtxt('fort.102')
dat4 = np.loadtxt('fort.103')
Npts1 = dat1.shape[0]
Npts2 = dat2.shape[0]
Npts3 = dat3.shape[0]
Npts4 = dat4.shape[0]
# coarse grids: circle markers, blue/red per file
for i in range(Npts1):
    plt.plot( dat1[i,0], dat1[i,1], marker='o', color='b' )
for i in range(Npts2):
    plt.plot( dat2[i,0], dat2[i,1], marker='o', color='r' )
# fine grids: triangle markers
for i in range(Npts3):
    plt.plot( dat3[i,0], dat3[i,1], marker='^', color='b' )
for i in range(Npts4):
    plt.plot( dat4[i,0], dat4[i,1], marker='^', color='r' )
# center: star marker plus a translucent circle of radius c[2]
c = np.loadtxt('fort.99')
plt.plot( c[0], c[1], marker='*', color='k' )
c1 = plt.Circle( (c[0],c[1]), radius=c[2], color='k', alpha=0.5 )
plt.gca().add_artist( c1 )
plt.axis('equal')
plt.xlim(0.0,16)
plt.ylim(0.0,16)
plt.grid()
plt.savefig('fort.gridpts.pdf')
|
# Benchmark-ranking web service: clients POST results, server keeps a CSV
# of SPEC-style ratios and answers with per-category rankings.
from flask import Flask, request, render_template
from math import sqrt
import pandas as pd
import numpy as np
from shutil import copyfile

app = Flask(__name__)
def spec(df, benchmark, client):
    """Return, as a string, the 1-based rank of *client* for *benchmark*,
    where rank 1 is the highest SPEC_ratio.
    """
    ranked = df[df["benchmark"] == benchmark].copy()
    ranked.sort_values(by='SPEC_ratio', ascending=False, inplace=True, ignore_index=True)
    position = list(ranked["client_id"]).index(client) + 1
    return str(position)
@app.route('/benchmarks' ,methods=['POST'])
def ev():
    """Register or refresh a client's benchmark results and report rankings.

    Expects JSON {"hostname": str, "benchmarks": [{"name", "score"}, ...]}.
    Ratios are computed against the reference client (client_id == 1) and
    persisted to records.csv (also copied to ./static/db.csv for download).
    """
    df = pd.read_csv("records.csv")
    sent_json = request.get_json(force=True)
    prd=1  # NOTE(review): unused — candidate for removal
    SPECS=[]
    # Row index of the reference client's first benchmark record.
    index_reference=df[df["client_id"] == 1].index[0]
    if sent_json["hostname"] not in df["client_hostname"].values:
        # New client: allocate the next client_id and append one row per benchmark.
        c=df["client_id"].max()+1
        counter=0
        for i in sent_json["benchmarks"]:
            # Columns: client_id, hostname, benchmark, Execution time,
            # SPEC_ratio (reference time / client score), Geometric mean
            # (-9999 is a placeholder filled in below).
            df.loc[df.index.max()+1]=[c,sent_json["hostname"],i["name"],i["score"],float(df.loc[index_reference+counter,"Execution time"])/float(i["score"]), -9999]
            SPECS.append(float(df.loc[index_reference+counter,"Execution time"])/float(i["score"]))
            counter+=1
    else:
        # Returning client: keep its id, drop its old rows, then re-append.
        c = df[df["client_hostname"] == sent_json["hostname"]].iloc[0]["client_id"]
        df.drop(df[df["client_hostname"] == sent_json["hostname"]].index, inplace=True)
        df.reset_index(drop=True,inplace=True)
        index_reference=df[df["client_id"] == 1].index[0]
        counter=0
        for i in sent_json["benchmarks"]:
            df.loc[df.index.max()+1]=[c,sent_json["hostname"],i["name"],i["score"],float(df.loc[index_reference+counter,"Execution time"])/float(i["score"]), -9999]
            SPECS.append(float(df.loc[index_reference+counter,"Execution time"])/float(i["score"]))
            counter+=1
    # NOTE(review): sqrt(prod) is the geometric mean only for exactly 2
    # values; for N benchmarks it should presumably be prod ** (1/N) —
    # confirm before changing stored rankings.
    SPECS=sqrt(np.array(SPECS).prod())
    df["Geometric mean"].replace({-9999:SPECS}, inplace=True)
    df.sort_values(by='Geometric mean', ascending=False, inplace=True, ignore_index=True)
    df.to_csv("records.csv", index=False, encoding="utf-8")
    ranking=list(df["client_id"].unique()).index(c)+1
    copyfile("records.csv","./static/db.csv")
    statement1 = "your rank for CPU is: "+spec(df,"bubbleSort",c)+" \n"
    statement2 = "your rank for STORAGE is: "+spec(df,"fileEdit",c)+" \n"
    statement3 = "your rank for RAM is: "+spec(df,"binaryTree",c)+" \n"
    statement4 = "your rank for MEMORY is: "+spec(df,"hanoi",c)+" \n"
    statement5 = "your rank for WIFI CARD is: "+spec(df,"network",c)+" \n"
    # Total client count is rows // 5: each client contributes 5 benchmark rows.
    return "Your benchmark result is: " + str(SPECS) + " with a ranking of: " + str(ranking) + "/" + str((df.index.max()+1)//5) + "\n \n" +statement1+statement2+statement3+statement4+statement5
@app.route('/')
def index():
    """Serve the landing page."""
    template_name = "index.html"
    return render_template(template_name)
@app.route('/db')
def db():
    """Serve the page that offers the records database for download."""
    template_name = "download.html"
    return render_template(template_name)
if __name__ == '__main__':
    # Development server only (debug=True enables auto-reload + debugger).
    app.run(debug = True)
|
#!/usr/bin/python
"""Parse a comma-separated connection log and report C2 (command-and-control)
activity: infected hosts, C2 server IPs, first connection time and per-server
byte totals. Usage: parser.py <logfile>.

Fixes vs. original: the bare ``except:`` (which reported *every* failure —
including parsing bugs — as "File Not Found") now catches only OSError;
the redundant ``len < 2 or len > 2`` check is simplified; string joins use
the ``".".join`` idiom directly.
"""
import csv
import sys
import operator
import datetime
import re

# Zero-padded month number -> short month name, for display.
MONTH_DICT = { '01': "Jan", '02': "Feb", '03': "Mar", '04': "Apr" , '05': "May", '06': "Jun", '07': "Jul", '08': "Aug" , '09': "Sep", '10': "Oct", '11': "Nov", '12': "Dec"}

if len(sys.argv) != 2:
    # Exactly one argument (the log file) is required.
    print("Error! - No Log File Specified!")
else:
    try:
        with open(sys.argv[1]) as csv_file:
            reader = csv.reader(csv_file, delimiter=',')
            ipdict = {}   # C2 server IP -> total bytes (second pass)
            iplist = []   # unique infected source IPs
            plist = []    # scratch: IPs split into octets for sorting
            flist = []    # formatted, sorted IP strings
            slist = []    # unique C2 server IPs
            conn = 0      # epoch timestamp of the first C2 connection
            print("Source File:", sys.argv[1])
            for row in reader:
                # Keep rows whose port field is one of the known C2 ports.
                if row[4] in ['1337', '1338', '1339', '1340']:
                    if row[1] not in iplist:
                        iplist.append(row[1])
                    if row[2] not in slist and row[1] in iplist:
                        slist.append(row[2])
                    if conn == 0:
                        conn = row[0]
            # Sort infected IPs numerically by their last octet.
            for i in iplist:
                plist.append(i.split("."))
            plist.sort(key=lambda octets: int(octets[3]))
            for i in plist:
                flist.append(".".join(i))
            print("Systems Infected:", len(flist))
            print("Infected System IPs:\n", flist, sep='')
            print("C2 Servers:", len(slist))
            plist.clear()
            flist.clear()
            # Sort C2 IPs by octets 2-4 (string order, as in the original).
            for i in slist:
                plist.append(i.split("."))
            plist.sort(key=operator.itemgetter(1, 2, 3))
            for i in plist:
                flist.append(".".join(i))
            print("C2 Server IPs:\n", flist, sep='')
            ans = datetime.datetime.utcfromtimestamp(float(conn)).strftime('%Y-%m-%d %H:%M:%S')
            lis2 = re.split("[- ]", ans)  # [year, month, day, time]
            if lis2[1] in MONTH_DICT:
                month = MONTH_DICT[lis2[1]]
                print("First C2 Connection: ", lis2[0], "-", month, "-", lis2[2], " ", lis2[3], " UTC", sep='')
            # Second pass: total bytes sent to each C2 server (flist now
            # holds the C2 IPs).
            csv_file.seek(0)
            for row in reader:
                if row[2] in flist and row[2] not in ipdict.keys():
                    ipdict[row[2]] = int(row[5])
                elif row[2] in flist and row[2] in ipdict.keys():
                    ipdict[row[2]] += int(row[5])
            fdict = sorted(ipdict.items(), key=operator.itemgetter(1), reverse=True)
            print("C2 Data Totals:", fdict)
    except OSError:
        # Only file-system errors get the "not found" message now; parsing
        # errors are no longer silently mislabeled.
        print("Error! - File Not Found!")
|
"""account urls."""
from django.urls import path
from django.conf.urls import url
from . import views
urlpatterns = [
path('', views.account, name='account'),
]
|
####Script to create .restart.h5 file (with one cycle)
#based on the abundance of the ABUPP###.DAT (mppnp output first cycle)
#and the header infos from the se file. Do a mass grid refinement that
#of the se grid to match the ABuPP grid
############Read restart0004910.check (from Falks RUN103 dir) file to get abundances,Z,A,(isomeric_state)
#take structure from the ABUPP00049100000.DAT file (inside to surface)
import utils as u

# --- Read restart0004910.check to recover abundances, A, Z per shell ------
# (Python 2 script: xrange and print statements are used below.)
f1=open('restart0004910.check')
lines=f1.readlines()
f1.close()
shells_massfrac=[]  # per-shell list of mass-fraction strings
A=[]                # mass numbers, one per isotope column
Z=[]                # charge numbers (filled after name conversion below)
mass=[]
for k in range(len(lines)):
    #skip header (first two lines)
    if k<2:
        continue
    if (k==2):
        # Line 3 holds the column titles: fixed-width (11 char) isotope names.
        isotopes=[]
        line=lines[k]#[70:-1]#200]#150]
        idx_iso=[]
        isotopes1=[]
        for i in xrange(0,len(line),11):
            isotopes1.append(line[i:i+11].strip())
            #idx_iso.append(line[i:i+11][:5])
        isotopes1=isotopes1[:-1]
        # Normalize names to "El-A" form; NEUT/PROT are special-cased to A=1.
        for iso in range(len(isotopes1)):
            if 'NEUT' in isotopes1[iso]:
                A.append(1)
                isoname=isotopes1[iso][:-3].strip()
                isotopes.append( isoname[0]+isoname[1:].lower()+'-'+str(A[-1]))
            elif 'PROT' in isotopes1[iso]:
                A.append(1)
                isotopes.append( 'H'+'-'+str(A[-1]))
            else:
                A.append(int(isotopes1[iso][-3:]))
                isoname=isotopes1[iso][:-3].strip()
                isotopes.append( isoname[0]+isoname[1:].lower()+'-'+str(A[-1]))
        continue
    # Remaining lines: one shell each, whitespace-separated mass fractions.
    massfracs=lines[k].split()#[7:]
    shells_massfrac.append(massfracs)
# Convert isotope names (beyond neutron/proton) to get charge numbers.
u.convert_specie_naming_from_h5_to_ppn(isotopes[2:])
Z=[0,1]+[int(k) for k in list(u.znum_int)]
#A1=[1,1]+[int(k) for k in list(u.amass_int)]
#Assume no isomeric states!!!:
isomeric_state=[1]*len(isotopes)
###############Read se file to get header information
import nugridse as mp
import sewrite as sw
run_H5='e2D14.0077501.se.h5'
cycle_all=77991
sefiles=mp.se('.',run_H5,rewrite=True)
hattrs=sefiles.se.hattrs
hattrs=[]
notneeded_header=['HDF5_version','modname','dcoeff_unit','radius_unit','rho_unit','temperature_unit']
for k in range(len(sefiles.se.hattrs)):
if sefiles.se.hattrs[k] in notneeded_header:
continue
hattrs.append(sefiles.se.hattrs[k])
hattrs_data=[]
for k in range(len(hattrs)):
hattrs_data.append(sefiles.get(hattrs[k]))
mass2=[]
dcols=[]
for k in range(len(sefiles.se.dcols)):
if sefiles.se.dcols[k] == 'yps':
continue
dcols.append(sefiles.se.dcols[k])
dcols_data=[]
for k in range(len(dcols)):
if dcols[k]=='mass':
mass2=sefiles.get(cycle_all,dcols[k])
dcols_data.append(sefiles.get(cycle_all,dcols[k]))
cattrs=[]
for k in range(len(sefiles.se.cattrs)):
cattrs.append(str(sefiles.se.cattrs[k]))
cattrs_data=[]
for k in range(len(cattrs)):
if sefiles.se.cattrs[k]=='shellnb': #number of cells from restart0004910.check
cattrs_data.append(len(shells_massfrac))
continue
cattrs_data.append(sefiles.get(cycle_all,str(cattrs[k])))
print 'hattrs',hattrs
print 'cattrs',cattrs
#take structure from the ABUPP00049100000.DAT file (inside to surface)
f1=open('ABUPP00049102001.DAT')
lines=f1.readlines()
f1.close()
xm=[]   # mass coordinate per cell (column 1)
t9t=[]  # temperature column (column 2)
for k in range(len(lines)):
    if k<4:
        continue  # skip the 4 header lines
    line=lines[k]
    xm.append(float(line.split()[1]))
    t9t.append(float(line.split()[2]))
# NOTE(review): plt is never imported in this script — this line raises
# NameError unless run in a session where pyplot is already bound to plt.
plt.plot(xm,t9t,marker='x')
###assume grid is correctly set
###how do get dq?
# irestart newgrid xmrmin xmrmaxi
# 4910 1 0.5811842188 0.603
# Cell widths; NOTE(review): dq[0] and dq[1] both equal xm[1]-xm[0] — the
# first entry looks like a placeholder for the innermost cell; confirm.
dq=[xm[1]-xm[0]]
#xm[0] center
for k in range(1,len(xm)):
    dq.append(xm[k]-xm[k-1])
# Count cells in the ingestion region near the surface:
#if(xm(j).ge.xmrmax-4.d-4)then
counter=0
for k in range(len(xm)):
    if xm[k]>=(xm[-1]-4e-4 ):
        counter+=1
print 'Number of cells in which H will be ingested:'
print counter
######Write restart cycle here#########
name='e2D14_hif.0077501.restart.h5'
f=sw.startfile(name)
#Same header as in se files but with modified number of isotopes, coming from restart0004910.check
f.write_hattr(hattr_name=hattrs+['zisnb'],hattr_val=hattrs_data+[len(shells_massfrac[0])])
#A,Z, isomeric state, mass,iso_massf is all needed in restart file (except header)
f.write_table('A',A)
f.write_table('Z',Z)
f.write_table('isomeric_state',isomeric_state)
# Sanity check: grid size vs. number of abundance shells.
print 'lengths ',len(xm),len(shells_massfrac)
#right units? since im writing out info header_unit + hattrs from se > consistent
f.write_dcol(cycle_all,dcol_name=['mass']+['iso_massf'],dcol_val=[xm]+[shells_massfrac])
#write column attr,really needed: "shellnb", but write out more,e.g. age
f.write_cattr(cycle_all,cattr_name=cattrs,cattr_val=cattrs_data)
|
import pickle as pk
from ToolScripts.TimeLogger import log
import os
# import scipy.sparse as sp
def loadData2(datasetStr, cv):
    """Load the Tianchi_time implicit-feedback dataset for fold *cv*.

    Returns (interaction matrix, trust matrix, test data). A user-item
    pair counts as an interaction (value 1) if any of the four behaviours
    (pv / cart / fav / buy) occurred.

    Fix: the original appended a no-op ``.format(cv)`` to every literal
    file name (the strings contain no placeholders) — misleading dead code,
    removed; the repeated open/pickle boilerplate is factored into a helper.
    """
    assert datasetStr == "Tianchi_time"
    DIR = os.path.join(os.path.dirname(os.getcwd()), "dataset", datasetStr, 'implicit', "cv{0}".format(cv))

    def _load(name):
        # Files carry a .csv suffix but actually hold pickled objects.
        with open(DIR + '/' + name, 'rb') as fs:
            return pk.load(fs)

    pvTimeMat = _load('pvTime.csv')
    cartTimeMat = _load('cartTime.csv')
    favTimeMat = _load('favTime.csv')
    buyTimeMat = _load('buyTime.csv')
    test_data = _load('test_data.csv')
    # 1 where any behaviour happened, else 0.
    interatctMat = ((pvTimeMat + cartTimeMat + favTimeMat + buyTimeMat) != 0) * 1
    trust = _load('trust.csv')
    return interatctMat, trust, test_data
def loadData(datasetStr, cv):
    """Load (train matrix, trust matrix, test data) for *datasetStr*, fold *cv*.

    Tianchi_time is delegated to loadData2. valid_data and train_time are
    read (so those files must exist) but are not returned, exactly as in
    the original.
    """
    if datasetStr == "Tianchi_time":
        return loadData2(datasetStr, cv)
    DIR = os.path.join(os.path.dirname(os.getcwd()), "dataset", datasetStr, 'implicit', "cv{0}".format(cv))
    log(DIR)
    loaded = []
    for fname in ('/train.csv', '/test_data.csv', '/valid_data.csv', '/train_time.csv', '/trust.csv'):
        with open(DIR + fname, 'rb') as fs:
            loaded.append(pk.load(fs))
    trainMat, testData, validData, trainTimeMat, trustMat = loaded
    return trainMat, trustMat, testData
|
from django.db import models
from django.contrib.auth.models import User
class Preference(models.Model):
    """A named preference/category that users can select and images are tagged with."""
    # Human-readable preference name.
    name = models.CharField(max_length = 50)

    def __str__(self):
        return self.name
class Image(models.Model):
    """An uploaded photo tagged with exactly one Preference."""
    photo = models.ImageField(null=True,blank=True,upload_to='media/')
    # Deleting a Preference deletes its images (CASCADE).
    tag = models.ForeignKey(Preference,on_delete = models.CASCADE)

    def __str__(self):
        # Strip the leading 'media/' (6 chars) from the stored file name.
        return self.photo.name[6:]
class UserProfile(models.Model):
    """Per-user profile holding the user's selected preferences."""
    user = models.OneToOneField(User,on_delete = models.CASCADE)
    choices = models.ManyToManyField(Preference)

    def __str__(self):
        return self.user.username
|
#__author: "Jing Xu"
#date: 2018/1/29
'''
Python中一切事物都是对象
obj是对象,Foo是类
Foo类也是一个对象,type的对象
声明了一个类
def func(self):
print("123")
Foo = type("Foo",(object,), {"func": function})
'''
class MyType(type):
    """Demo metaclass that traces class creation and instantiation."""
    def __init__(self, *args, **kwargs):
        # Runs when a class using this metaclass is defined.
        print("123")
    def __call__(self, *args, **kwargs):
        # Intercepts Foo(...); it does NOT delegate to type.__call__, so no
        # instance is ever created and the call returns None.
        print("456")
class Foo(object, metaclass=MyType):
    def __init__(self):
        pass
    def __new__(cls, *args, **kwargs):
        # NOTE(review): never reached — MyType.__call__ short-circuits
        # instantiation, so Foo() does not invoke this __new__.
        return "object"
    def func(self):
        print("hello world")
obj = Foo()  # prints "456"; obj is None because MyType.__call__ returns nothing
word = input()
password = ''
# Single-pass "leetspeak" substitution — equivalent to the original chain of
# .replace() calls (no substituted character is itself a later source char),
# with the fixed 'q*s' suffix appended.
j = word.translate(str.maketrans('aimBo', '@!M8.')) + 'q*s'
print(j)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/dict.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shims emitted by pyuic4: fall back gracefully when the
# running PyQt4 build lacks QString.fromUtf8 / UnicodeUTF8 (API v2).
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_dict_form(object):
    """pyuic4-generated UI builder for the dictionary editor window.

    Do not hand-edit behaviour here: regenerating from ui/dict.ui will
    overwrite this file (see the header warning).
    """
    def setupUi(self, dict_form):
        # Layout: word list on the left; search line over a 3x2 button grid
        # (Save/Sort, Add/Undo, Delete/Exit) on the right.
        dict_form.setObjectName(_fromUtf8("dict_form"))
        dict_form.resize(504, 318)
        self.horizontalLayout = QtGui.QHBoxLayout(dict_form)
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.dict_view = QtGui.QListWidget(dict_form)
        self.dict_view.setMinimumSize(QtCore.QSize(300, 300))
        self.dict_view.setObjectName(_fromUtf8("dict_view"))
        self.horizontalLayout.addWidget(self.dict_view)
        self.verticalLayout = QtGui.QVBoxLayout()
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.search_line = QtGui.QLineEdit(dict_form)
        self.search_line.setMaximumSize(QtCore.QSize(300, 16777215))
        self.search_line.setObjectName(_fromUtf8("search_line"))
        self.verticalLayout.addWidget(self.search_line)
        self.gridLayout = QtGui.QGridLayout()
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.add_button = QtGui.QPushButton(dict_form)
        self.add_button.setMaximumSize(QtCore.QSize(100, 16777215))
        self.add_button.setObjectName(_fromUtf8("add_button"))
        self.gridLayout.addWidget(self.add_button, 1, 0, 1, 1)
        self.save_button = QtGui.QPushButton(dict_form)
        self.save_button.setMaximumSize(QtCore.QSize(100, 16777215))
        self.save_button.setObjectName(_fromUtf8("save_button"))
        self.gridLayout.addWidget(self.save_button, 0, 0, 1, 1)
        self.undo_button = QtGui.QPushButton(dict_form)
        self.undo_button.setMaximumSize(QtCore.QSize(100, 16777215))
        self.undo_button.setObjectName(_fromUtf8("undo_button"))
        self.gridLayout.addWidget(self.undo_button, 1, 1, 1, 1)
        self.sort_button = QtGui.QPushButton(dict_form)
        self.sort_button.setMaximumSize(QtCore.QSize(100, 16777215))
        self.sort_button.setObjectName(_fromUtf8("sort_button"))
        self.gridLayout.addWidget(self.sort_button, 0, 1, 1, 1)
        self.exit_button = QtGui.QPushButton(dict_form)
        self.exit_button.setMaximumSize(QtCore.QSize(100, 16777215))
        self.exit_button.setObjectName(_fromUtf8("exit_button"))
        self.gridLayout.addWidget(self.exit_button, 2, 1, 1, 1)
        self.delete_button = QtGui.QPushButton(dict_form)
        self.delete_button.setMaximumSize(QtCore.QSize(100, 16777215))
        self.delete_button.setObjectName(_fromUtf8("delete_button"))
        self.gridLayout.addWidget(self.delete_button, 2, 0, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout)
        self.horizontalLayout.addLayout(self.verticalLayout)
        self.retranslateUi(dict_form)
        QtCore.QMetaObject.connectSlotsByName(dict_form)

    def retranslateUi(self, dict_form):
        # User-visible strings are set here so i18n can re-run them alone.
        dict_form.setWindowTitle(_translate("dict_form", "Dictionary", None))
        self.add_button.setText(_translate("dict_form", "Add", None))
        self.save_button.setText(_translate("dict_form", "Save", None))
        self.undo_button.setText(_translate("dict_form", "Undo", None))
        self.sort_button.setText(_translate("dict_form", "Sort", None))
        self.exit_button.setText(_translate("dict_form", "Exit", None))
        self.delete_button.setText(_translate("dict_form", "Delete", None))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.