| blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
22b52baa97f6c9f43ec6a0fe1d14cca0d53bcb29
|
a479a5773fd5607f96c3b84fed57733fe39c3dbb
|
/napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/__init__.py
|
d906df51b8afd5b73162886273b14cd1ac454c87
|
[
"Apache-2.0"
] |
permissive
|
napalm-automation/napalm-yang
|
839c711e9294745534f5fbbe115e0100b645dbca
|
9148e015b086ebe311c07deb92e168ea36fd7771
|
refs/heads/develop
| 2021-01-11T07:17:20.226734
| 2019-05-15T08:43:03
| 2019-05-15T08:43:03
| 69,226,025
| 65
| 64
|
Apache-2.0
| 2019-05-15T08:43:24
| 2016-09-26T07:48:42
|
Python
|
UTF-8
|
Python
| false
| false
| 18,958
|
py
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class prefix_limit(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/l2vpn-vpls/prefix-limit. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configure the maximum number of prefixes that will be
accepted from a peer
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "prefix-limit"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"peer-groups",
"peer-group",
"afi-safis",
"afi-safi",
"l2vpn-vpls",
"prefix-limit",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/config (container)
YANG Description: Configuration parameters relating to the prefix
limit for the AFI-SAFI
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to the prefix
limit for the AFI-SAFI
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state (container)
YANG Description: State information relating to the prefix-limit for the
AFI-SAFI
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State information relating to the prefix-limit for the
AFI-SAFI
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
from . import config
from . import state
class prefix_limit(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/l2vpn-vpls/prefix-limit. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configure the maximum number of prefixes that will be
accepted from a peer
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "prefix-limit"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"peer-groups",
"peer-group",
"afi-safis",
"afi-safi",
"l2vpn-vpls",
"prefix-limit",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/config (container)
YANG Description: Configuration parameters relating to the prefix
limit for the AFI-SAFI
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to the prefix
limit for the AFI-SAFI
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state (container)
YANG Description: State information relating to the prefix-limit for the
AFI-SAFI
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_vpls/prefix_limit/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State information relating to the prefix-limit for the
AFI-SAFI
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
|
[
"dbarrosop@dravetech.com"
] |
dbarrosop@dravetech.com
|
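A quick usage sketch for the generated container above (hypothetical: it assumes napalm-yang is installed and that the generated `config` subclass exposes the OpenConfig `max-prefixes` leaf as `max_prefixes`):

```python
# Hypothetical usage of the generated prefix_limit container.
from napalm_yang.models.openconfig.network_instances.network_instance.protocols.protocol.bgp.peer_groups.peer_group.afi_safis.afi_safi.l2vpn_vpls.prefix_limit import prefix_limit

pl = prefix_limit()
print(pl._path())                # YANG path down to "prefix-limit"
pl.config.max_prefixes = 100     # leaf name assumed from the OpenConfig model
print(pl.config.max_prefixes)
```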
82666241fc5a8c6f26b68e9a4ac5dc292ae7288b
|
ef2657fb5bed5774f5ca1ee605db3075166acfdf
|
/server.py
|
42d22a0cf975179ec19f291a2b0614186502dc55
|
[] |
no_license
|
MaksKolodii/Laba_3
|
cf60229152028c13a11bbf22b82415cd2c99bae5
|
d008296a4b65c57543bf7f17a03a1150234a8748
|
refs/heads/master
| 2020-09-23T23:14:48.927796
| 2019-12-03T12:22:34
| 2019-12-03T12:22:34
| 225,612,398
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 765
|
py
|
import socket
import time
host = socket.gethostbyname(socket.gethostname())
port = 4050
clients = []
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((host, port))
quit = False
print("[ Server started ]")
while not quit:
    try:
        data, addr = s.recvfrom(1024)
        if addr not in clients:
            clients.append(addr)              # remember every sender
        itsatime = time.strftime("%Y-%m-%d-%H.%M.%S", time.localtime())
        print("[" + addr[0] + "]=[" + str(addr[1]) + "]=[" + itsatime + "]/", end="")
        print(data.decode("utf-8"))
        for client in clients:                # relay to everyone except the sender
            if addr != client:
                s.sendto(data, client)
    except KeyboardInterrupt:                 # Ctrl+C shuts the server down
        print("\n[ Server stopped ]")
        quit = True
        s.close()
|
[
"noreply@github.com"
] |
MaksKolodii.noreply@github.com
|
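A minimal companion client for the relay server above (a sketch: host and port mirror the server's defaults, and the message handling is illustrative):

```python
import socket
import threading

server = (socket.gethostbyname(socket.gethostname()), 4050)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto("hi".encode("utf-8"), server)   # first datagram registers this client

def listen():
    # Print everything the server relays from other clients
    while True:
        data, _ = sock.recvfrom(1024)
        print(data.decode("utf-8"))

threading.Thread(target=listen, daemon=True).start()
while True:
    sock.sendto(input().encode("utf-8"), server)
```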
05f33c528960d55f2689d4225198a59fd0202a5e
|
e7bba3dd662bf2778c36a406f72ee93b2ea05e11
|
/CardinalityEstimationTestbed/Overall/neurocard/job_2/masking.py
|
655c05a601a00c96603aa0b87d98acadd79ce3c2
|
[] |
no_license
|
TsinghuaDatabaseGroup/AI4DBCode
|
37e45b176bc94e77fe250ea45f0ad7b9054c7f11
|
a8989bfadcf551ee1dee2aec57ef6b2709c9f85d
|
refs/heads/master
| 2023-07-07T05:42:15.590000
| 2023-07-04T01:04:15
| 2023-07-04T01:04:15
| 217,175,047
| 53
| 35
| null | 2023-06-20T13:00:17
| 2019-10-24T00:03:14
|
Scala
|
UTF-8
|
Python
| false
| false
| 7,697
|
py
|
"""Masking module: random input masking, table masking, etc."""
import common
import numpy as np
import torch
# TODO: refactor made.py to use this class.
class Masking(object):
"""Column masking logic."""
@classmethod
def Params(cls):
p = {}
p['draw_dropout_per_col'] = False
p['per_row_dropout'] = False
return p
def __init__(self, params):
for k, v in params.items():
setattr(self, k, v)
# Optimization.
self._constant_ones_cache = {}
def input_mask(self, x, is_training):
"""Calculates a random input mask for training.
Args:
x: input tokens, shaped [batch_size, num_cols].
is_training: bool.
Returns:
        batch_mask: bools, shaped [batch_size, num_cols, 1], where 1 means
        use the original representation and 0 means use the MASK representation.
        During inference this should be all 1s.
"""
assert x.ndim == 2, x.shape
if not is_training:
# During inference, short-circuit to immediately return 1s.
return torch.ones_like(x).unsqueeze(2)
if self.table_dropout:
return self._table_dropout(x, is_training)
return self._vanilla_dropout(x, is_training)
def _get_cached_constant_ones(self, shape, device):
"""Returns a cached all-one tensor with desired shape and device."""
key = (shape, device)
if key not in self._constant_ones_cache:
self._constant_ones_cache[key] = torch.ones(*shape, device=device)
return self._constant_ones_cache[key]
def _table_dropout(self, x, is_training):
bs, inp_seq_len = x.shape
kOnes = self._get_cached_constant_ones((bs, 1), x.device)
if self.per_row_dropout:
# NOTE: torch.rand* funcs on GPU are ~4% slower than
# generating them on CPU via np.random.
num_dropped_tables = np.random.randint(1, self.num_joined_tables,
(bs, 1)).astype(np.float32,
copy=False)
table_dropped = np.random.rand(bs, self.num_joined_tables) <= (
num_dropped_tables / self.num_joined_tables)
if self.table_primary_index is not None:
table_dropped[:, self.table_primary_index] = False
normal_drop_rands = np.random.rand(bs, inp_seq_len)
table_dropped = table_dropped.astype(np.float32, copy=False)
else:
# 1 means drop that table.
num_dropped_tables = np.random.randint(1, self.num_joined_tables)
table_dropped = np.random.rand(
self.num_joined_tables
) <= num_dropped_tables / self.num_joined_tables
if self.table_primary_index is not None:
table_dropped[self.table_primary_index] = False
table_dropped = table_dropped.astype(np.float32, copy=False)
batch_masks = []
for i in range(inp_seq_len):
# Table dropout. Logic:
# First, draw the tables to be dropped.
# If a table T is dropped:
# Drop its content columns & indicator only.
# Don't drop its fanout.
# Otherwise:
            # Uniformly draw the number of content columns to drop.
# Don't drop its indicator.
# Drop its fanout.
table_index = self.table_indexes[i]
if self.per_row_dropout:
# table_dropped[table_index]: shaped [BS, 1]
# elem 0 : True
# elem 1 : True
# elem 2 : False, etc.
is_content = float(
self.table_column_types[i] == common.TYPE_NORMAL_ATTR)
is_fanout = float(
self.table_column_types[i] == common.TYPE_FANOUT)
use_unk = table_dropped[:, table_index]
if is_fanout:
# Column i is a fanout column. Drop iff table not dropped.
batch_mask = torch.tensor(use_unk).float().unsqueeze(1).to(
x.device)
else:
# Handle batch elements where this table is not dropped.
normal_drop_prob = np.random.randint(
0, self.table_num_columns[table_index] + 1,
(bs,)) * 1. / self.table_num_columns[table_index]
normal_drop = normal_drop_rands[:, i] <= normal_drop_prob
# Make sure we drop content only.
normal_drop = normal_drop * is_content
not_dropped_pos = (use_unk == 0.0)
use_unk[not_dropped_pos] = normal_drop[not_dropped_pos]
# Shaped [bs, 1].
batch_mask = torch.as_tensor(1.0 - use_unk).unsqueeze(1).to(
x.device)
else:
# Make decisions for entire batch.
if table_dropped[table_index]:
# Drop all its normal attributes + indicator. Don't drop
# fanout.
batch_mask = torch.clamp(
torch.dropout(
kOnes,
p=1.0 -
(self.table_column_types[i] == common.TYPE_FANOUT),
train=is_training), 0, 1)
else:
                    # Drop each normal attribute with a drawn probability.
# Don't drop indicator.
# Drop fanout.
drop_p = 0.0
if self.table_column_types[i] == common.TYPE_NORMAL_ATTR:
# Possible to drop all columns of this
# table (it participates in join but no
# attributes are filtered).
drop_p = np.random.randint(
0, self.table_num_columns[table_index] +
1) / self.table_num_columns[table_index]
elif self.table_column_types[i] == common.TYPE_FANOUT:
drop_p = 1.0
batch_mask = torch.clamp(
torch.dropout(kOnes, p=drop_p, train=is_training), 0, 1)
batch_masks.append(batch_mask)
# [bs, num cols, 1].
return torch.cat(batch_masks, 1).unsqueeze(-1)
def _vanilla_dropout(self, x, is_training):
bs, inp_seq_len = x.shape
if self.draw_dropout_per_col:
kOnes = self._get_cached_constant_ones((bs, 1, 1), x.device)
vecs = []
for _ in range(inp_seq_len):
vecs.append(
torch.dropout(kOnes,
p=np.random.randint(0, inp_seq_len) /
inp_seq_len,
train=is_training))
dropout_vec = torch.cat(vecs, dim=1)
else:
kOnes = self._get_cached_constant_ones((bs, inp_seq_len, 1),
x.device)
dropout_vec = torch.dropout(kOnes,
p=np.random.randint(0, inp_seq_len) /
inp_seq_len,
train=is_training)
# During training, non-dropped 1's are scaled by 1/(1-p), so we
# clamp back to 1. Shaped [bs, num cols, 1].
batch_mask = torch.clamp(dropout_vec, 0, 1)
return batch_mask
|
[
"zhouxuan19@mails.tsinghua.edu.cn"
] |
zhouxuan19@mails.tsinghua.edu.cn
|
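The `_vanilla_dropout` path above reduces to a small standalone computation. Here is a sketch of that core idea (assumes `torch` and `numpy` are available; the function name and shapes are illustrative, following the docstring):

```python
import numpy as np
import torch

def vanilla_input_mask(bs, num_cols, is_training=True):
    """Draw one dropout rate for the whole batch and return a 0/1 mask
    shaped [bs, num_cols, 1], mirroring Masking._vanilla_dropout above."""
    ones = torch.ones(bs, num_cols, 1)
    p = np.random.randint(0, num_cols) / num_cols
    # torch.dropout scales survivors by 1/(1-p); clamp back to exact 1s.
    return torch.clamp(torch.dropout(ones, p=p, train=is_training), 0, 1)

print(vanilla_input_mask(2, 5).squeeze(-1))
```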
e13a78dc8cf4b2b7293983675af39a87fb170d1a
|
b4c6e6f8529c7f15b9c0bca94ea7e14ed6ee60cf
|
/presentacion01/asgi.py
|
6c701404908a41a85dfe78f48f4183c26638aefb
|
[] |
no_license
|
03PatriciaG/presentacion
|
454b038feb1dae7fb56b023d7dedfcbee83ed1ac
|
caae2dc0133b382bc0d81ed37e958d44aaf939a0
|
refs/heads/master
| 2023-03-08T04:35:38.484349
| 2021-02-25T18:49:45
| 2021-02-25T18:49:45
| 341,394,089
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
"""
ASGI config for presentacion01 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'presentacion01.settings')
application = get_asgi_application()
|
[
"patricia.garcia.almanza@gmail.com"
] |
patricia.garcia.almanza@gmail.com
|
3e8a521b5a0feede72017331fb4fb3d85fc35344
|
3972bfc4b9c5774f61f82dc6de036cebb96c9e07
|
/src/crawl_whichuni_universities.py
|
0587945078fe40a65bb993922280f2dffec2d7b6
|
[] |
no_license
|
devm1023/GeekTalentDB
|
0ce2e9733360bdc66626daaaceed2d8672205df7
|
a8035b9815b98de4f5f45f83291d6889b080df14
|
refs/heads/develop
| 2021-06-19T21:37:47.898323
| 2019-09-12T08:15:37
| 2019-09-12T08:15:37
| 211,267,986
| 0
| 0
| null | 2021-05-06T19:42:08
| 2019-09-27T08:04:50
|
Python
|
UTF-8
|
Python
| false
| false
| 6,264
|
py
|
"""Crawl public whichuni subjects
"""
from whichuni_university_crawler import WhichUniUniversityCrawler
from tor_crawler import TorCrawler
from parse_datetime import parse_datetime
from logger import Logger
import argparse
class WhichUniUniversityTorCrawler(TorCrawler, WhichUniUniversityCrawler):
def __init__(self, site='whichuni', nproxies=1, tor_base_port=13000,
tor_timeout=60, tor_retries=3, **kwargs):
TorCrawler.__init__(self, site,
nproxies=nproxies,
tor_base_port=tor_base_port,
tor_timeout=tor_timeout,
tor_retries=tor_retries)
kwargs['proxies'] = self.proxies
WhichUniUniversityCrawler.__init__(self, site=site, **kwargs)
self.share_proxies = False
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--proxies-from', default=None,
help='Load proxy URLs from text file. '
'Uses Tor if not specified.')
parser.add_argument('--max-fail-count', type=int, default=10,
help='Maximum number of failed crawls before '
'giving up. Default: 10')
parser.add_argument('--types', default=None,
help='Comma-separated list of page types to crawl. '
'If not specified all pages are crawled.')
parser.add_argument('--exclude-types', default=None,
help='Comma-separated list of page types to exclude '
'from crawl.')
parser.add_argument('--recrawl',
help='Recrawl URLs with a timestamp earlier than '
'RECRAWL.')
parser.add_argument('--limit', type=int,
help='Maximum number of URLs to crawl.')
parser.add_argument('--urls-from',
help='Text file holding the URLs to crawl.')
parser.add_argument('--jobs', type=int, default=1,
help='Number of parallel jobs. Default: 1')
parser.add_argument('--batch-size', type=int, default=None,
help='Max. number of URLs to crawl in one batch. '
'Computed from crawl rate if omitted.')
parser.add_argument('--batch-time', type=int, default=600,
help='Max. time (in secs) to crawl one batch. '
'Default: 600')
parser.add_argument('--crawl-rate', type=float, default=None,
help='Desired number of requests per second.')
parser.add_argument('--request-timeout', type=int, default=30,
help='Timeout for requests in secs. Default: 30')
parser.add_argument('--tor-proxies', type=int, default=None,
help='Number of Tor proxies to start. Default: 1')
parser.add_argument('--tor-base-port', type=int, default=13000,
help='Smallest port for Tor proxies. Default: 13000')
parser.add_argument('--tor-timeout', type=int, default=60,
help='Timeout in secs for starting Tor process. '
'Default: 60')
parser.add_argument('--tor-retries', type=int, default=3,
help='Number of retries for starting Tor process. '
'Default: 3')
args = parser.parse_args()
if args.crawl_rate is None and args.batch_size is None:
print('You must specify --crawl-rate or --batch-size.')
raise SystemExit()
recrawl = None
if args.recrawl is not None:
recrawl = parse_datetime(args.recrawl)
types = None
if args.types:
types = args.types.split(',')
exclude_types = None
if args.exclude_types:
exclude_types = args.exclude_types.split(',')
batch_size = args.batch_size
if batch_size is None:
batch_size = int(args.crawl_rate*args.batch_time/args.jobs*1.5)
logger = Logger()
# header recommended by shader.io
headers = {
'Accept-Encoding' : 'gzip, deflate, br',
'User-Agent' : 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0',
        'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language' : 'en-US,en;q=0.5',
'Connection' : 'keep-alive',
}
request_args = {'headers' : headers}
proxies = []
if args.proxies_from is not None:
with open(args.proxies_from, 'r') as inputfile:
for line in inputfile:
line = line.strip()
if not line or line.startswith('#'):
continue
proxy = tuple(line.split())
if len(proxy) != 2:
raise ValueError('Invalid line in proxy file: {0:s}' \
.format(repr(line)))
proxies.append(proxy)
if not args.tor_proxies:
crawler = WhichUniUniversityCrawler(
proxies=proxies,
crawl_rate=args.crawl_rate,
request_args=request_args,
request_timeout=args.request_timeout,
urls_from=args.urls_from,
recrawl=recrawl,
types=types,
exclude_types=exclude_types,
limit=args.limit,
max_fail_count=args.max_fail_count,
jobs=args.jobs,
batch_size=batch_size,
batch_time=args.batch_time,
logger=logger)
else:
crawler = WhichUniUniversityTorCrawler(
nproxies=args.tor_proxies,
crawl_rate=args.crawl_rate,
request_args=request_args,
request_timeout=args.request_timeout,
urls_from=args.urls_from,
recrawl=recrawl,
types=types,
exclude_types=exclude_types,
limit=args.limit,
max_fail_count=args.max_fail_count,
jobs=args.jobs,
batch_size=batch_size,
batch_time=args.batch_time,
tor_base_port=args.tor_base_port,
tor_timeout=args.tor_timeout,
tor_retries=args.tor_retries,
logger=logger)
crawler.crawl()
|
[
"devm1023@gmail.com"
] |
devm1023@gmail.com
|
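When `--batch-size` is omitted, the script above derives it from the crawl rate. A worked example of that formula (the values here are illustrative):

```python
# batch_size = int(crawl_rate * batch_time / jobs * 1.5)
crawl_rate, batch_time, jobs = 0.5, 600, 2   # 0.5 req/s, 600 s batches, 2 jobs
batch_size = int(crawl_rate * batch_time / jobs * 1.5)
print(batch_size)   # 225 URLs per job per batch (150 expected crawls + 50% slack)
```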
889b8cb815be7341ffd84d3115f87063a227f2ce
|
3685aff583c31f525b7e01eb1c78b90e54db4dce
|
/Frontend/searchpopupmenu.py
|
f8f5081c9b7eef323d4cfe9594533cb0b02d9959
|
[] |
no_license
|
KeerthanG01/Capstone_Project
|
044cf1a7f3beab4e667c169c4890bae2f73e5987
|
abaafd11e80c599ce1f4cafa666c9129b13615f4
|
refs/heads/main
| 2023-04-07T12:01:20.530333
| 2021-04-18T16:03:07
| 2021-04-18T16:03:07
| 359,170,289
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,478
|
py
|
from kivymd.uix.dialog import MDInputDialog
from kivy.app import App
import sqlite3
class SearchPopupMenu(MDInputDialog):
title = "Search by Location"
text_button_ok = "Search"
def __init__(self):
super().__init__()
self.size_hint = [.9,.3]
self.events_callback = self.callback
def callback(self,*args):
location = ""
location = self.text_field.text
if location == "":
app = App.get_running_app()
my_label = app.root.ids.label
my_label.text = "Location not Provided"
my_label.size_hint = [0.2,0.1]
print("Location not provided")
else:
location = location[0].upper() + location[1:].lower()
print(location)
self.geocode(location)
def geocode(self,location):
geo_location = None
app = App.get_running_app()
sql_statement = "SELECT * FROM Locations Where Location='%s'"%location
app.cursor.execute(sql_statement)
geo_location = app.cursor.fetchall()
if len(geo_location) == 0:
app = App.get_running_app()
my_label = app.root.ids.label
my_label.text = "Data not available for " + location
my_label.size_hint = [0.2,0.1]
print("No location data available")
else:
my_label = app.root.ids.label
my_label.text = location
my_label.size_hint = [0.2,0.1]
self.set_lat_lon(geo_location)
def set_lat_lon(self,geo_location):
latitude = geo_location[0][1]
longitude = geo_location[0][2]
app = App.get_running_app()
mapview = app.root.ids.mapview
mapview.center_on(latitude, longitude)
|
[
"keerthan.g2011@gmail.com"
] |
keerthan.g2011@gmail.com
|
029407d1e14c7fc1ed8abcb69b7c10f34a00efdd
|
c1fab9248da3d60b47f786961eb4242f43aa8667
|
/mjjeon/bj_dp/2156.py
|
9d29b10fcc3b56e29c68f8a141781a9a6942a858
|
[] |
no_license
|
justzino/algorithm
|
5ac5757a97a800c4568a9edc3d618c68dd533168
|
4c49d03e15db7f317dd0d29247b75c89fb9df310
|
refs/heads/master
| 2023-02-25T18:08:30.435570
| 2021-01-25T09:40:24
| 2021-01-25T09:40:24
| 284,649,367
| 0
| 0
| null | 2020-08-14T08:50:50
| 2020-08-03T08:47:32
| null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
# Runtime error
import sys
n = int(sys.stdin.readline())
numbers = [0] * n
D = [0] * (n+1)
for i in range(n):
numbers[i] = int(sys.stdin.readline())
D[0] = 0
if n >= 1:
    D[1] = numbers[0]
if n >= 2:   # guard: writing D[2] unconditionally crashes for n < 2
    D[2] = numbers[0] + numbers[1]
for i in range(3, n+1):
a = D[i-1]
b = D[i-2] + numbers[i-1]
c = D[i-3] + numbers[i-2] + numbers[i-1]
D[i] = max(a, b, c)
print(D[n])
|
[
"mseagle2023@gmail.com"
] |
mseagle2023@gmail.com
|
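The recurrence above is the classic take-at-most-two-in-a-row maximization. A function form with the well-known sample (6 glasses 6 10 13 9 8 1 giving 33) as a sanity check:

```python
def max_total(numbers):
    n = len(numbers)
    D = [0] * (n + 1)
    if n >= 1:
        D[1] = numbers[0]
    if n >= 2:
        D[2] = numbers[0] + numbers[1]
    for i in range(3, n + 1):
        # skip glass i, or take i after skipping i-1,
        # or take i and i-1 after skipping i-2
        D[i] = max(D[i - 1],
                   D[i - 2] + numbers[i - 1],
                   D[i - 3] + numbers[i - 2] + numbers[i - 1])
    return D[n]

assert max_total([6, 10, 13, 9, 8, 1]) == 33
```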
ac3dff350b1edbf449f6c7c4bb273a5ec443c410
|
b91721113fcea40545e8f12ce7a58fb4f9e19bfb
|
/tests/test_parametric_components/test_DivertorITER.py
|
23f034db3fccb30dc747aa8c51f5be8020157be1
|
[
"MIT"
] |
permissive
|
shen3443/paramak
|
91f217d57dc5fb6e1c68e0c73b1f102e87c05159
|
7f5acbd65f904a1ccb527e061a3b7e3f86b8a26f
|
refs/heads/main
| 2022-12-30T15:41:20.683250
| 2020-10-10T10:26:54
| 2020-10-10T10:26:54
| 304,412,355
| 0
| 0
| null | 2020-10-15T18:17:15
| 2020-10-15T18:17:14
| null |
UTF-8
|
Python
| false
| false
| 684
|
py
|
import paramak
import unittest
class test_DivertorITER(unittest.TestCase):
def test_DivertorITER_creation(self):
"""creates an ITER-type divertor using the ITERtypeDivertor parametric component and
checks that a cadquery solid is created"""
test_shape = paramak.ITERtypeDivertor()
assert test_shape.solid is not None
def test_DivertorITER_STP_export(self):
"""creates an ITER-type divertor using the ITERtypeDivertor parametric component and
checks that an stp file of the shape can be exported using the export_stp method"""
test_shape = paramak.ITERtypeDivertor()
test_shape.export_stp("tests/ITER_div")
|
[
"jonathan.shimwell@ukaea.uk"
] |
jonathan.shimwell@ukaea.uk
|
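A standalone version of what these two tests exercise (a sketch: assumes paramak is installed and the output path is writable; both calls mirror the test bodies above):

```python
import paramak

shape = paramak.ITERtypeDivertor()
assert shape.solid is not None       # a CadQuery solid was created
shape.export_stp("iter_divertor")    # writes an STP file, as in the test
```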
86a34702cbedf1d0188e48f2c70efe8600be0e29
|
78cafe3c91ffa777c4b78483b462620941b632d6
|
/pyamf/remoting/__init__.py
|
c0b0ce096170de11176fd86f7c14e2e0291ece10
|
[] |
no_license
|
haabaato/pyamf_chatroom
|
d6dd38b212f194d6604ef928437a162b4f45178a
|
cebbb362929b21933c6da2a61b4e5840863876ba
|
refs/heads/master
| 2016-08-04T20:27:26.218467
| 2010-07-21T13:19:25
| 2010-07-21T13:19:25
| 726,257
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,542
|
py
|
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
AMF Remoting support.
A Remoting request from the client consists of a short preamble, headers, and
bodies. The preamble contains basic information about the nature of the
request. Headers can be used to request debugging information, send
authentication info, tag transactions, etc. Bodies contain actual Remoting
requests and responses. A single Remoting envelope can contain several
requests; Remoting supports batching out of the box.
Client headers and bodies need not be responded to in a one-to-one manner.
That is, a body or header may not require a response. Debug information is
requested by a header but sent back as a body object. The response index is
therefore essential for the Adobe Flash Player to understand the response.
@see: U{Remoting Envelope on OSFlash (external)
<http://osflash.org/documentation/amf/envelopes/remoting>}
@see: U{Remoting Headers on OSFlash (external)
<http://osflash.org/amf/envelopes/remoting/headers>}
@see: U{Remoting Debug Headers on OSFlash (external)
<http://osflash.org/documentation/amf/envelopes/remoting/debuginfo>}
@since: 0.1.0
"""
import pyamf
from pyamf import util
__all__ = ['Envelope', 'Request', 'Response', 'decode', 'encode']
#: Successful call.
STATUS_OK = 0
#: Reserved for runtime errors.
STATUS_ERROR = 1
#: Debug information.
STATUS_DEBUG = 2
#: List of available status response codes.
STATUS_CODES = {
STATUS_OK: '/onResult',
STATUS_ERROR: '/onStatus',
STATUS_DEBUG: '/onDebugEvents'
}
#: AMF mimetype.
CONTENT_TYPE = 'application/x-amf'
ERROR_CALL_FAILED, = range(1)
ERROR_CODES = {
ERROR_CALL_FAILED: 'Server.Call.Failed'
}
APPEND_TO_GATEWAY_URL = 'AppendToGatewayUrl'
REPLACE_GATEWAY_URL = 'ReplaceGatewayUrl'
REQUEST_PERSISTENT_HEADER = 'RequestPersistentHeader'
class RemotingError(pyamf.BaseError):
"""
Generic remoting error class.
"""
class RemotingCallFailed(RemotingError):
"""
Raised if C{Server.Call.Failed} received.
"""
pyamf.add_error_class(RemotingCallFailed, ERROR_CODES[ERROR_CALL_FAILED])
class HeaderCollection(dict):
"""
Collection of AMF message headers.
"""
def __init__(self, raw_headers={}):
self.required = []
for (k, ig, v) in raw_headers:
self[k] = v
if ig:
self.required.append(k)
def is_required(self, idx):
"""
@raise KeyError: Unknown header found.
"""
if not idx in self:
raise KeyError("Unknown header %s" % str(idx))
return idx in self.required
def set_required(self, idx, value=True):
"""
@raise KeyError: Unknown header found.
"""
if not idx in self:
raise KeyError("Unknown header %s" % str(idx))
if not idx in self.required:
self.required.append(idx)
def __len__(self):
return len(self.keys())
class Envelope(object):
"""
I wrap an entire request, encapsulating headers and bodies.
There can be more than one request in a single transaction.
@ivar amfVersion: AMF encoding version. See L{pyamf.ENCODING_TYPES}
@type amfVersion: C{int} or C{None}
@ivar clientType: Client type. See L{ClientTypes<pyamf.ClientTypes>}
@type clientType: C{int} or C{None}
@ivar headers: AMF headers, a list of name, value pairs. Global to each
request.
@type headers: L{HeaderCollection}
@ivar bodies: A list of requests/response messages
@type bodies: L{list} containing tuples of the key of the request and
the instance of the L{Message}
"""
def __init__(self, amfVersion=None, clientType=None):
self.amfVersion = amfVersion
self.clientType = clientType
self.headers = HeaderCollection()
self.bodies = []
def __repr__(self):
r = "<Envelope amfVersion=%s clientType=%s>\n" % (
self.amfVersion, self.clientType)
for h in self.headers:
r += " " + repr(h) + "\n"
for request in iter(self):
r += " " + repr(request) + "\n"
r += "</Envelope>"
return r
def __setitem__(self, name, value):
if not isinstance(value, Message):
raise TypeError("Message instance expected")
idx = 0
found = False
for body in self.bodies:
if name == body[0]:
self.bodies[idx] = (name, value)
found = True
idx = idx + 1
if not found:
self.bodies.append((name, value))
value.envelope = self
def __getitem__(self, name):
for body in self.bodies:
if name == body[0]:
return body[1]
raise KeyError("'%r'" % (name,))
def __iter__(self):
for body in self.bodies:
yield body[0], body[1]
raise StopIteration
def __len__(self):
return len(self.bodies)
def iteritems(self):
for body in self.bodies:
yield body
raise StopIteration
def keys(self):
return [body[0] for body in self.bodies]
def items(self):
return self.bodies
def __contains__(self, name):
for body in self.bodies:
if name == body[0]:
return True
return False
def __eq__(self, other):
if isinstance(other, Envelope):
return (self.amfVersion == other.amfVersion and
self.clientType == other.clientType and
self.headers == other.headers and
self.bodies == other.bodies)
if hasattr(other, 'keys') and hasattr(other, 'items'):
keys, o_keys = self.keys(), other.keys()
if len(o_keys) != len(keys):
return False
for k in o_keys:
if k not in keys:
return False
keys.remove(k)
for k, v in other.items():
if self[k] != v:
return False
return True
class Message(object):
"""
I represent a singular request/response, containing a collection of
headers and one body of data.
I am used to iterate over all requests in the L{Envelope}.
@ivar envelope: The parent envelope of this AMF Message.
@type envelope: L{Envelope}
@ivar body: The body of the message.
@type body: C{mixed}
@ivar headers: The message headers.
@type headers: C{dict}
"""
def __init__(self, envelope, body):
self.envelope = envelope
self.body = body
def _get_headers(self):
return self.envelope.headers
headers = property(_get_headers)
class Request(Message):
"""
An AMF Request payload.
@ivar target: The target of the request
@type target: C{basestring}
"""
def __init__(self, target, body=[], envelope=None):
Message.__init__(self, envelope, body)
self.target = target
def __repr__(self):
return "<%s target=%s>%s</%s>" % (
type(self).__name__, repr(self.target), repr(self.body), type(self).__name__)
class Response(Message):
"""
An AMF Response.
@ivar status: The status of the message. Default is L{STATUS_OK}.
@type status: Member of L{STATUS_CODES}.
"""
def __init__(self, body, status=STATUS_OK, envelope=None):
Message.__init__(self, envelope, body)
self.status = status
def __repr__(self):
return "<%s status=%s>%s</%s>" % (
type(self).__name__, _get_status(self.status), repr(self.body),
type(self).__name__
)
class BaseFault(object):
"""
I represent a C{Fault} message (C{mx.rpc.Fault}).
@ivar level: The level of the fault.
@type level: C{str}
@ivar code: A simple code describing the fault.
@type code: C{str}
@ivar details: Any extra details of the fault.
@type details: C{str}
@ivar description: Text description of the fault.
@type description: C{str}
@see: U{mx.rpc.Fault on Livedocs (external)
<http://livedocs.adobe.com/flex/201/langref/mx/rpc/Fault.html>}
"""
level = None
class __amf__:
static = ('level', 'code', 'type', 'details', 'description')
def __init__(self, *args, **kwargs):
self.code = kwargs.get('code', '')
self.type = kwargs.get('type', '')
self.details = kwargs.get('details', '')
self.description = kwargs.get('description', '')
def __repr__(self):
x = '%s level=%s' % (self.__class__.__name__, self.level)
if self.code not in ('', None):
x += ' code=%s' % repr(self.code)
if self.type not in ('', None):
x += ' type=%s' % repr(self.type)
if self.description not in ('', None):
x += ' description=%s' % repr(self.description)
if self.details not in ('', None):
x += '\nTraceback:\n%s' % (repr(self.details),)
return x
def raiseException(self):
"""
Raises an exception based on the fault object. There is no traceback
available.
"""
raise get_exception_from_fault(self), self.description, None
class ErrorFault(BaseFault):
"""
I represent an error level fault.
"""
level = 'error'
def _read_header(stream, decoder, strict=False):
"""
Read AMF L{Message} header.
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param stream: AMF data.
@type decoder: L{amf0.Decoder<pyamf.amf0.Decoder>}
@param decoder: AMF decoder instance
@type strict: C{bool}
@param strict: Use strict decoding policy. Default is C{False}.
@raise DecodeError: The data that was read from the stream
does not match the header length.
@rtype: C{tuple}
@return:
- Name of the header.
- A C{bool} determining if understanding this header is
required.
- Value of the header.
"""
name_len = stream.read_ushort()
name = stream.read_utf8_string(name_len)
required = bool(stream.read_uchar())
data_len = stream.read_ulong()
pos = stream.tell()
data = decoder.readElement()
if strict and pos + data_len != stream.tell():
raise pyamf.DecodeError(
"Data read from stream does not match header length")
return (name, required, data)
def _write_header(name, header, required, stream, encoder, strict=False):
"""
Write AMF message header.
@type name: C{str}
@param name: Name of the header.
@type header:
@param header: Raw header data.
@type required: L{bool}
@param required: Required header.
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param stream: AMF data.
@type encoder: L{amf0.Encoder<pyamf.amf0.Encoder>}
or L{amf3.Encoder<pyamf.amf3.Encoder>}
@param encoder: AMF encoder instance.
@type strict: C{bool}
@param strict: Use strict encoding policy. Default is C{False}.
"""
stream.write_ushort(len(name))
stream.write_utf8_string(name)
stream.write_uchar(required)
write_pos = stream.tell()
stream.write_ulong(0)
old_pos = stream.tell()
encoder.writeElement(header)
new_pos = stream.tell()
if strict:
stream.seek(write_pos)
stream.write_ulong(new_pos - old_pos)
stream.seek(new_pos)
def _read_body(stream, decoder, strict=False, logger=None):
"""
Read AMF message body.
@param stream: AMF data.
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param decoder: AMF decoder instance.
@type decoder: L{amf0.Decoder<pyamf.amf0.Decoder>}
@param strict: Use strict decoding policy. Default is C{False}.
@type strict: C{bool}
@raise DecodeError: Data read from stream does not match body length.
@param logger: Used to log interesting events whilst reading a remoting
body.
@type logger: A L{logging.Logger} instance or C{None}.
@rtype: C{tuple}
@return: A C{tuple} containing:
- ID of the request
- L{Request} or L{Response}
"""
def _read_args():
"""
@raise pyamf.DecodeError: Array type required for request body.
"""
if stream.read(1) != '\x0a':
raise pyamf.DecodeError("Array type required for request body")
x = stream.read_ulong()
return [decoder.readElement() for i in xrange(x)]
target = stream.read_utf8_string(stream.read_ushort())
response = stream.read_utf8_string(stream.read_ushort())
status = STATUS_OK
is_request = True
for code, s in STATUS_CODES.iteritems():
if not target.endswith(s):
continue
is_request = False
status = code
target = target[:0 - len(s)]
if logger:
logger.debug('Remoting target: %r' % (target,))
data_len = stream.read_ulong()
pos = stream.tell()
if is_request:
data = _read_args()
else:
data = decoder.readElement()
if strict and pos + data_len != stream.tell():
raise pyamf.DecodeError("Data read from stream does not match body "
"length (%d != %d)" % (pos + data_len, stream.tell(),))
if is_request:
return response, Request(target, body=data)
if status == STATUS_ERROR and isinstance(data, pyamf.ASObject):
data = get_fault(data)
return target, Response(data, status)
def _write_body(name, message, stream, encoder, strict=False):
"""
Write AMF message body.
@param name: The name of the request.
@type name: C{basestring}
@param message: The AMF payload.
@type message: L{Request} or L{Response}
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@type encoder: L{amf0.Encoder<pyamf.amf0.Encoder>}
@param encoder: Encoder to use.
@type strict: C{bool}
@param strict: Use strict encoding policy. Default is C{False}.
@raise TypeError: Unknown message type for C{message}.
"""
def _encode_body(message):
if isinstance(message, Response):
encoder.writeElement(message.body)
return
stream.write('\x0a')
stream.write_ulong(len(message.body))
for x in message.body:
encoder.writeElement(x)
if not isinstance(message, (Request, Response)):
raise TypeError("Unknown message type")
target = None
if isinstance(message, Request):
target = unicode(message.target)
else:
target = u"%s%s" % (name, _get_status(message.status))
target = target.encode('utf8')
stream.write_ushort(len(target))
stream.write_utf8_string(target)
response = 'null'
if isinstance(message, Request):
response = name
stream.write_ushort(len(response))
stream.write_utf8_string(response)
if not strict:
stream.write_ulong(0)
_encode_body(message)
return
write_pos = stream.tell()
stream.write_ulong(0)
old_pos = stream.tell()
_encode_body(message)
new_pos = stream.tell()
stream.seek(write_pos)
stream.write_ulong(new_pos - old_pos)
stream.seek(new_pos)
def _get_status(status):
"""
Get status code.
@type status: C{str}
@raise ValueError: The status code is unknown.
@return: Status code.
@see: L{STATUS_CODES}
"""
if status not in STATUS_CODES.keys():
# TODO print that status code..
raise ValueError("Unknown status code")
return STATUS_CODES[status]
def get_fault_class(level, **kwargs):
if level == 'error':
return ErrorFault
return BaseFault
def get_fault(data):
try:
level = data['level']
del data['level']
except KeyError:
level = 'error'
e = {}
for x, y in data.iteritems():
if isinstance(x, unicode):
e[str(x)] = y
else:
e[x] = y
return get_fault_class(level, **e)(**e)
def decode(stream, context=None, strict=False, logger=None, timezone_offset=None):
"""
Decodes the incoming stream as a remoting message.
@param stream: AMF data.
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param context: Context.
@type context: L{amf0.Context<pyamf.amf0.Context>} or
L{amf3.Context<pyamf.amf3.Context>}
@param strict: Enforce strict decoding. Default is C{False}.
@type strict: C{bool}
@param logger: Used to log interesting events whilst decoding a remoting
message.
@type logger: A L{logging.Logger} instance or C{None}.
@param timezone_offset: The difference between the current timezone and
UTC. Date/times should always be handled in UTC to avoid confusion but
this is required for legacy systems.
@type timezone_offset: L{datetime.timedelta}
@raise DecodeError: Malformed stream.
@raise RuntimeError: Decoder is unable to fully consume the
stream buffer.
@return: Message envelope.
@rtype: L{Envelope}
"""
if not isinstance(stream, util.BufferedByteStream):
stream = util.BufferedByteStream(stream)
if logger is not None:
logger.debug('remoting.decode start')
msg = Envelope()
msg.amfVersion = stream.read_uchar()
# see http://osflash.org/documentation/amf/envelopes/remoting#preamble
# why we are doing this...
if msg.amfVersion > 0x09:
raise pyamf.DecodeError("Malformed stream (amfVersion=%d)" %
msg.amfVersion)
if context is None:
context = pyamf.get_context(pyamf.AMF0, exceptions=False)
decoder = pyamf.get_decoder(pyamf.AMF0, stream, context=context,
strict=strict, timezone_offset=timezone_offset)
msg.clientType = stream.read_uchar()
header_count = stream.read_ushort()
for i in xrange(header_count):
name, required, data = _read_header(stream, decoder, strict)
msg.headers[name] = data
if required:
msg.headers.set_required(name)
body_count = stream.read_short()
for i in range(body_count):
context.clear()
target, payload = _read_body(stream, decoder, strict, logger)
msg[target] = payload
if strict and stream.remaining() > 0:
raise RuntimeError("Unable to fully consume the buffer")
if logger is not None:
logger.debug('remoting.decode end')
return msg
def encode(msg, context=None, strict=False, logger=None, timezone_offset=None):
"""
Encodes AMF stream and returns file object.
@type msg: L{Envelope}
@param msg: The message to encode.
@type strict: C{bool}
@param strict: Determines whether encoding should be strict. Specifically
header/body lengths will be written correctly, instead of the default 0.
Default is C{False}. Introduced in 0.4.
@param logger: Used to log interesting events whilst encoding a remoting
message.
@type logger: A L{logging.Logger} instance or C{None}.
@param timezone_offset: The difference between the current timezone and
UTC. Date/times should always be handled in UTC to avoid confusion but
this is required for legacy systems.
@type timezone_offset: L{datetime.timedelta}
@rtype: C{StringIO}
@return: File object.
"""
stream = util.BufferedByteStream()
if context is None:
context = pyamf.get_context(pyamf.AMF0, exceptions=False)
encoder = pyamf.get_encoder(pyamf.AMF0, stream, context=context,
timezone_offset=timezone_offset, strict=strict)
if msg.clientType == pyamf.ClientTypes.Flash9:
encoder.use_amf3 = True
stream.write_uchar(msg.amfVersion)
stream.write_uchar(msg.clientType)
stream.write_short(len(msg.headers))
for name, header in msg.headers.iteritems():
_write_header(
name, header, int(msg.headers.is_required(name)),
stream, encoder, strict)
stream.write_short(len(msg))
for name, message in msg.iteritems():
encoder.context.clear()
_write_body(name, message, stream, encoder, strict)
stream.seek(0)
return stream
def get_exception_from_fault(fault):
"""
@raise RemotingError: Default exception from fault.
"""
# XXX nick: threading problems here?
try:
return pyamf.ERROR_CLASS_MAP[fault.code]
except KeyError:
# default to RemotingError
return RemotingError
pyamf.register_class(ErrorFault)
|
[
"Herbert@herbnetbook.(none)"
] |
Herbert@herbnetbook.(none)
|
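A hedged round-trip sketch using only the `Envelope`/`Request` classes and the `encode()`/`decode()` functions defined above (Python 2 era code, matching the module; the target and body values are illustrative):

```python
from pyamf import remoting

env = remoting.Envelope(amfVersion=0, clientType=0)
env['/1'] = remoting.Request(target=u'echo.echo', body=[u'hello'])

stream = remoting.encode(env)              # BufferedByteStream, seeked to 0
decoded = remoting.decode(stream.getvalue())
print(decoded['/1'].target, decoded['/1'].body)   # echo.echo [u'hello']
```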
a4e56035bbd31cee1147805ce43a14020fc4f602
|
a5b89980586ba6d18831e7acaa00e941e5188455
|
/code.py
|
e591f59ad9e15c7b3e431b74162195026bdf9a08
|
[] |
no_license
|
jhuang389/Web-Script-Visualization-actors-rank
|
e95889cf65c552d1e09dd1f3156b7be64b5eeb3d
|
b5c0d63cb7f07e12ddfa461d6144d5ebc7c08b3a
|
refs/heads/main
| 2023-06-12T02:00:18.298414
| 2021-07-11T20:32:18
| 2021-07-11T20:32:18
| 385,045,693
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,250
|
py
|
import http.client
import json
import csv
class Graph:
def __init__(self, with_nodes_file=None, with_edges_file=None):
self.nodes = []
self.edges = []
if with_nodes_file and with_edges_file:
nodes_CSV = csv.reader(open(with_nodes_file))
nodes_CSV = list(nodes_CSV)[1:]
self.nodes = [(n[0],n[1]) for n in nodes_CSV]
edges_CSV = csv.reader(open(with_edges_file))
edges_CSV = list(edges_CSV)[1:]
self.edges = [(e[0],e[1]) for e in edges_CSV]
    def add_node(self, id: str, name: str) -> None:
        if ',' in name:
            name = name.replace(',', ' ')   # str.replace returns a new string; rebind it
        if (id, name) not in self.nodes:
            self.nodes.append((id, name))
def add_edge(self, source: str, target: str)->None:
if source!=target and not ((source,target) in self.edges or (target,source) in self.edges):
self.edges.append((source,target))
def total_nodes(self)->int:
return len(self.nodes)
def total_edges(self)->int:
return len(self.edges)
def max_degree_nodes(self)->dict:
max_deg=0
max_nodes=set()
for (id,name) in self.nodes:
n=0
for (source,target) in self.edges:
if id in (source,target):
n+=1
if n==max_deg:
max_nodes.add(id)
elif n>max_deg:
max_deg=n
max_nodes={id}
res={}
for id in max_nodes:
res[id]=max_deg
return res
def print_nodes(self):
print(self.nodes)
def print_edges(self):
print(self.edges)
def write_edges_file(self, path="edges.csv")->None:
edges_path = path
edges_file = open(edges_path, 'w', encoding='utf-8')
edges_file.write("source" + "," + "target" + "\n")
for e in self.edges:
edges_file.write(e[0] + "," + e[1] + "\n")
edges_file.close()
print("finished writing edges to csv")
def write_nodes_file(self, path="nodes.csv")->None:
nodes_path = path
nodes_file = open(nodes_path, 'w', encoding='utf-8')
nodes_file.write("id,name" + "\n")
for n in self.nodes:
nodes_file.write(n[0] + "," + n[1] + "\n")
nodes_file.close()
print("finished writing nodes to csv")
class TMDBAPIUtils:
def __init__(self, api_key:str):
self.api_key=api_key
def get_movie_cast(self, movie_id:str, limit:int=None, exclude_ids:list=None) -> list:
connection = http.client.HTTPSConnection("api.themoviedb.org")
connection.request("GET","/3/movie/" + movie_id + "/credits?api_key="+self.api_key)
response = connection.getresponse()
data = response.read()
pmcre = json.loads(data)
        if not limit:
            limit = len(pmcre['cast'])
        if exclude_ids is None:   # guard: the membership test below needs an iterable
            exclude_ids = []
        res = []
for i in range(0,min(limit,len(pmcre['cast']))):
if str(pmcre['cast'][i]['id']) in exclude_ids:
continue
entry={}
entry['id']=str(pmcre['cast'][i]['id'])
entry['name']=pmcre['cast'][i]['name']
entry['credit_id']=pmcre['cast'][i]['credit_id']
res.append(entry)
return res
def get_movie_credits_for_person(self, person_id:str, vote_avg_threshold:float=None)->list:
connection = http.client.HTTPSConnection("api.themoviedb.org")
connection.request("GET","/3/person/" + person_id + "/movie_credits?api_key="+self.api_key)
response = connection.getresponse()
data = response.read()
pmcre = json.loads(data)
if not vote_avg_threshold:
vote_avg_threshold=0.0
res=[]
for i in range(0,len(pmcre['cast'])):
if pmcre['cast'][i]['vote_average']<vote_avg_threshold:
continue
entry={}
entry['id']=str(pmcre['cast'][i]['id'])
entry['title']=pmcre['cast'][i]['title']
entry['vote_avg']=pmcre['cast'][i]['vote_average']
res.append(entry)
return res
def return_name()->str:
return "placeholder"
def return_argo_lite_snapshot()->str:
return 'https://poloclub.github.io/argo-graph-lite/#eb523310-dd07-4034-a676-82856eec5749'
if __name__ == "__main__":
graph = Graph()
graph.add_node(id='2975', name='Laurence Fishburne')
tmdb_api_utils = TMDBAPIUtils(api_key='66c84336add711bccca195ad27e27363')
graph.write_edges_file()
graph.write_nodes_file()
for i in range(3):
new_graph=Graph(with_nodes_file="nodes.csv",with_edges_file="edges.csv")
for node in graph.nodes:
print('finding movie credits for \''+node[1]+'\' ...')
movies=tmdb_api_utils.get_movie_credits_for_person(node[0],8.0)
for movie in movies:
cast=tmdb_api_utils.get_movie_cast(movie_id=movie['id'],limit=3,exclude_ids=[node[0]])
for person in cast:
new_graph.add_node(person['id'],person['name'])
new_graph.add_edge(node[0], person['id'])
graph=new_graph
graph.write_edges_file()
graph.write_nodes_file()
|
[
"noreply@github.com"
] |
jhuang389.noreply@github.com
|
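A small offline check of the `Graph` container above (no TMDB calls; the node names are illustrative and this assumes the `Graph` class is in scope):

```python
g = Graph()
g.add_node(id='1', name='Keanu Reeves')
g.add_node(id='2', name='Laurence Fishburne')
g.add_edge('1', '2')
g.add_edge('2', '1')                # reversed duplicate is rejected
assert g.total_nodes() == 2
assert g.total_edges() == 1
print(g.max_degree_nodes())         # {'1': 1, '2': 1} (order may vary)
```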
75fc5dc0b0a6deb095175d87595020b85fa716ad
|
016b5e2c9e30d44d6b11fc97e58435f9dc756d9b
|
/calculations/urls.py
|
785aa05f388f4ee346d32aab4bcb5fa5386713ea
|
[] |
no_license
|
Ladarius-Brooks/django_112
|
52a7fc1cf976d9e47a72c434de3673f150b25168
|
02bf23f7ea515f7e993647bcb087cee2e0bdf676
|
refs/heads/master
| 2022-12-30T20:40:36.683966
| 2020-10-22T02:09:49
| 2020-10-22T02:09:49
| 306,196,145
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
from django.urls import path
from .views import CalculationViewTemplate
urlpatterns=[
path('', CalculationViewTemplate.as_view(num=100), name="calc"),
]
|
[
"lbrooks2312@yahoo.com"
] |
lbrooks2312@yahoo.com
|
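For context, a view class compatible with `as_view(num=100)` in the URLconf above (hypothetical: the real `CalculationViewTemplate` lives in `.views`; the template path and context key here are assumptions):

```python
from django.views.generic import TemplateView

class CalculationViewTemplate(TemplateView):
    template_name = "calculations/calc.html"   # assumed template path
    num = 0                                    # overridden per-URL via as_view(num=...)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["num"] = self.num
        return context
```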
b5b728f7ac53633871c7d3763debeceac437fd00
|
4cbf7e2f0edb6a1017f8c1223a32d45e801f08c0
|
/Number Dragon游戏.py
|
27c23c4cddc493096109ab8b86f80180a8bef6c8
|
[] |
no_license
|
jrc-bill/Horizon
|
aac280224690902fda6df1030bce18a87bd8e24f
|
8b0e046c219a3e589c437e5023cac5e04b18fb57
|
refs/heads/master
| 2020-09-21T01:56:44.191746
| 2020-04-28T05:06:22
| 2020-04-28T05:06:22
| 224,647,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,981
|
py
|
from random import *
import time
J = 11
Q = 12
K = 13
A = 1
pk_list = [3, 4, 5, 6, 7, 8, 9, 10, J, Q, K, A, 2]
readyToOut = None
def computer_output():
    time1 = 0  # note: resets on every call, so the `time1 == 3` branch below never fires
readyToOut = -1
if int(lastOutPut) == -1:
if state == 0:
for i in playerList:
if i - 1 in computerList:
readyToOut = i-1
else:
computerList[0] = max(playerList) - 1
readyToOut = computerList[0]
else:
readyToOut = choice(computerList)
else:
if int(lastOutPut) + 1 in computerList:
readyToOut = int(lastOutPut) + 1
elif state == 1:
if 2 in computerList:
readyToOut = 2
else:
readyToOut = "不出"
if state == 1:
if readyToOut != -1:
if time1 == 3:
computerList.append(choice(pk_list))
time1 = 0
    if readyToOut == -1:
        readyToOut = "pass"
    if readyToOut != "pass":
        computerList.pop(int(computerList.index(readyToOut)))
time1 += 1
return readyToOut
def player_output():
time2 = 0
readyToOut = -1
print("你有{}这些牌".format(playerList))
if lastOutPut + 1 in playerList:
correct = True
elif 2 in playerList:
correct = True
elif lastOutPut == -1:
correct = True
elif lastOutPut == 2:
input("你无牌可出,只能输入no")
time.sleep(1)
readyToOut = -1
correct = False
else:
input("你无牌可出,只能输入no")
time.sleep(1)
readyToOut = -1
correct = False
while correct:
        readyToOut = input('Enter the card you want to play (a number, or no): ')
if readyToOut.isalpha():
readyToOut = -1
break
if int(readyToOut) not in playerList:
continue
if int(readyToOut) == 2:
break
if max(playerList) < int(readyToOut):
continue
if lastOutPut != -1:
if int(readyToOut) == lastOutPut + 1:
break
else:
continue
else:
break
if readyToOut != -1:
playerList.remove(int(readyToOut))
if time2 == 1:
playerList.append(choice(pk_list))
return readyToOut
print('Welcome to the ND battlefield')
print('''The computer has seven cards and you have six.
You may only play the card one above the previous play.
A 2 beats every other card.''')
time.sleep(1)
while True:
computerList = sample(pk_list, 7)
playerList = sample(pk_list, 6)
state = 0
lastOutPut = -1
while True:
comOut = computer_output()
if comOut != "不出":
print("机器出了{}".format(comOut))
lastOutPut = comOut
if comOut == 2:
print("机器出了最大的!")
time.sleep(1)
else:
print("机器不出")
time.sleep(1)
lastOutPut = -1
if len(computerList) == 0:
break
time.sleep(1)
print("机器剩下{}张牌".format(len(computerList)))
time.sleep(1)
state = 1
comOut = player_output()
if comOut != -1:
print("你出了{}".format(comOut))
time.sleep(1)
            if int(comOut) == 2:  # comOut is a digit string here, so compare as int
                print("You played the highest card!")
time.sleep(1)
lastOutPut = comOut
else:
print("你不出")
time.sleep(1)
lastOutPut = -1
if len(playerList) == 0:
break
if len(computerList) == 0:
print("机器赢了!")
time.sleep(1)
else:
print("你赢了!")
time.sleep(1)
input("输入任意字符并回车以继续:")
|
[
"noreply@github.com"
] |
jrc-bill.noreply@github.com
|
f98a6b8c239ddc9f232ac96e6cc9c919564fc468
|
ff4711f87b8e818d52399b3d91601bc4850ddc3d
|
/user/migrations/0001_initial.py
|
3d72644aed29748638cbc0d4bad510f6aa3599df
|
[] |
no_license
|
Javoh1001/Django_user
|
3932b1af37b7bf012adfefb1e0aa033bcb3e4e78
|
9195e1b434de7b901172b8625fbafb6a31cbddb0
|
refs/heads/main
| 2023-06-16T15:18:58.985851
| 2021-07-10T10:16:50
| 2021-07-10T10:16:50
| 384,667,301
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 788
|
py
|
# Generated by Django 3.2.5 on 2021-07-09 14:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('body', models.TextField()),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"javoh1001@gmail.com"
] |
javoh1001@gmail.com
|
c62e5e6dbdc11ffbd32876ab8283488de84fc9e0
|
635a81339db4dc4ce21e72f2d77faa92461404d9
|
/python/generators/trace_processor_table/public.py
|
c8bdb0be5968fbc6eb34b7bb9393202221e9fa88
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
google/perfetto
|
b3fee34767174ca8c9e8e501eeec6f73fb3e0cda
|
4201d29ef8ce94a064dfbba6d9c9518b297dbc3f
|
refs/heads/master
| 2023-09-04T07:16:27.954459
| 2023-09-03T01:33:19
| 2023-09-03T01:33:19
| 227,091,502
| 1,920
| 274
|
Apache-2.0
| 2023-09-13T18:18:23
| 2019-12-10T10:32:44
|
C++
|
UTF-8
|
Python
| false
| false
| 5,056
|
py
|
# Copyright (C) 2022 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the public API for generating C++ tables."""
from dataclasses import dataclass
from enum import auto
from enum import Flag as enum_Flag
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
@dataclass(frozen=True)
class CppColumnType:
"""
The type of a column on a C++ table.
See below for subclasses of this class which can be used.
"""
class ColumnFlag(enum_Flag):
"""
Flags which should be associated to the C++ table column.
For more information on each option here, see the Column::Flag
enum. Keep this in sync with Column::Flag.
"""
NONE = 0
SORTED = auto()
HIDDEN = auto()
DENSE = auto()
SET_ID = auto()
@dataclass(frozen=True)
class Column:
"""
Representation of a column of a C++ table.
Attributes:
name: The name of the column.
type: The type of the column.
flags: Flags for the column, ColumnFlag.NONE by default.
"""
name: str
type: CppColumnType
flags: ColumnFlag = ColumnFlag.NONE
@dataclass(frozen=True)
class ColumnDoc:
"""
Documentation for the C++ table column.
Used to generate reference pages on the docs website.
Attributes:
doc: Freeform docstring for the column.
joinable: Indicates this column is joinable with a column
from another table. Should have the format "table.column".
"""
doc: str
joinable: Optional[str] = None
@dataclass(frozen=True)
class TableDoc:
"""
Documentation for the C++ table.
Used to generate reference pages on the docs website.
Attributes:
doc: Freeform docstring for the table.
group: The group of tables this table belongs to. Examples include "Tracks",
"Events", "ART Heap Graphs" etc: see the docs page for all the existing
groups.
columns: Documentation for each table column.
skip_id_and_type: Skips publishing these columns in the documentation.
Should only be used when these columns are not meaningful or are aliased to
something better.
"""
doc: str
group: str
columns: Dict[str, Union[ColumnDoc, str]]
skip_id_and_type: bool = False
@dataclass(frozen=True)
class WrappingSqlView:
"""
Specifies information about SQL view wrapping a table.
Useful for tables which are not exposed directly to
SQL but instead are wrapped with a SQL view.
Attributes:
view_name: The name of the SQL view exposed to SQL.
"""
view_name: str
@dataclass(frozen=True)
class Table:
"""
  Representation of a C++ table.
Attributes:
python_module: Path to the Python module this table is defined in. Always
pass __file__.
class_name: Name of the C++ table class.
sql_name: Name of the table in SQL.
columns: The columns in this table.
tabledoc: Documentation for this table. Can include documentation overrides
for auto-added columns (i.e. id and type) and aliases added in
|wrapping_sql_view|.
parent: The parent table for this table. All columns are inherited from the
specified table.
wrapping_sql_view: See |WrappingSqlView|.
"""
python_module: str
class_name: str
sql_name: str
columns: List[Column]
parent: Optional['Table'] = None
tabledoc: Optional[TableDoc] = None
wrapping_sql_view: Optional[WrappingSqlView] = None
@dataclass(frozen=True)
class CppInt64(CppColumnType):
"""Represents the int64_t C++ type."""
@dataclass(frozen=True)
class CppUint32(CppColumnType):
"""Represents the uint32_t C++ type."""
@dataclass(frozen=True)
class CppInt32(CppColumnType):
"""Represents the int32_t C++ type."""
@dataclass(frozen=True)
class CppDouble(CppColumnType):
"""Represents the double C++ type."""
@dataclass(frozen=True)
class CppString(CppColumnType):
"""Represents the StringPool::Id C++ type."""
@dataclass(frozen=True)
class CppOptional(CppColumnType):
"""Represents the base::Optional C++ type."""
inner: CppColumnType
@dataclass(frozen=True)
class CppTableId(CppColumnType):
"""Represents the Table::Id C++ type."""
table: Table
@dataclass(frozen=True)
class CppSelfTableId(CppColumnType):
"""Represents the Id C++ type."""
@dataclass(frozen=True)
class Alias(CppColumnType):
"""Represents a column which aliases another column.
Aliasing refers to re-exporting a column with a different name. This is useful
especially for exporting "id" columns which names which associate it to the
table name: e.g. exporting thread.id as thread.utid"""
underlying_column: str
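# A minimal usage sketch built only from the dataclasses above; the table and
# column names here are illustrative assumptions, not real trace processor
# tables, and this block is not part of the original module.
EXAMPLE_TABLE = Table(
    python_module=__file__,
    class_name='ExampleEventTable',
    sql_name='example_event',
    columns=[
        Column('ts', CppInt64(), ColumnFlag.SORTED),
        Column('name', CppString()),
        Column('count', CppOptional(CppUint32())),
    ],
    tabledoc=TableDoc(
        doc='Hypothetical table, used purely to illustrate the API above.',
        group='Events',
        columns={
            'ts': 'Timestamp of the event.',
            'name': ColumnDoc(doc='Event name.'),
            'count': 'Optional event count.',
        },
    ),
)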
|
[
"lalitm@google.com"
] |
lalitm@google.com
|
9496334044a21e0b2bc97e70e86491576db752d0
|
04d3f8765cd769d6a98550c144c30440bd601834
|
/preprocessing/1_find_entrances_and_departures.py
|
129a429f9dc61678399f13fda3ddf86bb5abab72
|
[] |
no_license
|
olly-styles/Multi-Camera-Trajectory-Forecasting
|
44d5ab186b1346dabdbc9ec5abb52d000c9187af
|
c412d54a871916030743bc95df4366e0ae0345f6
|
refs/heads/master
| 2023-08-08T02:35:56.380015
| 2022-02-13T22:15:58
| 2022-02-13T22:15:58
| 247,671,885
| 38
| 7
| null | 2023-07-23T08:50:45
| 2020-03-16T10:21:00
|
Python
|
UTF-8
|
Python
| false
| false
| 3,788
|
py
|
import pandas as pd
import numpy as np
# Experimental setup
IMAGE_WIDTH = 1920
IMAGE_HEIGHT = 1080
MIN_BOX_WIDTH = 35
MIN_BOX_HEIGHT = 70
MIN_TRACK_LENGTH = 20
DATA_PATH = '../data/'
def get_track_length(df):
    '''
    Given a dataframe of bounding boxes, returns the dataframe with a
    track_length column added.
    df: Dataframe of bounding boxes with (at least) the columns
    ['hour', 'camera', 'track', 'frame_num']
    '''
df['track_length'] = 0
# Setting to type string to get the order to match the order of videos.
# This is specific to the WNMF dataset videos.
df[['hour', 'camera']] = df[['hour', 'camera']].astype(str)
df = df.sort_values(by=['hour', 'camera', 'track', 'frame_num'])
df = df.reset_index()
del df['level_0']
del df['index']
all_lengths = np.zeros((len(df)), dtype='uint16')
    # Iterating through rows like this is very inefficient and is the bottleneck.
    # This loop should be vectorized for performance (see the sketch after this script).
for (previous_ix, previous_row), (ix, row) in zip(df.iterrows(), df[1:].iterrows()):
if ix % 10000 == 0:
print(ix, ' of ', len(df))
if continuous_track(row, previous_row):
all_lengths[ix] = all_lengths[ix - 1] + 1
df['track_length'] = all_lengths
return df
def find_entrances_and_departures(df, min_detection_length):
'''
Flag bounding_boxes that are labelled. For departure to be labelled, it must have
a history of a least min_detection_length and be the last frame in the track
For a entrance to be labeled, it must have a future of at least min_detection_length
and be the first frame in the track
'''
df['max_track_len'] = df.groupby(['hour', 'camera', 'track'])['track_length'].transform(np.max)
df['departure'] = np.where((df['track_length'] >= min_detection_length) & (
df['track_length'] == df['max_track_len']), 1, 0)
df['entrance'] = np.where((df['track_length'] == min_detection_length - 1), 1, 0)
df['entrance'] = df['entrance'].shift(periods=-(min_detection_length - 1))
df['entrance'] = df['entrance'].fillna(0).astype(int)
return df
def continuous_track(row, previous_row):
return (previous_row['track'] == row['track']) &\
(previous_row['frame_num'] == row['frame_num'] - 1) &\
(previous_row['camera'] == row['camera']) &\
(previous_row['hour'] == row['hour'])
for day_num in range(1, 21):
day = 'day_' + str(day_num)
print(day)
bounding_boxes = pd.read_csv(
DATA_PATH + 'bounding_boxes/all_bounding_boxes_' + day + '.csv')
# Clip BB coordinates
bounding_boxes['x1'] = bounding_boxes['x1'].clip(0, IMAGE_WIDTH)
bounding_boxes['x2'] = bounding_boxes['x2'].clip(0, IMAGE_WIDTH)
bounding_boxes['y1'] = bounding_boxes['y1'].clip(0, IMAGE_HEIGHT)
bounding_boxes['y2'] = bounding_boxes['y2'].clip(0, IMAGE_HEIGHT)
# Get width and height
bounding_boxes['w'] = bounding_boxes['x2'] - bounding_boxes['x1']
bounding_boxes['h'] = bounding_boxes['y2'] - bounding_boxes['y1']
# Filter width and height
bounding_boxes = bounding_boxes[bounding_boxes['w'] >= MIN_BOX_WIDTH]
bounding_boxes = bounding_boxes[bounding_boxes['h'] >= MIN_BOX_HEIGHT]
bounding_boxes = bounding_boxes[bounding_boxes['h'] > bounding_boxes['w']]
bounding_boxes = bounding_boxes.sort_values(by=['hour', 'camera', 'track', 'frame_num'])
bounding_boxes = bounding_boxes.reset_index()
# Count track length and find entrances and departures
counted_bounding_boxes = get_track_length(bounding_boxes)
labeled_bounding_boxes = find_entrances_and_departures(counted_bounding_boxes, MIN_TRACK_LENGTH)
# Store results
labeled_bounding_boxes.to_csv(DATA_PATH + 'entrances_and_departures/entrances_and_departures_' + day + '.csv', index=False)
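# A hedged sketch, not part of the original script, of the vectorized
# track-length computation the comment in get_track_length asks for. It should
# reproduce the row-wise loop above, but verify against real data before use.
def get_track_length_vectorized(df):
    df = df.sort_values(by=['hour', 'camera', 'track', 'frame_num']).reset_index(drop=True)
    prev = df.shift(1)
    continuous = ((prev['track'] == df['track']) &
                  (prev['frame_num'] == df['frame_num'] - 1) &
                  (prev['camera'] == df['camera']) &
                  (prev['hour'] == df['hour']))
    # Rows where the track is not continuous start a new run; cumcount then
    # numbers each row within its run, matching the loop's all_lengths values.
    df['track_length'] = df.groupby((~continuous).cumsum()).cumcount()
    return df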
|
[
"ollystyles@gmail.com"
] |
ollystyles@gmail.com
|
7bff3584caa1c2837eb5bc27232d0f261e823734
|
315743e12d0259fff52e181492d5f59d9838de9e
|
/Code/TrafficSign_TF_Network_3.py
|
2b16de99594569a98bce21254a7e8fd9a45442d3
|
[
"MIT"
] |
permissive
|
mairob/Trafficsign-classification-in-TensorFlow
|
4cbd7ba9d4930eeda90faafed1ac6ac75f3351f5
|
aa28c464897ed8d7b949a7045b1291a9062383e2
|
refs/heads/master
| 2021-01-25T00:57:54.309189
| 2017-06-22T14:54:11
| 2017-06-22T14:54:11
| 94,712,836
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,375
|
py
|
import numpy as np
import tensorflow as tf
image_size = 48 #width = height
col_channels = 3 #RGB
def initVariable(name, shape):
"""
Initialize weights and biases based on http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf
"""
return tf.get_variable(name, shape, initializer=tf.contrib.layers.xavier_initializer())
def conv2d(x, W):
"""
Basic convolution.
"""
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
"""
Basic pooling
"""
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
#Note: tf.name_scope is used for structuring the graph
#Note: dropout should be around 35...50%
#Note: learningrate should be around 1e-4....1e-6
with tf.name_scope('Network'):
with tf.name_scope('input'):
x_image = tf.placeholder(tf.float32, [None, image_size, image_size, col_channels], name='Images_raw')
y_raw = tf.placeholder(tf.int32, [None] ,name='Labels_raw')
y_= tf.one_hot(indices=y_raw, depth=43 , name='Labels_oneHot')
with tf.name_scope('dropout'):
keep_prob = tf.placeholder(tf.float32)
with tf.name_scope('learningrate'):
learningrate = tf.placeholder(tf.float32)
with tf.name_scope('Layer1'):
W_conv1 = initVariable("W1", [5, 5, 3, 96])
b_conv1 = initVariable("B1", [96])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(tf.nn.local_response_normalization(h_conv1)) #resulting feature maps = 24x24 Pixel
with tf.name_scope('Layer2'):
W_conv2 = initVariable("W2",[3, 3, 96, 96])
b_conv2 = initVariable("B2",[96])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(tf.nn.local_response_normalization(h_conv2)) #resulting feature maps = 12x12 Pixel
with tf.name_scope('Layer3'):
W_conv3 = initVariable("W3",[3, 3, 96, 48])
b_conv3 = initVariable("B3",[48])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
with tf.name_scope('Layer4'):
W_conv4 = initVariable("W4",[3, 3, 48, 48])
b_conv4 = initVariable("B4",[48])
h_conv4 = tf.nn.relu(conv2d(h_conv3, W_conv4) + b_conv4)
h_conv4_flat = tf.reshape(h_conv4, [-1, 12*12*48])
with tf.name_scope('Fully1'):
W_fc1 = initVariable("Wfc1",[12*12*48, 1024])
b_fc1 = initVariable("Bfc1",[1024])
h_fc1 = tf.nn.relu(tf.matmul(h_conv4_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
with tf.name_scope('Fully2'):
W_fc2 = initVariable("Wfc2",[1024 , 768])
b_fc2 = initVariable("Bfc2",[768])
h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
h_fc2_drop = tf.nn.dropout(h_fc2, keep_prob)
with tf.name_scope('OutputLayer'):
W_out = initVariable("Wout",[768, 43])
b_out = initVariable("Bout",[43])
with tf.name_scope("softmax"):
y_conv=tf.nn.softmax(tf.matmul(h_fc2_drop, W_out) + b_out)
with tf.name_scope('cross_entropy'):
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
with tf.name_scope('train'):
train_step = tf.train.AdamOptimizer(learningrate).minimize(cross_entropy)
with tf.name_scope('Accuracy'):
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))
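# A minimal, hedged training sketch, not part of the original file. Batch size,
# dropout and learning rate are illustrative choices based on the notes above,
# and the random batch is a stand-in for real traffic-sign data.
if __name__ == '__main__':
    demo_images = np.random.rand(8, image_size, image_size, col_channels).astype(np.float32)
    demo_labels = np.random.randint(0, 43, size=8).astype(np.int32)
    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        _, loss, acc = sess.run([train_step, cross_entropy, accuracy], feed_dict={
            x_image: demo_images,
            y_raw: demo_labels,
            keep_prob: 0.5,       # ~35-50% dropout, per the note above
            learningrate: 1e-4,   # within the suggested 1e-4...1e-6 range
        })
        print('demo batch loss: %.4f, accuracy: %.4f' % (loss, acc))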
|
[
"noreply@github.com"
] |
mairob.noreply@github.com
|
a7da31f03571873c60b76be03ce9bf35bc949d7a
|
ce3bd1c0f8ecb9bbe41ded050c702a35e82191f5
|
/khat3680_data_structures/src/morse.py
|
7acdbd7d0bd3062894425b0ced58d63a8510e8df
|
[] |
no_license
|
khat3680/Data_Sturct_Python
|
e368133d01cd790206f49e3f401b73961234955a
|
4ae75031b3abf36119331064bb119061ae6cd586
|
refs/heads/master
| 2022-12-10T02:59:01.016483
| 2020-09-11T16:46:31
| 2020-09-11T16:46:31
| 294,755,366
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,831
|
py
|
"""
-------------------------------------------------------
Morse Code Definitions and Functions
-------------------------------------------------------
Author: Anshul Khatri
ID: 193313680
Email: khat3680@mylaurier.ca
Section: CP164 Winter 2020
__updated__ = "2020-03-09"
-------------------------------------------------------
"""
# In order by letters.
data1 = (('A', '.-'), ('B', '-...'), ('C', '-.-.'),
('D', '-..'), ('E', '.'), ('F', '..-.'),
('G', '--.'), ('H', '....'), ('I', '..'),
('J', '.---'), ('K', '-.-'), ('L', '.-..'),
('M', '--'), ('N', '-.'), ('O', '---'),
('P', '.--.'), ('Q', '--.-'), ('R', '.-.'),
         ('S', '...'), ('T', '-'), ('U', '..-'),
('V', '...-'), ('W', '.--'), ('X', '-..-'),
('Y', '-.--'), ('Z', '--..'))
# In order by splitting.
data2 = (('M', '--'), ('F', '..-.'), ('T', '-'),
('C', '-.-.'), ('J', '.---'), ('P', '.--.'),
('W', '.--'), ('A', '.-'), ('D', '-..'),
('H', '....'), ('K', '-.-'), ('N', '-.'),
         ('R', '.-.'), ('U', '..-'), ('Y', '-.--'),
('B', '-...'), ('E', '.'), ('I', '..'),
('G', '--.'), ('L', '.-..'), ('O', '---'),
('Q', '--.-'), ('S', '...'), ('V', '...-'),
('X', '-..-'), ('Z', '--..'))
# In order by popularity.
data3 = (('E', '.'), ('T', '-'), ('A', '.-'),
('O', '---'), ('I', '..'), ('N', '-.'),
('S', '...'), ('H', '....'), ('R', '.-.'),
         ('D', '-..'), ('L', '.-..'), ('U', '..-'),
('C', '-.-.'), ('M', '--'), ('P', '.--.'),
('F', '..-.'), ('Y', '-.--'), ('W', '.--'),
('G', '--.'), ('B', '-...'), ('V', '...-'),
('K', '-.-'), ('J', '.---'), ('X', '-..-'),
('Z', '--..'), ('Q', '--.-'))
class ByLetter:
"""
-------------------------------------------------------
Stores letters and matching Morse codes. Compares
elements by letter attribute.
-------------------------------------------------------
"""
def __init__(self, letter, code):
"""
-------------------------------------------------------
Initialize a ByLetter object.
Use: var = ByLetter(letter, code)
-------------------------------------------------------
Parameters:
letter - a letter of the alphabet (str)
code - the Morse code matching letter (str)
Returns:
A ByLetter object.
-------------------------------------------------------
"""
self.letter = letter
self.code = code
def __eq__(self, target):
"""
-------------------------------------------------------
Compares source against target for equality.
        Objects are equal if their letters match.
Use: source == target
-------------------------------------------------------
Parameters:
target - ByLetter to compare source to (ByLetter)
Returns:
result - True if letters match, False otherwise (boolean)
-------------------------------------------------------
"""
return self.letter == target.letter
def __lt__(self, target):
"""
-------------------------------------------------------
Determines if source comes before target.
Use: source < target
-------------------------------------------------------
Parameters:
target - ByLetter to compare source to (ByLetter)
Returns:
result - True if source precedes target,
False otherwise (boolean)
-------------------------------------------------------
"""
return self.letter < target.letter
def __le__(self, target):
"""
-------------------------------------------------------
        Determines if source precedes or is equal to target.
Use: source <= target
-------------------------------------------------------
Parameters:
target - ByLetter to compare source to (ByLetter)
Returns:
result - True if source precedes or is equal to target,
False otherwise (boolean)
-------------------------------------------------------
"""
return self.letter <= target.letter
def __str__(self):
"""
-------------------------------------------------------
Creates a formatted string of ByLetter data.
Use: print(source)
Use: string = str(source)
-------------------------------------------------------
Returns:
string - the formatted contents of ByLetter (str)
-------------------------------------------------------
"""
return "({}, {})".format(self.letter, self.code)
class ByCode:
"""
-------------------------------------------------------
Stores letters and matching Morse codes. Compares
elements by code attribute.
-------------------------------------------------------
"""
def __init__(self, letter, code):
"""
-------------------------------------------------------
Initialize a ByCode object.
Use: var = ByCode(letter, code)
-------------------------------------------------------
Parameters:
letter - a letter of the alphabet (str)
code - the Morse code matching letter (str)
Returns:
A ByCode object.
-------------------------------------------------------
"""
self.letter = letter
self.code = code
return
def __eq__(self, target):
"""
-------------------------------------------------------
Compares source against target for equality.
        Objects are equal if their codes match.
Use: source == target
-------------------------------------------------------
Parameters:
target - ByCode to compare source to (ByCode)
Returns:
result - True if codes match, False otherwise (boolean)
-------------------------------------------------------
"""
result = False
if self.code == target.code:
result = True
return result
def __lt__(self, target):
"""
-------------------------------------------------------
Determines if source comes before target.
Use: source < target
-------------------------------------------------------
Parameters:
target - ByCode to compare source to (ByCode)
Returns:
result - True if source precedes target,
False otherwise (boolean)
-------------------------------------------------------
"""
result = False
if self.code < target.code:
result = True
return result
def __le__(self, target):
"""
-------------------------------------------------------
        Determines if source precedes or is equal to target.
Use: source <= target
-------------------------------------------------------
Parameters:
target - ByCode to compare source to (ByCode)
Returns:
result - True if source precedes or is equal to target,
False otherwise (boolean)
-------------------------------------------------------
"""
result = False
if self.code <= target.code:
result = True
return result
def __str__(self):
"""
-------------------------------------------------------
Creates a formatted string of ByCode data.
Use: print(source)
Use: string = str(source)
-------------------------------------------------------
Returns:
string - the formatted contents of ByCode (str)
-------------------------------------------------------
"""
return "({}, {})".format(self.code, self.letter)
def fill_letter_bst(bst, values):
"""
-------------------------------------------------------
Fills a BST with ByLetter Morse code letter/code pairs
(Function must convert contents of values to ByLetter objects)
    Use: fill_letter_bst(bst, values)
-------------------------------------------------------
Parameters:
bst - a bst (BST)
values - set of Morse code letter/code pairs (list of tuples)
Returns:
None
-------------------------------------------------------
"""
for i in values:
bst.insert(ByLetter(i[0],i[1]))
return
def fill_code_bst(bst, values):
"""
-------------------------------------------------------
Fills a BST with ByCode Morse code letter/code pairs.
(Function must convert contents of values to ByCode objects)
    Use: fill_code_bst(bst, values)
-------------------------------------------------------
Parameters:
bst - a bst (BST)
values - set of Morse code letter/code pairs (list of tuples)
Returns:
None
-------------------------------------------------------
"""
for i in values:
bst.insert(ByCode(i[0],i[1]))
return
def encode_morse(bst, text):
"""
-------------------------------------------------------
Converts English text to Morse code
Use: code = encode_morse(bst, text)
-------------------------------------------------------
Parameters:
bst - Morse code bst sorted by letter (BST)
text - English text to convert (str)
Returns:
result - Morse code version of text (str)
-------------------------------------------------------
"""
result=""
for i in text:
if i.isalpha() :
j = i.upper()
obj = ByLetter(j,None)
code = bst.retrieve(obj)
if code is not None :
result += code.code + " "
elif i == " " :
result+= "\n"
else:
result+= " "
return result
def decode_morse(bst, code):
"""
-------------------------------------------------------
Converts Morse code to English text
Use: text = decode_morse(bst, code)
-------------------------------------------------------
Parameters:
bst - Morse code bst sorted by code (BST)
code - Morse code to convert (str)
Returns:
result - English version of code (str)
-------------------------------------------------------
"""
result=""
for i in code.split(" "):
obj = ByCode(None,i)
code = bst.retrieve(obj)
if code is not None:
result += code.letter
else:
result+= " "
return result
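# A hedged usage sketch, not part of the original module. It assumes the
# course's BST class exposing insert() and retrieve(), as the fill/encode/
# decode functions above expect.
# from BST_linked import BST
# letter_bst, code_bst = BST(), BST()
# fill_letter_bst(letter_bst, data2)  # data2's split order keeps the tree balanced
# fill_code_bst(code_bst, data2)
# print(encode_morse(letter_bst, "SOS"))        # -> "... --- ... "
# print(decode_morse(code_bst, "... --- ..."))  # -> "SOS"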
|
[
"anshulskhatri@gmail.com"
] |
anshulskhatri@gmail.com
|
ff5d828f191caedeb35e19e6d722d463c0fd9495
|
9e5e21735aed104f872e41de22d28235449f4b29
|
/notifications.py
|
f0b995f577410e77db51891909bd3e7dddcbc922
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
thiemok/pokeminer
|
064a580a7292aa56b2a30ba96d2827bc7b46313c
|
c7b1e978a6f3188771e210e0ec94c2a0652c370d
|
refs/heads/master
| 2020-12-29T02:06:46.287886
| 2016-08-15T19:20:01
| 2016-08-15T19:20:01
| 64,648,903
| 0
| 0
| null | 2016-08-01T08:15:45
| 2016-08-01T08:15:45
| null |
UTF-8
|
Python
| false
| false
| 4,374
|
py
|
import discord
from discord.ext import commands
import asyncio
import signal
import sys
import time
from geopy.geocoders import GoogleV3
import json
import config
import db
from names import POKEMON_NAMES
from messages import DISCORD_MESSAGES
# Check whether config has all necessary attributes
REQUIRED_SETTINGS = (
'DISCORD_APP_ID',
'DISCORD_TOKEN',
'DISCORD_CHANNELS',
'DISCORD_UPDATE_INTERVAL',
'GOOGLE_MAPS_KEY',
)
for setting_name in REQUIRED_SETTINGS:
if not hasattr(config, setting_name):
raise RuntimeError('Please set "{}" in config'.format(setting_name))
join_url = "https://discordapp.com/oauth2/authorize?client_id=" + config.DISCORD_APP_ID + "&scope=bot&permissions="
client = commands.Bot(command_prefix='!', description=DISCORD_MESSAGES['bot_desc'])
already_seen = {}
geolocator = GoogleV3(api_key=config.GOOGLE_MAPS_KEY)
watchlist_path = 'watchlist.json'
with open(watchlist_path) as json_data:
watchlist = json.load(json_data)['watchlist']
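# Shape note (inferred from the json.load call above, not from project docs):
# watchlist.json is expected to look like {"watchlist": [16, 149]},
# i.e. a list of integer Pokemon ids.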
@client.event
async def on_ready():
print(DISCORD_MESSAGES['ready'][0].format(join_url))
print(DISCORD_MESSAGES['ready'][1])
print(DISCORD_MESSAGES['ready'][2].format(count_iterable(client.servers)))
    client.loop.create_task(sightings_update_task())  # schedule the recurring task; a bare check_pokemon() would run only once
for id in config.DISCORD_CHANNELS:
channel = client.get_channel(id)
await client.send_message(channel, DISCORD_MESSAGES['ready_msg'])
@client.event
async def on_error(event, *args, **kwargs):
print(DISCORD_MESSAGES['error'].format(event))
@client.command(description=DISCORD_MESSAGES['cmd_read_desc'], help=DISCORD_MESSAGES['cmd_read_desc'])
async def read(): #Displays all tracked pokemon
tracked_pokemon = "```\n"
for id in watchlist:
tracked_pokemon += '{} {}\n'.format(id, POKEMON_NAMES[id])
tracked_pokemon += "```"
await client.say(DISCORD_MESSAGES['cmd_read_msg'] + tracked_pokemon)
@client.command(description=DISCORD_MESSAGES['cmd_add_desc'], help=DISCORD_MESSAGES['cmd_add_desc'])
async def add(id: int):
if id not in watchlist:
watchlist.append(id)
with open(watchlist_path, 'w') as json_data:
json.dump({'watchlist': watchlist}, json_data)
await client.say(DISCORD_MESSAGES['cmd_add_added'].format(POKEMON_NAMES[id]))
else:
await client.say(DISCORD_MESSAGES['cmd_add_already_added'].format(POKEMON_NAMES[id]))
@add.error
async def add_error(error, id):
await client.say(DISCORD_MESSAGES['cmd_add_usage'])
@client.command(description=DISCORD_MESSAGES['cmd_remove_desc'], help=DISCORD_MESSAGES['cmd_remove_desc'])
async def remove(id: int):
if id is None:
await client.say(DISCORD_MESSAGES['cmd_remove_usage'])
elif id in watchlist:
watchlist.remove(id)
with open(watchlist_path, 'w') as json_data:
json.dump({'watchlist': watchlist}, json_data)
await client.say(DISCORD_MESSAGES['cmd_remove_removed'].format(POKEMON_NAMES[id]))
else:
await client.say(DISCORD_MESSAGES['cmd_remove_no_on_list'].format(POKEMON_NAMES[id]))
@remove.error
async def remove_error(error, id):
await client.say(DISCORD_MESSAGES['cmd_remove_usage'])
async def check_pokemon():
session = db.Session()
pokemons = db.get_sightings(session)
session.close()
for sighting in pokemons:
await process_sighting(sighting)
remove_stale_sightings()
async def process_sighting(sighting):
if sighting.pokemon_id in watchlist:
if sighting.id not in already_seen:
already_seen[sighting.id] = sighting
await report_sighting(sighting)
async def report_sighting(sighting):
name = POKEMON_NAMES[sighting.pokemon_id]
    location = geolocator.reverse('{}, {}'.format(sighting.lat, sighting.lon), exactly_one=True)  # lat/lon are numeric, so format them explicitly
street = location.address
    disappear_time_diff = ((sighting.expire_timestamp - time.time()) // 60)
    message = DISCORD_MESSAGES['sighting'].format(name, sighting.pokemon_id, street, disappear_time_diff)
for id in config.DISCORD_CHANNELS:
channel = client.get_channel(id)
await client.send_message(channel, message)
async def sightings_update_task():
await client.wait_until_ready()
while not client.is_closed:
await check_pokemon()
await asyncio.sleep(config.DISCORD_UPDATE_INTERVAL * 60)
def remove_stale_sightings():
old_seen = already_seen.copy()
    for key in old_seen:
        if old_seen[key].expire_timestamp < time.time():  # a sighting is stale once its expiry time has passed
            del already_seen[key]
def count_iterable(i):
return sum(1 for e in i)
client.run(config.DISCORD_TOKEN)
|
[
"krausethiemo@posteo.de"
] |
krausethiemo@posteo.de
|
fe8e8fa48ff8cbdadacab8934b993e74c1cbb710
|
c8a056310e50466098ff0859c658d4cdc4ab428c
|
/env2/bin/easy_install
|
e53056946bdf6cda5c96c46d8c5443324572bad0
|
[] |
no_license
|
Kaylindaww/schoolSystem
|
422f0571886b2d1b9a0ee18308553f55e2f680eb
|
cf1e2d190f71ad96470a6112907367188f41db20
|
refs/heads/master
| 2023-07-13T19:43:40.111255
| 2021-08-24T06:37:45
| 2021-08-24T06:37:45
| 386,018,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
#!/home/student/Desktop/pythonn_web/env2/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"akullukaylinda@gmail.com"
] |
akullukaylinda@gmail.com
|
|
0c24e11f974e8f79a2e0c1d0648255fe52a33160
|
e8cd34697ddea01e24b24adc6201de81ce4ae35b
|
/412. Fizz Buzz.py
|
d8c98b54b42d6af5f52cb93938633ae950428b96
|
[] |
no_license
|
hanyunxuan/leetcode
|
154cfcfe8208c639f1311cdaa4163cdbb3c9bb79
|
67c7f43b7d66e3ccf22622bdf6b6fccf0015195c
|
refs/heads/master
| 2020-04-02T13:36:15.713648
| 2019-01-09T07:05:01
| 2019-01-09T07:05:01
| 154,488,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 975
|
py
|
"""
Write a program that outputs the string representation of numbers from 1 to n.
But for multiples of three it should output “Fizz” instead of the number and for the multiples of five output “Buzz”. For numbers which are multiples of both three and five output “FizzBuzz”.
Example:
n = 15,
Return:
[
"1",
"2",
"Fizz",
"4",
"Buzz",
"Fizz",
"7",
"8",
"Fizz",
"Buzz",
"11",
"Fizz",
"13",
"14",
"FizzBuzz"
]
"""
# my solution
n = 15
# ans=[]
# for i in range(n):
# if (i+1)%3==0 and (i+1)%5==0:
# ans.append('FizzBuzz')
# elif (i+1)%3==0:
# ans.append('Fizz')
# elif (i+1)%5==0:
# ans.append('Buzz')
# else:
# ans.append(str(i+1))
# amazing solution
a = ['Fizz' * (not i % 3) + 'Buzz' * (not i % 5) + str(i) * min(1, (i % 3)) * min(1, (i % 5)) for i in range(1, n + 1)]
b = ['Fizz' * (not i % 3) + 'Buzz' * (not i % 5) or str(i) for i in range(1, n+1)]
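# Why the one-liners work (illustrative checks, not from the original file):
# 'Fizz' * (not i % 3) is '' unless i is divisible by 3, and in b the empty
# string is falsy, so '' or str(i) falls back to the plain number.
assert ('Fizz' * (not 4 % 3) + 'Buzz' * (not 4 % 5) or str(4)) == '4'
assert ('Fizz' * (not 15 % 3) + 'Buzz' * (not 15 % 5) or str(15)) == 'FizzBuzz'
assert a == b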
|
[
"408968382@qq.com"
] |
408968382@qq.com
|
a5b6a73bbc9e2335f3ad50d33afa44501b630465
|
2e7c2b7560baa0682afa8059515b124ac504dfe9
|
/tests/test_shared_libs.py
|
b545d7c988e3768b61ee10f5188da45ef5645a62
|
[
"MIT"
] |
permissive
|
abn/pipx
|
2588bdc11ffd1c445db32399f0175712d1b8a683
|
20238b1153eb5ade0eb851511dc2cf51c7eac4eb
|
refs/heads/master
| 2022-12-30T23:29:49.699827
| 2020-08-30T03:10:29
| 2020-08-30T03:10:29
| 291,733,934
| 0
| 0
|
MIT
| 2020-08-31T14:13:56
| 2020-08-31T14:13:56
| null |
UTF-8
|
Python
| false
| false
| 713
|
py
|
import os
import time
import pytest # type: ignore
from pipx import shared_libs
@pytest.mark.parametrize(
"mtime_minus_now,needs_upgrade",
[
(-shared_libs.SHARED_LIBS_MAX_AGE_SEC - 5 * 60, True),
(-shared_libs.SHARED_LIBS_MAX_AGE_SEC + 5 * 60, False),
],
)
def test_auto_update_shared_libs(capsys, pipx_temp_env, mtime_minus_now, needs_upgrade):
now = time.time()
shared_libs.shared_libs.create([], verbose=True)
shared_libs.shared_libs.has_been_updated_this_run = False
access_time = now # this can be anything
os.utime(shared_libs.shared_libs.pip_path, (access_time, mtime_minus_now + now))
assert shared_libs.shared_libs.needs_upgrade is needs_upgrade
|
[
"noreply@github.com"
] |
abn.noreply@github.com
|
fbbcb14f9a0cf804864130a258cc442d3e622c57
|
97e2233be1e152174f2d7138bccdffd4b438ec35
|
/tests/wrappers/test_reader.py
|
bbdd89847d1a7aa918c6dff8df2b31ea13f03814
|
[
"LicenseRef-scancode-cecill-b-en"
] |
permissive
|
lamyj/odil
|
a5eea306c478bacd304fa6bd14808c65e5b059cd
|
7e50e4eaccb5e7d6d6716705ff6252d342f60685
|
refs/heads/master
| 2022-11-21T07:09:24.547750
| 2022-04-23T07:21:52
| 2022-04-23T07:21:52
| 33,601,264
| 77
| 25
|
NOASSERTION
| 2022-09-12T19:55:28
| 2015-04-08T10:55:06
|
C++
|
UTF-8
|
Python
| false
| false
| 4,905
|
py
|
import os
import tempfile
import unittest
from io import BytesIO
import odil
class TestReader(unittest.TestCase):
def test_constructor(self):
stream = odil.iostream(BytesIO())
reader = odil.Reader(stream, odil.registry.ImplicitVRLittleEndian)
self.assertEqual(
reader.transfer_syntax.encode(),
odil.registry.ImplicitVRLittleEndian)
self.assertEqual(reader.byte_ordering, odil.ByteOrdering.LittleEndian)
self.assertFalse(reader.explicit_vr)
self.assertFalse(reader.keep_group_length)
def test_constructor_no_default(self):
stream = odil.iostream(BytesIO())
reader = odil.Reader(stream, odil.registry.ExplicitVRBigEndian, True)
self.assertEqual(
reader.transfer_syntax.encode(), odil.registry.ExplicitVRBigEndian)
self.assertEqual(reader.byte_ordering, odil.ByteOrdering.BigEndian)
self.assertTrue(reader.explicit_vr)
self.assertTrue(reader.keep_group_length)
def test_read_data_set(self):
string_io = BytesIO(
b"\x10\x00\x10\x00PN\x07\x00Foo^Bar"
b"\x10\x00\x20\x00CS\x03\x00FOO"
)
stream = odil.iostream(string_io)
reader = odil.Reader(stream, odil.registry.ExplicitVRLittleEndian)
data_set = reader.read_data_set()
self.assertEqual(data_set.size(), 2)
self.assertSequenceEqual(data_set.as_string("PatientName"), [b"Foo^Bar"])
self.assertSequenceEqual(data_set.as_string("PatientID"), [b"FOO"])
def test_read_data_set_halt_condition(self):
string_io = BytesIO(
b"\x10\x00\x10\x00" b"PN" b"\x08\x00" b"Foo^Bar "
b"\x10\x00\x20\x00" b"LO" b"\x04\x00" b"FOO "
)
stream = odil.iostream(string_io)
reader = odil.Reader(stream, odil.registry.ExplicitVRLittleEndian)
data_set = reader.read_data_set(lambda x: x==odil.registry.PatientID)
self.assertEqual(data_set.size(), 1)
self.assertSequenceEqual(data_set.as_string("PatientName"), [b"Foo^Bar"])
def test_read_tag(self):
string_io = BytesIO(b"\x10\x00\x20\x00")
stream = odil.iostream(string_io)
reader = odil.Reader(stream, odil.registry.ExplicitVRLittleEndian)
self.assertEqual(reader.read_tag(), odil.registry.PatientID)
def test_read_length(self):
string_io = BytesIO(b"\x34\x12")
stream = odil.iostream(string_io)
reader = odil.Reader(stream, odil.registry.ExplicitVRLittleEndian)
self.assertEqual(reader.read_length(odil.VR.CS), 0x1234)
def test_read_element(self):
string_io = BytesIO(b"PN\x08\x00Foo^Bar ")
stream = odil.iostream(string_io)
reader = odil.Reader(stream, odil.registry.ExplicitVRLittleEndian)
self.assertEqual(
reader.read_element(odil.registry.PatientName),
odil.Element([b"Foo^Bar"], odil.VR.PN))
def test_read_file_stream(self):
data = (
128*b"\x00"+b"DICM"+
b"\x02\x00\x10\x00" b"UI" b"\x14\x00" b"1.2.840.10008.1.2.1\x00"
b"\x10\x00\x10\x00" b"PN" b"\x08\x00" b"Foo^Bar "
)
string_io = BytesIO(data)
stream = odil.iostream(string_io)
header, data_set = odil.Reader.read_file(stream)
self.assertEqual(len(header), 1)
self.assertSequenceEqual(
header.as_string("TransferSyntaxUID"),
[odil.registry.ExplicitVRLittleEndian])
self.assertEqual(len(data_set), 1)
self.assertSequenceEqual(data_set.as_string("PatientName"), [b"Foo^Bar"])
def test_read_file_path(self):
data = (
128*b"\x00"+b"DICM"+
b"\x02\x00\x10\x00" b"UI" b"\x14\x00" b"1.2.840.10008.1.2.1\x00"
b"\x10\x00\x10\x00" b"PN" b"\x08\x00" b"Foo^Bar "
)
fd, path = tempfile.mkstemp()
os.write(fd, data)
os.close(fd)
try:
header, data_set = odil.Reader.read_file(path)
finally:
os.remove(path)
self.assertEqual(len(header), 1)
self.assertSequenceEqual(
header.as_string("TransferSyntaxUID"),
[odil.registry.ExplicitVRLittleEndian])
self.assertEqual(len(data_set), 1)
self.assertSequenceEqual(data_set.as_string("PatientName"), [b"Foo^Bar"])
def test_open_context(self):
data = (
128*b"\x00"+b"DICM"+
b"\x02\x00\x10\x00" b"UI" b"\x14\x00" b"1.2.840.10008.1.2.1\x00"
b"\x10\x00\x10\x00" b"PN" b"\x08\x00" b"Foo^Bar "
)
fd, path = tempfile.mkstemp()
os.write(fd, data)
os.close(fd)
try:
with odil.open(path) as fd:
header, data_set = odil.Reader.read_file(fd)
finally:
os.remove(path)
if __name__ == "__main__":
unittest.main()
|
[
"lamy@unistra.fr"
] |
lamy@unistra.fr
|
a098ed31838af88f4c99fda9cee68d68941729bd
|
3f14c13b854f5f64ec78fda4db2e3d40027d07e1
|
/vgae_tensorflow/optimizer.py
|
d11fc75f1ef8becb894f9f989d0064b16de0d0fe
|
[
"MIT"
] |
permissive
|
geneprophet/VGAE
|
ae03a11d52d3a57d0b25d23b0324c56b3966aa5b
|
b5ca4f6987c2376cd04bff047716d2e8c7ae97b9
|
refs/heads/master
| 2023-07-18T19:58:56.548964
| 2021-09-22T04:32:01
| 2021-09-22T04:32:01
| 407,718,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,102
|
py
|
import tensorflow as tf
flags = tf.compat.v1.app.flags
#flags = tf.compat.v1.flags
FLAGS = flags.FLAGS
class OptimizerAE(object):
def __init__(self, preds, labels, pos_weight, norm):
preds_sub = preds
labels_sub = labels
self.cost = norm * tf.reduce_mean(input_tensor=tf.nn.weighted_cross_entropy_with_logits(logits=preds_sub, labels=labels_sub, pos_weight=pos_weight))
self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=FLAGS.learning_rate) # Adam Optimizer
self.opt_op = self.optimizer.minimize(self.cost)
self.grads_vars = self.optimizer.compute_gradients(self.cost)
self.correct_prediction = tf.equal(tf.cast(tf.greater_equal(tf.sigmoid(preds_sub), 0.5), tf.int32),
tf.cast(labels_sub, tf.int32))
self.accuracy = tf.reduce_mean(input_tensor=tf.cast(self.correct_prediction, tf.float32))
class OptimizerVAE(object):
def __init__(self, preds, labels, model, num_nodes, pos_weight, norm):
preds_sub = preds
labels_sub = labels
self.cost = norm * tf.reduce_mean(input_tensor=tf.nn.weighted_cross_entropy_with_logits(logits=preds_sub, labels=labels_sub, pos_weight=pos_weight))
self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=FLAGS.learning_rate) # Adam Optimizer
# Latent loss
self.log_lik = self.cost
self.kl = (0.5 / num_nodes) * tf.reduce_mean(input_tensor=tf.reduce_sum(input_tensor=1 + 2 * model.z_log_std - tf.square(model.z_mean) -
tf.square(tf.exp(model.z_log_std)), axis=1))
self.cost -= self.kl
self.opt_op = self.optimizer.minimize(self.cost)
self.grads_vars = self.optimizer.compute_gradients(self.cost)
self.correct_prediction = tf.equal(tf.cast(tf.greater_equal(tf.sigmoid(preds_sub), 0.5), tf.int32),
tf.cast(labels_sub, tf.int32))
self.accuracy = tf.reduce_mean(input_tensor=tf.cast(self.correct_prediction, tf.float32))
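# A hedged usage sketch, not part of the original file: `model` is assumed to
# expose reconstructions, z_mean and z_log_std tensors (as the KL term above
# requires), and adj_labels/pos_weight/norm come from the training script.
# opt = OptimizerVAE(preds=model.reconstructions, labels=adj_labels,
#                    model=model, num_nodes=num_nodes,
#                    pos_weight=pos_weight, norm=norm)
# _, cost = sess.run([opt.opt_op, opt.cost], feed_dict=feed_dict)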
|
[
"geneprophet@163.com"
] |
geneprophet@163.com
|
b8d583fe713e03667917cda9664597adef0fcd95
|
03376912b360f90a96d9f9683f987634225488e3
|
/Python/PE206.py
|
941646e012c76cb10c88ea93b0e53ced71ddf9c8
|
[] |
no_license
|
lenaindelaforetmagique/ProjectEuler
|
ed27b15d120631b6d3af30ee7e1305f59fdc9afa
|
53ce546ed72e6d3f9cfb547ac44c7be457c5b2ec
|
refs/heads/master
| 2021-01-01T20:45:52.260877
| 2018-12-20T16:09:22
| 2018-12-20T16:09:22
| 98,928,792
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 790
|
py
|
"""Problem 206
Find the unique positive integer whose square has the form 1_2_3_4_5_6_7_8_9_0,
where each “_” is a single digit.
"""
from time import time
from math import *
def test(j):
return "1234567890"==str(j)[::2]
def prog():
#s="1_2_3_4_5_6_7_8_9_0"
#n=int("0".join(s.split("_")))
n=1020304050607080900
chif=4
solution=[]
s=0
for i in range(1,10**chif):
n1=int("".join([c+"0" for c in str(i)]))*10**(18-2*chif)
n2=n1+int("90"*(9-chif))
for j in range(floor(sqrt(n+n1)),ceil(sqrt(n+n2))):
if test(j**2):
solution.append(j)
return solution
t1=time()
for i in range(20):
a=prog()
print((time()-t1)/20,a)
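# A slower but simple cross-check, not in the original file. The square ends
# in 0, so its root must be a multiple of 10; stepping by 10 keeps a direct
# scan of the whole range feasible. Left commented so it does not distort the
# timing loop above.
# lo = isqrt(1020304050607080900)  # smallest possible 1_2_3_4_5_6_7_8_9_0 value
# hi = isqrt(1929394959697989990)  # largest possible value
# for j in range(lo - lo % 10, hi + 1, 10):
#     if test(j * j):
#         print(j)
#         break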
|
[
"noreply@github.com"
] |
lenaindelaforetmagique.noreply@github.com
|
9593772f2c53621dd344e5d2759c853bd389be7f
|
27b7a3788ea84313142a041e8dec0a748cf7475b
|
/atm.py
|
5fe4bd71533896c4e6371ded3309b84898b6246d
|
[] |
no_license
|
atakancicekk/ATM
|
54b93c77da442c255e632bed0cf5eb80fb6a3baa
|
a9951e9b1873bc228b8fb85e7fc3544d85ad0ad6
|
refs/heads/master
| 2022-04-27T00:40:16.894478
| 2020-04-28T18:03:40
| 2020-04-28T18:03:40
| 259,712,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,365
|
py
|
import time
customer_db = {
"atakan cicek": {
"username": "atakancicek",
"password": "1234",
"balance": 4000
},
"harry potter": {
"username": "harrypotter",
"password": "7890",
"balance": 5000
},
"miley cyrus": {
"username": "mileycyrus",
"password": "1111",
"balance": 4400
}
}
def welcome():
print "Welcome to Bank Atakan !"
print "*****************************"
user_selection = input("press 0 to log in, press 1 to sign up: ")
if user_selection == 0:
login()
elif user_selection == 1:
signup()
def login():
print " "
print " "
print "Login"
print "*****************************"
input_username = raw_input("username: ")
for user in customer_db:
if input_username == customer_db[user]["username"]:
input_password = raw_input("password: ")
if input_password == customer_db[user]["password"]:
account(customer_db[user])
else:
print "Wrong password, please try again..."
login()
def signup():
print "Sign Up"
print "*****************************"
input_fullname = raw_input("Enter your full name: ")
input_username = raw_input("Enter a username: ")
input_password = raw_input("Enter a password: ")
customer_db[input_fullname] = {"username": input_username, "password": input_password, "balance": 0}
login()
def account(user):
print " "
print " "
print "Account Page"
print "*****************************"
print "Have a nice day", user["username"], ". Your current balance is:", user["balance"]
print "What you would like to do ?"
user_selection = input(
"1. Withdraw money\n"
"2. Transfer money\n"
"3. Deposit money\n"
"4. Logout\n"
"5. Exit from ATM\n"
)
if user_selection == 1:
withdraw(user)
elif user_selection == 2:
transfer_money(user)
elif user_selection == 3:
deposit(user)
elif user_selection == 4:
welcome()
elif user_selection == 5:
exit()
else:
print "Sorry, wrong key pressed. Please select 1 or 2"
account(user)
def withdraw(user):
print " "
print " "
print "Withdraw Money"
print "*****************************"
withdraw_amount = input("Please enter the amount: ")
if user["balance"] > withdraw_amount:
print "Making the transaction, please wait."
time.sleep(3)
user["balance"] -= withdraw_amount
print "Money withdrawed. Your new balance is: ", user["balance"]
time.sleep(1.5)
print "Please wait while you are being redirected..."
time.sleep(4)
account(user)
else:
print "Insufficent Funds !"
withdraw(user)
def deposit(user):
print " "
print " "
print "Deposit Money"
print "*****************************"
deposit_amount = input("Please enter the deposit amount: ")
print "Making the transaction, please wait."
time.sleep(3)
user["balance"] += deposit_amount
print "Money deposited. Your new balance is: ", user["balance"]
time.sleep(1.5)
print "Please wait while you are being redirected..."
time.sleep(4)
account(user)
def transfer_money(user):
for customer in customer_db:
if user["username"] == customer_db[customer]["username"]:
pass
else:
print customer_db[customer]["username"]
transfer_selection = raw_input("Please type the recipient: ")
transfer_amount = input("Please enter the amount: ")
if user["balance"] >= transfer_amount:
for person in customer_db:
if transfer_selection in customer_db[person]["username"]:
customer_db[person]["balance"] += transfer_amount
user["balance"] -= transfer_amount
print "Making the transfer, please be patient."
time.sleep(3)
print "Money sent. Your new balance is: ", user["balance"]
time.sleep(1.5)
print "Please wait while you are being redirected..."
time.sleep(4)
account(user)
else:
print "Insufficent Funds !"
transfer_money(user)
welcome()
|
[
"noreply@github.com"
] |
atakancicekk.noreply@github.com
|
1d1a7d6d13aa5d73009fae2ef1a5f4abe9ddd4d4
|
6efac3ffa26d60b0c8d5a1d578c16333b3f5aca5
|
/contact/urls.py
|
ab317699b7358fcb1416c35b55d0be1acd6ea280
|
[] |
no_license
|
tohfaakib/consultancy
|
bdd8418eb3b1b3989294f80917b290102769c1a9
|
36500ab4fe1820021d4efa70ba7d96bbc9c32795
|
refs/heads/master
| 2023-04-28T08:50:36.585756
| 2019-09-25T09:17:05
| 2019-09-25T09:17:05
| 210,766,485
| 0
| 0
| null | 2023-04-21T20:37:45
| 2019-09-25T05:57:33
|
Python
|
UTF-8
|
Python
| false
| false
| 191
|
py
|
from django.urls import path
from .views import contact, email_sending
urlpatterns = [
path('contact/', contact, name='contact'),
path('send_mail', email_sending, name='send_mail')
]
|
[
"tohfaakib@my.smccd.edu"
] |
tohfaakib@my.smccd.edu
|
d34d4f9429938c1d28d7ba32a18e18444add5004
|
314a95c3255bfce718478950ab7762fb8bda6697
|
/flowers.py
|
1660ca480eabae4ee166ac71b823e8c318503a7c
|
[] |
no_license
|
dbrody112/cosine_ohem_loss_exploration
|
f18b60557020b575da3ad2e74ea8dce506f9a059
|
519cae34976f2a7a268e35cb7d06e2e8616080c4
|
refs/heads/main
| 2023-02-22T21:58:06.351721
| 2021-01-24T15:19:25
| 2021-01-24T15:19:25
| 322,007,771
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,909
|
py
|
from chexpert import showImage
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import glob
from PIL import Image
from scipy.ndimage import zoom
import regex as re
import os
import sys
from sklearn.preprocessing import OneHotEncoder
import torch
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
import pytorch_lightning as pl
from imblearn.metrics import sensitivity_specificity_support
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
import torchvision.models as models
import scipy.io as sio
from chexpert_utils import loadCSV
class FlowersDataset(Dataset):
def __init__(self,csv_file, train_csv, file_paths, transform = transforms.Compose([
transforms.Resize((224,224)),
transforms.RandomAffine(degrees = 30,translate = (0.1,0.1)),
transforms.ToTensor(),
])):
self.csv_file = csv_file
self.train_csv = train_csv
self.file_paths = file_paths
self.transform = transform
def __len__(self):
return(len(self.csv_file))
def __getitem__(self, idx):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if torch.is_tensor(idx):
idx = idx.tolist()
img = Image.open(self.file_paths[idx])
img = np.array(img)
img = transforms.functional.to_pil_image(img)
if self.transform:
img = self.transform(img)
unique_classes = [i+1 for i in range(len(self.train_csv['labels'].unique()))]
class_dict = {}
for i in range(len(self.train_csv['labels'].unique())):
z = torch.zeros(1,max(unique_classes))
z[:,i] = 1
class_dict[self.train_csv['labels'].unique()[i]] = z
label = torch.FloatTensor(class_dict[self.csv_file['labels'][idx]])
img.to(device)
label.to(device)
sample = [torch.FloatTensor(img).to(device),label.to(device)]
return sample
def loadFlowerDataset(image_labels_path = './imagelabels.mat', train_test_validation_split_path = './setid.mat', dataset_glob_path = "./flowers/*"):
"""
image_labels_path (string): location of imagelabels.mat
train_test_validation_split_path (string): location of setid.mat
dataset_glob_path (string): location of dataset with a "/*" at the end for glob.glob to parse
"""
mat = sio.loadmat(train_test_validation_split_path)
mat = {k:v for k, v in mat.items() if k[0] != '_'}
data = pd.DataFrame({k: pd.Series(v[0]) for k, v in mat.items()})
data.to_csv("setid.csv")
mat = sio.loadmat(image_labels_path)
mat = {k:v for k, v in mat.items() if k[0] != '_'}
data = pd.DataFrame({k: pd.Series(v[0]) for k, v in mat.items()})
data.to_csv("imagelabels.csv")
df = pd.read_csv("./setid.csv")
train_ids = np.array(df['trnid'])
tstids = np.array(df['tstid'])
train_paths =[]
for flower in glob.glob(dataset_glob_path):
if(int(os.path.basename(flower)[6:11]) in train_ids):
train_paths.append(flower)
test_paths =[]
for flower in glob.glob(dataset_glob_path):
if(int(os.path.basename(flower)[6:11]) in tstids):
test_paths.append(flower)
image_labels = np.array(pd.read_csv("imagelabels.csv")['labels'])
train_labels = []
test_labels = []
for i in range(len(image_labels)):
if(i in train_ids):
train_labels.append(image_labels[i])
if(i in tstids):
test_labels.append(image_labels[i])
train_csv=pd.DataFrame({'labels':train_labels})
test_csv=pd.DataFrame({'labels':test_labels})
train = FlowersDataset(train_csv, train_csv, train_paths)
test = FlowersDataset(test_csv, train_csv, test_paths)
showImage(test[100][0])
return train,test, train_csv, test_csv
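# A hedged usage sketch, not part of the original file. It assumes
# imagelabels.mat, setid.mat and the flowers/ directory exist at the default
# paths documented in loadFlowerDataset above.
# train, test, train_csv, test_csv = loadFlowerDataset()
# train_loader = DataLoader(train, batch_size=32, shuffle=True)
# images, labels = next(iter(train_loader))  # images: [32, 3, 224, 224]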
|
[
"noreply@github.com"
] |
dbrody112.noreply@github.com
|
fc33431767ae7f0cbe8b98c9af00f73ba5c31f36
|
4a8b75956a93fb5a1b440ba944afd5e5927b7db9
|
/billetera-python/ejemplos/estructuradedatos1.py
|
c0f916d5685f509519a204e790f88f60cdc2912f
|
[] |
no_license
|
darkunchain/NextU_LFPJ
|
a051f9baaa548a7d4d8c6a667b604302a3bd631f
|
7c9b2eb9cf264e4421df10c1fd967b396d3ca64c
|
refs/heads/master
| 2023-03-09T00:11:18.789730
| 2022-02-17T18:41:29
| 2022-02-17T18:41:29
| 176,357,442
| 0
| 0
| null | 2023-03-03T18:51:05
| 2019-03-18T19:39:56
|
HTML
|
UTF-8
|
Python
| false
| false
| 393
|
py
|
i = 0
cripto = ['', '', '']
cant = [0.0, 0.0, 0.0]
cotiz = [0.0, 0.0, 0.0]
while i < 3:
    cripto[i] = input("Enter the name of the coin: ")
    cant[i] = float(input("Enter the amount of " + cripto[i] + ": "))
    cotiz[i] = float(input("Enter the USD price of " + cripto[i] + ": "))
    print(cripto, cant, cotiz)
    i = i + 1  # advance the counter; `i=+1` would pin it at 1 forever
i = 0
while i < 3:
    print("Coin: " + cripto[i] + ", amount: " + str(cant[i]) + ", price in USD: " + str(cotiz[i]))
    i = i + 1
|
[
"luisfer74081@gmail.com"
] |
luisfer74081@gmail.com
|
3d676bf2847cdec78845f0366fe652d30460a17a
|
db4e793adfbff5e402f1713e043622c59da11923
|
/main.py
|
06e649fe5a9594d80131dd670aaabec3b217c908
|
[] |
no_license
|
hrhm47/python_project
|
f69ae556acbbd82a1890f13258995eb4cd120425
|
99a5a7c87c44d7d67fc2cc4aa691e76ae82d7d7c
|
refs/heads/master
| 2023-07-17T18:20:45.342036
| 2021-09-03T03:28:38
| 2021-09-03T03:28:38
| 402,630,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,733
|
py
|
import random
# print("\n\t\t****** Gussing Number ******* \n\n")
#
# def guess_number(rng):
# random_number = random.randint(1,rng)
# guess = 0
# while guess != random_number:
# guess = int(input('Guess the Number: '))
# if guess > random_number:
# print("Guess is to high")
# else:
# print("Guess is to low")
# print("you guess the right number that is ",random_number)
#
# rng = int(input("\t\t Enter a range for Your self to Guess a Number: "))
# guess_number(rng)
print("\t\t\t Guess a number on Computer\n\n")
print("\t\t\t\t Thinks A number in Your Mind And guess it from Computer :)\n")
def guess_number(rng):
low = 1
high = rng
guess = 0
guess_string = ""
while guess_string != 'c':
if low != high :
guess = random.randint(low,high)
else:
guess = low
        guess_string = input("\n\t\tIs the guess " + str(guess) + " too high (h), too low (l), or correct (c)? ")
if guess_string == 'h':
high = guess - 1
elif guess_string == 'l':
low = guess + 1
print("\n\t\tyou guess the right number that is ",guess," \n\t\t\t Congratulations")
rng = int(input("\t\t Enter the upper limit of the range the computer should guess in: "))
guess_number(rng)
print("\n\n\t\t-------------------------Madlibs Game--------------------------------\n\n")
noun=input("Enter the name of person:")
fruit_name=input("Enter the name of fruit:")
color=input("Enter the color of fruit:")
print("\n\n")
story="Yesterday, we visited in " + str(noun) + "'s orchard. We saw different colors of " + str(fruit_name) +"s." \
"I ate ",color, fruit_name+ "'s. They were so delicious."
print(story)
|
[
"haseebr470@gmail.com"
] |
haseebr470@gmail.com
|
70a14dafff85c24c10a206c643501755c37cac39
|
fe4218d4c729cc394702927c5914bc0836799e9a
|
/core/views.py
|
ff74e0a27a09d809ebdd6e5ef47374eacd9671e0
|
[] |
no_license
|
harsh-gautam/django_blog
|
e2f4d8f378bc20f64c52faf0d28e144c1be5d8e9
|
92bf2e032a10c506f974cf020fe656f464979127
|
refs/heads/main
| 2023-02-12T23:44:32.856900
| 2021-01-09T16:03:08
| 2021-01-09T16:03:08
| 327,074,478
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,121
|
py
|
from django.shortcuts import render, redirect, HttpResponse, Http404
from django.urls import reverse
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from core.models import ContactForm
from blog.models import Post
# Create your views here.
def home(request):
return render(request, 'core/home.html')
def contact(request):
if request.method == "POST":
name = request.POST["name"]
email = request.POST["email"]
query = request.POST["query"]
contact = ContactForm(name=name, email=email, query=query)
contact.save()
messages.success(request, "Your query has been send to our team.")
return redirect('/contact')
return render(request, 'core/contact.html')
def about(request):
return render(request, 'core/about.html')
def search(request):
query = request.GET['keyword']
if len(query) > 50:
allPosts = Post.objects.none()
else:
posts_title = Post.objects.filter(title__icontains=query)
posts_content = Post.objects.filter(content__icontains=query)
posts_author = Post.objects.filter(author__icontains=query)
allPosts = posts_title.union(posts_content).union(posts_author)
context = {"allPosts": allPosts, "query": query}
return render(request, "core/search.html", context)
def handleSignup(request):
if request.method == "POST":
# Get the post data
name = request.POST['name'].lower()
email = request.POST['email'].lower()
username = request.POST['username']
password = request.POST['password']
        name_splitted = name.split()
        fname, lname = "", ""  # defaults so lname exists even for single-word names
        if len(name_splitted) == 1:
            fname = name_splitted[0]
        elif len(name_splitted) == 2:
            fname, lname = name_splitted[0], name_splitted[1]
        elif len(name_splitted) > 2:
            fname, lname = name_splitted[0], name_splitted[-1]
user = User.objects.create_user(username, email, password)
user.first_name = fname
user.last_name = lname
user.save()
messages.success(request, "Your account has been successfully created. You can login now.")
return redirect("/")
else:
raise Http404("Page not found")
def handleLogin(request):
if request.method == "POST":
username = request.POST['loginUsername']
password = request.POST['loginPass']
current_url = request.POST['current_url']
print(current_url)
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
messages.success(request, f'Welcome {username}! Your log in was successful.')
return redirect(current_url)
else:
messages.error(request, 'Invalid Credential! Please try again.')
return redirect(current_url)
else:
        raise Http404("Page Not Found")
def handleLogout(request):
logout(request)
messages.warning(request, 'Logged out successfully')
return redirect('/')
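# Hypothetical URL wiring for these views (the project's actual urls.py is not
# shown in this file; every path and name below is an illustrative assumption):
#
#   from django.urls import path
#   from core import views
#
#   urlpatterns = [
#       path('', views.home, name='home'),
#       path('contact', views.contact, name='contact'),
#       path('about', views.about, name='about'),
#       path('search', views.search, name='search'),
#       path('signup', views.handleSignup, name='signup'),
#       path('login', views.handleLogin, name='login'),
#       path('logout', views.handleLogout, name='logout'),
#   ]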
|
[
"hgautam208@gmail.com"
] |
hgautam208@gmail.com
|
8fe3bf170f7782a185234598748ce2f8353609e3
|
2a53ed3b1f659a434e8dcdef435dd931532431d6
|
/ssm/ssm/assessments/tests/factories.py
|
96117ba5af305976f399c7ee5e7f35e58a2fcb7a
|
[] |
no_license
|
twistedFantasy/staff
|
92d759eb3ce0bdcb01fd82ccd163cd22aa8da526
|
1daa297d57e8078e80efb910aff1a6fc35b18524
|
refs/heads/master
| 2023-01-09T22:15:29.480064
| 2019-12-22T12:40:53
| 2019-12-22T12:40:53
| 154,378,776
| 9
| 2
| null | 2023-01-03T18:55:39
| 2018-10-23T18:37:22
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 952
|
py
|
from factory import DjangoModelFactory, Faker, SubFactory
from ssm.assessments.models import Assessment, Checkpoint, Task, STATUS
from ssm.users.tests.factories import UserFactory
class AssessmentFactory(DjangoModelFactory):
class Meta:
model = Assessment
user = SubFactory(UserFactory)
decision_by = SubFactory(UserFactory)
status = Faker('random_element', elements=STATUS)
start_date = Faker('date')
end_date = Faker('date')
plan = Faker('text')
comments = Faker('text')
notes = Faker('text')
class CheckpointFactory(DjangoModelFactory):
class Meta:
model = Checkpoint
assessment = SubFactory(AssessmentFactory)
title = Faker('word')
date = Faker('date')
class TaskFactory(DjangoModelFactory):
class Meta:
model = Task
checkpoint = SubFactory(CheckpointFactory)
title = Faker('word')
description = Faker('text')
completed = Faker('boolean')
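# Illustrative test usage (a sketch, not part of this module; assumes a Django
# test database is available via TestCase or pytest-django):
#
#   def test_task_factory_builds_the_full_chain():
#       task = TaskFactory()  # SubFactory cascades: Checkpoint -> Assessment -> Users
#       assert task.checkpoint.assessment.user is not None
#       assert task.title and task.description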
|
[
"twisteeed.fantasy@gmail.com"
] |
twisteeed.fantasy@gmail.com
|
bcd63a11924d3fdad2c28da08f42c29f3cc54416
|
88da3fef6830ad9beea58c2871255f081cb30983
|
/questions.py
|
5cf3ea8a3ed8961afa721e17de91f6c98bbbef4c
|
[] |
no_license
|
kardandon/CS50x-AI
|
b05e59e551b41b292ee3cab8b718b267cac0bfd0
|
3a013e990956852c46e66721a70c63fa794e4087
|
refs/heads/main
| 2023-01-02T08:10:30.456688
| 2020-08-21T03:14:16
| 2020-08-21T03:14:16
| 305,349,704
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,647
|
py
|
import math
import os
import string
import sys

import nltk
FILE_MATCHES = 1
SENTENCE_MATCHES = 1
def main():
# Check command-line arguments
if len(sys.argv) != 2:
sys.exit("Usage: python questions.py corpus")
# Calculate IDF values across files
files = load_files(sys.argv[1])
file_words = {
filename: tokenize(files[filename])
for filename in files
}
file_idfs = compute_idfs(file_words)
# Prompt user for query
query = set(tokenize(input("Query: ")))
# Determine top file matches according to TF-IDF
filenames = top_files(query, file_words, file_idfs, n=FILE_MATCHES)
# Extract sentences from top files
sentences = dict()
for filename in filenames:
for passage in files[filename].split("\n"):
for sentence in nltk.sent_tokenize(passage):
tokens = tokenize(sentence)
if tokens:
sentences[sentence] = tokens
# Compute IDF values across sentences
idfs = compute_idfs(sentences)
# Determine top sentence matches
matches = top_sentences(query, sentences, idfs, n=SENTENCE_MATCHES)
for match in matches:
print(match)
def load_files(directory):
"""
Given a directory name, return a dictionary mapping the filename of each
`.txt` file inside that directory to the file's contents as a string.
"""
    contents = dict()
    path = os.path.join(".", directory)
for file in os.listdir(path):
with open(os.path.join(path, file), "r", encoding="utf8") as f:
contents[file[:-4]] = f.read()
return contents
def tokenize(document):
"""
Given a document (represented as a string), return a list of all of the
words in that document, in order.
    Process document by converting all words to lowercase, and removing any
punctuation or English stopwords.
"""
    tokens = [word.lower() for word in nltk.word_tokenize(document)]
    stopwords = set(nltk.corpus.stopwords.words("english"))
    punct = set(string.punctuation)
    return [word for word in tokens if word not in stopwords and word not in punct]
def compute_idfs(documents):
"""
Given a dictionary of `documents` that maps names of documents to a list
of words, return a dictionary that maps words to their IDF values.
Any word that appears in at least one of the documents should be in the
resulting dictionary.
"""
    num = len(documents)
    presence = dict()
    idfs = dict()
    for doc in documents:
        for word in set(documents[doc]):
            presence[word] = presence.get(word, 0) + 1
for word in presence:
idfs[word] = math.log(num / presence[word])
return idfs
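# Worked example (sketch): with num = 4 documents where "learning" occurs in 2,
# idf("learning") = log(4 / 2) ≈ 0.693, while a word present in all 4 documents
# gets idf = log(4 / 4) = 0 and so contributes nothing to the rankings below.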
def top_files(query, files, idfs, n):
"""
Given a `query` (a set of words), `files` (a dictionary mapping names of
files to a list of their words), and `idfs` (a dictionary mapping words
    to their IDF values), return a list of the filenames of the `n` top
files that match the query, ranked according to tf-idf.
"""
    tfidfs = dict()
    for file in files:
        tfidfs[file] = 0
        for word in query:
            # A query word absent from a file contributes nothing to its score.
            freq = files[file].count(word) if word in files[file] else 0
            idf = idfs.get(word, 0)
            # Accumulate tf-idf over all query words instead of overwriting it.
            tfidfs[file] += idf * freq
lst = sorted(tfidfs, key=tfidfs.get, reverse=True)
return lst[:n]
def top_sentences(query, sentences, idfs, n):
"""
Given a `query` (a set of words), `sentences` (a dictionary mapping
sentences to a list of their words), and `idfs` (a dictionary mapping words
to their IDF values), return a list of the `n` top sentences that match
the query, ranked according to idf. If there are ties, preference should
be given to sentences that have a higher query term density.
"""
stats = {}
for sentence in sentences:
stats[sentence] = dict()
stats[sentence]["idf"] = 0
stats[sentence]["count"] = 0
num = len(sentences[sentence])
for word in query:
if word in sentences[sentence]:
stats[sentence]["idf"] += idfs[word]
stats[sentence]["count"] += 1
stats[sentence]["QTD"] = stats[sentence]["count"] / num
lst = sorted(stats.keys(), key=lambda s: (stats[s]["idf"], stats[s]["QTD"]), reverse=True)
return lst[:n]
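# Tie-break sketch: if two sentences tie on summed idf, the denser one wins,
# e.g. 2 query words in a 10-word sentence (QTD = 0.2) outranks 2 query words
# in a 20-word sentence (QTD = 0.1).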
if __name__ == "__main__":
main()
|
[
"kardandon@users.noreply.github.com"
] |
kardandon@users.noreply.github.com
|
273d693da31970ef981197bdb0e1c975a44133ec
|
bc3bfdb96031646d576b42c24554546988146df6
|
/src/brokerlib/ibapi/client.py
|
a339dd47a09421f2c77c49ec8a2b1f818988f0dc
|
[] |
no_license
|
SivaanandM/HRHD_Worker
|
56a9a486403aca92989a30a9018ffd30e8f7f953
|
c6eaf920607c4b589cefea248f2c1b1a09e1050a
|
refs/heads/master
| 2020-08-29T23:07:04.387731
| 2020-04-25T08:33:48
| 2020-04-25T08:33:48
| 218,197,181
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128,485
|
py
|
"""
Copyright (C) 2018 Interactive Brokers LLC. All rights reserved. This code is subject to the terms
and conditions of the IB API Non-Commercial License or the IB API Commercial License, as applicable.
"""
import time
"""
The main class to use from API user's point of view.
It takes care of almost everything:
- implementing the requests
- creating the answer decoder
- creating the connection to TWS/IBGW
The user just needs to override EWrapper methods to receive the answers.
"""
import logging
import queue
import socket
# from src.main.fb_objects import FBObjects as fb_obj
from ibapi import (decoder, reader, comm)
from ibapi.connection import Connection
from ibapi.message import OUT
from ibapi.common import * # @UnusedWildImport
from ibapi.contract import Contract
from ibapi.order import Order
from ibapi.execution import ExecutionFilter
from ibapi.scanner import ScannerSubscription
from ibapi.comm import (make_field, make_field_handle_empty)
from ibapi.utils import (current_fn_name, BadMessage)
from ibapi.errors import * #@UnusedWildImport
from ibapi.server_versions import * # @UnusedWildImport
#TODO: use pylint
logger = logging.getLogger(__name__)
class EClient(object):
(DISCONNECTED, CONNECTING, CONNECTED, REDIRECT) = range(4)
#TODO: support redirect !!
def __init__(self, wrapper):
self.msg_queue = queue.Queue()
self.wrapper = wrapper
self.decoder = None
self.reset()
def reset(self):
self.done = False
self.nKeybIntHard = 0
self.conn = None
self.host = None
self.port = None
self.extraAuth = False
self.clientId = None
self.serverVersion_ = None
self.connTime = None
self.connState = None
self.optCapab = ""
self.asynchronous = False
self.reader = None
        self.decoder = None
self.setConnState(EClient.DISCONNECTED)
def setConnState(self, connState):
_connState = self.connState
self.connState = connState
logger.debug("%s connState: %s -> %s" % (id(self), _connState,
self.connState))
def sendMsg(self, msg):
full_msg = comm.make_msg(msg)
logger.info("%s %s %s", "SENDING", current_fn_name(1), full_msg)
self.conn.sendMsg(full_msg)
def logRequest(self, fnName, fnParams):
if logger.isEnabledFor(logging.INFO):
if 'self' in fnParams:
prms = dict(fnParams)
del prms['self']
else:
prms = fnParams
logger.info("REQUEST %s %s" % (fnName, prms))
def startApi(self):
""" Initiates the message exchange between the client application and
the TWS/IB Gateway. """
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(),
NOT_CONNECTED.msg())
return
VERSION = 2
msg = make_field(OUT.START_API) \
+ make_field(VERSION) \
+ make_field(self.clientId)
if self.serverVersion() >= MIN_SERVER_VER_OPTIONAL_CAPABILITIES:
msg += make_field(self.optCapab)
self.sendMsg(msg)
def connect(self, host, port, clientId):
"""This function must be called before any other. There is no
feedback for a successful connection, but a subsequent attempt to
connect will return the message \"Already connected.\"
host:str - The host name or IP address of the machine where TWS is
running. Leave blank to connect to the local host.
port:int - Must match the port specified in TWS on the
Configure>API>Socket Port field.
clientId:int - A number used to identify this client connection. All
orders placed/modified from this client will be associated with
this client identifier.
Note: Each client MUST connect with a unique clientId."""
try:
self.host = host
self.port = port
self.clientId = clientId
logger.debug("Connecting to %s:%d w/ id:%d", self.host, self.port, self.clientId)
self.conn = Connection(self.host, self.port)
self.conn.connect()
self.setConnState(EClient.CONNECTING)
#TODO: support async mode
v100prefix = "API\0"
v100version = "v%d..%d" % (MIN_CLIENT_VER, MAX_CLIENT_VER)
#v100version = "v%d..%d" % (MIN_CLIENT_VER, 101)
msg = comm.make_msg(v100version)
logger.debug("msg %s", msg)
msg2 = str.encode(v100prefix, 'ascii') + msg
logger.debug("REQUEST %s", msg2)
self.conn.sendMsg(msg2)
self.decoder = decoder.Decoder(self.wrapper, self.serverVersion())
fields = []
#sometimes I get news before the server version, thus the loop
while len(fields) != 2:
self.decoder.interpret(fields)
buf = self.conn.recvMsg()
logger.debug("ANSWER %s", buf)
if len(buf) > 0:
(size, msg, rest) = comm.read_msg(buf)
logger.debug("size:%d msg:%s rest:%s|", size, msg, rest)
fields = comm.read_fields(msg)
logger.debug("fields %s", fields)
else:
fields = []
(server_version, conn_time) = fields
server_version = int(server_version)
logger.debug("ANSWER Version:%d time:%s", server_version, conn_time)
self.connTime = conn_time
self.serverVersion_ = server_version
self.decoder.serverVersion = self.serverVersion()
self.setConnState(EClient.CONNECTED)
self.reader = reader.EReader(self.conn, self.msg_queue)
self.reader.start() # start thread
logger.info("sent startApi")
self.startApi()
self.wrapper.connectAck()
except socket.error:
if self.wrapper:
self.wrapper.error(NO_VALID_ID, CONNECT_FAIL.code(), CONNECT_FAIL.msg())
logger.info("could not connect")
self.disconnect()
self.done = True
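    # Typical lifecycle (illustrative sketch; MyApp, the host, the port and the
    # clientId below are assumptions -- 7497 is the usual TWS paper-trading port):
    #
    #   app = MyApp()                       # subclass combining EClient + EWrapper
    #   app.connect("127.0.0.1", 7497, clientId=0)
    #   app.run()                           # blocks in the message loop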
def disconnect(self):
"""Call this function to terminate the connections with TWS.
Calling this function does not cancel orders that have already been
sent."""
self.setConnState(EClient.DISCONNECTED)
if self.conn is not None:
logger.info("disconnecting")
self.conn.disconnect()
self.wrapper.connectionClosed()
self.reset()
def isConnected(self):
"""Call this function to check if there is a connection with TWS"""
logger.debug("%s isConn: %s" % (id(self), self.connState))
return EClient.CONNECTED == self.connState
def keyboardInterrupt(self):
#intended to be overloaded
pass
def keyboardInterruptHard(self):
self.nKeybIntHard += 1
if self.nKeybIntHard > 5:
raise SystemExit()
def run(self):
"""This is the function that has the message loop."""
timeStart = time.time()
timeOut = 20
try:
# while not fb_obj.STOP_IB_RUN: # and not self.msg_queue.empty():
while not self.done and (self.isConnected()
or not self.msg_queue.empty()):
if time.time() - timeStart > timeOut: # stop application after timeout
self.keyboardInterrupt()
self.keyboardInterruptHard()
try:
try:
text = self.msg_queue.get(block=True, timeout=0.4)
if len(text) > MAX_MSG_LEN:
self.wrapper.error(NO_VALID_ID, BAD_LENGTH.code(),
"%s:%d:%s" % (BAD_LENGTH.msg(), len(text), text))
# self.disconnect()
# break
except queue.Empty:
logger.debug("queue.get: empty")
else:
fields = comm.read_fields(text)
logger.debug("fields %s", fields)
self.decoder.interpret(fields)
except (KeyboardInterrupt, SystemExit):
logger.info("detected KeyboardInterrupt, SystemExit")
self.keyboardInterrupt()
self.keyboardInterruptHard()
except BadMessage:
logger.info("BadMessage")
# self.conn.disconnect()
logger.debug("conn:%d queue.sz:%d",
self.isConnected(),
self.msg_queue.qsize())
finally:
self.disconnect()
def reqCurrentTime(self):
"""Asks the current system time on the server side."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(),
NOT_CONNECTED.msg())
return
VERSION = 1
msg = make_field(OUT.REQ_CURRENT_TIME) \
+ make_field(VERSION)
self.sendMsg(msg)
def serverVersion(self):
"""Returns the version of the TWS instance to which the API
application is connected."""
return self.serverVersion_
def setServerLogLevel(self, logLevel:int):
"""The default detail level is ERROR. For more details, see API
Logging."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(),
NOT_CONNECTED.msg())
return
VERSION = 1
msg = make_field(OUT.SET_SERVER_LOGLEVEL) \
+ make_field(VERSION) \
+ make_field(logLevel)
self.sendMsg(msg)
def twsConnectionTime(self):
"""Returns the time the API application made a connection to TWS."""
return self.connTime
##########################################################################
################## Market Data
##########################################################################
def reqMktData(self, reqId:TickerId, contract:Contract,
genericTickList:str, snapshot:bool, regulatorySnapshot: bool,
mktDataOptions:TagValueList):
"""Call this function to request market data. The market data
will be returned by the tickPrice and tickSize events.
reqId: TickerId - The ticker id. Must be a unique value. When the
market data returns, it will be identified by this tag. This is
also used when canceling the market data.
        contract:Contract - This structure contains a description of the
            contract for which market data is being requested.
        genericTickList:str - A comma-delimited list of generic tick types.
            Tick types can be found in the Generic Tick Types page.
            Prefixing w/ 'mdoff' indicates that top mkt data shouldn't tick.
            You can specify the news source by postfixing w/ ':<source>'.
            Example: "mdoff,292:FLY+BRF"
        snapshot:bool - Check to return a single snapshot of Market data and
            have the market data subscription cancel. Do not enter any
            genericTickList values if you use snapshots.
regulatorySnapshot: bool - With the US Value Snapshot Bundle for stocks,
regulatory snapshots are available for 0.01 USD each.
mktDataOptions:TagValueList - For internal use only.
Use default value XYZ. """
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(reqId, NOT_CONNECTED.code(),
NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_DELTA_NEUTRAL:
if contract.deltaNeutralContract:
self.wrapper.error(reqId, UPDATE_TWS.code(),
UPDATE_TWS.msg() + " It does not support delta-neutral orders.")
return
if self.serverVersion() < MIN_SERVER_VER_REQ_MKT_DATA_CONID:
if contract.conId > 0:
self.wrapper.error(reqId, UPDATE_TWS.code(),
UPDATE_TWS.msg() + " It does not support conId parameter.")
return
if self.serverVersion() < MIN_SERVER_VER_TRADING_CLASS:
if contract.tradingClass:
self.wrapper.error( reqId, UPDATE_TWS.code(),
UPDATE_TWS.msg() + " It does not support tradingClass parameter in reqMktData.")
return
VERSION = 11
# send req mkt data msg
flds = []
flds += [make_field(OUT.REQ_MKT_DATA),
make_field(VERSION),
make_field(reqId)]
# send contract fields
if self.serverVersion() >= MIN_SERVER_VER_REQ_MKT_DATA_CONID:
flds += [make_field(contract.conId),]
flds += [make_field(contract.symbol),
make_field(contract.secType),
make_field(contract.lastTradeDateOrContractMonth),
make_field(contract.strike),
make_field(contract.right),
make_field(contract.multiplier), # srv v15 and above
make_field(contract.exchange),
make_field(contract.primaryExchange), # srv v14 and above
make_field(contract.currency),
make_field(contract.localSymbol) ] # srv v2 and above
if self.serverVersion() >= MIN_SERVER_VER_TRADING_CLASS:
flds += [make_field(contract.tradingClass),]
# Send combo legs for BAG requests (srv v8 and above)
if contract.secType == "BAG":
comboLegsCount = len(contract.comboLegs) if contract.comboLegs else 0
flds += [make_field(comboLegsCount),]
for comboLeg in contract.comboLegs:
flds += [make_field(comboLeg.conId),
make_field( comboLeg.ratio),
make_field( comboLeg.action),
make_field( comboLeg.exchange)]
if self.serverVersion() >= MIN_SERVER_VER_DELTA_NEUTRAL:
if contract.deltaNeutralContract:
flds += [make_field(True),
make_field(contract.deltaNeutralContract.conId),
make_field(contract.deltaNeutralContract.delta),
make_field(contract.deltaNeutralContract.price)]
else:
flds += [make_field(False),]
flds += [make_field(genericTickList), # srv v31 and above
make_field(snapshot)] # srv v35 and above
if self.serverVersion() >= MIN_SERVER_VER_REQ_SMART_COMPONENTS:
flds += [make_field(regulatorySnapshot),]
# send mktDataOptions parameter
if self.serverVersion() >= MIN_SERVER_VER_LINKING:
#current doc says this part if for "internal use only" -> won't support it
if mktDataOptions:
raise NotImplementedError("not supported")
mktDataOptionsStr = ""
flds += [make_field(mktDataOptionsStr),]
msg = "".join(flds)
self.sendMsg(msg)
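    # Illustrative call (a sketch; the contract field values below are
    # assumptions, not values required by this method):
    #
    #   contract = Contract()
    #   contract.symbol = "EUR"
    #   contract.secType = "CASH"
    #   contract.currency = "USD"
    #   contract.exchange = "IDEALPRO"
    #   app.reqMktData(1001, contract, "", False, False, [])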
def cancelMktData(self, reqId:TickerId):
"""After calling this function, market data for the specified id
will stop flowing.
reqId: TickerId - The ID that was specified in the call to
reqMktData(). """
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(reqId, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
VERSION = 2
# send req mkt data msg
flds = []
flds += [make_field(OUT.CANCEL_MKT_DATA),
make_field(VERSION),
make_field(reqId)]
msg = "".join(flds)
self.sendMsg(msg)
def reqMarketDataType(self, marketDataType:int):
"""The API can receive frozen market data from Trader
Workstation. Frozen market data is the last data recorded in our system.
During normal trading hours, the API receives real-time market data. If
you use this function, you are telling TWS to automatically switch to
frozen market data after the close. Then, before the opening of the next
trading day, market data will automatically switch back to real-time
market data.
marketDataType:int - 1 for real-time streaming market data or 2 for
frozen market data"""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_REQ_MARKET_DATA_TYPE:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(),
UPDATE_TWS.msg() + " It does not support market data type requests.")
return
VERSION = 1
# send req mkt data msg
flds = []
flds += [make_field(OUT.REQ_MARKET_DATA_TYPE),
make_field(VERSION),
make_field(marketDataType)]
msg = "".join(flds)
self.sendMsg(msg)
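    # e.g. app.reqMarketDataType(2)  # sketch: switch to frozen data after the
    #                                # close; use 1 for real-time streaming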
def reqSmartComponents(self, reqId: int, bboExchange: str):
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_REQ_SMART_COMPONENTS:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support smart components request.")
return
msg = make_field(OUT.REQ_SMART_COMPONENTS) \
+ make_field(reqId) \
+ make_field(bboExchange)
self.sendMsg(msg)
def reqMarketRule(self, marketRuleId: int):
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_MARKET_RULES:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support market rule requests.")
return
msg = make_field(OUT.REQ_MARKET_RULE) \
+ make_field(marketRuleId)
self.sendMsg(msg)
def reqTickByTickData(self, reqId: int, contract: Contract, tickType: str,
numberOfTicks: int, ignoreSize: bool):
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_TICK_BY_TICK:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support tick-by-tick data requests.")
return
if self.serverVersion() < MIN_SERVER_VER_TICK_BY_TICK_IGNORE_SIZE:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support ignoreSize and numberOfTicks parameters "
"in tick-by-tick data requests.")
return
msg = make_field(OUT.REQ_TICK_BY_TICK_DATA)\
+ make_field(reqId) \
+ make_field(contract.conId) \
+ make_field(contract.symbol) \
+ make_field(contract.secType) \
+ make_field(contract.lastTradeDateOrContractMonth) \
+ make_field(contract.strike) \
+ make_field(contract.right) \
+ make_field(contract.multiplier) \
+ make_field(contract.exchange) \
+ make_field(contract.primaryExchange) \
+ make_field(contract.currency) \
+ make_field(contract.localSymbol) \
+ make_field(contract.tradingClass) \
+ make_field(tickType)
if self.serverVersion() >= MIN_SERVER_VER_TICK_BY_TICK_IGNORE_SIZE:
msg += make_field(numberOfTicks) \
+ make_field(ignoreSize)
self.sendMsg(msg)
def cancelTickByTickData(self, reqId: int):
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_TICK_BY_TICK:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support tick-by-tick data requests.")
return
msg = make_field(OUT.CANCEL_TICK_BY_TICK_DATA) \
+ make_field(reqId)
self.sendMsg(msg)
##########################################################################
################## Options
##########################################################################
def calculateImpliedVolatility(self, reqId:TickerId, contract:Contract,
optionPrice:float, underPrice:float,
implVolOptions:TagValueList):
"""Call this function to calculate volatility for a supplied
option price and underlying price. Result will be delivered
via EWrapper.tickOptionComputation()
reqId:TickerId - The request id.
contract:Contract - Describes the contract.
optionPrice:double - The price of the option.
underPrice:double - Price of the underlying."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(reqId, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_REQ_CALC_IMPLIED_VOLAT:
self.wrapper.error( reqId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support calculateImpliedVolatility req.")
return
if self.serverVersion() < MIN_SERVER_VER_TRADING_CLASS:
if contract.tradingClass:
self.wrapper.error( reqId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support tradingClass parameter in calculateImpliedVolatility.")
return
VERSION = 3
# send req mkt data msg
flds = []
flds += [make_field(OUT.REQ_CALC_IMPLIED_VOLAT),
make_field(VERSION),
make_field(reqId),
# send contract fields
make_field(contract.conId),
make_field(contract.symbol),
make_field(contract.secType),
make_field(contract.lastTradeDateOrContractMonth),
make_field(contract.strike),
make_field(contract.right),
make_field(contract.multiplier),
make_field(contract.exchange),
make_field(contract.primaryExchange),
make_field(contract.currency),
make_field(contract.localSymbol)]
if self.serverVersion() >= MIN_SERVER_VER_TRADING_CLASS:
flds += [make_field(contract.tradingClass),]
flds += [ make_field( optionPrice),
make_field( underPrice)]
if self.serverVersion() >= MIN_SERVER_VER_LINKING:
implVolOptStr = ""
tagValuesCount = len(implVolOptions) if implVolOptions else 0
if implVolOptions:
for implVolOpt in implVolOptions:
implVolOptStr += str(implVolOpt)
flds += [make_field(tagValuesCount),
make_field(implVolOptStr)]
msg = "".join(flds)
self.sendMsg(msg)
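    # Illustrative call (sketch; the reqId, prices and the option contract are
    # assumptions built elsewhere):
    #   app.calculateImpliedVolatility(5001, opt_contract, 6.5, 430.0, [])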
def cancelCalculateImpliedVolatility(self, reqId:TickerId):
"""Call this function to cancel a request to calculate
volatility for a supplied option price and underlying price.
reqId:TickerId - The request ID. """
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(reqId, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_REQ_CALC_IMPLIED_VOLAT:
self.wrapper.error( reqId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support calculateImpliedVolatility req.")
return
VERSION = 1
msg = make_field(OUT.CANCEL_CALC_IMPLIED_VOLAT) \
+ make_field(VERSION) \
+ make_field(reqId)
self.sendMsg(msg)
def calculateOptionPrice(self, reqId:TickerId, contract:Contract,
volatility:float, underPrice:float,
optPrcOptions:TagValueList):
"""Call this function to calculate option price and greek values
for a supplied volatility and underlying price.
reqId:TickerId - The ticker ID.
contract:Contract - Describes the contract.
volatility:double - The volatility.
underPrice:double - Price of the underlying."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(reqId, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
        if self.serverVersion() < MIN_SERVER_VER_REQ_CALC_OPTION_PRICE:
            self.wrapper.error( reqId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
                    " It does not support calculateOptionPrice req.")
return
if self.serverVersion() < MIN_SERVER_VER_TRADING_CLASS:
if contract.tradingClass:
                self.wrapper.error( reqId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
                    " It does not support tradingClass parameter in calculateOptionPrice.")
return
VERSION = 3
# send req mkt data msg
flds = []
flds += [make_field(OUT.REQ_CALC_OPTION_PRICE),
make_field(VERSION),
make_field(reqId),
# send contract fields
make_field(contract.conId),
make_field(contract.symbol),
make_field(contract.secType),
make_field(contract.lastTradeDateOrContractMonth),
make_field(contract.strike),
make_field(contract.right),
make_field(contract.multiplier),
make_field(contract.exchange),
make_field(contract.primaryExchange),
make_field(contract.currency),
make_field(contract.localSymbol)]
if self.serverVersion() >= MIN_SERVER_VER_TRADING_CLASS:
flds += [make_field(contract.tradingClass),]
flds += [ make_field(volatility),
make_field(underPrice)]
if self.serverVersion() >= MIN_SERVER_VER_LINKING:
optPrcOptStr = ""
tagValuesCount = len(optPrcOptions) if optPrcOptions else 0
if optPrcOptions:
for implVolOpt in optPrcOptions:
optPrcOptStr += str(implVolOpt)
flds += [make_field(tagValuesCount),
make_field(optPrcOptStr)]
msg = "".join(flds)
self.sendMsg(msg)
def cancelCalculateOptionPrice(self, reqId:TickerId):
"""Call this function to cancel a request to calculate the option
price and greek values for a supplied volatility and underlying price.
reqId:TickerId - The request ID. """
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(reqId, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_REQ_CALC_IMPLIED_VOLAT:
            self.wrapper.error( reqId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
                    " It does not support calculateOptionPrice req.")
return
VERSION = 1
msg = make_field(OUT.CANCEL_CALC_OPTION_PRICE) \
+ make_field(VERSION) \
+ make_field(reqId)
self.sendMsg(msg)
def exerciseOptions(self, reqId:TickerId, contract:Contract,
exerciseAction:int, exerciseQuantity:int,
account:str, override:int):
"""reqId:TickerId - The ticker id. multipleust be a unique value.
contract:Contract - This structure contains a description of the
contract to be exercised
exerciseAction:int - Specifies whether you want the option to lapse
or be exercised.
Values are 1 = exercise, 2 = lapse.
exerciseQuantity:int - The quantity you want to exercise.
account:str - destination account
override:int - Specifies whether your setting will override the system's
natural action. For example, if your action is "exercise" and the
option is not in-the-money, by natural action the option would not
exercise. If you have override set to "yes" the natural action would
be overridden and the out-of-the money option would be exercised.
Values are: 0 = no, 1 = yes."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(reqId, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_TRADING_CLASS:
if contract.tradingClass:
                self.wrapper.error( reqId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
                        " It does not support conId, multiplier, tradingClass parameters in exerciseOptions.")
return
VERSION = 2
# send req mkt data msg
flds = []
flds += [make_field(OUT.EXERCISE_OPTIONS),
make_field(VERSION),
make_field(reqId)]
# send contract fields
if self.serverVersion() >= MIN_SERVER_VER_TRADING_CLASS:
flds += [make_field(contract.conId),]
flds += [make_field(contract.symbol),
make_field(contract.secType),
make_field(contract.lastTradeDateOrContractMonth),
make_field(contract.strike),
make_field(contract.right),
make_field(contract.multiplier),
make_field(contract.exchange),
make_field(contract.currency),
make_field(contract.localSymbol)]
if self.serverVersion() >= MIN_SERVER_VER_TRADING_CLASS:
flds += [make_field(contract.tradingClass),]
flds += [make_field(exerciseAction),
make_field(exerciseQuantity),
make_field(account),
make_field(override)]
msg = "".join(flds)
self.sendMsg(msg)
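    # Illustrative call (sketch; all argument values are assumptions):
    #   app.exerciseOptions(4001, opt_contract, 1, 10, "", 0)  # exercise 10
    #                                                          # contracts, no override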
#########################################################################
################## Orders
########################################################################
def placeOrder(self, orderId:OrderId , contract:Contract, order:Order):
"""Call this function to place an order. The order status will
be returned by the orderStatus event.
orderId:OrderId - The order id. You must specify a unique value. When the
            order status returns, it will be identified by this tag.
This tag is also used when canceling the order.
contract:Contract - This structure contains a description of the
contract which is being traded.
        order:Order - This structure contains the details of the order.
Note: Each client MUST connect with a unique clientId."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(orderId, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_DELTA_NEUTRAL:
if contract.deltaNeutralContract:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support delta-neutral orders.")
return
if self.serverVersion() < MIN_SERVER_VER_SCALE_ORDERS2:
if order.scaleSubsLevelSize != UNSET_INTEGER:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support Subsequent Level Size for Scale orders.")
return
if self.serverVersion() < MIN_SERVER_VER_ALGO_ORDERS:
if order.algoStrategy:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support algo orders.")
return
if self.serverVersion() < MIN_SERVER_VER_NOT_HELD:
if order.notHeld:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support notHeld parameter.")
return
if self.serverVersion() < MIN_SERVER_VER_SEC_ID_TYPE:
if contract.secIdType or contract.secId:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support secIdType and secId parameters.")
return
if self.serverVersion() < MIN_SERVER_VER_PLACE_ORDER_CONID:
if contract.conId and contract.conId > 0:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support conId parameter.")
return
if self.serverVersion() < MIN_SERVER_VER_SSHORTX:
if order.exemptCode != -1:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support exemptCode parameter.")
return
if self.serverVersion() < MIN_SERVER_VER_SSHORTX:
if contract.comboLegs:
for comboLeg in contract.comboLegs:
if comboLeg.exemptCode != -1:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support exemptCode parameter.")
return
if self.serverVersion() < MIN_SERVER_VER_HEDGE_ORDERS:
if order.hedgeType:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support hedge orders.")
return
if self.serverVersion() < MIN_SERVER_VER_OPT_OUT_SMART_ROUTING:
if order.optOutSmartRouting:
self.wrapper.error( orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support optOutSmartRouting parameter.")
return
if self.serverVersion() < MIN_SERVER_VER_DELTA_NEUTRAL_CONID:
if order.deltaNeutralConId > 0 \
or order.deltaNeutralSettlingFirm \
or order.deltaNeutralClearingAccount \
or order.deltaNeutralClearingIntent:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support deltaNeutral parameters: ConId, SettlingFirm, ClearingAccount, ClearingIntent.")
return
if self.serverVersion() < MIN_SERVER_VER_DELTA_NEUTRAL_OPEN_CLOSE:
if order.deltaNeutralOpenClose \
or order.deltaNeutralShortSale \
or order.deltaNeutralShortSaleSlot > 0 \
or order.deltaNeutralDesignatedLocation:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support deltaNeutral parameters: OpenClose, ShortSale, ShortSaleSlot, DesignatedLocation.")
return
if self.serverVersion() < MIN_SERVER_VER_SCALE_ORDERS3:
if order.scalePriceIncrement > 0 and order.scalePriceIncrement != UNSET_DOUBLE:
if order.scalePriceAdjustValue != UNSET_DOUBLE \
or order.scalePriceAdjustInterval != UNSET_INTEGER \
or order.scaleProfitOffset != UNSET_DOUBLE \
or order.scaleAutoReset \
or order.scaleInitPosition != UNSET_INTEGER \
or order.scaleInitFillQty != UNSET_INTEGER \
or order.scaleRandomPercent:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support Scale order parameters: PriceAdjustValue, PriceAdjustInterval, " +
"ProfitOffset, AutoReset, InitPosition, InitFillQty and RandomPercent")
return
if self.serverVersion() < MIN_SERVER_VER_ORDER_COMBO_LEGS_PRICE and contract.secType == "BAG":
if order.orderComboLegs:
for orderComboLeg in order.orderComboLegs:
if orderComboLeg.price != UNSET_DOUBLE:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support per-leg prices for order combo legs.")
return
if self.serverVersion() < MIN_SERVER_VER_TRAILING_PERCENT:
if order.trailingPercent != UNSET_DOUBLE:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support trailing percent parameter")
return
if self.serverVersion() < MIN_SERVER_VER_TRADING_CLASS:
if contract.tradingClass:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support tradingClass parameter in placeOrder.")
return
if self.serverVersion() < MIN_SERVER_VER_SCALE_TABLE:
if order.scaleTable or order.activeStartTime or order.activeStopTime:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support scaleTable, activeStartTime and activeStopTime parameters")
return
if self.serverVersion() < MIN_SERVER_VER_ALGO_ID:
if order.algoId:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support algoId parameter")
return
if self.serverVersion() < MIN_SERVER_VER_ORDER_SOLICITED:
if order.solicited:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support order solicited parameter.")
return
if self.serverVersion() < MIN_SERVER_VER_MODELS_SUPPORT:
if order.modelCode:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support model code parameter.")
return
if self.serverVersion() < MIN_SERVER_VER_EXT_OPERATOR:
if order.extOperator:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support ext operator parameter")
return
if self.serverVersion() < MIN_SERVER_VER_SOFT_DOLLAR_TIER:
if order.softDollarTier.name or order.softDollarTier.val:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support soft dollar tier")
return
if self.serverVersion() < MIN_SERVER_VER_CASH_QTY:
if order.cashQty:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support cash quantity parameter")
return
if self.serverVersion() < MIN_SERVER_VER_DECISION_MAKER and (order.mifid2DecisionMaker != "" or order.mifid2DecisionAlgo != ""):
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support MIFID II decision maker parameters")
return
if self.serverVersion() < MIN_SERVER_VER_MIFID_EXECUTION and (order.mifid2ExecutionTrader != "" or order.mifid2ExecutionAlgo != ""):
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support MIFID II execution parameters")
return
if self.serverVersion() < MIN_SERVER_VER_AUTO_PRICE_FOR_HEDGE and order.dontUseAutoPriceForHedge:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support dontUseAutoPriceForHedge parameter")
return
if self.serverVersion() < MIN_SERVER_VER_ORDER_CONTAINER and order.isOmsContainer:
self.wrapper.error(orderId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support oms container parameter")
return
VERSION = 27 if (self.serverVersion() < MIN_SERVER_VER_NOT_HELD) else 45
# send place order msg
flds = []
flds += [make_field(OUT.PLACE_ORDER)]
if self.serverVersion() < MIN_SERVER_VER_ORDER_CONTAINER:
flds += [make_field(VERSION)]
flds += [make_field(orderId)]
# send contract fields
if self.serverVersion() >= MIN_SERVER_VER_PLACE_ORDER_CONID:
flds.append(make_field( contract.conId))
flds += [make_field( contract.symbol),
make_field( contract.secType),
make_field( contract.lastTradeDateOrContractMonth),
make_field( contract.strike),
make_field( contract.right),
make_field( contract.multiplier), # srv v15 and above
make_field( contract.exchange),
make_field( contract.primaryExchange), # srv v14 and above
make_field( contract.currency),
make_field( contract.localSymbol)] # srv v2 and above
if self.serverVersion() >= MIN_SERVER_VER_TRADING_CLASS:
flds.append(make_field( contract.tradingClass))
if self.serverVersion() >= MIN_SERVER_VER_SEC_ID_TYPE:
flds += [make_field( contract.secIdType),
make_field( contract.secId)]
# send main order fields
flds.append(make_field( order.action))
if self.serverVersion() >= MIN_SERVER_VER_FRACTIONAL_POSITIONS:
flds.append(make_field(order.totalQuantity))
else:
flds.append(make_field(int(order.totalQuantity)))
flds.append(make_field(order.orderType))
if self.serverVersion() < MIN_SERVER_VER_ORDER_COMBO_LEGS_PRICE:
flds.append(make_field(
order.lmtPrice if order.lmtPrice != UNSET_DOUBLE else 0))
else:
flds.append(make_field_handle_empty( order.lmtPrice))
if self.serverVersion() < MIN_SERVER_VER_TRAILING_PERCENT:
flds.append(make_field(
order.auxPrice if order.auxPrice != UNSET_DOUBLE else 0))
else:
flds.append(make_field_handle_empty( order.auxPrice))
# send extended order fields
flds += [make_field( order.tif),
make_field( order.ocaGroup),
make_field( order.account),
make_field( order.openClose),
make_field( order.origin),
make_field( order.orderRef),
make_field( order.transmit),
make_field( order.parentId), # srv v4 and above
make_field( order.blockOrder), # srv v5 and above
make_field( order.sweepToFill), # srv v5 and above
make_field( order.displaySize), # srv v5 and above
make_field( order.triggerMethod), # srv v5 and above
make_field( order.outsideRth), # srv v5 and above
make_field( order.hidden)] # srv v7 and above
# Send combo legs for BAG requests (srv v8 and above)
if contract.secType == "BAG":
comboLegsCount = len(contract.comboLegs) if contract.comboLegs else 0
flds.append(make_field(comboLegsCount))
if comboLegsCount > 0:
for comboLeg in contract.comboLegs:
assert comboLeg
flds += [make_field(comboLeg.conId),
make_field( comboLeg.ratio),
make_field( comboLeg.action),
make_field( comboLeg.exchange),
make_field( comboLeg.openClose),
make_field( comboLeg.shortSaleSlot), #srv v35 and above
make_field( comboLeg.designatedLocation)] # srv v35 and above
if self.serverVersion() >= MIN_SERVER_VER_SSHORTX_OLD:
flds.append(make_field(comboLeg.exemptCode))
# Send order combo legs for BAG requests
if self.serverVersion() >= MIN_SERVER_VER_ORDER_COMBO_LEGS_PRICE and contract.secType == "BAG":
orderComboLegsCount = len(order.orderComboLegs) if order.orderComboLegs else 0
flds.append(make_field( orderComboLegsCount))
if orderComboLegsCount:
for orderComboLeg in order.orderComboLegs:
assert orderComboLeg
flds.append(make_field_handle_empty( orderComboLeg.price))
if self.serverVersion() >= MIN_SERVER_VER_SMART_COMBO_ROUTING_PARAMS and contract.secType == "BAG":
smartComboRoutingParamsCount = len(order.smartComboRoutingParams) if order.smartComboRoutingParams else 0
flds.append(make_field( smartComboRoutingParamsCount))
if smartComboRoutingParamsCount > 0:
for tagValue in order.smartComboRoutingParams:
flds += [make_field(tagValue.tag),
make_field(tagValue.value)]
######################################################################
# Send the shares allocation.
#
# This specifies the number of order shares allocated to each Financial
# Advisor managed account. The format of the allocation string is as
# follows:
# <account_code1>/<number_shares1>,<account_code2>/<number_shares2>,...N
# E.g.
# To allocate 20 shares of a 100 share order to account 'U101' and the
# residual 80 to account 'U203' enter the following share allocation string:
# U101/20,U203/80
#####################################################################
# send deprecated sharesAllocation field
flds += [make_field( ""), # srv v9 and above
make_field( order.discretionaryAmt), # srv v10 and above
make_field( order.goodAfterTime), # srv v11 and above
make_field( order.goodTillDate), # srv v12 and above
make_field( order.faGroup), # srv v13 and above
make_field( order.faMethod), # srv v13 and above
make_field( order.faPercentage), # srv v13 and above
make_field( order.faProfile)] # srv v13 and above
if self.serverVersion() >= MIN_SERVER_VER_MODELS_SUPPORT:
flds.append(make_field( order.modelCode))
# institutional short saleslot data (srv v18 and above)
flds += [make_field( order.shortSaleSlot), # 0 for retail, 1 or 2 for institutions
make_field( order.designatedLocation)] # populate only when shortSaleSlot = 2.
if self.serverVersion() >= MIN_SERVER_VER_SSHORTX_OLD:
flds.append(make_field( order.exemptCode))
# not needed anymore
#bool isVolOrder = (order.orderType.CompareNoCase("VOL") == 0)
# srv v19 and above fields
flds.append(make_field( order.ocaType))
#if( self.serverVersion() < 38) {
# will never happen
# send( /* order.rthOnly */ false);
#}
flds += [make_field( order.rule80A),
make_field( order.settlingFirm),
make_field( order.allOrNone),
make_field_handle_empty( order.minQty),
make_field_handle_empty( order.percentOffset),
make_field( order.eTradeOnly),
make_field( order.firmQuoteOnly),
make_field_handle_empty( order.nbboPriceCap),
make_field( order.auctionStrategy), # AUCTION_MATCH, AUCTION_IMPROVEMENT, AUCTION_TRANSPARENT
make_field_handle_empty( order.startingPrice),
make_field_handle_empty( order.stockRefPrice),
make_field_handle_empty( order.delta),
make_field_handle_empty( order.stockRangeLower),
make_field_handle_empty( order.stockRangeUpper),
make_field( order.overridePercentageConstraints), #srv v22 and above
# Volatility orders (srv v26 and above)
make_field_handle_empty( order.volatility),
make_field_handle_empty( order.volatilityType),
make_field( order.deltaNeutralOrderType), # srv v28 and above
make_field_handle_empty( order.deltaNeutralAuxPrice)] # srv v28 and above
if self.serverVersion() >= MIN_SERVER_VER_DELTA_NEUTRAL_CONID and order.deltaNeutralOrderType:
flds += [make_field( order.deltaNeutralConId),
make_field( order.deltaNeutralSettlingFirm),
make_field( order.deltaNeutralClearingAccount),
make_field( order.deltaNeutralClearingIntent)]
if self.serverVersion() >= MIN_SERVER_VER_DELTA_NEUTRAL_OPEN_CLOSE and order.deltaNeutralOrderType:
flds += [make_field( order.deltaNeutralOpenClose),
make_field( order.deltaNeutralShortSale),
make_field( order.deltaNeutralShortSaleSlot),
make_field( order.deltaNeutralDesignatedLocation)]
flds += [make_field( order.continuousUpdate),
make_field_handle_empty( order.referencePriceType),
make_field_handle_empty( order.trailStopPrice)] # srv v30 and above
if self.serverVersion() >= MIN_SERVER_VER_TRAILING_PERCENT:
flds.append(make_field_handle_empty( order.trailingPercent))
# SCALE orders
if self.serverVersion() >= MIN_SERVER_VER_SCALE_ORDERS2:
flds += [make_field_handle_empty( order.scaleInitLevelSize),
make_field_handle_empty( order.scaleSubsLevelSize)]
else:
# srv v35 and above)
flds += [make_field( ""), # for not supported scaleNumComponents
make_field_handle_empty(order.scaleInitLevelSize)] # for scaleComponentSize
flds.append(make_field_handle_empty( order.scalePriceIncrement))
if self.serverVersion() >= MIN_SERVER_VER_SCALE_ORDERS3 \
and order.scalePriceIncrement != UNSET_DOUBLE \
and order.scalePriceIncrement > 0.0:
flds += [make_field_handle_empty( order.scalePriceAdjustValue),
make_field_handle_empty( order.scalePriceAdjustInterval),
make_field_handle_empty( order.scaleProfitOffset),
make_field( order.scaleAutoReset),
make_field_handle_empty( order.scaleInitPosition),
make_field_handle_empty( order.scaleInitFillQty),
make_field( order.scaleRandomPercent)]
if self.serverVersion() >= MIN_SERVER_VER_SCALE_TABLE:
flds += [make_field( order.scaleTable),
make_field( order.activeStartTime),
make_field( order.activeStopTime)]
# HEDGE orders
if self.serverVersion() >= MIN_SERVER_VER_HEDGE_ORDERS:
flds.append(make_field( order.hedgeType))
if order.hedgeType:
flds.append(make_field( order.hedgeParam))
if self.serverVersion() >= MIN_SERVER_VER_OPT_OUT_SMART_ROUTING:
flds.append(make_field( order.optOutSmartRouting))
if self.serverVersion() >= MIN_SERVER_VER_PTA_ORDERS:
flds += [make_field( order.clearingAccount),
make_field( order.clearingIntent)]
if self.serverVersion() >= MIN_SERVER_VER_NOT_HELD:
flds.append(make_field( order.notHeld))
if self.serverVersion() >= MIN_SERVER_VER_DELTA_NEUTRAL:
if contract.deltaNeutralContract:
flds += [make_field(True),
make_field(contract.deltaNeutralContract.conId),
make_field(contract.deltaNeutralContract.delta),
make_field(contract.deltaNeutralContract.price)]
else:
flds.append(make_field(False))
if self.serverVersion() >= MIN_SERVER_VER_ALGO_ORDERS:
flds.append(make_field( order.algoStrategy))
if order.algoStrategy:
algoParamsCount = len(order.algoParams) if order.algoParams else 0
flds.append(make_field(algoParamsCount))
if algoParamsCount > 0:
for algoParam in order.algoParams:
flds += [make_field(algoParam.tag),
make_field(algoParam.value)]
if self.serverVersion() >= MIN_SERVER_VER_ALGO_ID:
flds.append(make_field( order.algoId))
flds.append(make_field( order.whatIf)) # srv v36 and above
# send miscOptions parameter
if self.serverVersion() >= MIN_SERVER_VER_LINKING:
miscOptionsStr = ""
if order.orderMiscOptions:
for tagValue in order.orderMiscOptions:
miscOptionsStr += str(tagValue)
flds.append(make_field( miscOptionsStr))
if self.serverVersion() >= MIN_SERVER_VER_ORDER_SOLICITED:
flds.append(make_field(order.solicited))
if self.serverVersion() >= MIN_SERVER_VER_RANDOMIZE_SIZE_AND_PRICE:
flds += [make_field(order.randomizeSize),
make_field(order.randomizePrice)]
if self.serverVersion() >= MIN_SERVER_VER_PEGGED_TO_BENCHMARK:
if order.orderType == "PEG BENCH":
flds += [make_field(order.referenceContractId),
make_field(order.isPeggedChangeAmountDecrease),
make_field(order.peggedChangeAmount),
make_field(order.referenceChangeAmount),
make_field(order.referenceExchangeId)]
flds.append(make_field(len(order.conditions)))
if len(order.conditions) > 0:
for cond in order.conditions:
flds.append(make_field(cond.type()))
flds += cond.make_fields()
flds += [make_field(order.conditionsIgnoreRth),
make_field(order.conditionsCancelOrder)]
flds += [make_field(order.adjustedOrderType),
make_field(order.triggerPrice),
make_field(order.lmtPriceOffset),
make_field(order.adjustedStopPrice),
make_field(order.adjustedStopLimitPrice),
make_field(order.adjustedTrailingAmount),
make_field(order.adjustableTrailingUnit)]
if self.serverVersion() >= MIN_SERVER_VER_EXT_OPERATOR:
flds.append(make_field( order.extOperator))
if self.serverVersion() >= MIN_SERVER_VER_SOFT_DOLLAR_TIER:
flds += [make_field(order.softDollarTier.name),
make_field(order.softDollarTier.val)]
if self.serverVersion() >= MIN_SERVER_VER_CASH_QTY:
flds.append(make_field( order.cashQty))
if self.serverVersion() >= MIN_SERVER_VER_DECISION_MAKER:
flds.append(make_field( order.mifid2DecisionMaker))
flds.append(make_field( order.mifid2DecisionAlgo))
if self.serverVersion() >= MIN_SERVER_VER_MIFID_EXECUTION:
flds.append(make_field( order.mifid2ExecutionTrader))
flds.append(make_field( order.mifid2ExecutionAlgo))
if self.serverVersion() >= MIN_SERVER_VER_AUTO_PRICE_FOR_HEDGE:
flds.append(make_field(order.dontUseAutoPriceForHedge))
if self.serverVersion() >= MIN_SERVER_VER_ORDER_CONTAINER:
flds.append(make_field(order.isOmsContainer))
if self.serverVersion() >= MIN_SERVER_VER_D_PEG_ORDERS:
flds.append(make_field(order.discretionaryUpToLimitPrice))
msg = "".join(flds)
self.sendMsg(msg)
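    # Illustrative order placement (a sketch; the Order field values below are
    # assumptions, and nextOrderId is assumed to be tracked by the application
    # from the EWrapper.nextValidId callback):
    #
    #   order = Order()
    #   order.action = "BUY"
    #   order.orderType = "LMT"
    #   order.totalQuantity = 100
    #   order.lmtPrice = 50.0
    #   app.placeOrder(app.nextOrderId, contract, order)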
def cancelOrder(self, orderId:OrderId):
"""Call this function to cancel an order.
orderId:OrderId - The order ID that was specified previously in the call
to placeOrder()"""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
VERSION = 1
msg = make_field(OUT.CANCEL_ORDER) \
+ make_field(VERSION) \
+ make_field(orderId)
self.sendMsg(msg)
def reqOpenOrders(self):
"""Call this function to request the open orders that were
placed from this client. Each open order will be fed back through the
openOrder() and orderStatus() functions on the EWrapper.
Note: The client with a clientId of 0 will also receive the TWS-owned
open orders. These orders will be associated with the client and a new
orderId will be generated. This association will persist over multiple
API and TWS sessions. """
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
VERSION = 1
msg = make_field(OUT.REQ_OPEN_ORDERS) \
+ make_field(VERSION)
self.sendMsg(msg)
def reqAutoOpenOrders(self, bAutoBind:bool):
"""Call this function to request that newly created TWS orders
be implicitly associated with the client. When a new TWS order is
created, the order will be associated with the client, and fed back
through the openOrder() and orderStatus() functions on the EWrapper.
Note: This request can only be made from a client with clientId of 0.
bAutoBind: If set to TRUE, newly created TWS orders will be implicitly
associated with the client. If set to FALSE, no association will be
made."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
VERSION = 1
msg = make_field(OUT.REQ_AUTO_OPEN_ORDERS) \
+ make_field(VERSION) \
+ make_field(bAutoBind)
self.sendMsg(msg)
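    # e.g. app.reqAutoOpenOrders(True)  # sketch: only honored for clientId 0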
def reqAllOpenOrders(self):
"""Call this function to request the open orders placed from all
clients and also from TWS. Each open order will be fed back through the
openOrder() and orderStatus() functions on the EWrapper.
Note: No association is made between the returned orders and the
requesting client."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
VERSION = 1
msg = make_field(OUT.REQ_ALL_OPEN_ORDERS) \
+ make_field(VERSION)
self.sendMsg(msg)
def reqGlobalCancel(self):
"""Use this function to cancel all open orders globally. It
cancels both API and TWS open orders.
If the order was created in TWS, it also gets canceled. If the order
was initiated in the API, it also gets canceled."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
VERSION = 1
msg = make_field(OUT.REQ_GLOBAL_CANCEL) \
+ make_field(VERSION)
self.sendMsg(msg)
def reqIds(self, numIds:int):
"""Call this function to request from TWS the next valid ID that
can be used when placing an order. After calling this function, the
nextValidId() event will be triggered, and the id returned is that next
valid ID. That ID will reflect any autobinding that has occurred (which
generates new IDs and increments the next valid ID therein).
numIds:int - deprecated"""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
VERSION = 1
msg = make_field(OUT.REQ_IDS) \
+ make_field(VERSION) \
+ make_field(numIds)
self.sendMsg(msg)
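    # e.g. app.reqIds(-1)  # sketch: numIds is deprecated; any value triggers
    #                      # the EWrapper.nextValidId callback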
#########################################################################
################## Account and Portfolio
########################################################################
def reqAccountUpdates(self, subscribe:bool, acctCode:str):
"""Call this function to start getting account values, portfolio,
and last update time information via EWrapper.updateAccountValue(),
        EWrapper.updatePortfolio() and EWrapper.updateAccountTime().
        subscribe:bool - If set to TRUE, the client will start receiving account
            and portfolio updates. If set to FALSE, the client will stop
            receiving this information.
        acctCode:str - The account code for which to receive account and
portfolio updates."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
VERSION = 2
flds = []
flds += [make_field(OUT.REQ_ACCT_DATA),
make_field(VERSION),
make_field(subscribe), # TRUE = subscribe, FALSE = unsubscribe.
make_field(acctCode)] # srv v9 and above, the account code. This will only be used for FA clients
msg = "".join(flds)
self.sendMsg(msg)
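    # e.g. app.reqAccountUpdates(True, "")  # sketch: an empty acctCode targets
    #                                       # the default (non-FA) account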
def reqAccountSummary(self, reqId:int, groupName:str, tags:str):
"""Call this method to request and keep up to date the data that appears
on the TWS Account Window Summary tab. The data is returned by
accountSummary().
Note: This request is designed for an FA managed account but can be
used for any multi-account structure.
        reqId:int - The ID of the data request. Ensures that responses are matched
            to requests if several requests are in process.
        groupName:str - Set to All to return account summary data for all
accounts, or set to a specific Advisor Account Group name that has
already been created in TWS Global Configuration.
tags:str - A comma-separated list of account tags. Available tags are:
            AccountType,
NetLiquidation,
TotalCashValue - Total cash including futures pnl
SettledCash - For cash accounts, this is the same as
TotalCashValue
AccruedCash - Net accrued interest
BuyingPower - The maximum amount of marginable US stocks the
account can buy
EquityWithLoanValue - Cash + stocks + bonds + mutual funds
PreviousDayEquityWithLoanValue,
GrossPositionValue - The sum of the absolute value of all stock
and equity option positions
RegTEquity,
RegTMargin,
SMA - Special Memorandum Account
InitMarginReq,
MaintMarginReq,
AvailableFunds,
ExcessLiquidity,
Cushion - Excess liquidity as a percentage of net liquidation value
FullInitMarginReq,
FullMaintMarginReq,
FullAvailableFunds,
FullExcessLiquidity,
LookAheadNextChange - Time when look-ahead values take effect
LookAheadInitMarginReq,
LookAheadMaintMarginReq,
LookAheadAvailableFunds,
LookAheadExcessLiquidity,
HighestSeverity - A measure of how close the account is to liquidation
DayTradesRemaining - The Number of Open/Close trades a user
could put on before Pattern Day Trading is detected. A value of "-1"
means that the user can put on unlimited day trades.
Leverage - GrossPositionValue / NetLiquidation
$LEDGER - Single flag to relay all cash balance tags*, only in base
currency.
$LEDGER:CURRENCY - Single flag to relay all cash balance tags*, only in
the specified currency.
$LEDGER:ALL - Single flag to relay all cash balance tags* in all
currencies."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
VERSION = 1
msg = make_field(OUT.REQ_ACCOUNT_SUMMARY) \
+ make_field(VERSION) \
+ make_field(reqId) \
+ make_field(groupName) \
+ make_field(tags)
self.sendMsg(msg)
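    # Usage sketch (illustrative only, not part of the API): `app` stands for a
    # hypothetical, already-connected EClient/EWrapper subclass, and 9001 is an
    # arbitrary unique request id chosen by the caller. The tags string reuses
    # tags documented above.
    #
    #   app.reqAccountSummary(9001, "All", "NetLiquidation,TotalCashValue,$LEDGER")
    #   # ... values arrive via the wrapper's accountSummary() callback ...
    #   app.cancelAccountSummary(9001)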
def cancelAccountSummary(self, reqId:int):
"""Cancels the request for Account Window Summary tab data.
reqId:int - The ID of the data request being canceled."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
VERSION = 1
msg = make_field(OUT.CANCEL_ACCOUNT_SUMMARY) \
+ make_field(VERSION) \
+ make_field(reqId)
self.sendMsg(msg)
def reqPositions(self):
"""Requests real-time position data for all accounts."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_POSITIONS:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support positions request.")
return
VERSION = 1
msg = make_field(OUT.REQ_POSITIONS) \
+ make_field(VERSION)
self.sendMsg(msg)
def cancelPositions(self):
"""Cancels real-time position updates."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_POSITIONS:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support positions request.")
return
VERSION = 1
msg = make_field(OUT.CANCEL_POSITIONS) \
+ make_field(VERSION)
self.sendMsg(msg)
def reqPositionsMulti(self, reqId:int, account:str, modelCode:str):
"""Requests positions for account and/or model.
Results are delivered via EWrapper.positionMulti() and
EWrapper.positionMultiEnd() """
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_MODELS_SUPPORT:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support positions multi request.")
return
VERSION = 1
msg = make_field(OUT.REQ_POSITIONS_MULTI) \
+ make_field(VERSION) \
+ make_field(reqId) \
+ make_field(account) \
+ make_field(modelCode)
self.sendMsg(msg)
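    # Usage sketch (illustrative): request positions for one account/model pair.
    # "DU123456" and "TARGET2024" are made-up account and model codes; `app` is
    # a hypothetical connected client as above.
    #
    #   app.reqPositionsMulti(9002, "DU123456", "TARGET2024")
    #   # ... rows arrive via positionMulti(), then positionMultiEnd() ...
    #   app.cancelPositionsMulti(9002)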
def cancelPositionsMulti(self, reqId:int):
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_MODELS_SUPPORT:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support cancel positions multi request.")
return
VERSION = 1
msg = make_field(OUT.CANCEL_POSITIONS_MULTI) \
+ make_field(VERSION) \
            + make_field(reqId)
self.sendMsg(msg)
def reqAccountUpdatesMulti(self, reqId: int, account:str, modelCode:str,
ledgerAndNLV:bool):
"""Requests account updates for account and/or model."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_MODELS_SUPPORT:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support account updates multi request.")
return
VERSION = 1
msg = make_field(OUT.REQ_ACCOUNT_UPDATES_MULTI) \
+ make_field(VERSION) \
+ make_field(reqId) \
+ make_field(account) \
+ make_field(modelCode) \
+ make_field(ledgerAndNLV)
self.sendMsg(msg)
def cancelAccountUpdatesMulti(self, reqId:int):
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_MODELS_SUPPORT:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support cancel account updates multi request.")
return
VERSION = 1
msg = make_field(OUT.CANCEL_ACCOUNT_UPDATES_MULTI) \
+ make_field(VERSION) \
            + make_field(reqId)
self.sendMsg(msg)
#########################################################################
################## Daily PnL
#########################################################################
def reqPnL(self, reqId: int, account: str, modelCode: str):
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_PNL:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support PnL request.")
return
msg = make_field(OUT.REQ_PNL) \
+ make_field(reqId) \
+ make_field(account) \
+ make_field(modelCode)
self.sendMsg(msg)
def cancelPnL(self, reqId: int):
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_PNL:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support PnL request.")
return
msg = make_field(OUT.CANCEL_PNL) \
+ make_field(reqId)
self.sendMsg(msg)
def reqPnLSingle(self, reqId: int, account: str, modelCode: str, conid: int):
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_PNL:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support PnL request.")
return
msg = make_field(OUT.REQ_PNL_SINGLE) \
+ make_field(reqId) \
+ make_field(account) \
+ make_field(modelCode) \
+ make_field(conid)
self.sendMsg(msg)
def cancelPnLSingle(self, reqId: int):
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_PNL:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support PnL request.")
return
msg = make_field(OUT.CANCEL_PNL_SINGLE) \
+ make_field(reqId)
self.sendMsg(msg)
#########################################################################
################## Executions
#########################################################################
def reqExecutions(self, reqId:int, execFilter:ExecutionFilter):
"""When this function is called, the execution reports that meet the
filter criteria are downloaded to the client via the execDetails()
function. To view executions beyond the past 24 hours, open the
Trade Log in TWS and, while the Trade Log is displayed, request
the executions again from the API.
reqId:int - The ID of the data request. Ensures that responses are
matched to requests if several requests are in process.
execFilter:ExecutionFilter - This object contains attributes that
describe the filter criteria used to determine which execution
reports are returned.
NOTE: Time format must be 'yyyymmdd-hh:mm:ss' Eg: '20030702-14:55'"""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
VERSION = 3
        # send req executions msg
flds = []
flds += [make_field(OUT.REQ_EXECUTIONS),
make_field(VERSION)]
if self.serverVersion() >= MIN_SERVER_VER_EXECUTION_DATA_CHAIN:
flds += [make_field( reqId),]
# Send the execution rpt filter data (srv v9 and above)
flds += [make_field( execFilter.clientId),
make_field(execFilter.acctCode),
make_field(execFilter.time),
make_field(execFilter.symbol),
make_field(execFilter.secType),
make_field(execFilter.exchange),
make_field(execFilter.side)]
msg = "".join(flds)
self.sendMsg(msg)
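    # Usage sketch (illustrative): filter executions for one symbol. Fields
    # left empty on the ExecutionFilter act as wildcards; the time value below
    # simply mirrors the example given in the docstring above.
    #
    #   f = ExecutionFilter()
    #   f.symbol = "IBM"
    #   f.time = "20030702-14:55"
    #   app.reqExecutions(9003, f)   # answers via execDetails()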
#########################################################################
################## Contract Details
#########################################################################
def reqContractDetails(self, reqId:int , contract:Contract):
"""Call this function to download all details for a particular
underlying. The contract details will be received via the contractDetails()
function on the EWrapper.
reqId:int - The ID of the data request. Ensures that responses are
            matched to requests if several requests are in process.
contract:Contract - The summary description of the contract being looked
up."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_SEC_ID_TYPE:
if contract.secIdType or contract.secId:
self.wrapper.error( reqId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support secIdType and secId parameters.")
return
if self.serverVersion() < MIN_SERVER_VER_TRADING_CLASS:
if contract.tradingClass:
self.wrapper.error( reqId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support tradingClass parameter in reqContractDetails.")
return
if self.serverVersion() < MIN_SERVER_VER_LINKING:
if contract.primaryExchange:
self.wrapper.error( reqId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support primaryExchange parameter in reqContractDetails.")
return
VERSION = 8
        # send req contract data msg
flds = []
flds += [make_field(OUT.REQ_CONTRACT_DATA),
make_field( VERSION)]
if self.serverVersion() >= MIN_SERVER_VER_CONTRACT_DATA_CHAIN:
flds += [make_field( reqId),]
# send contract fields
flds += [make_field(contract.conId), # srv v37 and above
make_field(contract.symbol),
make_field(contract.secType),
make_field(contract.lastTradeDateOrContractMonth),
make_field(contract.strike),
make_field(contract.right),
make_field(contract.multiplier)] # srv v15 and above
if self.serverVersion() >= MIN_SERVER_VER_PRIMARYEXCH:
flds += [make_field(contract.exchange),
make_field(contract.primaryExchange)]
elif self.serverVersion() >= MIN_SERVER_VER_LINKING:
if (contract.primaryExchange and
(contract.exchange == "BEST" or contract.exchange == "SMART")):
flds += [make_field(contract.exchange + ":" + contract.primaryExchange),]
else:
flds += [make_field(contract.exchange),]
flds += [make_field( contract.currency),
make_field( contract.localSymbol)]
if self.serverVersion() >= MIN_SERVER_VER_TRADING_CLASS:
flds += [make_field(contract.tradingClass), ]
flds += [make_field(contract.includeExpired),] # srv v31 and above
if self.serverVersion() >= MIN_SERVER_VER_SEC_ID_TYPE:
flds += [make_field( contract.secIdType),
make_field( contract.secId)]
msg = "".join(flds)
self.sendMsg(msg)
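    # Usage sketch (illustrative): look up details for a plain stock contract.
    # `app` is a hypothetical connected client; Contract fields not set here
    # keep their defaults.
    #
    #   c = Contract()
    #   c.symbol = "IBM"
    #   c.secType = "STK"
    #   c.exchange = "SMART"
    #   c.currency = "USD"
    #   app.reqContractDetails(9004, c)   # answers via contractDetails()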
#########################################################################
################## Market Depth
#########################################################################
def reqMktDepthExchanges(self):
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_REQ_MKT_DEPTH_EXCHANGES:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support market depth exchanges request.")
return
msg = make_field(OUT.REQ_MKT_DEPTH_EXCHANGES)
self.sendMsg(msg)
def reqMktDepth(self, reqId:TickerId, contract:Contract,
numRows:int, isSmartDepth:bool, mktDepthOptions:TagValueList):
"""Call this function to request market depth for a specific
contract. The market depth will be returned by the updateMktDepth() and
updateMktDepthL2() events.
Requests the contract's market depth (order book). Note this request must be
direct-routed to an exchange and not smart-routed. The number of simultaneous
market depth requests allowed in an account is calculated based on a formula
        that looks at an account's equity, commissions, and quote booster packs.
reqId:TickerId - The ticker id. Must be a unique value. When the market
depth data returns, it will be identified by this tag. This is
also used when canceling the market depth
contract:Contact - This structure contains a description of the contract
for which market depth data is being requested.
        numRows:int - Specifies the number of market depth rows to display.
isSmartDepth:bool - specifies SMART depth request
mktDepthOptions:TagValueList - For internal use only. Use default value
XYZ."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_TRADING_CLASS:
if contract.tradingClass or contract.conId > 0:
self.wrapper.error( reqId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support conId and tradingClass parameters in reqMktDepth.")
return
if self.serverVersion() < MIN_SERVER_VER_SMART_DEPTH and isSmartDepth:
self.wrapper.error( reqId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support SMART depth request.")
return
VERSION = 5
# send req mkt depth msg
flds = []
flds += [make_field(OUT.REQ_MKT_DEPTH),
make_field(VERSION),
make_field(reqId)]
# send contract fields
if self.serverVersion() >= MIN_SERVER_VER_TRADING_CLASS:
flds += [make_field(contract.conId),]
flds += [make_field(contract.symbol),
make_field(contract.secType),
make_field(contract.lastTradeDateOrContractMonth),
make_field(contract.strike),
make_field(contract.right),
make_field(contract.multiplier), # srv v15 and above
make_field(contract.exchange),
make_field(contract.currency),
make_field(contract.localSymbol)]
if self.serverVersion() >= MIN_SERVER_VER_TRADING_CLASS:
flds += [make_field(contract.tradingClass),]
flds += [make_field(numRows),] # srv v19 and above
if self.serverVersion() >= MIN_SERVER_VER_SMART_DEPTH:
flds += [make_field(isSmartDepth),]
# send mktDepthOptions parameter
if self.serverVersion() >= MIN_SERVER_VER_LINKING:
            # current doc says this part is for "internal use only" -> won't support it
if mktDepthOptions:
raise NotImplementedError("not supported")
mktDataOptionsStr = ""
flds += [make_field(mktDataOptionsStr),]
msg = "".join(flds)
self.sendMsg(msg)
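    # Usage sketch (illustrative): a direct-routed (non-SMART) depth request,
    # since the docstring above notes depth cannot be smart-routed. ISLAND is
    # used here only as an example destination exchange.
    #
    #   c = Contract()
    #   c.symbol, c.secType, c.exchange, c.currency = "IBM", "STK", "ISLAND", "USD"
    #   app.reqMktDepth(9005, c, numRows=5, isSmartDepth=False, mktDepthOptions=[])
    #   # ... rows arrive via updateMktDepth()/updateMktDepthL2() ...
    #   app.cancelMktDepth(9005, isSmartDepth=False)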
def cancelMktDepth(self, reqId:TickerId, isSmartDepth:bool):
"""After calling this function, market depth data for the specified id
will stop flowing.
reqId:TickerId - The ID that was specified in the call to
reqMktDepth().
isSmartDepth:bool - specifies SMART depth request"""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_SMART_DEPTH and isSmartDepth:
self.wrapper.error( reqId, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support SMART depth cancel.")
return
VERSION = 1
# send cancel mkt depth msg
flds = []
flds += [make_field(OUT.CANCEL_MKT_DEPTH),
make_field(VERSION),
make_field(reqId)]
if self.serverVersion() >= MIN_SERVER_VER_SMART_DEPTH:
flds += [make_field(isSmartDepth)]
msg = "".join(flds)
self.sendMsg(msg)
#########################################################################
################## News Bulletins
#########################################################################
def reqNewsBulletins(self, allMsgs:bool):
"""Call this function to start receiving news bulletins. Each bulletin
will be returned by the updateNewsBulletin() event.
allMsgs:bool - If set to TRUE, returns all the existing bulletins for
            the current day and any new ones. If set to FALSE, will only
return new bulletins. """
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
VERSION = 1
msg = make_field(OUT.REQ_NEWS_BULLETINS) \
+ make_field(VERSION) \
+ make_field(allMsgs)
self.sendMsg(msg)
def cancelNewsBulletins(self):
"""Call this function to stop receiving news bulletins."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
VERSION = 1
msg = make_field(OUT.CANCEL_NEWS_BULLETINS) \
+ make_field(VERSION)
self.sendMsg(msg)
#########################################################################
################## Financial Advisors
#########################################################################
def reqManagedAccts(self):
"""Call this function to request the list of managed accounts. The list
will be returned by the managedAccounts() function on the EWrapper.
Note: This request can only be made when connected to a FA managed account."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
VERSION = 1
msg = make_field(OUT.REQ_MANAGED_ACCTS) \
+ make_field(VERSION)
return self.sendMsg(msg)
def requestFA(self, faData:FaDataType):
"""Call this function to request FA configuration information from TWS.
The data returns in an XML string via a "receiveFA" ActiveX event.
faData:FaDataType - Specifies the type of Financial Advisor
            configuration data being requested. Valid values include:
1 = GROUPS
2 = PROFILE
3 = ACCOUNT ALIASES"""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
VERSION = 1
msg = make_field(OUT.REQ_FA) \
+ make_field(VERSION) \
+ make_field(int(faData))
return self.sendMsg(msg)
def replaceFA(self, faData:FaDataType , cxml:str):
"""Call this function to modify FA configuration information from the
API. Note that this can also be done manually in TWS itself.
faData:FaDataType - Specifies the type of Financial Advisor
            configuration data being requested. Valid values include:
1 = GROUPS
2 = PROFILE
3 = ACCOUNT ALIASES
cxml: str - The XML string containing the new FA configuration
information. """
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
VERSION = 1
msg = make_field(OUT.REPLACE_FA) \
+ make_field(VERSION) \
+ make_field(int(faData)) \
            + make_field(cxml)
return self.sendMsg(msg)
#########################################################################
################## Historical Data
#########################################################################
def reqHistoricalData(self, reqId:TickerId , contract:Contract, endDateTime:str,
durationStr:str, barSizeSetting:str, whatToShow:str,
useRTH:int, formatDate:int, keepUpToDate:bool, chartOptions:TagValueList):
"""Requests contracts' historical data. When requesting historical data, a
finishing time and date is required along with a duration string. The
resulting bars will be returned in EWrapper.historicalData()
reqId:TickerId - The id of the request. Must be a unique value. When the
            market data returns, it will be identified by this tag. This is also
used when canceling the market data.
contract:Contract - This object contains a description of the contract for which
market data is being requested.
        endDateTime:str - Defines a query end date and time at any point during the past six months.
Valid values include any date/time within the past six months in the format:
yyyymmdd HH:mm:ss ttt
where "ttt" is the optional time zone.
durationStr:str - Set the query duration up to one week, using a time unit
of seconds, days or weeks. Valid values include any integer followed by a space
and then S (seconds), D (days) or W (week). If no unit is specified, seconds is used.
        barSizeSetting:str - Specifies the size of the bars that will be returned (within IB/TWS limits).
Valid values include:
1 sec
5 secs
15 secs
30 secs
1 min
2 mins
3 mins
5 mins
15 mins
30 mins
1 hour
1 day
        whatToShow:str - Determines the nature of data being extracted. Valid values include:
TRADES
MIDPOINT
BID
ASK
BID_ASK
HISTORICAL_VOLATILITY
OPTION_IMPLIED_VOLATILITY
useRTH:int - Determines whether to return all data available during the requested time span,
or only data that falls within regular trading hours. Valid values include:
0 - all data is returned even where the market in question was outside of its
regular trading hours.
1 - only data within the regular trading hours is returned, even if the
requested time span falls partially or completely outside of the RTH.
        formatDate: int - Determines the date format applied to returned bars. Valid values include:
            1 - dates applying to bars are returned in the format: yyyymmdd{space}{space}hh:mm:ss
2 - dates are returned as a long integer specifying the number of seconds since
1/1/1970 GMT.
chartOptions:TagValueList - For internal use only. Use default value XYZ. """
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(reqId, NOT_CONNECTED.code(),
NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_TRADING_CLASS:
if contract.tradingClass or contract.conId > 0:
self.wrapper.error(reqId, UPDATE_TWS.code(),
UPDATE_TWS.msg() + " It does not support conId and tradingClass parameters in reqHistoricalData.")
return
VERSION = 6
        # send req historical data msg
flds = []
flds += [make_field(OUT.REQ_HISTORICAL_DATA),]
if self.serverVersion() < MIN_SERVER_VER_SYNT_REALTIME_BARS:
flds += [make_field(VERSION),]
flds += [make_field(reqId),]
# send contract fields
if self.serverVersion() >= MIN_SERVER_VER_TRADING_CLASS:
flds += [make_field(contract.conId),]
flds += [make_field(contract.symbol),
make_field(contract.secType),
make_field(contract.lastTradeDateOrContractMonth),
make_field(contract.strike),
make_field(contract.right),
make_field(contract.multiplier),
make_field(contract.exchange),
make_field(contract.primaryExchange),
make_field(contract.currency),
make_field(contract.localSymbol)]
if self.serverVersion() >= MIN_SERVER_VER_TRADING_CLASS:
flds += [make_field( contract.tradingClass),]
flds += [make_field(contract.includeExpired), # srv v31 and above
make_field(endDateTime), # srv v20 and above
make_field(barSizeSetting), # srv v20 and above
make_field(durationStr),
make_field(useRTH),
make_field(whatToShow),
make_field(formatDate)] # srv v16 and above
# Send combo legs for BAG requests
if contract.secType == "BAG":
flds += [make_field(len(contract.comboLegs)),]
for comboLeg in contract.comboLegs:
flds += [make_field( comboLeg.conId),
make_field( comboLeg.ratio),
make_field( comboLeg.action),
make_field( comboLeg.exchange)]
if self.serverVersion() >= MIN_SERVER_VER_SYNT_REALTIME_BARS:
flds += [make_field(keepUpToDate), ]
# send chartOptions parameter
if self.serverVersion() >= MIN_SERVER_VER_LINKING:
chartOptionsStr = ""
if chartOptions:
for tagValue in chartOptions:
chartOptionsStr += str(tagValue)
flds += [make_field( chartOptionsStr),]
msg = "".join(flds)
self.sendMsg(msg)
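    # Usage sketch (illustrative): one week of daily TRADES bars for a stock,
    # regular trading hours only, human-readable dates. The endDateTime value
    # is a made-up example in the 'yyyymmdd HH:mm:ss ttt' format described
    # above, and `c` is a stock Contract built as in the reqContractDetails()
    # sketch.
    #
    #   app.reqHistoricalData(9006, c, "20180101 16:00:00 GMT", "1 W",
    #                         "1 day", "TRADES", useRTH=1, formatDate=1,
    #                         keepUpToDate=False, chartOptions=[])
    #   # ... bars arrive via the wrapper's historicalData() callback ...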
def cancelHistoricalData(self, reqId:TickerId):
"""Used if an internet disconnect has occurred or the results of a query
are otherwise delayed and the application is no longer interested in receiving
the data.
reqId:TickerId - The ticker ID. Must be a unique value."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
VERSION = 1
msg = make_field(OUT.CANCEL_HISTORICAL_DATA) \
+ make_field(VERSION) \
+ make_field(reqId)
self.sendMsg(msg)
# Note that formatData parameter affects intraday bars only
# 1-day bars always return with date in YYYYMMDD format
def reqHeadTimeStamp(self, reqId:TickerId, contract:Contract,
whatToShow: str, useRTH: int, formatDate: int):
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_REQ_HEAD_TIMESTAMP:
self.wrapper.error(reqId, UPDATE_TWS.code(),
UPDATE_TWS.msg() + " It does not support head time stamp requests.")
return
flds = []
flds += [make_field(OUT.REQ_HEAD_TIMESTAMP),
make_field(reqId),
make_field(contract.conId),
make_field(contract.symbol),
make_field(contract.secType),
make_field(contract.lastTradeDateOrContractMonth),
make_field(contract.strike),
make_field(contract.right),
make_field(contract.multiplier),
make_field(contract.exchange),
make_field(contract.primaryExchange),
make_field(contract.currency),
make_field(contract.localSymbol),
make_field(contract.tradingClass),
make_field(contract.includeExpired),
make_field(useRTH),
make_field(whatToShow),
make_field(formatDate) ]
msg = "".join(flds)
self.sendMsg(msg)
def cancelHeadTimeStamp(self, reqId: TickerId):
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_CANCEL_HEADTIMESTAMP:
self.wrapper.error(reqId, UPDATE_TWS.code(),
UPDATE_TWS.msg() + " It does not support head time stamp requests.")
return
flds = []
flds += [make_field(OUT.CANCEL_HEAD_TIMESTAMP),
make_field(reqId) ]
msg = "".join(flds)
self.sendMsg(msg)
def reqHistogramData(self, tickerId: int, contract: Contract,
useRTH: bool, timePeriod: str):
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_REQ_HISTOGRAM:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support histogram requests..")
return
flds = []
flds += [make_field(OUT.REQ_HISTOGRAM_DATA),
make_field(tickerId),
make_field(contract.conId),
make_field(contract.symbol),
make_field(contract.secType),
make_field(contract.lastTradeDateOrContractMonth),
make_field(contract.strike),
make_field(contract.right),
make_field(contract.multiplier),
make_field(contract.exchange),
make_field(contract.primaryExchange),
make_field(contract.currency),
make_field(contract.localSymbol),
make_field(contract.tradingClass),
make_field(contract.includeExpired),
make_field(useRTH),
make_field(timePeriod)]
msg = "".join(flds)
self.sendMsg(msg)
def cancelHistogramData(self, tickerId: int):
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_REQ_HISTOGRAM:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support histogram requests..")
return
msg = make_field(OUT.CANCEL_HISTOGRAM_DATA) + make_field(tickerId)
self.sendMsg(msg)
def reqHistoricalTicks(self, reqId: int, contract: Contract, startDateTime: str,
endDateTime: str, numberOfTicks: int, whatToShow: str, useRth: int,
ignoreSize: bool, miscOptions: TagValueList):
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_HISTORICAL_TICKS:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support historical ticks requests..")
return
flds = []
flds += [make_field(OUT.REQ_HISTORICAL_TICKS),
make_field(reqId),
make_field(contract.conId),
make_field(contract.symbol),
make_field(contract.secType),
make_field(contract.lastTradeDateOrContractMonth),
make_field(contract.strike),
make_field(contract.right),
make_field(contract.multiplier),
make_field(contract.exchange),
make_field(contract.primaryExchange),
make_field(contract.currency),
make_field(contract.localSymbol),
make_field(contract.tradingClass),
make_field(contract.includeExpired),
make_field(startDateTime),
make_field(endDateTime),
make_field(numberOfTicks),
make_field(whatToShow),
make_field(useRth),
make_field(ignoreSize)]
miscOptionsString = ""
if miscOptions:
for tagValue in miscOptions:
miscOptionsString += str(tagValue)
flds += [make_field(miscOptionsString),]
msg = "".join(flds)
self.sendMsg(msg)
#########################################################################
################## Market Scanners
#########################################################################
def reqScannerParameters(self):
"""Requests an XML string that describes all possible scanner queries."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
VERSION = 1
msg = make_field(OUT.REQ_SCANNER_PARAMETERS) \
+ make_field(VERSION)
self.sendMsg(msg)
def reqScannerSubscription(self, reqId:int,
subscription:ScannerSubscription,
scannerSubscriptionOptions:TagValueList,
scannerSubscriptionFilterOptions:TagValueList):
"""reqId:int - The ticker ID. Must be a unique value.
scannerSubscription:ScannerSubscription - This structure contains
possible parameters used to filter results.
scannerSubscriptionOptions:TagValueList - For internal use only.
Use default value XYZ."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_SCANNER_GENERIC_OPTS and scannerSubscriptionFilterOptions is not None:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support API scanner subscription generic filter options")
return
VERSION = 4
flds = []
flds += [make_field(OUT.REQ_SCANNER_SUBSCRIPTION)]
if self.serverVersion() < MIN_SERVER_VER_SCANNER_GENERIC_OPTS:
flds += [make_field(VERSION)]
flds +=[make_field(reqId),
make_field_handle_empty(subscription.numberOfRows),
make_field(subscription.instrument),
make_field(subscription.locationCode),
make_field(subscription.scanCode),
make_field_handle_empty(subscription.abovePrice),
make_field_handle_empty(subscription.belowPrice),
make_field_handle_empty(subscription.aboveVolume),
make_field_handle_empty(subscription.marketCapAbove),
make_field_handle_empty(subscription.marketCapBelow),
make_field(subscription.moodyRatingAbove),
make_field(subscription.moodyRatingBelow),
make_field(subscription.spRatingAbove),
make_field(subscription.spRatingBelow),
make_field(subscription.maturityDateAbove),
make_field(subscription.maturityDateBelow),
make_field_handle_empty(subscription.couponRateAbove),
make_field_handle_empty(subscription.couponRateBelow),
make_field(subscription.excludeConvertible),
make_field_handle_empty(subscription.averageOptionVolumeAbove), # srv v25 and above
make_field(subscription.scannerSettingPairs), # srv v25 and above
make_field(subscription.stockTypeFilter)] # srv v27 and above
# send scannerSubscriptionFilterOptions parameter
if self.serverVersion() >= MIN_SERVER_VER_SCANNER_GENERIC_OPTS:
scannerSubscriptionFilterOptionsStr = ""
if scannerSubscriptionFilterOptions:
for tagValueOpt in scannerSubscriptionFilterOptions:
scannerSubscriptionFilterOptionsStr += str(tagValueOpt)
flds += [make_field(scannerSubscriptionFilterOptionsStr)]
# send scannerSubscriptionOptions parameter
if self.serverVersion() >= MIN_SERVER_VER_LINKING:
scannerSubscriptionOptionsStr = ""
if scannerSubscriptionOptions:
for tagValueOpt in scannerSubscriptionOptions:
scannerSubscriptionOptionsStr += str(tagValueOpt)
flds += [make_field(scannerSubscriptionOptionsStr),]
msg = "".join(flds)
self.sendMsg(msg)
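    # Usage sketch (illustrative): scan for the most active US stocks. The
    # instrument/location/scan codes below are example values; the full set of
    # valid codes comes from the XML returned by reqScannerParameters().
    #
    #   sub = ScannerSubscription()
    #   sub.instrument = "STK"
    #   sub.locationCode = "STK.US.MAJOR"
    #   sub.scanCode = "MOST_ACTIVE"
    #   app.reqScannerSubscription(9007, sub,
    #                              scannerSubscriptionOptions=[],
    #                              scannerSubscriptionFilterOptions=None)
    #   # ... rows arrive via scannerData(), then scannerDataEnd() ...
    #   app.cancelScannerSubscription(9007)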
def cancelScannerSubscription(self, reqId:int):
"""reqId:int - The ticker ID. Must be a unique value."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
VERSION = 1
msg = make_field(OUT.CANCEL_SCANNER_SUBSCRIPTION) \
+ make_field(VERSION) \
+ make_field(reqId)
self.sendMsg(msg)
#########################################################################
################## Real Time Bars
#########################################################################
def reqRealTimeBars(self, reqId:TickerId, contract:Contract, barSize:int,
whatToShow:str, useRTH:bool,
realTimeBarsOptions:TagValueList):
"""Call the reqRealTimeBars() function to start receiving real time bar
results through the realtimeBar() EWrapper function.
reqId:TickerId - The Id for the request. Must be a unique value. When the
data is received, it will be identified by this Id. This is also
used when canceling the request.
contract:Contract - This object contains a description of the contract
for which real time bars are being requested
barSize:int - Currently only 5 second bars are supported, if any other
value is used, an exception will be thrown.
whatToShow:str - Determines the nature of the data extracted. Valid
values include:
TRADES
BID
ASK
MIDPOINT
useRTH:bool - Regular Trading Hours only. Valid values include:
0 = all data available during the time span requested is returned,
including time intervals when the market in question was
outside of regular trading hours.
1 = only data within the regular trading hours for the product
                requested is returned, even if the time span falls
partially or completely outside.
        realTimeBarsOptions:TagValueList - For internal use only. Use default value XYZ."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_TRADING_CLASS:
if contract.tradingClass:
self.wrapper.error( reqId, UPDATE_TWS.code(),
UPDATE_TWS.msg() + " It does not support conId and tradingClass parameter in reqRealTimeBars.")
return
VERSION = 3
flds = []
flds += [make_field(OUT.REQ_REAL_TIME_BARS),
make_field(VERSION),
make_field(reqId)]
# send contract fields
if self.serverVersion() >= MIN_SERVER_VER_TRADING_CLASS:
flds += [make_field(contract.conId),]
flds += [make_field(contract.symbol),
make_field(contract.secType),
make_field(contract.lastTradeDateOrContractMonth),
make_field(contract.strike),
make_field(contract.right),
make_field(contract.multiplier),
make_field(contract.exchange),
make_field(contract.primaryExchange),
make_field(contract.currency),
make_field(contract.localSymbol)]
if self.serverVersion() >= MIN_SERVER_VER_TRADING_CLASS:
flds += [make_field(contract.tradingClass),]
flds += [make_field(barSize),
make_field(whatToShow),
make_field(useRTH)]
# send realTimeBarsOptions parameter
if self.serverVersion() >= MIN_SERVER_VER_LINKING:
realTimeBarsOptionsStr = ""
if realTimeBarsOptions:
for tagValueOpt in realTimeBarsOptions:
realTimeBarsOptionsStr += str(tagValueOpt)
flds += [make_field(realTimeBarsOptionsStr),]
msg = "".join(flds)
self.sendMsg(msg)
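    # Usage sketch (illustrative): 5-second TRADES bars (the only bar size the
    # docstring above says is supported), regular trading hours only; `c` is a
    # stock Contract built as in the reqContractDetails() sketch.
    #
    #   app.reqRealTimeBars(9008, c, 5, "TRADES", True, [])
    #   # ... bars arrive via the wrapper's realtimeBar() callback ...
    #   app.cancelRealTimeBars(9008)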
def cancelRealTimeBars(self, reqId:TickerId):
"""Call the cancelRealTimeBars() function to stop receiving real time bar results.
reqId:TickerId - The Id that was specified in the call to reqRealTimeBars(). """
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(reqId, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
VERSION = 1
        # send cancel real time bars msg
flds = []
flds += [make_field(OUT.CANCEL_REAL_TIME_BARS),
make_field(VERSION),
make_field(reqId)]
msg = "".join(flds)
self.sendMsg(msg)
#########################################################################
################## Fundamental Data
#########################################################################
def reqFundamentalData(self, reqId:TickerId , contract:Contract,
reportType:str, fundamentalDataOptions:TagValueList):
"""Call this function to receive fundamental data for
stocks. The appropriate market data subscription must be set up in
Account Management before you can receive this data.
Fundamental data will be returned at EWrapper.fundamentalData().
        reqFundamentalData() can handle conId specified in the Contract object,
but not tradingClass or multiplier. This is because reqFundamentalData()
is used only for stocks and stocks do not have a multiplier and
trading class.
reqId:tickerId - The ID of the data request. Ensures that responses are
matched to requests if several requests are in process.
contract:Contract - This structure contains a description of the
contract for which fundamental data is being requested.
reportType:str - One of the following XML reports:
ReportSnapshot (company overview)
ReportsFinSummary (financial summary)
ReportRatios (financial ratios)
ReportsFinStatements (financial statements)
RESC (analyst estimates)
CalendarReport (company calendar) """
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
VERSION = 2
if self.serverVersion() < MIN_SERVER_VER_FUNDAMENTAL_DATA:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support fundamental data request.")
return
if self.serverVersion() < MIN_SERVER_VER_TRADING_CLASS:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support conId parameter in reqFundamentalData.")
return
flds = []
flds += [make_field(OUT.REQ_FUNDAMENTAL_DATA),
make_field(VERSION),
make_field(reqId)]
# send contract fields
if self.serverVersion() >= MIN_SERVER_VER_TRADING_CLASS:
flds += [make_field( contract.conId),]
flds += [make_field(contract.symbol),
make_field(contract.secType),
make_field(contract.exchange),
make_field(contract.primaryExchange),
make_field(contract.currency),
make_field(contract.localSymbol),
make_field(reportType)]
if self.serverVersion() >= MIN_SERVER_VER_LINKING:
fundDataOptStr = ""
tagValuesCount = len(fundamentalDataOptions) if fundamentalDataOptions else 0
if fundamentalDataOptions:
for fundDataOption in fundamentalDataOptions:
fundDataOptStr += str(fundDataOption)
flds += [make_field(tagValuesCount),
make_field(fundDataOptStr)]
msg = "".join(flds)
self.sendMsg(msg)
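    # Usage sketch (illustrative): pull the company-overview XML for a stock.
    # Per the docstring above, only stock-level Contract fields matter here;
    # `c` is built as in the reqContractDetails() sketch.
    #
    #   app.reqFundamentalData(9009, c, "ReportSnapshot", [])
    #   # ... XML arrives via the wrapper's fundamentalData() callback ...
    #   app.cancelFundamentalData(9009)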
def cancelFundamentalData(self, reqId:TickerId ):
"""Call this function to stop receiving fundamental data.
reqId:TickerId - The ID of the data request."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_FUNDAMENTAL_DATA:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support fundamental data request.")
return
VERSION = 1
msg = make_field(OUT.CANCEL_FUNDAMENTAL_DATA) \
+ make_field(VERSION) \
+ make_field(reqId)
self.sendMsg(msg)
########################################################################
################## News
#########################################################################
def reqNewsProviders(self):
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_REQ_NEWS_PROVIDERS:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support news providers request.")
return
msg = make_field(OUT.REQ_NEWS_PROVIDERS)
self.sendMsg(msg)
def reqNewsArticle(self, reqId: int, providerCode: str, articleId: str, newsArticleOptions: TagValueList):
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_REQ_NEWS_ARTICLE:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support news article request.")
return
flds = []
flds += [make_field(OUT.REQ_NEWS_ARTICLE),
make_field(reqId),
make_field(providerCode),
make_field(articleId)]
# send newsArticleOptions parameter
if self.serverVersion() >= MIN_SERVER_VER_NEWS_QUERY_ORIGINS:
newsArticleOptionsStr = ""
if newsArticleOptions:
for tagValue in newsArticleOptions:
newsArticleOptionsStr += str(tagValue)
flds += [make_field(newsArticleOptionsStr),]
msg = "".join(flds)
self.sendMsg(msg)
def reqHistoricalNews(self, reqId: int, conId: int, providerCodes: str,
startDateTime: str, endDateTime: str, totalResults: int, historicalNewsOptions: TagValueList):
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_REQ_HISTORICAL_NEWS:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support historical news request.")
return
flds = []
flds += [make_field(OUT.REQ_HISTORICAL_NEWS),
make_field(reqId),
make_field(conId),
make_field(providerCodes),
make_field(startDateTime),
make_field(endDateTime),
make_field(totalResults)]
# send historicalNewsOptions parameter
if self.serverVersion() >= MIN_SERVER_VER_NEWS_QUERY_ORIGINS:
historicalNewsOptionsStr = ""
if historicalNewsOptions:
                for tagValue in historicalNewsOptions:
historicalNewsOptionsStr += str(tagValue)
flds += [make_field(historicalNewsOptionsStr),]
msg = "".join(flds)
self.sendMsg(msg)
#########################################################################
################## Display Groups
#########################################################################
def queryDisplayGroups(self, reqId: int):
"""API requests used to integrate with TWS color-grouped windows (display groups).
        TWS color-grouped windows are identified by an integer number.
        Currently that number ranges from 1 to 7 and is mapped to specific
        colors, as indicated in TWS.
reqId:int - The unique number that will be associated with the
response """
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_LINKING:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support queryDisplayGroups request.")
return
VERSION = 1
msg = make_field(OUT.QUERY_DISPLAY_GROUPS) \
+ make_field(VERSION) \
+ make_field(reqId)
self.sendMsg(msg)
def subscribeToGroupEvents(self, reqId:int, groupId:int):
"""reqId:int - The unique number associated with the notification.
groupId:int - The ID of the group, currently it is a number from 1 to 7.
This is the display group subscription request sent by the API to TWS."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_LINKING:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support subscribeToGroupEvents request.")
return
VERSION = 1
msg = make_field(OUT.SUBSCRIBE_TO_GROUP_EVENTS) \
+ make_field(VERSION) \
+ make_field(reqId) \
+ make_field(groupId)
self.sendMsg(msg)
def updateDisplayGroup(self, reqId:int, contractInfo:str):
"""reqId:int - The requestId specified in subscribeToGroupEvents().
contractInfo:str - The encoded value that uniquely represents the
contract in IB. Possible values include:
none = empty selection
contractID@exchange - any non-combination contract.
Examples: 8314@SMART for IBM SMART; 8314@ARCA for IBM @ARCA.
combo = if any combo is selected."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_LINKING:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support updateDisplayGroup request.")
return
VERSION = 1
msg = make_field(OUT.UPDATE_DISPLAY_GROUP) \
+ make_field(VERSION) \
+ make_field(reqId) \
+ make_field(contractInfo)
self.sendMsg(msg)
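    # Usage sketch (illustrative): subscribe to display group 1 and push IBM
    # into it, reusing the "8314@SMART" contractInfo example from the
    # docstring above.
    #
    #   app.subscribeToGroupEvents(9010, groupId=1)
    #   app.updateDisplayGroup(9010, "8314@SMART")
    #   # ...
    #   app.unsubscribeFromGroupEvents(9010)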
def unsubscribeFromGroupEvents(self, reqId:int):
"""reqId:int - The requestId specified in subscribeToGroupEvents()."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_LINKING:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support unsubscribeFromGroupEvents request.")
return
VERSION = 1
msg = make_field(OUT.UNSUBSCRIBE_FROM_GROUP_EVENTS) \
+ make_field(VERSION) \
+ make_field(reqId)
self.sendMsg(msg)
def verifyRequest(self, apiName:str, apiVersion:str):
"""For IB's internal purpose. Allows to provide means of verification
between the TWS and third party programs."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_LINKING:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support verification request.")
return
if not self.extraAuth:
self.wrapper.error(NO_VALID_ID, BAD_MESSAGE.code(), BAD_MESSAGE.msg() +
" Intent to authenticate needs to be expressed during initial connect request.")
return
VERSION = 1
msg = make_field(OUT.VERIFY_REQUEST) \
+ make_field(VERSION) \
+ make_field(apiName) \
+ make_field(apiVersion)
self.sendMsg(msg)
def verifyMessage(self, apiData:str):
"""For IB's internal purpose. Allows to provide means of verification
between the TWS and third party programs."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_LINKING:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support verification request.")
return
VERSION = 1
msg = make_field(OUT.VERIFY_MESSAGE) \
+ make_field(VERSION) \
+ make_field(apiData)
self.sendMsg(msg)
def verifyAndAuthRequest(self, apiName:str, apiVersion:str,
opaqueIsvKey:str):
"""For IB's internal purpose. Allows to provide means of verification
between the TWS and third party programs."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_LINKING:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support verification request.")
return
if not self.extraAuth:
self.wrapper.error(NO_VALID_ID, BAD_MESSAGE.code(), BAD_MESSAGE.msg() +
" Intent to authenticate needs to be expressed during initial connect request.")
return
VERSION = 1
msg = make_field(OUT.VERIFY_AND_AUTH_REQUEST) \
+ make_field(VERSION) \
+ make_field(apiName) \
+ make_field(apiVersion) \
+ make_field(opaqueIsvKey)
self.sendMsg(msg)
def verifyAndAuthMessage(self, apiData:str, xyzResponse:str):
"""For IB's internal purpose. Allows to provide means of verification
between the TWS and third party programs."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_LINKING:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support verification request.")
return
VERSION = 1
msg = make_field(OUT.VERIFY_AND_AUTH_MESSAGE) \
+ make_field(VERSION) \
+ make_field(apiData) \
+ make_field(xyzResponse)
self.sendMsg(msg)
def reqSecDefOptParams(self, reqId:int, underlyingSymbol:str,
futFopExchange:str, underlyingSecType:str,
underlyingConId:int):
"""Requests security definition option parameters for viewing a
contract's option chain reqId the ID chosen for the request
underlyingSymbol futFopExchange The exchange on which the returned
options are trading. Can be set to the empty string "" for all
exchanges. underlyingSecType The type of the underlying security,
i.e. STK underlyingConId the contract ID of the underlying security.
Response comes via EWrapper.securityDefinitionOptionParameter()"""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_SEC_DEF_OPT_PARAMS_REQ:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support security definition option request.")
return
flds = []
flds += [make_field(OUT.REQ_SEC_DEF_OPT_PARAMS),
make_field(reqId),
make_field(underlyingSymbol),
make_field(futFopExchange),
make_field(underlyingSecType),
make_field(underlyingConId)]
msg = "".join(flds)
self.sendMsg(msg)
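    # Usage sketch (illustrative): option chain parameters for a stock
    # underlying; "" requests all exchanges, and 8314 reuses the IBM conId
    # used as an example elsewhere in this file.
    #
    #   app.reqSecDefOptParams(9011, "IBM", "", "STK", 8314)
    #   # ... answers via securityDefinitionOptionParameter() ...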
def reqSoftDollarTiers(self, reqId:int):
"""Requests pre-defined Soft Dollar Tiers. This is only supported for
registered professional advisors and hedge and mutual funds who have
configured Soft Dollar Tiers in Account Management."""
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
msg = make_field(OUT.REQ_SOFT_DOLLAR_TIERS) \
+ make_field(reqId)
self.sendMsg(msg)
def reqFamilyCodes(self):
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_REQ_FAMILY_CODES:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support family codes request.")
return
msg = make_field(OUT.REQ_FAMILY_CODES)
self.sendMsg(msg)
def reqMatchingSymbols(self, reqId:int, pattern:str):
self.logRequest(current_fn_name(), vars())
if not self.isConnected():
self.wrapper.error(NO_VALID_ID, NOT_CONNECTED.code(), NOT_CONNECTED.msg())
return
if self.serverVersion() < MIN_SERVER_VER_REQ_MATCHING_SYMBOLS:
self.wrapper.error(NO_VALID_ID, UPDATE_TWS.code(), UPDATE_TWS.msg() +
" It does not support matching symbols request.")
return
msg = make_field(OUT.REQ_MATCHING_SYMBOLS) \
+ make_field(reqId) \
+ make_field(pattern)
self.sendMsg(msg)
# ----- next file: /exer2.py (repo: gpf1991/re) -----
# import re,sys
# print('Please enter the port:')
# port = sys.stdin.readline().strip('\n')
# # print('hello', port)
# if sys.argv[1] ==
# f = open('1.txt')
# data = r.read()
import re,sys
def get_address(port):
    # extract one block of text (up to a blank line)
f = open('1.txt')
while True:
data = ''
for line in f:
if line != '\n':
data += line
else:
break
        # if we've reached end of file
if not data:
return 'Not found'
        # match the first word to check whether this is the target block
try:
PORT = re.match(r'\S+', data).group()
# print(PORT)
except Exception as e:
print(e)
continue
if PORT == port:
# pattern=r'address is ([0-9a-f]{4}\.[0-9a-f]{4}\.[0-9a-f]{4})'
pattern=r'address is ((\d{1,3}\.){3}\d{1,3}/\d+|Unknow)'
address = re.search(pattern,data).group(1)
return address
if __name__ == '__main__':
port = sys.argv[1]
addr = get_address(port)
print(addr)
# ----- next file: /articles/migrations/0002_load_news.py (repo: FashtimeDotCom/gobcl-plataforma) -----
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-04-04 14:57
from __future__ import unicode_literals
from django.db import migrations
def load_articles(apps, schema_editor):
Article = apps.get_model('articles', 'Article')
OldArticle = apps.get_model('aldryn_newsblog', 'Article')
Tag = apps.get_model('taggit', 'Tag')
for old_article in OldArticle.objects.all():
article = Article()
article.created_by = old_article.owner
article.publishing_date = old_article.publishing_date
article.content = old_article.content
article.featured_image = old_article.featured_image
article.is_draft = True
article.save()
# copy categories
for category in old_article.categories.all():
article.categories.add(category)
def unload_articles(apps, schema_editor):
Article = apps.get_model('articles', 'Article')
Article.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('articles', '0001_initial'),
]
operations = [
migrations.RunPython(load_articles, unload_articles),
]
# ----- next file: /raw_scripts/132.230.102.123-10.21.11.31/1569574792.py (repo: LennartElbe/codeEvo) -----
import functools
import typing
import string
import random
import pytest
## Solution part 1.
def is_palindromic(n: int) -> bool:
"""Tests if a given number is a palindrome.
Args:
A natural number n.
Returns:
        True if the number is a palindrome, False otherwise.
    """
    if n > 0:
        return str(n) == str(n)[::-1]
    return False
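# Quick check (illustrative): reversing the decimal string is the whole test.
#
#   is_palindromic(1221)  # -> True
#   is_palindromic(1999)  # -> False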
######################################################################
## hidden code
def mk_coverage():
covered = set()
target = set(range(6))
count = 0
original = None
def coverage(func):
nonlocal covered, target, count, original
def wrapper(n):
nonlocal covered, count
s = str (n)
lens = len (s)
if lens == 1:
covered.add(0)
if lens == 2:
covered.add(1)
            if (lens > 2) and (lens % 2 == 0):
                covered.add(2)
            if lens > 2 and lens % 2 == 1:
                covered.add(3)
r = func (n)
if r:
covered.add (4)
else:
covered.add (5)
count += 1
return r
if func == "achieved": return len(covered)
if func == "required": return len(target)
if func == "count" : return count
if func == "original": return original
original = func
if func.__doc__:
wrapper.__doc__ = func.__doc__
wrapper.__hints__ = typing.get_type_hints (func)
return wrapper
return coverage
coverage = mk_coverage()
try:
is_palindromic = coverage(is_palindromic)
except:
pass
## Solution part 2. (tests)
def test_is_palindromic():
    assert is_palindromic(2002)
    assert not is_palindromic(1999)
    assert is_palindromic(4114)
######################################################################
## hidden restores unadorned function
is_palindromic = coverage ("original")
## Solution part 3.
## Solution part 4.
######################################################################
## test code
pytest.main (["-v", "--assert=plain", "-p", "no:cacheprovider"])
from inspect import getfullargspec
class TestNames:
def test_is_palindromic(self):
assert is_palindromic
assert 'n' in getfullargspec(is_palindromic).args
def test_gen_palindromic(self):
assert gen_palindromic
assert 'n' in getfullargspec(gen_palindromic).args
def test_represent(self):
assert represent
assert 'n' in getfullargspec(represent).args
class TestGrades:
def test_docstring_present(self):
assert is_palindromic.__doc__ is not None
assert gen_palindromic.__doc__ is not None
assert represent.__doc__ is not None
def test_typing_present(self):
assert is_palindromic.__hints__ == typing.get_type_hints(self.is_palindromic_oracle)
assert typing.get_type_hints (gen_palindromic) == typing.get_type_hints (self.gen_palindromic_oracle)
assert typing.get_type_hints (represent) == typing.get_type_hints (self.represent_oracle)
def test_coverage(self):
assert coverage("achieved") == coverage("required")
    def is_palindromic_oracle(self, n:int) -> bool:
s = str(n)
while len (s) > 1:
if s[0] != s[-1]:
return False
s = s[1:-1]
return True
def gen_palindromic_oracle (self, n:int):
return (j for j in range (n + 1, 0, -1) if self.is_palindromic_oracle (j))
def represent_oracle (self, n:int) -> list:
for n1 in self.gen_palindromic_oracle (n):
if n1 == n:
return [n1]
for n2 in self.gen_palindromic_oracle (n - n1):
if n2 == n - n1:
return [n1, n2]
for n3 in self.gen_palindromic_oracle (n - n1 - n2):
if n3 == n - n1 - n2:
return [n1, n2, n3]
# failed to find a representation
return []
def test_is_palindromic(self):
        ## check the student function against the hidden oracle
        for i in range(1, 100):
            assert is_palindromic(i) == self.is_palindromic_oracle(i)
        n = random.randrange(1, 10000)
        assert is_palindromic(n) == self.is_palindromic_oracle(n)
def test_gen_palindromic(self):
## fill in
pass
def test_represent (self):
def check(n, r):
for v in r:
assert self.is_palindromic_oracle (v)
assert n == sum (r)
for n in range (1,100):
r = represent (n)
check (n, r)
for i in range (100):
n = random.randrange (10000)
r = represent (n)
check (n, r)
# ----- next file: /robot/task_10.py (repo: curlymario/mpit_cs_py_hw) -----
#!/usr/bin/python3
from pyrob.api import *
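# Paint the current cell only when there is a wall directly above or below it.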
def paint_if_wall():
if wall_is_above() or wall_is_beneath():
fill_cell()
@task
def task_8_3():
while not wall_is_on_the_right():
paint_if_wall()
move_right()
paint_if_wall()
if __name__ == '__main__':
run_tasks()
|
[
"maxim@sivokon.me"
] |
maxim@sivokon.me
|
4e99ad75ad388b2a2c9e5fedec17ba453a630b0f
|
415b3a3591a115640bde0d794dff604c96cdae31
|
/catkin_ws/build/beginner_tutorials/catkin_generated/stamps/beginner_tutorials/talker.py.stamp
|
b9007d4d3a9125deb4d66bdd2aac9993e418b433
|
[] |
no_license
|
rbed23/ros-catkin-ws
|
aaf4b96f0fcae9e4fe3d6f4c5ac33f03244b89d4
|
70dd4de1633bbbd9357a6fb66ff1f20e83bb0552
|
refs/heads/master
| 2023-01-19T13:53:27.452804
| 2020-11-12T09:03:47
| 2020-11-12T09:03:47
| 311,613,940
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,218
|
stamp
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
## Simple talker demo that publishes std_msgs/Strings messages
## to the 'chatter' topic
import rospy
from std_msgs.msg import String
def talker():
pub = rospy.Publisher('chatter', String, queue_size=10)
rospy.init_node('talker', anonymous=True)
rate = rospy.Rate(10) # 10hz
while not rospy.is_shutdown():
hello_str = "hello world %s" % rospy.get_time()
#rospy.loginfo(hello_str)
pub.publish(hello_str)
rate.sleep()
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
pass
|
[
"rbed23@gmail.com"
] |
rbed23@gmail.com
|
884389e2e29a25efc31465e5159dd79a098a839d
|
24fe1f54fee3a3df952ca26cce839cc18124357a
|
/servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/adepg/entity.py
|
5e5ecb34e34d4097f5be7c90c6022027e531c050
|
[] |
no_license
|
aperiyed/servicegraph-cloudcenter
|
4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff
|
9eb7975f2f6835e1c0528563a771526896306392
|
refs/heads/master
| 2023-05-10T17:27:18.022381
| 2020-01-20T09:18:28
| 2020-01-20T09:18:28
| 235,065,676
| 0
| 0
| null | 2023-05-01T21:19:14
| 2020-01-20T09:36:37
|
Python
|
UTF-8
|
Python
| false
| false
| 5,106
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class Entity(Mo):
meta = ClassMeta("cobra.model.adepg.Entity")
meta.isAbstract = True
meta.moClassName = "adepgEntity"
meta.moClassName = "adepgEntity"
meta.rnFormat = ""
meta.category = MoCategory.REGULAR
meta.label = "None"
meta.writeAccessMask = 0x0
meta.readAccessMask = 0x0
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = False
meta.isContextRoot = False
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.naming.NamedIdentifiedObject")
meta.concreteSubClasses.add("cobra.model.adepg.Event")
meta.concreteSubClasses.add("cobra.model.adepg.Domain")
meta.concreteSubClasses.add("cobra.model.adepg.ResEpPFltAttrCont")
meta.concreteSubClasses.add("cobra.model.adepg.Uni")
meta.concreteSubClasses.add("cobra.model.adepg.GroupCont")
meta.concreteSubClasses.add("cobra.model.adepg.EventCont")
meta.concreteSubClasses.add("cobra.model.adepg.Svr")
meta.concreteSubClasses.add("cobra.model.adepg.GroupUsrData")
meta.concreteSubClasses.add("cobra.model.ads.Ctxt")
meta.concreteSubClasses.add("cobra.model.adepg.User")
meta.concreteSubClasses.add("cobra.model.adepg.UserCont")
meta.concreteSubClasses.add("cobra.model.adepg.ResCont")
meta.concreteSubClasses.add("cobra.model.adepg.Group")
meta.concreteSubClasses.add("cobra.model.adepg.ResTenant")
meta.concreteSubClasses.add("cobra.model.adepg.ResFltAttr")
meta.rnPrefixes = [
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 43729, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "id", "id", 43618, PropCategory.REGULAR)
prop.label = "Id"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("id", prop)
prop = PropMeta("str", "issues", "issues", 43620, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("connection-fault", "failed-to-connect-to-external-identity-server", 1)
prop._addConstant("none", "none", 0)
meta.props.add("issues", prop)
prop = PropMeta("str", "name", "name", 43619, PropCategory.REGULAR)
prop.label = "Name"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"rrishike@cisco.com"
] |
rrishike@cisco.com
|
166efb42531590143daa24b457398e71d777f4fe
|
61d92f98d12c45f83980ba14b0450f8ec3077a3e
|
/recSys4.py
|
f1bf6d87168d9cc148e693e94542b642aeb27b20
|
[] |
no_license
|
sandersona3/CS4319_RecSys4
|
db8a3845dd5563cb19e5d3c590bdedd7429aa738
|
758aae2deecf82c6fe21313006485116a89ccee5
|
refs/heads/master
| 2020-03-15T18:49:42.115079
| 2018-05-05T18:52:08
| 2018-05-05T18:52:08
| 132,293,266
| 0
| 0
| null | 2018-05-05T23:57:27
| 2018-05-05T23:57:27
| null |
UTF-8
|
Python
| false
| false
| 9,850
|
py
|
import numpy as np
from tkinter import *
from lightfm.datasets import fetch_movielens
from lightfm import LightFM
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd
import importlib
import time
#-----------------------------------------------------------------------------------------------------------------
'''Used to print a time stamp'''
#from recSys4t import *
#print(clock)
'''Used to import the turicreate files'''
import turicreate as tc
from recSys4tc import turiWork
from recSys4tc import itemSim
from tcCompare import *
#turiWork()
#-----------------------------------------------------------------------------------------------------------------
#fetch data from Lightfm, use movies with user rating of 4.0 or greater
data = fetch_movielens(min_rating=4.0)
#create model
model = LightFM(loss='warp')
#train model
model.fit(data['train'], epochs=32, num_threads=4)
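# WARP (Weighted Approximate-Rank Pairwise) loss optimises the top of the
# ranking, which suits producing top-N recommendations.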
print('1.) Terminal \n2.) GUI \n')
terminalGUI = input('Choice: ')
if (int(terminalGUI) == 1 ):
# movie recommender sys
def sample_recommendation(model, data, user_ids):
#number of users and movies in training data
n_users, n_items = data['train'].shape
#generate recommendations for each user we input
for user_id in user_ids:
#movies they already like
fav_movies = data['item_labels'][data['train'].tocsr()[user_id].indices]
#movies our model predicts they will like
scores = model.predict(user_id,np.arange(n_items))
#rank them in order of most liked to least
top_items = data['item_labels'][np.argsort(-scores)]
#print out the results
userRec(user_id,fav_movies,top_items)
#guiRec(user_id,fav_movies,top_items)
def Recommend_user():
#print(type(u1))
user1 = int(u1)
user2 = int(u2)
user3 = int(u3)
#print(type(user))
sample_recommendation(model, data, [user1,user2,user3])
def userRec(user_id,fav_movies,top_items):
#print out the results
print('------------------------------------------')
print('User %s' % user_id)
print('Favorite Movies:')
for x in fav_movies[:5]:
print('%s' % x)
print('\nRecommended Movies:')
for y in top_items[:5]:
print('%s' % y)
#SELECTION SCREEN
print('Welcome to the CS4319 Movie Recommender System\nPlease select one of the following:')
print('1.) Recommend movies for 3 users')
print('2.) Search for similar movies')
print('3.) Recommend most popular movies')
print('4.) Turicreate Compare\n')
selection = input('Selection: ')
## Recommend movies to 3 users using hybrid factorization collab algorithm
if (int(selection) == 1):
#input for users
print('Select movies for users:')
u1 = input('user 1: ')
u2 = input('user 2: ')
u3 = input('user 3: ')
Recommend_user()
## View Similar movies to selected movie using Item similarity algorithm
if (int(selection) == 2):
#used to cycle through films
loop=1
i=0
#print('loop =' + str(loop))
while (loop==1):
#show first 5 movies in movielens list
def showMovies(i):
#print(i)
for i in range(i+5):
print('(' + str(i+1) +')' + data[('item_labels')][i])
print('(0) View More')
def item_similarity(loop):
print('Great! You might also like these movies: \n ')
#item similarity algorithm
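# argsort ranks all items by cosine similarity of their learned embeddings
# (ascending), so the last five entries reversed are the top-5 closest titles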
print(data[('item_labels')][cosine_similarity(model.item_embeddings)[like-1].argsort()][-5:][::-1])
#print('loop =' + str(loop))
showMovies(i)
like = input('Selection: ')
like = int(like)
#show more movies
if (like == 0):
#print(i)
i = i + 5
#print(i)
showMovies(i)
else:
item_similarity(loop)
loop-=1
## View most popular movies to recommend to a user, using popularity algorithm
if (int(selection) == 3):
popRec = input('Recommend top 5 popular movies to user: ')
turiWork(popRec)
if (int(selection) == 4):
print('TURICREATE COMPARE')
print('Compare recommender algorithms for user 1 and 2')
'''
print('Printing item Similarity algorithm:\n')
time.sleep(4)
itemSim()
time.sleep(4)
print('\nPrinting Item popularity algorithm:\n')
time.sleep(4)
itemPop()
time.sleep(4)
print('\nPrinting Rank Factorization algorithm:\n')
time.sleep(4)
rankFactor()
time.sleep(4)
'''
print('\nPrint Model Comparison:\n')
time.sleep(4)
modelCompare()
time.sleep(4)
'''GUI creation of recommender system using Lightfm'''
if (int(terminalGUI) == 2 ):
# root is main page
root = Tk()
root.title('CS4319 Movie Recommender System')
root.geometry('920x640+0+0')
heading = Label(root, text="MOVIE RECOMMENDER SYSTEM",font=("arial",12,"bold"), fg="steelblue").pack()
label1 = Label(root, text="USER: ", font=("arial",10,"bold"),fg="black").place(x=10,y=100)
label2 = Label(root, text="USER: ", font=("arial",10,"bold"),fg="black").place(x=10,y=125)
label3 = Label(root, text="USER: ", font=("arial",10,"bold"),fg="black").place(x=10,y=150)
#input for users
u1 = IntVar()
u2 = IntVar()
u3 = IntVar()
#input for fav movie similar search
m1 = IntVar()
#u1 = ''
entry_box1 = Entry(root, textvariable=u1, width=8,bg="lightblue",exportselection=0).place(x=50, y=100)
entry_box2 = Entry(root, textvariable=u2, width=8,bg="lightblue",exportselection=0).place(x=50, y=125)
entry_box2 = Entry(root, textvariable=u3, width=8,bg="lightblue",exportselection=0).place(x=50, y=150)
# outputs recommendation to the terminal
def userRec(user_id,fav_movies,top_items):
#print out the results
print('------------------------------------------')
print('User %s' % user_id)
print('Favorite Movies:')
for x in fav_movies[:5]:
print('%s' % x)
print('\nRecommended Movies:')
for y in top_items[:5]:
print('%s' % y)
# outputs recommendation to the GUI
def guiRec(user_id,fav_movies,top_items):
#print out the results
newwin = Toplevel(root)
text = Text(newwin)
text.insert(INSERT, 'User %s\n' % user_id)
text.insert(INSERT, 'Favorite Movies:\n')
for x in fav_movies[:5]:
text.insert(INSERT, '%s\n' % x)
text.pack()
text.insert(INSERT, '\nRecommended Movies:\n')
for y in top_items[:5]:
text.insert(INSERT,'%s\n' % y)
text.pack()
# movie recommender sys
def sample_recommendation(model, data, user_ids):
#number of users and movies in training data
n_users, n_items = data['train'].shape
#generate recommendations for each user we input
for user_id in user_ids:
#movies they already like
fav_movies = data['item_labels'][data['train'].tocsr()[user_id].indices]
#movies our model predicts they will like
scores = model.predict(user_id,np.arange(n_items))
#rank them in order of most liked to least
top_items = data['item_labels'][np.argsort(-scores)]
#print out the results
#userRec(user_id,fav_movies,top_items)
guiRec(user_id,fav_movies,top_items)
def Recommend_user():
#print(type(u1))
user1 = int(u1.get())
user2 = int(u2.get())
user3 = int(u3.get())
#print(type(user))
sample_recommendation(model, data, [user1,user2,user3])
#Collaborative algorithm using user ratings at 4.0 to find similar items that have been rated >=4.0 and similar
Collab_algo = Button(root, text="Recommend Movies", width=16,height=2,bg="gray",command=Recommend_user).place(x=8,y=175)
#input for favorite movie
label4 = Label(root, text="what movie are you interested in? ", font=("arial",10,"bold"),fg="black").place(x=10,y=225)
label5 = Label(root, text="Selection: ", font=("arial",10,"bold"),fg="black").place(x=10,y=250)
entry_box3 = Entry(root, textvariable=m1, width=8,bg="lightgreen",exportselection=0).place(x=50, y=275)
simText = Text(root)
like = int(m1.get())
#show first 5 movies in movielens list
for i in range(5):
simText.insert(INSERT, '(' + str(i+1) +')' + data[('item_labels')][i] + '\n')
simText.pack()
def item_similarity():
like = int(m1.get())
simText.insert(INSERT,'Great! You might also like these movies: \n ')
simText.pack()
simText.insert(INSERT, data[('item_labels')][cosine_similarity(model.item_embeddings)[like-1].argsort()][-5:][::-1])
simText.pack()
simText.insert(INSERT,'\n')
sim_item = Button(root, text="Search for similar movies", width=22,height=2,bg="silver",command=item_similarity).place(x=8,y=325)
root.mainloop()
|
[
"noreply@github.com"
] |
sandersona3.noreply@github.com
|
67ee6a173d7aa3b99a2a4a1122caa97f13a4073c
|
9e1feb0bbfaa3c4c87f867d7412a43fab3556897
|
/scheduler.py
|
526392f387a41ec5b9e94d48ac1c5ca1b4470f77
|
[] |
no_license
|
utkarshmalkoti/Autologin-Bot
|
4760bec18293039c365e6fff98786d4cc8c73420
|
95901bcef1b90770ca9ea127539311981acda69a
|
refs/heads/main
| 2023-01-02T23:16:47.090489
| 2020-10-26T19:41:07
| 2020-10-26T19:41:07
| 307,485,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,177
|
py
|
from datetime import datetime
import schedule
import time
from bot import open_class
from selenium import webdriver
import json
# def job(m,tp):
# open_class(m,tp)
# print("wtf is happening")
# driver = webdriver.Chrome()
# driver.get("https://accounts.google.com/signin/v2/identifier?service=classroom&passive=1209600&continue=https%3A%2F%2Fclassroom.google.com%2F%3Femr%3D0&followup=https%3A%2F%2Fclassroom.google.com%2F%3Femr%3D0&flowName=GlifWebSignIn&flowEntry=ServiceLogin")
# print(1)
# time.sleep(m*5)
# driver.close()
try:
with open("Timetable.json",'r') as f:
(tt,durationp,durationt) = json.load(f)
except:
tt = {}
durationt = int(input("Duration of Theory period in minutes: "))
durationp = int(input("Duration of practical period in minutes: "))
for i in range(0,7):
start = []
nperiod = int(input("Enter number of periods for day {}: ".format(i)))
if nperiod>0 :
for j in range(nperiod):
tp = str(input("Theory or practicle (t/p): "))
fg=0
while(fg==0):
if(tp=='t' or tp=='p'):
s = input("Start of period {} in 24hr format: ".format(j))
fg=1
else:
print("Wrong input enter again")
tp = str(input("Theory or practicle (t/p): "))
start.append([s,tp])
tt[str(i)] = start
with open("Timetable.json",'w') as f:
json.dump((tt,durationp,durationt),f)
print("starting")
# breakpoint()
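# Note: the lambdas below bind their arguments via default values; a plain
# closure over time_t/time_p would late-bind and every scheduled job would
# reuse the values from the last loop iteration.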
for t in tt[str(datetime.today().weekday())]:
# for per in t:
# breakpoint()
if t[1] == 't':
print(durationt, t[0], t[1])
schedule.every().day.at(t[0]).do(lambda d=durationt, tp=t[1]: open_class(d, tp))
print("Done1")
else:
print(durationp, t[0], t[1])
schedule.every().day.at(t[0]).do(lambda d=durationp, tp=t[1]: open_class(d, tp))
while 1:
schedule.run_pending()
time.sleep(1)
|
[
"noreply@github.com"
] |
utkarshmalkoti.noreply@github.com
|
c8e80d578cd68aa8317e0953f7ef5935284a2f87
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_291/ch79_2019_04_04_13_07_33_181091.py
|
3938c50619a76c817676aede76fdf6570d5cfa14
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
def monta_dicionario(lista1, lista2):
# pair each key from lista1 with the value at the same position in lista2
dicionario = {}
for chave, valor in zip(lista1, lista2):
dicionario[chave] = valor
return dicionario
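# Example (hypothetical values):
#   monta_dicionario(['a', 'b'], [1, 2])  ->  {'a': 1, 'b': 2}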
|
[
"you@example.com"
] |
you@example.com
|
162492fa49235bd7b871cc325317b45371b97927
|
e7a3f7e0886c2f7a91bc16c6aedb0bc0fb1d9a0d
|
/train_deep.py
|
beed594927122b5b1ecf38a4e5bf7bfbcceded89
|
[] |
no_license
|
arun-apad/nlp_name_classification_network
|
a7ee810cd50fb6412883d8e4f0c3ab899ecf076f
|
f9d5cb752e5fd39a443b91b8b36fa241a5786b56
|
refs/heads/master
| 2021-07-08T05:04:40.975337
| 2017-10-04T18:30:55
| 2017-10-04T18:30:55
| 105,803,771
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,438
|
py
|
#IMPORT ALL THE THINGS
import h2o
import numpy as np
import pandas as pd
import os
from h2o.estimators.deeplearning import H2OAutoEncoderEstimator, H2ODeepLearningEstimator
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.estimators.random_forest import H2ORandomForestEstimator
h2o.init(max_mem_size = 4)
h2o.remove_all()
df = pd.read_csv('./datasets/pos_dictionary_validity.csv')
df['last_name_low_tag'] = df['last_name_low_tag'] == 0
df['is_person'] = df['is_person'] == 1
feature_df = h2o.H2OFrame(df)
#print(feature_df.head())
del df
train, valid, test = feature_df.split_frame([0.6, 0.2], seed=1234)
feature_df_X = feature_df.col_names[3:26]
feature_df_Y = feature_df.col_names[1]
#print(feature_df_X)
#print(feature_df_Y)
dl_v1 = H2ODeepLearningEstimator(model_id="dl_v1", epochs=1, variable_importances=True)
dl_v1.train(feature_df_X, feature_df_Y, training_frame = train, validation_frame = valid)
result_1 = pd.DataFrame(dl_v1.varimp(), columns=["Variable", "Relative Importance", "Scaled Importance", "Percentage"])
#print(result_1)
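# Second model: a small 3x32 network with early stopping -- training halts
# once misclassification stops improving by 1% (stopping_tolerance) over
# 2 scoring rounds (stopping_rounds).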
dl_v1 = H2ODeepLearningEstimator(
model_id="dl_v1",
hidden=[32,32,32], ## small network, runs faster
epochs=1000, ## hopefully converges earlier...
score_validation_samples=1000, ## sample the validation dataset (faster)
stopping_rounds=2,
stopping_metric="misclassification",
stopping_tolerance=0.01)
dl_v1.train(feature_df_X, feature_df_Y, training_frame=train, validation_frame=valid)
#print(dl_v1.score_history())
#print(dl_v1)
#print(test[3:26])
pred = dl_v1.predict(test[3:26]).as_data_frame(use_pandas=True)
test_actual = test.as_data_frame(use_pandas=True)['is_person']
#print(pred.head())
#test_actual['predicted'] = pred['predict']
test_actual = pd.concat([test_actual.reset_index(drop=True), pred], axis=1)
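# Confusion-matrix style breakdown, reported as percentages of each actual class: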
people = len(test_actual[test_actual['is_person'] == True])
people_true = len(test_actual.query('is_person == True & predict == True'))
print("Was person name and predicted as True " + str((people_true / people) * 100))
nonpeople = len(test_actual[test_actual['is_person'] == False])
non_people_true = len(test_actual.query('is_person == False & predict == True'))
print("Was not person name but predicted as True " + str((non_people_true / nonpeople) * 100))
people = len(test_actual[test_actual['is_person'] == True])
people_false = len(test_actual.query('is_person == True & predict == False'))
print("Was person name but predicted as False " + str((people_false / people) * 100))
nonpeople = len(test_actual[test_actual['is_person'] == False])
non_people_false = len(test_actual.query('is_person == False & predict == False'))
print("Was not person name and predicted as False " + str((non_people_false / nonpeople) * 100))
test_actual['str_of_words'] = test.as_data_frame(use_pandas=True)['str_of_words']
print(test_actual.query('is_person == True & predict == True').head(20))
print(test_actual.query('is_person == False & predict == True').head(20))
print(test_actual.query('is_person == True & predict == False').head(20))
print(test_actual.query('is_person == False & predict == False').head(20))
# save the model
model_path = h2o.save_model(model=dl_v1, path="./models", force=True)
#print(dl_v1)
print( model_path)
#./models/dl_v1
h2o.cluster().shutdown(prompt=True)
#h2o.shutdown(prompt=True)
|
[
"arun.apad@gmail.com"
] |
arun.apad@gmail.com
|
e2fa549f3821aa6f53db01fb073523c2046d5353
|
a9037108c99befa7bfeaa8c56761bfa3131921bc
|
/pages/views.py
|
ce424c4ff53a04b90601b4fb6a2416dfc614018a
|
[] |
no_license
|
dogusy/Senior-Project
|
9e11e5fce45489c4f35d570260869a873c3f0929
|
e9b8c6dc6845c0b922831aaf3f52e1d649a4d21e
|
refs/heads/master
| 2022-11-17T18:05:14.356362
| 2020-07-08T17:23:10
| 2020-07-08T17:23:10
| 278,139,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 644
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from django.core.paginator import Paginator,EmptyPage,PageNotAnInteger
from listings.models import Listing
from listings.choices import state_choices,bedroom_choices,price_choices
def index(request):
listings=Listing.objects.order_by('-list_date').filter(is_published=True)[:3]
context={
'listings':listings,
'state_choices':state_choices,
'bedroom_choices':bedroom_choices,
'price_choices':price_choices
}
return render(request,'pages/index.html',context)
def about(request):
return render(request,'pages/about.html')
|
[
"dogusyuksel41@gmail.com"
] |
dogusyuksel41@gmail.com
|
a7117831dd9623057e67c2bb8e55c442b67bf56d
|
c7330806e61bb03e69e859b2ed33ae42fc7916e6
|
/SpritzPlusPlus/Content/Stuffs/Fader.py
|
a69e5c90e9637de5140e8c6edbe86c52db16728d
|
[] |
no_license
|
ixfalia/ZeroProjects
|
ec2f91000a5ce014f7413f32873b10fb01a3ed20
|
17bd2c0f9c3a5ef3705b008f6b128d589aef4168
|
refs/heads/master
| 2020-06-03T05:20:44.370134
| 2019-06-24T05:01:18
| 2019-06-24T05:01:18
| 191,456,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 675
|
py
|
import Zero
import Events
import Property
import VectorMath
import Action  # assumed available: the Zero Engine actions module used by FadeIn/FadeOut below
class Fader:
DebugMode = Property.Bool(default = False)
FadeInDuration = Property.Float(default = 1)
FadeOutDuration = Property.Float(default = 1)
def Initialize(self, initializer):
pass
def FadeOut(self, me, target, time):
seq = Action.Sequence(me)
seq.Add( Action.Property( on=target, property="Color",end=Vec4(1,1,1,0),duration=time))
def FadeIn(self, me, target, time):
seq = Action.Sequence(me)
seq.Add( Action.Property( on=target, property="Color",end=Vec4(1,1,1,1),duration=time))
Zero.RegisterComponent("Fader", Fader)
|
[
"systematisoperandi@gmail.com"
] |
systematisoperandi@gmail.com
|
b79bbe18abea66b53e892571650df5362621c0a5
|
ca8cb5c270beae72cd2524d3fadb94a0b9db4f92
|
/nike_sneaker/nikesnkrs.py
|
23737626fa1fac7d33173391599485c89e23f9fa
|
[] |
no_license
|
savasaurusrexx/nike_sneaker
|
9a72f8fd79513954b00965db82b41104548414c5
|
0b8d72bebf495b7baaf4376b165e612a66592047
|
refs/heads/master
| 2021-05-07T04:20:08.591001
| 2017-09-21T09:25:37
| 2017-09-21T09:25:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,296
|
py
|
# -*- coding: utf-8 -*-
import logging
import re
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from nike_sneaker.config import NIKE_SNEAKER_URL
from nike_sneaker.sneaker import NikeNewSneaker
formatter = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=formatter)
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.INFO)
class NikeSNKRSException(Exception):
pass
class NikeSNKRS(object):
def __init__(self, web_driver='phantomjs'):
self._web_driver_type = web_driver
self.soup = None
self.sneakers = []
def __get_raw_html(self, wait_time=5):
_logger.info("get raw html from {}".format(NIKE_SNEAKER_URL))
start_time = time.time()
try:
if self._web_driver_type == 'phantomjs':
driver = webdriver.PhantomJS()
else:
raise NikeSNKRSException("bad web driver type")
driver.get(NIKE_SNEAKER_URL)
# wait for the JS to finish rendering the page
time.sleep(wait_time)
# TODO: keep clicking "load more" until it no longer exists
try:
load_more_btn = driver.find_element_by_xpath(
# "//a[@'js-load-more ncss-brand bg-white border-top-light-grey u-sm-b u-uppercase u-align-center pt6-sm pb6-sm pt12-lg pb11-lg']")
"//a[starts-with(@class,'js-load-more ')]")
load_more_btn.click()
time.sleep(wait_time)
except Exception:
_logger.info("No load more button found")
self.soup = BeautifulSoup(driver.page_source, "lxml")
_logger.debug(self.soup.prettify())
except Exception as e:
_logger.error(e)
raise NikeSNKRSException("Oops")
finally:
# driver may not exist if PhantomJS failed to start
if 'driver' in locals():
driver.close()
_logger.info("get raw html finished, time: {:5.5f} seconds".format(time.time() - start_time))
def __parse_item_div(self, item_div):
# item id / name and hyper link
js_card_link = item_div.find_all('a', class_='js-card-link card-link u-sm-b', href=True, limit=1)[0]
href = js_card_link['href']
name = href.split('/')[-1]
s = NikeNewSneaker(name)
s.href = 'https://www.nike.com' + href
# release date
launch_time_div = item_div.find_all('div', class_=re.compile('launch-time.*'), limit=1)[0]
release_date_p = launch_time_div.find('p')
if release_date_p:
s.release_date = release_date_p.text
# release time
test_time_heading = item_div.find_all('h6', class_=re.compile('.*test-time.*'), limit=1)[0]
if test_time_heading:
s.release_time = test_time_heading.text
return s
def check_new_release(self):
try:
self.__get_raw_html()
except NikeSNKRSException:
_logger.error("something bad happens when loading page")
return
items = self.soup.find_all('div', class_='upcoming-section')
_logger.info("{} new sneakers are going to be released".format(len(items)))
for item_div in items:
s = self.__parse_item_div(item_div)
print(s)
print('---' * 3)
|
[
"zhangyan@patsnap.com"
] |
zhangyan@patsnap.com
|
09958b6d7276060b6782a499b420b3c26ef4636a
|
bad298748162926d58a88503b97dd773c1d37b5e
|
/lungcancer/Pruebas/Pruebas PyQt/app.py
|
13e820fdd86e452122d4a744b3514b0c8e527ea4
|
[] |
no_license
|
amarogs/lung-cancer-image-processing
|
3aee478e89f1e501709b2b8fdf2c7ff7ddd278aa
|
05985953d29d5f858e40b53b3f9926e373184e23
|
refs/heads/master
| 2020-09-14T05:33:27.182675
| 2020-01-16T11:54:17
| 2020-01-16T11:54:17
| 223,031,439
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,267
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 26 17:44:36 2019
@author: Quique
"""
from principal_ui import *
from PyQt5.QtGui import QPixmap,QImage,QColor
import main as main
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QGridLayout, QWidget
class MainWindow(QtWidgets.QMainWindow, Ui_LCDetection):
def __init__(self, *args, **kwargs):
QtWidgets.QMainWindow.__init__(self, *args, **kwargs)
self.setupUi(self)
self.buttons_acepta_segmentacion.setVisible(False)
self.slider_slices.valueChanged.connect(self.valor_slider)
self.comboBox.addItems(["Paciente 1","Paciente 2","Paciente 3","Paciente 4","Paciente 5","Paciente 6","Paciente 7","Paciente 8"])
self.comboBox.activated.connect(self.inicio)
self.button_slice.clicked.connect(self.dibujar_slice)
#self.slider_slices.setValue()
#self.label.setText("Haz clic en el botón")
#self.pushButton.setText("Presióname")
#self.pushButton.clicked.connect(self.imagen)
def inicio(self):
#self.label_instrucciones.setText("El paciente es: " + str(self.comboBox.currentIndex()))
patient_dir = main.escoge_paciente(self.comboBox.currentIndex())  # avoid shadowing the builtin dir()
main.leer_directorio_usuario(patient_dir)
def valor_slider(self):
self.label_val_slider.setText(str(self.slider_slices.value()))
def dibujar_slice(self):
slice_idx = self.slider_slices.value()  # avoid shadowing the builtin slice
slice_paciente = main.devuelve_array(slice_idx)
self.mplwidget.canvas.axes.clear()
self.mplwidget.canvas.axes.plot(slice_paciente)
self.mplwidget.canvas.draw()
def getPixel(self, event):
x = event.pos().x()
y = event.pos().y()
c = self.img.pixel(x,y)
c_rgb = QColor(c).getRgb()
self.label.setText("Pixel: (" + str(x) +","+ str(y) + ") Color: " + str(c_rgb))
def imagen(self):
self.img = QImage("./image.png")
self.im = QPixmap(QPixmap.fromImage(self.img))
#self.im = QPixmap("./image.png")
self.label.setPixmap(self.im)
self.label.mousePressEvent = self.getPixel
if __name__ == "__main__":
app = QtWidgets.QApplication([])
window = MainWindow()
window.show()
app.exec_()
|
[
"ruizmo.e@gmail.com"
] |
ruizmo.e@gmail.com
|
7a391276ecf9a15a98ec4c70651688dd5cb41015
|
f580680389877a9dc4e24c5558b6452af890fcc1
|
/Pert-2.py
|
0f34656edfe39dfbb4abb0f7c1829b624ee5f0ff
|
[] |
no_license
|
diviandari/ninjaxpress-training
|
b899e92959eb73848eefbb0dedfab0bb6f0f271d
|
b8b525e02e2fd8340f75ae4e12ed556ff7966ab2
|
refs/heads/main
| 2023-08-22T09:00:20.485133
| 2021-10-27T10:03:25
| 2021-10-27T10:03:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
# name = input("Enter name: Divi")
# print ("Your name is" + name)
my_string = "Hello world!"
print(len(my_string))
print(my_string[0:7])
my_string = my_string.replace("world","friend")
print(my_string)
a = "Diviandari"
b = "Sabitha"
c = a + " " + b
print(c)
a = "Siap"
b = 86
c = a + " " + str(b)
print(c)
nama = "Divi"
alamat ="Jakarta"
my_profile = "Nama Saya {} dan saat ini tinggal di {}.".format(nama, alamat)
print(my_profile)
x = "Diviandari Sabitha"
print(x[2:4])
|
[
"diviandari.sabitha@ninjavan.co"
] |
diviandari.sabitha@ninjavan.co
|
b7c2bdb022be0ef908184f13ff620a83aa4873b9
|
97c1a7a5a0b634426bb2698d33fa967805523e29
|
/odoo/addons/bnc_member/wizards/proc_sync_jsport.py
|
aa7dcb56cc288b6859b341f570584b5e6cdd0ac6
|
[] |
no_license
|
gongfranksh/bn_bnc
|
5017d530140fcb9df45aed6f69368731a434ded6
|
8b043d2938a2f3fc8fdabfd451a20c3c62e27073
|
refs/heads/master
| 2021-06-07T17:42:05.427639
| 2020-05-14T02:25:53
| 2020-05-14T02:25:53
| 132,710,496
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30,045
|
py
|
# -*- coding: utf-8 -*-
import logging
import threading
import datetime
from odoo import api, models, tools, registry
from BNmssql import Lz_read_SQLCa
# from idlelib.SearchEngine import get
_logger = logging.getLogger(__name__)
TOTAL_DAY = 10
class proc_sync_jsport(models.TransientModel):
_name = 'proc.sync.jsport'
_description = 'sync.jsport'
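# Pulls categories, brands, employees, suppliers, products and POS sales from
# the JSport SQL Server database into Odoo, using SQL Server rowversion
# timestamps for incremental synchronisation.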
# Get the business unit code
@api.model
def _get_business(self):
res = self.env['bnc.business'].get_bnc_business_bycode('jsp')
return res
@api.model
def get_product_class_recordset(self):
# Get the business unit code
d01 = self._get_business()
# Get the update window: local DB max timestamp vs. remote server max timestamp
local_sql = """
select max(timestamp) AS timestamp from product_category where buid = {0}
"""
local_sql = local_sql.format(d01['id'])
remote_sql = "SELECT CONVERT(INT,max(timestamp)) AS timestamp from product_class "
btw = self.query_period(local_sql, remote_sql)
# Fetch the updated records
sql = """
select ClassId,cast(ClassName as nvarchar(100)) as name,CONVERT(INT,timestamp) AS timestamp
from product_class
where CONVERT(INT,timestamp) between {0} and {1}
"""
sql = sql.format(btw['start_stamp'], btw['end_stamp'])
ms = Lz_read_SQLCa(self)
res = ms.ExecQuery(sql.encode('utf-8'))
return res
@api.model
def get_product_brand_recordset(self):
# Get the business unit code
d01 = self._get_business()
# Get the update window: local DB max timestamp vs. remote server max timestamp
local_sql = """
select max(timestamp) AS timestamp from product_brand where buid = {0}
"""
local_sql = local_sql.format(d01['id'])
remote_sql = "SELECT CONVERT(INT,max(timestamp)) AS timestamp from product_brand"
btw = self.query_period(local_sql, remote_sql)
# Fetch the updated records
sql = """
select brandId,cast(brandName as nvarchar(100)) as name,CONVERT(INT,timestamp) AS timestamp
from product_brand
where CONVERT(INT,timestamp) between {0} and {1}
"""
sql = sql.format(btw['start_stamp'], btw['end_stamp'])
ms = Lz_read_SQLCa(self)
res = ms.ExecQuery(sql.encode('utf-8'))
return res
@api.model
def get_employee_recordset(self):
# Get the business unit code
d01 = self._get_business()
# Get the update window: local DB max timestamp vs. remote server max timestamp
local_sql = """
select max(timestamp) AS timestamp from hr_employee where buid = {0}
"""
local_sql = local_sql.format(d01['id'])
remote_sql = "SELECT CONVERT(INT,max(timestamp)) AS timestamp from branch_employee"
btw = self.query_period(local_sql, remote_sql)
# Fetch the updated records
sql = """
select a.* from
(SELECT be.EmpId,cast(be.EmpName as nvarchar(100)) as name,CONVERT(INT,max(timestamp)) AS timestamp
FROM branch_employee be
group by be.EmpId,be.EmpName ) a
where a.timestamp between {0} and {1}
"""
sql = sql.format(btw['start_stamp'], btw['end_stamp'])
ms = Lz_read_SQLCa(self)
res = ms.ExecQuery(sql.encode('utf-8'))
return res
@api.model
def get_supplier_recordset(self):
# Get the update window: local DB max timestamp vs. remote server max timestamp
local_sql = """
select max(timestamp) AS timestamp from jsport_supplier
"""
remote_sql = "select CONVERT (int,max(timestamp)) as timestamp from supplier"
btw = self.query_period(local_sql, remote_sql)
# Fetch the updated records
sql = """
select SupId,cast(SupName as nvarchar(100)) as name,cast(Addr as nvarchar(100)) as addr,
Tel,Fax,Zip,Email,CONVERT (int,timestamp) as timestamp
from supplier
where CONVERT(INT,timestamp) between {0} and {1}
"""
sql = sql.format(btw['start_stamp'], btw['end_stamp'])
ms = Lz_read_SQLCa(self)
res = ms.ExecQuery(sql.encode('utf-8'))
return res
@api.model
def get_product_recordset(self):
# Get the update window: local DB max timestamp vs. remote server max timestamp
local_sql = """
select max(timestamp) AS timestamp from product_template where buid = {0}
"""
local_sql = local_sql.format(self._get_business()['id'])
remote_sql = "select CONVERT (int,max(timestamp)) as timestamp from product"
btw = self.query_period(local_sql, remote_sql)
# Fetch the updated records
sql = """
select ProId,Barcode,cast(ProName as nvarchar(100)) as name,cast(spec as nvarchar(100)) as spec,
ClassId,SupId,isnull(NormalPrice,0),BrandId,CONVERT (int,timestamp) as timestamp
from product
where CONVERT(INT,timestamp) between {0} and {1}
order by CONVERT (int,timestamp)
"""
sql = sql.format(btw['start_stamp'], btw['end_stamp'])
ms = Lz_read_SQLCa(self)
res = ms.ExecQuery(sql.encode('utf-8'))
return res
#
def sync_product_class(self):
bu_tmp = self._get_business()
# Fetch the records pending update
jspot_product_class_list = self.get_product_class_recordset()
for (classid, name, timestamp) in jspot_product_class_list:
# Build the product.category record
res = {
'code': classid,
'name': name,
'timestamp': timestamp,
'buid': bu_tmp['id'],
}
# Check whether to insert or update
r01 = self.env['product.category'].search_bycode(classid)
if r01:
self.env['product.category'].write(res)
self.env['pos.category'].write(res)
else:
self.env['product.category'].create(res)
self.env['pos.category'].create(res)
self.set_jsport_category_parent()
return True
#
def sync_product_brand(self):
bu_tmp = self._get_business()
# Fetch the records pending update
jspot_product_brand_list = self.get_product_brand_recordset()
for (brandid, name, timestamp) in jspot_product_brand_list:
# Build the product.brand record
res = {
'code': brandid,
'name': name,
'timestamp': timestamp,
'buid': bu_tmp['id'],
}
# Check whether to insert or update
r01 = self.env['product.brand'].search_bycode(brandid)
if r01:
self.env['product.brand'].write(res)
else:
self.env['product.brand'].create(res)
return True
#
def sync_employee(self):
bu_tmp = self._get_business()
# Fetch the records pending update
jspot_employee_list = self.get_employee_recordset()
for (empid, name, timestamp) in jspot_employee_list:
# Build the hr.employee record
res = {
'code': empid,
'name': name,
'timestamp': timestamp,
'buid': bu_tmp['id'],
}
# Check whether to insert or update
r01 = self.env['hr.employee'].search_bycode(empid)
if r01:
self.env['hr.employee'].write(res)
else:
self.env['hr.employee'].create(res)
return True
def sync_supplier(self):
# Fetch the records pending update
jspot_supplier_list = self.get_supplier_recordset()
for (supid, name, addr, tel, fax, zip, email, timestamp) in jspot_supplier_list:
# Build the res.partner values for the supplier
vals = {
'name': supid + '-' + name,
'phone': tel,
'is_company': True,
'strbnctype': 'supplier',
'supplier': True,
'customer': False, }
# Check whether to insert or update
r01 = self.env['jsport.supplier'].search_bycode(supid)
if r01:
self.env['jsport.supplier'].write({
'supid': supid,
'name': name,
'address': addr,
'telephone': tel,
'email': email,
'fax': fax,
'zip': zip,
'timestamp': timestamp,
'buid': self._get_business()['id'],
'resid': self.env['res.partner'].write(vals),
})
else:
self.env['jsport.supplier'].create({
'supid': supid,
'name': name,
'address': addr,
'telephone': tel,
'email': email,
'fax': fax,
'zip': zip,
'timestamp': timestamp,
'buid': self._get_business()['id'],
'resid': self.env['res.partner'].create(vals).id,
})
return True
def sync_pos_machine(self):
# Fetch the records pending update
# Fetch the updated records
sql = """
select braid,posno from pos_machine
"""
ms = Lz_read_SQLCa(self)
pos_machine_list = ms.ExecQuery(sql.encode('utf-8'))
for (braid, posno) in pos_machine_list:
# Build the pos.config record
vals = {
'name': braid + '-' + posno,
# 'company_id':self.env['res.company'].search_bycode(braid).id
}
# Check whether to insert or update
r01 = self.env['pos.config'].search([('name', '=', braid + '-' + posno)])
if not r01:
self.env['pos.config'].create(vals)
return True
#
def sync_pos_data(self):
# Fetch the records pending update
# Fetch the updated records
date = (datetime.datetime.now() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
startdate = datetime.datetime.now()
para_interval = 66
# self.check_pos_data(startdate,para_interval)
self.proc_check_pos_data_weekly()
sql = """
SELECT bs.BraId,bs.saleid,bs.memb_id ,salerid,saleflag,min(DATEADD(hour,-8,bs.SaleDate)) as saledate
FROM v_bn_saledetail bs
where datediff(day,saledate,'{0}')<={1}
group by bs.BraId,bs.saleid,bs.memb_id ,salerid,saleflag
order by braid,saleid,min(DATEADD(hour,-8,bs.SaleDate))
"""
sql = sql.format(date, para_interval)
ms = Lz_read_SQLCa(self)
pos_order_list = ms.ExecQuery(sql.encode('utf-8'))
_logger.info(" bnc =>jspot sync_pos_data total have %d records need to sync" % len(pos_order_list))
_logger.info(" bnc =>jspot sync_pos_data using %s sql" % sql)
for (braid, saleid, memb_id, salerid, saleflag, saledate_order) in pos_order_list:
sql_order_line = """
SELECT bs.saleman,DATEADD(hour,-8,bs.SaleDate) AS SaleDate,bs.proid,bs.SaleQty,
bs.NormalPrice,bs.curprice,bs.amount,bs.SaleType,bs.PosNo,bs.profit
FROM v_bn_saledetail bs
where saleid='{0}'
"""
vals = []
res = []
if braid:
br01 = self.env['res.company'].search_bycode(braid).id
else:
br01 = None
if memb_id:
m01 = self.env['res.partner'].search_bycardid(memb_id).id
else:
m01 = None
if salerid:
s01 = self.env['hr.employee'].search_bycode(salerid).id
else:
s01 = None
sql_order_line = sql_order_line.format(saleid)
pos_order_line = ms.ExecQuery(sql_order_line.encode('utf-8'))
for (saleman, saledate_detail, proid, SaleQty, NormalPrice, curprice, amount, SaleType, PosNo,
profit) in pos_order_line:
res.append((0, 0, {
'product_id': self.env['product.template'].search_bycode(
self._get_business()['strBuscode'] + '-' + proid).id,
'price_unit': curprice,
'qty': SaleQty,
'lngsaleid': self.env['hr.employee'].search_bycode(saleman).id,
}))
vals = {
'date_order': saledate_order,
'company_id': br01,
'user_id': self.env['res.users'].search([('login', '=', 'jspot-users')]).id,
'note': self._get_business()['strBuscode'] + '-' + braid + '-' + saleid,
'partner_id': m01,
'pos_reference': self._get_business()['strBuscode'] + '-' + braid + '-' + saleid,
'lines': res,
'state': 'done',
'buid': self._get_business().id,
'strstoreid': braid,
'lngcasherid': s01,
}
master = self.env['pos.order'].create(vals)
return True
#
def sync_product(self):
# Fetch the records pending update
jspot_product_list = self.get_product_recordset()
_logger.info(" bnc =>jspot sync_product_jspot total have %d records need to sync" % len(jspot_product_list))
for (proid, barcode, name, spec, classid, supid, normalprice, brandid, timestamp) in jspot_product_list:
# Build the product.template record
b01 = self.env['product.brand'].search_bycode(brandid).id
if not b01:
b01 = None
s01 = self.env['jsport.supplier'].search_bycode(supid)
res = {
'code': self._get_business()['strBuscode'] + '-' + proid,
'name': name,
'spec': spec,
'brand_id': b01,
'list_price': normalprice,
'price': normalprice,
'bn_barcode': barcode,
'sale_ok': True,
'default_code': self._get_business()['strBuscode'] + '-' + proid,
'categ_id': self.env['product.category'].search_bycode(classid).id,
'b_category': self.env['product.category'].search_bycode(classid[0:4]).id,
'm_category': self.env['product.category'].search_bycode(classid[0:6]).id,
'sup_id': self.env['jsport.supplier'].search_bycode(supid).id,
'timestamp': timestamp,
'buid': self._get_business()['id'],
}
# Check whether to insert or update
r01 = self.env['product.template'].search_bycode(self._get_business()['strBuscode'] + '-' + proid)
if r01:
self.env['product.template'].write(res)
else:
self.env['product.template'].create(res)
# self.set_jsport_category_parent()
return True
#
def _sync_jsport(self):
self.env['proc.sync.jsport'].sync_product_class()
self.env['proc.sync.jsport'].sync_product_brand()
self.env['proc.sync.jsport'].sync_employee()
self.env['proc.sync.jsport'].sync_supplier()
self.env['proc.sync.jsport'].sync_product()
self.env['proc.sync.jsport'].sync_pos_machine()
self.env['proc.sync.jsport'].proc_check_pos_data_weekly()
return True
@api.multi
def procure_sync_jsport(self):
# bnc_member = self.env['bnc.member']
# bnc_member.sync_bnc_member
# threaded_calculation = threading.Thread(target=self.sync_bnc_member, args=())
# threaded_calculation.start()
self.env['proc.sync.jsport']._sync_jsport()
return {'type': 'ir.actions.act_window_close'}
def check_pos_data(self, para_date, para_interval):
start_date = (para_date - datetime.timedelta(days=para_interval)).strftime("%Y-%m-%d") + ' 00:00:00'
end_date = datetime.datetime.now().strftime("%Y-%m-%d") + ' 23:59:59'
proc_posorder_ids = self.env['pos.order'].search([('date_order', '>=', start_date),
('date_order', '<=', end_date),
('buid', '=', self._get_business().id),
])
if len(proc_posorder_ids) != 0:
print "found records that need to be deleted"
exec_sql = """
delete from pos_order where buid={0} and date_order between '{1}' and '{2}'
"""
exec_sql = exec_sql.format(self._get_business()['id'], start_date, end_date)
cr = self._cr
cr.execute(exec_sql)
return True
@api.multi
def check_pos_data_daily(self, para_interval):
vals = []
end_date = datetime.datetime.now()
for i in range(0, para_interval + 1):
servercnt = 0
localcnt = 0
day = end_date - datetime.timedelta(days=i)
print day
exec_sql = """
select count(*) from pos_order
where to_char(date_order,'yyyy-mm-dd')='{0}' and buid ={1}
"""
exec_sql = exec_sql.format(day.strftime('%Y-%m-%d'), self._get_business().id)
cr = self._cr
cr.execute(exec_sql)
remote_exec_sql = """
select count(*) from (
SELECT bs.BraId,bs.saleid,bs.memb_id ,salerid,saleflag,min(DATEADD(hour,-8,bs.SaleDate)) as saledate
FROM v_bn_saledetail bs
where datediff(day,saledate,'{0}')=0
group by bs.BraId,bs.saleid,bs.memb_id ,salerid,saleflag
) a
"""
remote_exec_sql = remote_exec_sql.format(day)
ms = Lz_read_SQLCa(self)
remote_cnt = ms.ExecQuery(remote_exec_sql.encode('utf-8'))
if remote_cnt:
servercnt = remote_cnt[0][0]
for local_count in cr.fetchall():
localcnt = local_count[0]
_logger.info(" bnc =>jspot check_pos_data_daily ")
_logger.info(" bnc =>jspot check_pos_data_daily local using %s " % exec_sql)
_logger.info(" bnc =>jspot check_pos_data_daily remote using %s " % remote_exec_sql)
# if localcnt==servercnt:
# print 'ok '
# else:
# print day.strftime('%Y%m%d')+'===>'+str(local_count[0]) +'===>' +'Need to clean'
vals.append({'proc_date': day.strftime('%Y-%m-%d'), 'local_records_count': localcnt,
'remote_records_count': servercnt})
return vals
# def proc_check_pos_data_weekly(self):
# # check_pos_data_daily(7): 7 means one week
# # proc_date_task = self.check_pos_data_daily(1)
# procdate='2018-06-25'
# # for d in proc_date_task:
# self.delete_pos_data_daily(procdate)
# self.insert_pos_data_daily(procdate, procdate)
# return True
def proc_check_pos_data_weekly(self):
# check_pos_data_daily(7): 7 means one week
proc_date_task = self.check_pos_data_daily(TOTAL_DAY)
_logger.info(" bnc jspot => proc_check_pos_data_weekly TOTAL_DAY is %d " % TOTAL_DAY)
for d in proc_date_task:
if d['local_records_count'] != d['remote_records_count']:
print d['proc_date'] + '====>' + 'local have' + str(d['local_records_count']) + '==>remote have' + str(
d['remote_records_count']) + '==>need to sync'
_logger.info(
d['proc_date'] + '====>' + 'local have' + str(d['local_records_count']) + '==>remote have' + str(
d['remote_records_count']) + '==>need to sync')
self.delete_pos_data_daily(d['proc_date'])
self.insert_pos_data_daily(d['proc_date'], d['proc_date'])
else:
print d['proc_date'] + '====>' + 'already done!!!'
_logger.info(d['proc_date'] + '====>' + 'already done!!!')
return True
def delete_pos_data_daily(self, ymd):
exec_sql = """
delete from pos_order
where to_char(date_order,'yyyy-mm-dd')='{0}' and buid ={1}
"""
exec_sql = exec_sql.format(ymd, self._get_business().id)
cr = self._cr
cr.execute(exec_sql)
return True
def insert_pos_data_daily(self, begin, end):
# Import the sales data for dates between begin and end
sql = """
SELECT bs.BraId,bs.saleid,bs.memb_id ,salerid,saleflag,min(DATEADD(hour,-8,bs.SaleDate)) as saledate
FROM v_bn_saledetail bs
where saledate between '{0}' and '{1}'
group by bs.BraId,bs.saleid,bs.memb_id ,salerid,saleflag
order by braid,saleid,min(DATEADD(hour,-8,bs.SaleDate))
"""
sql = sql.format(begin + ' 00:00:00', end + ' 23:59:59')
ms = Lz_read_SQLCa(self)
pos_order_list = ms.ExecQuery(sql.encode('utf-8'))
_logger.info(" bnc =>jspot insert_pos_data_daily have %d records need to sync" % len(pos_order_list))
_logger.info(" bnc =>jspot insert_pos_data_daily %s sql" % sql)
for (braid, saleid, memb_id, salerid, saleflag, saledate_order) in pos_order_list:
sql_order_line = """
SELECT bs.saleman,DATEADD(hour,-8,bs.SaleDate) AS SaleDate,bs.proid,bs.SaleQty,
bs.NormalPrice,bs.curprice,bs.amount,bs.SaleType,bs.PosNo,bs.profit
FROM v_bn_saledetail bs
where saleid='{0}'
"""
vals = []
res = []
if braid:
br01 = self.env['res.company'].search_bycode(braid).id
else:
br01 = None
if memb_id:
# print memb_id
m01 = self.env['res.partner'].search_bycardid(memb_id).id
else:
m01 = None
if salerid:
s01 = self.env['hr.employee'].search_bycode(salerid).id
else:
s01 = None
sql_order_line = sql_order_line.format(saleid)
pos_order_line = ms.ExecQuery(sql_order_line.encode('utf-8'))
for (saleman, saledate_detail, proid, SaleQty, NormalPrice, curprice, amount, SaleType, PosNo,
profit) in pos_order_line:
# Transactions with quantity 0 are promotional discounts
qty_tmp = SaleQty
price_unit_tmp = curprice
if SaleQty == 0:
qty_tmp = 1
price_unit_tmp = amount
res.append((0, 0, {
'product_id': self.env['product.product'].search(
[('default_code', '=', self._get_business()['strBuscode'] + '-' + proid)]).id,
'price_unit': price_unit_tmp,
'qty': qty_tmp,
'lngsaleid': self.env['hr.employee'].search_bycode(saleman).id,
}))
vals = {
'date_order': saledate_order,
'company_id': br01,
'user_id': self.env['res.users'].search([('login', '=', 'jspot-users')]).id,
'note': self._get_business()['strBuscode'] + '-' + braid + '-' + saleid,
'partner_id': m01,
'pos_reference': self._get_business()['strBuscode'] + '-' + braid + '-' + saleid,
'lines': res,
'state': 'done',
'buid': self._get_business().id,
'strstoreid': braid,
'lngcasherid': s01,
}
master = self.env['pos.order'].create(vals)
return True
@api.multi
def query_period(self, local, remote):
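# Incremental-sync helper: returns the window [local max timestamp,
# remote max timestamp] so each pass only pulls rows whose SQL Server
# rowversion falls in between.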
start_stamp = 0
end_stamp = 0
query_local = local
query_remote = remote
cr = self._cr
cr.execute(query_local)
for local_max_num in cr.fetchall():
start_stamp = local_max_num[0]
if local_max_num[0] is None:
start_stamp = 0
return_start = start_stamp
ms = Lz_read_SQLCa(self)
remote_stamp = ms.ExecQuery(query_remote.encode('utf-8'))
for end_stamp in remote_stamp:
if remote_stamp[0] is None:
end_stamp = 0
return_end = end_stamp[0]
res = {
'start_stamp': return_start,
'end_stamp': return_end,
}
return res
def set_jsport_category_parent(self):
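# Category codes encode the hierarchy by length: 4 chars = top level,
# 6 chars = middle level, 8 chars = leaf; parents are wired level by level.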
# Check the root category
self.check_jsport_category_root()
# Top-level (big) categories
posroot = self.get_jsport_pos_category_current_recordset(0)
self.exec_set_jsport_pos_category_parent(posroot, 0)
productroot = self.get_jsport_product_category_current_recordset(0)
self.exec_set_jsport_product_category_parent(productroot, 0)
# Mid-level categories
postoplevel = self.get_jsport_pos_category_current_recordset(4)
self.exec_set_jsport_pos_category_parent(postoplevel, 4)
producttoplevel = self.get_jsport_product_category_current_recordset(4)
self.exec_set_jsport_product_category_parent(producttoplevel, 4)
# Leaf-level (small) categories
posmidlevel = self.get_jsport_pos_category_current_recordset(6)
self.exec_set_jsport_pos_category_parent(posmidlevel, 6)
productmidlevel = self.get_jsport_product_category_current_recordset(6)
self.exec_set_jsport_product_category_parent(productmidlevel, 6)
return True
def check_jsport_category_root(self):
r01 = self.env['product.category'].search_bycode(self._get_business()['strBuscode'])
if not r01:
res = {
'code': self._get_business()['strBuscode'],
'name': self._get_business()['strBusName'],
'buid': self._get_business()['id'],
}
self.env['product.category'].create(res)
self.env['pos.category'].create(res)
return True
@api.multi
def get_jsport_pos_category_current_recordset(self, lens):
if lens != 0:
select_sql = """
select id,code from pos_category where length(code)={0} and buid={1}
"""
select_sql = select_sql.format(lens, self._get_business()['id'])
else:
select_sql = """
select id,code from pos_category where code='{0}' and buid={1}
"""
select_sql = select_sql.format(self._get_business()['strBuscode'], self._get_business()['id'])
cr = self._cr
cr.execute(select_sql)
res = cr.fetchall()
return res
@api.multi
def get_jsport_product_category_current_recordset(self, lens):
if lens != 0:
select_sql = """
select id,code from product_category where length(code)={0} and buid={1}
"""
select_sql = select_sql.format(lens, self._get_business()['id'])
else:
select_sql = """
select id,code from product_category where code='{0}' and buid={1}
"""
select_sql = select_sql.format(self._get_business()['strBuscode'], self._get_business()['id'])
cr = self._cr
cr.execute(select_sql)
res = cr.fetchall()
return res
def exec_set_jsport_pos_category_parent(self, vals, lens):
for (parentid, code) in vals:
if lens != 0:
sublens = lens + 2
exec_sql = """
update pos_category set parent_id={0} where length(code)={1} and substr(code,1,{2})='{3}'
and buid={4}
"""
exec_sql = exec_sql.format(parentid, sublens, lens, code, self._get_business()['id'])
else:
exec_sql = """
update pos_category set parent_id={0} where length(code)=4 and buid={1}
"""
exec_sql = exec_sql.format(parentid, self._get_business()['id'])
cr = self._cr
cr.execute(exec_sql)
return True
def exec_set_jsport_product_category_parent(self, vals, lens):
for (parentid, code) in vals:
if lens != 0:
sublens = lens + 2
exec_sql = """
update product_category set parent_id={0} where length(code)={1} and substr(code,1,{2})='{3}'
and buid={4}
"""
exec_sql = exec_sql.format(parentid, sublens, lens, code, self._get_business()['id'])
else:
exec_sql = """
update product_category set parent_id={0} where length(code)=4 and buid={1}
"""
exec_sql = exec_sql.format(parentid, self._get_business()['id'])
cr = self._cr
cr.execute(exec_sql)
return True
|
[
"gongfranksh@qq.com"
] |
gongfranksh@qq.com
|
b6f885a036c0c323dfed1c093d25f21f84920b10
|
c91b31020eec49a269e9efb6d2d52ed25cc0d59a
|
/.history/skillathon_ocr/uploader/models_20201028150648.py
|
a831e650d2121d4c9676c19dc21d83283415ca9e
|
[] |
no_license
|
Pvnsraviteja/Django-Image2Text
|
b1df3bd7ff46cff293441f76a0ba2f97cfd5aa07
|
6a6982b1ebb1be7cc9f470a1fe7d8f268764f072
|
refs/heads/main
| 2023-01-04T13:38:01.150128
| 2020-10-28T13:06:26
| 2020-10-28T13:06:26
| 307,948,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,229
|
py
|
from django.db import models
from core import utils
import hashlib
from PIL import Image
import pytesseract
import datefinder
class ImageFileManager(models.Manager):
def search(self, query):
return self.get_queryset().filter(models.Q(internal_reference__icontains=query) |
models.Q(name__icontains=query) |
models.Q(description__icontains=query)
)
class ImageFile(models.Model):
name = models.CharField("Name", max_length=100)
internal_reference = models.CharField("Internal Reference", max_length=100, editable=False)
description = models.TextField("Description", blank=True, null=True)
image = models.ImageField(upload_to="OCR_image/input/", verbose_name="Input Image")
create_at = models.DateTimeField("Create at", auto_now_add=True)
updated_at = models.DateTimeField("Update at", auto_now=True)
def __str__(self):
return "{0:03d} - {1}".format(self.id, self.image)
def execute_and_save_ocr(self):
import time
start_time = time.time()
img = Image.open(self.image)
txt = pytesseract.image_to_string(img, lang='eng')
# keep the last date datefinder spots in the OCR output (None if there is none)
match = None
for match in datefinder.find_dates(txt):
print(match)
execution_time = time.time() - start_time
# note: OCRText has no text1 field, so the detected date is only printed here
ocr_txt = OCRText(image=self, text=txt, lang="EN", execution_time=execution_time)
ocr_txt.save()
print("The image {0} was opened.".format(self.image))
print('OCR: \n{0}\n'.format(txt))
print('Last date found: {0}'.format(match))
print('Execution Time: {0}'.format(ocr_txt.execution_time))
return ocr_txt
"""
def get_absolute_url(self):
from django.urls import reverse
return reverse('course_details', args=[], kwargs={'slug': self.slug})
"""
def save(self, *args, **kwargs):
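# first save only: derive an md5-based internal_reference from the record id
# plus a unique random value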
if not self.internal_reference:
random_value = utils.random_value_generator(size=20)
while ImageFile.objects.filter(internal_reference=random_value).exists():
random_value = utils.random_value_generator(size=20)
hash_value = hashlib.md5(bytes(str(self.id) + str(random_value), 'utf-8'))
self.internal_reference = hash_value.hexdigest()
super(ImageFile, self).save(*args, **kwargs)
class Meta:
verbose_name = "ImageFile"
verbose_name_plural = "ImageFiles"
ordering = ['id']
objects = ImageFileManager()
class OCRText(models.Model):
text = models.TextField("OCR text", blank=True)
lang = models.TextField("Language", default="EN")
execution_time = models.IntegerField("Execution Time", editable=False, null=True);
image = models.ForeignKey('ImageFile', on_delete=models.CASCADE)
create_at = models.DateTimeField("Create at", auto_now_add=True)
updated_at = models.DateTimeField("Update at", auto_now=True)
def __str__(self):
return "{0:03d} - {1}".format(self.id, self.image.internal_reference)
class Meta:
verbose_name = "OCRText"
verbose_name_plural = "OCRTexts"
ordering = ['id']
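# Usage sketch (hypothetical, e.g. from the Django shell); assumes Tesseract is
# installed and at least one ImageFile row exists:
#   image_file = ImageFile.objects.first()
#   ocr_txt = image_file.execute_and_save_ocr()
#   print(ocr_txt.text)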
|
[
"pvnsraviteja1120@gmail.com"
] |
pvnsraviteja1120@gmail.com
|
f2f94aae32c421c20add1c502b815fd1898e114a
|
801f462c3533d93bdf6c6668b7243aceb1948073
|
/hw3/scripts/p4_df_function_count.py
|
4f0840942ff70d9a4354d77031eba6d8a809cb68
|
[] |
no_license
|
amitanalyste/e63-coursework
|
942affba05e893030a0a9df7e7d61cae2d513914
|
731af65e43f9b3a308920d85d7e984645329685a
|
refs/heads/master
| 2020-03-28T13:46:24.848273
| 2017-12-20T15:13:05
| 2017-12-20T15:13:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 680
|
py
|
# Import libraries
import findspark
findspark.init("/opt/spark-2.2.0-bin-hadoop2.7")
from pyspark.sql import SparkSession
# Create Session
spark = SparkSession.builder.master("local") \
.appName("p4_df_filter_count").getOrCreate()
# Read data
df_ulysses = spark.read.text("/home/tim/e63-coursework/hw3/data/ulysses10.txt")
# Filter values
df_linematch = df_ulysses.filter(df_ulysses.value.contains('afternoon') |
df_ulysses.value.contains('night') |
df_ulysses.value.contains('morning'))
# Print data
print "Number of lines with 'morning', 'afternoon', 'night':"
print df_linematch.count()
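# The same filter as a one-line sketch with a regex, assuming Column.rlike
# (untested here):
#   df_linematch = df_ulysses.filter(df_ulysses.value.rlike('afternoon|night|morning'))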
|
[
"tim@ip-172-31-18-124.eu-central-1.compute.internal"
] |
tim@ip-172-31-18-124.eu-central-1.compute.internal
|
7a8775276f876072a243f8dff155f1477d9c4f67
|
2e0b0182b137ddacecbbbf91a209178e427cf5da
|
/user_data/strategies/Martin.py
|
808313f06fbe7b74e057176c49cced745209be02
|
[] |
no_license
|
mpicard/lucy
|
0bec1fc00172d8862fc9eabdaf53654e98412816
|
34591dd9f5365682e709091c567051d63d749552
|
refs/heads/main
| 2023-03-20T18:44:05.734890
| 2021-02-25T20:26:09
| 2021-02-25T20:26:09
| 341,551,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,614
|
py
|
# pragma pylint: disable=missing-docstring, invalid-name, pointless-string-statement
# --- Do not remove these libs ---
import numpy as np # noqa
import pandas as pd # noqa
from pandas import DataFrame
from freqtrade.strategy.interface import IStrategy
# --------------------------------
# Add your lib to import here
import talib.abstract as ta
import freqtrade.vendor.qtpylib.indicators as qtpylib
class Martin(IStrategy):
"""
This is a strategy template to get you started.
More information in https://github.com/freqtrade/freqtrade/blob/develop/docs/bot-optimization.md
    You can:
- Rename the class name (Do not forget to update class_name)
- Add any methods you want to build your strategy
- Add any lib you need to build your strategy
You must keep:
- the lib in the section "Do not remove these libs"
- the prototype for the methods: minimal_roi, stoploss, populate_indicators, populate_buy_trend,
populate_sell_trend, hyperopt_space, buy_strategy_generator
"""
# Strategy interface version - allow new iterations of the strategy interface.
# Check the documentation or the Sample strategy to get the latest version.
INTERFACE_VERSION = 2
# Minimal ROI designed for the strategy.
# This attribute will be overridden if the config file contains "minimal_roi".
minimal_roi = {"60": 0.01, "30": 0.02, "0": 0.04}
# Optimal stoploss designed for the strategy.
# This attribute will be overridden if the config file contains "stoploss".
stoploss = -0.10
# Trailing stoploss
trailing_stop = False
# trailing_only_offset_is_reached = False
# trailing_stop_positive = 0.01
# trailing_stop_positive_offset = 0.0 # Disabled / not configured
# Optimal timeframe for the strategy.
timeframe = "5m"
# Run "populate_indicators()" only for new candle.
process_only_new_candles = False
# These values can be overridden in the "ask_strategy" section in the config.
use_sell_signal = True
sell_profit_only = False
ignore_roi_if_buy_signal = False
# Number of candles the strategy requires before producing valid signals
startup_candle_count: int = 30
# Optional order type mapping.
order_types = {
"buy": "limit",
"sell": "limit",
"stoploss": "market",
"stoploss_on_exchange": False,
}
# Optional order time in force.
order_time_in_force = {"buy": "gtc", "sell": "gtc"}
plot_config = {
# Main plot indicators (Moving averages, ...)
"main_plot": {
"tema": {},
"sar": {"color": "white"},
},
"subplots": {
# Subplots - each dict defines one additional plot
"MACD": {
"macd": {"color": "blue"},
"macdsignal": {"color": "orange"},
},
"RSI": {
"rsi": {"color": "red"},
},
},
}
def informative_pairs(self):
"""
Define additional, informative pair/interval combinations to be cached from the exchange.
These pair/interval combinations are non-tradeable, unless they are part
of the whitelist as well.
For more information, please consult the documentation
:return: List of tuples in the format (pair, interval)
Sample: return [("ETH/USDT", "5m"),
("BTC/USDT", "15m"),
]
"""
return []
def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
"""
Adds several different TA indicators to the given DataFrame
:param dataframe: Dataframe with data from the exchange
:param metadata: Additional information, like the currently traded pair
:return: a Dataframe with all mandatory indicators for the strategies
"""
# Momentum Indicators
# ------------------------------------
# ADX
# dataframe["adx"] = ta.ADX(dataframe)
# # Plus Directional Indicator / Movement
# dataframe['plus_dm'] = ta.PLUS_DM(dataframe)
# dataframe['plus_di'] = ta.PLUS_DI(dataframe)
# # Minus Directional Indicator / Movement
# dataframe['minus_dm'] = ta.MINUS_DM(dataframe)
# dataframe['minus_di'] = ta.MINUS_DI(dataframe)
# # Aroon, Aroon Oscillator
# aroon = ta.AROON(dataframe)
# dataframe['aroonup'] = aroon['aroonup']
# dataframe['aroondown'] = aroon['aroondown']
# dataframe['aroonosc'] = ta.AROONOSC(dataframe)
# # Awesome Oscillator
# dataframe['ao'] = qtpylib.awesome_oscillator(dataframe)
# # Keltner Channel
# keltner = qtpylib.keltner_channel(dataframe)
# dataframe["kc_upperband"] = keltner["upper"]
# dataframe["kc_lowerband"] = keltner["lower"]
# dataframe["kc_middleband"] = keltner["mid"]
# dataframe["kc_percent"] = (
# (dataframe["close"] - dataframe["kc_lowerband"]) /
# (dataframe["kc_upperband"] - dataframe["kc_lowerband"])
# )
# dataframe["kc_width"] = (
# (dataframe["kc_upperband"] - dataframe["kc_lowerband"]) / dataframe["kc_middleband"]
# )
# # Ultimate Oscillator
# dataframe['uo'] = ta.ULTOSC(dataframe)
# # Commodity Channel Index: values [Oversold:-100, Overbought:100]
# dataframe['cci'] = ta.CCI(dataframe)
# RSI
dataframe["rsi"] = ta.RSI(dataframe)
# # Inverse Fisher transform on RSI: values [-1.0, 1.0] (https://goo.gl/2JGGoy)
# rsi = 0.1 * (dataframe['rsi'] - 50)
# dataframe['fisher_rsi'] = (np.exp(2 * rsi) - 1) / (np.exp(2 * rsi) + 1)
# # Inverse Fisher transform on RSI normalized: values [0.0, 100.0] (https://goo.gl/2JGGoy)
# dataframe['fisher_rsi_norma'] = 50 * (dataframe['fisher_rsi'] + 1)
# # Stochastic Slow
# stoch = ta.STOCH(dataframe)
# dataframe['slowd'] = stoch['slowd']
# dataframe['slowk'] = stoch['slowk']
# Stochastic Fast
# stoch_fast = ta.STOCHF(dataframe)
# dataframe["fastd"] = stoch_fast["fastd"]
# dataframe["fastk"] = stoch_fast["fastk"]
# # Stochastic RSI
# Please read https://github.com/freqtrade/freqtrade/issues/2961 before using this.
# STOCHRSI is NOT aligned with tradingview, which may result in non-expected results.
# stoch_rsi = ta.STOCHRSI(dataframe)
# dataframe['fastd_rsi'] = stoch_rsi['fastd']
# dataframe['fastk_rsi'] = stoch_rsi['fastk']
# MACD
macd = ta.MACD(dataframe)
dataframe["macd"] = macd["macd"]
dataframe["macdsignal"] = macd["macdsignal"]
dataframe["macdhist"] = macd["macdhist"]
# MFI
dataframe["mfi"] = ta.MFI(dataframe)
# # ROC
# dataframe['roc'] = ta.ROC(dataframe)
# Overlap Studies
# ------------------------------------
# Bollinger Bands
bollinger = qtpylib.bollinger_bands(
qtpylib.typical_price(dataframe), window=20, stds=2
)
dataframe["bb_lowerband"] = bollinger["lower"]
dataframe["bb_middleband"] = bollinger["mid"]
dataframe["bb_upperband"] = bollinger["upper"]
dataframe["bb_percent"] = (dataframe["close"] - dataframe["bb_lowerband"]) / (
dataframe["bb_upperband"] - dataframe["bb_lowerband"]
)
dataframe["bb_width"] = (
dataframe["bb_upperband"] - dataframe["bb_lowerband"]
) / dataframe["bb_middleband"]
# Bollinger Bands - Weighted (EMA based instead of SMA)
# weighted_bollinger = qtpylib.weighted_bollinger_bands(
# qtpylib.typical_price(dataframe), window=20, stds=2
# )
# dataframe["wbb_upperband"] = weighted_bollinger["upper"]
# dataframe["wbb_lowerband"] = weighted_bollinger["lower"]
# dataframe["wbb_middleband"] = weighted_bollinger["mid"]
# dataframe["wbb_percent"] = (
# (dataframe["close"] - dataframe["wbb_lowerband"]) /
# (dataframe["wbb_upperband"] - dataframe["wbb_lowerband"])
# )
# dataframe["wbb_width"] = (
# (dataframe["wbb_upperband"] - dataframe["wbb_lowerband"]) / dataframe["wbb_middleband"]
# )
# # EMA - Exponential Moving Average
# dataframe['ema3'] = ta.EMA(dataframe, timeperiod=3)
# dataframe['ema5'] = ta.EMA(dataframe, timeperiod=5)
# dataframe['ema10'] = ta.EMA(dataframe, timeperiod=10)
# dataframe['ema21'] = ta.EMA(dataframe, timeperiod=21)
# dataframe['ema50'] = ta.EMA(dataframe, timeperiod=50)
# dataframe['ema100'] = ta.EMA(dataframe, timeperiod=100)
# # SMA - Simple Moving Average
# dataframe['sma3'] = ta.SMA(dataframe, timeperiod=3)
# dataframe['sma5'] = ta.SMA(dataframe, timeperiod=5)
# dataframe['sma10'] = ta.SMA(dataframe, timeperiod=10)
# dataframe['sma21'] = ta.SMA(dataframe, timeperiod=21)
# dataframe['sma50'] = ta.SMA(dataframe, timeperiod=50)
# dataframe['sma100'] = ta.SMA(dataframe, timeperiod=100)
# Parabolic SAR
dataframe["sar"] = ta.SAR(dataframe)
# TEMA - Triple Exponential Moving Average
dataframe["tema"] = ta.TEMA(dataframe, timeperiod=9)
# Cycle Indicator
# ------------------------------------
# Hilbert Transform Indicator - SineWave
hilbert = ta.HT_SINE(dataframe)
dataframe["htsine"] = hilbert["sine"]
dataframe["htleadsine"] = hilbert["leadsine"]
# Pattern Recognition - Bullish candlestick patterns
# ------------------------------------
# # Hammer: values [0, 100]
# dataframe['CDLHAMMER'] = ta.CDLHAMMER(dataframe)
# # Inverted Hammer: values [0, 100]
# dataframe['CDLINVERTEDHAMMER'] = ta.CDLINVERTEDHAMMER(dataframe)
# # Dragonfly Doji: values [0, 100]
# dataframe['CDLDRAGONFLYDOJI'] = ta.CDLDRAGONFLYDOJI(dataframe)
# # Piercing Line: values [0, 100]
# dataframe['CDLPIERCING'] = ta.CDLPIERCING(dataframe) # values [0, 100]
# # Morningstar: values [0, 100]
# dataframe['CDLMORNINGSTAR'] = ta.CDLMORNINGSTAR(dataframe) # values [0, 100]
# # Three White Soldiers: values [0, 100]
# dataframe['CDL3WHITESOLDIERS'] = ta.CDL3WHITESOLDIERS(dataframe) # values [0, 100]
# Pattern Recognition - Bearish candlestick patterns
# ------------------------------------
# # Hanging Man: values [0, 100]
# dataframe['CDLHANGINGMAN'] = ta.CDLHANGINGMAN(dataframe)
# # Shooting Star: values [0, 100]
# dataframe['CDLSHOOTINGSTAR'] = ta.CDLSHOOTINGSTAR(dataframe)
# # Gravestone Doji: values [0, 100]
# dataframe['CDLGRAVESTONEDOJI'] = ta.CDLGRAVESTONEDOJI(dataframe)
# # Dark Cloud Cover: values [0, 100]
# dataframe['CDLDARKCLOUDCOVER'] = ta.CDLDARKCLOUDCOVER(dataframe)
# # Evening Doji Star: values [0, 100]
# dataframe['CDLEVENINGDOJISTAR'] = ta.CDLEVENINGDOJISTAR(dataframe)
# # Evening Star: values [0, 100]
# dataframe['CDLEVENINGSTAR'] = ta.CDLEVENINGSTAR(dataframe)
# Pattern Recognition - Bullish/Bearish candlestick patterns
# ------------------------------------
# # Three Line Strike: values [0, -100, 100]
# dataframe['CDL3LINESTRIKE'] = ta.CDL3LINESTRIKE(dataframe)
# # Spinning Top: values [0, -100, 100]
# dataframe['CDLSPINNINGTOP'] = ta.CDLSPINNINGTOP(dataframe) # values [0, -100, 100]
# # Engulfing: values [0, -100, 100]
# dataframe['CDLENGULFING'] = ta.CDLENGULFING(dataframe) # values [0, -100, 100]
# # Harami: values [0, -100, 100]
# dataframe['CDLHARAMI'] = ta.CDLHARAMI(dataframe) # values [0, -100, 100]
# # Three Outside Up/Down: values [0, -100, 100]
# dataframe['CDL3OUTSIDE'] = ta.CDL3OUTSIDE(dataframe) # values [0, -100, 100]
# # Three Inside Up/Down: values [0, -100, 100]
# dataframe['CDL3INSIDE'] = ta.CDL3INSIDE(dataframe) # values [0, -100, 100]
# # Chart type
# # ------------------------------------
# # Heikin Ashi Strategy
# heikinashi = qtpylib.heikinashi(dataframe)
# dataframe['ha_open'] = heikinashi['open']
# dataframe['ha_close'] = heikinashi['close']
# dataframe['ha_high'] = heikinashi['high']
# dataframe['ha_low'] = heikinashi['low']
# Retrieve best bid and best ask from the orderbook
# ------------------------------------
"""
# first check if dataprovider is available
if self.dp:
if self.dp.runmode in ('live', 'dry_run'):
ob = self.dp.orderbook(metadata['pair'], 1)
dataframe['best_bid'] = ob['bids'][0][0]
dataframe['best_ask'] = ob['asks'][0][0]
"""
return dataframe
def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
"""
Based on TA indicators, populates the buy signal for the given dataframe
:param dataframe: DataFrame populated with indicators
:param metadata: Additional information, like the currently traded pair
:return: DataFrame with buy column
"""
dataframe.loc[
(
(qtpylib.crossed_above(dataframe["rsi"], 30))
& (dataframe["tema"] <= dataframe["bb_middleband"])
& (dataframe["tema"] > dataframe["tema"].shift(1))
& (dataframe["volume"] > 0)
),
"buy",
] = 1
return dataframe
def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
"""
Based on TA indicators, populates the sell signal for the given dataframe
:param dataframe: DataFrame populated with indicators
:param metadata: Additional information, like the currently traded pair
:return: DataFrame with buy column
"""
dataframe.loc[
(
(qtpylib.crossed_above(dataframe["rsi"], 70))
& (dataframe["tema"] > dataframe["bb_middleband"])
& (dataframe["tema"] < dataframe["tema"].shift(1))
& (dataframe["volume"] > 0)
),
"sell",
] = 1
return dataframe
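# Usage sketch (an assumption, not part of this file): with freqtrade installed
# and candle data downloaded, the strategy can be backtested from the CLI:
#   freqtrade backtesting --strategy Martin --timeframe 5m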
|
[
"martin.picard@thoughtworks.com"
] |
martin.picard@thoughtworks.com
|
085f0719a2e1ea2cf251de48b2646dd7055c61ec
|
9783a96d0e5545041536975d86b736e552ccbdd1
|
/constants.py
|
af2ae621fe4469440f748165fcf5f2dcd9c45597
|
[] |
no_license
|
ar852/flappy-bird-reinforcement-learning
|
05ab02c8460a5e649035c2a10da75de34bf59e83
|
5e795e3ef36513eb158c650cf8d1bfa928824c52
|
refs/heads/main
| 2023-06-09T11:43:11.241866
| 2021-06-29T19:17:43
| 2021-06-29T19:17:43
| 381,468,258
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 962
|
py
|
ACTION_SIZE = 2
STACK_SIZE = 4
TESTING_EPISODES = 4 # run after each epoch of training
PROCESSED_IMG_SIZE = (81, 56) # 5 times reduction, was originally 3 but that took even longer to train
STATE_SIZE = (81, 56, 4)
EXPLORE_START = 0.5
EXPLORE_STOP = 0.02
DECAY_RATE = 0.001
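# A typical epsilon-greedy schedule these constants suggest (an assumption;
# the training loop itself lives elsewhere):
#   epsilon = EXPLORE_STOP + (EXPLORE_START - EXPLORE_STOP) * exp(-DECAY_RATE * step)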
WEIGHTED_ACTIONS = [1] * 40 + [0] * 60 # don't flap is more common than flap
LEARNING_RATE = 0.0005 # for adam optimizer (should this be smaller?)
GAMMA = 0.95
TARGET_UPDATE_FREQUENCY = 100
MODEL_SAVE_FREQUENCY = 1000
BATCH_SIZE = 128 # Based off of what other people used from our research
def make_plot(path):
import pickle
import pandas as pd
from matplotlib import pyplot as plt
with open(path, "rb") as file:
tup = pickle.load(file)
df = pd.DataFrame(tup, columns=["Loss", "Score"])
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12,8))
axes[0].plot(df.Loss)
axes[1].plot(df.Score)
plt.show()
make_plot("metrics 05-06-2021")
|
[
"noreply@github.com"
] |
ar852.noreply@github.com
|
ab3e6ff2c55132dec35a9a2fd75213d401744a40
|
fb3194f5022d63279432772b90e6d43e7a55afff
|
/positive.py
|
ade9fd65888ec9e2d870f7de1fe90385b8af39f0
|
[] |
no_license
|
Remithasakthi/python
|
d6d9d2080eed06afef7c54460e70b2cd814c5a82
|
97fd569415046699987d632419917e469df17cfe
|
refs/heads/master
| 2020-03-29T02:28:01.682353
| 2019-02-13T08:44:29
| 2019-02-13T08:44:29
| 149,438,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 84
|
py
|
x=6
if(x==0):
print("zero")
elif(x>0):
print("positive")
else:
print("negative")
|
[
"noreply@github.com"
] |
Remithasakthi.noreply@github.com
|
7c89c6b6d6cb9ef4568a2c4f37ca8d8002d5cb2f
|
154a50e20e85762d00ba72404df419b249ed0ee7
|
/view.py
|
0e903388178cabe57b0649f06ab8a75e62ed04c8
|
[] |
no_license
|
Edwin-zzp/pythonpjUI
|
68cddbfbbca985f3446923c1ee925b014c5ace60
|
4da467fe40e9d341fcc56967e6a44651e0b03daf
|
refs/heads/master
| 2021-09-25T00:42:27.899460
| 2018-09-24T12:02:33
| 2018-09-24T12:02:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
# -*- coding: UTF-8 -*-
import os#, sys
import time
import win32api
import win32con
#import win32gui
#from ctypes import *
fname="D:/VIDEO/12.mp4"
fn="12.mp4"
#print(fn)
#print(fname)
#os.system(r"D:/VIDEO/mplayer.exe -fs -loop 0 -fixed-vo " + fname)
os.popen(r"D:/VIDEO/mplayer.exe -loop 0 -fixed-vo " + fname) #-fs
time.sleep(2)
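# press and release the space bar (virtual-key code 32) to toggle pause in mplayer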
win32api.keybd_event(32,0,0,0)
time.sleep(0.01)
win32api.keybd_event(32,0,win32con.KEYEVENTF_KEYUP,0)
|
[
"zhangzzpcxxy@163.com"
] |
zhangzzpcxxy@163.com
|
83edf902700fd6fc2933f74604fddf42f8643f67
|
e0160cc64c1b1d4de8475627eb7d69843784b287
|
/smooth_filter.py
|
4756d835ced6b064cd26025dfc4575131a38ae68
|
[] |
no_license
|
ocean1100/stereo-disparity
|
d1aba8faf3647e7c7994b9a7db6a4480ebaac9bc
|
d26f5af0927aed9ce54c4ea48b2daa1b08f230c2
|
refs/heads/master
| 2020-07-05T08:42:49.232987
| 2018-03-05T20:14:24
| 2018-03-05T20:14:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
from scipy.signal import convolve2d
from numpy import *
import math  # needed for math.ceil; not provided by the numpy star import
def gaussian_grid(sigma, alpha=4):
    sig = max(sigma, 0.01)
    length = int(math.ceil(sig * alpha)) + 1
    m = length // 2  # integer half-width so mgrid gets integer bounds
    n = m + 1
    x, y = mgrid[-m:n, -m:n]
    # standard 2D Gaussian of width sig; the division below normalizes the kernel to sum to 1
    g = exp(-0.5 * (x**2 + y**2) / sig**2)
    return g / g.sum()
def filter_image(image, mask):
layer = asarray(image).astype('float')
layer = convolve2d(layer, mask, mode='same')
#layer = convolve2d(layer, mask, mode='same')
return layer
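# Usage sketch (an assumption, not part of the original file): smooth a
# grayscale image with a sigma-1.5 Gaussian kernel
#   from PIL import Image
#   img = Image.open('left.png').convert('L')
#   smoothed = filter_image(img, gaussian_grid(1.5))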
|
[
"noreply@github.com"
] |
ocean1100.noreply@github.com
|
5cc497220acc91b16224bbca65b4ddc141c2d45d
|
59b27bea463addbade9e0f048eb08358c6901ded
|
/Caesar Cipher Code.py
|
51d62c47eca27743bebe503b6e5d5b7e83b797f9
|
[] |
no_license
|
Jlamba2108/Basic-Python-Projects
|
86bf9c6a54dbd1f2d57e356bfb36c7d190a937ad
|
8b7a34cfa54896c8fd9191704b948e93ad69c511
|
refs/heads/master
| 2023-05-08T10:47:01.239786
| 2021-06-07T05:22:31
| 2021-06-07T05:22:31
| 364,494,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,849
|
py
|
logo='''
______ ______ ______ ______ ______ ______ ______ __ ______ __ __ ______ ______
/\ ___\ /\ __ \ /\ ___\ /\ ___\ /\ __ \ /\ == \ /\ ___\ /\ \ /\ == \ /\ \_\ \ /\ ___\ /\ == \
\ \ \____ \ \ __ \ \ \ __\ \ \___ \ \ \ __ \ \ \ __< \ \ \____ \ \ \ \ \ _-/ \ \ __ \ \ \ __\ \ \ __<
\ \_____\ \ \_\ \_\ \ \_____\ \/\_____\ \ \_\ \_\ \ \_\ \_\ \ \_____\ \ \_\ \ \_\ \ \_\ \_\ \ \_____\ \ \_\ \_\
\/_____/ \/_/\/_/ \/_____/ \/_____/ \/_/\/_/ \/_/ /_/ \/_____/ \/_/ \/_/ \/_/\/_/ \/_____/ \/_/ /_/
'''
print(logo)
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z','a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
def caesar(text,shift,direction):
message=""
for i in range(0,len(text)):
char=text[i]
if char in alphabet:
char_index=alphabet.index(char)
if direction=="encode":
char_index+=shift
elif direction=="decode":
char_index-=shift
char=alphabet[char_index]
message+=char
else:
message+=char
char_index=0
print(message)
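# Example: caesar("hello", 3, "encode") prints "khoor"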
continue_check=True
while continue_check:
direction = input("Type 'encode' to encrypt, type 'decode' to decrypt:\n")
text = input("Type your message:\n").lower()
shift = int(input("Type the shift number:\n"))
shift=shift%26
caesar(text,shift,direction)
result=input("Type 'yes' if you want to go again else type 'no'\n")
if result=="no":
print("Bye!")
continue_check=False
|
[
"jlamba2108@gmail.com"
] |
jlamba2108@gmail.com
|
7e3a58228ddcf49afd4818d3f4ec6b4a5cdd8fcb
|
36ec12f0c3b9017953f8204461bdef8f8106e475
|
/tests/test_run_settings.py
|
50cf3032e16dc28487a29843d36daae5a800a566
|
[
"BSD-2-Clause"
] |
permissive
|
mradaideh/SmartSim
|
f580f63af4397d298e6ac5b57f0c1738509a873d
|
05e299a1315125737f950860e33441214345cf77
|
refs/heads/master
| 2023-04-29T03:24:52.501295
| 2021-05-08T01:48:17
| 2021-05-08T01:48:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,782
|
py
|
from shutil import which
import pytest
from smartsim.settings import RunSettings
def test_run_command_exists():
# you wouldn't actually do this, but we know python will be installed
settings = RunSettings("python", run_command="python")
python = which("python")
assert settings.run_command == python
def test_run_command_not_exists():
settings = RunSettings("python", run_command="not-on-system")
assert settings.run_command == "not-on-system"
def test_add_exe_args():
settings = RunSettings("python")
settings.add_exe_args("--time 5")
settings.add_exe_args(["--add", "--list"])
result = ["--time", "5", "--add", "--list"]
assert settings.exe_args == result
def test_addto_existing_exe_args():
list_exe_args_settings = RunSettings("python", ["sleep.py", "--time=5"])
str_exe_args_settings = RunSettings("python", "sleep.py --time=5")
# both should be the same
args = ["sleep.py", "--time=5"]
assert list_exe_args_settings.exe_args == args
assert str_exe_args_settings.exe_args == args
# add to exe_args
list_exe_args_settings.add_exe_args("--stop=10")
str_exe_args_settings.add_exe_args(["--stop=10"])
args = ["sleep.py", "--time=5", "--stop=10"]
assert list_exe_args_settings.exe_args == args
assert str_exe_args_settings.exe_args == args
def test_bad_exe_args():
"""test when user provides incorrect types to exe_args"""
exe_args = {"dict": "is-wrong-type"}
with pytest.raises(TypeError):
_ = RunSettings("python", exe_args=exe_args)
def test_bad_exe_args_2():
"""test when user provides incorrect types to exe_args"""
exe_args = ["list-includes-int", 5]
with pytest.raises(TypeError):
_ = RunSettings("python", exe_args=exe_args)
|
[
"spartee@hpe.com"
] |
spartee@hpe.com
|
e8b8085ef78e060f7a0e58f5c9392846b9c9d4ae
|
19333aed645a4721108a67ba4d90292d2383314e
|
/300lengthOfLIS.py
|
1fdad80e02084716ce4cbca1fe34f25fd15eb83f
|
[] |
no_license
|
qinzhouhit/leetcode
|
f39f8720a19259310293c1eb2975ce459f7cf702
|
502e121cc25fcd81afe3d029145aeee56db794f0
|
refs/heads/master
| 2023-06-24T09:18:25.715450
| 2021-07-21T07:21:42
| 2021-07-21T07:21:42
| 189,679,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,510
|
py
|
'''
keys:
Solutions:
Similar: 334
T:
S:
'''
from typing import List
from bisect import bisect_left
# similar question: 673
class Solution:
# https://leetcode.com/problems/longest-increasing-subsequence/discuss/152065/Python-explain-the-O(nlogn)-solution-step-by-step
def lengthOfLIS4(self, nums: List[int]) -> int:
a = []
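        # invariant: a[i] holds the smallest possible tail value of an
        # increasing subsequence of length i + 1 seen so far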
for num in nums:
idx = bisect_left(a, num)
if idx == len(a):
a.append(num)
else:
a[idx] = num
return len(a)
# optimal: T: O(nlogn); S: O(n), binary search
# https://leetcode.com/problems/longest-increasing-subsequence/discuss/74824/JavaPython-Binary-search-O(nlogn)-time-with-explanation
def lengthOfLIS3(self, nums: List[int]) -> int:
tails = [0] * len(nums)
size = 0
for num in nums:
l, r = 0, size
while l < r:
m = l + (r - l) // 2
if tails[m] < num:
l = m + 1
else:
r = m
tails[l] = num
size = max(l + 1, size)
return size
# huahua method; O(n^2) for T and O(n) for S
def lengthOfLIS2(self, nums: List[int]) -> int:
if not nums: return 0
n = len(nums)
f = [1] * n
for i in range(1, n): # length of the subarray
for j in range(0, i): # all the shorter subarray
if nums[i] > nums[j]:
f[i] = max(f[i], f[j] + 1)
return max(f)
    # recursion with memoization
    # based on last version
    def helper1(self, nums, preindex, curindex, memo):
        if curindex == len(nums):
            return 0
        if memo[curindex][preindex + 1] < 0:
            taken = 0
            # nums[curindex] can extend the subsequence if it exceeds the previous pick
            if preindex < 0 or nums[curindex] > nums[preindex]:
                taken = 1 + self.helper1(nums, curindex, curindex + 1, memo)
            nottaken = self.helper1(nums, preindex, curindex + 1, memo)
            memo[curindex][preindex + 1] = max(taken, nottaken)
        return memo[curindex][preindex + 1]
def lengthOfLIS1(self, nums: List[int]) -> int:
memo = [[-1]*(len(nums) + 1) for _ in range(len(nums))]
return self.helper1(nums, -1, 0, memo)
# brute force
# T: O(2^n) for recursion; S: O(n^2)
def lengthOfLIS(self, nums: List[int]) -> int:
return self.helper(nums, float("-inf"), 0)
def helper(self, nums, prev, cur):
# prev: previous small element
# cur: current index
if cur == len(nums):
return 0
taken = 0
if nums[cur] > prev:
taken = 1 + self.helper(nums, nums[cur], cur + 1)
nottaken = self.helper(nums, prev, cur + 1)
return max(taken, nottaken)
sol = Solution()
print(sol.lengthOfLIS([1,3,2]))  # expected output: 2
|
[
"qinzhouhit@gmail.com"
] |
qinzhouhit@gmail.com
|
989bd3b744c128364ef06697ca66be44e6759ac3
|
a56252fda5c9e42eff04792c6e16e413ad51ba1a
|
/resources/usr/local/lib/python2.7/dist-packages/sklearn/gaussian_process/__init__.py
|
815fa78dc9c1700f6b6c0d30a5a9fc0f8fd1997f
|
[
"Apache-2.0"
] |
permissive
|
edawson/parliament2
|
4231e692565dbecf99d09148e75c00750e6797c4
|
2632aa3484ef64c9539c4885026b705b737f6d1e
|
refs/heads/master
| 2021-06-21T23:13:29.482239
| 2020-12-07T21:10:08
| 2020-12-07T21:10:08
| 150,246,745
| 0
| 0
|
Apache-2.0
| 2019-09-11T03:22:55
| 2018-09-25T10:21:03
|
Python
|
UTF-8
|
Python
| false
| false
| 472
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
"""
The :mod:`sklearn.gaussian_process` module implements scalar Gaussian Process
based predictions.
"""
from .gaussian_process import GaussianProcess
from . import correlation_models
from . import regression_models
__all__ = ['GaussianProcess', 'correlation_models', 'regression_models']
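# Usage sketch (an assumption; this legacy scikit-learn API was later removed):
#   gp = GaussianProcess(corr='squared_exponential', theta0=1e-2)
#   gp.fit(X, y)
#   y_pred, mse = gp.predict(X_new, eval_MSE=True)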
|
[
"szarate@dnanexus.com"
] |
szarate@dnanexus.com
|
e7d077a9e663822d9ac4d1fa6335919432dc7ce0
|
02cfe8774a1b0b3cff383d157a7c8178748db919
|
/BS_Scrape.py
|
290228dc4e0b465ed8bf2af0475abc7a5e9cfdd7
|
[] |
no_license
|
chadcooper/nbi
|
72839f08c667a6af3062fd05ca4e713d8965e7e3
|
9ea93f3c0277ae5f3eb19aa8b2be2ba7cb0b1319
|
refs/heads/master
| 2021-01-17T05:22:29.110071
| 2014-01-13T19:08:11
| 2014-01-13T19:08:11
| 15,878,211
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,627
|
py
|
import BeautifulSoup as bs
import urllib2, re
def FetchFipsCodes( ):
""" Fetches a table of FIPS codes form a EPA webpage and tosses them
into a dictionary """
url = 'http://www.epa.gov/enviro/html/codes/state.html'
f = open('C:/temp/python/data/outputs/fips.csv', 'w')
# Fetch and parse the web page
response = urllib2.urlopen(url)
html = response.read()
soup = bs.BeautifulSoup(html)
# Find the table in the html code
tbl = soup.findAll('table')
    # Set up an empty dictionary to use later
d={}
for table in tbl:
# Find the table rows
rows = table.findAll('tr')
for tr in rows:
# Find the table columns within each row
cols = tr.findAll('td')
# New list to store the columns contents in
ls=[]
for td in cols:
# Grab each cell value in the current row, toss it into the list
# So you get ['AK','02','ALASKA']
ls.append(td.find(text=True))
# Write the row out to text file
f.write(td.find(text=True) + ',')
if len(ls) > 0:
# Setup dictionary key and values
key = ls[1]
valAbbr = ls[0]
valFull = ls[2]
# Write the current line's cell values from list to dictionary
d[key] = [valAbbr, valFull]
# Write a newline to textfile
f.write('\n')
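    # d maps FIPS code to [abbreviation, full name],
    # e.g. d['02'] == ['AK', 'ALASKA'] for the row ['AK', '02', 'ALASKA']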
return d
if __name__ == '__main__':
FetchFipsCodes()
|
[
"supercooper@gmail.com"
] |
supercooper@gmail.com
|
dbd02cfbb502a3367b992b88e7b0375ee5d28dc7
|
8909c690853d2bb1c55419d38162256888c0418b
|
/final_regression_with_keras.py.html
|
427869355998ef18c1679a8310537673c22faaf0
|
[] |
no_license
|
ismaelsiffon/keras
|
8d101b0ec3a192598acba084d8f32f60e33f94c1
|
3a3cbdfc28413005c0b3fcb568b0483a30e6d8e1
|
refs/heads/master
| 2022-09-11T03:32:42.027486
| 2020-05-29T07:07:46
| 2020-05-29T07:07:46
| 267,789,241
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,522
|
html
|
#!/usr/bin/env python
# coding: utf-8
# # Final Assignment: Build a Regression Model in Keras
# Imports:
# In[62]:
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
# Let's download the data:
# In[2]:
concrete_data= pd.read_csv('https://cocl.us/concrete_data')
concrete_data.head()
# In[3]:
concrete_data.shape
# Let's split the data into the predictors columns and the target column:
# In[14]:
df_columns = concrete_data.columns
predictors = concrete_data[df_columns[0:8]]
target = concrete_data[df_columns[8]]
# In[73]:
predictors.head()
# In[74]:
target.head()
# Let's store the number of predictor columns:
# In[16]:
n_cols = predictors.shape[1]
# Let's import Keras and the packages we need
# In[17]:
import keras
from keras.models import Sequential
from keras.layers import Dense
# # Part A: Build a baseline model
# Let's define a function that calls the regression model:
# In[80]:
def regression_model_1 ():
#create the model:
model = Sequential()
model.add(Dense(10,activation='relu',input_shape=(n_cols,)))
model.add(Dense(1))
#compile model:
model.compile(optimizer='adam', loss='mean_squared_error')
return model
model_1 = regression_model_1()
# Let's fit and evaluate the model:
# In[81]:
n=50
MSE_1=[]
num_epochs = 50
for i in range(0,n):
#Split the data (30% left for test data)
X_train, X_test, y_train, y_test = train_test_split(predictors, target, test_size=0.3, random_state=None, shuffle=True)
#Fit the model with the train data:
model_1.fit(X_train, y_train, epochs=num_epochs, verbose=0)
#Predict using the test data:
y_test_hat = model_1.predict(X_test)
#Calculate and add the MSE to the MSE list:
MSE_1.append(mean_squared_error(y_test, y_test_hat))
# Calculate the mean and the standard deviation of the MSE's:
MSE_mean = np.mean(MSE_1)
MSE_std= np.std(MSE_1)
print("A list of ", len(MSE_1), " mean square error values was created. The first 5 values are: ", MSE_1[0:5])
print("The MSE mean is ", MSE_mean)
print("The MSE standard deviation is ", MSE_std)
# # Part B: Normalize the data
# Let's normalize the predictors:
# In[82]:
predictors_norm = (predictors - predictors.mean()) / predictors.std()
predictors_norm.head()
# Let's fit and evaluate the same model, but this time using the normalized predictors:
# In[83]:
n=50
MSE_2=[]
num_epochs = 50
for i in range(0,n):
#Split the data (30% left for test data)
X_train, X_test, y_train, y_test = train_test_split(predictors_norm, target, test_size=0.3, random_state=None, shuffle=True)
#Fit the model with the train data:
model_1.fit(X_train, y_train, epochs=num_epochs, verbose=0)
#Predict using the test data:
y_test_hat = model_1.predict(X_test)
#Calculate and add the MSE to the MSE list:
MSE_2.append(mean_squared_error(y_test, y_test_hat))
# Calculate the mean and the standard deviation of the MSE's:
MSE_2_mean = np.mean(MSE_2)
MSE_2_std= np.std(MSE_2)
print("A list of ", len(MSE_2), " mean square error values was created. The first 5 values are: ", MSE_2[0:5])
print("The MSE mean is ", MSE_2_mean)
print("The MSE standard deviation is ", MSE_2_std)
# ### How does the mean of the mean squared errors compare to that from Step A?
# Answer: We achieved a much smaller MSE compared to step A when using the normalized data. Having the predictors normalized before fitting the model resulted in a better performing model.
# # Part C: Increase the number of epochs:
# Let's fit and evaluate the same model, but this time using 100 epochs:
# In[84]:
n=50
MSE_3=[]
num_epochs = 100
for i in range(0,n):
#Split the data (30% left for test data)
X_train, X_test, y_train, y_test = train_test_split(predictors_norm, target, test_size=0.3, random_state=None, shuffle=True)
#Fit the model with the train data:
model_1.fit(X_train, y_train, epochs=num_epochs, verbose=0)
#Predict using the test data:
y_test_hat = model_1.predict(X_test)
#Calculate and add the MSE to the MSE list:
MSE_3.append(mean_squared_error(y_test, y_test_hat))
# Calculate the mean and the standard deviation of the MSE's:
MSE_3_mean = np.mean(MSE_3)
MSE_3_std= np.std(MSE_3)
print("A list of ", len(MSE_3), " mean square error values was created. The first 5 values are: ", MSE_3[0:5])
print("The MSE mean is ", MSE_3_mean)
print("The MSE standard deviation is ", MSE_3_std)
# ### How does the mean of the mean squared errors compare to that from Step B?
# Answer: Increasing the number of epochs resulted in a smaller MSE compared to step B. Although the process took much longer, the MSE went from 65 to 36, which is a considerable improvement for our estimator.
# # Part D: Increase the number of hidden layers
# Let's change our model to 3 hidden layers:
# In[85]:
def regression_model_2 ():
#create the model:
model2 = Sequential()
model2.add(Dense(10,activation='relu',input_shape=(n_cols,)))
model2.add(Dense(10,activation='relu'))
model2.add(Dense(10,activation='relu'))
model2.add(Dense(1))
#compile model:
model2.compile(optimizer='adam', loss='mean_squared_error')
    return model2
model_2 = regression_model_2()
# Let's fit and evaluate the new model:
# In[86]:
n=50
MSE_4=[]
num_epochs = 50
for i in range(0,n):
#Split the data (30% left for test data)
X_train, X_test, y_train, y_test = train_test_split(predictors_norm, target, test_size=0.3, random_state=None, shuffle=True)
#Fit the model with the train data:
model_2.fit(X_train, y_train, epochs=num_epochs, verbose=0)
#Predict using the test data:
y_test_hat = model_2.predict(X_test)
#Calculate and add the MSE to the MSE list:
MSE_4.append(mean_squared_error(y_test, y_test_hat))
# Calculate the mean and the standard deviation of the MSE's:
MSE_4_mean = np.mean(MSE_4)
MSE_4_std= np.std(MSE_4)
print("A list of ", len(MSE_4), " mean square error values was created. The first 5 values are: ", MSE_4[0:5])
print("The MSE mean is ", MSE_4_mean)
print("The MSE standard deviation is ", MSE_4_std)
# ### How does the mean of the mean squared errors compare to that from Step B?
# Answer: Adding hidden layers to the network resulted in a smaller MSE compared to step B. We observed better performance compared to increasing the number of epochs. The process is faster and the performance better.
|
[
"noreply@github.com"
] |
ismaelsiffon.noreply@github.com
|
30279ab7a6271c42ff99dee903f7994bace44415
|
35393783fa61a0f205b82f5e31bff91252714512
|
/02.project.02.image.classification/helper.py
|
eaa43d4a051ee42851f247849df58328ce406a3c
|
[] |
no_license
|
buffos/DLFoundationsND-projects
|
e6637210fe67eaf66f53408029837aef0f58d982
|
884f2b178343354a73dd2b56d845df5bcd1ac543
|
refs/heads/master
| 2021-01-09T05:52:22.953139
| 2017-05-24T09:07:51
| 2017-05-24T09:07:51
| 80,853,430
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,646
|
py
|
import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
def _load_label_names():
"""
Load the label names from file
"""
return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
"""
Load a batch of the dataset
"""
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
labels = batch['labels']
return features, labels
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):
"""
Display Stats of the the dataset
"""
batch_ids = list(range(1, 6))
if batch_id not in batch_ids:
print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids))
return None
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
if not (0 <= sample_id < len(features)):
print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
return None
print('\nStats of batch {}:'.format(batch_id))
print('Samples: {}'.format(len(features)))
print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True)))))
print('First 20 Labels: {}'.format(labels[:20]))
sample_image = features[sample_id]
sample_label = labels[sample_id]
label_names = _load_label_names()
print('\nExample of Image {}:'.format(sample_id))
print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
print('Image - Shape: {}'.format(sample_image.shape))
print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
plt.axis('off')
plt.imshow(sample_image)
def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
"""
Preprocess data and save it to file
"""
features = normalize(features)
labels = one_hot_encode(labels)
pickle.dump((features, labels), open(filename, 'wb'))
def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode):
"""
Preprocess Training and Validation Data
"""
n_batches = 5
valid_features = []
valid_labels = []
for batch_i in range(1, n_batches + 1):
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)
validation_count = int(len(features) * 0.1)
        # Preprocess and save a batch of training data
_preprocess_and_save(
normalize,
one_hot_encode,
features[:-validation_count],
labels[:-validation_count],
'preprocess_batch_' + str(batch_i) + '.p')
# Use a portion of training batch for validation
valid_features.extend(features[-validation_count:])
valid_labels.extend(labels[-validation_count:])
# Preprocess and Save all validation data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(valid_features),
np.array(valid_labels),
'preprocess_validation.p')
with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
    # load the test data
test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
test_labels = batch['labels']
    # Preprocess and Save all test data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(test_features),
np.array(test_labels),
'preprocess_training.p')
def batch_features_labels(features, labels, batch_size):
"""
Split features and labels into batches
"""
for start in range(0, len(features), batch_size):
end = min(start + batch_size, len(features))
yield features[start:end], labels[start:end]
def load_preprocess_training_batch(batch_id, batch_size):
"""
Load the Preprocessed Training data and return them in batches of <batch_size> or less
"""
filename = 'preprocess_batch_' + str(batch_id) + '.p'
features, labels = pickle.load(open(filename, mode='rb'))
# Return the training data in batches of size <batch_size> or less
return batch_features_labels(features, labels, batch_size)
def display_image_predictions(features, labels, predictions):
n_classes = 10
label_names = _load_label_names()
label_binarizer = LabelBinarizer()
label_binarizer.fit(range(n_classes))
label_ids = label_binarizer.inverse_transform(np.array(labels))
fig, axies = plt.subplots(nrows=4, ncols=2)
fig.tight_layout()
fig.suptitle('Softmax Predictions', fontsize=20, y=1.1)
n_predictions = 3
margin = 0.05
ind = np.arange(n_predictions)
width = (1. - 2. * margin) / n_predictions
for image_i, (feature, label_id, pred_indicies, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)):
pred_names = [label_names[pred_i] for pred_i in pred_indicies]
correct_name = label_names[label_id]
axies[image_i][0].imshow(feature*255)
axies[image_i][0].set_title(correct_name)
axies[image_i][0].set_axis_off()
axies[image_i][1].barh(ind + margin, pred_values[::-1], width)
axies[image_i][1].set_yticks(ind + margin)
axies[image_i][1].set_yticklabels(pred_names[::-1])
axies[image_i][1].set_xticks([0, 0.5, 1.0])
|
[
"kostas.oreopoulos@gmail.com"
] |
kostas.oreopoulos@gmail.com
|
24e7bcacc5f706d7da0d52276f320df51aaf134b
|
da56b4b47c7402449cc41f10887d00d7101dd9c9
|
/Word a10n (abbreviation)/word-a10n-abbreviation.py
|
e28ea73f7c2f8f9e9175e97e964c64e6ab3f0562
|
[] |
no_license
|
kovalevcon/practice-code
|
8e20312fb0b14b48c696a3397f4e978ef1782292
|
663838488ff21e475ad73bc902dc3759c8d37684
|
refs/heads/master
| 2021-06-04T13:52:37.404698
| 2019-07-09T09:35:11
| 2019-07-09T09:35:11
| 138,029,120
| 1
| 1
| null | 2020-04-20T08:38:32
| 2018-06-20T12:21:11
|
PHP
|
UTF-8
|
Python
| false
| false
| 869
|
py
|
import re
def abbreviate(s):
"""
:type s: str
:rtype: str
"""
result = ''
word = ''
for symbol in s:
if re.search("[^a-zA-Z]", symbol):
result += "{}{}".format(i18n(word), symbol)
word = ''
else:
word += symbol
if len(word):
result += i18n(word)
return result
def i18n(s):
"""
:type s: str
:rtype: str
"""
ln = len(s)
return s if ln < 4 else "{}{}{}".format(s[0], ln - 2, s[ln - 1])
# Other short variant
# regex = re.compile('[a-z]{4,}', re.IGNORECASE)
#
# def replace(match):
# word = match.group(0)
# return word[0] + str(len(word) - 2) + word[-1]
#
# def abbreviate(s):
# return regex.sub(replace, s)
print(abbreviate('elephant-rides are really fun!')) # e6t-r3s are r4y fun!
print(abbreviate("internationalization")) # i18n
|
[
"kovalevcon@mail.ru"
] |
kovalevcon@mail.ru
|
3582e43c0be6814c00a89d2f42cd2a4270a2c2c1
|
07683accff81c1f4bede1ad10ba21aeaca94581f
|
/training/manticore_player_config.py
|
08b0cb3fe8f6dc16c0e635ccfe17bfba773043ed
|
[
"MIT"
] |
permissive
|
NoMoor/AGC
|
b052114c1f8398be8b75a6cae960d1189e23909f
|
5cb72871ed33c9484c5699496db106f24338564e
|
refs/heads/master
| 2021-07-06T14:09:12.880561
| 2021-01-14T21:07:45
| 2021-01-14T21:07:45
| 222,946,019
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
from pathlib import Path
from rlbot.matchconfig.match_config import PlayerConfig, Team
__cfg_path = Path(
__file__).absolute().parent.parent.parent / 'SpaceInvaders' / 'manticore' / 'manticore' / 'manticore.cfg'
manticore_blue = PlayerConfig.bot_config(__cfg_path, Team.BLUE)
manticore_orange = PlayerConfig.bot_config(__cfg_path, Team.ORANGE)
|
[
"andrewscotthat@gmail.com"
] |
andrewscotthat@gmail.com
|
4055a201dbd907c9650090b2d35e0e88aa47f35c
|
ffd6bc549caad10ee99ed20a13b0dac51393f24a
|
/server/healthcheck.py
|
a20e17723c9d5aa537dad0d0d6c9ac9620088a87
|
[] |
no_license
|
giovaninppc/MC030
|
52a579290e0dcd1d28744f9ac98f49a8857ae275
|
382a60e330f1cdbb82fa8648029fa410db6a5cf5
|
refs/heads/master
| 2022-12-14T19:53:49.604330
| 2022-09-13T16:53:12
| 2022-09-13T16:53:12
| 208,359,198
| 2
| 0
| null | 2022-12-11T08:42:36
| 2019-09-13T22:56:44
|
Python
|
UTF-8
|
Python
| false
| false
| 234
|
py
|
import sys
# Takes first name and last name via command
# line arguments and then displays them
print("Output from Python")
print("First name: " + sys.argv[1])
print("Last name: " + sys.argv[2])
# save the script as hello.py
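# run it, e.g.: python hello.py John Doe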
|
[
"bianca.itiroko@ifood.com.br"
] |
bianca.itiroko@ifood.com.br
|
9a2ec1197f71e206b1e7e693a6289439c6797b7f
|
be426351438937c646cee339d6c381ba14fd8de2
|
/trafficLight.py
|
bf0f5dd1b05631363fc4c71357166183e1c905b7
|
[] |
no_license
|
Marist-CMPT120-FA19/Anthony-Gandini-Lab-4
|
8c8c895dd950a02eb4a3edd967d84211d839cdf3
|
fff60008703634aaf8eac6e777c8d6841ff0c330
|
refs/heads/master
| 2020-07-28T23:23:50.530323
| 2019-09-19T14:37:48
| 2019-09-19T14:37:48
| 209,577,260
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
#Anthony Gandini
from graphics import *
def main():
win = GraphWin("Traffic Light" , 1000 , 1000)
body = Rectangle(Point(300 , 100) , Point(700 , 900))
body.setFill("white")
body.draw(win)
redLight = Circle(Point(500 , 250) , 100)
redLight.setFill("red")
redLight.draw(win)
yellowLight = Circle(Point(500 , 500) , 100)
yellowLight.setFill("yellow")
yellowLight.draw(win)
greenLight = Circle(Point(500 , 750) , 100)
greenLight.setFill("green")
greenLight.draw(win)
main()
|
[
"54862709+Tones55@users.noreply.github.com"
] |
54862709+Tones55@users.noreply.github.com
|
845b8ca2b84cf398d1092751737e01837b49cb76
|
fd720c03d9ac965e7555e67769a879daf073c0d5
|
/Level 1/8.py
|
4c5e4ca0046cf1c4b8da8963525ced03dd5953ce
|
[
"Unlicense"
] |
permissive
|
chris-maclean/ProjectEuler
|
84045fb3a01897fb4ce4b312aabfd582077e6a89
|
861387b130a05cb98518492ac099ef6470f14dab
|
refs/heads/master
| 2021-05-26T22:46:54.114622
| 2013-08-06T01:08:02
| 2013-08-06T01:08:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,707
|
py
|
# CJAA 4/6/2013
# Project Euler Problem 8
# Find the largest product of 5 consecutive numbers in the 1000-digit
# number below
x = str(7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450)
c = 0
window = x[c:c+5]
product = int(x[0])*int(x[1])*int(x[2])*int(x[3])*int(x[4])
maxProduct = product
while c <= len(x) - 6:
c += 1
# If the value that was just added to the window is less than
# the value just removed, then skip finding the product. It is
# impossible for the product to increase if the only changed factor
# decreased.
if int(x[c+4:c+5]) <= int(x[c-1:c]): continue
else:
product = int(x[c])*int(x[c+1])*int(x[c+2])*int(x[c+3])*int(x[c+4])
if product > maxProduct:
maxProduct = product
window = x[c:c+5]
print(maxProduct)
|
[
"canna12@gmail.com"
] |
canna12@gmail.com
|
4d0e73899310c05f60aaee3ed53464b71cdc931d
|
5027fe1a0a95f3391566b53fa8c29a2c29e4f04c
|
/python/Python/python/flask/route.py
|
fc67ea0b8808294aee29bdbd1674a9c919ae009c
|
[] |
no_license
|
chelroot/script
|
5e79e9609fb602ef63ee33e36262e4d3a79ea28f
|
e1f915b3b8e834417fdb07d2b6894e22674071e1
|
refs/heads/master
| 2020-03-29T01:18:55.090645
| 2017-12-14T15:02:13
| 2017-12-14T15:02:13
| 94,638,883
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
# -*- coding: utf-8 -*-
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
@app.route('/hello')
def hello2():
return 'Hello World 2'
@app.route('/user/<username>')
def show_user_profile(username):
return 'User %s' % username
@app.route('/post/<int:post_id>')
def show_post(post_id):
return 'Post %d' % post_id
if __name__ == "__main__":
app.run(port=8080,debug=True)
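# Example requests once the server is running (illustrative):
#   GET http://localhost:8080/hello    -> "Hello World 2"
#   GET http://localhost:8080/post/42  -> "Post 42"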
|
[
"you@example.com"
] |
you@example.com
|
071103df533889be44bf1e6e096e4b568562b9a6
|
8ac6655dfe6b8a43867646ab34ad608f74db2f0f
|
/B-SGD.py
|
d29c96f4221a7b4fb5823baf5135dde6c86293fe
|
[] |
no_license
|
banglh/B-SGD
|
22cf9075c7437c5a9058bcf39ebca1be2dc30ecd
|
27b65d90c4a0294265aeb837b042ceb2385ad2f0
|
refs/heads/master
| 2021-01-19T13:25:41.426822
| 2015-01-03T12:59:36
| 2015-01-03T12:59:36
| 28,743,856
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,002
|
py
|
from DataPreparation import prepareData
from LatentFeaturesInit import initParamsRandom1
from Output import printToFile
from BSGD_Engine import trainParams
if __name__ == "__main__":
############################### Dataset information #################################
# MovieLens 10M dataset
# dataset = "MovieLens-10M"
# trainingFile = "../data/ml-10M/r1.train"
# testFile = "../data/ml-10M/r1.test"
# contentFile = "../data/ml-10M/movies.dat"
    # MovieLens 100K dataset
dataset = "MovieLens-100K"
trainingFile = "../data/ml-100k/ua.base"
testFile = "../data/ml-100k/ua.test"
contentFile = "../data/ml-100k/u.item"
############################### Main program ########################################
# prepare training data and test data
Rtrain, Rtest, usersNum, itemsNum, rmin, rmax = prepareData(dataset, trainingFile, testFile, contentFile)
version = 2 # 1: rmin < PuQi < rmax with (u,i) in Rrated
# 2: rmin < PuQi < rmax with (u,i) in R
Klist = [5,10,20,50]
muy = 10e-4
alpha = 1
Info = "Dataset: " + dataset + "\n"
Info += "training file: " + trainingFile + "\n"
Info += "testing file: " + testFile + "\n"
Info += "content file: " + contentFile + "\n"
Info += "muy = " + str(muy) + "\n"
Info += "alpha = " + str(alpha) + "\n"
for K in Klist:
# Initialize latent features matrices (P: Nu x k, B: k x Na)
P, Q = initParamsRandom1(usersNum, itemsNum, K, rmin, rmax)
# Training latent features matrices
P, Q, trainRMSE, testRMSE = trainParams(Rtrain, Rtest, P, Q, muy, alpha, rmin, rmax, version)
        # Output results to file
if (version == 2):
printToFile("B-SGD-2-Random", "RMSE", K, trainRMSE, testRMSE, Info)
else:
printToFile("B-SGD-1-Random", "RMSE", K, trainRMSE, testRMSE, Info)
|
[
"ice.banglh@gmail.com"
] |
ice.banglh@gmail.com
|
a41d1afce03ea94b835c03fec6c228f7a67c0d71
|
0c88267776f6c7590cfed3d3184eb872ad38c642
|
/pendataan.py
|
61d7a15980aacbc6edf5118c1839c76154f73ad2
|
[] |
no_license
|
blanktix/data-nilai-mahasiswa-python-mongodb
|
9d004f684b20804a63fa362aa88ef34c82563e67
|
b773eb8b2981d81aa4f6d8d3a742507096683f9c
|
refs/heads/main
| 2023-03-02T18:21:42.837329
| 2021-02-12T01:22:59
| 2021-02-12T01:22:59
| 338,195,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,210
|
py
|
from os import system
from mongoengine import *
from functools import reduce
from model.model import *
# ============================================================================= #
# Closure used to look up students' grades in a particular course
def cek_nilai_matkul(data):
nilai=dict()
for mhs in data:
nilai[mhs.nim.nama]=mhs.nilai
def predikat(nama):
skor = int(nilai[nama])
if (skor>80):
return ("Nilai dari {} adalah {}, predikat A".format(nama, skor))
elif (70<skor<=80):
return ("Nilai dari {} adalah {}, predikat B".format(nama, skor))
elif (60<skor<=70):
return ("Nilai dari {} adalah {}, predikat C".format(nama, skor))
elif (50<skor<=60):
return ("Nilai dari {} adalah {}, predikat C-".format(nama, skor))
else:
return ("Nilai dari {} adalah {}, predikat D".format(nama, skor))
return predikat
# ============================================================================= #
# Closure used to look up one student's grade in every course
def cek_nilai_mhs(data):
nilai=dict()
for mhs in data:
nilai[mhs.kode_matkul]=mhs.nilai
def predikat(kode):
skor = int(nilai[kode])
if (skor>80):
return ("Nilai matkul {} adalah {}, predikat A".format(kode.nama, skor))
elif (70<skor<=80):
return ("Nilai matkul {} adalah {}, predikat B".format(kode.nama, skor))
elif (60<skor<=70):
return ("Nilai matkul {} adalah {}, predikat C".format(kode.nama, skor))
elif (50<skor<=60):
return ("Nilai matkul {} adalah {}, predikat C-".format(kode.nama, skor))
else:
return ("Nilai matkul {} adalah {}, predikat D".format(kode.nama, skor))
return predikat
# ============================================================================= #
# First-class function usage
def ucapan_semangat(data):
nilai=dict()
for mhs in data:
nilai[mhs.nim.nama]=mhs.nilai
def apresiasi():
for nama in nilai:
if(int(nilai[nama])>80):
print("Selamat {} pertahankan prestasimu".format(nama))
elif(70<int(nilai[nama])<=80):
print("{}, tingkatkan terus prestasimu".format(nama))
else:
print("Tidak mengapa {}, teruslah belajar meskipun kamu lambat memahami".format(nama))
return apresiasi
# ============================================================================= #
# Decorator applied to produce output about a student's grade
def konversi_nilai(func):
def fungsinya(data):
func(data)
return fungsinya
@konversi_nilai
def nilai_rentang_4(data):
if(int(data.nilai>80)):
print("Nilai {} dalam rentang 0-4: 4".format(str(data.nim.nama)))
elif(int(data.nilai)>70):
print("Nilai {} dalam rentang 0-4: 3.5".format(str(data.nim.nama)))
elif(int(data.nilai)>60):
print("Nilai {} dalam rentang 0-4: 3".format(str(data.nim.nama)))
elif(int(data.nilai)>50):
print("Nilai {} dalam rentang 0-4: 2.5".format(str(data.nim.nama)))
else:
print("Nilai {} dalam rentang 0-4: 2".format(str(data.nim.nama)))
# ============================================================================= #
def jml_nilai_mhs(arr_nilai, n):
    # Recursive function to compute the total of the grades
if(len(arr_nilai)==1):
return arr_nilai[0]
else:
return arr_nilai[0] + jml_nilai_mhs(arr_nilai[1:],n)
# ============================================================================= #
def rata_mhs(data):
    # Pure function to compute a student's average grade
    # from the result of calling jml_nilai_mhs
return (jml_nilai_mhs(data, len(data)))/len(data)
# ============================================================================= #
def rata_matkul(data):
    # Uses reduce and a lambda
    # to compute a course's average grade
return (reduce(lambda x,y: int(x)+int(y), data))/len(data)
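    # e.g. rata_matkul(['80', '90']) -> 85.0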
# ============================================================================= #
def tambah():
    # Pure function to add a student's grade record
system('clear')
nim=int(input("Masukkan NIM [232 s.d 277]: "))
matkul=int(input("Masukkan kode matkul [1-8]: "))
nilai=int(input("Masukkan nilai [0-100]: "))
mhs=Nilai(nim=nim, kode_matkul=matkul, nilai=nilai).save()
# ============================================================================= #
def lihat_nilai():
    # Pure function that calls cek_nilai_matkul, cek_nilai_mhs, nilai_rentang_4
system('clear')
pil=int(input("1. Berdasar matkul\n2. Berdasar mahasiswa\nPilih [1|2]: "))
if pil==1:
kode=int(input("Masukkan kode mata kuliah: "))
cek=cek_nilai_matkul(Nilai.objects(kode_matkul=kode))
system('clear')
print(*map(cek, list([mhs.nim.nama for mhs in Nilai.objects(kode_matkul=kode)])), sep="\n")
print(*map(nilai_rentang_4, [mhs for mhs in Nilai.objects(kode_matkul=kode)]))
else:
nim=int(input("Masukkan NIM mahasiswa: "))
cek=cek_nilai_mhs(Nilai.objects(nim=nim))
system('clear')
print(*map(cek, list([mhs.kode_matkul for mhs in Nilai.objects(nim=nim)])), sep="\n")
print(*map(nilai_rentang_4, [mhs for mhs in Nilai.objects(nim=nim)]))
# ============================================================================= #
def cetak_ket():
    # Pure function that calls the first-class function ucapan_semangat
system('clear')
kode=int(input("Masukkan kode mata kuliah [1-8]: "))
berisemangat=ucapan_semangat(Nilai.objects(kode_matkul=kode))
berisemangat()
# ============================================================================= #
def rata_mahasiswa():
    # Pure function that calls jml_nilai_mhs
system('clear')
nim=int(input("Masukkan NIM [232-277]: "))
data=list([mhs.nilai for mhs in Nilai.objects(nim=nim)])
rata=jml_nilai_mhs(data, len(data))
print("Rata2 nilai mahasiswa tersebut: {}".format(str(rata/len(data))))
# ============================================================================= #
def rata_mata_kuliah():
    # Pure function that calls rata_matkul
system('clear')
kode=int(input("Masukkan kode mata kuliah [1-8]: "))
rata=rata_matkul(list([x.nilai for x in Nilai.objects(kode_matkul=kode)]))
print("Rata2 mata kuliah: {}".format(rata))
# ============================================================================= #
def main():
    # Pure function: the main routine with a menu of choices
    # Infinite loop
while True:
print(
"""
MENU
1. Tambah Data Mahasiswa\n
2. Lihat Nilai Mahasiswa\n
3. Cetak Keterangan Kelulusan\n
4. Rata2 kelas\n
5. Rata2 mahasiswa\n
6. Keluar
"""
)
pil=int(input("Pilih Menu: "))
if pil==1:
tambah()
elif pil==2:
lihat_nilai()
elif pil==3:
cetak_ket()
elif pil==4:
rata_mata_kuliah()
elif pil==5:
rata_mahasiswa()
else:
exit(0)
main()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
11de29e50bd736b819c94dbb49f1f8e2dffa6da0
|
4c5715acd2415d40b3459d6b099f164fc4b3e671
|
/Practica2/Practica2_ejercicio2.py
|
f586abb8a4908313077fb48bf8c468dfc66a6465
|
[] |
no_license
|
Mata13/ModelacionNumerica
|
5099d2af694cf22cc6d15df86e0cd9de54956ffd
|
03660a39caaa1f3a54059e4deba872517ef000c6
|
refs/heads/master
| 2021-07-04T23:13:12.387330
| 2020-08-01T22:00:53
| 2020-08-01T22:00:53
| 137,001,512
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
"""Aproximar la segunda derivada de f(x)=2**(sin(x**2+1))*cos(sqrt(|sin(x-1)|))
en x=sqrt(2), usar h=0.001 y diferencias centradas"""
import numpy as np
x_i=np.sqrt(2)
h_0=0.001
def f2primadex(x,h):
f=2**(np.sin(x**2+1))*np.cos(np.sqrt(np.absolute(np.sin(x-1))))
fmash=2**(np.sin((x+h)**2+1))*np.cos(np.sqrt(np.absolute(np.sin((x+h)-1))))
fmenosh=2**(np.sin((x-h)**2+1))*np.cos(np.sqrt(np.absolute(np.sin((x-h)-1))))
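    # central difference: f''(x) ≈ (f(x-h) - 2*f(x) + f(x+h)) / h**2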
f2prima=(fmenosh-2*f+fmash)/((h)**2)
return f2prima
segundaDerivada=f2primadex(x_i,h_0)
print(segundaDerivada)
|
[
"zick13"
] |
zick13
|
ebb49bb3f585477fbe8713bf66c4a6abc9d0576b
|
e18c9af2d5f4360efdea7f8299d03f277b65500e
|
/Temperatures/temperatures.py
|
855ad4695d3e5fbdf1cd1be168bb3aa56a4c1df7
|
[] |
no_license
|
Hravan/codingame
|
9147236c4384edb9d55b509fc634d0666ea61d9b
|
2547b6071be576db57e966bc1f3ed44660a01003
|
refs/heads/master
| 2020-04-24T20:44:04.305741
| 2019-02-23T19:22:16
| 2019-02-23T19:22:16
| 172,253,882
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 941
|
py
|
# Write a program that prints the temperature closest to 0 among input data.
# If two numbers are equally close to zero, positive integer has to be considered
# closest to zero (for instance, if the temperatures are -5 and 5, then display 5).
from functools import reduce
def closestZero(numbers, n):
'''Consume a list of integers and its number and return the number that is
closest to 0. In case of a tie return the positive number. If n == 0, return 0'''
if n == 0:
return 0
closest = reduce(closerZero, numbers)
return closest
def closerZero(n, m):
'''Consume two numbers and return one that is closer to zero. Return the positive
one in case of a tie.'''
if abs(n) == abs(m):
# if n is not bigger than 0 then either n == m or m > 0, in both cases
# we can return m
return n if n > 0 else m
elif abs(n) < abs(m):
return n
else:
return m
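# Minimal usage sketch (the sample temperatures below are made up for illustration):
if __name__ == '__main__':
    print(closestZero([7, -5, 5, 12], 4))  # -> 5: the tie between -5 and 5 goes to the positive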
|
[
"krzysztof.krenc@gmail.com"
] |
krzysztof.krenc@gmail.com
|
54f21d408917fa7ed27fa8fef789ccef6c026982
|
6997fab0dc0610d0eba804206cabed9b0f7e6e89
|
/pyobjc/RandomGenerator/random_generator.py
|
c3a45264f583adb6250c937303b782e2f84cbe7b
|
[] |
no_license
|
chanshik/practice
|
9797b25f4d25fd29be875b4c8b1a7b90d8658e3d
|
55baf3b08c0d288711b33ea190ee425f70d1dcbb
|
refs/heads/master
| 2021-01-02T08:47:12.379934
| 2014-02-03T12:33:29
| 2014-02-03T12:33:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,561
|
py
|
"""
Random number generator in Cocoa Programming for MAC OS X book.
Written by Chan Shik Lim. (chanshik@gmail.com)
"""
import random
import time
from Cocoa import *
from Foundation import NSObject
from PyObjCTools import AppHelper
class RandomGeneratorController(NSWindowController):
numberTextField = objc.IBOutlet()
def windowDidLoad(self):
NSLog('windowDidLoad')
NSWindowController.windowDidLoad(self)
self.number = 0
self.updateDisplay()
def windowShouldClose_(self, sender):
NSLog('windowShouldClose')
return True
def windowWillClose_(self, notification):
NSLog('windowWillClose')
AppHelper.stopEventLoop()
def applicationShouldTerminateAfterLastWindowClosed_(self, sender):
NSLog('applicationShouldTerminateAfterLastWindowClosed')
return True
@objc.IBAction
def seed_(self, sender):
NSLog('seed')
random.seed(time.time())
self.updateDisplay()
@objc.IBAction
def generate_(self, sender):
NSLog('generate')
self.number = random.randint(1, 100)
self.updateDisplay()
def updateDisplay(self):
NSLog('updateDisplay')
self.numberTextField.setStringValue_(str(self.number))
if __name__ == '__main__':
app = NSApplication.sharedApplication()
viewController = RandomGeneratorController.alloc().initWithWindowNibName_('RandomGenerator')
viewController.showWindow_(viewController)
NSApp.activateIgnoringOtherApps_(True)
AppHelper.runEventLoop()
|
[
"chanshik@gmail.com"
] |
chanshik@gmail.com
|
bbdf4e72c06405a112fb686c4ef98f9eacdf5f36
|
02011fb7b9685b2506b89641b0ff01416742a854
|
/笔记本电脑的东西/暑期线上培训/供应链商品销售/02.代码/供应链商品销售.py
|
52cff7d30aff86c50f25b8036a0c44b7ad8f4c5b
|
[] |
no_license
|
hhongker/java
|
062ea17aa2375d3c59bf5094eca3f2cf13d10ed9
|
78babd156ed849596c6f633979f40c4549197338
|
refs/heads/main
| 2023-03-14T16:39:04.078349
| 2020-11-24T12:12:37
| 2020-11-24T12:12:37
| 315,620,522
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,221
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 26 10:03:43 2020
@author: hhr
"""
# In[Data preparation]
import pandas as pd
data = pd.read_excel('../01.中间数据/供应链商品销售数据.xlsx',header=None)
data = data.iloc[1:,1:] # drop the redundant first row and first column from the raw read
data.columns = data.loc[1] # use row 1 as the column headers
data = data.drop([1]) # drop the now-redundant header row
data.reset_index(drop=True, inplace=True) # reset the index
data = data.fillna(0) # replace NaN with 0
# In[Task 3: analyze site selection for the new sales outlet] I read this as: whichever type has the higher mean sales
scv = data[['销售点类型','销售额(万元)']].groupby('销售点类型').agg({
    '销售额(万元)':['sum','count','var','mean'] # compute the sum, count, variance and mean
})
#m = scv['销售额(万元)']['sum'] / scv['销售额(万元)']['count'] # mean sales of the two store types
# plotting
import matplotlib.pyplot as plt
plt.style.use('ggplot') # set the plot style
fig = plt.figure(figsize=(12,6))
ax1 = fig.add_subplot(1,2,1) # subplot 1
plt.title('销售额平均值')
m = scv['销售额(万元)']['mean']
ax1.pie(m,autopct='%.2f %%',radius=1,explode=[0.1]+[0]*(len(m)-1),labels=['CBD店','社区店'],labeldistance=1,wedgeprops={
'width':0.7
})
ax2 = fig.add_subplot(1,2,2) # subplot 2
plt.title('销售额方差')
v = scv['销售额(万元)']['var']
ax2.pie(v,autopct='%.2f %%',radius=1,explode=[0.1]+[0]*(len(v)-1),labels=['CBD店','社区店'],labeldistance=1,wedgeprops={
'width':0.7
})
plt.show()
# clearly the CBD store is the more profitable choice
# In[Task 4: analyze seasonal sales patterns]
mon = data.groupby('销售月份').agg({ # group by month and aggregate
'销售额(万元)':['sum','count','mean']
})
# after grouping, reorder by calendar month
monMortList = ['January','February','March','April','May','June','July','August','September','October','November','December']
mon = mon.reindex(monMortList)
# plotting
import matplotlib.pyplot as plt
plt.style.use('ggplot') # set the plot style
fig = plt.figure(figsize=(14,16))
ax1 = fig.add_subplot(2,1,1) #子图一
plt.title('销售总金额,销售个数,销售平均值折线图')
mdf = mon['销售额(万元)']
for i in mdf:
ax1.plot(mdf.index,mdf[i])
    for i, j in zip(mdf.index,mdf[i]): # position the value labels
plt.text(i,j,'%.2f'%j,ha='center',va='top')
plt.legend(mdf.columns)
ax2 = fig.add_subplot(2,1,2) # subplot 2
plt.title('销售总金额,销售个数,销售平均值条形图')
mdf = mon['销售额(万元)']
import numpy as np
x = np.arange(12)
ax2.bar(x-0.3,mdf['sum'],width=0.3)
for i, j in zip(x-0.3,mdf['sum']): # position the value labels
plt.text(i,j,'%.2f'%j,ha='center',va='bottom')
ax2.bar(x,mdf['count'],width=0.3)
for i, j in zip(x,mdf['count']): # position the value labels
plt.text(i,j,'%.2f'%j,ha='center',va='bottom')
ax2.bar(x+0.3,mdf['mean'],width=0.3,color='black')
for i, j in zip(x+0.3,mdf['mean']): # position the value labels
plt.text(i,j,'%.2f'%j,ha='center',va='bottom')
plt.xticks(x,monMortList,fontproperties = 'Times New Roman', size = 10)
plt.legend(mdf.columns)
plt.show()
# In[Task 5: find the community store's strongly seasonal products]
jijieshop = pd.pivot_table(data[['商品代号','销售月份','销售额(万元)']],
index='销售月份',
columns='商品代号',
fill_value=0,
values='销售额(万元)',
aggfunc=['sum','count'])
monMortList = ['January','February','March','April','May','June','July','August','September','October','November','December']
jijieshop = jijieshop.reindex(monMortList)
j1 = jijieshop.apply(lambda x: x[0:3].sum(), axis=0)
j2 = jijieshop.apply(lambda x: x[3:6].sum(), axis=0)
j3 = jijieshop.apply(lambda x: x[6:9].sum(), axis=0)
j4 = jijieshop.apply(lambda x: x[9:12].sum(), axis=0)
j = pd.DataFrame(list(zip(j1,j2,j3,j4)))
j.index = j1.index
j.columns = ['春','夏','秋','冬']
jijiesum = j.loc[map(lambda x :'sum' in x,j.index),:] # total sales per product per season
jijiecount = j.loc[map(lambda x :'count' in x,j.index),:] # sales count per product per season
# top products by sales for each season
print('春季销售总额比较大的几样商品',
jijiesum['春'].sort_values(ascending=False).head()
)
print('夏季销售总额比较大的几样商品',
jijiesum['夏'].sort_values(ascending=False).head()
)
print('秋季销售总额比较大的几样商品',
jijiesum['秋'].sort_values(ascending=False).head()
)
print('冬季销售总额比较大的几样商品',
jijiesum['冬'].sort_values(ascending=False).head()
)
# top products by sales count for each season
print('春季销售数量比较大的几样商品',
jijiecount['春'].sort_values(ascending=False).head()
)
print('夏季销售数量比较大的几样商品',
jijiecount['夏'].sort_values(ascending=False).head()
)
print('秋季销售数量比较大的几样商品',
jijiecount['秋'].sort_values(ascending=False).head()
)
print('冬季销售数量比较大的几样商品',
jijiecount['冬'].sort_values(ascending=False).head()
)
# plotting
# 1. plot total sales
import matplotlib.pyplot as plt
plt.style.use('ggplot') # set the plot style
plt.figure(figsize=(16,10))
plt.title('每个商品在对应的季节里的销售总量变化')
for i in jijiesum.columns:
plt.plot([i[1] for i in jijiesum.index],jijiesum[i])
fig = plt.legend(jijiesum.columns)
xtisum = plt.xticks(rotation=45)
plt.show()
# 2. plot sales counts
import matplotlib.pyplot as plt
plt.style.use('ggplot') # set the plot style
plt.figure(figsize=(16,10))
plt.title('每个商品在对应的季节里的销售总量变化')
for i in jijiecount.columns:
plt.plot([i[1] for i in jijiecount.index],jijiecount[i])
fig = plt.legend(jijiecount.columns)
xtisum = plt.xticks(rotation=45)
plt.show()
# In[Report section below]
# In[Task 4: which products to pick as the new store's main stock, to help it run smoothly.]
shop = data[['商品代号','销售额(万元)']].groupby('商品代号').agg({
    '销售额(万元)':['sum','count'] # compute the sum and count
})
print('商品销售额较多的',
shop['销售额(万元)']['sum'].sort_values(ascending=False).head()
)
print('商品销售数量较多的',
shop['销售额(万元)']['count'].sort_values(ascending=False).head()
)
# plotting
import matplotlib.pyplot as plt
plt.style.use('ggplot') # set the plot style
plt.figure(figsize=(16,10))
plt.title('各商品销售量情况')
for i in shop['销售额(万元)'].columns:
plt.plot(shop.index,shop['销售额(万元)'][i])
plt.legend(shop['销售额(万元)'].columns)
xtisum = plt.xticks(rotation=45)
plt.show()
# clearly product "safety 8" is very good in both volume and revenue
# In[Task 5: analyze seasonal sales patterns, to schedule staff annual leave and training and to control order volumes.] Same as Task 4 above
# March, August, October and April show low sales volume; the other months are fairly high
# In[Task 6: assuming all purchased goods have half a year of shelf life left, find which of the community store's products are strongly seasonal and by when they must be cleared on promotion, or they will expire unsold.]
jijieshop = pd.pivot_table(data[['商品代号','销售月份','销售额(万元)']],
index='销售月份',
columns='商品代号',
fill_value=0,
values='销售额(万元)',
aggfunc=['count'])
monMortList = ['January','February','March','April','May','June','July','August','September','October','November','December']
jijieshop = jijieshop.reindex(monMortList)
# plotting
import matplotlib.pyplot as plt
#plt.style.use('ggplot') # set the plot style
plt.figure(figsize=(18,20))
plt.title('各商品在每个月份中的数量')
for i in jijieshop['count'].columns:
plt.plot(jijieshop.index,jijieshop['count'][i])
plt.legend(jijieshop['count'].columns)
xtisum = plt.xticks(rotation=45)
plt.show()
# some products fall off very quickly as the season changes, which marks them as strongly seasonal
# In[Task 7: finish the project report and, based on the project background and the analysis results, give the client a tailored store-operation plan (homework)]
# In summary: over to everyone to pitch in on the write-up
|
[
"1004681771@qq.com"
] |
1004681771@qq.com
|
2131fe65e7d0c961cc4bf0696a75133d6b38ee27
|
6da4349a80e90693b2bcb23f5f1c5fc3b4c3a7a1
|
/products/migrations/0010_auto_20160119_0421.py
|
f75f2a9b0a5b978e7b91f8e1b7ab7274e6bb0eb5
|
[] |
no_license
|
netlabreal/Django-ESHOP
|
1f7379a5aeff0562be655cc868071adcb395a52a
|
dafd3a277226168f4fc3e42338aed0f162c5c422
|
refs/heads/master
| 2021-01-10T07:05:22.287887
| 2016-02-21T07:14:11
| 2016-02-21T07:14:11
| 52,196,365
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import products.models
class Migration(migrations.Migration):
dependencies = [
('products', '0009_remove_category_kol'),
]
operations = [
migrations.AlterField(
model_name='products',
name='img',
field=models.ImageField(null=True, upload_to=products.models.image_upload_to, blank=True),
),
]
|
[
"site2012@list.ru"
] |
site2012@list.ru
|
fa6c3f8621bf3529d05cdccbd3acba3c26987d49
|
113c3bf772df1d7a7fbd391f777802e45222e5a4
|
/PSO/swarm.py
|
9d705f479e7787e6ea0bac14614d9c5a79767e7b
|
[] |
no_license
|
TeoMoisi/AI
|
3dbad28eead5362825133568a3626c2d9cc1fad9
|
afb468a44a5b609e6a5069a38c9beefd5138e791
|
refs/heads/master
| 2020-07-18T04:34:02.696907
| 2019-09-03T21:23:24
| 2019-09-03T21:23:24
| 206,174,184
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,908
|
py
|
from particle import Particle
from random import randint
import copy
class Swarm:
def __init__(self, noOfPaticles, nSize):
self.noOfParticles = noOfPaticles
self.particles = self.create()
self.nSize = nSize #neighbour size
def getParticles(self):
return self.particles
def getSize(self):
return len(self.particles)
def create(self):
particles = []
for i in range(self.noOfParticles):
p = Particle()
particles.append(p)
return particles
def getBestNeighbourParticle(self):
bestNeighbor = []
bestnow = 0
neighbors = self.getBestParticles()
for particle in range(self.noOfParticles):
bestNeighbor.append(neighbors[particle][0])
for i in range(0, len(neighbors[particle])):
if (self.particles[bestNeighbor[particle]].fitness > self.particles[neighbors[particle][i]].fitness):
bestNeighbor[particle] = neighbors[particle][i]
# if (neighbors[particle][i].fitness < neighbors[particle][bestnow].fitness):
# bestnow = i
# bestNeighbor.append(neighbors[particle][bestnow])
return bestNeighbor
def getBestParticles(self):
#selecting random neighbours for each particle
if self.nSize > len(self.particles):
self.nSize = len(self.particles)
neighbors = []
for i in range(len(self.particles)):
localNeighbor = []
for j in range(self.nSize):
x = randint(0, len(self.particles) - 1)
while x in localNeighbor:
x = randint(0, len(self.particles) - 1)
# localNeighbor.append(self.particles[x])
localNeighbor.append(x)
neighbors.append(copy.deepcopy(localNeighbor))
return neighbors
|
[
"teofana.moisi@gmail.com"
] |
teofana.moisi@gmail.com
|
dad8d5e9df27a1a51889538cfc0bcb8c30bc6dda
|
0764c823e1b594fc3ec5db2c28436bab6e345f4d
|
/This is codingTest/그리디/만들 수 없는 금액/만들 수 없는 금액.py
|
613129cc6e12d0b4a4e5df5e62ccc0c0cbe80691
|
[] |
no_license
|
BTOCC24/Algorithm
|
ecf5b94bf31aa69549be4aa73ff1d4874243e696
|
24a6c3c6d8cf62d14b60dbc12c7d5fa4c4d80f31
|
refs/heads/master
| 2023-04-10T22:02:41.435820
| 2021-04-15T15:09:30
| 2021-04-15T15:09:30
| 231,168,262
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
n = int(input())
data = list(map(int, input().split()))
data.sort()
avail = []
for coin in data:
if not avail:
avail.append(coin)
    else:
        # keep every reachable subset sum: old sums plus the coin, and the coin alone
        avail += [x + coin for x in avail] + [coin]
avail = sorted(set(avail))
for i in range(len(avail)):
    if avail[i] != i + 1:
        break
else:
    i = len(avail)  # every amount up to the total is reachable
print(i + 1)
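# Illustrative run (the usual textbook case for this problem): n=5, coins "3 2 1 1 9"
# -> every amount 1..7 can be made but 8 cannot, so the program prints 8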
|
[
"kisses6938@gmail.com"
] |
kisses6938@gmail.com
|
410c3e297eb1958e9ec9baa64166a76010c7b27e
|
c37d25681f16b042a1357102d7eaa9835d160756
|
/calculator.py
|
e069cf25e4764cd12984f05a156d2827949b3092
|
[] |
no_license
|
FlorentinPopescu/wsgi-calculator
|
fd9ae9542f40d413e5331ef54db2df26dd94f268
|
d9161744704b49e789047ac3c568bf61a76114e7
|
refs/heads/master
| 2020-12-26T14:16:14.896368
| 2020-02-08T04:31:29
| 2020-02-08T04:31:29
| 237,535,019
| 0
| 0
| null | 2020-01-31T23:42:03
| 2020-01-31T23:42:02
| null |
UTF-8
|
Python
| false
| false
| 7,587
|
py
|
""" calculator.py script """
#imports
import traceback
import operator as op
from functools import reduce
# =============================================
"""
For your homework this week, you'll be creating a wsgi application of
your own.
You'll create an online calculator that can perform several operations.
You'll need to support:
* Addition
* Subtractions
* Multiplication
* Division
Your users should be able to send appropriate requests and get back
proper responses. For example, if I open a browser to your wsgi
application at `http://localhost:8080/multiply/3/5` then the response
body in my browser should be `15`.
Consider the following URL/Response body pairs as tests:
```
http://localhost:8080/multiply/3/5 => 15
http://localhost:8080/add/23/42 => 65
http://localhost:8080/subtract/23/42 => -19
http://localhost:8080/divide/22/11 => 2
http://localhost:8080/ => <html>Here's how to use this page...</html>
```
To submit your homework:
* Fork this repository (Session03).
* Edit this file to meet the homework requirements.
* Your script should be runnable using `$ python calculator.py`
* When the script is running, I should be able to view your
application in my browser.
* I should also be able to see a home page (http://localhost:8080/)
that explains how to perform calculations.
* Commit and push your changes to your fork.
* Submit a link to your Session03 fork repository!
"""
# =============================================
def info():
""" info page """
page = """
<h1>Information</h1>
<hr></hr>
<ul style="list-style-type:circle;">
<li>Addition example:
<a href = "http://localhost:8080/add/3/5"> 3 + 5</a>
</li>
<li>Subtraction example:
<a href = "http://localhost:8080/subtract/23/42"> 23 - 42</a>
</li>
<li>Multiplication example:
<a href = "http://localhost:8080/multiply/3/5"> 3 * 5</a>
</li>
<li>Division example:
<a href = "http://localhost:8080/divide/22/11"> 21 / 11</a>
</li>
</ul>
<hr></hr>
"""
return page
# ----------------------------------------------
def add(*args):
""" Returns a STRING with the sum of the arguments """
# TODO: Fill sum with the correct value, based on the
# args provided.
summation = round(sum(map(float, args)), 4)
page = """
<h1>Addition</h1>
<p>The sum of listed numbers <b>{}</b> is: <b>{}</b></p>
<a href="/">Back to info page</a>
<p><b>NOTE:</b> <i>keep appending numbers to the URL
to add to the current sum; eg /add/3/5/7 -> 15</i></p>
"""
return page.format(list(map(float, args)), summation)
# ----------------------------------------------
# TODO: Add functions for handling more arithmetic operations.
def subtract(*args):
""" Returns a STRING with the difference of the arguments """
# TODO: Fill sum with the correct value, based on the
# args provided.
difference = round(op.sub(*map(float, args)), 4)
page = """
<h1>Subtraction</h1>
<p>The difference of <b>{}</b> and <b>{}</b> is: <b>{}</b></p>
<a href="/">Back to info page</a>
<p><b>NOTE:</b> <i>only the first two numbers in the URL will
be subtracted; eg /subtract/23/42 -> -19,
entering more numbers will generate an error</i></p>
"""
return page.format(*map(float, args), difference)
# ----------------------------------------------
def multiply(*args):
""" Returns a STRING with the product of the arguments """
# TODO: Fill sum with the correct value, based on the
# args provided.
product = round(reduce((lambda x, y: float(x) * float(y)), args), 4)
page = """
<h1>Multiplication</h1>
<p>The product of numbers <b>{}</b> is: <b>{}</b></p>
<a href="/">Back to info page</a>
<p><b>NOTE:</b> <i>keep appending numbers to the URL
to multiply into the current product;
eg /multiply/3/5/7 -> 105</i></p>
"""
return page.format(list(map(float, args)), product)
# ----------------------------------------------
def divide(*args):
""" Returns a STRING with the division of the arguments """
# TODO: Fill sum with the correct value, based on the
# args provided.
if float(args[1]) != 0.:
fraction = round(op.truediv(*map(float, args)), 4)
else:
raise ZeroDivisionError
page = """
<h1>Division</h1>
<p>The fraction of <b>{}</b> and <b>{}</b> is: <b>{}</b></p>
<a href="/">Back to info page</a>
<p><b>NOTE:</b> <i>only the first two numbers in the URL will
be divided; eg /divide/22/11 -> 2,
entering more numbers will generate an error</i></p>
"""
return page.format(*map(float, args), fraction)
# ----------------------------------------------
def resolve_path(path):
"""
Should return two values: a callable and an iterable of
arguments.
"""
# TODO: Provide correct values for func and args. The
# examples provide the correct *syntax*, but you should
# determine the actual values of func and args using the
# path.
# func = add
# args = ['25', '32']
path = path.strip("/").split("/")
func_name = path[0]
args = path[1:]
funcs = {
"": info,
"add": add,
"subtract": subtract,
"multiply": multiply,
"divide": divide
}
try:
func = funcs[func_name]
except KeyError:
raise NameError
return func, args
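# e.g. resolve_path('/add/3/5') returns (add, ['3', '5']),
# and resolve_path('/') returns (info, []).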
# ----------------------------------------------
def application(environ, start_response):
""" app """
# TODO: Your application code from the book database
# work here as well! Remember that your application must
# invoke start_response(status, headers) and also return
# the body of the response in BYTE encoding.
#
# TODO (bonus): Add error handling for a user attempting
# to divide by zero.
headers = [('Content-type', 'text/html')]
try:
path = environ.get('PATH_INFO', None)
if path is None:
raise NameError
func, args = resolve_path(path)
body = func(*args)
status = "200 OK"
except NameError:
status = "404 Not Found"
body = """"
<h1>Not Found</h1>
<a href="/">Back to info page</a>
"""
except ZeroDivisionError:
status = "400 Bad Request"
body = """
<h1>Unexpected Division by Zero</h1>
<a href="/">Back to info page</a>
"""
except Exception:
status = "500 Internal Server Error"
body = """
<h1> Internal Server Error</h1>
<a href="/">Back to info page</a>
"""
print(traceback.format_exc())
finally:
headers.append(('Content-length', str(len(body))))
start_response(status, headers)
return [body.encode('utf-8')]
# =============================================
if __name__ == '__main__':
# TODO: Insert the same boilerplate wsgiref simple
# server creation that you used in the book database.
from wsgiref.simple_server import make_server
SRV = make_server('localhost', 8080, application)
SRV.serve_forever()
|
[
"popescu01@gmail.com"
] |
popescu01@gmail.com
|
911bd56ce7d096b537e2228d71145283066fecd3
|
b62a74192561e950e53574aaab178eebec713d1b
|
/plots/utilities/load_pickled_result_file.py
|
368e0deca292b212233306998164a640538b934b
|
[] |
no_license
|
LEAVESrepo/leaves
|
a094fdf9bc020c43dd1af485502e196db41f2188
|
0a8590733c0b85c93c404ef1154bb96b4ab51edb
|
refs/heads/master
| 2020-12-30T01:06:33.501736
| 2020-06-07T22:33:00
| 2020-06-07T22:33:00
| 239,523,846
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
import pickle
import os
def load_pickled_result_file(input_file_absolute_path, rm_file=False):
try:
        with open(input_file_absolute_path, 'rb') as f:  # pickle files must be opened in binary mode
try:
loaded_obj = pickle.load(f)
except pickle.PickleError:
print("Cannot load " + input_file_absolute_path)
else:
if rm_file is True:
os.remove(input_file_absolute_path)
return loaded_obj
except EnvironmentError:
print("Cannot find " + input_file_absolute_path)
|
[
"leavesgit@gmail.com"
] |
leavesgit@gmail.com
|
c437bc7c363870c11572c85bed8dafde7feebd24
|
a742a7fc0f179cfdc6a19afe355b50bafe262bf4
|
/djsite/search_func.py
|
5b3bd2f53e607c66ac43f5d01bb3a054a0345642
|
[] |
no_license
|
RaisoLiu/zipf-distribution-network-visualization
|
76d470bea93e4ffc4a514d32c0cc83f121194a49
|
135eeda2f529a1bded43fed6a601e0d47c4d7cf4
|
refs/heads/main
| 2023-02-24T15:05:01.221036
| 2021-01-28T06:22:34
| 2021-01-28T06:22:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,381
|
py
|
import json
from os import listdir
from os.path import isfile, isdir, join
from .js_func import is_json, normalize_word
from nltk.metrics import edit_distance
import time
def art_get(f, res):
tStart = time.time()
art_js_set = []
inquire = res
with open(f, 'r') as f_stream:
f_str = f_stream.read()
if is_json(f_str):
art_js_set = json.loads(f_str)
res = []
for it in inquire:
res.append(art_js_set['article_list'][it[1]])
tEnd = time.time()
print("\n art_get \nIt cost %f sec" % (tEnd - tStart))
return res
def typo(addr, tar_str):
tStart = time.time()
file_js = dict()
with open(addr, 'r') as f_stream:
f_str = f_stream.read()
if is_json(f_str):
file_js = json.loads(f_str)
inv_idx = file_js['tokens_o']
tar_str = normalize_word(tar_str)
result_li = []
for it in inv_idx:
result_li.append((edit_distance(tar_str,normalize_word(it)), it))
result_li = sorted(result_li)
return_li = []
num_res = len(result_li)
lim = min(5, num_res)
for idx in range(lim):
return_li.append({'dist':result_li[idx][0], 'str':result_li[idx][1]})
tEnd = time.time()
print("\n typo \nIt cost %f sec" % (tEnd - tStart))
return return_li
def search_engine(addr, tar_str):
tStart = time.time()
file_js = dict()
with open(addr, 'r') as f_stream:
f_str = f_stream.read()
if is_json(f_str):
file_js = json.loads(f_str)
else:
return ['','inv_index_json_Destroyed']
inv_idx = file_js['w_map']
result_count = dict()
tar_li = tar_str.split(' ')
    for tar in tar_li:
        # tokens absent from the inverted index simply contribute nothing
        for no in inv_idx.get(normalize_word(tar), []):
            result_count[no] = result_count.get(no, 0) + 1
result_li = []
for cnt in result_count:
result_li.append((result_count[cnt], cnt))
result_li = sorted(result_li)
return_li = []
num_res = len(result_li)
lim = min(10, num_res)
for idx in range(lim):
return_li.append(result_li[-1 - idx])
tEnd = time.time()
print("\n search_engine \nIt cost %f sec" % (tEnd - tStart))
return return_li
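# search_engine returns up to 10 (match_count, article_no) pairs, best match first,
# e.g. [(3, 17), (2, 4)] -- the values here are purely illustrative.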
|
[
"raisoliu@gmail.com"
] |
raisoliu@gmail.com
|
22b6d88065cac062432597e4311b35e7574812ec
|
be5705e00aacb4313cf27e57fbfb009633e181ba
|
/assignment5/diff.py
|
884f32db4f6f6b0b0baaffa20935b103861aa509
|
[] |
no_license
|
yosephog/INF3331
|
667a2c5f7431cf33aa37598e11d393178ed009bc
|
86d1b54e2c97b0748b9cb3ab5c79ba6b0045577b
|
refs/heads/master
| 2020-04-27T03:19:16.121546
| 2019-04-14T10:21:30
| 2019-04-14T10:21:30
| 174,021,709
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,162
|
py
|
import sys
import re
import itertools
def read_file(file_name):
"""
this method just read a file and return it
as a list spliting at new line
"""
sourcefile=open(file_name,'r')
file=sourcefile.read()
sourcefile.close()
return file.split('\n')
def diff(original_listinal,modified):
""" This method find the difference between two file that is
the original_listinal and the modifed of the original_listinal_lineinal version
"""
file=open("diff.txt",'w')
# this for loop just reads the two file at the same time
for (original_listinal_line,modified_line) in itertools.zip_longest(original_listinal,modified):
"""
spliting the original and modifed line in to list of word in order to
compare word by word if the two lines have the same lenght
and amount of word
for example end and our. two words different but same length
"""
original_list=original_listinal_line.split(' ')
modified_list=modified_line.split(' ')
"""
if the two line have the same amount of words then
compare if there are word difference
"""
if len(original_list) == len(modified_list):
if len(original_listinal_line) > len(modified_line):
tx='- ' + original_listinal_line
print(tx)
file.write(tx+'\n')
tx2='+ ' + modified_line
print(tx2)
file.write(tx2+'\n')
elif len(original_listinal_line) == len(modified_line):
tx='0 ' + original_listinal_line
print(tx)
file.write(tx+'\n')
else:
tx='+ ' + modified_line
print(tx)
file.write(tx+'\n')
"""
if the orginal list is greater then some word must
have been deleted
"""
elif len(original_list) > len(modified_list):
tx='0 ' + original_listinal_line
file.write(tx+'\n')
tx2='- ' + modified_line
file.write(tx2)
"""
if the orginal_list is smaller then words are added to the
modified version
"""
elif len(original_list) < len(modified_list):
tx='- ' + original_listinal_line
file.write(tx + '\n')
tx2='+ ' + modified_line
file.write(tx2 + '\n')
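# The marks written to diff.txt follow the convention the branches above aim for:
#   '0 ' unchanged line, '+ ' added line, '- ' removed line.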
if __name__ == '__main__':
original_listinal=read_file(sys.argv[1])
modified=read_file(sys.argv[2])
diff(original_listinal,modified)
|
[
"thorfry@yahoo.com"
] |
thorfry@yahoo.com
|
d74c2505271b130d0407164abf41de7cc872f416
|
616fa7b1261c3703c34ffb8c8fcdcf2c301f1744
|
/3MinuteDeepLearning/venv/Scripts/easy_install-script.py
|
61985098dda703bc4db848f076fc8feb22c721e3
|
[] |
no_license
|
ann234/ML-Study
|
c483ea62273568d1a4a2e5674abb34944d690433
|
72c8253bdc5ca24f09917eedf9f272e118a0cc9a
|
refs/heads/master
| 2021-10-24T18:37:35.730031
| 2019-03-27T16:06:49
| 2019-03-27T16:06:49
| 178,026,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
#!C:\Users\cuvit\PycharmProjects\3MinuteDeepLearning\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
|
[
"cuvitiny@gmail.com"
] |
cuvitiny@gmail.com
|
42959d61c0f436a1872bbfa0c592c00f7a8b486e
|
7b3c2887a3d5d28748cfc15f5b079cdbf8c691e0
|
/scene_parsing_model/batch_eval.py
|
b6f9073ac094b5cc4ef4ac9b711a1e9a26a9d8f6
|
[] |
no_license
|
unizard/Colorization-project
|
f6b33d9073890f9524dd7eece4de6933734df0df
|
6479105b74ea8cd8eebc8cad02663ce40865dd5e
|
refs/heads/master
| 2021-01-13T17:09:06.140235
| 2016-06-07T18:34:29
| 2016-06-07T18:34:29
| 69,160,682
| 1
| 4
| null | 2016-09-25T11:35:45
| 2016-09-25T11:35:44
| null |
UTF-8
|
Python
| false
| false
| 1,514
|
py
|
caffe_root = '../../caffe-future/'
import sys
import os
import scipy.io as sio
sys.path.insert(0, caffe_root + 'python')
import numpy as np
from PIL import Image
import caffe
def rgb2gray(rgb):
return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
gray_flag = True
testdata_path = 'testset'
testresult_path = 'test_result'
if os.path.exists(testresult_path) == False:
os.makedirs(testresult_path)
# load net
net = caffe.Net('deploy.prototxt', 'semantic_segmentation.caffemodel', caffe.TEST)
# load image, switch to BGR, subtract mean, and make dims C x H x W for Caffe
filelist = os.listdir(testdata_path)
count = 0
for filename in filelist:
    count = count + 1
print('#{0} : {1}'.format(count, filename))
impath = testdata_path + '/' + filename
im = Image.open(impath)
in_ = np.array(im, dtype=np.float32)
if gray_flag == True:
in_ = rgb2gray(in_)
(row, col) = in_.shape
im3 = np.zeros([row, col, 3], dtype = np.float32)
for i in range(3):
im3[:,:,i] = in_
in_ = im3
in_ = in_[:,:,::-1]
in_ -= np.array((104.00698793,116.66876762,122.67891434))
in_ = in_.transpose((2,0,1))
# shape for input (data blob is N x C x H x W), set data
net.blobs['data'].reshape(1, *in_.shape)
net.blobs['data'].data[...] = in_
# run net and take argmax for prediction
net.forward()
# out = net.blobs['score'].data[0].argmax(axis=0)
out = net.blobs['score'].data[0]
resultname = filename[:-4] + '.mat'
resultpath = testresult_path + '/' + resultname
sio.savemat(resultpath, {'result':out})
|
[
"zerobigpark@gmail.com"
] |
zerobigpark@gmail.com
|
01d597f6437a51fe5e5010b528dc6fabf5779ebc
|
a2d4ed422e72ea6c87cc6f2f4c033f4fed55d9e1
|
/poll/migrations/0002_auto_20210114_2215.py
|
65b9bd2ad1163a0006a5a233a9d9d9cd5e6a3646
|
[
"MIT"
] |
permissive
|
slk007/SahiGalat.com
|
cdf803fe7c4ffa8842942fd43e9607e906378ed4
|
786688e07237f3554187b90e01149225efaa1713
|
refs/heads/master
| 2023-02-18T04:04:02.838852
| 2021-01-18T20:48:47
| 2021-01-18T20:48:47
| 329,948,175
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 763
|
py
|
# Generated by Django 3.1.5 on 2021-01-14 22:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('poll', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('topic_name', models.CharField(max_length=50)),
('topic_descrption', models.CharField(max_length=255)),
],
),
migrations.AddField(
model_name='question',
name='topics',
field=models.ManyToManyField(related_name='questions', to='poll.Topic'),
),
]
|
[
"shubham.soni.lko@gmail.com"
] |
shubham.soni.lko@gmail.com
|
cc55fc5791d42b5b92121217ccd66d67d71c0166
|
0cf407c7e7cab89489e4f16e27769367005ccc4f
|
/pydap/wsgi/file.py
|
1a1eac2bbe107110c485340f8556c5a5b6caa163
|
[] |
no_license
|
QPromise/pydap
|
e520a6b52b174986664f7dfc6f25df073105a301
|
7d68533fdb5f1972bab5b2075b98c41e67b700a1
|
refs/heads/master
| 2020-05-05T13:56:49.035636
| 2013-11-14T18:17:16
| 2013-11-14T18:17:16
| 180,100,672
| 1
| 0
| null | 2019-04-08T08:08:57
| 2019-04-08T08:08:57
| null |
UTF-8
|
Python
| false
| false
| 6,179
|
py
|
"""
A simple file-based Opendap server.
Serves files from a root directory, handling those recognized by installed
handlers.
"""
import re
import os
from os.path import getmtime, getsize
import time
from email.utils import formatdate
from paste.request import construct_url
from paste.httpexceptions import HTTPNotFound, HTTPSeeOther
from paste.fileapp import FileApp
from pydap.handlers.lib import get_handler, load_handlers
from pydap.lib import __version__
from pydap.exceptions import ExtensionNotSupportedError
from pydap.util.template import FileLoader, GenshiRenderer
class FileServer(object):
def __init__(self, root, templates='templates', catalog='catalog.xml', **config):
self.root = root.replace('/', os.path.sep)
self.catalog = catalog
self.config = config
loader = FileLoader(templates)
self.renderer = GenshiRenderer(
options={}, loader=loader)
self.handlers = load_handlers()
def __call__(self, environ, start_response):
path_info = environ.get('PATH_INFO', '')
filepath = os.path.abspath(os.path.normpath(os.path.join(
self.root,
path_info.lstrip('/').replace('/', os.path.sep))))
basename, extension = os.path.splitext(filepath)
assert filepath.startswith(self.root) # check for ".." exploit
        # try to set our renderer as the default, if none exists
environ.setdefault('pydap.renderer', self.renderer)
# check for regular file or dir request
if os.path.exists(filepath):
if os.path.isfile(filepath):
# return file download
return FileApp(filepath)(environ, start_response)
else:
# return directory listing
if not path_info.endswith('/'):
environ['PATH_INFO'] = path_info + '/'
return HTTPSeeOther(construct_url(environ))(environ, start_response)
return self.index(environ, start_response,
'index.html', 'text/html')
# else check for opendap request
elif os.path.exists(basename):
# Update environ with configuration keys (environ wins in case of conflict).
for k in self.config:
environ.setdefault(k, self.config[k])
handler = get_handler(basename, self.handlers)
return handler(environ, start_response)
# check for catalog
elif path_info.endswith('/%s' % self.catalog):
environ['PATH_INFO'] = path_info[:path_info.rfind('/')]
return self.index(environ, start_response,
'catalog.xml', 'text/xml')
else:
return HTTPNotFound()(environ, start_response)
def index(self, environ, start_response, template_name, content_type):
# Return directory listing.
path_info = environ.get('PATH_INFO', '')
directory = os.path.abspath(os.path.normpath(os.path.join(
self.root,
path_info.lstrip('/').replace('/', os.path.sep))))
mtime = getmtime(directory)
dirs_ = []
files_ = []
for root, dirs, files in os.walk(directory):
filepaths = [
os.path.abspath(os.path.join(root, filename))
for filename in files ]
# Get Last-modified.
filepaths = filter(os.path.exists, filepaths) # remove broken symlinks
if filepaths:
mtime = max(mtime, *map(getmtime, filepaths))
# Add list of files and directories.
if root == directory:
dirs_ = [d for d in dirs if not d.startswith('.')]
files_ = [{
'name': os.path.split(filepath)[1],
'size': format_size(getsize(filepath)),
'modified': time.localtime(getmtime(filepath)),
'supported': supported(filepath, self.handlers),
} for filepath in filepaths]
# Sort naturally using Ned Batchelder's algorithm.
dirs_.sort(key=alphanum_key)
files_.sort(key=lambda l: alphanum_key(l['name']))
# Base URL.
location = construct_url(environ, with_query_string=False)
root = construct_url(environ, with_query_string=False, with_path_info=False).rstrip('/')
context = {
'environ': environ,
'root': root,
'location': location,
'title': 'Index of %s' % (environ.get('PATH_INFO') or '/'),
'dirs' : dirs_,
'files': files_,
'catalog': self.catalog,
'version': '.'.join(str(d) for d in __version__)
}
template = environ['pydap.renderer'].loader(template_name)
output = environ['pydap.renderer'].render(template, context, output_format=content_type)
last_modified = formatdate(time.mktime(time.localtime(mtime)))
headers = [('Content-type', content_type), ('Last-modified', last_modified)]
start_response("200 OK", headers)
return [output.encode('utf-8')]
def supported(filepath, handlers):
try:
get_handler(filepath, handlers)
return True
except ExtensionNotSupportedError:
return False
# http://svn.colorstudy.com/home/ianb/ImageIndex/indexer.py
def format_size(size):
if not size:
return 'empty'
if size > 1024:
size = size / 1024.
if size > 1024:
size = size / 1024.
return '%.1i MB' % size
return '%.1f KB' % size
return '%i bytes' % size
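# e.g. format_size(0) -> 'empty', format_size(512) -> '512 bytes',
# format_size(2048) -> '2.0 KB'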
def alphanum_key(s):
"""
Turn a string into a list of string and number chunks.
>>> alphanum_key("z23a")
['z', 23, 'a']
From http://nedbatchelder.com/blog/200712.html#e20071211T054956
"""
def tryint(s):
try:
return int(s)
except:
return s
return [tryint(c) for c in re.split('([0-9]+)', s)]
def make_app(global_conf, root, templates, **kwargs):
return FileServer(root, templates=templates, **kwargs)
|
[
"roberto@dealmeida.net"
] |
roberto@dealmeida.net
|
87be072d919be62dd86ffe6083240bc43ebcb926
|
ccebf0768f2ba17530c465042646687ce90658fe
|
/src/autoencoder/tables.py
|
c645b4f99023a540900cf3af0dbeebf5678e7e70
|
[] |
no_license
|
sailfish009/SRC
|
0a9c86bb008d9ab8b2d9e42af543653d856359a1
|
d4e812039542464e906fe34ebfe6867eef3b8fd3
|
refs/heads/master
| 2023-08-22T09:08:23.795917
| 2021-10-04T12:57:04
| 2021-10-04T12:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,520
|
py
|
import argparse
import glob
import pandas as pd
parser = argparse.ArgumentParser()
parser.add_argument("--path", type=str, default="./results/")
args = parser.parse_args()
csvs = glob.glob(args.path + "*.csv")
csvs_df = [
pd.read_csv(
csv, header=None, index_col=0, names=[csv.split("_")[-2].split("/")[-1]]
)
for csv in csvs
]
df_out = pd.concat(csvs_df, axis=1, join="outer")
def formatter(s):
if isinstance(s, float):
return "OOR"
avg, std = s.split("+-")
avg = float(avg.strip())
std = float(std.strip())
out_avg = r"{:.5s}".format(f"{1e3 * avg:.4f}")
out_std = r"{:.5s}".format(f"{1e3 * std:.4f}")
out = fr"{out_avg} \tiny{{$\pm${out_std}}}"
return out
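# e.g. formatter("0.00123 +- 0.00045") -> r"1.230 \tiny{$\pm$0.450}"
# (the input string is illustrative; values are scaled by 1e3 and cropped to 5 chars)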
def to_numerical(s):
if s == "OOR":
return 9999999999999999
else:
return float(s.split(" ")[0])
def crop(v):
return f"{v:.2f}"
import json
dd = json.load(open("results/dist_mean_ref.json", "r"))
df_out = df_out.applymap(formatter)
df_out = df_out[["Grid2d", "Ring", "Bunny", "Airplane", "Car", "Guitar", "Person"]]
df_out = df_out.transpose()
df_out = df_out[
["DiffPool", "MinCut", "NMF", "LaPool", "TopK", "SAGPool", "NDP", "Graclus"]
]
df_out.columns = list(map(lambda s: rf"\textbf{{{s}}}", df_out.columns))
df_out.loc["Rank"] = (
df_out.applymap(to_numerical)
.rank(axis=1, ascending=True, method="min")
.mean(axis=0)
.map(crop)
)
with pd.option_context("max_colwidth", 1000):
print(df_out.to_latex(escape=False, bold_rows=True))
|
[
"daniele.grattarola@gmail.com"
] |
daniele.grattarola@gmail.com
|
0a15e41796a54381f7c5ea8c62b1fa0f7bf90f7b
|
f6a3fef96cc376a93b96b15f7dda3c7bd2ff4b0f
|
/profiles/models.py
|
7e23a9e67db427501757b558acf89ea767cda9e3
|
[] |
no_license
|
RishabhJain2018/evm
|
bde625317ab82edb71eac57673ec0f14fa2a1a0d
|
f36e21de47112eff80ba4976ed334a25c9d236f8
|
refs/heads/master
| 2020-12-06T12:51:54.145507
| 2016-10-14T17:41:04
| 2016-10-14T17:41:15
| 67,304,771
| 1
| 1
| null | 2016-09-19T17:31:48
| 2016-09-03T18:12:17
|
Python
|
UTF-8
|
Python
| false
| false
| 711
|
py
|
from django.db import models
from django.contrib.auth.models import User
class UserDetail(models.Model):
'''
It stores information about the users.
'''
user = models.OneToOneField(User)
github = models.CharField(max_length=200, blank=False, null=False)
linkedin = models.CharField(max_length=200, blank=False, null=False)
contact_no = models.CharField(max_length=200, blank=False, null=False)
about = models.CharField(max_length=500, blank=False, null=False)
created = models.DateTimeField("Created", null=True, auto_now_add=True)
modified = models.DateTimeField("Last Modified", null=True, auto_now=True)
def __unicode__(self):
return self.user.username
|
[
"rishabhjain2018@gmail.com"
] |
rishabhjain2018@gmail.com
|
d0ce2bd2beb94857a80d37c8272a9bc79b6b5ffb
|
aaa85418696016fd883830d41738d5eec9da7c05
|
/Loops/guessing.py
|
b2186c4c423982301ef64133b566093bf9fe0758
|
[] |
no_license
|
TimothyJao/pythonlearning
|
1017a68f4320106f04fdb83b7c88a08eb6026236
|
d00757bbf09788b6946b9a37468030e1dd1b592b
|
refs/heads/master
| 2020-06-12T14:18:59.363288
| 2019-08-22T02:08:40
| 2019-08-22T02:08:40
| 194,326,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
import random
keep_playing = "y"
while keep_playing != "n":
guess = 0
    val = random.randint(1, 10)  # randint is inclusive on both ends
while guess != val:
guess = int(input("Guess a number between 1 and 10: "))
if val < guess:
print("Too high! Guess again.")
elif val > guess:
print("Too low! Guess again.")
else:
print("You guessed it! You won!")
keep_playing = input("Do you want to keep playing? (y/n): ")
|
[
"timothy.i.jao@gmail.com"
] |
timothy.i.jao@gmail.com
|
1a2f34916aa0d503d30dc52093083af58ea8b662
|
64d0fadae054f71c2b7ab3515c287d2b65de5d6f
|
/server/ordermanager/core/migrations/0001_initial.py
|
b7f197269dfc168cdbd6a9dca543202ebfde7d02
|
[] |
no_license
|
jasoneffendi/ISYS6597-TK3
|
58a4fd0630dec0b74b27374726d6daa90555006f
|
54831f6f4af8e4e843699fb0cec57950bbea0f10
|
refs/heads/master
| 2023-03-01T14:51:48.180246
| 2021-02-06T07:26:25
| 2021-02-06T07:26:25
| 336,293,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,714
|
py
|
# Generated by Django 3.1.6 on 2021-02-06 03:24
from django.db import migrations, models
import django.db.models.deletion
import ordermanager.core.models.pegawai
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Bagian',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nama_bagian', models.CharField(max_length=16)),
],
),
migrations.CreateModel(
name='Barang',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nama_barang', models.CharField(max_length=32, unique=True)),
],
),
migrations.CreateModel(
name='Pegawai',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(blank=True, max_length=32, unique=True)),
('nama_pegawai', models.CharField(blank=True, max_length=32)),
('alamat_pegawai', models.CharField(blank=True, max_length=64)),
('hp_pegawai', models.CharField(blank=True, max_length=16)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('id_bagian', models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, to='core.bagian')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
managers=[
('objects', ordermanager.core.models.pegawai.UserManager()),
],
),
]
|
[
"jasoneffendi27@gmail.com"
] |
jasoneffendi27@gmail.com
|
c4906d5dd8401f7daa497d48fac9424a118a32cf
|
d2e25085681ac995b16d7f5a2699e0134c219337
|
/main.py
|
47b830a151adeab4d5da7ae719119445950d66bf
|
[] |
no_license
|
fineans/temp-mail
|
8134ceebf87786a1bfd6838c9582d7554a99971a
|
6fb682ddcef36be99549b641f3ddb90d419f5dee
|
refs/heads/main
| 2023-04-28T23:11:47.474243
| 2021-05-20T06:49:45
| 2021-05-20T06:49:45
| 358,484,178
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,312
|
py
|
import pyperclip
import requests
import random
import string
import time
import sys
import re
import os
API = 'https://www.1secmail.com/api/v1/'
domainList = ['1secmail.com', '1secmail.net', '1secmail.org']
domain = random.choice(domainList)
def banner():
print(r'''
Temp Mail
[by fineans/Harry]
''')
def generateUserName():
name = string.ascii_lowercase + string.digits
username = ''.join(random.choice(name) for i in range(10))
return username
def extract():
getUserName = re.search(r'login=(.*)&',newMail).group(1)
getDomain = re.search(r'domain=(.*)', newMail).group(1)
return [getUserName, getDomain]
def print_statusline(msg: str):
last_msg_length = len(print_statusline.last_msg) if hasattr(print_statusline, 'last_msg') else 0
print(' ' * last_msg_length, end='\r')
print(msg, end='\r')
sys.stdout.flush()
print_statusline.last_msg = msg
def deleteMail():
url = 'https://www.1secmail.com/mailbox'
data = {
'action': 'deleteMailbox',
'login': f'{extract()[0]}',
'domain': f'{extract()[1]}'
}
print_statusline("Disposing your email address - " + mail + '\n')
req = requests.post(url, data=data)
def checkMails():
reqLink = f'{API}?action=getMessages&login={extract()[0]}&domain={extract()[1]}'
req = requests.get(reqLink).json()
length = len(req)
if length == 0:
print_statusline("Your mailbox is empty. Hold tight. Mailbox is refreshed automatically every 5 seconds.")
else:
idList = []
for i in req:
for k,v in i.items():
if k == 'id':
mailId = v
idList.append(mailId)
x = 'mails' if length > 1 else 'mail'
print_statusline(f"You received {length} {x}. (Mailbox is refreshed automatically every 5 seconds.)")
current_directory = os.getcwd()
final_directory = os.path.join(current_directory, r'All Mails')
if not os.path.exists(final_directory):
os.makedirs(final_directory)
for i in idList:
msgRead = f'{API}?action=readMessage&login={extract()[0]}&domain={extract()[1]}&id={i}'
req = requests.get(msgRead).json()
for k,v in req.items():
if k == 'from':
sender = v
if k == 'subject':
subject = v
if k == 'date':
date = v
if k == 'textBody':
content = v
mail_file_path = os.path.join(final_directory, f'{i}.txt')
with open(mail_file_path,'w') as file:
file.write("Sender: " + sender + '\n' + "To: " + mail + '\n' + "Subject: " + subject + '\n' + "Date: " + date + '\n' + "Content: " + content + '\n')
banner()
userInput1 = input("Do you wish to use to a custom domain name (Y/N): ").capitalize()
try:
if userInput1 == 'Y':
userInput2 = input("\nEnter the name that you wish to use as your domain name: ")
newMail = f"{API}?login={userInput2}&domain={domain}"
reqMail = requests.get(newMail)
mail = f"{extract()[0]}@{extract()[1]}"
pyperclip.copy(mail)
print("\nYour temporary email is " + mail + " (Email address copied to clipboard.)" +"\n")
print(f"---------------------------- | Inbox of {mail}| ----------------------------\n")
while True:
checkMails()
time.sleep(5)
if userInput1 == 'N':
newMail = f"{API}?login={generateUserName()}&domain={domain}"
reqMail = requests.get(newMail)
mail = f"{extract()[0]}@{extract()[1]}"
pyperclip.copy(mail)
print("\nYour temporary email is " + mail + " (Email address copied to clipboard.)" + "\n")
print(f"---------------------------- | Inbox of {mail} | ----------------------------\n")
while True:
checkMails()
time.sleep(5)
except KeyboardInterrupt:
deleteMail()
print("\nProgramme Interrupted")
os.system('cls' if os.name == 'nt' else 'clear')
|
[
"noreply@github.com"
] |
fineans.noreply@github.com
|
d5e1c7014bf8d9e61278ce68e402134d2183168c
|
1e6ef86623d1ba96978859fa6a89e42848c0fafe
|
/pages/views.py
|
8ed83abdc8bc9ab36cd12881a0d0c580ab105b8d
|
[] |
no_license
|
khaledgedo/my_first_project_django
|
4aa915bee523d84e889cacb6946753e172c4b361
|
abea1bbd4fdc0946b0f61685421e0b95957b4adc
|
refs/heads/master
| 2022-12-26T10:30:04.219746
| 2020-10-08T05:16:18
| 2020-10-08T05:16:18
| 302,237,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
"""#from django.shortcuts import render
#from django.http import HttpResponse
from django.views.generic import TemplateView
# Create your views here.
#def homePageView(request):
# return HttpResponse('Hello, World!')
class HomePageView(TemplateView):
template_name = 'home.html'"""
from django.views.generic import TemplateView
class HomePageView(TemplateView):
template_name = 'home.html'
class AboutPageView(TemplateView):
template_name = 'about.html'
|
[
"1998kza@gmail.com"
] |
1998kza@gmail.com
|
0baae21df3c83b8794f9038b6cf160e90c2fa15e
|
1c171ad5af62dc76aec9eb823ab53e15f3d4f90c
|
/code/utils/sio_msgs.py
|
4fd545a06c896f596b36d109ec76e0c499026a03
|
[] |
no_license
|
mgangster/RobotX-DeepLearning
|
d5ac1edbcc9f75a335a82402a79cbcefaa07786d
|
7810df4326ab847a980b3b152870b9ae2c66cfd1
|
refs/heads/main
| 2023-02-06T16:11:44.458896
| 2020-12-13T07:11:16
| 2020-12-13T07:11:16
| 320,981,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,359
|
py
|
#!/usr/bin/env python
import base64
from io import BytesIO
from PIL import Image
def create_box_marker_msg(id, pose, dims=(0.7,0.7,2.0), color=(0,0,1,0.1), duration=0.4):
""" Creates a box marker message to be passed to the simulator.
Args:
id (int): Identifier for the box marker.
pose (list): Pose of the box [x, y, z, roll, pitch, yaw]
dims (list): Dimensions of the box in meters [height, width, depth]
color (list): Color and Alpha of the box, with all values as floats
ranging from 0-1 [r,g,b,a]
duration (int): How long the box will exist, -1 means forever
Returns:
dict: JSON message to be emitted as socketio event
"""
msg = {
'id': id,
'pose': pose,
'dimensions': dims,
'color': color,
'duration': duration
}
return msg
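# Example (values illustrative): a translucent blue box at the origin for 0.4 s
# msg = create_box_marker_msg(1, [0, 0, 0, 0, 0, 0])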
def create_object_detected_msg(position):
"""creates an xyz message of target location"""
return {'coords':position}
def create_delete_marker_msg(id):
""" Creates a delete marker message.
Marker with the given id will be delete, -1 deletes all markers
Args:
id (int): Identifier of the marker
Returns:
dict: JSON message to be emitted as socketio event
"""
return {'id': id}
|
[
"gan.ma@outlook.com"
] |
gan.ma@outlook.com
|
b6ba771bfc4395a6d64aadf41ffaea00f087fa83
|
9bb0bc36b06bfcc5ab107f83ba7d55a5ce38c7ab
|
/tests/test_empty.py
|
4b9095b6ba0aee028e90c735007fb28be6a34790
|
[
"MIT"
] |
permissive
|
LiBa001/disputils
|
8ae94318c5993f1905f2a8052a9e63a91954a8b2
|
313404e02c781a999c44d7f3e87548c01e9cc7da
|
refs/heads/master
| 2021-12-24T21:04:34.338738
| 2021-05-29T21:19:45
| 2021-05-29T21:19:45
| 169,900,101
| 127
| 42
|
MIT
| 2021-10-05T06:48:00
| 2019-02-09T18:39:21
|
Python
|
UTF-8
|
Python
| false
| false
| 27
|
py
|
def test_empty():
pass
|
[
"git@libasoft.de"
] |
git@libasoft.de
|
f50c388f09f96c82d10b056d7d08a33bf3c57dec
|
91061fb90c7fc710d0a84f7b1fdec12d27210263
|
/tensorflow_probability/python/internal/samplers.py
|
6b61256731d6b10b984f5371451045d7c2e8a15f
|
[
"Apache-2.0"
] |
permissive
|
RaniereRamos/probability
|
d727f995eb40366c854ba7d453b861912e939d83
|
e5df50d2b23f99338f1543cb6fe6a2bfa095f385
|
refs/heads/master
| 2022-12-02T00:11:41.809774
| 2020-08-12T19:16:25
| 2020-08-12T19:20:24
| 287,109,843
| 1
| 0
|
Apache-2.0
| 2020-08-12T20:31:53
| 2020-08-12T20:31:52
| null |
UTF-8
|
Python
| false
| false
| 7,297
|
py
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Random samplers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
# Dependency imports
import numpy as np
import six
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import prefer_static as ps
__all__ = [
'categorical',
'gamma',
'normal',
'poisson',
'sanitize_seed',
'split_seed',
'shuffle',
'uniform',
'zeros_seed',
]
JAX_MODE = False
SEED_DTYPE = np.uint32 if JAX_MODE else np.int32
def zeros_seed():
return tf.constant([0, 0], dtype=SEED_DTYPE)
def sanitize_seed(seed, salt=None):
"""Map various types to a seed `Tensor`."""
if callable(seed): # e.g. SeedStream.
seed = seed()
if salt is not None and not isinstance(salt, str):
raise TypeError('`salt` must be a python `str`, got {}'.format(repr(salt)))
if seed is None or isinstance(seed, six.integer_types):
if JAX_MODE:
raise ValueError('TFP-on-JAX requires a `jax.random.PRNGKey` `seed` arg.')
# TODO(b/147874898): Do we deprecate `int` seeds, migrate ints to stateless?
if salt is not None:
# Prefer to incorporate salt as a constant.
if seed is not None:
seed = int(hashlib.sha512(
str((seed, salt)).encode('utf-8')).hexdigest(), 16) % (2**31 - 1)
salt = None
# Convert "stateful-indicating" `int`/`None` seed to stateless Tensor seed,
# by way of a stateful sampler.
seed = tf.random.uniform([2], seed=seed, minval=np.iinfo(SEED_DTYPE).min,
maxval=np.iinfo(SEED_DTYPE).max, dtype=SEED_DTYPE,
name='seed')
# TODO(b/159209541): Consider ignoring salts for stateless seeds, for
# performance and because using stateless seeds already requires the
# discipline of splitting.
if salt is not None:
salt = int(hashlib.sha512(str(salt).encode('utf-8')).hexdigest(), 16)
if JAX_MODE:
from jax import random as jaxrand # pylint: disable=g-import-not-at-top
seed = jaxrand.fold_in(seed, salt & (2**32 - 1))
else:
seed = tf.bitwise.bitwise_xor(
seed, np.uint64([salt & (2**64 - 1)]).view(np.int32))
return tf.convert_to_tensor(seed, dtype=SEED_DTYPE, name='seed')
def split_seed(seed, n=2, salt=None, name=None):
"""Splits a seed into `n` derived seeds.
Args:
seed: The seed to split; may be an `int`, an `(int, int) tuple`, or a
`Tensor`. `int` seeds are converted to `Tensor` seeds using
`tf.random.uniform` stateful sampling. Tuples are converted to `Tensor`.
n: The number of splits to return.
salt: Optional `str` salt to mix with the seed.
name: Optional name to scope related ops.
Returns:
seeds: If `n` is a Python `int`, a `tuple` of seed values is returned. If
`n` is an int `Tensor`, a single `Tensor` of shape `[n, 2]` is returned. A
single such seed is suitable to pass as the `seed` argument of the
`tf.random.stateless_*` ops.
"""
if not (isinstance(n, int)
or isinstance(n, np.ndarray)
or tf.is_tensor(n)): # avoid confusion with salt.
raise TypeError(
'`n` must be a python `int` or an int Tensor, got {}'.format(repr(n)))
with tf.name_scope(name or 'split'):
seed = sanitize_seed(seed, salt=salt)
if JAX_MODE:
from jax import random as jaxrand # pylint: disable=g-import-not-at-top
return jaxrand.split(seed, n)
seeds = tf.random.stateless_uniform(
[n, 2], seed=seed, minval=None, maxval=None, dtype=SEED_DTYPE)
if isinstance(n, six.integer_types):
seeds = tf.unstack(seeds)
return seeds
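# e.g. seed_a, seed_b = split_seed(seed, n=2)  # two independent derived seeds
# (the variable names here are illustrative)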
def categorical(
logits,
num_samples,
dtype=None,
seed=None,
name=None):
"""As `tf.random.categorical`, but handling stateful/stateless `seed`s."""
with tf.name_scope(name or 'categorical'):
seed = sanitize_seed(seed)
return tf.random.stateless_categorical(
logits=logits, num_samples=num_samples, seed=seed, dtype=dtype)
def gamma(
shape,
alpha,
beta=None,
dtype=tf.float32,
seed=None,
name=None):
"""As `tf.random.gamma`, but handling stateful/stateless `seed`s."""
with tf.name_scope(name or 'gamma'):
seed = sanitize_seed(seed)
alpha = tf.convert_to_tensor(alpha, dtype=dtype)
beta = None if beta is None else tf.convert_to_tensor(beta, dtype=dtype)
params_shape = ps.shape(alpha)
if beta is not None:
params_shape = ps.broadcast_shape(params_shape, ps.shape(beta))
shape = ps.convert_to_shape_tensor(
shape,
dtype=getattr(params_shape, 'dtype', np.int32)) # May be TensorShape.
samples_shape = ps.concat([shape, params_shape], axis=0)
return tf.random.stateless_gamma(
shape=samples_shape, seed=seed, alpha=alpha, beta=beta, dtype=dtype)
def normal(
shape,
mean=0.0,
stddev=1.0,
dtype=tf.float32,
seed=None,
name=None):
"""As `tf.random.normal`, but handling stateful/stateless `seed`s."""
with tf.name_scope(name or 'normal'):
# TODO(b/147874898): Remove workaround for seed-sensitive tests.
if seed is None or isinstance(seed, six.integer_types):
return tf.random.normal(
shape=shape, seed=seed, mean=mean, stddev=stddev, dtype=dtype)
seed = sanitize_seed(seed)
return tf.random.stateless_normal(
shape=shape, seed=seed, mean=mean, stddev=stddev, dtype=dtype)
def poisson(
shape,
lam,
dtype=tf.float32,
seed=None,
name=None):
"""As `tf.random.poisson`, but handling stateful/stateless `seed`s."""
with tf.name_scope(name or 'poisson'):
seed = sanitize_seed(seed)
lam_shape = tf.shape(lam)
shape = tf.convert_to_tensor(shape, dtype=lam_shape.dtype)
sample_shape = tf.concat([shape, lam_shape], axis=0)
return tf.random.stateless_poisson(
shape=sample_shape, seed=seed, lam=lam, dtype=dtype)
def shuffle(
value,
seed=None,
name=None):
"""As `tf.random.shuffle`, but handling stateful/stateless `seed`s."""
with tf.name_scope(name or 'shuffle'):
seed = sanitize_seed(seed)
sortkey = tf.random.stateless_uniform(shape=[tf.shape(value)[0]], seed=seed)
return tf.gather(value, tf.argsort(sortkey))
def uniform(
shape,
minval=0,
maxval=None,
dtype=tf.float32,
seed=None,
name=None):
"""As `tf.random.uniform`, but handling stateful/stateless `seed`s."""
with tf.name_scope(name or 'uniform'):
seed = sanitize_seed(seed)
return tf.random.stateless_uniform(
shape=shape, seed=seed, minval=minval, maxval=maxval, dtype=dtype)
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
fb19e1295635896b16fb46827d75613073fba4f8
|
233b8920b755fae3a244109354a93876bcfa4e10
|
/i_seqLogica.py
|
6e64817aa4d2b373dd8c818a8c963d66f89bfd86
|
[] |
no_license
|
ClaudiaStrm/UriOnlineJudge
|
290132cdf0789ba5f67aba936aa86361859729f2
|
db07316d084709827165432996f57e70f2f0d018
|
refs/heads/master
| 2020-06-26T16:41:54.130648
| 2016-12-04T07:19:53
| 2016-12-04T07:19:53
| 74,549,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
'''
Write a program that reads an integer value N. The program will print N * 2
output lines, following the logic of the example below. For values with more
than 6 digits, all digits must be displayed.
'''
x = int(input())
for y in range(1, (x + 1)):
print(y, y * y, y ** 3)
print(y, y * y + 1, y ** 3 + 1)
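# Worked example (derived from the loop above): for input x = 2, each y prints
# the line `y y**2 y**3` followed by `y y**2+1 y**3+1`:
#
#   1 1 1
#   1 2 2
#   2 4 8
#   2 5 9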
|
[
"s_moura@hotmail.com"
] |
s_moura@hotmail.com
|
a17dcf9c8c0134db26e0f4f57ea3d2d326935998
|
352137e8b912e98878ad7177f3648923064c6931
|
/AutoTyper.py
|
087244d4fdd22cca7c827d710d135f26d940fb99
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
Jeremy-Leon/AutoTyper-OCR-Script
|
321f455c9834e10e2a8ddf525dc1d8f68ff6dfec
|
7c40ef3902257fafe9cd90045aff11de1ba29f94
|
refs/heads/master
| 2021-03-26T21:17:18.488174
| 2020-03-16T18:00:39
| 2020-03-16T18:00:39
| 247,750,660
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,396
|
py
|
from PIL import Image, ImageGrab, ImageOps
import pytesseract
import time
import pynput.mouse as ms
import pynput.keyboard as kb
from pynput.keyboard import Key, Controller
keyboard = Controller()
pytesseract.pytesseract.tesseract_cmd = 'C:/Program Files/Tesseract-OCR/tesseract.exe'
class AutoTyper:
clickCount = 0
pCords = [0,0,0,0]
defined = False
pImage = None
def areaSelect():
print('Click twice to define TEXT window')
def on_click(x, y, button, pressed):
if pressed:
print ('Mouse clicked at ({0}, {1}) with {2}'.format(x, y, button))
if AutoTyper.clickCount == 0:
AutoTyper.pCords[0] = x
AutoTyper.pCords[1] = y
elif AutoTyper.clickCount == 1:
AutoTyper.pCords[2] = x
AutoTyper.pCords[3] = y
AutoTyper.defined = True
print('')
AutoTyper.clickCount = 0
return False
AutoTyper.clickCount += 1
with ms.Listener(on_click = on_click) as listener:
listener.join()
def keyPress():
print('Press UP ARROW to select OCR region')
        def on_press(key):
            pass  # presses are ignored; the handler acts on key release
def on_release(key):
if key == Key.esc:
print('Stopping key listener')
return False
elif key == Key.up:
print('UP arrow pressed\n')
AutoTyper.areaSelect()
AutoTyper.capture()
return False
with kb.Listener(on_press = on_press, on_release = on_release) as listener:
listener.join()
def startTyping(delayTime: float):
print('Press DOWN ARROW to start typing')
        def on_press(key):
            pass  # presses are ignored; the handler acts on key release
def on_release(key):
if key == Key.esc:
print('Stopping key listener')
return False
elif key == Key.down:
print('DOWN arrow pressed\n')
AutoTyper.output(delayTime)
return False
with kb.Listener(on_press = on_press, on_release = on_release) as listener:
listener.join()
def capture():
if AutoTyper.defined:
AutoTyper.pImage = ImageGrab.grab(bbox = (AutoTyper.pCords[0],AutoTyper.pCords[1],AutoTyper.pCords[2],AutoTyper.pCords[3]))
else:
print('please define an area to OCR before trying to print')
def output(delayTime: float):
paraString = pytesseract.image_to_string(AutoTyper.pImage)
length = len(paraString)
        # Character replacement to make the output more accurate
paraString = paraString.replace('|','I')
paraString = paraString.replace('\n',' ')
for i in range(length):
keyboard.press(paraString[i])
keyboard.release(paraString[i])
time.sleep(delayTime)
print('The Processed String:\n',paraString,'\n')
def start(delayTime: float):
        print('Time interval between chars:', delayTime)
AutoTyper.keyPress()
AutoTyper.startTyping(delayTime)
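# A minimal usage sketch (assumptions: Tesseract is installed at the path
# configured above, and the UP/DOWN-arrow flow described in the prompts is
# followed):
#
#   AutoTyper.start(0.05)  # UP: select OCR region (two clicks); DOWN: type it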
|
[
"46580391+Srkinko@users.noreply.github.com"
] |
46580391+Srkinko@users.noreply.github.com
|
1685da0aea71988384d8cf1512c114fe4eabe8cc
|
42095168e32e408cf0eaa8aa424da6100b17cadf
|
/setsparser.py
|
8bea02ceebb87d80eaa52d5b9a9fd1ca8f016264
|
[] |
no_license
|
ninjazeroone/dynamics-setsparser
|
bd0acc85bf8a8e13a4d1cdb82cf00e2f8727e3d1
|
75d0b05383f422964a0bbe84d8e079efb6e7b6e8
|
refs/heads/master
| 2020-04-28T22:28:00.352270
| 2019-03-14T13:49:13
| 2019-03-14T13:49:13
| 175,618,699
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,791
|
py
|
#!/usr/bin/python3
import argparse
import requests
from requests_ntlm import HttpNtlmAuth
import sys
#dealing with arguments
parser = argparse.ArgumentParser()
parser.add_argument('urlGet', action='store', help='url to get all the sets')
parser.add_argument('-uP', action='store', dest='urlParse', help='url to parse all the contents')
parser.add_argument('--login', action='store', dest='login', help='login to authenticate through ntlm (if needed), for example: corp\JSmith')
parser.add_argument('--password', action='store', dest='password', help='password to login through ntlm')
args = parser.parse_args()
def getSets(url):
    # Use the `url` parameter rather than reaching for the global `args.urlGet`.
    sets = getRequest(url)
    return sets['d']['EntitySets']
def getRequest(url, par = None):
if not par:
par = {}
    if args.login and args.password:
        r = requests.get(url, auth=HttpNtlmAuth(args.login, args.password), headers={'Accept': 'application/json'}, params=par)
    else:
        # Bug fix: the unauthenticated path previously dropped the query params.
        r = requests.get(url, headers={'Accept': 'application/json'}, params=par)
    try:
        result = r.json()
    except ValueError:  # response body was not valid JSON
        print('\nError. Request can\'t get JSON, maybe you need to authenticate? Anyway, here\'s the server\'s response:\n')
        print(r.text)
        sys.exit(1)
    return result
def parseSets(url):
sets = getSets(url)
    with open("result.txt", "w") as f:
        total = len(sets)
        done = 0
        for entity_set in sets:  # avoid shadowing the builtin `set`
            f.write('SET NAME: ' + entity_set + '\n')
            f.write('CONTENT:\n')
            f.write(str(getRequest(url + entity_set, par={'$select': '*'})) + '\n\n\n')
            done += 1
            # Report progress against the fixed total, not the shrinking remainder.
            sys.stdout.write('Sets parsed: ' + str(done) + ' of ' + str(total) + '\r')
            sys.stdout.flush()
    print('All done!')
def main():
parseSets(args.urlGet)
if __name__ == "__main__":
main()
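# Example invocation (hypothetical endpoint; pass NTLM credentials only when
# the service requires them; the login format follows the script's own help
# text):
#
#   python3 setsparser.py https://crm.example.com/api/data/v8.2/ \
#       --login 'corp\JSmith' --password 'secret'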
|
[
"noreply@github.com"
] |
ninjazeroone.noreply@github.com
|
13be31c8a1ff4988209e84fb040dc898e547c42f
|
b893e1cf8d1f6c16cb3e8691c08232763ca3195f
|
/homework/migrations/0004_post_image.py
|
0a1e0ce257db80fa64eb5aff91cec915d22e39c1
|
[] |
no_license
|
Jacup/p_interdomowa
|
f1e112d0a51f482cd1f4707b8ddc869691bc8092
|
b9896ac51b610681d4617269e01a8687700d66cd
|
refs/heads/master
| 2021-01-25T09:10:50.745314
| 2017-06-11T07:00:05
| 2017-06-11T07:00:05
| 93,797,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 558
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('homework', '0003_auto_20170611_0029'),
]
operations = [
        # The datetime default is a one-off value so existing rows validate;
        # preserve_default=False drops it from the final field definition.
        migrations.AddField(
model_name='post',
name='image',
field=models.FileField(default=datetime.datetime(2017, 6, 11, 4, 52, 38, 656090, tzinfo=utc), upload_to=''),
preserve_default=False,
),
]
|
[
"jacubgr@gmail.com"
] |
jacubgr@gmail.com
|
37a13959fde12095b8e2cb0f8775061e7677e9f2
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/testbase/azure-mgmt-testbase/generated_samples/favorite_process_create.py
|
3fab21d007912206ca6d249b90b6843e148bcd6d
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,724
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.testbase import TestBase
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-testbase
# USAGE
python favorite_process_create.py
Before running the sample, set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the values, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = TestBase(
credential=DefaultAzureCredential(),
subscription_id="subscription-id",
)
response = client.favorite_processes.create(
resource_group_name="contoso-rg1",
test_base_account_name="contoso-testBaseAccount1",
package_name="contoso-package2",
favorite_process_resource_name="testAppProcess",
parameters={"properties": {"actualProcessName": "testApp&.exe"}},
)
print(response)
# x-ms-original-file: specification/testbase/resource-manager/Microsoft.TestBase/preview/2022-04-01-preview/examples/FavoriteProcessCreate.json
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|