blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f2db0f815309f934b46da888e24855c0aad96a91
|
914b504e13df945a50f35eca4d850eb2c5b52c0b
|
/test/compute/test_base.py
|
f8c9bd3ba3a0fd128e1401b5f2e96d9796badcc2
|
[
"Apache-2.0"
] |
permissive
|
cloudkick/libcloud
|
d05c0401bd232279cb38b5abacd3d4c85d7d072f
|
9c8605e1518c6b5e2511f0780e1946089a7256dd
|
refs/heads/master
| 2021-01-01T19:51:41.895189
| 2011-03-14T02:34:57
| 2011-03-14T02:34:57
| 258,426
| 8
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,958
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.common.base import Response
from libcloud.common.base import ConnectionKey, ConnectionUserAndKey
from libcloud.compute.base import Node, NodeSize, NodeImage, NodeDriver
from test import MockResponse
class FakeDriver(object):
    """Minimal stand-in driver: exposes only the ``type`` attribute the tests need."""

    type = 0
class BaseTests(unittest.TestCase):
    """Smoke tests: each libcloud base class can be constructed with dummy values.

    No assertions — success is simply "the constructor does not raise".
    """

    def test_base_node(self):
        node = Node(id=0, name=0, state=0, public_ip=0, private_ip=0,
                    driver=FakeDriver())

    def test_base_node_size(self):
        node_size = NodeSize(id=0, name=0, ram=0, disk=0, bandwidth=0, price=0,
                             driver=FakeDriver())

    def test_base_node_image(self):
        node_image = NodeImage(id=0, name=0, driver=FakeDriver())

    def test_base_response(self):
        resp = Response(MockResponse(status=200, body='foo'))

    def test_base_node_driver(self):
        node_driver = NodeDriver('foo')

    def test_base_connection_key(self):
        conn = ConnectionKey('foo')

    def test_base_connection_userkey(self):
        conn = ConnectionUserAndKey('foo', 'bar')

    # NOTE(review): the tests below were disabled before this review; kept as-is.
    # def test_drivers_interface(self):
    #     failures = []
    #     for driver in DRIVERS:
    #         creds = ProviderCreds(driver, 'foo', 'bar')
    #         try:
    #             verifyObject(INodeDriver, get_driver(driver)(creds))
    #         except BrokenImplementation:
    #             failures.append(DRIVERS[driver][1])
    #
    #     if failures:
    #         self.fail('the following drivers do not support the \
    #                   INodeDriver interface: %s' % (', '.join(failures)))

    # def test_invalid_creds(self):
    #     failures = []
    #     for driver in DRIVERS:
    #         if driver == Provider.DUMMY:
    #             continue
    #         conn = connect(driver, 'bad', 'keys')
    #         try:
    #             conn.list_nodes()
    #         except InvalidCredsException:
    #             pass
    #         else:
    #             failures.append(DRIVERS[driver][1])
    #
    #     if failures:
    #         self.fail('the following drivers did not throw an \
    #                   InvalidCredsException: %s' % (', '.join(failures)))
# Run the suite directly and propagate the result as the process exit code.
if __name__ == '__main__':
    sys.exit(unittest.main())
|
[
"tomaz@apache.org"
] |
tomaz@apache.org
|
ab5f894430e4173d4f912b2ff27306986e39d566
|
146c71808bdd5fa458ef73df4a9b5837c83e779d
|
/tests/check_accuracy/check_accuracy_tests.py
|
5bad11a87167ac8aab9336ec6f018f0846e9a884
|
[
"MIT"
] |
permissive
|
aladdinpersson/aladdin
|
62cff7ed8c014db91505545986e17b85e1656f98
|
4fd92ff3b6e74761fff75b01070930c9ec6ce29f
|
refs/heads/main
| 2023-04-15T14:41:27.236738
| 2021-04-15T10:39:11
| 2021-04-15T10:39:11
| 352,296,885
| 13
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 590
|
py
|
# Import folder where sorting algorithms
import sys
import unittest
import numpy as np
# For importing from different folders
# OBS: This is supposed to be done with automated testing,
# hence relative to folder we want to import from
sys.path.append("aladdin/")
# If run from local:
# sys.path.append('../../ML/algorithms/linearregression')
from check_accuracy import check_accuracy
class TestCheckAccuracy(unittest.TestCase):
    """Placeholder test case for ``check_accuracy`` — no assertions implemented yet."""

    def setUp(self):
        # Nothing to prepare so far.
        pass

    def test(self):
        # TODO: exercise check_accuracy with real fixtures.
        pass
# Entry point: announce the suite and hand over to unittest.
if __name__ == "__main__":
    print("Running Check Accuracy tests")
    unittest.main()
|
[
"aladdin.persson@hotmail.com"
] |
aladdin.persson@hotmail.com
|
fe6df273d0824aeb08610dde5812f46f73da6587
|
17cb31350a9d0996e19dd111fc31980df03f82bf
|
/strawberryfields/devicespecs/device_specs.py
|
a32f9ef474ec3c53c88b0f76495cc55a33f98019
|
[
"Apache-2.0"
] |
permissive
|
zeta1999/strawberryfields
|
3eee705b711bd195cc6f1510461d75f6e7d9821b
|
1bf05585be3553a7bb5c2f687dc45b7a064ddb17
|
refs/heads/master
| 2020-06-09T02:56:19.840324
| 2019-06-21T16:50:59
| 2019-06-21T16:50:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,785
|
py
|
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract base class for storing device data for validation"""
from typing import List, Set, Dict, Union
import abc
import blackbird
from blackbird.utils import to_DiGraph
class DeviceSpecs(abc.ABC):
    """Abstract base class for backend data"""

    @property
    @abc.abstractmethod
    def modes(self) -> Union[int, None]:
        """The supported number of modes of the device.

        If the device supports arbitrary number of modes, set this to 0.

        Returns:
            int: number of supported modes
        """

    @property
    @abc.abstractmethod
    def local(self) -> bool:
        """Whether the backend supports local execution.

        Returns:
            bool: ``True`` if the backend supports local execution
        """

    @property
    @abc.abstractmethod
    def remote(self) -> bool:
        """Whether the backend supports remote execution.

        Returns:
            bool: ``True`` if the backend supports remote execution
        """

    @property
    @abc.abstractmethod
    def interactive(self) -> bool:
        """Whether the backend can be used interactively, that is,
        the backend state is not reset between engine executions.

        Returns:
            bool: ``True`` if the backend supports interactive use
        """

    @property
    @abc.abstractmethod
    def primitives(self) -> Set[str]:
        """The primitive set of quantum operations directly supported
        by the backend.

        Returns:
            set[str]: the quantum primitives the backend supports
        """

    @property
    @abc.abstractmethod
    def decompositions(self) -> Dict[str, Dict]:
        """Quantum operations that are not quantum primitives for the
        backend, but are supported via specified decompositions.

        This should be of the form

        .. code-block:: python

            {'operation_name': {'option1': val, 'option2': val,...}}

        For each operation specified in the dictionary, the
        :meth:`~Operation.decompose` method will be called during
        :class:`Program` compilation, with keyword arguments
        given by the dictionary value.

        Returns:
            dict[str, dict]: the quantum operations that are supported
            by the backend via decomposition
        """

    @property
    def parameter_ranges(self) -> Dict[str, List[List[float]]]:
        """Allowed parameter ranges for supported quantum operations.

        This property is optional.

        Returns:
            dict[str, list]: a dictionary mapping an allowed quantum operation
            to a nested list of the form ``[[p0_min, p0_max], [p1_min, p0_max], ...]``.
            where ``pi`` corresponds to the ``i`` th gate parameter.
        """
        return dict()

    @property
    def graph(self):
        """The allowed circuit topology of the backend device as a directed
        acyclic graph.

        This property is optional; if arbitrary topologies are allowed by the device,
        this will simply return ``None``.

        Returns:
            networkx.DiGraph: a directed acyclic graph
        """
        if self.circuit is None:
            return None

        # returned DAG has all parameters set to 0
        bb = blackbird.loads(self.circuit)

        if bb.is_template():
            params = bb.parameters
            kwargs = {p: 0 for p in params}

            # initialize the topology with all template
            # parameters set to zero
            topology = to_DiGraph(bb(**kwargs))
        else:
            topology = to_DiGraph(bb)

        return topology

    @property
    def circuit(self):
        """The Blackbird circuit that will be accepted by the backend device.

        This property is optional. If arbitrary topologies are allowed by the device,
        **do not define this property**. In such a case, it will simply return ``None``.

        If the device expects a specific template for the received Blackbird
        script, this method will return the serialized Blackbird circuit in string
        form.

        Returns:
            Union[str, None]: Blackbird program or template representing the circuit
        """
        return None
|
[
"noreply@github.com"
] |
zeta1999.noreply@github.com
|
1cbeaf068eba123dc4966e2c3b506aa29148b80b
|
3ae62276c9aad8b9612d3073679b5cf3cb695e38
|
/easyleetcode/leetcodes/Leetcode_105_Construct_Binary_Tree_from_Preorder_and_Inorder_Traversal.py
|
1485bb44b74ef3a8f62d1d7d1e19faff930fb29d
|
[
"Apache-2.0"
] |
permissive
|
gongtian1234/easy_leetcode
|
bc0b33c3c4f61d58a6111d76707903efe0510cb4
|
d2b8eb5d2cafc71ee1ca633ce489c1a52bcc39ce
|
refs/heads/master
| 2022-11-16T17:48:33.596752
| 2020-07-13T02:55:03
| 2020-07-13T02:55:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,921
|
py
|
class TreeNode(object):
    """Binary-tree node: a value plus left/right child links."""

    def __init__(self, x):
        self.val = x        # payload
        self.left = None    # left child, or None
        self.right = None   # right child, or None
class Solution(object):
    """LeetCode 105: rebuild a binary tree from preorder + inorder traversals.

    Two equivalent strategies:
    * ``buildTree``  — index recursion with an inorder value->index map (O(n)).
    * ``buildTree2`` — slicing recursion (simpler, copies sublists).
    """

    def buildTree(self, preorder, inorder):
        """Return the root rebuilt from the two traversals (all values distinct)."""
        n = len(inorder)
        # Map each value to its inorder position for O(1) root lookups.
        inOrderMap = {val: idx for idx, val in enumerate(inorder)}
        return self.buildTreeUtil(preorder, inorder, inOrderMap, 0, n - 1, 0, n - 1)

    def buildTreeUtil(self, preorder, inorder, inOrderMap, pStart, pEnd, iStart, iEnd):
        """Recursive worker over preorder[pStart..pEnd] / inorder[iStart..iEnd]."""
        if pStart > pEnd or iStart > iEnd:
            return None  # empty range -> no subtree

        # The first preorder entry in range is always this subtree's root.
        root = TreeNode(preorder[pStart])
        rootIdx = inOrderMap[root.val]      # root's position in inorder
        left_size = rootIdx - iStart        # node count of the left subtree

        # Left subtree: next `left_size` preorder entries / inorder left of the root.
        root.left = self.buildTreeUtil(
            preorder, inorder, inOrderMap,
            pStart + 1, pStart + left_size, iStart, rootIdx - 1)
        # Right subtree: remaining preorder entries / inorder right of the root.
        root.right = self.buildTreeUtil(
            preorder, inorder, inOrderMap,
            pStart + left_size + 1, pEnd, rootIdx + 1, iEnd)
        return root

    def buildTree2(self, preorder, inorder):
        """Slicing variant. NOTE: pops from ``preorder`` (mutates the argument)."""
        if not preorder:
            return None
        # preorder: root, left..., right...   inorder: left..., root, right...
        root_val = preorder.pop(0)
        node = TreeNode(root_val)
        split = inorder.index(root_val)
        node.left = self.buildTree2(preorder[:split], inorder[:split])
        node.right = self.buildTree2(preorder[split:], inorder[split + 1:])
        return node
|
[
"425776024@qq.com"
] |
425776024@qq.com
|
90904f213074558cd90e413783c1a851ce07f3da
|
55550afe1c18aacba9a481c690755cb7395d35f1
|
/Week_01/G20190343020019/LeetCode_26_0019.py
|
84af779cd76ff44c31d90633db3c8cc0cfbca318
|
[] |
no_license
|
algorithm005-class02/algorithm005-class02
|
eb5c0865fbb2c58362fddcd4fc8f8b9d02bb208c
|
1a1abf5aabdd23755769efaa6c33579bc5b0917b
|
refs/heads/master
| 2020-09-22T11:48:20.613692
| 2020-03-02T05:31:11
| 2020-03-02T05:31:11
| 225,177,649
| 45
| 153
| null | 2020-03-02T05:31:13
| 2019-12-01T14:47:06
|
Java
|
UTF-8
|
Python
| false
| false
| 366
|
py
|
class Solution:
    """LeetCode 26: remove duplicates from a sorted list, in place."""

    # FIX: the original annotated ``nums: List[int]`` without importing ``List``
    # from ``typing``, which raises NameError when the def is executed. A string
    # annotation is never evaluated, so it is safe on every Python version
    # while keeping the same documented type.
    def removeDuplicates(self, nums: 'List[int]') -> int:
        """Compact sorted ``nums`` so each value appears once; return the new length.

        Works in place: after the call the first ``k`` slots of ``nums`` hold
        the de-duplicated values, where ``k`` is the return value.
        """
        size = len(nums)
        if size == 0:
            return 0
        j, pre = 1, nums[0]          # j: write cursor; pre: last kept value
        for i in range(1, size):
            if nums[i] != pre:
                if i != j:
                    nums[j] = nums[i]   # shift the newly-seen value down
                j += 1
                pre = nums[i]
        return j
|
[
"your@email.com"
] |
your@email.com
|
0f03a302c230541b088a7d1a1fe72c11c2e23cb3
|
473035074bd546694d5e3dbe6decb900ba79e034
|
/traffic fluid simulator/backend/env_4_6/model/ExportData.py
|
e92e4e0140bedbeb4290ef2eb08d29b3a966c9a7
|
[] |
no_license
|
johny1614/magazyn
|
35424203036191fb255c410412c195c8f41f0ba5
|
a170fea3aceb20f59716a7b5088ccdcb6eea472f
|
refs/heads/master
| 2022-03-26T01:10:04.472374
| 2019-09-19T16:34:22
| 2019-09-19T16:34:22
| 171,033,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,207
|
py
|
import json
from typing import List
import attr
from numpy.core.multiarray import ndarray
from model.Net import Net
@attr.s(auto_attribs=True)
class ExportData:
    """Bundle of trained nets plus metadata, serialized to JSON for the frontend."""

    learningMethod: str
    learningEpochs: int
    nets: List[Net]
    netName: str
    densityName: str

    def __attrs_post_init__(self):
        # JSON cannot encode numpy arrays, so convert them to plain lists up front.
        for net in self.nets:
            if isinstance(net.densities, ndarray):
                net.densities = net.densities.tolist()
            if isinstance(net.lights, ndarray):
                net.lights = net.lights.tolist()

    def saveToJson(self):
        """Write this object as ``<netName>_<densityName>.json`` under the frontend
        assets directory, trying two relative locations (the second is the
        fallback when the script runs from a deeper working directory).
        """
        # FIX: serialize before opening. The original called json.dumps inside
        # the try block, so a serialization error fell into the bare except and
        # then crashed with NameError on the undefined `jsonData`.
        jsonData = json.dumps(attr.asdict(self))
        filename = self.netName + '_' + self.densityName + '.json'
        try:
            outfile = open('../../front/src/assets/densities/' + filename, 'w')
        except OSError:
            # FIX: narrowed from a bare `except:` — only path problems should
            # trigger the fallback location.
            outfile = open('../../../front/src/assets/densities/' + filename, 'w')
        # FIX: the original never closed the file; `with` guarantees it.
        with outfile:
            outfile.write(jsonData)
def shift(lista, n):
    """Rotate ``lista`` left by ``n`` positions, returning a new list."""
    head, tail = lista[:n], lista[n:]
    return tail + head
|
[
"johny1614@gmail.com"
] |
johny1614@gmail.com
|
1f521210b944fba4b071cab3142d9a054dcff27a
|
07c61596c1fba2e2a7034fe5af9707794ea2e2c1
|
/Hackerrank/Algorithms/The_Time_in_Words.py3
|
6a108c9d2715cc2b096f03b911b89a2ab181b31e
|
[] |
no_license
|
H-Shen/Collection_of_my_coding_practice
|
2fcb2f8fef9451ad4a3a9c063bbf6a34ea5966b4
|
6415552d38a756c9c89de0c774799654c73073a6
|
refs/heads/master
| 2023-08-24T21:19:08.886667
| 2023-08-22T03:47:39
| 2023-08-22T03:47:39
| 180,731,825
| 8
| 1
| null | 2021-08-13T18:25:25
| 2019-04-11T06:48:09
|
C++
|
UTF-8
|
Python
| false
| false
| 983
|
py3
|
#!/bin/python3
import sys
# Number words for 1-20 plus the even tens needed up to fifty-nine.
table = {
    1: 'one', 2: 'two', 3: 'three', 4: 'four',
    5: 'five', 6: 'six', 7: 'seven', 8: 'eight',
    9: 'nine', 10: 'ten', 11: 'eleven', 12: 'twelve',
    13: 'thirteen', 14: 'fourteen', 15: 'fifteen',
    16: 'sixteen', 17: 'seventeen', 18: 'eighteen',
    19: 'nineteen', 20: 'twenty', 30: 'thirty', 40: 'forty',
    50: 'fifty',
}


def handle(n):
    """Spell out an integer minute count (1 <= n <= 59) in English words."""
    # Direct hits: 1-20 and the even tens are stored verbatim in the table.
    if n <= 20 or (n <= 100 and n % 10 == 0):
        return table[n]
    # Otherwise combine the tens word with the units word.
    tens = n // 10 * 10
    return table[tens] + ' ' + table[n - tens]
# Read the hour and minute from stdin and print the time in English words.
h = int(input().strip())
m = int(input().strip())
if m == 0:
    print(table[h] + " o' clock")
elif m == 30:
    print("half past " + table[h])
elif m == 45:
    print("quarter to " + table[h + 1])
elif m == 15:
    print("quarter past " + table[h])
elif m > 30:
    # Past the half hour: count minutes remaining to the next hour.
    print(handle(60 - m) + " minutes to " + table[h + 1])
elif m == 1:
    # Singular "minute" for exactly one minute past.
    print("one minute past " + table[h])
else:
    print(handle(m) + " minutes past " + table[h])
|
[
"haohu.shen@ucalgary.ca"
] |
haohu.shen@ucalgary.ca
|
889178905a0c94d6f492f3c62559edfd6bc207fe
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_quill.py
|
606bcee98c72edaf39b621ab0b0cf03cce527925
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
#calss header
class _QUILL():
def __init__(self,):
self.name = "QUILL"
self.definitions = [u'any of the long sharp pointed hairs on the body of a porcupine', u"a pen made from a bird's feather, used in the past"]
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
1d41eb6ae4fc12acb15c60378e1c758be087de68
|
7cf119239091001cbe687f73018dc6a58b5b1333
|
/datashufflepy-zeus/src/branch_scripts2/NEWS/ZX_ZCGG/ZX_ZCGG_SJS_SJSGG.py
|
88a9dab2b0df420ba808ab19e9438d674c65ae30
|
[
"Apache-2.0"
] |
permissive
|
ILKKAI/dataETL
|
0f5b80c3482994f735f092a1e01fa1009bac4109
|
32f7ec3aaaf32b5074536a615cb9cd5c28bd499c
|
refs/heads/master
| 2022-04-04T19:27:05.747852
| 2020-02-28T11:17:48
| 2020-02-28T11:17:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
# -*- coding: utf-8 -*-
from database._mongodb import MongoClient
def data_shuffle(data):
    """Normalize a crawled announcement record: mirror pdf URLs into ``PDF_``.

    If ``URL_`` points at a pdf file, copy it into the ``PDF_`` field; a falsy
    ``PDF_`` value already present is dropped so empty fields are not persisted.

    Args:
        data (dict): record with an optional 'URL_' key.

    Returns:
        dict: the same dict, modified in place.
    """
    # FIX: the original did data.get('URL_')[-3:], a TypeError when URL_ is absent.
    url = data.get('URL_') or ''
    if url[-3:] == 'pdf':
        data['PDF_'] = url
    # FIX: the original read data['PDF_'] unconditionally, raising KeyError for
    # non-pdf records that never had a PDF_ field.
    if 'PDF_' in data and not data['PDF_']:
        del data['PDF_']
    return data
if __name__ == '__main__':
    # Pull every record for this entity from Mongo and normalize each one.
    # NOTE(review): MongoClient here is the project's wrapper (database._mongodb),
    # not pymongo's — .main() presumably returns the fetched documents; verify.
    main_mongo = MongoClient(entity_code="ZX_ZCGG_SJS_SJSGG", mongo_collection="ZX_ZCGG")
    data_list = main_mongo.main()
    for data in data_list:
        re_data = data_shuffle(data)
        print(re_data)
|
[
"499413642@qq.com"
] |
499413642@qq.com
|
05488b74e06f143a147e1b5d9892a1eb406e1b21
|
a08fc91ecafa7f2b6c8aed7e1ceb33822d4caa49
|
/python/algorithms/tree/segmenttree.py
|
aec0172273542d5f029e5d57384084e6aba33d5d
|
[] |
no_license
|
bryand1/snippets
|
1fcdd4b67809aa27b58e1239d5cca22cfb962f3d
|
f779bf147c420996613b0778e243154cd750c3dd
|
refs/heads/master
| 2023-01-23T18:47:07.389246
| 2020-12-31T20:10:13
| 2020-12-31T20:10:13
| 138,767,383
| 0
| 0
| null | 2023-01-19T13:02:49
| 2018-06-26T16:56:15
|
Python
|
UTF-8
|
Python
| false
| false
| 853
|
py
|
from sys import maxsize
# Sentinel returned for tree ranges that do not overlap the query at all.
minsize = -99999


def maxquery(segtree, qlo, qhi, lo, hi, pos):
    """Return the maximum over query range [qlo, qhi]; node ``pos`` covers [lo, hi]."""
    # Node range fully inside the query: answer is precomputed.
    if qlo <= lo and qhi >= hi:
        return segtree[pos]
    # No overlap: contribute the identity element for max().
    if qlo > hi or qhi < lo:
        return minsize
    # Partial overlap: combine both children.
    mid = (lo + hi) // 2
    left = maxquery(segtree, qlo, qhi, lo, mid, 2 * pos + 1)
    right = maxquery(segtree, qlo, qhi, mid + 1, hi, 2 * pos + 2)
    return max(left, right)
def construct(arr, segtree, lo, hi, pos):
    """Fill ``segtree`` (a max-tree) for ``arr[lo..hi]``; node ``pos`` covers that range."""
    if lo == hi:
        # Leaf: store the array element directly.
        segtree[pos] = arr[lo]
        return
    mid = (lo + hi) // 2
    left_child, right_child = 2 * pos + 1, 2 * pos + 2
    construct(arr, segtree, lo, mid, left_child)
    construct(arr, segtree, mid + 1, hi, right_child)
    # Internal node: maximum of its two children.
    segtree[pos] = max(segtree[left_child], segtree[right_child])
if __name__ == '__main__':
    # Demo: build a max segment tree and query the maximum over indices 2..4.
    A = [-1, 0, 3, 2, 5]
    # 2*n slots suffice for this input; a safe general bound is 4*n.
    tree = [minsize] * 2 * (len(A))
    construct(A, tree, 0, len(A) - 1, 0)
    print(maxquery(tree, 2, 4, 0, 4, 0))
    print(tree)
|
[
"bryand1@gmail.com"
] |
bryand1@gmail.com
|
7cf482daf8a47cd604c5fa2b83bb75aa350f97dd
|
aee5f372ba1b5fbb1c8acf6080c4c86ae195c83f
|
/cern-stubs/lsa/client/rest/cern/api/v1/feign/__init__.pyi
|
96054066f4590355d07f8781d938bb4307bcfd26
|
[] |
no_license
|
rdemaria/pjlsa
|
25221ae4a4b6a4abed737a41a4cafe7376e8829f
|
e64589ab2203338db4253fbc05ff5131142dfd5f
|
refs/heads/master
| 2022-09-03T13:18:05.290012
| 2022-08-16T13:45:57
| 2022-08-16T13:45:57
| 51,926,309
| 1
| 5
| null | 2019-07-11T11:50:44
| 2016-02-17T13:56:40
|
Python
|
UTF-8
|
Python
| false
| false
| 5,523
|
pyi
|
import cern.lsa.client.rest.api.v1.dto
import cern.lsa.client.rest.api.v1.feign
import cern.lsa.domain.cern.settings
import java.util
import typing
# Auto-generated stub: INCA property-field-info REST endpoints.
class IncaFeignService(cern.lsa.client.rest.api.v1.feign.FeignService):
    """
    public interface IncaFeignService extends cern.lsa.client.rest.api.v1.feign.FeignService
    """
    def findIncaPropertyFieldInfos(self, incaPropertyFieldInfosRequestDto: cern.lsa.client.rest.api.v1.dto.IncaPropertyFieldInfosRequestDto) -> java.util.Set[cern.lsa.client.rest.api.v1.dto.IncaPropertyFieldInfoDto]: ...
    def saveIncaPropertyFieldInfos(self, collection: typing.Union[java.util.Collection[cern.lsa.client.rest.api.v1.dto.IncaPropertyFieldInfoDto], typing.Sequence[cern.lsa.client.rest.api.v1.dto.IncaPropertyFieldInfoDto]]) -> None: ...
    class Urls:
        # Endpoint URL constants for this service.
        FIND_INCA_PROPERTY_FIELDS_INFO: typing.ClassVar[str] = ...
        SAVE_INCA_PROPERTY_FIELDS_INFO: typing.ClassVar[str] = ...
# Auto-generated stub: parameter / parameter-group / hierarchy REST endpoints.
class ParameterFeignService(cern.lsa.client.rest.api.v1.feign.FeignService):
    """
    public interface ParameterFeignService extends cern.lsa.client.rest.api.v1.feign.FeignService
    """
    def addParametersToParameterGroup(self, long: int, collection: typing.Union[java.util.Collection[int], typing.Sequence[int]]) -> None: ...
    def deleteCriticalProperty(self, long: int, string: str) -> None: ...
    def deleteParameterGroup(self, long: int) -> None: ...
    def deleteParameterTypes(self, collection: typing.Union[java.util.Collection[int], typing.Sequence[int]]) -> None: ...
    def deleteParameters(self, collection: typing.Union[java.util.Collection[int], typing.Sequence[int]]) -> None: ...
    def findAllHierarchies(self) -> java.util.List[str]: ...
    def findCommonHierarchyNames(self, list: java.util.List[int]) -> java.util.Set[str]: ...
    def findHierarchyNames(self, list: java.util.List[int]) -> java.util.Set[str]: ...
    def findMakeRuleForParameterRelation(self, long: int, long2: int) -> cern.lsa.client.rest.api.v1.dto.MakeRuleConfigInfoDto: ...
    def findParameterGroupsByAccelerator(self, string: str) -> java.util.Set[cern.lsa.client.rest.api.v1.dto.ParameterGroupDto]: ...
    def findParameterTrees(self, parameterTreesRequestDto: cern.lsa.client.rest.api.v1.dto.ParameterTreesRequestDto) -> cern.lsa.client.rest.api.v1.dto.ParameterTreeDataDto: ...
    def findParameterTypes(self, parameterTypesRequestDto: cern.lsa.client.rest.api.v1.dto.ParameterTypesRequestDto) -> java.util.Set[cern.lsa.client.rest.api.v1.dto.ParameterTypeDto]: ...
    def findParameters(self, parametersRequestDto: cern.lsa.client.rest.api.v1.dto.ParametersRequestDto) -> java.util.Set[cern.lsa.client.rest.api.v1.dto.ParameterDto]: ...
    def findParametersWithSettings(self, long: int, string: str) -> java.util.Set[cern.lsa.client.rest.api.v1.dto.ParameterDto]: ...
    def findParametersWithoutSettings(self, long: int, string: str) -> java.util.Set[cern.lsa.client.rest.api.v1.dto.ParameterDto]: ...
    def getMaxDelta(self, long: int) -> float: ...
    def removeParametersFromParameterGroup(self, long: int, collection: typing.Union[java.util.Collection[int], typing.Sequence[int]]) -> None: ...
    def saveCriticalProperty(self, propertyAndDeviceDto: cern.lsa.client.rest.api.v1.dto.PropertyAndDeviceDto) -> None: ...
    def saveParameterGroup(self, parameterGroupDto: cern.lsa.client.rest.api.v1.dto.ParameterGroupDto) -> None: ...
    def saveParameterTypes(self, collection: typing.Union[java.util.Collection[cern.lsa.client.rest.api.v1.dto.ParameterTypeDto], typing.Sequence[cern.lsa.client.rest.api.v1.dto.ParameterTypeDto]]) -> None: ...
    def saveParameters(self, list: java.util.List[cern.lsa.client.rest.api.v1.dto.ParameterAttributesDto]) -> None: ...
    class Urls:
        # Endpoint URL constants for this service.
        FIND_PARAMETERS_URL: typing.ClassVar[str] = ...
        PARAMETERS_URL: typing.ClassVar[str] = ...
        PARAMETER_TYPES_URL: typing.ClassVar[str] = ...
        FIND_ALL_HIERARCHIES_URL: typing.ClassVar[str] = ...
        FIND_HIERARCHIES_BY_PARAMETERS_URL: typing.ClassVar[str] = ...
        FIND_COMMON_HIERARCHIES_BY_PARAMETERS_URL: typing.ClassVar[str] = ...
        SAVE_PARAMETER_RELATIONS: typing.ClassVar[str] = ...
        CRITICAL_PROPERTIES_URL: typing.ClassVar[str] = ...
        PARAMETER_GROUPS_URL: typing.ClassVar[str] = ...
        PARAMETER_GROUP_BY_ID_URL: typing.ClassVar[str] = ...
        PARAMETER_GROUP_PARAMETERS_URL: typing.ClassVar[str] = ...
        PARAMETER_RELATION_MAKE_RULE_URL: typing.ClassVar[str] = ...
        FIND_PARAMETER_TREES_URL: typing.ClassVar[str] = ...
        PARAMETERS_WITHOUT_SETTINGS_URL: typing.ClassVar[str] = ...
        PARAMETERS_WITH_SETTINGS_URL: typing.ClassVar[str] = ...
        PARAMETER_MAX_DELTA_URL: typing.ClassVar[str] = ...
# Auto-generated stub: re-drive device settings endpoint.
class ReDriveSettingsFeignService(cern.lsa.client.rest.api.v1.feign.FeignService):
    """
    public interface ReDriveSettingsFeignService extends cern.lsa.client.rest.api.v1.feign.FeignService
    """
    def reDriveDeviceSettings(self, reDriveRequest: cern.lsa.domain.cern.settings.ReDriveRequest) -> cern.lsa.domain.cern.settings.ReDriveResponse: ...
    class Urls:
        # Endpoint URL constant for this service.
        REDRIVE_DEVICES: typing.ClassVar[str] = ...
class __module_protocol__(typing.Protocol):
    """Static typing view of the Java package as exposed through JPype."""
    # A module protocol which reflects the result of ``jp.JPackage("cern.lsa.client.rest.cern.api.v1.feign")``.
    IncaFeignService: typing.Type[IncaFeignService]
    ParameterFeignService: typing.Type[ParameterFeignService]
    ReDriveSettingsFeignService: typing.Type[ReDriveSettingsFeignService]
|
[
"michi.hostettler@cern.ch"
] |
michi.hostettler@cern.ch
|
9a36090e137b6c733f445cb587a0720eccd62adb
|
3bb70650b4b83e4653dcc18c8233c106c7a5611a
|
/sale_shortcut/shortcut_getter.py
|
44c35030e8a22f587ada781c66cd6059851922bb
|
[] |
no_license
|
khanhlu2013/pos_connect_code
|
48e736a6b1c5ca6a5c4ff39d842d8a93f66e67ef
|
fdf70de858c10b175832af31ecc0cf770d028396
|
refs/heads/master
| 2023-04-08T02:35:46.181265
| 2016-10-18T21:12:51
| 2016-10-18T21:12:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
py
|
from sale_shortcut.models import Parent,Child
def get_shortcut(id):
    """Fetch one Parent by primary key with its children prefetched in the same query set."""
    qs = Parent.objects.prefetch_related('child_set')
    return qs.get(pk=id)
def get_shorcut_lst(store_id):
    """Return all Parents for a store, children prefetched. (sic: function-name typo kept for callers.)"""
    qs = Parent.objects.filter(store_id=store_id)
    return qs.prefetch_related('child_set')
|
[
"khanhlu2013@gmail.com"
] |
khanhlu2013@gmail.com
|
f3b575a591741af71ff96affecc01aa7f7b1eeef
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/109/usersdata/188/63757/submittedfiles/av2_p3_civil.py
|
a916fc1a714178afe44fb0829312c99cd7cc0417
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
def linhas(a, m):
    """Return the sum of row ``a`` of the 2-D numpy array ``m``."""
    total = 0
    for col in range(m.shape[1]):
        total += m[a, col]
    return total
def colunas(a, m):
    """Return the sum of column ``a`` of the 2-D numpy array ``m``."""
    total = 0
    for row in range(m.shape[0]):
        total += m[row, a]
    return total
# Read the matrix dimension and a target cell (x, y) from the user.
h = int(input("Digite a dimensão da matriz:"))
x = int(input("Digite x:"))
y = int(input("Digite y:"))
q = np.zeros((h, h))
print(q)
# Fill the matrix entry by entry from user input.
for i in range(0, q.shape[0], 1):
    for j in range(0, q.shape[1], 1):
        q[i, j] = float(input("Digite o termo:"))
# Sum of row x plus column y, excluding the crossing cell q[x, y]
# (it appears once in each sum, so it is subtracted twice).
b = (linhas(x, q) + colunas(y, q) - (2 * q[x, y]))
# FIX: the original ended with the bare name `printJ`, a NameError;
# the intent was clearly to print the computed result.
print(b)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
71962d7e86cb76c775309a6190d8d73cdbbb4cf6
|
184bcb482ea5f0f13aa35275847b0e7dd56d8219
|
/tests/test_cli.py
|
75f2834a1bf6022a049f31136d917db671e85112
|
[
"LPPL-1.3c",
"MIT"
] |
permissive
|
svenkreiss/unicodeit
|
643a3ead7fc69160eff82099b33c25ba5d01de28
|
d7f3f0cb9b7f8c3abf8e47ea6158b2ee1f6cbf05
|
refs/heads/main
| 2023-08-23T07:44:45.029170
| 2023-03-12T09:21:04
| 2023-03-12T09:21:04
| 10,319,674
| 234
| 34
|
NOASSERTION
| 2023-07-18T22:48:57
| 2013-05-27T17:52:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,743
|
py
|
import subprocess
import sys
import pytest
# Windows installs expose the launcher as plain "python"; elsewhere use python3.
PYTHON = 'python' if sys.platform == 'win32' else 'python3'
def test_cli_symbols1():
    """CLI converts a single LaTeX macro to its Unicode symbol."""
    args = [PYTHON, '-m', 'unicodeit.cli', '\\Sigma']
    out = subprocess.check_output(args)
    print(out.decode())
    assert out.decode().strip() == 'Σ'
def test_cli_symbols2():
    """CLI handles mixed text, subscripts and quoted segments in one argument."""
    args = [
        PYTHON, '-m', 'unicodeit.cli',
        'def\\Sigma_{01234}abc\\alpha_{567}ggg\\beta_{1234}lll "\\Sigma e_0 e^3"',
    ]
    out = subprocess.check_output(args)
    print(out.decode())
    assert out.decode().strip() == 'defΣ₀₁₂₃₄abcα₅₆₇gggβ₁₂₃₄lll "Σ e₀ e³"'
def test_cli_symbols3():
    """CLI handles superscripts and emoticon shortcuts."""
    args = [
        PYTHON, '-m', 'unicodeit.cli',
        'def^{01234}abc\\alpha^{567abc} "\\:) \\:G"',
    ]
    out = subprocess.check_output(args)
    print(out.decode())
    assert out.decode().strip() == 'def⁰¹²³⁴abcα⁵⁶⁷ᵃᵇᶜ "☺ ㋡"'
@pytest.mark.skip('this was already broken')
def test_cli_symbols4():
    """Escaped parentheses inside a superscript group (known-broken case)."""
    args = [
        PYTHON, '-m', 'unicodeit.cli',
        'ggg\\beta^{1234=\\(5\\)}lll',
    ]
    out = subprocess.check_output(args)
    print(out.decode())
    assert out.decode().strip() == 'Σ'
def test_subscripts():
    """Greek letters inside a subscript group map to their subscript forms."""
    args = [
        PYTHON, '-m', 'unicodeit.cli',
        'a_{\\beta\\gamma\\varphi\\rho\\chi}',
    ]
    out = subprocess.check_output(args)
    print(out.decode())
    assert out.decode().strip() == 'aᵦᵧᵩᵨᵪ'
def test_superscripts():
    """Latin and Greek letters plus angle brackets map to superscript forms."""
    args = [
        PYTHON, '-m', 'unicodeit.cli',
        'm^{ABDEGHIJKLMNOPRTUWabcdefghiklmnoprstuvwxyz\\beta\\gamma\\delta\\varphi\\chi<>}',
    ]
    out = subprocess.check_output(args)
    print(out.decode())
    assert out.decode().strip() == 'mᴬᴮᴰᴱᴳᴴᴵᴶᴷᴸᴹᴺᴼᴾᴿᵀᵁᵂᵃᵇᶜᵈᵉᶠᵍʰⁱᵏˡᵐⁿᵒᵖʳˢᵗᵘᵛʷˣʸᶻᵝᵞᵟᵠᵡ˂˃'
|
[
"me@svenkreiss.com"
] |
me@svenkreiss.com
|
192e0a22a39afd8de4675f9032f1eaadfbe026fb
|
0cb064f4e2f5b27a189b3e7631bb19f7842e150b
|
/zvt/recorders/eastmoney/dividend_financing/spo_detail_recorder.py
|
5afee75a91dc41f48230b292ccc4813ddf9fab99
|
[
"MIT"
] |
permissive
|
stellar2016/zvt
|
35e514927302cffb3577f3535344e2ca55ec9abd
|
f6c2c05c136b14c0c0f239960f08f85bcdee7c28
|
refs/heads/master
| 2021-04-19T13:18:01.020365
| 2020-03-22T14:44:26
| 2020-03-22T14:44:26
| 249,607,341
| 0
| 0
|
MIT
| 2020-03-24T03:51:06
| 2020-03-24T03:51:05
| null |
UTF-8
|
Python
| false
| false
| 2,077
|
py
|
# -*- coding: utf-8 -*-
from zvdata.utils.pd_utils import pd_is_not_null
from zvdata.utils.time_utils import now_pd_timestamp
from zvdata.utils.utils import to_float
from zvt.api.api import get_dividend_financing, get_spo_detail
from zvt.domain import SpoDetail, DividendFinancing
from zvt.recorders.eastmoney.common import EastmoneyPageabeDataRecorder
class SPODetailRecorder(EastmoneyPageabeDataRecorder):
    """Records seasoned public offering (SPO) details from the Eastmoney API.

    NOTE(review): the meanings given for the pinyin payload keys below are
    inferred from their names — confirm against the Eastmoney API docs.
    """

    data_schema = SpoDetail
    url = 'https://emh5.eastmoney.com/api/FenHongRongZi/GetZengFaMingXiList'
    page_url = url
    path_fields = ['ZengFaMingXiList']

    def get_original_time_field(self):
        # Timestamp field in the upstream payload.
        return 'ZengFaShiJian'

    def get_data_map(self):
        # Map upstream payload keys to schema columns, coercing values to float.
        return {
            "spo_issues": ("ShiJiZengFa", to_float),
            "spo_price": ("ZengFaJiaGe", to_float),
            "spo_raising_fund": ("ShiJiMuJi", to_float)
        }

    def on_finish(self):
        # Back-fill DividendFinancing.spo_raising_fund for rows that are still
        # NULL, by summing the recorded SPO details of the same calendar year.
        last_year = str(now_pd_timestamp().year)
        codes = [item.code for item in self.entities]
        need_filleds = get_dividend_financing(provider=self.provider, codes=codes,
                                              return_type='domain',
                                              session=self.session,
                                              filters=[DividendFinancing.spo_raising_fund.is_(None)],
                                              end_timestamp=last_year)
        for item in need_filleds:
            df = get_spo_detail(provider=self.provider, entity_id=item.entity_id,
                                columns=[SpoDetail.timestamp, SpoDetail.spo_raising_fund],
                                start_timestamp=item.timestamp,
                                end_timestamp="{}-12-31".format(item.timestamp.year))
            if pd_is_not_null(df):
                # Sum every SPO raising within the same year.
                item.spo_raising_fund = df['spo_raising_fund'].sum()
        self.session.commit()
        super().on_finish()
# Public API of this module.
__all__ = ['SPODetailRecorder']

if __name__ == '__main__':
    # init_log('spo_detail.log')
    # Ad-hoc run against a single stock code.
    recorder = SPODetailRecorder(codes=['000999'])
    recorder.run()
|
[
"5533061@qq.com"
] |
5533061@qq.com
|
3ed2beb303c28748c85454eea580163c3338d096
|
3ccd3465c241071827ad98bac61f85d8405bffc9
|
/test/talkytest/clients/voice/tts/test_osxsay.py
|
eb28c42d580273e2a2717801aba2380cd9cfccaf
|
[
"MIT"
] |
permissive
|
keiffster/talk-y
|
a12e2590f3170af1debb4add9c27fd12adb279fa
|
dd2bb2a816c868770d9bec8f02ee9f2bbfcbae2a
|
refs/heads/master
| 2021-06-25T08:32:39.020921
| 2020-02-16T17:18:45
| 2020-02-16T17:18:45
| 102,565,196
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
import unittest
from talky.clients.voice.tts.osxsay import OSXSayTextToSpeach
from talky.config.sections.client.voice.voice import VoiceConfiguration
class OSXSayTextToSpeachTests(unittest.TestCase):
    """Smoke test for the OSX `say`-based text-to-speech client."""

    def test_init(self):
        # Constructing the engine with a default voice configuration must succeed.
        cfg = VoiceConfiguration()
        engine = OSXSayTextToSpeach(cfg)
        self.assertIsNotNone(engine)
|
[
"keith@keithsterling.com"
] |
keith@keithsterling.com
|
af133b71314acc64bdbd6be37d282d55ba8fde6d
|
5b3caf64b77161748d0929d244798a8fb914d9c5
|
/Python Excel Examples/WorksheetsApiDemo/background/deleteBackground.py
|
e5297d90f3031e8342735c46a68eb63f86226e60
|
[] |
no_license
|
EiceblueCloud/Spire.Cloud.Excel
|
0d56864991eaf8d44c38f21af70db614b1d804b7
|
d9845d5cefd15a3ab408b2c9f80828a4767e2b82
|
refs/heads/master
| 2021-07-20T23:44:39.068568
| 2021-07-15T03:04:49
| 2021-07-15T03:04:49
| 230,225,396
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
import spirecloudexcel
from spirecloudexcel.configuration import Configuration as ExcelConfiguration

# Spire.Cloud credentials -- replace with the appId/appKey issued for your account.
appId = "your id"
appKey = "your key"
baseUrl="https://api.e-iceblue.cn"
configuration = ExcelConfiguration(appId, appKey,baseUrl)
api = spirecloudexcel.api.worksheets_api.WorksheetsApi(configuration)
# Remote workbook location: file name, storage ("" = default storage) and folder.
name = "DeleteBackground.xlsx"
storage = ""
folder = "ExcelDocument"
sheet_name = "Sheet1"
# Delete the background image of the given worksheet in the stored workbook.
api.delete_background(name, sheet_name, folder=folder, storage=storage)
|
[
"noreply@github.com"
] |
EiceblueCloud.noreply@github.com
|
a8510ed4c832d93162788220a8e618a6ac40439c
|
4a09376ef4ddd8cd5752e79bb0f3c18de6906455
|
/iHubCrowdSourcing/tmpScripts/GetUniques.py
|
9d7f8666fb8302f725b05fcd7aee852179ddeb03
|
[] |
no_license
|
GBelzoni/iHub
|
33f36500f090fbfd962977ae266117be499b7cb5
|
e816954bfca2127fdaaf750aef8b0442b287003c
|
refs/heads/master
| 2021-01-18T14:45:09.589406
| 2013-09-06T22:52:15
| 2013-09-06T22:52:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
"""Read modeltree.txt, lower-case each line, and write the unique lines
to unique.txt. Set iteration order is arbitrary, as in the original."""
# Context managers guarantee the files are closed even on error
# (the original leaked `fout` on any exception after opening it).
with open('modeltree.txt') as fin:
    lines = fin.readlines()

# Lower-case so lines differing only in case collapse to one entry.
lines2 = [line.lower() for line in lines]
uniques = set(lines2)

# print(x) with a single argument behaves identically on Python 2 and 3,
# unlike the bare print statement used before (a SyntaxError on Python 3).
print(uniques)

with open('unique.txt', 'w') as fout:
    fout.writelines(uniques)
|
[
"patrickhcostello@gmail.com"
] |
patrickhcostello@gmail.com
|
2a99c1e567547ad471064a62fa5571a65b29f715
|
71324aca11e16d6da17b0440e72d0107f5af6e04
|
/todo_vue_restful/todo_with_vue_and_restful/todo/migrations/0001_initial.py
|
18a12c83dc377d8c7d800fdd1d51722e1aa0f4de
|
[
"MIT"
] |
permissive
|
n3k0fi5t/Django_Tutorial
|
6bad82a919d1de0162b34f4c7f753cd126b05cc3
|
e3953335ca88fe22c68268fd76afb7c4f9bbb55f
|
refs/heads/master
| 2023-02-16T07:56:56.416031
| 2021-01-11T23:17:33
| 2021-01-11T23:17:33
| 291,436,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,136
|
py
|
# Generated by Django 3.1 on 2020-12-14 00:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration: creates the `todo_item` table, ordered newest-first,
    # with each item owned by a user (cascade-deleted with the user).

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='TodoItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                ('content', models.TextField()),
                # create_time is set once on insert; finish_time refreshes on
                # every save (auto_now), not only when is_finished flips.
                ('create_time', models.DateTimeField(auto_now_add=True)),
                ('finish_time', models.DateTimeField(auto_now=True)),
                ('is_finished', models.BooleanField(default=False)),
                ('user', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'todo_item',
                'ordering': ['-create_time'],
            },
        ),
    ]
|
[
"r05922078@ntu.edu.tw"
] |
r05922078@ntu.edu.tw
|
55011e7d72f18177422f55514b292382081f4dcd
|
2caf6885511af24443e22aaa43cd679d694f6f80
|
/note/my_note/first_month/day06/demo01.py
|
18fab09a906c452f8cebed4af58ddeba84253c43
|
[] |
no_license
|
nandadao/Python_note
|
7f9ba54a73af05c935b4f7e24cacb728859a6c69
|
abddfc2e9a1704c88867cff1898c9251f59d4fb5
|
refs/heads/master
| 2020-11-25T18:29:50.607670
| 2019-12-19T01:28:02
| 2019-12-19T01:28:02
| 228,793,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,153
|
py
|
"""
猜拳
石头 剪刀
剪刀 布
布 石头
随机产生
"""
# import random
# win = ("石头剪刀", "剪刀布", "布石头")
# same = ("石头石头", "剪刀剪刀", "布布")
# choice = ("石头", "剪刀", "布")
# pc1 = choice[random.randint(0, 2)]
# # pc2 = choice[random.randint(0, 2)]
# pc2 = input("请出拳:")
# print(str(pc1)+str(pc2))
# # if str(pc1)+str(pc2) in win or str(pc2)+str(pc1) in win:
# if str(pc2)+str(pc1) in win:
# print("获胜")
# elif str(pc2)+str(pc1) in same:
# print("相同重新开始")
# else:
# print("你输了")
# 统一管理多个数据 :思想很重要
# import random
# tuple_win = (
# ("石头", "剪刀"),
# ("剪刀", "布"),
# ("布", "石头"),
# )
# tuple_item = ("石头", "剪刀", "布")
#
# item_input = input("请出拳:")
# # random.randint(0, 2) # 生成0 1 2
# index_system = random.randint(0, 2)
# item_system = tuple_item[index_system]
#
# if item_input == item_system:
# print("平局")
# elif (item_input, item_system) in tuple_win:
# print("你获胜")
# else:
# print("你失败")
|
[
"1361335953@qq.com"
] |
1361335953@qq.com
|
fb873db4bf3aa718c82d101dda25aca24cd84ce9
|
4edadc6b0c733b208df760e8491b1f1808ed4395
|
/image process/plot_loss.py
|
9bdbe07e304b8e5f2c385c95b6d5b208c45e8f10
|
[] |
no_license
|
RichardcLee/Expression_Transformation
|
d3f8a1bd0b11071f3f085efe821fabc716e617e6
|
ae849789a6c77e4cec0909c0c490305ad13ba06d
|
refs/heads/master
| 2021-04-11T04:51:02.496921
| 2020-09-16T08:21:41
| 2020-09-16T08:21:41
| 248,993,500
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,005
|
py
|
"""Parse a GANimation training log and plot each loss term over steps."""
from matplotlib import pyplot as plt
import re

plt.rcParams['font.sans-serif']=['SimHei']  # render CJK characters in labels
plt.rcParams['axes.unicode_minus']=False    # render the minus sign correctly

lines = []
with open(r"C:\Users\81955\Desktop\ckpts\face\ganimation\200513_232207\logs.txt", "r+") as f:
    lines = f.readlines()

# dis_fake      WGAN-GP adversarial loss, 2nd term (larger / positive is better)
# dis_real      WGAN-GP adversarial loss, 1st term (smaller / negative is better)
# dis_real_aus  conditional expression (AU) loss, 2nd term
# gen_rec       cycle-consistency loss
# dis           discriminator loss
# gen           generator loss
loss = {
    "dis_fake": [],
    "dis_real": [],
    "dis_real_aus": [],
    "gen_rec": [],
    'dis': [],
    'gen': [],
    "total": []
}
for line in lines:
    a, b, c, d = float(re.findall("dis_fake:(.*?)\|", line)[0].strip()), float(re.findall("dis_real:(.*?)\|", line)[0].strip()), float(re.findall("dis_real_aus:(.*?)\|", line)[0].strip()), float(re.findall("gen_rec:(.*?)\|", line)[0].strip())
    e, f = float(re.findall("dis:(.*?)\|", line)[0].strip()), float(re.findall("gen:(.*?)\|", line)[0].strip())
    loss["dis_fake"].append(a)
    loss["dis_real"].append(b)
    loss["dis_real_aus"].append(c)
    # Bug fix: the original appended d (gen_rec) here, so the "dis loss"
    # subplot silently re-plotted the cycle-consistency curve; e holds the
    # parsed discriminator loss and was never used.
    loss["dis"].append(e)
    loss["gen"].append(f)
    loss["gen_rec"].append(d)
    loss["total"].append(10*d + 1*(a+b) + 160*c)
# print(loss)

plt.figure(dpi=120)
plt.tight_layout()
plt.subplots_adjust(wspace=0.45, hspace=0.5)  # spacing between subplots
xy = ["321","322", "323", "324", "325", "326"]
widths = [0.07, 0.07, 0.07, 0.09, 0.09, 0.07]
labels = ['adversarial loss 2', 'adversarial loss 1', 'condition loss', 'cycle consistency loss', 'dis loss', 'gen loss', 'total loss']
ticks_y = [[0, 1, 2, 3, 4, 5], [-5, -4, -3, -2, -1, 0], [0, 0.004, 0.008, 0.012, 0.016], [0, 0.1, 0.2, 0.3, 0.4], [0, 0.1, 0.2, 0.3, 0.4], [-3, -2, -1, 0, 1, 2, 3, 4 ,5, 6]]
ticks_x = ['0', '1w', '2w', '3w', '4w']
scale_x = [0, 10000, 20000, 30000, 40000]
idx = 0
space = 10  # plot every 10th point so the curves do not overcrowd
step = [i for i in range(len(loss["dis_fake"]))]  # step indices (x axis)
fontsize = 10
for name, val in loss.items():
    if idx == 6:
        continue  # "total" is plotted separately below
    plt.subplot(xy[idx])
    plt.title(labels[idx], fontsize=fontsize+2)
    plt.plot(step[::space], val[::space], linewidth=widths[idx], color='k')  # label=labels[idx]
    # plt.legend(loc='best')
    if idx == 4 or idx == 5:
        plt.xlabel("step", fontsize=fontsize-1)
    plt.ylabel("loss value", fontsize=fontsize-1)
    # tick label font sizes
    plt.xticks(scale_x, ticks_x, fontsize=fontsize-1)
    plt.yticks(ticks_y[idx], fontsize=fontsize-1)
    idx += 1
plt.savefig("1.jpg")
plt.show()

# Separate, larger figure for the combined total loss.
fontsize = 20
plt.figure(dpi=80)
plt.plot(step[::space], loss['total'][::space], linewidth=0.2, color='k')
plt.xlabel("step", fontsize=fontsize-6)
plt.ylabel("loss value", fontsize=fontsize-6)
# tick label font sizes
plt.xticks(scale_x, ticks_x, fontsize=fontsize-6)
plt.yticks(fontsize=fontsize-1)
plt.savefig("2.jpg")
plt.show()
|
[
"0yunhow@gmail.com"
] |
0yunhow@gmail.com
|
ff2d90b1f5ac7311985425547060f8d42ed0a4e2
|
86100df5db058ea25b1859b6d2d4eafef720bede
|
/dirWatch.py
|
f7e991a8815c3efb30907af01c6dd56ae91c92e1
|
[] |
no_license
|
SniPE7/MonitorPy
|
1afb156d1d8185158012e54bb3c387cfde29c7cd
|
e3f7aa672a2909abfa080bf3db9b4ff56bd6b97e
|
refs/heads/master
| 2020-12-24T16:42:52.899784
| 2014-12-15T11:32:58
| 2014-12-15T11:32:58
| 28,030,861
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
#!/usr/bin/env python
import time
from time import gmtime, strftime
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class Handler(FileSystemEventHandler):
    # Prints each filesystem event prefixed with a UTC timestamp.
    # NOTE: Python 2 print statements -- this script is Python 2 only.
    def on_created(self, event):
        # A file or directory appeared under the watched path.
        print strftime("%Y-%m-%d %H:%M:%S", gmtime()), event
    def on_deleted(self, event):
        # A file or directory was removed.
        print strftime("%Y-%m-%d %H:%M:%S", gmtime()), event
    def on_moved(self, event):
        # A file or directory was renamed/moved.
        print strftime("%Y-%m-%d %H:%M:%S", gmtime()), event
# Watch /var recursively and log create/delete/move events until Ctrl-C.
observer = Observer()
observer.schedule(Handler(), path='/var', recursive=True)
observer.start()
try:
    # The observer runs in its own thread; keep the main thread alive.
    while True:
        time.sleep(0.1)
except KeyboardInterrupt:
    observer.stop()
observer.join()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
5b0c427c59c3f900fc2f681738a7253d68c9bc70
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/110_concurrency_parallelism/_exercises/templates/Mastering Concurrency in Python/Chapter03/example2.py
|
86fd9c8132767986c2d2392ff9d84973ea193f0b
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 642
|
py
|
# # ch3/example2.py
#
# ______ _th.. __ thread
# ____ ma__ ______ sqrt
#
# ___ is_prime x
# __ ? < 2
# print('@ is not a prime number.' ?
#
# ____ ? __ 2
# print('@ is a prime number.' ?
#
# ____ ? % 2 __ 0
# print('@ is not a prime number.' ?
#
# ____
# limit _ __. sq ? + 1
# ___ i __ ra.. 3 ? 2
# __ x % i __ 0
# print('@ is not a prime number.' ?
# r_
#
# print('@ is a prime number.' ?
#
# my_input _ 2, 193, 323, 1327, 433785907
#
# ___ x __ ?
# ?.s_n_t.. ? ?
#
# a _ __..('Type something to quit: \n')
# print('Finished.')
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
395326db8f49bcc62b832e19268b8b29aecfb822
|
1b8ae90527e93aab7f80ac7b908a5eefe1b1384e
|
/Data_Structures/queue_with_max.py
|
5423249c1576b9d2c695a000e3a2f44bc7861135
|
[] |
no_license
|
Frikster/CodingNotebook
|
4cbdff626e2b86fde45223775d27078291d41621
|
c371cd43bcbac02cb915a9620b291d013d8fb485
|
refs/heads/master
| 2020-04-14T22:53:58.725429
| 2019-05-11T21:44:54
| 2019-05-11T21:44:54
| 164,182,563
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,857
|
py
|
# Implement a queue with #enqueue and #dequeue, as well as a #max API,
# a method which returns the maximum element still in the queue. This
# is trivial to do by spending O(n) time upon dequeuing.
# Can you do it in O(1) amortized? Maybe use an auxiliary storage structure?
import pdb
import unittest
from Data_Structures.ring_buffer import RingBuffer
from Data_Structures.min_max_stack_queue import MinMaxStackQueue
class QueueWithMax:
    """FIFO queue exposing max() in O(1) amortized time.

    Thin wrapper delegating to MinMaxStackQueue, which maintains the
    running maximum alongside the elements.
    """
    def __init__(self):
        self.store = MinMaxStackQueue()

    def enqueue(self, val):
        """Append val to the back of the queue."""
        self.store.enqueue(val)

    def dequeue(self):
        """Remove and return the front element."""
        return self.store.dequeue()

    def max(self):
        """Return the largest element, or -inf when the queue is empty."""
        if len(self) == 0: return float("-inf")
        return self.store.max()

    def __len__(self):
        return len(self.store)
class Queue:
    """Plain FIFO queue delegating storage to a RingBuffer."""
    def __init__(self):
        self.store = RingBuffer()

    def enqueue(self, val):
        """Append val to the back of the queue."""
        self.store.append(val)

    def dequeue(self):
        """Remove and return the front element (RingBuffer.shift)."""
        return self.store.shift()

    def __len__(self):
        return len(self.store)

    def __str__(self):
        return self.store.__str__()
class Test(unittest.TestCase):
    # NOTE(review): these "tests" only print state for manual inspection;
    # they contain no assertions and always pass.
    def test_queue_with_max(self):
        q = QueueWithMax()
        print(q.max())
        q.enqueue(5)
        print(q.max())
        q.enqueue(1)
        print(q.max())
        q.enqueue(50)
        print(q.max())
        q.enqueue(5)
        print(q.max())
        q.dequeue()
        q.dequeue()
        print(q.max())
        q.dequeue()
        print(q.max())

    def test_queue(self):
        q = Queue()
        print(q)
        q.enqueue(5)
        print(q)
        q.enqueue(1)
        print(q)
        q.enqueue(50)
        print(q)
        q.enqueue(5)
        print(q)
        q.dequeue()
        q.dequeue()
        print(q)
        q.dequeue()
        print(q)
if __name__ == "__main__":
unittest.main()
|
[
"dirk.haupt@gmail.com"
] |
dirk.haupt@gmail.com
|
91a0fd3c2c049c06e58486ec8a171240d7f057f8
|
e748e6d96aace1c9149327f384e0de07d743715a
|
/challange/largestPlateau.py
|
14569e16c073b32674b62202fc1a064fd2c5fbe3
|
[] |
no_license
|
jorzel/codefights
|
cdfc4cb32261b064ffc605bfd927bf237885b5d2
|
28b62a2ae3809f0eb487198044c0fe74be09d4e8
|
refs/heads/master
| 2022-04-28T06:54:26.170503
| 2022-03-23T22:22:20
| 2022-03-23T22:22:20
| 110,818,719
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,968
|
py
|
"""
Your terraforming crew is busy at work on a mountaintop, but it's time to break for lunch. In order to allow everyone to have lunch together, we'd like to find a plateau on the mountain where everyone can sit.
Given a topographic map in the form of a matrix of integers map, find the area of the largest plateau.
Example
For
map = [[1,0,0,2,2,0],
[0,0,2,1,0,2],
[0,1,1,2,2,2],
[1,2,1,0,2,1]]
the output should be largestPlateau(map) = 5. The crew could either choose the plateau with elevation 0 or the one with elevation 2; both of which have an area of 5:
"""
from collections import defaultdict
def dfs(graph, start):
    """Return the set of vertices reachable from `start`.

    Iterative depth-first traversal over `graph`, a mapping from vertex
    to a set of adjacent vertices.
    """
    seen = set()
    pending = [start]
    while pending:
        node = pending.pop()
        if node in seen:
            continue
        seen.add(node)
        # Only push neighbours we have not visited yet.
        pending.extend(graph[node] - seen)
    return seen
def build_graph(maps):
    """Build an adjacency map over the grid `maps`.

    Two cells are adjacent when they share an edge and have equal values.
    Keys and values use (row, col) coordinates. Cells with no equal-valued
    neighbour do not appear in the returned defaultdict.
    """
    graph = defaultdict(set)
    n_rows, n_cols = len(maps), len(maps[0])
    for row in range(n_rows):
        for col in range(n_cols):
            height = maps[row][col]
            # Four edge-sharing neighbours: left, up, right, down.
            for dr, dc in ((0, -1), (-1, 0), (0, 1), (1, 0)):
                nr, nc = row + dr, col + dc
                if 0 <= nr < n_rows and 0 <= nc < n_cols and maps[nr][nc] == height:
                    graph[(row, col)].add((nr, nc))
    return graph
def largestPlateau(maps):
    """Return the area of the largest connected equal-elevation region."""
    if not maps:
        return 0
    graph = build_graph(maps)
    n_rows, n_cols = len(maps), len(maps[0])
    seen = set()
    best = 0
    # Flood each not-yet-visited cell and keep the largest component size.
    for row in range(n_rows):
        for col in range(n_cols):
            cell = (row, col)
            if cell in seen:
                continue
            region = dfs(graph, cell)
            seen |= region
            best = max(best, len(region))
    return best
|
[
"jaroslaw.orzel@emplocity.pl"
] |
jaroslaw.orzel@emplocity.pl
|
9d5940b50bb1c85781629bf130b65cdb741c45e3
|
6dc72f5c7a1f802a27cbefdd62f1ac05836c5219
|
/PyDemo/DataAnalysisCode/matplotlibTest.py
|
8e4a9d085e51d88380970dabe8927a0aa02479f9
|
[] |
no_license
|
RockJohnson503/MyDemo
|
9e4e5c7b02ee76d5437fd54c36050655fca145fb
|
dc1062df01cc53eb9a2a1849709d2f88e8b4488c
|
refs/heads/master
| 2022-05-13T22:45:27.051170
| 2020-04-24T07:32:13
| 2020-04-24T07:32:13
| 123,227,439
| 5
| 1
| null | 2022-04-22T21:10:48
| 2018-02-28T04:07:08
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,958
|
py
|
# encoding: utf-8
"""
File: matplotlibTest.py
Author: Rock Johnson
"""
import numpy as np
import matplotlib.pyplot as plt
def main():
    # The line-plot demo is preserved, disabled, inside the string below;
    # only the scatter demo at the bottom currently runs.
    """
    x = np.linspace(-np.pi, np.pi, 256, endpoint=True) # 用numpy生成x轴的线
    c, s = np.cos(x), np.sin(x) # 用numpy定义正弦和余弦
    plt.figure(1) # 绘制第一个图
    plt.plot(x, c, color="blue", linewidth=1.0, linestyle="--", label="COS", alpha=0.5) # 前面的是自变量, 后面的是应变量, 这个是余弦
    plt.plot(x, s, color="red", label="SIN") # 这个是正弦
    plt.title("COS & SIN") # 给图添加标题
    ax = plt.gca() # 轴的编辑器
    ax.spines["right"].set_color("none") # 隐藏轴
    ax.spines["top"].set_color("none")
    ax.spines["left"].set_position(("data", 0)) # 将轴移动到数据域的某个点
    ax.spines["bottom"].set_position(("data", 0))
    ax.xaxis.set_ticks_position("bottom") # 将x轴显示的数据移到x轴的下方 框架默认就是这样的
    ax.yaxis.set_ticks_position("left") # 将y轴显示的数据移到y轴的左方
    plt.xticks([-np.pi, -np.pi / 2, 0, np.pi / 2, np.pi], [r"$-\pi$", r"$-\pi/2$", r"$0$", r"$\pi/2$", r"$\pi$"])
    plt.yticks(np.linspace(-1, 1, 5, endpoint=True)) # 设置轴的显示内容
    for label in ax.get_xticklabels() + ax.get_yticklabels(): # 设置轴显示内容的样式
        label.set_fontsize(12)
        label.set_bbox(dict(facecolor="white", edgecolor="None", alpha=0.2))
    plt.legend(loc="upper left") # 设置图片的说明
    plt.grid() # 设置图片的网格线
    plt.axis() # 设置图片的显示范围
    plt.fill_between(x, np.abs(x) < 0.5, c, c > 0.5, color="green") # 对图片进行填充
    t = 1
    plt.plot([t, t], [0, np.cos(t)], "y", linewidth="3") # 添加注释线
    plt.annotate("cos(1)", xy=(t, np.cos(1)), xycoords="data", xytext=(+10, +13), textcoords="offset points",
                 arrowprops=dict(arrowstyle="->", connectionstyle="arc3, rad=.4")) # 给注释线加描述
    plt.show() # 展示图
    """
    # Scatter demo: 128 random points coloured by their polar angle.
    fig = plt.figure()
    fig.add_subplot(3, 3, 1)
    n = 128
    X = np.random.normal(0, 1, n)
    Y = np.random.normal(0, 1, n)
    T = np.arctan2(Y, X)  # colour values (angle of each point)
    plt.axes([0.025, 0.025, 0.95, 0.95])  # plot area within the figure
    plt.scatter(X, Y, s=75, c=T, alpha=.5)  # draw the points
    plt.xlim(-1.5, 1.5), plt.xticks([])  # x range, no tick labels
    plt.ylim(-1.5, 1.5), plt.yticks([])  # y range, no tick labels
    plt.axis()
    plt.title("scatter")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.show()
def test():
    # Minimal line plot (y = x/2) with the spines moved onto the origin.
    x = np.linspace(-np.pi, np.pi, 256, endpoint=True)
    y = x/2
    plt.figure(1)
    plt.plot(x, y)
    ax = plt.gca()
    # Hide the top/right spines and centre the remaining two on (0, 0).
    ax.spines["top"].set_color("none")
    ax.spines["right"].set_color("none")
    ax.spines["left"].set_position(("data", 0))
    ax.spines["bottom"].set_position(("data", 0))
    plt.show()
if __name__ == '__main__':
main()
pass
|
[
"836867547@qq.com"
] |
836867547@qq.com
|
014a8aab3305226f095b71f76e73bfc13dc1caa5
|
eb61d62ca1f6f0123e3771105f5dfbbd6115138d
|
/.history/19-07-21_20210905224104.py
|
016085b1d34f5ea2a80953238ca45e9068b0410c
|
[] |
no_license
|
Alopezm5/CORRECTO-2
|
e0f14bcc3a88c0e222d10e3261e68532008bc42e
|
223613f1fb04dce3fac9f82f243cb2f22fe100f3
|
refs/heads/main
| 2023-07-29T06:52:48.147424
| 2021-09-12T20:33:27
| 2021-09-12T20:33:27
| 388,995,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,222
|
py
|
class MENU():
    """Simple numbered console menu.

    titulo   -- heading printed above the options.
    opciones -- list of option strings, e.g. ["1)Suma", "2)Resta"].
    """

    def __init__(self, titulo, opciones=None):
        self.titulo = titulo
        # Bug fix: the original used a mutable default (opciones=[]), which
        # is shared across all instances that omit the argument.
        self.opciones = opciones if opciones is not None else []

    def menu(self):
        """Print the menu and return the option string typed by the user."""
        print(self.titulo)
        for opcion in self.opciones:
            print(opcion)
        # Bug fix: str has no method `formato`; the correct name is `format`
        # (the original raised AttributeError as soon as the menu was shown).
        opc = input("Elije opcion [1 ..... {}]:".format(len(self.opciones)))
        return opc
# Interactive driver: top-level menu, then the calculator sub-menu.
menu1=MENU("Menú Principal" , ["1)Calculadora","2)Numeros","3)Listas","4)Cadenas","5)Salir"])
opc=menu1.menu()
if opc=="1":
    menu1=MENU("Menú Calculadora",["1)Suma","2)Resta","3)Multiplicacion" , "4) División" , "5) Salir" ])
    opc1=menu1.menu()
    if opc1 == "1" :
        print("Opcion Suma")
        n1=int(input("Ingresar n1: "))
        n2=int(input("Ingresar n2: "))
        suma=n1+n2
        print("{} + {} = {}".format( n1 , n2 , suma ))
    elif opc1 == "2" :
        print ( "Opcion Resta" )
        n1 = int ( input ( "Ingresar n1:" ))
        n2 = int ( input ( "Ingresar n2:" ))
        resta = n1 - n2
        print ( "{} - {} = {}".format ( n1 , n2 , resta ))
    elif opc1 == "3" :
        print ( "Opcion Multiplicacion" )
        n1 = int ( input ( "Ingresar n1:" ))
        n2 = int ( input ( "Ingresar n2:" ))
        multiplicacion = n1 * n2
        # Bug fix: `.formato` is not a str method; must be `.format`.
        print ( "{} * {} = {}" . format ( n1 , n2 , multiplicacion ))
    elif opc1 == "4" :
        print ( "Opcion Division" )
        n1 = int ( input ( "Ingresar n1:" ))
        n2 = int ( input ( "Ingresar n2:" ))
        division = n1 / n2
        # Bug fix: `.formato` -> `.format` here as well.
        print ( "{} / {} = {}" . format ( n1 , n2 , division ))
    elif opc1 == "5" :
        print ( "Opcion Salir" )
elif opc == "2" :
    menu2 = MENU ( "Menú Numero" , [ "1) Perfecto" , "2) Primo" , "3) Salir" ])
    opc2 = input ( "Elije opcion [1 ..... 3]:" )
elif opc == "3" :
    print ( "Listas de menú" )
elif opc == "4" :
    print ( "Menú Cadenas" )
elif opc == "5" :
    print ( "Menú Salir" )
else:
    print ( "Opcion no valida" )
|
[
"85761855+Alopezm5@users.noreply.github.com"
] |
85761855+Alopezm5@users.noreply.github.com
|
e2c46e9c3c12ed70166afee419a09e346f651ad9
|
09df89395816834ddf77de620f959c22e74d8c00
|
/Bit Manipulation/Single Number.py
|
382f244e2e89ffbe38dd4070043ad097ea451177
|
[] |
no_license
|
gdh756462786/Leetcode_by_python
|
c853c4e3de255a8b4016c59944a0d40213a539a7
|
6387543a2a23c30aef1d5d37db54ca72cfb19270
|
refs/heads/master
| 2020-06-22T11:53:24.758506
| 2018-12-28T03:03:31
| 2018-12-28T03:03:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 760
|
py
|
# coding: utf-8
'''
Given an array of integers, every element appears twice except for one.
Find that single one.
Note:
Your algorithm should have a linear runtime complexity.
Could you implement it without using extra memory?
'''
class Solution(object):
    """
    XOR every element together: equal pairs cancel (x ^ x == 0) and
    0 ^ x == x, so the accumulated value is the element that appears
    exactly once. XOR is commutative, so element order is irrelevant.
    """
    def singleNumber(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        result = 0
        for value in nums:
            result = result ^ value
        return result
# Demo run. The print(...) form is valid on both Python 2 and 3, unlike
# the original bare print statement (a SyntaxError on Python 3).
solution = Solution()
print(solution.singleNumber([2,2,5,3,3]))
|
[
"pengshuang92@163.com"
] |
pengshuang92@163.com
|
eecca24a0adcd29352d7af9f0f13143148db787d
|
bcc2eadf72d0c2a38e595f973ad4840ac038bd53
|
/Valid Palindrome.py
|
f674e38ffc21634991493803e1287fdb53981cfe
|
[] |
no_license
|
jke-zq/myleetcode.py
|
5841cec144884bcef9f0adadbb10dbe4ed34963f
|
e0f032f34f7fa8fa4f6e5af65c60b3fe581fdc23
|
refs/heads/master
| 2020-04-03T23:24:39.657299
| 2016-09-18T14:35:25
| 2016-09-18T14:35:25
| 49,498,500
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
class Solution(object):
    def isPalindrome(self, s):
        """
        :type s: str
        :rtype: bool
        """
        # Keep only the alphanumeric characters, lower-cased, then compare
        # the cleaned sequence against its reverse -- equivalent to the
        # classic two-pointer scan that skips punctuation and whitespace.
        cleaned = [ch.lower() for ch in s if ch.isalnum()]
        return cleaned == cleaned[::-1]
|
[
"jke0zq@gmail.com"
] |
jke0zq@gmail.com
|
b777223f076ace65e55bd0c622a6919bce5bd167
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/m9bcZKy4niMmsg3JX_24.py
|
53034914b39b9e5514126b1c339d6d3218688fbc
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
"""
A group of friends have decided to start a secret society. The name will be
the first letter of each of their names, sorted in alphabetical order.
Create a function that takes in a list of names and returns the name of the
secret society.
### Examples
society_name(["Adam", "Sarah", "Malcolm"]) ➞ "AMS"
society_name(["Harry", "Newt", "Luna", "Cho"]) ➞ "CHLN"
society_name(["Phoebe", "Chandler", "Rachel", "Ross", "Monica", "Joey"]) ➞ "CJMPRR"
### Notes
The secret society's name should be entirely uppercased.
"""
def society_name(friends):
    """Return the society name: each friend's first letter, sorted alphabetically."""
    initials = sorted(name[0] for name in friends)
    return "".join(initials)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
930bdd6c664af3339a2e4cd163054d717ef73e87
|
8600ea155f279e5a8dfe5a1926038511f6b6a7ea
|
/hr_timesheet_invoice/report/account_analytic_profit.py
|
600535ee02795acbe0674bc506d0626bbe7cc93d
|
[] |
no_license
|
MarkNorgate/addons-EAD
|
c2fff89ab16fce3ba19fbe433ee5863705a6f4e5
|
840f28642b5d328e4b86839c413e5164622295a5
|
refs/heads/master
| 2020-04-23T22:11:00.164438
| 2015-07-22T12:24:53
| 2015-07-22T12:24:53
| 39,501,011
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,780
|
py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from report import report_sxw
import pooler
class account_analytic_profit(report_sxw.rml_parse):
    """RML report parser computing profit per invoicing rate from
    analytic lines in a user-selected date/journal/user window."""

    def __init__(self, cr, uid, name, context):
        super(account_analytic_profit, self).__init__(cr, uid, name, context=context)
        # Helpers exposed to the RML template.
        self.localcontext.update({
            'lines': self._lines,
            'user_ids': self._user_ids,
            'journal_ids': self._journal_ids,
            'line': self._line,
        })

    def _user_ids(self, lines):
        """Return browse records of the distinct users owning `lines`."""
        user_obj=pooler.get_pool(self.cr.dbname).get('res.users')
        ids=list(set([b.user_id.id for b in lines]))
        res=user_obj.browse(self.cr, self.uid, ids)
        return res

    def _journal_ids(self, form, user_id):
        """Return the distinct analytic journals used by `user_id` within
        the form's date range and selected journals."""
        line_obj=pooler.get_pool(self.cr.dbname).get('account.analytic.line')
        journal_obj=pooler.get_pool(self.cr.dbname).get('account.analytic.journal')
        # form['journal_ids'] is an o2m/m2m command triple; [0][2] is the id list.
        line_ids=line_obj.search(self.cr, self.uid, [
            ('date', '>=', form['date_from']),
            ('date', '<=', form['date_to']),
            ('journal_id', 'in', form['journal_ids'][0][2]),
            ('user_id', '=', user_id),
            ])
        ids=list(set([b.journal_id.id for b in line_obj.browse(self.cr, self.uid, line_ids)]))
        res=journal_obj.browse(self.cr, self.uid, ids)
        return res

    def _line(self, form, journal_ids, user_ids):
        """Aggregate analytic lines into one row per invoicing rate.

        Each row carries name, theoretical amount (amount_th), invoiced
        amount, cost, unit_amount, profit and efficiency percentage.
        """
        pool=pooler.get_pool(self.cr.dbname)
        line_obj=pool.get('account.analytic.line')
        product_obj=pool.get('product.product')
        price_obj=pool.get('product.pricelist')
        ids=line_obj.search(self.cr, self.uid, [
            ('date', '>=', form['date_from']),
            ('date', '<=', form['date_to']),
            ('journal_id', 'in', journal_ids),
            ('user_id', 'in', user_ids),
            ])
        res={}
        # NOTE: `id` (builtin shadowed) keys the bucket: invoicing-rate id,
        # 0 for fixed-price accounts, -1 when no rate applies.
        for line in line_obj.browse(self.cr, self.uid, ids):
            if line.account_id.pricelist_id:
                if line.account_id.to_invoice:
                    if line.to_invoice:
                        id=line.to_invoice.id
                        name=line.to_invoice.name
                        discount=line.to_invoice.factor
                    else:
                        name="/"
                        discount=1.0
                        id = -1
                else:
                    name="Fixed"
                    discount=0.0
                    id=0
                pl=line.account_id.pricelist_id.id
                price=price_obj.price_get(self.cr, self.uid, [pl], line.product_id.id, line.unit_amount or 1.0, line.account_id.partner_id.id)[pl]
            else:
                # No pricelist on the analytic account: nothing billable.
                name="/"
                discount=1.0
                id = -1
                price=0.0
            if id not in res:
                res[id]={'name': name, 'amount': 0, 'cost':0, 'unit_amount':0,'amount_th':0}
            # Theoretical revenue for this line after the rate's discount.
            xxx = round(price * line.unit_amount * (1-(discount or 0.0)), 2)
            res[id]['amount_th']+=xxx
            if line.invoice_id:
                # Prorate the invoice's untaxed amount over all analytic
                # lines attached to the same invoice.
                self.cr.execute('select id from account_analytic_line where invoice_id=%s', (line.invoice_id.id,))
                tot = 0
                for lid in self.cr.fetchall():
                    lid2 = line_obj.browse(self.cr, self.uid, lid[0])
                    pl=lid2.account_id.pricelist_id.id
                    price=price_obj.price_get(self.cr, self.uid, [pl], lid2.product_id.id, lid2.unit_amount or 1.0, lid2.account_id.partner_id.id)[pl]
                    tot += price * lid2.unit_amount * (1-(discount or 0.0))
                if tot:
                    procent = line.invoice_id.amount_untaxed / tot
                    res[id]['amount'] += xxx * procent
                else:
                    res[id]['amount'] += xxx
            else:
                res[id]['amount'] += xxx
            # Analytic line amounts are negative for costs.
            res[id]['cost']+=line.amount
            res[id]['unit_amount']+=line.unit_amount
        for id in res:
            # profit = revenue + (negative) cost.
            # NOTE(review): 'eff' divides by cost -- ZeroDivisionError when a
            # bucket has zero cost; confirm whether that can occur in practice.
            res[id]['profit']=res[id]['amount']+res[id]['cost']
            res[id]['eff']='%d' % (-res[id]['amount'] / res[id]['cost'] * 100,)
        return res.values()

    def _lines(self, form):
        """Return the analytic lines matching the wizard form's filters."""
        line_obj=pooler.get_pool(self.cr.dbname).get('account.analytic.line')
        ids=line_obj.search(self.cr, self.uid, [
            ('date', '>=', form['date_from']),
            ('date', '<=', form['date_to']),
            ('journal_id', 'in', form['journal_ids'][0][2]),
            ('user_id', 'in', form['employee_ids'][0][2]),
            ])
        res=line_obj.browse(self.cr, self.uid, ids)
        return res
report_sxw.report_sxw('report.account.analytic.profit', 'account.analytic.line', 'addons/hr_timesheet_invoice/report/account_analytic_profit.rml', parser=account_analytic_profit)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"mark.norgate@affinity-digital.com"
] |
mark.norgate@affinity-digital.com
|
c682dbe0c08b2b8188a1f15f8be584ff2944f575
|
0810b308b09e6680b5df2b5f412494d07d02f181
|
/1905/month01/code/day11/demo01.py
|
6df18b52956cef2cc3a75f7c6d67874f3607f4cf
|
[] |
no_license
|
952033053/python3
|
d323ecff1bcd208fc81b74e2ab7e0eb9ce31d514
|
29c8fb7f3ca90e18cce1f9a62a27415aac946c46
|
refs/heads/master
| 2020-06-21T18:19:55.610435
| 2019-07-18T02:57:31
| 2019-07-18T02:57:31
| 197,524,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,210
|
py
|
class Wife:
    """Demonstrates Python name mangling and accessor-style validation."""

    def __init__(self, name, age, weight):
        self.name = name
        # Name mangling: a double-underscore attribute is actually stored
        # as _ClassName__age (here: _Wife__age).
        # self.__age = age
        self.set_age(age)
        # self.__weight = weight
        self.set_weight(weight)

    # Public read/write accessors for the mangled age attribute.
    def get_age(self):
        return self.__age

    def set_age(self, value):
        # Only accept ages in [21, 31]; out-of-range values are silently
        # ignored. NOTE(review): if the constructor value is rejected,
        # __age is never created and get_age() raises AttributeError.
        if 21 <= value <= 31:
            self.__age = value
        else:
            pass

    # Public read/write accessors for the mangled weight attribute.
    def get_weight(self):
        return self.__weight

    def set_weight(self, value):
        # Only accept weights in [40, 60]; out-of-range values are ignored.
        if 40 <= value <= 60:
            self.__weight = value
        else:
            pass
pass
# age=20 is rejected by set_age, so _Wife__age does not exist yet;
# weight=20 is likewise rejected.
w01 = Wife("铁锤公主", 20, 20)
# Assigning w01.__age here would create a NEW attribute on the instance
# (it does not touch the mangled one defined by the class).
# w01.__age = 107
w01._Wife__age = 20  # writes the actual mangled private attribute
print(w01.__dict__)  # built-in dict holding the instance's attributes
w01 = Wife("铁锤公主", 30, 50)
w01.set_age(25)
w01.set_weight(55)
print(w01.get_age())
print(w01.get_weight())
# Exercise: define an Enemy class (name, attack 10-50, health 100-200);
# create one enemy object whose data can be read and modified.
|
[
"lvze@tedu.cn"
] |
lvze@tedu.cn
|
c5e0b845eec88fe50d7ed7cfda31c0af3417e7a8
|
e645ebf3b5177eb0ebedb7f239bd6e1b40bf1b07
|
/ups/minuit2.cfg
|
53e4d870fb2d8ee6bb0e4e0ceb063982e4b81138
|
[] |
no_license
|
lsst-dm/bp
|
e095cdb7412124fef39bdd8428fce70bbf0f462a
|
31c0b65866d06a09575a53d0dd558320e6994a06
|
refs/heads/main
| 2023-07-22T11:32:48.479329
| 2023-07-10T00:30:32
| 2023-07-10T00:30:32
| 37,212,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,936
|
cfg
|
# -*- python -*-
"""
Dependencies and configuration for minuit2
"""
import os.path
import eups
def _get_root():
    """Return the root directory of the package.

    NOTE(review): eups.productDir may return None when the product is not
    set up -- confirm callers tolerate that before relying on it.
    """
    return eups.productDir("minuit2")
dependencies = {
# Names of packages required to build against this package.
"required": [],
# Names of packages optionally setup when building against this package.
"optional": [],
# Names of packages required to build this package, but not required to build against it.
"buildRequired": [],
# Names of packages optionally setup when building this package, but not used in building against it.
"buildOptional": [],
}
def setup(conf, products, build=False):
    """
    Update an SCons environment to make use of the package.

    Arguments:
      conf ------ An SCons Configure context.  The SCons Environment conf.env should be updated
                  by the setup function.
      products -- A dictionary consisting of all dependencies and the return values of calls to their
                  setup() functions, or None if the dependency was optional and was not found.
      build ----- If True, this is the product currently being built, and products in "buildRequired" and
                  "buildOptional" dependencies will also be present in the products dict.

    Returns a dict with the package's paths, doxygen settings and libraries.
    """
    conf.env.PrependUnique(**paths)
    if not build:
        conf.env.AppendUnique(**doxygen)
    for target in libs:
        if target not in conf.env.libs:
            # Bug fix: this read `lib[target].copy()`, but `lib` is undefined
            # on the first iteration (NameError) and stale afterwards; the
            # module-level `libs` dict is clearly what was intended.
            conf.env.libs[target] = libs[target].copy()
        else:
            for lib in libs[target]:
                if lib not in conf.env.libs[target]:
                    conf.env.libs[target].append(lib)
    return {"paths": paths, "doxygen": doxygen, "libs": libs, "extra": {}}
###################################################################################################
# Variables for default implementation of setup() below; if the user provides
# a custom implementation of setup(), everything below is unnecessary.
# Packages to be added to the environment.
paths = {
# Sequence of paths to add to the include path.
"CPPPATH": [os.path.join(_get_root(), "include")],
# Sequence of paths to add to the linker path.
"LIBPATH": [os.path.join(_get_root(), "lib")],
}
doxygen = {
# Sequence of Doxygen tag files produced by this product.
"DOXYGEN_TAGFILES": [],
# Sequence of Doxygen configuration files to include in dependent products.
"DOXYGEN_INCLUDES": [],
}
# Libraries provided by the package, not including standard library prefixes or suffixes.
# Additional custom targets besides the standard "main", "python", and "test" targets may
# be provided as well.
libs = {
# Normal libraries.
"main": ["Minuit2"],
# Libraries only linked with C++-coded Python modules.
"python": [],
# Libraries only linked with C++-coded unit tests.
"test": [],
}
|
[
"jbosch@git.lsstcorp.org"
] |
jbosch@git.lsstcorp.org
|
34796e03ad42278148564162c60c8e9b9f5fc4b8
|
56c3cefe1da4731175ee73d90ca2629d79bfe696
|
/egs/ptb_chime4test/local/run_trf_nce_cnn.py
|
3e8eeb368c4f54c64b8acffe0aafefdd38e84a84
|
[] |
no_license
|
wbengine/TRF-NN-Tensorflow
|
022a187c80c80293553958c17a267c7eaf81213f
|
e225829c36043293d092cf8ed620d6dce0abc8f0
|
refs/heads/master
| 2022-04-16T10:20:46.999159
| 2020-03-05T04:56:20
| 2020-03-05T04:56:20
| 114,067,559
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,475
|
py
|
import tensorflow as tf
import os
import sys
import time
import numpy as np
import task
from model import wblib as wb
from model import reader
from model import trfbase
from model import trfnce
from model import lstmlm
import run_lstmlm
# [data]
# Load the raw train/valid/test corpora declared by the task module.
# NOTE(review): '</s>' is passed as BOTH the begin and end sentence token —
# presumably intentional for this TRF setup, but confirm a distinct '<s>'
# begin token was not meant.
data = reader.Data().load_raw_data([task.train, task.valid, task.test],
                                   add_beg_token='</s>', add_end_token='</s>')
# data.cut_train_to_length(50)
def create_name(config, q_config):
    """Build the experiment name: str(config), plus the proposal-LM name when one is used."""
    name = str(config)
    if q_config is None:
        return name
    # Append the auxiliary LSTM LM's name so runs with a q-model get a distinct logdir.
    return name + '_with_' + run_lstmlm.create_name(q_config)
def main(_):
    """Build and train a TRF-NCE language model with a CNN feature extractor.

    The unused ``_`` parameter receives argv from ``tf.app.run``.  Configures
    the model, (re)creates the log directory, dumps vocabulary and dev/test
    data, then trains under a tf.train.Supervisor.
    """
    # --- network / training hyper-parameters ---
    config = trfnce.Config(data)
    config.structure_type = 'cnn'
    config.embedding_dim = 200
    # Parallel convolution banks with kernel sizes 1..10, 100 filters each.
    config.cnn_filters = [(i, 100) for i in range(1, 11)]
    config.cnn_width = 3
    config.cnn_layers = 3
    config.cnn_hidden = 200
    config.rnn_hidden_layers = 2
    config.rnn_hidden_size = 200
    config.rnn_predict = True
    config.batch_size = 10
    # NCE: 10 noise samples per data sample, drawn from a pretrained LSTM LM checkpoint.
    config.noise_factor = 10
    config.noise_sampler = 'lstm:lstm/lstm_e200_h200x2/model.ckpt'
    config.init_weight = 0.1
    config.optimize_method = ['adam', 'adam']
    config.lr_param = trfbase.LearningRateEpochDelay(0.001)
    config.lr_zeta = trfbase.LearningRateEpochDelay(0.01)
    config.max_epoch = 100
    # config.dropout = 0.75
    # config.init_zeta = config.get_initial_logz(20)
    config.update_zeta = True
    config.write_dbg = False
    config.print()
    # Optional proposal model q (disabled here; enable via small_config).
    # q_config = run_lstmlm.small_config(data)
    q_config = None
    # --- log directory and data dumps ---
    name = create_name(config, q_config)
    logdir = 'trf_nce/' + name
    wb.mkdir(logdir, is_recreate=True)
    # Redirect stdout so all progress output is captured in trf.log.
    sys.stdout = wb.std_log(os.path.join(logdir, 'trf.log'))
    print(logdir)
    data.write_vocab(logdir + '/vocab.txt')
    data.write_data(data.datas[1], logdir + '/valid.id')
    data.write_data(data.datas[2], logdir + '/test.id')
    # wb.rmdir(logdirs)
    with tf.Graph().as_default():
        if q_config is None:
            m = trfnce.TRF(config, data, logdir=logdir, device='/gpu:0')
        else:
            m = trfnce.TRF(config, data, logdir=logdir, device='/gpu:0',
                           q_model=lstmlm.LM(q_config, device='/gpu:0')
                           )
        # s1 = trfnce.NoiseSamplerNgram(config, data, 2)
        # s2 = trfnce.NoiseSamplerLSTMEval(config, data, config.noise_sampler.split(':')[-1])
        sv = tf.train.Supervisor(logdir=os.path.join(logdir, 'logs'),
                                 global_step=m.train_net.global_step)
        sv.summary_writer.add_graph(tf.get_default_graph())  # write the graph to logs
        session_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
        session_config.gpu_options.allow_growth = True
        with sv.managed_session(config=session_config) as session:
            with session.as_default():
                # Restore the q model's weights before training when present.
                if m.q_model is not None:
                    print('load lstmlm for q model')
                    m.q_model.restore(session, './lstm/' + run_lstmlm.create_name(q_config) + '/model.ckpt')
                m.train(sv, session,
                        print_per_epoch=0.1,
                        operation=task.Ops(m),
                        )


if __name__ == '__main__':
    tf.app.run(main=main)
|
[
"wb.th08@gmail.com"
] |
wb.th08@gmail.com
|
cad49a464e253ae9342c164c950fd6c0ec78bdcf
|
d5a32e532fe231c16e52149604f0db34c5f4d2f9
|
/binarysearch.io/sum_of_the_deepest_node.py
|
a7848ee9b46081b4f3607498a2a3079159af306e
|
[
"MIT"
] |
permissive
|
mishrakeshav/Competitive-Programming
|
93705f63337639e8464c1d50f3394434b7422f15
|
00c1bd272646754ca4c260d57989304c8e323838
|
refs/heads/master
| 2023-07-06T07:32:23.042324
| 2023-06-29T15:27:24
| 2023-06-29T15:27:24
| 216,195,590
| 3
| 3
|
MIT
| 2020-10-03T07:55:18
| 2019-10-19T11:27:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,288
|
py
|
# class Tree:
# def __init__(self, val, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def solve(self, root):
        """Return the sum of the node values on the deepest level of the tree."""
        levels = {}

        def visit(node, depth=0):
            # Record each node's value under its depth, then recurse into children.
            if node is None:
                return
            levels.setdefault(depth, []).append(node.val)
            visit(node.left, depth + 1)
            visit(node.right, depth + 1)

        visit(root, 0)
        # The deepest level is the largest recorded depth.
        return sum(levels[max(levels)])
class Solution:
    def solve(self, root):
        """Return the sum of node values on the deepest level, via iterative BFS."""
        level = [root]
        while True:
            level_sum = 0
            next_level = []
            for node in level:
                level_sum += node.val
                if node.left:
                    next_level.append(node.left)
                if node.right:
                    next_level.append(node.right)
            # No children anywhere on this level => it was the deepest one.
            if not next_level:
                return level_sum
            level = next_level
|
[
"mishrakeshav@users.noreply.github.com"
] |
mishrakeshav@users.noreply.github.com
|
cefa36b71fd4da6c8b37f32c155e0eb34813882b
|
0296bc69a0d9608ed826ad7a719395f019df098f
|
/Tools/semantic_check.py
|
7bbe1e264b5436b25ade1ce79adfe0c38466b046
|
[] |
no_license
|
jcn16/Blender_HDRmap_render
|
c0486a77e04c5b41a6f75f123dbdb3d10c682367
|
50e6cdb79fef83081de9830e7105dd425a235a9e
|
refs/heads/main
| 2023-07-19T22:22:53.622052
| 2021-08-20T06:29:10
| 2021-08-20T06:29:10
| 377,757,283
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 559
|
py
|
import os
from tqdm import tqdm
'''
check if semantic_mask is complete
'''
# Walk every model directory under the dataset root and print the path of any
# view directory that is missing its semantic_mask.png render output.
root='/media/jcn/新加卷/JCN/JCN_test_datset/Train_512'
child_dirs=os.listdir(root)
child_dirs.sort()
pbar=tqdm(total=len(child_dirs))
for model in child_dirs:
    pbar.update(1)
    sub_dirs=os.listdir(os.path.join(root,model))
    # Skip the 'prt' and 'GEO' subdirectories — presumably not per-view render
    # folders (TODO confirm); raises ValueError if either is absent.
    sub_dirs.remove('prt')
    sub_dirs.remove('GEO')
    sub_dirs.sort()
    for dir in sub_dirs:
        src=os.path.join(root,model,dir,'semantic_mask.png')
        if os.path.exists(src):
            continue
        else:
            # Report each missing mask so the render can be redone.
            print(src)
|
[
"591599635@qq.com"
] |
591599635@qq.com
|
5d2e5134e1095e1fd5b25e03a0582d9165899207
|
f0e048b2398b42a3c3ec42925ab75f754cd8d214
|
/configs/RAChallenge/s2anet_r101_fpn_1x_ms_ra.py
|
a06a8e7cd56271360518b99aafbdbfc70973c468
|
[] |
no_license
|
myknowntime/RIDet
|
c56535f52ccf76e41bd181faf2bceb2f0e8fbd57
|
96bee9a7089a267855d494fbf9d2f2f78064c54e
|
refs/heads/master
| 2023-08-14T23:46:32.849835
| 2021-10-06T14:29:31
| 2021-10-06T14:29:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,958
|
py
|
# mmdetection-style config: S2ANet rotated-box detector with ResNet-101 + FPN
# for the RAChallenge dataset (6 classes, 1024x1024 inputs).
# fp16 settings
# fp16 = dict(loss_scale=512.)
# model settings
model = dict(
    type='S2ANetDetector',
    pretrained='torchvision://resnet101',
    backbone=dict(
        type='ResNet',
        depth=101,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs=True,
        num_outs=5),
    rbox_head=dict(
        type='S2ANetHead',
        num_classes=6,
        in_channels=256,
        feat_channels=256,
        stacked_convs=2,
        align_conv_type='AlignConv',  # one of [AlignConv, DCN, GA_DCN]
        align_conv_size=3,
        with_orconv=True,
        anchor_ratios=[1.0],
        anchor_strides=[8, 16, 32, 64, 128],
        anchor_scales=[4],
        target_means=[.0, .0, .0, .0, .0],
        target_stds=[1.0, 1.0, 1.0, 1.0, 1.0],
        loss_fam_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=5.0),  # loss weight changed from 1 to 5
        loss_fam_bbox=dict(
            type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
        loss_odm_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=5.0),  # loss weight changed from 1 to 5
        loss_odm_bbox=dict(
            type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
    fam_cfg=dict(
        anchor_target_type='hbb_obb_rbox_overlap',
        assigner=dict(
            type='MaxIoUAssignerRbox',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0,
            ignore_iof_thr=-1),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    odm_cfg=dict(
        anchor_target_type='obb_obb_rbox_overlap',
        anchor_inside_type='center',
        assigner=dict(
            type='MaxIoUAssignerRbox',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0,
            ignore_iof_thr=-1),
        allowed_border=-1,
        pos_weight=-1,
        debug=False))
test_cfg = dict(
    nms_pre=2000,
    min_bbox_size=0,
    score_thr=0.15,
    nms=dict(type='nms_rotated', iou_thr=0.1),
    max_per_img=2000)
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='RotatedResize', img_scale=(1024, 1024), keep_ratio=True),
    # dict(type='RotatedResize', img_scale=(1024, 1024), keep_ratio=True),
    dict(type='RotatedRandomFlip', flip_ratio=0),
    # dict(type='RandomRotate', rate=0.5, angles=[30, 60, 90, 120, 150], auto_bound=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1024, 1024),
        flip=False,
        transforms=[
            dict(type='RotatedResize', img_scale=(1024, 1024), keep_ratio=True),
            # dict(type='RotatedRandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# dataset settings
dataset_type = 'RAChallengeDataset'
data_root = 'data/RAChallenge/stage1/train/'  # train set without augmentation
warmup_data_root = 'data/RAChallenge/warmup/'  # warmup data without augmentation
test_root = 'data/RAChallenge/stage1/'
stage2_test_root = 'data/RAChallenge/stage2/'
all_data_root = 'data/RAChallenge/stage1/all_data_augment/'  # train_aug + warmup_aug
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=2,
    # train no aug
    train=dict(
        type=dataset_type,
        ann_file=all_data_root + 'train.json',
        img_prefix=all_data_root + 'images/',
        pipeline=train_pipeline),
    # # train with aug
    # train=dict(
    #     type=dataset_type,
    #     ann_file=aug_data_root + 'train.json',
    #     img_prefix=aug_data_root + 'images/',
    #     pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'trainval_split/trainval.json',
        img_prefix=data_root + 'trainval_split/images/',
        pipeline=test_pipeline),
    # submission
    test=dict(
        type=dataset_type,
        ann_file=stage2_test_root + 'test.json',
        img_prefix=stage2_test_root + 'test2/',
        pipeline=test_pipeline)
    # # evalloss_fam_cls: 1.0105,
    # test=dict(
    #     type=dataset_type,
    #     ann_file=warmup_data_root + 'train.json',
    #     img_prefix=warmup_data_root + 'images/',
    #     pipeline=test_pipeline)
)
# optimizer
# optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001)
optimizer = dict(type='Adam', lr=1e-4, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.00005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[6, 10])
    # step=[8, 16, 20])
    # step=[12, 24, 36, 48])
checkpoint_config = dict(interval=2)
# yapf:disable
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'work_dirs/s2anet_r101_fpn_1024_ms_ra/'
# Resume training from a previous checkpoint's weights (not optimizer state).
load_from = 'work_dirs/s2anet_r101_fpn_1024_ms_ra/70_10-15.pth'
# load_from = None
resume_from = None
workflow = [('train', 1)]
|
[
"mq_chaser@126.com"
] |
mq_chaser@126.com
|
87c6b732826010a09b36dc58caec09f610519427
|
8d946e49d0e9c5e038b6dd5fdfc11c72f64470f9
|
/instagram/urls.py
|
f97803739aafc2feba7d773b1e1fc52f1f78a5e7
|
[
"MIT"
] |
permissive
|
gabyxbinnaeah/TwinterApp
|
bfc955fdf529b5ecce89f62ab6bd4f8ecf9e461e
|
a0f68527a3e01cd47e49f9a17988ec5095422695
|
refs/heads/master
| 2023-06-16T01:07:43.531740
| 2021-07-14T08:37:50
| 2021-07-14T08:37:50
| 384,447,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,314
|
py
|
"""instagram URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.urls import path,include
from django.contrib.auth import views
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django_registration.backends.one_step.views import RegistrationView
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('chat.urls')),
path('accounts/', include('django_registration.backends.one_step.urls')),
path('accounts/', include('django.contrib.auth.urls')),
path('accounts/register/',RegistrationView.as_view(success_url='/'),name='django_registration_register'),
]
|
[
"gabyxbinnaeh4@gmail.com"
] |
gabyxbinnaeh4@gmail.com
|
e6f6045cc2fb7e9d2b61ded3d712cc41bf1bd78b
|
c6e5d5ff2ee796fd42d7895edd86a49144998067
|
/platform/core-scheduler/polyaxon/polyconf/wsgi.py
|
752f03d7945e907b86cc6786cbdc1116ab7a7e94
|
[
"Apache-2.0"
] |
permissive
|
zeyaddeeb/polyaxon
|
f4481059f93d8b70fb3d41840a244cd9aaa871e0
|
1f2b236f3ef36cf2aec4ad9ec78520dcc9ef4ee5
|
refs/heads/master
| 2023-01-19T05:15:34.334784
| 2020-11-27T17:08:35
| 2020-11-27T17:08:35
| 297,410,504
| 0
| 0
|
Apache-2.0
| 2020-09-21T17:20:27
| 2020-09-21T17:20:26
| null |
UTF-8
|
Python
| false
| false
| 995
|
py
|
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
WSGI config for search project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "polyconf.settings")
application = get_wsgi_application()
|
[
"mouradmourafiq@gmail.com"
] |
mouradmourafiq@gmail.com
|
9e9a78685cf9219df7db2543b3cb31fc4e86d42d
|
4524c4940d7fa830c23e4dc8e1144d5eec74365b
|
/ex15.py
|
75842d887ad14a1f8b08026f86360c2596f8855c
|
[] |
no_license
|
AmyShackles/LearnPython3TheHardWay
|
ef493209a181f62bfa45ff3ec456ae0fd2c3e8a9
|
4e175d58dfe8c7295ebfbee3947e944b35e52f8c
|
refs/heads/master
| 2020-03-23T03:49:53.052976
| 2018-07-27T21:14:30
| 2018-07-27T21:14:30
| 141,051,327
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
from sys import argv

# Unpack argv: argv[0] is this script's name, argv[1] is the file to read.
script, filename = argv

# Open and print the file named on the command line.
txt = open(filename)
# Fix: interpolate the actual filename — the placeholder had been lost,
# leaving a literal "(unknown)", contradicting the comment's stated intent.
print(f"Here's your file {filename}:")
print(txt.read())
txt.close()  # release the handle once we are done with the file

# Ask the user to type the filename, then print that file too.
print("Type the filename again:")
file_again = input("> ")

txt_again = open(file_again)
print(txt_again.read())
txt_again.close()
|
[
"amyshackles@gmail.com"
] |
amyshackles@gmail.com
|
0cdac74013d1815fdcf40dc9165e35d850ef2673
|
7b252f0c1b8ba7c9a35ead166482efbb4d804413
|
/mysite/books/views.py
|
aad9117e9fbeb9c02c506f754902dff380645397
|
[] |
no_license
|
gzpgg3x/PythonExample
|
191024f04796a13106b46f4f00a59185c33af91b
|
c64563f91cd5188b6d3d01688d8184a37ded46eb
|
refs/heads/master
| 2021-01-10T19:38:53.325169
| 2013-04-11T22:36:29
| 2013-04-11T22:36:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,682
|
py
|
# Create your views here.
# from django.shortcuts import render_to_response
# from django.http import Http404, HttpResponse
# def search_form(request):
# return render_to_response('search_form.html')
# def search(request):
# if 'q' in request.GET:
# message = 'You searched for: %r' % request.GET['q']
# else:
# message = 'You submitted an empty form.'
# return HttpResponse(message)
# from django.http import HttpResponse
# from django.shortcuts import render_to_response
# from books.models import Book
# def search_form(request):
# return render_to_response('search_form.html')
# def search(request):
# if 'q' in request.GET and request.GET['q']:
# q = request.GET['q']
# books = Book.objects.filter(title__icontains=q)
# return render_to_response('search_results.html',
# {'books': books, 'query': q})
# else:
# # return HttpResponse('Please submit a search term.')
# return render_to_response('search_form.html', {'error': True})
# def search(request):
# error = False
# if 'q' in request.GET:
# q = request.GET['q']
# if not q:
# error = True
# elif len(q) > 20:
# error = True
# else:
# books = Book.objects.filter(title__icontains=q)
# return render_to_response('search_results.html',
# {'books': books, 'query': q})
# return render_to_response('search_form.html',
# {'error': error})
# def search(request):
# errors = []
# if 'q' in request.GET:
# q = request.GET['q']
# if not q:
# errors.append('Enter a search term.')
# elif len(q) > 20:
# errors.append('Please enter at most 20 characters.')
# else:
# books = Book.objects.filter(title__icontains=q)
# return render_to_response('search_results.html',
# {'books': books, 'query': q})
# return render_to_response('search_form.html',
# {'errors': errors})
from django.core.mail import send_mail
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response

from books.forms import ContactForm
from books.models import Book
def search(request):
    """Handle a book-title search.

    Validates the ``q`` GET parameter (non-empty, at most 20 characters) and
    renders the results page on success, or the form page with any errors.
    """
    errors = []
    if 'q' in request.GET:
        q = request.GET['q']
        if not q:
            errors.append('Enter a search term.')
        elif len(q) > 20:
            errors.append('Please enter at most 20 characters.')
        else:
            # Fix: the model class is ``Book`` (capitalized) — the lowercase
            # ``book`` was an undefined name and raised NameError at runtime.
            books = Book.objects.filter(title__icontains=q)
            return render_to_response('search_results.html',
                                      {'books': books, 'query': q})
    return render_to_response('search_form.html',
                              {'errors': errors})
# def contact(request):
# errors = []
# if request.method == 'POST':
# if not request.POST.get('subject', ''):
# errors.append('Enter a subject.')
# if not request.POST.get('message', ''):
# errors.append('Enter a message.')
# if request.POST.get('email') and '@' not in request.POST['email']:
# errors.append('Enter a valid e-mail address.')
# if not errors:
# send_mail(
# request.POST['subject'],
# request.POST['message'],
# request.POST.get('email', 'noreply@example.com'),
# ['siteowner@example.com'],
# )
# return HttpResponseRedirect('/contact/thanks/')
# return render_to_response('contact_form.html',
# {'errors': errors})
# def contact(request):
# errors = []
# if request.method == 'POST':
# if not request.POST.get('subject', ''):
# errors.append('Enter a subject.')
# if not request.POST.get('message', ''):
# errors.append('Enter a message.')
# if request.POST.get('email') and '@' not in request.POST['email']:
# errors.append('Enter a valid e-mail address.')
# if not errors:
# send_mail(
# request.POST['subject'],
# request.POST['message'],
# request.POST.get('email', 'noreply@example.com'),
# ['siteowner@example.com'],
# )
# return HttpResponseRedirect('/contact/thanks/')
# return render_to_response('contact_form.html', {
# 'errors': errors,
# 'subject': request.POST.get('subject', ''),
# 'message': request.POST.get('message', ''),
# 'email': request.POST.get('email', ''),
# })
# def contact(request):
# if request.method == 'POST':
# form = ContactForm(request.POST)
# if form.is_valid():
# cd = form.cleaned_data
# send_mail(
# cd['subject'],
# cd['message'],
# cd.get('email', 'noreply@example.com'),
# ['siteowner@example.com'],
# )
# return HttpResponseRedirect('/contact/thanks/')
# else:
# form = ContactForm()
# return render_to_response('contact_form.html', {'form': form})
def contact(request):
    """Show the contact form; on a valid POST, email the site owner and redirect."""
    if request.method != 'POST':
        # Initial GET: render an unbound form with a prefilled subject.
        form = ContactForm(initial={'subject': 'I love your site!'})
        return render_to_response('contact_form.html', {'form': form})
    form = ContactForm(request.POST)
    if not form.is_valid():
        # Re-render with the bound form so validation errors are displayed.
        return render_to_response('contact_form.html', {'form': form})
    cd = form.cleaned_data
    send_mail(
        cd['subject'],
        cd['message'],
        cd.get('email', 'noreply@example.com'),
        ['siteowner@example.com'],
    )
    return HttpResponseRedirect('/contact/thanks/')
|
[
"gzpgg3x@yahoo.com"
] |
gzpgg3x@yahoo.com
|
3805406d7d67e5a498dfff6b970543445e2a268e
|
1fb55ab2c082348eb51263357563d20e1fd50b7d
|
/commons/c2cgeoportal_commons/alembic/main/29f2a32859ec_merge_1_6_and_master_branches.py
|
e92c014cbf7e91b379db31f2314f86152fee5f02
|
[
"BSD-2-Clause-Views"
] |
permissive
|
nstoykov/c2cgeoportal
|
40876bf577cc2ed1877affa9f307acef94d86daa
|
42c3aab09e0c44a20d0162a85c51c6a9ca0ff95e
|
refs/heads/master
| 2020-12-06T03:27:00.330795
| 2020-01-07T09:25:07
| 2020-01-07T09:25:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,944
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2019, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
"""Merge 1.6 and master branches
Revision ID: 29f2a32859ec
Revises: ('22e6dfb556de', '116b9b79fc4d')
Create Date: 2015-12-16 14:10:56.704614
"""
# revision identifiers, used by Alembic.
revision = "29f2a32859ec"
down_revision = ("22e6dfb556de", "116b9b79fc4d")
branch_labels = None
depends_on = None
def upgrade():
pass
def downgrade():
pass
|
[
"stephane.brunner@camptocamp.com"
] |
stephane.brunner@camptocamp.com
|
d2f817ce547020deb24980787d61a4775fe21557
|
0f6f95af209ff9192702c2176c4513cb28929ba5
|
/syd/commands/base.py
|
ff759bd373fea19e05e2ad6b670aa903bdbfd1e8
|
[] |
no_license
|
SD2E/aliases-cli
|
87a03f83cbbed5f5860e77457718f7eb6121a311
|
c634012a2623b975b8eeb6e210fabe51fe53a6ab
|
refs/heads/master
| 2020-03-10T19:33:47.852609
| 2018-04-18T22:27:40
| 2018-04-18T22:27:40
| 129,550,330
| 1
| 0
| null | 2018-04-17T19:55:08
| 2018-04-14T20:01:52
|
Python
|
UTF-8
|
Python
| false
| false
| 425
|
py
|
"""The base command."""
from agavepy.agave import Agave
from .reactors import alias
class Base(object):
    """Common base for CLI subcommands; concrete commands override run()."""

    def __init__(self, options, *args, **kwargs):
        # Stash the parsed CLI options plus any extra positional/keyword
        # arguments so subclasses can inspect them.
        self.options = options
        self.args = args
        self.kwargs = kwargs
        # Alias store backed by the cached Agave client session.
        self.store = alias.AliasStore(Agave.restore())

    def run(self):
        """Execute the command; every subclass must implement this."""
        raise NotImplementedError('You must implement the run() method yourself!')
|
[
"vaughn@tacc.utexas.edu"
] |
vaughn@tacc.utexas.edu
|
63df08aefaa3c1c7cab07d65e38a0de2816880ca
|
a0801d0e7325b31f0383fc68517e208680bb36d6
|
/Kattis/commercials.py
|
fe78e87a705a79461a41b0c9e1c0aa6f1c6b0f44
|
[] |
no_license
|
conormccauley1999/CompetitiveProgramming
|
bd649bf04438817c7fa4755df2c2c7727273b073
|
a7e188767364be40f625612af3d16182f2d8d4de
|
refs/heads/master
| 2023-05-14T13:19:32.678134
| 2023-05-11T16:07:33
| 2023-05-11T16:07:33
| 179,089,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
# Kattis "commercials": n broadcast slots, each costing p; vs[i] is the revenue
# of slot i.  Find the maximum profit of any contiguous run of slots.
n, p = map(int, input().split())
vs = list(map(int, input().split()))
# Convert each slot's revenue into its profit contribution (revenue - price).
for i in range(n):
    vs[i] -= p
# Kadane's algorithm: mx is the best subarray sum seen so far, mxh the best
# sum of a subarray ending at the current index.  -10e8 is a "minus infinity"
# sentinel (a float; assumes answers stay above -1e9 — fine for the judge's bounds).
mx = -10e8
mxh = 0
for i in range(n):
    mxh = mxh + vs[i]
    mx = max(mx, mxh)
    # Reset the running sum when it goes negative — an empty prefix is better.
    mxh = max(mxh, 0)
print(mx)
|
[
"conormccauley1999@gmail.com"
] |
conormccauley1999@gmail.com
|
52bac37037d550c2a2aae038c7e551a45f41832d
|
91da8a59561d6f2c7852c0548298434e0ede2ac7
|
/Linked list/sort_a_linkedList.py
|
a0ff4d4ccaf23fb3e9297409fbd6d52413ca3256
|
[] |
no_license
|
prashant97sikarwar/leetcode
|
6d3828772cc426ccf53dad07edb1efbc2f1e1ded
|
e76054e27a5d4493bd1bcef2ebdeb21d257afb63
|
refs/heads/master
| 2023-08-23T05:06:23.181869
| 2021-10-28T18:19:10
| 2021-10-28T18:19:10
| 286,057,727
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,346
|
py
|
"""Sort a linked list in O(n log n) time using constant space complexity."""
# Definition for singly-linked list.
class ListNode:
    """A node in a singly linked list: a value plus a link to the next node."""

    def __init__(self, val=0, next=None):
        self.next = next
        self.val = val
class Solution:
    def sortList(self, head: ListNode) -> ListNode:
        """Merge-sort a singly linked list in O(n log n); return the new head."""
        # Base case: zero or one node is already sorted.
        if head is None or head.next is None:
            return head
        # Split the list in two around the middle node.
        middle = self.findMiddle(head)
        right_head = middle.next
        middle.next = None
        # Sort both halves, then merge them.
        left_sorted = self.sortList(head)
        right_sorted = self.sortList(right_head)
        return self.finalMergeSort(left_sorted, right_sorted)

    def findMiddle(self, node):
        """Return the middle node of the list using slow/fast pointers."""
        if node is None or node.next is None:
            return node
        slow, fast = node, node
        while fast.next is not None and fast.next.next is not None:
            slow, fast = slow.next, fast.next.next
        return slow

    def finalMergeSort(self, a, b):
        """Recursively merge two sorted lists; stable (ties keep ``a`` first)."""
        if a is None:
            return b
        if b is None:
            return a
        if a.val <= b.val:
            merged = a
            merged.next = self.finalMergeSort(a.next, b)
        else:
            merged = b
            merged.next = self.finalMergeSort(a, b.next)
        return merged
|
[
"prashant97sikarwar@gmail.com"
] |
prashant97sikarwar@gmail.com
|
e5755a3d897e49d3a0ef501d254813c4afb0c40e
|
f4b60f5e49baf60976987946c20a8ebca4880602
|
/lib64/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/bgp/ctxafdef.py
|
d649b8e14aa620c77367e3c7b8b845e9914a019b
|
[] |
no_license
|
cqbomb/qytang_aci
|
12e508d54d9f774b537c33563762e694783d6ba8
|
a7fab9d6cda7fadcc995672e55c0ef7e7187696e
|
refs/heads/master
| 2022-12-21T13:30:05.240231
| 2018-12-04T01:46:53
| 2018-12-04T01:46:53
| 159,911,666
| 0
| 0
| null | 2022-12-07T23:53:02
| 2018-12-01T05:17:50
|
Python
|
UTF-8
|
Python
| false
| false
| 6,835
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class CtxAfDef(Mo):
    """
    The BGP address family context definition.
    """
    # NOTE(review): auto-generated cobra model code ("do not modify!") —
    # comments below only annotate the metadata, no tokens are changed.
    meta = ClassMeta("cobra.model.bgp.CtxAfDef")

    # Class identity, naming format, and access flags.
    meta.moClassName = "bgpCtxAfDef"
    meta.rnFormat = "bgpCtxAfP-%(af)s"
    meta.category = MoCategory.REGULAR
    meta.label = "Address Family Context Definition"
    meta.writeAccessMask = 0x20000001
    meta.readAccessMask = 0x20000001
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False

    # Containment (children/parents) and inheritance relationships.
    meta.childClasses.add("cobra.model.fault.Delegate")
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
    meta.parentClasses.add("cobra.model.fv.RtdEpP")
    meta.parentClasses.add("cobra.model.fv.BrEpP")
    meta.superClasses.add("cobra.model.bgp.ACtxAfPol")
    meta.superClasses.add("cobra.model.fabric.L3CtxPol")
    meta.superClasses.add("cobra.model.fabric.ProtoPol")
    meta.superClasses.add("cobra.model.fabric.ProtoDomPol")
    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.pol.Obj")
    meta.superClasses.add("cobra.model.pol.Def")
    meta.superClasses.add("cobra.model.fabric.L3DomPol")
    meta.rnPrefixes = [
        ('bgpCtxAfP-', True),
    ]

    # "af" — the naming property selecting the BGP address family.
    prop = PropMeta("str", "af", "af", 17566, PropCategory.REGULAR)
    prop.label = "Address Family"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    prop.defaultValue = 1
    prop.defaultValueStr = "ipv4-ucast"
    prop._addConstant("ipv4-ucast", "ipv4-unicast-address-family", 1)
    prop._addConstant("ipv6-ucast", "ipv6-unicast-address-family", 3)
    prop._addConstant("vpnv4-ucast", "vpnv4-unicast-address-family", 2)
    prop._addConstant("vpnv6-ucast", "vpnv6-unicast-address-family", 4)
    meta.props.add("af", prop)

    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
    prop.label = "Description"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("descr", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    # Administrative distances for eBGP / iBGP / local routes.
    prop = PropMeta("str", "eDist", "eDist", 17563, PropCategory.REGULAR)
    prop.label = "eBGP Distance"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(1, 255)]
    prop.defaultValue = 20
    prop.defaultValueStr = "20"
    meta.props.add("eDist", prop)

    prop = PropMeta("str", "iDist", "iDist", 17564, PropCategory.REGULAR)
    prop.label = "iBGP Distance"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(1, 255)]
    prop.defaultValue = 200
    prop.defaultValueStr = "200"
    meta.props.add("iDist", prop)

    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)

    prop = PropMeta("str", "localDist", "localDist", 17565, PropCategory.REGULAR)
    prop.label = "Local Distance"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(1, 255)]
    prop.defaultValue = 220
    prop.defaultValueStr = "220"
    meta.props.add("localDist", prop)

    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)

    prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)

    prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerKey", prop)

    prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerTag", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    # "af" is the sole naming property (appears in the RN format above).
    meta.namingProps.append(getattr(meta.props, "af"))

    def __init__(self, parentMoOrDn, af, markDirty=True, **creationProps):
        # af is the naming value embedded in this MO's relative name.
        namingVals = [af]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"collinsctk@qytang.com"
] |
collinsctk@qytang.com
|
197d249f49a3bf0f4bbe8e5c1e093ff2fd5d13c1
|
6f23adb3da803dda89e21cfa21a024a015ec1710
|
/2020/16-2.py
|
8883ed2bc121687d8e600f19d5385f5a9769ba9f
|
[] |
no_license
|
Remboooo/adventofcode
|
1478252bcb19c0dd19e4fa2effd355ee71a5d349
|
5647b8eddd0a3c7781a9c21019f6f06f6edc09bd
|
refs/heads/master
| 2022-12-15T10:21:29.219459
| 2022-12-13T23:02:03
| 2022-12-13T23:02:03
| 226,883,142
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,264
|
py
|
from argparse import ArgumentParser
from collections import defaultdict
from functools import reduce
from itertools import islice, count
from pprint import pprint
from util import timed
def parse_rules(f):
    """Read 'name: a-b or c-d' rule lines from *f* until a blank line.

    Returns a dict mapping each field name to a list of inclusive
    (low, high) range tuples.
    """
    parsed = {}
    for raw in f:
        stripped = raw.strip()
        if not stripped:
            break
        field, spec = stripped.split(": ", 1)
        ranges = []
        for part in spec.split(" or "):
            low, high = part.split("-", 1)
            ranges.append((int(low), int(high)))
        parsed[field] = ranges
    return parsed
def parse_my_ticket(f):
    """Parse the 'your ticket:' section: a header line, one CSV line of ints,
    and the trailing blank separator line.

    Returns the ticket as a tuple of ints; raises ValueError on a bad header.
    """
    header = f.readline().strip()
    if header != "your ticket:":
        raise ValueError("First line was not 'your ticket:'")
    ticket = tuple(int(field) for field in f.readline().split(','))
    f.readline()  # consume the blank separator line
    return ticket
def parse_tickets(f):
    """Yield each nearby ticket from *f* as a tuple of ints.

    The first line consumed must be the 'nearby tickets:' header;
    raises ValueError otherwise (on first iteration, as this is a generator).
    """
    header = f.readline().strip()
    if header != "nearby tickets:":
        raise ValueError("First line was not 'nearby tickets:'")
    for raw in f:
        yield tuple(int(field) for field in raw.strip().split(','))
@timed
def get_valid_tickets(nearby_tickets, rules):
    """Keep only the tickets whose every field lies in at least one rule range."""
    def field_ok(value):
        # A field is acceptable when any (low, high) range of any rule contains it.
        return any(low <= value <= high
                   for ranges in rules.values()
                   for low, high in ranges)

    return [ticket for ticket in nearby_tickets
            if all(field_ok(value) for value in ticket)]
@timed
def find_field_ids(nearby_tickets, rules):
    """Deduce which ticket column index belongs to each rule name.

    Returns a dict mapping rule/field name -> column index, derived from
    which columns satisfy that rule's ranges on every valid ticket.
    """
    field_ids = {}
    # For every field name in the rulebook, check which field IDs match its rules on all of the valid tickets
    for field_name, rule in rules.items():
        # Start by considering every possible field ID for this name
        possible_ids = set(range(len(nearby_tickets[0])))
        for ticket in nearby_tickets:
            # Prune the possible IDs for this field name by checking which field IDs match its rules on this ticket
            possible_ids &= {n for n, field in enumerate(ticket) if any(rlow <= field <= rhigh for rlow, rhigh in rule)}
        field_ids[field_name] = possible_ids
    # Some fields still have multiple possibilities after checking all of the tickets, but then others only have one,
    # so there's some overlap and we can eliminate the ambiguities.
    # I'm 99% sure this will not work in all possible cases, but it works for the test input and my puzzle input 🤷🏻
    field_ids = {
        name: next(
            fid for fid in pid
            if not any(
                # if there's another field with a shorter list of ID options that also contains this ID, skip it
                name != oname and len(opid) < len(pid) and fid in opid
                for oname, opid in field_ids.items()
            )
        )
        for name, pid in field_ids.items()
    }
    return field_ids
def main():
    """Entry point: parse the puzzle input and print the product of the
    'departure*' fields on my ticket (Advent of Code 2020 day 16, part 2)."""
    argparse = ArgumentParser()
    argparse.add_argument("file", nargs='?', type=str, default="16-input.txt")
    args = argparse.parse_args()

    with open(args.file, 'r') as f:
        rules = parse_rules(f)
        my_ticket = parse_my_ticket(f)
        nearby_tickets = list(parse_tickets(f))
        # Discard tickets that can't match any rule at all, then solve.
        nearby_tickets = get_valid_tickets(nearby_tickets, rules)
        field_ids = find_field_ids(nearby_tickets, rules)
        print(
            reduce(lambda a, b: a * b, (my_ticket[fid] for name, fid in field_ids.items() if name.startswith('departure')))
        )


if __name__ == '__main__':
    main()
|
[
"rembrand.vanlakwijk@nedap.com"
] |
rembrand.vanlakwijk@nedap.com
|
d9488e55773f53b084a0d709450f00dfefd69089
|
299fe2ca879e509798e95c00b7ba33914031f4a7
|
/eruditio/shared_apps/django_wizard/wizard.py
|
dbfab0dcfbd21b9bc03b532bad0f2afd2d52e2e6
|
[
"MIT"
] |
permissive
|
genghisu/eruditio
|
dcf2390c98d5d1a7c1044a9221bf319cb7d1f0f6
|
5f8f3b682ac28fd3f464e7a993c3988c1a49eb02
|
refs/heads/master
| 2021-01-10T11:15:28.230527
| 2010-04-23T21:13:01
| 2010-04-23T21:13:01
| 50,865,100
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,097
|
py
|
from django_wizard.models import ConfigOption, ConfigFixture, DefinedConfigOption
from django.core.exceptions import ObjectDoesNotExist
class ConfigIndex(object):
    """In-memory registry of ConfigOption rows, keyed by (app, name).

    register() lazily persists both the ConfigOption and a matching
    DefinedConfigOption (seeded from the option's default) on first sight.
    """

    def __init__(self):
        # (app, name) -> canonical ConfigOption instance
        self._registry = {}

    def register(self, configuration):
        # Only plain ConfigOption instances are indexed, and only once per (app, name).
        if configuration.__class__ == ConfigOption and not (configuration.app, configuration.name) in self._registry:
            try:
                existing_config = ConfigOption.objects.get(app = configuration.app, name = configuration.name)
            except ObjectDoesNotExist:
                # First time this option is seen anywhere: persist it.
                configuration.save()
                existing_config = configuration
            try:
                defined_config = DefinedConfigOption.objects.get(option__name = configuration.name, option__app = configuration.app)
            except ObjectDoesNotExist:
                # Seed the user-editable value with the option's declared default.
                defined_config = DefinedConfigOption(option = existing_config, value = configuration.default)
                defined_config.save()
            self._registry[(configuration.app, configuration.name)] = existing_config

    def unregister(self, configuration):
        # Delete the persisted option and forget it; unknown options are ignored.
        try:
            existing_config = ConfigOption.objects.get(app = configuration.app, name = configuration.name)
            existing_config.delete()
            del self._registry[(configuration.app, configuration.name)]
        except ObjectDoesNotExist:
            pass

    def clear_registry(self):
        # Drop only the in-memory index; database rows are left untouched.
        self._registry = {}
config_index = ConfigIndex()
class FixtureIndex(object):
    """In-memory registry of ConfigFixture rows, keyed by (app_label, module_name).

    Mirrors ConfigIndex.register: the fixture is persisted on first sight and
    the canonical (database-backed) instance is what gets indexed.
    """

    def __init__(self):
        # (app_label, module_name) -> canonical ConfigFixture instance
        self._registry = {}

    def register(self, fixture):
        if not (fixture.app_label, fixture.module_name) in self._registry:
            try:
                existing_fixture = ConfigFixture.objects.get(app_label = fixture.app_label, module_name = fixture.module_name)
            except ObjectDoesNotExist:
                # First time this fixture is seen anywhere: persist it.
                fixture.save()
                existing_fixture = fixture
            # Index the canonical instance. The original stored the raw
            # `fixture` argument, which could shadow an already-persisted row
            # fetched above — inconsistent with ConfigIndex.register.
            self._registry[(fixture.app_label, fixture.module_name)] = existing_fixture
fixtures_index = FixtureIndex()
|
[
"genghisu@6a795458-236b-11df-a5e4-cb4ff25536bb"
] |
genghisu@6a795458-236b-11df-a5e4-cb4ff25536bb
|
17c32a613daa0e013bfcaad2caa72d86e7343183
|
53b47cbfea75afd22f37a2a9c8af4573165a0515
|
/Week5/Assessment 1/algorithm/algo.py
|
aa58e2a50fecb31c7a089d08f9c8950556523934
|
[] |
no_license
|
bmolina-nyc/ByteAcademyWork
|
d757ed04033e23a4ec7aa8d09283f65b4cebcb17
|
b7a6790c2905afc9532b348149b730b7ea71de44
|
refs/heads/master
| 2022-12-06T19:17:02.164451
| 2019-03-11T15:31:10
| 2019-03-11T15:31:10
| 169,432,884
| 0
| 1
| null | 2022-11-18T15:08:12
| 2019-02-06T16:00:41
|
Python
|
UTF-8
|
Python
| false
| false
| 679
|
py
|
def sorting(MyList):
    """Move every 0 in *MyList* to the end, preserving the order of the
    other items.

    The list is modified in place and also returned, matching the original
    behaviour. Unlike the original loop-until-stable version, this is a
    single O(n) pass and terminates even when the list contains no zeros
    (the old implementation looped forever in that case, because
    ``MyList[-0:]`` is the whole list and never equals ``[]``).
    """
    non_zeros = [x for x in MyList if x != 0]
    # Slice-assign so callers holding a reference to the list see the result.
    MyList[:] = non_zeros + [0] * (len(MyList) - len(non_zeros))
    return MyList
# print(sorting([1, 0, 7, 2, 0, 3, 9, 0, 4]))
# [1, 7, 2, 3, 9, 4, 0, 0, 0]
if __name__ == "__main__":
    # Demo run; expected output: [1, 7, 2, 3, 9, 4, 0, 0, 0]
    print(sorting([1, 0, 7, 2, 0, 3, 9, 0, 4]))
|
[
"bruce.molina.81@gmail.com"
] |
bruce.molina.81@gmail.com
|
01893f13d23f63efc4f427a9eb781cbc09388785
|
dee345b10c7dc29dd6b0cac04677beef14f2d64f
|
/tests/test_manual_quality_merging.py
|
35dc41c621fefe22e33d69b77f397f562e697051
|
[
"MIT"
] |
permissive
|
richard-shepherd/calculation_graph
|
fcd0df6b0d4fc598586ee67c129ccc90b9cac383
|
647b1f13544e3525068c8b3b83a7eed3f7e473bd
|
refs/heads/master
| 2016-09-05T19:46:14.567122
| 2015-05-21T10:58:14
| 2015-05-21T10:58:14
| 31,436,445
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,491
|
py
|
from graph import *
class SourceNode(GraphNode):
    """
    A data source. Just a value with data-quality.
    """
    def __init__(self, source_name, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.source_name = source_name
        # Most recent value reported by the source...
        self.value = 0.0
        # Whether the source is currently healthy...
        self.source_is_good = False

    def set_value(self, value, source_is_good):
        """
        Records a new value and health flag for this source, and marks
        the node as needing recalculation.
        """
        self.value = value
        self.source_is_good = source_is_good
        self.needs_calculation()

    def calculate(self):
        """
        Derives this node's data-quality from the health flag.
        """
        self.quality.clear_to_good()
        if self.source_is_good is False:
            self.quality.merge(Quality.BAD, "Source " + self.source_name + " is bad")
        return GraphNode.CalculateChildrenType.CALCULATE_CHILDREN
class SourceChooserNode(GraphNode):
    """
    Chooses between two of the SourceNodes above, depending on
    their data-quality.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The two parent sources we choose between...
        self.source_A_node = None
        self.source_B_node = None
        # Value copied from whichever parent is currently good...
        self.value = 0.0

    def set_dependencies(self):
        """
        Registers the two source parents.
        """
        self.source_A_node = self.add_parent_node(SourceNode, "A")
        self.source_B_node = self.add_parent_node(SourceNode, "B")

    def calculate_quality(self):
        """
        Disables the framework's automatic quality merging; calculate()
        sets this node's quality itself from the chosen parent.
        """
        pass

    def calculate(self):
        """
        Copies value and quality from the first parent with Good
        data-quality (preferring A); goes Bad if neither is Good.
        """
        if self.source_A_node.quality.is_good():
            chosen = self.source_A_node
        elif self.source_B_node.quality.is_good():
            chosen = self.source_B_node
        else:
            chosen = None

        if chosen is not None:
            self.value = chosen.value
            self.quality.set_from(chosen.quality)
        else:
            # Neither source has good data...
            self.value = 0.0
            self.quality.set_to_bad("No source has Good data")
        return GraphNode.CalculateChildrenType.CALCULATE_CHILDREN
def test_manual_quality_merging():
    """
    Tests manual merging of quality from parent nodes.

    The graph for this test has a "redundant" data source. The test node
    has two parents A and B. It chooses which ever one of them has good
    quality.

    So in this case, we do not want to automatically merge quality, as
    otherwise if one of the parents goes Bad, the "choosing" node would
    go bad as well. In this case, as long as one of the parents is Good,
    then the choosing node will be Good as well.
    """
    graph_manager = GraphManager()

    # We create the sources before the chooser, so we can set their values...
    source_A_node = NodeFactory.get_node(graph_manager, GraphNode.GCType.NON_COLLECTABLE, SourceNode, "A")
    source_B_node = NodeFactory.get_node(graph_manager, GraphNode.GCType.NON_COLLECTABLE, SourceNode, "B")

    # We create a node to choose between the two sources above...
    chooser_node = NodeFactory.get_node(graph_manager, GraphNode.GCType.NON_COLLECTABLE, SourceChooserNode)

    # We set both sources to have Good data-quality. The value from source A
    # is chosen when both are good...
    source_A_node.set_value(123.0, source_is_good=True)
    source_B_node.set_value(456.0, source_is_good=True)
    graph_manager.calculate()
    assert chooser_node.value == 123.0
    assert chooser_node.quality.is_good()
    assert chooser_node.quality.get_description() == ""

    # We set source B bad. The value from A should still be used...
    source_B_node.set_value(457.0, source_is_good=False)
    graph_manager.calculate()
    assert chooser_node.value == 123.0
    assert chooser_node.quality.is_good()
    assert chooser_node.quality.get_description() == ""

    # We set source A bad as well... (now neither parent is usable)
    source_A_node.set_value(124.0, source_is_good=False)
    graph_manager.calculate()
    assert chooser_node.value == 0.0
    assert chooser_node.quality.is_good() is False
    assert "No source has Good data" in chooser_node.quality.get_description()

    # We set source B Good... (B is the only good parent, so B wins)
    source_B_node.set_value(567.0, source_is_good=True)
    graph_manager.calculate()
    assert chooser_node.value == 567.0
    assert chooser_node.quality.is_good() is True
    assert chooser_node.quality.get_description() == ""

    # We set source A Good... (A is preferred over B when both are good)
    source_A_node.set_value(321.0, source_is_good=True)
    graph_manager.calculate()
    assert chooser_node.value == 321.0
    assert chooser_node.quality.is_good() is True
    assert chooser_node.quality.get_description() == ""

    # We update A...
    source_A_node.set_value(432.0, source_is_good=True)
    graph_manager.calculate()
    assert chooser_node.value == 432.0
    assert chooser_node.quality.is_good() is True
    assert chooser_node.quality.get_description() == ""
|
[
"richard.s.shepherd@gmail.com"
] |
richard.s.shepherd@gmail.com
|
b8124490b623a6c5b281a10cce0cc972f2334d95
|
9017f217abe077aff77f64938a988fcc4a292e40
|
/plate/common/syntax_highlighting.py
|
aa0484c12f2fad1de5134b9f34f2332e331f0d6d
|
[
"Apache-2.0"
] |
permissive
|
gogit/plate
|
c8c47d47de2b11d5c7b4840106181bb177b50c88
|
2e5fdb1ddfad560986b429cf2ff92aed4d35e56c
|
refs/heads/master
| 2021-01-18T07:18:52.770779
| 2016-04-08T11:45:00
| 2016-04-08T11:45:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 804
|
py
|
# -*- coding:utf-8 -*-
def syntax_highlight(lang, code):
    """
    code highlighting HTML Format
    :param lang: programming language (a Pygments lexer alias, case-insensitive)
    :param code: code
    :return: highlighted code as an HTML fragment wrapped in a <pre> tag
    :raises pygments.util.ClassNotFound: if *lang* has no matching lexer

    The original wrapped everything in ``except Exception as e: raise e``,
    which handles nothing and only rewrote the traceback origin; the
    exceptions propagate unchanged without it.
    """
    from pygments import lexers
    from pygments import highlight
    from pygments.formatters import HtmlFormatter

    lexer = lexers.get_lexer_by_name(lang.lower())
    highlighted = highlight(code, lexer, HtmlFormatter())
    # Inject the language name into Pygments' '"highlight"' CSS class so it
    # becomes 'highlight <lang>'.
    splitted = highlighted.split('"highlight')
    highlighted = splitted[0] + '"highlight '+lang + splitted[1]
    # Drop the inner <pre> and turn the outer <div> into a <pre>.
    # NOTE(review): these are plain substring replaces, so highlighted code
    # that literally contains "<pre>" or "div" would be mangled too.
    highlighted = highlighted.replace("<pre>", "")
    highlighted = highlighted.replace("</pre>", "")
    highlighted = highlighted.replace("div", "pre")
    return highlighted
|
[
"sh84.ahn@gmail.com"
] |
sh84.ahn@gmail.com
|
cf778af0af1dddef7a128d1e74c241f6d5102ed0
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-3/e5f4987a8fe1b2a1907436c11e0c5ae9ae6b12b3-<mandatory>-fix.py
|
a5a74f66992fb6dfef2b09ed691874a499fea364
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
def mandatory(a):
    from jinja2.runtime import Undefined
    ' Make a variable mandatory '
    # Undefined means the template variable was never set; fail loudly
    # instead of letting Jinja2 render an empty value.
    if isinstance(a, Undefined):
        if (a._undefined_name is not None):
            # to_text is presumably supplied by the enclosing Ansible module
            # scope -- not visible in this snippet, TODO confirm.
            name = ("'%s' " % to_text(a._undefined_name))
        else:
            name = ''
        # AnsibleFilterError likewise comes from the enclosing module scope.
        raise AnsibleFilterError(('Mandatory variable %snot defined.' % name))
    return a
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
b32201fe4bcba5b5044dd43bd63144b156758276
|
8f5c7f28703b274163c2832f6511025e37b4295f
|
/helloworld.com/reviews/migrations/0001_initial.py
|
98cc1576bfc65b810dcb3c4f295628f97c64f0c6
|
[] |
no_license
|
reinaaa05/portfolio
|
159dc4d48b3e215bfb6c7115cd39b7f63ee2418a
|
e93189e3aa027e57bac490d8874519dd7f717620
|
refs/heads/main
| 2023-05-15T09:56:30.741402
| 2021-06-12T13:46:44
| 2021-06-12T13:46:44
| 307,375,148
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,024
|
py
|
# Generated by Django 3.1.7 on 2021-03-19 05:50
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated by Django 3.1.7: creates the initial ReviewsConfig table.
    # The Japanese verbose_name strings are runtime admin-UI labels (not
    # comments) and must stay as-is.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='ReviewsConfig',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('store_name', models.CharField(max_length=255, verbose_name='店名')),
                ('title', models.CharField(max_length=255, verbose_name='タイトル')),
                ('text', models.TextField(blank=True, verbose_name='口コミテキスト')),
                ('stars', models.IntegerField(choices=[(1, '☆'), (2, '☆☆'), (3, '☆☆☆'), (4, '☆☆☆☆'), (5, '☆☆☆☆☆')], verbose_name='星の数')),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now, verbose_name='作成日')),
            ],
        ),
    ]
|
[
"Reinaaa0555@gmail.com"
] |
Reinaaa0555@gmail.com
|
afc502fd894e0319fb56f6217f21a3b934829d0c
|
e4045e99ae5395ce5369a1374a20eae38fd5179b
|
/db/add_emp.py
|
07ba831ed9d733fb43393f8841ade51ce422921f
|
[] |
no_license
|
srikanthpragada/09_MAR_2018_PYTHON_DEMO
|
74fdb54004ab82b62f68c9190fe868f3c2961ec0
|
8684137c77d04701f226e1e2741a7faf9eeef086
|
refs/heads/master
| 2021-09-11T15:52:17.715078
| 2018-04-09T15:29:16
| 2018-04-09T15:29:16
| 124,910,054
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
import sqlite3

# Interactive script: insert one employee row into the hr.db emp table.
con = None  # so the finally block is safe even if connect() itself fails
try:
    con = sqlite3.connect(r"e:\classroom\python\hr.db")
    cur = con.cursor()

    # take input from user
    ename = input("Enter name :")
    salary = input("Enter salary : ")
    dept = input("Enter dept id :")

    # get next emp id
    cur.execute("select max(empid) + 1 from emp")
    empid = cur.fetchone()[0]

    cur.execute("insert into emp values(?,?,?,?)", (empid, ename, salary, dept))
    con.commit()
    print("Added Employee")
except Exception as ex:
    print("Error : ", ex)
finally:
    # The original unconditionally called con.close(); that raised a
    # NameError (masking the real error) whenever connect() failed.
    if con is not None:
        con.close()
|
[
"srikanthpragada@gmail.com"
] |
srikanthpragada@gmail.com
|
19bf93fb7f263d11e77e96002fe5f58d107ffb35
|
382e308f433dd3b2c2601568f480be30a704e7d7
|
/Django 실습/sample_community/board/views.py
|
94525375f9d3f0d7adf021b9bb0a2d913286ea95
|
[] |
no_license
|
5d5ng/LACUC-capstone1
|
29240f4109d397ceab3ad7bb771cbcdf69cb944c
|
01b0a1136dab592b778ac99c346c318d3c6ed30f
|
refs/heads/master
| 2022-12-03T15:57:55.804687
| 2019-11-18T09:44:04
| 2019-11-18T09:44:04
| 211,851,523
| 0
| 1
| null | 2022-11-17T07:05:21
| 2019-09-30T12:11:32
|
Python
|
UTF-8
|
Python
| false
| false
| 453
|
py
|
from django.shortcuts import render
from .models import Board
from .forms import BoardForm
# Create your views here.
def board_write(request):
    """Render the post-creation page with an empty BoardForm."""
    form = BoardForm()
    return render(request, 'board_write.html', {'form': form})
def board_list(request):
    """Render the board index with every post, newest first."""
    boards = Board.objects.all().order_by('-id')  # fetch all posts, newest (highest id) first
    return render(request, 'board_list.html', {'boards': boards})  # hand them to the template
|
[
"deo1915@gmail.com"
] |
deo1915@gmail.com
|
ae8e11dbf700e8b547f3301a18102059e7cdabf8
|
54bb9ba6d507cd25b2c2ac553665bc5fc95280d1
|
/src/onegov/file/__init__.py
|
7cc5a6d0232f51b69f22604b6201246450e833ec
|
[
"MIT"
] |
permissive
|
href/onegov-cloud
|
9ff736d968979380edba266b6eba0e9096438397
|
bb292e8e0fb60fd1cd4e11b0196fbeff1a66e079
|
refs/heads/master
| 2020-12-22T07:59:13.691431
| 2020-01-28T08:51:54
| 2020-01-28T08:51:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
from onegov.file.collection import FileCollection, FileSetCollection
from onegov.file.integration import DepotApp
from onegov.file.models import (
File,
FileSet,
AssociatedFiles,
SearchableFile
)
__all__ = (
'AssociatedFiles',
'DepotApp',
'File',
'FileCollection',
'FileSet',
'FileSetCollection',
'SearchableFile',
)
|
[
"denis.krienbuehl@seantis.ch"
] |
denis.krienbuehl@seantis.ch
|
5494da1fde51e2b036cfae84db3b9f33a86c2556
|
931926968461bbe8fc6295d4f5b702c5de99c231
|
/paper/plot_cifar10_confusion_diff.py
|
f9a3028e134b9ca9deca51fdf7202d96223084c2
|
[] |
no_license
|
annaproxy/modules
|
93315ce684bdda4fb7a34a518ac2154e506a6579
|
771e1fa49edd2f237883842f741ea1d8ce1fccdc
|
refs/heads/master
| 2022-12-27T22:27:39.408250
| 2020-10-06T10:30:22
| 2020-10-06T10:30:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,606
|
py
|
#!/usr/bin/env python3
import os
import lib
from lib import StatTracker
import torch
import shutil
import matplotlib.pyplot as plt
import numpy as np
import itertools
from mpl_toolkits.axes_grid1 import make_axes_locatable
from lib.common import group
BASE_DIR = "out/cifar10_confusion/"
shutil.rmtree(BASE_DIR, ignore_errors=True)
def draw(runs, name):
    """Download the confusion-matrix exports of *runs*, average them per file,
    and save mean±std heatmap PDFs under out/cifar10_confusion/<name>/.

    :param runs: iterable of run objects exposing .files() with downloadable files
    :param name: subdirectory name for this experiment group
    """
    VER_DIR = f"{BASE_DIR}/{name}/download/"
    os.makedirs(VER_DIR, exist_ok=True)

    def draw_confusion(means: np.ndarray, std: np.ndarray):
        # Render a mean±std confusion matrix as an annotated heatmap figure.
        print("MEAN", means)
        figure = plt.figure(figsize=[7,3])#means.shape)
        ax = plt.gca()

        #, vmin = -65, vmax = 65
        im = plt.imshow(means, interpolation='nearest', cmap=plt.cm.viridis, aspect='auto')

        x_marks = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
        assert len(x_marks) == means.shape[1]

        y_marks = x_marks
        assert len(y_marks) == means.shape[0]

        plt.xticks(np.arange(means.shape[1]), x_marks, rotation=45)
        plt.yticks(np.arange(means.shape[0]), y_marks)

        # Use white text if squares are dark; otherwise black.
        threshold = (means.max() + means.min()) / 2.
        print("THRESHOLD", threshold)
        # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin int is the documented replacement and behaves identically here.
        rmap = np.round(means).astype(int)
        std = np.round(std).astype(int)
        for i, j in itertools.product(range(means.shape[0]), range(means.shape[1])):
            color = "white" if means[i, j] < threshold else "black"
            plt.text(j, i, f"${rmap[i, j]}\\pm{std[i,j]}$", ha="center", va="center", color=color)

        plt.ylabel("True label", labelpad=-10)
        plt.xlabel("Predicted label", labelpad=-10)

        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="2%", pad=0.1)
        plt.colorbar(im, cax)
        return figure

    def create_trackers(runs):
        # Download every export*/confusion* file of every run and accumulate
        # each filename's matrices in its own StatTracker.
        trackers = {}
        for i_run, run in enumerate(runs):
            for f in run.files(per_page=10000):
                if not f.name.startswith("export") or "/confusion" not in f.name:
                    continue

                if f.name not in trackers:
                    trackers[f.name] = StatTracker()

                full_name = os.path.join(VER_DIR, f.name)
                print(f"Downloading {full_name}")
                f.download(root=VER_DIR, replace=True)

                data = torch.load(full_name)
                data = data.astype(np.float32)
                if "confusion_difference" not in f.name:
                    # Row-normalize raw confusion counts before scaling to percent.
                    data = data / np.sum(data, axis=1, keepdims=True)
                data = data * 100

                trackers[f.name].add(data)
        return trackers

    trackers = create_trackers(runs)
    for k, v in trackers.items():
        s = v.get()
        figure = draw_confusion(s.mean, s.std)

        prefix = f"out/cifar10_confusion/{name}/"
        # Renamed from `dir`, which shadowed the builtin.
        out_dir = os.path.join(prefix, os.path.dirname(k))
        os.makedirs(out_dir, exist_ok=True)
        figure.savefig(f"{prefix}/{k}.pdf", bbox_inches='tight', pad_inches = 0.01)
        plt.close()
# Generate the figures for each experiment group.
draw(lib.get_runs(["cifar10_no_dropout"]), "no_dropout")
draw(lib.get_runs(["cifar10"]), "with_dropout")
draw(lib.get_runs(["cifar10_resnet"]), "resnet")
|
[
"xdever@gmail.com"
] |
xdever@gmail.com
|
74bc86c8f16604ca3cd74876f70d09cfaef95070
|
a568e4dc461f71f0ae053fe51e3ddd0fe23bf858
|
/development/index_site.py
|
1789373a554d1f41d08b10458f9e08a08425dac8
|
[
"MIT"
] |
permissive
|
vatlab/sos-docs
|
413e344a7581e4e2cef5da3d24345a73f3669c43
|
2b42c280dae0feaeea51161041827c362abe6db0
|
refs/heads/master
| 2023-06-26T04:30:59.078944
| 2023-06-16T20:26:39
| 2023-06-16T20:26:39
| 105,951,462
| 3
| 15
|
MIT
| 2023-06-16T20:18:39
| 2017-10-05T23:46:39
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,206
|
py
|
#!/usr/bin/env python3
#
# Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
#
import os
import glob
import re
import argparse
from bs4 import BeautifulSoup
'''
A simple script to create tipue content by searching for documentation
files under the top docs directory of the SoS website.
'''
def parse_html(url, html):
    """Extract one tipue-search JSON entry (as a string) per h1-h6 header.

    :param url: public URL of the page the file was rendered from
    :param html: path of the local HTML file to parse
    :return: list of JSON-object strings, one per header found
    """
    print('Parsing {}'.format(html))
    with open(html, 'rb') as content:
        soup = BeautifulSoup(content, "html.parser", from_encoding='utf-8')
    #
    # try to get the title of the page from h1, h2, or title, and
    # uses filename if none of them exists.
    #
    title = soup.find('h1')
    if title is None:
        title = soup.find('h2')
    if title is None:
        title = soup.find('title')
    if title is None:
        title = os.path.basename(html).rsplit('.')[0]
    else:
        title = title.get_text()
    maintitle = soup.find('h1')
    if maintitle is None:
        maintitle = soup.find('h2')
    if maintitle is None:
        maintitle = soup.find('title')
    if maintitle is None:
        maintitle = os.path.basename(html).rsplit('.')[0]
    else:
        maintitle = maintitle.get_text()
    # remove special characters which might mess up js file
    # FIX: '^' must be the FIRST character inside the class to negate it; as
    # previously written (r'[¶^a-zA-Z0-9_\.\-]') the sub blanked exactly the
    # characters that were meant to be kept.
    # NOTE(review): `title` is not referenced below -- kept for compatibility.
    title = re.sub(r'[^¶a-zA-Z0-9_\.\-]', ' ', title)
    #
    # Build one JSON entry per header, linking to the header's anchor.
    all_text = []
    for header in soup.find_all(re.compile('^h[1-6]$')):
        # remove special character
        part = re.sub(r'[^a-zA-Z0-9_\-=\'".,\\]', ' ',
                      header.get_text()).replace('"', "'").strip() + "\n"
        part = re.sub(r'\s+', ' ', part)
        ids = [x for x in header.findAll('a') if x.get('id')]
        if ids:
            tag = '#' + ids[0].get('id')
        else:
            hrefs = header.findAll('a', {'class': 'anchor-link'})
            if hrefs:
                tag = hrefs[0].get('href')
            else:
                tag = ''
        part = '{{"mainTitle": "{}", "title": "{}", "text": "{}", "tags": "", "mainUrl": "{}", "url": "{}"}}'.format(
            maintitle.replace('¶', '').strip(),
            header.get_text().replace('¶', '').replace('"', r'\"').strip(),
            part, url, url + tag.replace('"', r'\"'))
        all_text.append(part)
    return all_text
def generate_tipue_content(docs_dir):
    """Build tipuesearch/tipuesearch_content.js from the site's HTML pages.

    :param docs_dir: top-level docs directory containing the rendered HTML
    """
    # get a list of html files and their url
    documentations = glob.glob(
        os.path.join(docs_dir, 'doc', 'user_guide', '*.html'))

    # One list of JSON-entry strings per page: the four fixed top-level pages
    # plus every user-guide page.
    text = [
        parse_html(url, html)
        for (url, html) in [('https://vatlab.github.io/sos-docs/',
                             os.path.join(docs_dir, 'index.html')),
                            ('https://vatlab.github.io/sos-docs/running.html',
                             os.path.join(docs_dir, 'running.html')),
                            ('https://vatlab.github.io/sos-docs/notebook.html',
                             os.path.join(docs_dir, 'notebook.html')),
                            ('https://vatlab.github.io/sos-docs/workflow.html',
                             os.path.join(docs_dir, 'workflow.html'))] +
                           [('https://vatlab.github.io/sos-docs/doc/user_guide/{}'.format(
                               os.path.basename(x)), x) for x in documentations]
    ]
    # write the output to file.
    with open(
            os.path.join(docs_dir, 'tipuesearch', 'tipuesearch_content.js'),
            'w') as out:
        out.write('''\
var tipuesearch = {{"pages": [
{}
]}};
'''.format(',\n'.join(sum(text, []))))
if __name__ == '__main__':
    # Command-line entry point: index the docs directory given as the argument.
    parser = argparse.ArgumentParser(description='Index SoS website')
    parser.add_argument(
        'docs_dir',
        metavar='DIR',
        help='''Path of the top SoS docs directory. This script will parse content of
HTML files under $DOC_DIR (e.g. Overview.html, /doc/documentation/*.html), get
the headers of the files, and write the results in $DOC_DIR/tipuesearch_content.hs
''')
    args = parser.parse_args()
    generate_tipue_content(args.docs_dir)
|
[
"ben.bog@gmail.com"
] |
ben.bog@gmail.com
|
ea2793abe07a25467fb61292b764ddc1f7d4ac4c
|
68263c011d12b19d6ff17f0f2420fe497ef28fc2
|
/api/tacticalrmm/core/views.py
|
93d53c22c421640a769e0772dfab93f3222aa002
|
[
"MIT"
] |
permissive
|
bradhawkins85/tacticalrmm
|
79ec6f003b559c96d15a5bd0621a2e968d2ea53d
|
4371f270569a6eb094dda834f2d1b14ed62af5e4
|
refs/heads/develop
| 2023-05-21T13:19:47.187899
| 2020-09-02T18:52:40
| 2020-09-02T18:52:40
| 292,421,792
| 0
| 0
|
MIT
| 2021-05-05T05:55:52
| 2020-09-03T00:06:11
| null |
UTF-8
|
Python
| false
| false
| 1,723
|
py
|
import os
from django.conf import settings
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from rest_framework.exceptions import ParseError
from rest_framework.parsers import FileUploadParser
from rest_framework.views import APIView
from .models import CoreSettings
from .serializers import CoreSettingsSerializer
from tacticalrmm.utils import notify_error
class UploadMeshAgent(APIView):
    """Accepts a meshagent.exe file upload and stores it in settings.EXE_DIR."""

    parser_class = (FileUploadParser,)

    def put(self, request, format=None):
        # The multipart field must be named 'meshagent'.
        if "meshagent" not in request.data:
            raise ParseError("Empty content")

        f = request.data["meshagent"]
        mesh_exe = os.path.join(settings.EXE_DIR, "meshagent.exe")
        # Stream the upload to disk chunk by chunk instead of buffering it all.
        with open(mesh_exe, "wb+") as j:
            for chunk in f.chunks():
                j.write(chunk)
        return Response(status=status.HTTP_201_CREATED)
@api_view()
def get_core_settings(request):
    """Return the serialized singleton CoreSettings row."""
    # Distinct local name: `settings` would shadow the module-level
    # django.conf settings import used elsewhere in this file.
    core_settings = CoreSettings.objects.first()
    return Response(CoreSettingsSerializer(core_settings).data)
@api_view(["PATCH"])
def edit_settings(request):
    """Update the singleton CoreSettings row from the request body."""
    # Distinct local name: `settings` would shadow the module-level
    # django.conf settings import used elsewhere in this file.
    core_settings = CoreSettings.objects.first()
    serializer = CoreSettingsSerializer(instance=core_settings, data=request.data)
    serializer.is_valid(raise_exception=True)
    serializer.save()
    return Response("ok")
@api_view()
def version(request):
    """Return the running application version (APP_VER from django settings)."""
    return Response(settings.APP_VER)
@api_view()
def email_test(request):
    """Send a test email with the stored settings; report the error text on failure."""
    core = CoreSettings.objects.first()
    r = core.send_mail(
        subject="Test from Tactical RMM", body="This is a test message", test=True
    )

    # send_mail signals failure by returning an error string. The original
    # `not isinstance(r, bool) and isinstance(r, str)` double-check was
    # redundant: a str instance is never a bool instance.
    if isinstance(r, str):
        return notify_error(r)

    return Response("Email Test OK!")
|
[
"dcparsi@gmail.com"
] |
dcparsi@gmail.com
|
de8074fe4170e2bd14801b70bceb614046f97b3e
|
4b68243d9db908945ee500174a8a12be27d150f9
|
/pogoprotos/settings/trading_global_settings_pb2.py
|
d2d9db44611b0f5eebc2b3e22c3a68f670146ab3
|
[] |
no_license
|
ykram/pogoprotos-py
|
7285c86498f57dcbbec8e6c947597e82b2518d80
|
a045b0140740625d9a19ded53ece385a16c4ad4a
|
refs/heads/master
| 2020-04-20T10:19:51.628964
| 2019-02-02T02:58:03
| 2019-02-02T02:58:03
| 168,787,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 2,630
|
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/settings/trading_global_settings.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/settings/trading_global_settings.proto',
package='pogoprotos.settings',
syntax='proto3',
serialized_pb=_b('\n1pogoprotos/settings/trading_global_settings.proto\x12\x13pogoprotos.settings\"I\n\x15TradingGlobalSettings\x12\x16\n\x0e\x65nable_trading\x18\x01 \x01(\x08\x12\x18\n\x10min_player_level\x18\x02 \x01(\rb\x06proto3')
)
_TRADINGGLOBALSETTINGS = _descriptor.Descriptor(
name='TradingGlobalSettings',
full_name='pogoprotos.settings.TradingGlobalSettings',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='enable_trading', full_name='pogoprotos.settings.TradingGlobalSettings.enable_trading', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_player_level', full_name='pogoprotos.settings.TradingGlobalSettings.min_player_level', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=74,
serialized_end=147,
)
DESCRIPTOR.message_types_by_name['TradingGlobalSettings'] = _TRADINGGLOBALSETTINGS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TradingGlobalSettings = _reflection.GeneratedProtocolMessageType('TradingGlobalSettings', (_message.Message,), dict(
DESCRIPTOR = _TRADINGGLOBALSETTINGS,
__module__ = 'pogoprotos.settings.trading_global_settings_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.settings.TradingGlobalSettings)
))
_sym_db.RegisterMessage(TradingGlobalSettings)
# @@protoc_insertion_point(module_scope)
|
[
"mark@noffle.net"
] |
mark@noffle.net
|
3dcc56e34644f42ea06d92fb7188107801b668d2
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/KCB_YCHF/KCB_YCHF_MM/YZYQ/yzyq_144.py
|
83432d061fadacfc51640039e13e534672cfe407
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,038
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class yzyq_144(xtp_test_case):
    # yzyq_144
    def test_yzyq_144(self):
        # Log title (runtime string, intentionally left in Chinese): "Default 3:
        # order price within the limit band - SH A-share reverse-best-to-limit
        # buy at limit-up price, restart oms".
        title = '默认3:订单报价未超过涨跌幅限制-沪A对手方最优转限价买入=涨停价 重启oms'

        # Expected values for this test case.
        # Possible expected states: initial, unfilled, partially filled, fully
        # filled, partial-cancel reported, partially cancelled, cancel pending,
        # cancelled, rejected, cancel-rejected, internally cancelled.
        # xtp_ID and cancel_xtpID default to 0 and need no change.
        # (Dict keys below are runtime strings consumed by the framework --
        # they must stay in Chinese.)
        case_goal = {
            '期望状态': '全成',
            'errorID': 0,
            'errorMSG': '',
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)

        # Build the order parameters.
        # Args: ticker, market, security type, security status, trading status,
        # side (B = buy, S = sell), expected state, Api.
        stkparm = QueryStkPriceQty('688000', '1', '4', '2', '0', 'B', case_goal['期望状态'], Api)

        # If fetching the order parameters failed, the case fails.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            self.assertEqual(rs['用例测试结果'], True)
        else:
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':2,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_REVERSE_BEST_LIMIT'],
                'price': stkparm['涨停价'],
                'quantity': 300,
                'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = serviceTest(Api, case_goal, wt_reqs)
            # Log "execution result / error source / error reason" (runtime strings).
            logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            self.assertEqual(rs['用例测试结果'], True)  # 0
if __name__ == '__main__':
unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
61bc779a1546701f5153a4a6635e4020d619e8cb
|
22c5fc7dd52149ebd4338a487ae9ab0db0e43f01
|
/tests/test_dynunet.py
|
39371c0e1dc31ecb08ea630eefe8443f705fb731
|
[
"Apache-2.0"
] |
permissive
|
precision-medicine-um/MONAI-Deep_Learning
|
3d3f547dd9815152561a6853f8d4727b0e5ca4c4
|
d94c4d3a2c465717ba3fae01b7acea7fada9885b
|
refs/heads/master
| 2022-12-28T07:04:07.768415
| 2020-10-17T13:11:56
| 2020-10-17T13:11:56
| 305,346,962
| 3
| 0
|
Apache-2.0
| 2022-12-27T15:44:13
| 2020-10-19T10:30:07
|
Python
|
UTF-8
|
Python
| false
| false
| 5,036
|
py
|
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Any, Sequence, Union
import torch
from parameterized import parameterized
from monai.networks.nets import DynUNet
strides: Sequence[Union[Sequence[int], int]]
kernel_size: Sequence[Any]
expected_shape: Sequence[Any]
TEST_CASE_DYNUNET_2D = []
for kernel_size in [(3, 3, 3, 1), ((3, 1), 1, (3, 3), (1, 1))]:
for strides in [(1, 1, 1, 1), (2, 2, 2, 1)]:
for in_channels in [2, 3]:
for res_block in [True, False]:
out_channels = 2
in_size = 64
spatial_dims = 2
expected_shape = (1, out_channels, *[in_size // strides[0]] * spatial_dims)
test_case = [
{
"spatial_dims": spatial_dims,
"in_channels": in_channels,
"out_channels": out_channels,
"kernel_size": kernel_size,
"strides": strides,
"upsample_kernel_size": strides[1:],
"norm_name": "batch",
"deep_supervision": False,
"res_block": res_block,
},
torch.randn(1, in_channels, in_size, in_size),
expected_shape,
]
TEST_CASE_DYNUNET_2D.append(test_case)
TEST_CASE_DYNUNET_3D = [] # in 3d cases, also test anisotropic kernel/strides
for out_channels in [2, 3]:
for res_block in [True, False]:
in_channels = 1
in_size = 64
expected_shape = (1, out_channels, 64, 32, 64)
test_case = [
{
"spatial_dims": 3,
"in_channels": in_channels,
"out_channels": out_channels,
"kernel_size": (3, (1, 1, 3), 3, 3),
"strides": ((1, 2, 1), 2, 2, 1),
"upsample_kernel_size": (2, 2, 1),
"norm_name": "instance",
"deep_supervision": False,
"res_block": res_block,
},
torch.randn(1, in_channels, in_size, in_size, in_size),
expected_shape,
]
TEST_CASE_DYNUNET_3D.append(test_case)
TEST_CASE_DEEP_SUPERVISION = []
for spatial_dims in [2, 3]:
for res_block in [True, False]:
for deep_supr_num in [1, 2]:
for strides in [(1, 2, 1, 2, 1), (2, 2, 2, 1), (2, 1, 1, 2, 2)]:
test_case = [
{
"spatial_dims": spatial_dims,
"in_channels": 1,
"out_channels": 2,
"kernel_size": [3] * len(strides),
"strides": strides,
"upsample_kernel_size": strides[1:],
"norm_name": "group",
"deep_supervision": True,
"deep_supr_num": deep_supr_num,
"res_block": res_block,
},
torch.randn(1, 1, *[in_size] * spatial_dims),
]
scale = 1
all_expected_shapes = []
for stride in strides[: 1 + deep_supr_num]:
scale *= stride
deep_out_shape = (1, 2, *[in_size // scale] * spatial_dims)
all_expected_shapes.append(deep_out_shape)
test_case.append(all_expected_shapes)
TEST_CASE_DEEP_SUPERVISION.append(test_case)
class TestDynUNet(unittest.TestCase):
@parameterized.expand(TEST_CASE_DYNUNET_2D + TEST_CASE_DYNUNET_3D)
def test_shape(self, input_param, input_data, expected_shape):
net = DynUNet(**input_param)
net.eval()
with torch.no_grad():
result = net(input_data)
self.assertEqual(result.shape, expected_shape)
class TestDynUNetDeepSupervision(unittest.TestCase):
@parameterized.expand(TEST_CASE_DEEP_SUPERVISION)
def test_shape(self, input_param, input_data, expected_shape):
net = DynUNet(**input_param)
with torch.no_grad():
results = net(input_data)
self.assertEqual(len(results), len(expected_shape))
for idx in range(len(results)):
result, sub_expected_shape = results[idx], expected_shape[idx]
self.assertEqual(result.shape, sub_expected_shape)
if __name__ == "__main__":
unittest.main()
|
[
"noreply@github.com"
] |
precision-medicine-um.noreply@github.com
|
762eb5522286793c28ee067dc804473cca9f7b95
|
801f367bd19b8f2ab08669fd0a85aad7ace961ac
|
/project/experiments/exp_025_pns_start_identity/src/tmp_which_nodes_are_slow_results.py
|
2b351dbe039f6593b70e34ad3375078ad22ad97b
|
[
"MIT"
] |
permissive
|
Wendong-Huo/thesis-bodies
|
d91b694a6b1b6a911476573ed1ed27eb27fb000d
|
dceb8a36efd2cefc611f6749a52b56b9d3572f7a
|
refs/heads/main
| 2023-04-17T18:32:38.541537
| 2021-03-12T19:53:23
| 2021-03-12T19:53:23
| 623,471,326
| 1
| 0
| null | 2023-04-04T12:45:48
| 2023-04-04T12:45:47
| null |
UTF-8
|
Python
| false
| false
| 2,040
|
py
|
import pandas as pd
with open("output_data/tmp/which_nodes_are_slow.txt", "r") as f:
grep_results = f.readlines()
for idx, line in enumerate(grep_results):
if "1785959" in line:
print(grep_results[idx-1])
print(line)
break
# exit(0)
l = len("output_data/tensorboard/")
df_results = pd.read_pickle("output_data/tmp/which_nodes_are_slow")
df_results["node"] = ""
df_results["num_bodies"] = 0
for idx_df, row in df_results.iterrows():
path = row["path"][l:]
df_results.at[idx_df, "path"] = path
df_results.at[idx_df, "num_bodies"] = len(path.split("-"))-3
node = ""
for idx, line in enumerate(grep_results):
if path in line:
job_id = line[:7]
if int(job_id)<1785585 or int(job_id)>1786224:
continue # I started exp_012 several times
_tmp = grep_results[idx-1].split(":")[-1]
node = _tmp.split(".")[0]
break
if node=="":
print("not found.")
else:
df_results.at[idx_df, "node"] = node
df_results = df_results.sort_values(by="node")
df_results.to_csv("output_data/tmp/who_slow.csv")
# df_results = df_results[df_results["path"].str.len()>90]
# print(sorted(df_results["path"].str.len().unique()))
# print(df_results.shape)
# df_results["node_prefix"] = df_results["node"].str.slice(start=0, stop=5)
import seaborn as sns
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
# sns.barplot(data=df_results, x="node_prefix", y="min_fps", ax=ax)
sns.barplot(data=df_results, x="node", y="min_fps", ax=ax)
plt.xticks(rotation=45)
# ax1 = ax.twinx()
# ax.set_ylim(0,350)
# ax1.set_ylim(0,350)
# sns.lineplot(x=[-0.5,df_results.shape[0]], y=[34.7,34.7], color="black", ax=ax1)
plt.show()
df_results = df_results.sort_values(by="min_fps")
print(df_results.iloc[0])
# df_slow = df_results[df_results["min_fps"]<80]
# print(df_slow["node"].unique())
# for node in df_slow["node"].unique():
# print(df_results[df_results["node"]==node])
# print(df_results.iloc[-1])
|
[
"sliu1@uvm.edu"
] |
sliu1@uvm.edu
|
98fe52e38140e1691a95e0a3e3e42abfcfd8ead4
|
d96289f157e2bbbf6f3560f3cc327e490df84b54
|
/exp_configs/__init__.py
|
c98ed0eabbf9a5cd320bccd9a1242a1ddc6f5ad4
|
[] |
no_license
|
IssamLaradji/semantic_segmentation_template
|
74e8766ce3265ba7fc9416f9c85811d05dca39f9
|
f7286eaafb5d5bc81e2f7d6bb87f6e24db026a08
|
refs/heads/main
| 2023-08-22T09:53:37.381702
| 2021-10-14T21:45:42
| 2021-10-14T21:45:42
| 417,287,252
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
from . import issam_exps, sandra_exps
EXP_GROUPS = {}
EXP_GROUPS.update(issam_exps.EXP_GROUPS)
EXP_GROUPS.update(sandra_exps.EXP_GROUPS)
|
[
"issam.laradji@gmail.com"
] |
issam.laradji@gmail.com
|
f4bff5cd10b131b2a0d7ac0bf7e2d36014f08278
|
6160586aa239eada16e735d40d57970dedbe1dfc
|
/modules/app_additional/app_customaction/app_customaction_delete.py
|
c182fad8429e4802d3dfe6058d3c4d97757f8530
|
[] |
no_license
|
showgea/AIOT
|
7f9ffcd49da54836714b3342232cdba330d11e6c
|
fe8275aba1c4b5402c7c2c2987509c0ecf49f330
|
refs/heads/master
| 2020-07-23T10:19:37.478456
| 2019-09-23T12:25:59
| 2019-09-23T12:25:59
| 207,525,184
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 642
|
py
|
import requests
from config import readcfg
header_Gary = readcfg.header_Gary
header_Jenny = readcfg.header_Jenny
url = readcfg.url
def app_customaction_delete(customActionId):
url_ = url + "/app/v1.0/lumi/app/customaction/delete"
params_ = {
"customActionId": customActionId
}
proxies = {'http': 'http://127.0.0.1:8888', 'https': 'http://127.0.0.1:8888'}
print("请求数据:%s" % params_)
r = requests.get(url=url_, params=params_, headers=header_Gary, proxies=proxies, verify=False)
return r
if __name__ == '__main__':
result_main = app_customaction_delete("123")
print(result_main.text)
|
[
"tangguobing2011@163.com"
] |
tangguobing2011@163.com
|
d1361e5603dbcad0458945a81f77ece19988ca14
|
4e59c2444334c67e419dbc97a2fd326115f15555
|
/db_orm_models/blocking/presence/browsing_intent_snapshot/methods.py
|
64c2c313d23e2bb2069b8e73e40c8bdb2a79cfe0
|
[
"MIT"
] |
permissive
|
bbcawodu/nav-online-backend
|
cebf41fd3373606ac880b1fc4935885d13948c86
|
3085ad686b253ea82478eb2fc365f51dda6d9d96
|
refs/heads/master
| 2021-01-22T04:44:13.105412
| 2018-08-14T16:40:55
| 2018-08-14T16:40:55
| 102,269,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
def filter_query_obj_by_session_id(query_obj, obj_model, rqst_session_id, list_of_ids):
if isinstance(rqst_session_id, unicode) and rqst_session_id.lower() == "all":
query_obj = query_obj.order_by(obj_model.presence_browsing_session_data_id)
else:
query_obj = query_obj.filter(obj_model.presence_browsing_session_data_id.in_(list_of_ids)).\
order_by(obj_model.presence_browsing_session_data_id)
return query_obj
def filter_query_obj_by_intent(query_obj, obj_model, rqst_intent):
query_obj = query_obj.filter(obj_model.calculated_intent == rqst_intent).order_by(obj_model.presence_browsing_session_data_id)
return query_obj
|
[
"awodubradley@gmail.com"
] |
awodubradley@gmail.com
|
fe67cbd2fbdca0fb9203371b298604412056b63b
|
c75ec82316ed5322c5844912ce9c528c24360b9f
|
/nsd1904/py01/day02/game.py
|
da175d732f5814bc887260e614a5f974e7b8ad95
|
[] |
no_license
|
MrZhangzhg/nsd2019
|
a94cde22f2e4bd648bb9e56ca63827f558f3c083
|
54f6d2c7b348a69f13ad5f38f2fbdc8207528749
|
refs/heads/master
| 2021-08-22T17:38:27.697675
| 2020-02-22T08:36:21
| 2020-02-22T08:36:21
| 183,539,489
| 21
| 24
| null | 2020-05-17T12:07:55
| 2019-04-26T02:06:16
|
HTML
|
UTF-8
|
Python
| false
| false
| 778
|
py
|
import random # 导入random模块
# random.choice()从一个序列对象中随机选择一项
computer = random.choice(['石头', '剪刀', '布'])
player = input('请出拳(石头/剪刀/布): ')
print("Your choice: %s, Computer's choice: %s" % (player, computer))
if player == '石头':
if computer == '石头':
print('平局')
elif computer == '剪刀':
print('You WIN!!!')
else:
print('You LOSE!!!')
elif player == '剪刀':
if computer == '石头':
print('You LOSE!!!')
elif computer == '剪刀':
print('平局')
else:
print('You WIN!!!')
else:
if computer == '石头':
print('You WIN!!!')
elif computer == '剪刀':
print('You LOSE!!!')
else:
print('平局')
|
[
"zhangzg@tedu.cn"
] |
zhangzg@tedu.cn
|
67711448c51b3aa2c18dbd24c029ab0a57c28569
|
9df89a1652d183d8fc654acd728f9a578d6d1912
|
/cli/tests/psym_tests/test_user.py
|
400718eaa8635641ddbdad811c5aae5771aba6a4
|
[
"BSD-3-Clause"
] |
permissive
|
duranrojasm/symphony
|
b37d54a134e29093edacb80442e204fc71a37fbe
|
55b3d0c20b669374303bafb10e9c96c734647c9c
|
refs/heads/main
| 2023-08-24T02:00:33.433220
| 2021-10-28T20:35:23
| 2021-10-28T20:35:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,707
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2004-present Facebook All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import random
import string
from unittest import skip
from psym import UserDeactivatedException
from psym.api.user import (
activate_user,
add_user,
deactivate_user,
edit_user,
get_active_users,
)
from psym.graphql.enum.user_role import UserRole
from psym.graphql.enum.user_status import UserStatus
from ..utils import init_client
from ..utils.base_test import BaseTest
class TestUser(BaseTest):
@staticmethod
def random_string(length: int = 10) -> str:
letters = string.ascii_lowercase
return "".join(random.choices(letters, k=length))
def test_user_created(self) -> None:
user_name = f"{self.random_string()}@fb.com"
u = add_user(client=self.client, email=user_name, password=user_name)
self.assertEqual(user_name, u.email)
self.assertEqual(UserStatus.ACTIVE, u.status)
active_users = get_active_users(client=self.client)
self.assertEqual(2, len(active_users))
client2 = init_client(email=user_name, password=user_name)
active_users = get_active_users(client=client2)
self.assertEqual(2, len(active_users))
def test_user_edited(self) -> None:
user_name = f"{self.random_string()}@fb.com"
new_password = self.random_string()
u = add_user(client=self.client, email=user_name, password=user_name)
edit_user(
client=self.client,
user=u,
new_password=new_password,
new_role=UserRole.OWNER,
)
client2 = init_client(email=user_name, password=new_password)
active_users = get_active_users(client=client2)
self.assertEqual(2, len(active_users))
def test_user_deactivated(self) -> None:
user_name = f"{self.random_string()}@fb.com"
u = add_user(client=self.client, email=user_name, password=user_name)
deactivate_user(client=self.client, user=u)
active_users = get_active_users(client=self.client)
self.assertEqual(1, len(active_users))
with self.assertRaises(UserDeactivatedException):
init_client(email=user_name, password=user_name)
def test_user_reactivated(self) -> None:
user_name = f"{self.random_string()}@fb.com"
u = add_user(client=self.client, email=user_name, password=user_name)
deactivate_user(client=self.client, user=u)
activate_user(client=self.client, user=u)
active_users = get_active_users(client=self.client)
self.assertEqual(2, len(active_users))
|
[
"jcaroper@everis.com"
] |
jcaroper@everis.com
|
eb2b9cbc7dcb2e45e3686d9f629a4a03d6867c1d
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/cirq_new/cirq_program/startCirq_Class841.py
|
4f84993699cbd099216e52673eb203502687df81
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,341
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=24
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=18
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=19
c.append(cirq.H.on(input_qubit[0])) # number=20
c.append(cirq.X.on(input_qubit[2])) # number=21
c.append(cirq.H.on(input_qubit[0])) # number=10
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=11
c.append(cirq.H.on(input_qubit[0])) # number=12
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=8
c.append(cirq.H.on(input_qubit[0])) # number=13
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=14
c.append(cirq.H.on(input_qubit[0])) # number=15
c.append(cirq.X.on(input_qubit[2])) # number=16
c.append(cirq.X.on(input_qubit[2])) # number=17
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=22
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=23
# circuit end
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =0
info = cirq.final_state_vector(circuit)
qubits = round(log2(len(info)))
frequencies = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
writefile = open("../data/startCirq_Class841.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
9d271bad43590d0385529bc485e0fd4d18fa1faf
|
e38f7b5d46fd8a65c15e49488fc075e5c62943c9
|
/pychron/core/ui/qt/custom_label_editor.py
|
af48d20e9e521f5da679a7aaf6a049248224d552
|
[
"Apache-2.0"
] |
permissive
|
INGPAN/pychron
|
3e13f9d15667e62c347f5b40af366096ee41c051
|
8592f9fc722f037a61b0b783d587633e22f11f2f
|
refs/heads/master
| 2021-08-15T00:50:21.392117
| 2015-01-19T20:07:41
| 2015-01-19T20:07:41
| 111,054,121
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,405
|
py
|
#===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
from traits.etsconfig.etsconfig import ETSConfig
ETSConfig.toolkit = 'qt4'
#============= enthought library imports =======================
from traits.api import HasTraits, Str, Int, Color, \
Button, Any, Instance, on_trait_change
from traitsui.api import View, UItem
from traitsui.qt4.editor import Editor
from traitsui.basic_editor_factory import BasicEditorFactory
#============= standard library imports ========================
import random
from PySide.QtGui import QLabel
#============= local library imports ==========================
class _CustomLabelEditor(Editor):
# txtctrl = Any
color = Any
bgcolor = Any
weight = Any
text_size = Any
def init(self, parent):
self.control = self._create_control(parent)
# self.item.on_trait_change(self._set_color, 'color')
self.sync_value(self.factory.color, 'color', mode='from')
self.sync_value(self.factory.bgcolor, 'bgcolor', mode='from')
self.sync_value(self.factory.weight, 'weight', mode='from')
self.sync_value(self.factory.text_size, 'text_size', mode='from')
@on_trait_change('color, bgcolor, weight, text_size')
def _update_style(self):
self._set_style()
def _set_style(self, control=None,
color=None, bgcolor=None,
size=None, weight=None):
if control is None:
control = self.control
if color is None:
color = self.color.name()
if bgcolor is None:
if self.bgcolor is None:
bgcolor = 'transparent'
else:
bgcolor = self.bgcolor.name()
if size is None:
size = self.text_size
if not size:
size = self.item.size
if weight is None:
weight = self.weight
if not weight:
weight = self.item.weight
css = '''QLabel {{color:{};
background-color:{};
font-size:{}px;
font-weight:{};}}
'''.format(color,
bgcolor,
size,
weight)
control.setStyleSheet(css)
def update_editor(self):
if self.control:
# print self.object, self.value
if isinstance(self.value, (str, int, float, long, unicode)):
self.control.setText(str(self.value))
# self.control.SetLabel(self.value)
def _create_control(self, parent):
control = QLabel()
color = self.item.color.name()
self._set_style(color=color,
control=control)
control.setMargin(5)
parent.setSpacing(0)
return control
class CustomLabelEditor(BasicEditorFactory):
klass = _CustomLabelEditor
color = Str
bgcolor = Str
weight = Str
text_size = Str
class CustomLabel(UItem):
editor = Instance(CustomLabelEditor, ())
size = Int(12)
size_name = Str
color = Color('black')
color_name = Str
bgcolor = Color('transparent')
bgcolor_name = Str
weight = Str('normal')
top_padding = Int(5)
bottom_padding = Int(5)
left_padding = Int(5)
right_padding = Int(5)
def _size_name_changed(self):
self.editor.text_size = self.size_name
def _color_name_changed(self):
self.editor.color = self.color_name
def _bgcolor_name_changed(self):
self.editor.bgcolor = self.bgcolor_name
#===============================================================================
# demo
#===============================================================================
class Demo(HasTraits):
a = Str('asdfsdf')
foo = Button
color = Color('blue')
bgcolor = Color('green')
cnt = 0
size = Int(12)
def _foo_fired(self):
self.a = 'fffff {}'.format(random.random())
if self.cnt % 2 == 0:
self.color = 'red'
self.bgcolor = 'blue'
else:
self.bgcolor = 'red'
self.color = 'blue'
self.cnt += 1
def traits_view(self):
v = View(
UItem('size'),
'foo',
CustomLabel('a',
# color='blue',
size=24,
size_name='size',
top_padding=10,
left_padding=10,
color_name='color',
bgcolor_name='bgcolor'
),
resizable=True,
width=400,
height=100)
return v
if __name__ == '__main__':
d = Demo()
d.configure_traits()
#============= EOF =============================================
# css = '''QLabel {{ color:{}; font-size:{}px; font-weight:{};}}
# # '''.format(self.item.color.name(), self.item.size, self.item.weight)
# control.setStyleSheet(css)
# control.setAlignment(Qt.AlignCenter)
# control.setGeometry(0, 0, self.item.width, self.item.height)
# vbox = QVBoxLayout()
# vbox.setSpacing(0)
# hbox = QHBoxLayout()
# hbox.addLayout(vbox)
# parent.addLayout(vbox)
# print vbox.getContentsMargins()
# vbox.setContentsMargins(5, 5, 5, 5)
# vbox.setSpacing(-1)
# vbox.addSpacing(5)
# vbox.addSpacing(10)
# vbox.addWidget(control)
# vbox.addSpacing(5)
# vbox.addStretch()
# vbox.setSpacing(-1)
# vbox.setMargin(10)
# control.setLayout(vbox)
# parent.addWidget(control)
|
[
"jirhiker@gmail.com"
] |
jirhiker@gmail.com
|
df5d74665f7e253a5707711a3a7f978bebb10b96
|
50e375bdc8affc1a8c09aa567a740fa19df7d5a6
|
/DSBQ/deployment/fixtures_old/test_Oracle_pytest_new.py
|
cbacf2973495522b4d34ecada22a816bff063a78
|
[] |
no_license
|
michTalebzadeh/SparkStructuredStreaming
|
ca7a257626e251c7b03a9844cfd229fa8ea95af5
|
87ef34ffe52061fcbb4f22fcd97764037717696a
|
refs/heads/master
| 2023-07-13T00:49:10.753863
| 2021-07-12T16:39:50
| 2021-07-12T16:39:50
| 364,826,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,709
|
py
|
from pyspark.sql import SparkSession
import pytest
from sparkutils import sparkstuff as s
from src.config import ctest, test_url
from src.CreateSampleDataInMysql import extractHiveData, loadIntoMysqlTable, readSourceData, transformData, saveData, readSavedData
"""
@pytest.fixtures_old(scope = "session")
def initParameters():
# Prepare test data here in this fixtures_old
appName = ctest['common']['appName']
spark_session = s.spark_session(appName)
# create sample data
# read Hive source table and select read_df number of rows (see config_test.yml)
house_df = extractHiveData() ## read Hive table as sample source
# write to Mysql DB
loadIntoMysqlTable(house_df)
# data is ready to be tested in mysql
read_df = readSourceData()
# do Transform part of ETL (Extract, Transform, Load)
transformation_df = transformData()
# save data to target test table in mysql
saveData()
# read that data saved to ensure that the rows will tally
readSavedData_df = readSavedData()
return [read_df, transformation_df, readSavedData_df]
"""
def test_validity():
house_df = extractHiveData()
loadIntoMysqlTable(house_df)
# Assert that data read from source table is what is expected
read_df = readSourceData()
assert read_df.count() == ctest['statics']['read_df_rows']
# Assert data written to target table is what it should be
transformation_df = transformData()
assert transformation_df.count() == ctest['statics']['transformation_df_rows']
# Assert what is written tallies with the number of rows transformed
readSavedData_df = readSavedData()
assert readSavedData_df.subtract(transformation_df).count() == 0
|
[
"mich.talebzadeh@gmail.com"
] |
mich.talebzadeh@gmail.com
|
a4a5217a92054490d85cba7f63ef1acb282a4847
|
989bb5d2d3e89db21fcbeac91a1e64967ea6377b
|
/sagemaker_neo_compilation_jobs/deploy_pytorch_model_on_Inf1_instance/resnet18.py
|
9421aea5af6f0bf1ea89a34f99bc2cb5dcbceb35
|
[
"Apache-2.0"
] |
permissive
|
araitats/amazon-sagemaker-examples
|
7cec9ea5822f0469d5dfabbcf3cab62ce9c0f0d1
|
512cb3b6310ae812c6124a451751237d98a109b1
|
refs/heads/master
| 2023-04-19T05:54:47.334359
| 2021-04-27T21:04:33
| 2021-04-27T21:04:33
| 338,094,683
| 2
| 1
|
Apache-2.0
| 2021-04-27T15:35:14
| 2021-02-11T17:07:39
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 565
|
py
|
def input_fn(request_body, request_content_type):
import torch
import torchvision.transforms as transforms
from PIL import Image
import io
f = io.BytesIO(request_body)
input_image = Image.open(f).convert('RGB')
preprocess = transforms.Compose([
transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
input_tensor = preprocess(input_image)
input_batch = input_tensor.unsqueeze(0)
return input_batch
|
[
"noreply@github.com"
] |
araitats.noreply@github.com
|
e4b50200cdcfab029ada56611d23bd13fb829714
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-1/42e441b34ef3b68f657a5e36027aaa21ff0b4d84-<run_bottleneck_on_image>-bug.py
|
8ad0925041a855fb4f37bc20e4557601749d4a45
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
def run_bottleneck_on_image(sess, image_data, image_data_tensor, bottleneck_tensor):
"Runs inference on an image to extract the 'bottleneck' summary layer.\n\n Args:\n sess: Current active TensorFlow Session.\n image_data: Numpy array of image data.\n image_data_tensor: Input data layer in the graph.\n bottleneck_tensor: Layer before the final softmax.\n\n Returns:\n Numpy array of bottleneck values.\n "
bottleneck_values = sess.run(bottleneck_tensor, {
image_data_tensor: image_data,
})
bottleneck_values = np.squeeze(bottleneck_values)
return bottleneck_values
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
37ba1aa162d25931444ba005344100537f2992fa
|
4dfc7fc9b84f76d690e33414610bc59a9b07001a
|
/bds/views/municipality.py
|
f7a746ad648d09ab470d311aed22e6dbf6fdd580
|
[] |
no_license
|
pythondev0101/-j-natividad-web-billing
|
e62da9ac943a74d2e1e9416d553fd3baafd3937f
|
39f7b0d60d86a08d1c5d40cacf9904b28dc2355c
|
refs/heads/main
| 2023-08-28T00:34:43.435674
| 2021-05-24T12:37:54
| 2021-05-24T12:37:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,357
|
py
|
from datetime import datetime
from flask import redirect, url_for, request, current_app, flash
from flask_login import current_user, login_required
from app import db
from app.admin.templating import admin_table, admin_edit
from bds import bp_bds
from bds.models import Municipality
from bds.forms import MunicipalityForm, MunicipalityEditForm
@bp_bds.route('/municipalities')
@login_required
def municipalities():
fields = [Municipality.id, Municipality.name, Municipality.description, Municipality.created_at, Municipality.updated_at]
form = MunicipalityForm()
return admin_table(Municipality, fields=fields,form=form,\
create_url='bp_bds.create_municipality', edit_url='bp_bds.edit_municipality')
@bp_bds.route('/municipalities/create', methods=['POST'])
@login_required
def create_municipality():
form = MunicipalityForm()
if not form.validate_on_submit():
for key, value in form.errors.items():
flash(str(key) + str(value), 'error')
return redirect(url_for('bp_bds.municipalities'))
try:
new = Municipality()
new.name = form.name.data
new.description = form.description.data
db.session.add(new)
db.session.commit()
flash('New municipality added successfully!')
except Exception as exc:
flash(str(exc), 'error')
return redirect(url_for('bp_bds.municipalities'))
@bp_bds.route('/municipalities/<int:oid>/edit', methods=['GET', 'POST'])
@login_required
def edit_municipality(oid):
ins = Municipality.query.get_or_404(oid)
form = MunicipalityEditForm(obj=ins)
if request.method == "GET":
return admin_edit(Municipality, form,'bp_bds.edit_municipality', oid, 'bp_bds.municipalities')
if not form.validate_on_submit():
for key, value in form.errors.items():
flash(str(key) + str(value), 'error')
return redirect(url_for('bp_bds.municipalities'))
try:
ins.name = form.name.data
ins.description = form.description.data
ins.updated_at = datetime.now()
ins.updated_by = "{} {}".format(current_user.fname,current_user.lname)
db.session.commit()
flash('Municipality update Successfully!','success')
except Exception as exc:
flash(str(exc),'error')
return redirect(url_for('bp_bds.municipalities'))
|
[
"rmontemayor0101@gmail.com"
] |
rmontemayor0101@gmail.com
|
327914a84e501df8aa4e30d0ab286a73a37f1b35
|
dc75370390e821b857b327100f0d2e9a60f34f89
|
/chat/migrations/0001_initial.py
|
477def66b7a7e08f361a5435958dcb17e478690a
|
[] |
no_license
|
DontTouchMyMind/OnlineChat_Udemy
|
018e24f6dfe7c1c2d1f37540f219f7b652987666
|
77ee36d89adbf71d07b6f73f9b6757aacabde939
|
refs/heads/master
| 2023-01-28T16:56:40.070478
| 2020-12-07T11:40:11
| 2020-12-07T11:40:11
| 315,873,182
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
# Generated by Django 3.1.3 on 2020-11-25 07:44
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Online',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=100)),
],
),
]
|
[
"tobigface@gmail.com"
] |
tobigface@gmail.com
|
2ba938b829b5293d85393bdbabcce4e6f8a94016
|
acd41dc7e684eb2e58b6bef2b3e86950b8064945
|
/res/packages/scripts/scripts/client/gui/game_control/AOGAS.py
|
d7b1cef8d85ccd796edb079ec9bcef0a8e803485
|
[] |
no_license
|
webiumsk/WoT-0.9.18.0
|
e07acd08b33bfe7c73c910f5cb2a054a58a9beea
|
89979c1ad547f1a1bbb2189f5ee3b10685e9a216
|
refs/heads/master
| 2021-01-20T09:37:10.323406
| 2017-05-04T13:51:43
| 2017-05-04T13:51:43
| 90,268,530
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 8,091
|
py
|
# 2017.05.04 15:21:37 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/game_control/AOGAS.py
import time
import weakref
import BigWorld
import Event
from constants import AOGAS_TIME, ACCOUNT_ATTR
from debug_utils import LOG_ERROR, LOG_DEBUG
from enumerations import AttributeEnumItem, Enumeration
from helpers import time_utils
from skeletons.gui.game_control import IAOGASController
TIME_MODIFER = 3600  # seconds per hour (original spelling preserved)
AOGAS_FORCE_START_NOTIFY = False  # debug switch: start notifications even without the AOGAS account attribute
_DEFAULT_AOGAS_NOTIFY_TIMEOUT = 5000.0  # notification display timeout, presumably milliseconds -- TODO confirm
# Enumeration of notification messages; AOND_* marks refer to the account's
# Accumulated ONline Duration milestones (1h, 2h, 3h, >3h, >5h) plus RESET.
AOGAS_NOTIFY_MSG = Enumeration('Notification message for Anti-online game addiction system', [('AOND_1', {'timeout': _DEFAULT_AOGAS_NOTIFY_TIMEOUT}),
 ('AOND_2', {'timeout': _DEFAULT_AOGAS_NOTIFY_TIMEOUT}),
 ('AOND_3', {'timeout': _DEFAULT_AOGAS_NOTIFY_TIMEOUT}),
 ('AOND_MORE_3', {'timeout': _DEFAULT_AOGAS_NOTIFY_TIMEOUT}),
 ('AOND_MORE_5', {'timeout': _DEFAULT_AOGAS_NOTIFY_TIMEOUT}),
 ('RESET', {'timeout': _DEFAULT_AOGAS_NOTIFY_TIMEOUT})], instance=AttributeEnumItem)
class AOGAS_NOTIFY_TIME(object):
    """AOND thresholds (seconds of accumulated online time) at which
    notifications are due. TIME_MODIFER is one hour (3600 s)."""
    # 600 s (10 minutes) before the 1-hour and 2-hour marks.
    AOND_1 = 1 * TIME_MODIFER - 600
    AOND_2 = 2 * TIME_MODIFER - 600
    # Server-defined thresholds from constants.AOGAS_TIME.
    AOND_3 = AOGAS_TIME.REDUCED_GAIN
    AOND_5 = AOGAS_TIME.NO_GAIN
class AOGAS_NOTIFY_PERIOD(object):
    """Intervals (seconds) between successive notifications, keyed by how
    long the account has already been online; shrinks as AOND grows."""
    AOND_START = 5 * TIME_MODIFER / 6
    AOND_2_3 = 1 * TIME_MODIFER
    AOND_3_5 = 0.5 * TIME_MODIFER
    AOND_END = 0.25 * TIME_MODIFER
class AOGASController(IAOGASController):
    """Client-side controller for the Anti-Online Game Addiction System.

    Queries the server for the account's accumulated online duration (AOND)
    and relays periodic AOGAS_NOTIFY_MSG items to UI listeners through the
    ``onNotifyAccount`` event, buffering them while the lobby UI is absent.
    """

    def __init__(self):
        super(AOGASController, self).__init__()
        # Fired with an AOGAS_NOTIFY_MSG item for each due notification.
        self.onNotifyAccount = Event.Event()
        # True only while the lobby can actually display notifications.
        self.__isNotifyAccount = False
        # Messages queued while the UI cannot show them (e.g. in battle).
        self.__lastNotifyMessages = []
        self.__aogasStartedAt = 0
        self.__isAogasEnabled = True
        # The notificator calls back via the name-mangled private method.
        self.__notificator = _AOGASNotificator(self, '_AOGASController__notifyAccount')

    def fini(self):
        """Tear down: stop the timer and drop all event subscribers."""
        self.__notificator.stop()
        self.onNotifyAccount.clear()
        super(AOGASController, self).fini()

    def onLobbyStarted(self, ctx):
        """Record the AOGAS session start time and kick off notifications.

        Prefers the server-provided 'aogasStartedAt' (converted to local
        time); falls back to the local clock when it is missing.
        """
        serverTime = ctx.get('aogasStartedAt')
        if serverTime is not None:
            self.__aogasStartedAt = time_utils.makeLocalServerTime(serverTime)
        else:
            self.__aogasStartedAt = time.time()
        self.__isAogasEnabled = ctx.get('isAogasEnabled', True)
        if not self.__notificator.isStarted():
            self.__requestRequiredInfo()
        return

    def onDisconnected(self):
        """Stop notifications and discard any queued messages."""
        self.__notificator.stop()
        self.__isNotifyAccount = False
        self.__lastNotifyMessages = []

    def onLobbyInited(self, event):
        """Lobby UI is ready: flush messages queued while it was absent."""
        LOG_DEBUG('enableNotifyAccount ', self.__lastNotifyMessages)
        self.__isNotifyAccount = True
        for message in self.__lastNotifyMessages:
            self.onNotifyAccount(message)

        self.__lastNotifyMessages = []

    def onAvatarBecomePlayer(self):
        """Entering battle: suspend direct notification delivery."""
        LOG_DEBUG('disableNotifyAccount')
        self.__isNotifyAccount = False

    def __notifyAccount(self, message, collect = False):
        """Deliver *message* now, or queue it until the lobby is back.

        When not collecting, only the most recent message is kept.
        """
        if self.__isNotifyAccount:
            self.onNotifyAccount(message)
        elif collect:
            self.__lastNotifyMessages.append(message)
        else:
            self.__lastNotifyMessages = [message]

    def __requestRequiredInfo(self):
        """Ask the server for the account attributes (async callback)."""
        BigWorld.player().stats.get('attrs', self.__receiveAccountAttrs)

    def __receiveAccountAttrs(self, resultID, attrs):
        """Start or stop notification flow based on the AOGAS attribute."""
        if resultID < 0:
            LOG_ERROR('Server return error: ', resultID, attrs)
            return
        if self.__isAogasEnabled and ACCOUNT_ATTR.AOGAS & attrs != 0 or AOGAS_FORCE_START_NOTIFY:
            BigWorld.player().stats.get('accOnline', self.__receiveAccOnline)
        elif self.__notificator.isStarted():
            self.__notificator.stop()

    def __receiveAccOnline(self, resultID, accOnline):
        """Compute the current AOND and start the notification timer.

        AOND = server-reported online seconds plus the time elapsed locally
        since the session started. A zero accOnline means the counter was
        reset, which is announced via the RESET message.
        """
        if resultID < 0:
            LOG_ERROR('Server return error: ', resultID, accOnline)
            return
        if not accOnline:
            self.__notifyAccount(AOGAS_NOTIFY_MSG.RESET)
        delta = round(time.time() - self.__aogasStartedAt)
        AOND = delta + accOnline
        LOG_DEBUG('Calculate AOND (seconds,seconds,seconds) : ', AOND, delta, accOnline)
        self.__notificator.start(AOND)
class _AOGASNotificator(object):
    """Schedules AOGAS notifications on BigWorld's callback timer.

    Holds only a weak reference to its owner so it does not keep the
    controller alive; invokes the owner's handler by (mangled) name.
    """

    def __init__(self, scope, function):
        # Weak reference to the owning controller; *function* is the
        # attribute name of the notify handler to call on it.
        self.__scope = weakref.ref(scope)
        self.__function = function
        self.__started = False
        # Accumulated online duration (seconds) tracked between callbacks.
        self.__AOND = 0
        self.__callbackID = None
        return

    def start(self, AOND):
        """Begin the notification schedule from the given AOND.

        If AOND is already past the first threshold, immediately replay the
        message for the most recent threshold crossed, then schedule the
        next callback either a full period away (when AOND sits exactly on
        a threshold) or at the next threshold boundary.
        """
        if self.__started:
            return
        self.__started = True
        self.__AOND = AOND
        notificated = False
        if AOND > AOGAS_NOTIFY_TIME.AOND_1:
            prevAOND = self.__getPrevNotifyTime(AOND)
            self.__doNotify(self.__getNotifyMessages(prevAOND))
            notificated = prevAOND == AOND
        if notificated:
            notifyPeriod = self.__getNotifyPeriod(self.__AOND)
            LOG_DEBUG('AOGAS started (seconds,seconds)', self.__AOND, notifyPeriod)
            self.__callbackID = BigWorld.callback(notifyPeriod, lambda : self.__notify(notifyPeriod))
        else:
            notifyTime = self.__getNextNotifyTime(AOND)
            nextNotifyDelay = abs(notifyTime - AOND)
            LOG_DEBUG('AOGAS started (seconds,seconds,seconds)', self.__AOND, notifyTime, nextNotifyDelay)
            self.__callbackID = BigWorld.callback(nextNotifyDelay, lambda : self.__notify(nextNotifyDelay))

    def stop(self):
        """Cancel any pending callback and mark the notificator idle."""
        self.__started = False
        if self.__callbackID is not None:
            BigWorld.cancelCallback(self.__callbackID)
            self.__callbackID = None
        return

    def isStarted(self):
        # True between start() and stop().
        return self.__started

    def __getNotifyPeriod(self, AOND):
        """Return the interval (seconds) until the next notification,
        which shrinks as the accumulated online time grows."""
        if AOND < AOGAS_NOTIFY_TIME.AOND_1:
            notifyPeriod = AOGAS_NOTIFY_PERIOD.AOND_START
        elif AOND < AOGAS_NOTIFY_TIME.AOND_3:
            notifyPeriod = AOGAS_NOTIFY_PERIOD.AOND_2_3
        elif AOND < AOGAS_NOTIFY_TIME.AOND_5:
            notifyPeriod = AOGAS_NOTIFY_PERIOD.AOND_3_5
        else:
            notifyPeriod = AOGAS_NOTIFY_PERIOD.AOND_END
        return notifyPeriod

    def __getNextNotifyTime(self, AOND):
        """First notification boundary strictly at or after *AOND*,
        found by walking the variable-length periods from zero."""
        notifyTime = 0
        while notifyTime < AOND:
            notifyPeriod = self.__getNotifyPeriod(notifyTime)
            notifyTime += notifyPeriod

        return notifyTime

    def __getPrevNotifyTime(self, AOND):
        """Last notification boundary at or before *AOND* (one period
        before the next boundary)."""
        notifyTime = 0
        notifyPeriod = 0
        while notifyTime < AOND:
            notifyPeriod = self.__getNotifyPeriod(notifyTime)
            notifyTime += notifyPeriod

        return notifyTime - notifyPeriod

    def __getNotifyMessages(self, AOND):
        """Map an AOND value onto the message(s) to display; the 3-hour
        mark emits two messages (the milestone plus the ongoing warning)."""
        if AOND == AOGAS_NOTIFY_TIME.AOND_1:
            messages = (AOGAS_NOTIFY_MSG.AOND_1,)
        elif AOND == AOGAS_NOTIFY_TIME.AOND_2:
            messages = (AOGAS_NOTIFY_MSG.AOND_2,)
        elif AOND == AOGAS_NOTIFY_TIME.AOND_3:
            messages = (AOGAS_NOTIFY_MSG.AOND_3, AOGAS_NOTIFY_MSG.AOND_MORE_3)
        elif AOND < AOGAS_NOTIFY_TIME.AOND_5:
            messages = (AOGAS_NOTIFY_MSG.AOND_MORE_3,)
        else:
            messages = (AOGAS_NOTIFY_MSG.AOND_MORE_5,)
        return messages

    def __doNotify(self, messages):
        """Invoke the owner's notify handler for each message; *collect*
        is passed when several messages must all be queued."""
        notifyHandler = getattr(self.__scope(), self.__function, None)
        if notifyHandler is not None and callable(notifyHandler):
            collect = len(messages) > 1
            for message in messages:
                notifyHandler(message, collect)
                LOG_DEBUG('notify (seconds, message)', self.__AOND, message)

        else:
            LOG_ERROR('Not found notify handler ', self.__scope(), self.__function)
        return

    def __notify(self, notifyPeriod):
        """Timer tick: advance AOND, emit messages, reschedule."""
        self.__AOND += notifyPeriod
        self.__doNotify(self.__getNotifyMessages(self.__AOND))
        notifyPeriod = self.__getNotifyPeriod(self.__AOND)
        self.__callbackID = BigWorld.callback(notifyPeriod, lambda : self.__notify(notifyPeriod))
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\game_control\AOGAS.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:21:38 Střední Evropa (letní čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
73268d8e78be08959b7a0ae204f64a99e367dc91
|
ac47074bcf749273941ab01213bb6d1f59c40c99
|
/project/multi_factor/alpha_model/exposure/alpha_factor_dividend_12m.py
|
578ecd49441115d3a844ec792f25ce7045c363c4
|
[] |
no_license
|
xuzhihua95/quant
|
c5561e2b08370610f58662f2871f1f1490681be2
|
c7e312c70d5f400b7e777d2ff4c9f6f223eabfee
|
refs/heads/master
| 2020-05-19T17:04:08.796981
| 2019-04-24T02:50:29
| 2019-04-24T02:50:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,149
|
py
|
from quant.stock.date import Date
from quant.stock.stock import Stock
from quant.project.multi_factor.alpha_model.exposure.alpha_factor import AlphaFactor
class AlphaDividend12m(AlphaFactor):
    """
    Factor description: trailing 12-month dividend yield, refreshed from
    the latest financial report.
    Disclosure date: that of the most recent financial report.
    Intended to capture the factor's valuation power.
    """

    def __init__(self):
        AlphaFactor.__init__(self)
        self.exposure_path = self.data_path
        self.raw_factor_name = 'alpha_raw_dividend_12m'

    def cal_factor_exposure(self, beg_date, end_date):
        """ Compute the factor exposure over [beg_date, end_date]. """
        # Raw data is stored in percent; convert to a fraction.
        dividend_12m = Stock().read_factor_h5("dividendyield2") / 100
        beg_date = Date().change_to_str(beg_date)
        end_date = Date().change_to_str(end_date)
        # Columns are dates; slice to the requested window.
        dividend_12m = dividend_12m.loc[:, beg_date:end_date]
        # Drop dates on which every stock is NaN.
        res = dividend_12m.T.dropna(how='all').T
        self.save_alpha_factor_exposure(res, self.raw_factor_name)
if __name__ == "__main__":
    from datetime import datetime

    # Recompute exposures for the full history: 2004-01-01 through today.
    beg_date = '20040101'
    end_date = datetime.today()
    self = AlphaDividend12m()
    self.cal_factor_exposure(beg_date, end_date)
|
[
"1119332482@qq.com"
] |
1119332482@qq.com
|
49be6f19af78663962e559d4140c0414b52e4836
|
b5644b71eade9abd62e3cd8747808b8edeea8ee1
|
/movies/admin.py
|
25365dac1ee29104521aa3d036714f35d6767529
|
[] |
no_license
|
HSx3/project_UBD
|
68aa8dd1a3a2bf9c3523967a3c489a51c4bdac04
|
fcc2b035dac07376ddb0e6c1eceb4544e3415455
|
refs/heads/master
| 2020-05-24T06:09:14.730903
| 2019-05-17T00:34:29
| 2019-05-17T00:34:29
| 187,133,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
from django.contrib import admin
from .models import Movie, Genre, Actor, Director, Cast, Score
# Register your models here.
# Expose the movie app's models in the Django admin site with the
# default ModelAdmin options.
admin.site.register(Movie)
admin.site.register(Genre)
admin.site.register(Director)
admin.site.register(Actor)
admin.site.register(Cast)
admin.site.register(Score)
|
[
"hs.ssafy@gmail.com"
] |
hs.ssafy@gmail.com
|
7f23664b7bbc4be12bd5c23a8f685cf41f098106
|
f6aac61a48a87743be9c40fecdc24344bae4d263
|
/scripts/gfs/gfs2iemre.py
|
062adac7a62c91781a649ef342cf23c96977f333
|
[
"MIT"
] |
permissive
|
akrherz/iem
|
8714d99b371c8818f7cdde73dd24639e9fc7d42b
|
178015584b7fb5b585f65be6013eaf16fb6db0c7
|
refs/heads/main
| 2023-08-19T02:58:24.507782
| 2023-08-18T12:08:31
| 2023-08-18T12:08:31
| 4,253,774
| 118
| 74
|
MIT
| 2023-09-14T18:28:41
| 2012-05-07T20:32:59
|
Python
|
UTF-8
|
Python
| false
| false
| 6,702
|
py
|
"""Copy GFS grib data to IEMRE grid...
Run from RUN_50_AFTER.sh
"""
import shutil
import subprocess
import sys
from datetime import date, timedelta
import numpy as np
import pygrib
from pyiem import iemre
from pyiem.util import logger, ncopen, utc
from scipy.interpolate import NearestNDInterpolator
LOG = logger()
def create(ts):
    """
    Create a new NetCDF file for a year of our specification!

    Writes /mesonet/data/iemre/gfs_current_new.nc with the IEMRE lat/lon
    grid, a 20-day time axis, and empty high/low temperature, soil
    temperature, and precipitation variables.

    :param ts: GFS model initialization time; recorded in the global
        ``gfs_forecast`` attribute and used as the time-axis origin.
    """
    fn = "/mesonet/data/iemre/gfs_current_new.nc"
    with ncopen(fn, "w") as nc:
        nc.title = "GFS on IEMRE Grid."
        nc.contact = "Daryl Herzmann, akrherz@iastate.edu, 515-294-5978"
        nc.gfs_forecast = f"{ts:%Y-%m-%dT%H:%M:%SZ}"
        nc.history = f"{date.today():%d %B %Y} Generated"

        # Setup Dimensions
        nc.createDimension("lat", iemre.NY)
        nc.createDimension("lon", iemre.NX)
        # store 20 days worth, to be safe of future changes
        nc.createDimension("time", 20)

        # Setup Coordinate Variables
        lat = nc.createVariable("lat", float, ("lat"))
        lat.units = "degrees_north"
        lat.long_name = "Latitude"
        lat.standard_name = "latitude"
        lat.bounds = "lat_bnds"
        lat.axis = "Y"
        lat[:] = iemre.YAXIS

        lon = nc.createVariable("lon", float, ("lon"))
        lon.units = "degrees_east"
        lon.long_name = "Longitude"
        lon.standard_name = "longitude"
        lon.bounds = "lon_bnds"
        lon.axis = "X"
        lon[:] = iemre.XAXIS

        tm = nc.createVariable("time", float, ("time",))
        tm.units = f"Days since {ts:%Y-%m-%d} 00:00:0.0"
        tm.long_name = "Time"
        tm.standard_name = "time"
        tm.axis = "T"
        tm.calendar = "gregorian"
        # Placeholder
        tm[:] = np.arange(0, 20)

        # Data variables are stored as scaled uint16 (value * 100).
        high = nc.createVariable(
            "high_tmpk", np.uint16, ("time", "lat", "lon"), fill_value=65535
        )
        high.units = "K"
        high.scale_factor = 0.01
        high.long_name = "2m Air Temperature 12 Hour High"
        high.standard_name = "2m Air Temperature"
        high.coordinates = "lon lat"

        low = nc.createVariable(
            "low_tmpk", np.uint16, ("time", "lat", "lon"), fill_value=65535
        )
        low.units = "K"
        low.scale_factor = 0.01
        low.long_name = "2m Air Temperature 12 Hour Low"
        low.standard_name = "2m Air Temperature"
        low.coordinates = "lon lat"

        ncvar = nc.createVariable(
            "tsoil", np.uint16, ("time", "lat", "lon"), fill_value=65535
        )
        ncvar.units = "K"
        ncvar.scale_factor = 0.01
        ncvar.long_name = "0-10 cm Average Soil Temperature"
        ncvar.standard_name = "0-10 cm Average Soil Temperature"
        ncvar.coordinates = "lon lat"

        ncvar = nc.createVariable(
            "p01d", np.uint16, ("time", "lat", "lon"), fill_value=65535
        )
        ncvar.units = "mm"
        ncvar.scale_factor = 0.01
        ncvar.long_name = "Precipitation Accumulation"
        ncvar.standard_name = "precipitation_amount"
        ncvar.coordinates = "lon lat"
def merge_grib(nc, now):
    """Merge what grib data we can find into the netcdf file.

    Walks the 6-hourly GFS surface-flux grib files out to 384 hours,
    accumulating daily max/min 2m temperature, precipitation, and 0-10 cm
    soil temperature, and writes one record per forecast day at each 6z
    boundary (nearest-neighbor regridded onto the IEMRE grid).

    :param nc: open, writable netCDF file created by ``create``
    :param now: GFS model initialization time (UTC)
    """
    xi, yi = np.meshgrid(iemre.XAXIS, iemre.YAXIS)
    lons = None
    lats = None
    tmaxgrid = None
    tmingrid = None
    tsoilgrid = None
    pgrid = None
    # Counts prate grids seen since the last write; a full day needs 4.
    hits = 0
    for fhour in range(6, 385, 6):
        fxtime = now + timedelta(hours=fhour)
        grbfn = now.strftime(
            f"/mesonet/tmp/gfs/%Y%m%d%H/gfs.t%Hz.sfluxgrbf{fhour:03.0f}.grib2"
        )
        grbs = pygrib.open(grbfn)
        for grb in grbs:
            name = grb.shortName.lower()
            if lons is None:
                # Grib grid is 0..360 longitude; shift to -180..180.
                lats, lons = [np.ravel(x) for x in grb.latlons()]
                lons = np.where(lons > 180, lons - 360, lons)
            if name == "tmax":
                if tmaxgrid is None:
                    tmaxgrid = grb.values
                else:
                    tmaxgrid = np.where(
                        grb.values > tmaxgrid, grb.values, tmaxgrid
                    )
            elif name == "tmin":
                if tmingrid is None:
                    tmingrid = grb.values
                else:
                    tmingrid = np.where(
                        grb.values < tmingrid, grb.values, tmingrid
                    )
            elif name == "prate":
                # kg/m^2/s over six hours
                hits += 1
                if pgrid is None:
                    pgrid = grb.values * 6.0 * 3600
                else:
                    pgrid += grb.values * 6.0 * 3600
            # Hacky
            elif name == "st" and str(grb).find("0.0-0.1 m") > -1:
                # Summed here, divided by 4 (the number of 6-hour grids
                # per day) when written below.
                if tsoilgrid is None:
                    tsoilgrid = grb.values
                else:
                    tsoilgrid += grb.values
        grbs.close()
        # Write tmax, tmin out at 6z
        if fxtime.hour == 6:
            # The actual date is minus one
            days = (fxtime.date() - now.date()).days - 1
            if hits == 4:
                LOG.info("Writing %s, days=%s", fxtime, days)
                nn = NearestNDInterpolator((lons, lats), np.ravel(tmaxgrid))
                nc.variables["high_tmpk"][days, :, :] = nn(xi, yi)
                nn = NearestNDInterpolator((lons, lats), np.ravel(tmingrid))
                nc.variables["low_tmpk"][days, :, :] = nn(xi, yi)
                nn = NearestNDInterpolator((lons, lats), np.ravel(pgrid))
                nc.variables["p01d"][days, :, :] = nn(xi, yi)
                nn = NearestNDInterpolator(
                    (lons, lats), np.ravel(tsoilgrid / 4.0)
                )
                nc.variables["tsoil"][days, :, :] = nn(xi, yi)
            # Reset the daily accumulators regardless of completeness.
            tmingrid = None
            tmaxgrid = None
            tsoilgrid = None
            hits = 0
def main(argv):
    """Do the work.

    :param argv: sys.argv with YYYY MM DD HH at positions 1-4 (the GFS
        initialization time, UTC); non-synoptic hours are skipped.
    """
    now = utc(*[int(s) for s in argv[1:5]])
    # Run every hour, filter those we don't run
    if now.hour % 6 != 0:
        return
    create(now)
    with ncopen("/mesonet/data/iemre/gfs_current_new.nc", "a") as nc:
        merge_grib(nc, now)
    # Atomically replace the live file only after a successful merge.
    shutil.move(
        "/mesonet/data/iemre/gfs_current_new.nc",
        "/mesonet/data/iemre/gfs_current.nc",
    )
    # Archive this as we need it for various projects
    cmd = [
        "pqinsert",
        "-i",
        "-p",
        (
            f"data a {now:%Y%m%d%H%M} bogus "
            f"model/gfs/gfs_{now:%Y%m%d%H}_iemre.nc nc"
        ),
        "/mesonet/data/iemre/gfs_current.nc",
    ]
    subprocess.call(cmd)
    # Generate 4inch plots based on 6z GFS
    if now.hour == 6:
        subprocess.call(["python", "gfs_4inch.py"])


if __name__ == "__main__":
    main(sys.argv)
|
[
"akrherz@iastate.edu"
] |
akrherz@iastate.edu
|
da325578a57f0f5949a3625ee61b64b1612a13c1
|
04f948d94cf288eafccf2b513078aeed77e3faef
|
/prof.py
|
a35159b88b3feed2074e0fcec867c1df8d0ddf85
|
[
"Apache-2.0"
] |
permissive
|
jdily/qpth
|
a9d0e5a662c407e6b6a92a25962040f0a2834ce8
|
296c01775ac82e7890aa688839f39fff6a6cb681
|
refs/heads/master
| 2021-01-21T12:58:33.373545
| 2017-05-16T15:02:12
| 2017-05-16T15:02:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,433
|
py
|
#!/usr/bin/env python3
import argparse
import sys
import numpy as np
import numpy.random as npr
import qpth.solvers.pdipm.single as pdipm_s
import qpth.solvers.pdipm.batch as pdipm_b
import itertools
import time
import torch
import gurobipy as gpy
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
color_scheme='Linux', call_pdb=1)
import setproctitle
def main():
    """Parse CLI arguments and run the QP-solver benchmark suite."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--nTrials', type=int, default=10)
    args = parser.parse_args()
    setproctitle.setproctitle('bamos.optnet.prof')
    # Fix the RNG seed so every run benchmarks identical problem instances.
    npr.seed(0)
    prof(args)
def prof(args):
    """Benchmark Gurobi vs. single vs. batched PDIPM solvers and print an
    org-mode/LaTeX table of mean +/- std timings over args.nTrials runs.

    :param args: parsed argparse namespace with ``nTrials``
    """
    print('| \# Vars | \# Batch | Gurobi | single | batched |')
    print('|----------+----------+--------+--------+---------|')
    # for nz, nBatch in itertools.product([100,500], [1, 64, 128]):
    for nz, nBatch in itertools.product([100], [1, 64, 128]):
        times = []
        for i in range(args.nTrials):
            times.append(prof_instance(nz, nBatch))
        # Rows of `times`: one trial; columns: (gurobi, single, batched).
        times = np.array(times)
        print(("| {:5d} " * 2 + "| ${:.5e} \pm {:.5e}$ s " * 3 + '|').format(
            *([nz, nBatch] + [item for sublist in zip(times.mean(axis=0), times.std(axis=0))
                              for item in sublist])))
def prof_instance(nz, nBatch, cuda=True):
    """Time one random QP batch on three solvers.

    Builds a batch of random positive-definite QPs with inequality
    constraints only (neq == 0), solves each with Gurobi, then with the
    single-instance and batched PDIPM solvers.

    :param nz: number of decision variables per QP
    :param nBatch: number of QP instances in the batch
    :param cuda: move tensors to the GPU before timing the PDIPM solvers
    :return: (gurobi_time, single_time, batched_time) wall-clock seconds
    """
    nineq, neq = 100, 0
    assert(neq == 0)
    # Q = L L^T + eps*I guarantees a positive-definite objective.
    L = npr.rand(nBatch, nz, nz)
    Q = np.matmul(L, L.transpose((0, 2, 1))) + 1e-3 * np.eye(nz, nz)
    G = npr.randn(nBatch, nineq, nz)
    z0 = npr.randn(nBatch, nz)
    s0 = npr.rand(nBatch, nineq)
    p = npr.randn(nBatch, nz)
    # h chosen so z0 is strictly feasible (slack s0 > 0).
    h = np.matmul(G, np.expand_dims(z0, axis=(2))).squeeze(2) + s0
    A = npr.randn(nBatch, neq, nz)
    b = np.matmul(A, np.expand_dims(z0, axis=(2))).squeeze(2)

    # --- Gurobi: one dense model per batch element; only optimize() timed.
    zhat_g = []
    gurobi_time = 0.0
    for i in range(nBatch):
        m = gpy.Model()
        zhat = m.addVars(nz, lb=-gpy.GRB.INFINITY, ub=gpy.GRB.INFINITY)
        obj = 0.0
        for j in range(nz):
            for k in range(nz):
                obj += 0.5 * Q[i, j, k] * zhat[j] * zhat[k]
            obj += p[i, j] * zhat[j]
        m.setObjective(obj)
        for j in range(nineq):
            con = 0
            for k in range(nz):
                con += G[i, j, k] * zhat[k]
            m.addConstr(con <= h[i, j])
        m.setParam('OutputFlag', False)
        start = time.time()
        m.optimize()
        gurobi_time += time.time() - start
        t = np.zeros(nz)
        for j in range(nz):
            t[j] = zhat[j].x
        zhat_g.append(t)

    p, L, Q, G, z0, s0, h = [torch.Tensor(x) for x in [p, L, Q, G, z0, s0, h]]
    if cuda:
        p, L, Q, G, z0, s0, h = [x.cuda() for x in [p, L, Q, G, z0, s0, h]]
    if neq > 0:
        A = torch.Tensor(A)
        b = torch.Tensor(b)
    else:
        A, b = [torch.Tensor()] * 2
    if cuda:
        A = A.cuda()
        b = b.cuda()

    # af = adact.AdactFunction()

    # --- Single-instance PDIPM: loop over the batch, timing factorization
    # and solve together.
    single_results = []
    start = time.time()
    for i in range(nBatch):
        A_i = A[i] if neq > 0 else A
        b_i = b[i] if neq > 0 else b
        U_Q, U_S, R = pdipm_s.pre_factor_kkt(Q[i], G[i], A_i)
        single_results.append(pdipm_s.forward(p[i], Q[i], G[i], A_i, b_i, h[i],
                                              U_Q, U_S, R))
    single_time = time.time() - start

    # --- Batched PDIPM: one vectorized solve for the whole batch.
    start = time.time()
    Q_LU, S_LU, R = pdipm_b.pre_factor_kkt(Q, G, A)
    zhat_b, nu_b, lam_b, s_b = pdipm_b.forward(p, Q, G, h, A, b, Q_LU, S_LU, R)
    batched_time = time.time() - start

    # Usually between 1e-4 and 1e-5:
    # print('Diff between gurobi and pdipm: ',
    #       np.linalg.norm(zhat_g[0]-zhat_b[0].cpu().numpy()))

    # import IPython, sys; IPython.embed(); sys.exit(-1)

    # import IPython, sys; IPython.embed(); sys.exit(-1)
    # zhat_diff = (single_results[0][0] - zhat_b[0]).norm()
    # lam_diff = (single_results[0][2] - lam_b[0]).norm()
    # eps = 0.1  # Pretty relaxed.
    # if zhat_diff > eps or lam_diff > eps:
    #     print('===========')
    #     print("Warning: Single and batched solutions might not match.")
    #     print("  + zhat_diff: {}".format(zhat_diff))
    #     print("  + lam_diff: {}".format(lam_diff))
    #     print("  + (nz, neq, nineq, nBatch) = ({}, {}, {}, {})".format(
    #         nz, neq, nineq, nBatch))
    #     print('===========')

    return gurobi_time, single_time, batched_time


if __name__ == '__main__':
    main()
|
[
"bamos@cs.cmu.edu"
] |
bamos@cs.cmu.edu
|
3f6e2abacfeac461a57ba7a45a1cf5a7fed12415
|
a275c7e4161c89ed3ee6289b75ad1d017634baab
|
/kontrollbank/pipelines.py
|
fb4ba7933de52e17d5cffd84c31fac2ff44fb0a5
|
[] |
no_license
|
SimeonYS/Oesterreichische-Kontrollbank-AG
|
c277d179aa41990458fbed76143fb48c0d8346d2
|
f2aa83979c1faa52fdc18fb2802222af0de2d0e3
|
refs/heads/main
| 2023-04-18T01:17:55.803542
| 2021-04-29T06:34:11
| 2021-04-29T06:34:11
| 339,081,901
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,298
|
py
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import sqlite3
class KontrollbankPipeline:
    """Scrapy item pipeline that persists scraped articles to SQLite.

    NOTE(review): the connection and cursor are created at class-definition
    time as class attributes, so they are shared by every instance and live
    for the whole process — presumably fine for a single-spider run, but
    verify if several spiders/pipelines use this class concurrently.
    """

    # Database setup
    conn = sqlite3.connect('KontrollBank.db')
    c = conn.cursor()

    def open_spider(self, spider):
        """Ensure the articles table exists before the crawl starts."""
        self.c.execute("""CREATE TABLE IF NOT EXISTS articles
                    (date text, title text, link text, content text)""")

    def process_item(self, item, spider):
        """Insert the item unless an article with the same title and date
        is already stored (simple duplicate check)."""
        self.c.execute("""SELECT * FROM articles WHERE title = ? AND date = ?""",
                       (item.get('title'), item.get('date')))
        duplicate = self.c.fetchall()
        if len(duplicate):
            # Already stored: pass the item through without re-inserting.
            return item
        print(f"New entry added at {item['link']}")
        # Insert values
        self.c.execute("INSERT INTO articles (date, title, link, content)"
                       "VALUES (?,?,?,?)", (item.get('date'), item.get('title'), item.get('link'), item.get('content')))
        self.conn.commit()  # commit after every entry
        return item

    def close_spider(self, spider):
        """Flush any pending transaction and close the connection."""
        self.conn.commit()
        self.conn.close()
|
[
"simeon.simeonov@ADPVT.com"
] |
simeon.simeonov@ADPVT.com
|
19f06cd1078d337384ddc3da7c6e980f4f9cebf3
|
2328a25664cd427f2043164ad815698bbb021c34
|
/ProfilerApp/ProfilerApp/__init__.py
|
304131b26aa01fa05bbc7b96a95f61758190e504
|
[] |
no_license
|
Dishan765/Automated-Cybercrime-Profiling
|
7f7f017c8d4614ddffd5f662dc7e279a8d40608e
|
31a7f89be7a2ed06444bda7cb0ece52854d4e7e7
|
refs/heads/master
| 2023-07-04T19:35:07.333739
| 2021-08-21T19:44:41
| 2021-08-21T19:44:41
| 347,069,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from ProfilerApp.config import Config
from flask_mail import Mail
# Extensions are created unbound so the application factory below can
# attach them to each app instance via init_app().
db = SQLAlchemy()
bcrypt = Bcrypt()
login_manager = LoginManager()
# Redirect anonymous users to the users.login endpoint and style the
# "please log in" flash message with the 'info' category.
login_manager.login_view = 'users.login'
login_manager.login_message_category = 'info'
mail = Mail()
def create_app(config_class=Config):
    """Application factory: build and configure a Flask app instance.

    :param config_class: configuration object or class to load; defaults
        to ``Config`` so existing callers keep the previous behavior.
    :return: the configured :class:`flask.Flask` application.
    """
    app = Flask(__name__)
    # Bug fix: honor the config_class argument; the original always loaded
    # the default Config, making the parameter dead.
    app.config.from_object(config_class)

    # Bind the module-level extensions to this app instance.
    db.init_app(app)
    bcrypt.init_app(app)
    login_manager.init_app(app)
    mail.init_app(app)

    # Blueprints are imported inside the factory to avoid circular imports
    # at module load time.
    from ProfilerApp.users.routes import users
    from ProfilerApp.posts.routes import posts
    from ProfilerApp.profiles.routes import profile
    from ProfilerApp.admin.routes import admin
    #from ProfilerApp.main.routes import main
    from ProfilerApp.api.routes import api

    app.register_blueprint(users)
    app.register_blueprint(posts)
    app.register_blueprint(profile)
    app.register_blueprint(admin)
    #app.register_blueprint(main)
    app.register_blueprint(api)

    return app
|
[
"you@example.com"
] |
you@example.com
|
65cdabf8faee54817569aebc2ce8097e24679139
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03089/s621337044.py
|
f7964c4a3f01cff6041508b36017d68bb3b4e4ed
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
n = int(input())  # declared length; the list itself drives the loop below
values = list(map(int, input().split()))

# Rebuild the sequence by inserting each value at index value-1; if a value
# points past the current end of the partial result, reconstruction is
# impossible and the answer collapses to [-1].
result = []
for value in values:
    if len(result) < value - 1:
        result = [-1]
        break
    result.insert(value - 1, value)

for item in result:
    print(item)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
d6ee7fda37973ff33a434afd1575004b50819c0a
|
751d837b8a4445877bb2f0d1e97ce41cd39ce1bd
|
/codegolf/hello-world-rainbow.py
|
0e86441c738f717c2150798dc6f368cbd9961c53
|
[
"MIT"
] |
permissive
|
qeedquan/challenges
|
d55146f784a3619caa4541ac6f2b670b0a3dd8ba
|
56823e77cf502bdea68cce0e1221f5add3d64d6a
|
refs/heads/master
| 2023-08-11T20:35:09.726571
| 2023-08-11T13:02:43
| 2023-08-11T13:02:43
| 115,886,967
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,321
|
py
|
#!/usr/bin/env python
"""
Dealing with colors in non-markup languages often complicates things. I would like to see some variations of how color is used in different languages.
The object of this competition is to output 'Hello World' in the seven colors of the rainbow.
According to Wikipedia, these are the 7 colors.
Red #FF0000 (RGB: 255, 0, 0)
Orange #FF7F00 (RGB: 255, 127, 0)
Yellow #FFFF00 (RGB: 255, 255, 0)
Green #00FF00 (RGB: 0, 255, 0)
Blue #0000FF (RGB: 0, 0, 255)
Indigo #6600FF (RGB: 111, 0, 255)
Violet #8B00FF (RGB: 143, 0, 255)
The rules
The program must output 'Hello World'. (Doesn't necessarily need to be text, but it must be distiguishable as 'Hello World')
Each letter must be a different color.
The colors can be in any order.
You must use each of the seven colors at least once. (You may use more than the given colors)
No use of markup languages in any case.
The winner is whoever has the lowest amount of characters AND follows the rules
Bonus -1 character if it is written in DART
I will pick the winner on Jan 11 (if I remember ;D).
Good luck
"""
def rainbow(s):
    """Print *s* with each character in a different ANSI terminal color.

    Cycles through the bold SGR foreground codes 31..36 (red..cyan), then
    resets the attributes and terminates the line.
    """
    color = 31
    for ch in s:
        print("\033[%d;1m%c" % (color, ch), end='')
        color = 31 if color >= 36 else color + 1
    print("\033[0m")
def main():
    # Entry point: render the required 'Hello World!' banner with the
    # cycling ANSI colors.
    rainbow("Hello World!")
main()
|
[
"qeed.quan@gmail.com"
] |
qeed.quan@gmail.com
|
a678ce0647f4fcc50b8dfa7d82c5c516efdabcc1
|
53262ee5b8437d208a80de997a8de5074a92426a
|
/root_numpy/tmva/__init__.py
|
8286f5266882d4967b02669008fcb582b4da83cb
|
[
"BSD-3-Clause"
] |
permissive
|
scikit-hep/root_numpy
|
bb2c7280a5e9e15df91c86ff3c6d9bfe3464c754
|
049e487879d70dd93c97e323ba6b71c56d4759e8
|
refs/heads/master
| 2023-04-07T11:25:50.080999
| 2023-01-06T17:57:30
| 2023-01-06T17:57:30
| 3,823,872
| 87
| 25
|
BSD-3-Clause
| 2021-02-27T10:02:21
| 2012-03-25T11:40:22
|
Python
|
UTF-8
|
Python
| false
| false
| 544
|
py
|
try:
    # Compiled TMVA bindings; present only when root_numpy was built with
    # the TMVA interface enabled.
    from . import _libtmvanumpy
except ImportError: # pragma: no cover
    import warnings
    warnings.warn(
        "root_numpy.tmva requires that you install root_numpy with "
        "the tmva interface enabled", ImportWarning)
    # Export nothing so `from root_numpy.tmva import *` fails loudly.
    __all__ = []
else:
    from ._data import add_classification_events, add_regression_events
    from ._evaluate import evaluate_reader, evaluate_method
    __all__ = [
        'add_classification_events',
        'add_regression_events',
        'evaluate_reader',
        'evaluate_method',
    ]
|
[
"noel.dawe@gmail.com"
] |
noel.dawe@gmail.com
|
b34289eaf185e4d32c68ce971ed745443c0712dd
|
9c6837404b15c71ef13b0615701dbde49806ffa3
|
/app/app.py
|
48f35b56eba471c5966b68c407bbd4fabbf14d2f
|
[
"MIT"
] |
permissive
|
gladunvv/send-messages-service
|
d43bd68af892aeb268e2f75b91756eaa5eed1976
|
a467f2daab77feb5ad9c72e02d5aa12741fc20b7
|
refs/heads/master
| 2020-09-17T07:10:48.814024
| 2019-12-09T20:25:37
| 2019-12-09T20:25:37
| 224,031,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
import flask
import os
# Create the Flask application with debug mode enabled globally.
app = flask.Flask(__name__)
app.config["DEBUG"] = True

# Imported after `app` exists: the routes module registers its view
# functions on this app object (intentional circular-import pattern).
import routes

if __name__ == "__main__":
    app.run(debug=True)
|
[
"bincha.1997@gmail.com"
] |
bincha.1997@gmail.com
|
50d49eda3d0f6a9bf8a2664a0489184a0a528b18
|
efcd21234f3291e8fc561f49a7c88fc57a63e952
|
/tartiflette/execution/nodes/variable_definition.py
|
d39c40f25262e260c7aa9a9a91e664a5891a9398
|
[
"MIT"
] |
permissive
|
tartiflette/tartiflette
|
146214a43847d2f423bf74594643c1fdefc746f1
|
421c1e937f553d6a5bf2f30154022c0d77053cfb
|
refs/heads/master
| 2023-09-01T02:40:05.974025
| 2022-01-20T14:55:31
| 2022-01-20T14:55:31
| 119,035,565
| 586
| 39
|
MIT
| 2023-09-11T07:49:27
| 2018-01-26T09:56:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,799
|
py
|
from functools import partial
from typing import Any, Callable
from tartiflette.coercers.inputs.compute import get_input_coercer
from tartiflette.coercers.literals.compute import get_literal_coercer
from tartiflette.coercers.variables import variable_coercer
from tartiflette.constants import UNDEFINED_VALUE
from tartiflette.utils.type_from_ast import schema_type_from_ast
__all__ = ("variable_definition_node_to_executable",)
class ExecutableVariableDefinition:
    """Executable representation of a GraphQL variable definition."""

    __slots__ = (
        "name",
        "graphql_type",
        "default_value",
        "coercer",
        "definition",
    )

    def __init__(
        self,
        name: str,
        graphql_type: "GraphQLType",
        default_value: Any,
        coercer: Callable,
        definition: "VariableDefinitionNode",
    ) -> None:
        """
        :param name: variable name as declared in the operation
        :param graphql_type: GraphQLType the provided value must coerce to
        :param default_value: value used when the variable isn't supplied
        :param coercer: callable invoked to coerce the raw input value
        :param definition: AST node this executable definition wraps
        :type name: str
        :type graphql_type: GraphQLType
        :type default_value: Any
        :type coercer: Callable
        :type definition: VariableDefinitionNode
        """
        self.definition = definition
        self.default_value = default_value
        self.graphql_type = graphql_type
        self.name = name
        # Pre-bind this definition as the coercer's first argument so
        # callers only pass the raw value.
        self.coercer = partial(coercer, self)
def variable_definition_node_to_executable(
    schema: "GraphQLSchema", variable_definition_node: "VariableDefinitionNode"
) -> "ExecutableVariableDefinition":
    """
    Converts a variable definition AST node into an executable variable
    definition.
    :param schema: the GraphQLSchema instance linked to the engine
    :param variable_definition_node: the variable definition AST node to treat
    :type schema: GraphQLSchema
    :type variable_definition_node: VariableDefinitionNode
    :return: an executable variable definition
    :rtype: ExecutableVariableDefinition
    """
    graphql_type = schema_type_from_ast(schema, variable_definition_node.type)
    return ExecutableVariableDefinition(
        name=variable_definition_node.variable.name.value,
        # AST default-value nodes are truthy; ``or`` only substitutes the
        # UNDEFINED_VALUE sentinel when no default was declared (None).
        default_value=variable_definition_node.default_value
        or UNDEFINED_VALUE,
        graphql_type=graphql_type,
        coercer=partial(
            variable_coercer,
            input_coercer=partial(
                get_input_coercer(graphql_type), variable_definition_node
            ),
            literal_coercer=get_literal_coercer(graphql_type),
        ),
        definition=variable_definition_node,
    )
|
[
"raulic.maximilien@gmail.com"
] |
raulic.maximilien@gmail.com
|
0fac912558de9a1141bb62d3223f1aa8fd825e70
|
1b9075ffea7d4b846d42981b41be44238c371202
|
/2008/devel/desktop/xfce4/goodies/xfce4-notifyd/actions.py
|
0be89f389aad103384a5f9e18a9beb460910be54
|
[] |
no_license
|
pars-linux/contrib
|
bf630d4be77f4e484b8c6c8b0698a5b34b3371f4
|
908210110796ef9461a1f9b080b6171fa022e56a
|
refs/heads/master
| 2020-05-26T20:35:58.697670
| 2011-07-11T11:16:38
| 2011-07-11T11:16:38
| 82,484,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 569
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
    """Configure the build: xfce4 helper binaries under /usr/lib/xfce4,
    static libraries disabled."""
    autotools.configure('--libexecdir=/usr/lib/xfce4 \
                         --disable-static')
def build():
    """Compile the package with make."""
    autotools.make()
def install():
    """Install into the PiSi package staging directory and ship the docs."""
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    pisitools.dodoc("AUTHORS", "ChangeLog", "COPYING", "README")
|
[
"MeW@a748b760-f2fe-475f-8849-a8a11d7a3cd2"
] |
MeW@a748b760-f2fe-475f-8849-a8a11d7a3cd2
|
4e078c68276aaed1c1699174d8b734d478bb44ce
|
ff85002de8fc3e8d38b96753f7358ea1dc8055af
|
/Infinite_sequence.py
|
105c8cc00705bdc188dbf46bca2fbd0d97a61125
|
[] |
no_license
|
xlax007/Collection-of-Algorithms
|
d0ef8277e4f6dd5a27ed2a67bb720c3d867cbec9
|
4fe4d69f60b3b6f49624be135750f074216aacb9
|
refs/heads/master
| 2022-12-12T23:15:39.991983
| 2020-09-09T23:36:26
| 2020-09-09T23:36:26
| 294,251,463
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 9 20:27:27 2020
@author: alexi
"""
#https://codeforces.com/problemset/problem/675/A --- Alexis Galvan
def infinite_sequence(numbers=None):
    """Decide whether b occurs in the arithmetic sequence a, a+c, a+2c, ...

    Codeforces 675A. Reads "a b c" from stdin when *numbers* is not given,
    so the original no-argument call keeps working.

    Bug fix: the original branch logic answered incorrectly when c == 1
    (e.g. a=1, b=5, c=1 -> 'NO' even though 5 is in 1,2,3,4,5, and
    a=5, b=1, c=1 -> 'YES' even though the sequence only increases).

    :param numbers: optional sequence [a, b, c]; read from stdin when None
    :return: 'YES' if b is a term of the sequence, otherwise 'NO'
    """
    if numbers is None:
        numbers = list(map(int, input().split()))
    a, b, c = numbers
    if c == 0:
        # Constant sequence: b occurs only if it equals a.
        return 'YES' if a == b else 'NO'
    diff = b - a
    # b is reachable iff diff is a non-negative multiple of the step c
    # (Python's % and // handle negative steps consistently here).
    return 'YES' if diff % c == 0 and diff // c >= 0 else 'NO'
# Script entry: read "a b c" from stdin and print the verdict.
A = infinite_sequence()
print(A)
|
[
"noreply@github.com"
] |
xlax007.noreply@github.com
|
ac8d7504a26caa742184cb8d3821d953144997fa
|
6847e4c855a76b9d5ed04879394adcebdb0467e1
|
/fssp/migrations/0005_remove_request_response.py
|
19c9061ec91e08f5c78513c05939bd8d3b7ef84a
|
[] |
no_license
|
i7ionov/knd
|
954227c78043841ac402b76121e0194f608f35b6
|
3f8ecd488454cedea4da3b4f72869c1dbcb24112
|
refs/heads/master
| 2023-06-04T08:30:22.596221
| 2021-07-02T10:52:35
| 2021-07-02T10:52:35
| 372,376,407
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
py
|
# Generated by Django 2.2 on 2021-04-26 12:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('fssp', '0004_request_token'),
]
operations = [
migrations.RemoveField(
model_name='request',
name='response',
),
]
|
[
"ivsemionov@iggn.permkrai.ru"
] |
ivsemionov@iggn.permkrai.ru
|
dbf95929d8d6ee23c4ba280b0087426af2f2d6a7
|
f966c891c666db846d86406cb9c08a530902d032
|
/algorithms/implementation/larrys_array.py
|
463216acec541b8c6a7c8847fad3576cde14e85c
|
[] |
no_license
|
rickharris-dev/hacker-rank
|
36620a16894571e324422c83bd553440cf5bbeb1
|
2ad0fe4b496198bec1b900d2e396a0704bd0c6d4
|
refs/heads/master
| 2020-12-25T14:33:20.118325
| 2016-09-06T01:10:43
| 2016-09-06T01:10:43
| 67,264,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 553
|
py
|
#!/usr/bin/python
t = int(raw_input().strip())
for i in range(0,t):
n = int(raw_input().strip())
a = map(int,raw_input().strip().split(' '))
inversions = 0
for j in range(0,n):
inversions += abs(a[j] - (j + 1))
while j > 0:
if a[j - 1] > a[j]:
swap = a[j]
a[j] = a[j - 1]
a[j - 1] = swap
inversions -= 1
j -= 1
else:
break
if inversions % 2 == 0:
print "YES"
else:
print "NO"
|
[
"rickharris724@gmail.com"
] |
rickharris724@gmail.com
|
00100d269f830789446f2c2dec2b09e8f48e9b1a
|
7823d31688879b2d4dcfd2e3c11fb2c862f35a23
|
/image_retrieval/server/algorithm/__init__.py
|
54615a50ab3d115940cbce7402700f464f4a7c66
|
[] |
no_license
|
FMsunyh/dlfive
|
7637631f54520673e4ec417b3c02b5334ecdf026
|
ffae48aac5ece4de5ff9afccc69b093a72e09637
|
refs/heads/master
| 2021-09-19T05:59:51.040214
| 2018-07-24T06:29:40
| 2018-07-24T06:29:40
| 108,929,499
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 205
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 12/7/2017 9:32 AM
# @Author : sunyonghai
# @File : __init__.py.py
# @Software: BG_AI
# =========================================================
|
[
"fmsunyh@gmail.com"
] |
fmsunyh@gmail.com
|
9cdf17ae4fea3bd58bf6f8194f281cd060691c43
|
84cfe9b0ca7209487231e0725f7ad0d233f09544
|
/stylemaster/urls.py
|
75696e60c98bc0d947c8565e886e11919a73a7fd
|
[] |
no_license
|
archit-dwevedi/M4Plan
|
3eefc12ea447d624bae6f758c3648d7caf825c1a
|
d162592748ea37bc070b6217365e8601a6ccdd9a
|
refs/heads/master
| 2021-10-26T23:22:04.456014
| 2019-04-14T20:02:17
| 2019-04-14T20:02:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,074
|
py
|
from django.urls import path
from stylemaster import views
from M3_Project import settings
from django.conf.urls.static import static
urlpatterns=[
path('sample/', views.sample, name='sample'),
path('stylemaster/', views.style, name='style'),
path('stylefull/', views.stylefull, name='stylefull'),
path('fabric/', views.fabric, name='fabric'),
path('trims/', views.trims, name='trims'),
path('preview/',views.preview,name='preview'),
path('BOM_selection/',views.bom_select,name='bom_select'),
path('bomfill/',views.bomfill,name='bomfill'),
path('Bill_of_materials/',views.bom,name='bom'),
path('update/<int:pk>',views.update.as_view(),name='update'),
path('styleorder/',views.styleorder,name='styleorder'),
path('dash_styleorder/',views.dashorder,name='dash_styleorder'),
path('garment/',views.garmentitem,name='garment'),
path('dashgarment/',views.dashgarment,name='dashgarment'),
]
if settings.DEBUG:
urlpatterns+=static(settings.STATIC_URL,document_root=settings.STATICFILES_DIRS)
|
[
"dwevediar@gmail.com"
] |
dwevediar@gmail.com
|
cb45ff90aa4660fc5bccac79d5c77c74cb64959e
|
76c50a9849b4093c6339dfeff888a5a0672a92bc
|
/yatsm/pipeline/_topology.py
|
c1ea0e9ff58ace0c4bec6246c61d26cf8edf1d40
|
[
"MIT"
] |
permissive
|
valpasq/yatsm
|
d2fac9c7eea6f8a785d6c5b6c24196bb9f441db0
|
44e2124c1bae3dd4245437475d709187f52d376d
|
refs/heads/master
| 2021-01-18T07:40:33.260515
| 2016-09-13T16:22:36
| 2016-09-13T16:22:36
| 40,021,588
| 0
| 0
| null | 2015-07-31T19:30:14
| 2015-07-31T19:30:14
| null |
UTF-8
|
Python
| false
| false
| 4,509
|
py
|
""" Build pipeline dependency graph from requirements
"""
from collections import defaultdict
import logging
import six
import toposort
from .language import OUTPUT, REQUIRE, PIPE
logger = logging.getLogger(__name__)
def format_deps(d):
""" Return formatted list of dependencies from 'requires'/'provides'
Transform as follows:
.. code-block:: python
>>> d = {
'data': ['red', 'nir', 'ndvi'],
'record': ['ccdc'],
}
>>> format_deps(d)
['data-red', 'data-nir', 'data-ndvi', 'record-ccdc']
Args:
d (dict): Task specification, requirements or provisions
Returns:
list: Formatted names of task dependencies
"""
out = []
for _type, names in six.iteritems(d):
out.extend(['%s-%s' % (_type, name) for name in names])
return out
def pipe_deps(pipe):
""" Format data and record in a `pipe`
Provides references to dataset bands and record information in `pipe`.
Args:
pipe (dict): A "pipeline" object containing `data` and `record` as
keys.
Returns:
dict: Dependency graph for data or results inside of `pipe`
"""
dsk = {PIPE: set()}
deps = {
'data': pipe['data'].keys(),
'record': pipe['record'].keys()
}
_deps = format_deps(deps)
for _dep in _deps:
dsk[_dep] = set([PIPE])
return dsk
def config_to_deps(config, dsk=None, overwrite=True):
""" Convert a pipeline specification into list of tasks
Args:
config (dict): Specification of pipeline tasks
dsk (dict): Optionally, provide a dictionary that already includes
some dependencies. The values of this dict should be sets.
overwrite (bool): Allow tasks to overwrite values that have already
been computed
Returns:
dict: Dependency graph
"""
dsk = defaultdict(set, dsk) if dsk else defaultdict(set)
for task, spec in config.items():
# from IPython.core.debugger import Pdb; Pdb().set_trace()
# Add in task requirements
deps = format_deps(spec[REQUIRE])
dsk[task] = dsk[task].union(deps)
# Add in data/record provided by task
prov = format_deps(spec[OUTPUT])
task_needed = False
for _prov in prov:
if overwrite or _prov not in dsk:
logger.debug('Adding task: {}'.format(task))
dsk[_prov].add(task)
task_needed = True
else:
logger.debug('Task already computed and not overwrite - not '
'adding: {}'.format(task))
# If this task didn't provide any new data/record, cull it
if not task_needed:
logger.debug('Culling task {} because everything it provides is '
'already calculated (e.g., from cache)'.format(task))
del dsk[task]
return dsk
def validate_dependencies(tasks, dsk):
""" Check that all required tasks are provided by `dsk`
Args:
tasks (Sequence[str]): Tasks to run, given in order of dependency
dsk (dict[str, set[str]]): Dependency graph
Returns:
list[str]: List of input tasks
Raises:
KeyError: Raise if not all dependencies are met
"""
# First validate the DAG
for task, deps in dsk.items():
check = [dep in dsk for dep in deps]
if not all(check):
missing = [dep for dep, ok in zip(deps, check) if not ok]
missing_str = ', '.join(['%i) "%s"' % (i + 1, m) for i, m in
enumerate(missing)])
raise KeyError('Task "{}" has unmet dependencies: {}'
.format(task, missing_str))
return tasks
def config_to_tasks(config, pipe, overwrite=True):
""" Return a list of tasks from a pipeline specification
Args:
config (dict): Pipeline specification
pipe (dict): Container storing `data` and `record` keys
overwrite (bool): Allow tasks to overwrite values that have already
been computed
Returns:
list: Tasks to run from the pipeline specification, given in the
order required to fullfill all dependencies
"""
_dsk = pipe_deps(pipe)
dsk = config_to_deps(config, dsk=_dsk, overwrite=overwrite)
tasks = [task for task in toposort.toposort_flatten(dsk)
if task in config.keys()]
return validate_dependencies(tasks, dsk)
|
[
"ceholden@gmail.com"
] |
ceholden@gmail.com
|
4bcb2952ab0ade3293adefae13298275dbcebc72
|
562d4bf000dbb66cd7109844c972bfc00ea7224c
|
/addons/odoope_einvoice_base/__init__.py
|
b76d1966445c061ecc7fae89cd3e5df0d1312dda
|
[] |
no_license
|
Mohamed33/odoo-efact-11-pos
|
e9da1d17b38ddfe5b2d0901b3dbadf7a76bd2059
|
de38355aea74cdc643a347f7d52e1d287c208ff8
|
refs/heads/master
| 2023-03-10T15:24:44.052883
| 2021-03-06T13:25:58
| 2021-03-06T13:25:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-TODAY Odoo Peru(<http://www.odooperu.pe>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from . import models
# eof:__init__.py
|
[
"root@vmi414107.contaboserver.net"
] |
root@vmi414107.contaboserver.net
|
967b0cd790a778ad56557cb88098e499ebd5e7e5
|
4be96ec1670297ae71efa6c5c4a830255f893743
|
/Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda/_vendor/frozendict.py
|
eeeebf38e22db3bbdce97652812fe2fe6057657f
|
[
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-free-unknown",
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
zooba/PTVS
|
7dbcc0165ded98d077040a6b367fd71a1eb1e6bd
|
0c213e901168fee368a32939256174077a8d4dfa
|
refs/heads/master
| 2021-05-23T03:58:11.007138
| 2021-01-26T00:20:40
| 2021-01-26T00:20:40
| 42,481,352
| 1
| 0
|
Apache-2.0
| 2018-11-19T19:30:24
| 2015-09-14T22:47:51
|
C#
|
UTF-8
|
Python
| false
| false
| 2,909
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Santiago Lezica
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# source: https://raw.githubusercontent.com/slezica/python-frozendict/c5d16bafcca7b72ff3e8f40d3a9081e4c9233f1b/frozendict/__init__.py
# version: 1.2
# date: 2018-06-29
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
try:
from collections import OrderedDict
except ImportError: # python < 2.7
OrderedDict = NotImplemented
iteritems = getattr(dict, 'iteritems', dict.items) # py2-3 compatibility
class frozendict(Mapping):
"""
An immutable wrapper around dictionaries that implements the complete :py:class:`collections.Mapping`
interface. It can be used as a drop-in replacement for dictionaries where immutability is desired.
"""
dict_cls = dict
def __init__(self, *args, **kwargs):
self._dict = self.dict_cls(*args, **kwargs)
self._hash = None
def __getitem__(self, key):
return self._dict[key]
def __contains__(self, key):
return key in self._dict
def copy(self, **add_or_replace):
return self.__class__(self, **add_or_replace)
def __iter__(self):
return iter(self._dict)
def __len__(self):
return len(self._dict)
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self._dict)
def __hash__(self):
if self._hash is None:
h = 0
for key, value in iteritems(self._dict):
h ^= hash((key, value))
self._hash = h
return self._hash
def __json__(self):
# Works with auxlib's EntityEncoder.
return self._dict
class FrozenOrderedDict(frozendict):
"""
A frozendict subclass that maintains key order
"""
dict_cls = OrderedDict
if OrderedDict is NotImplemented:
del FrozenOrderedDict
|
[
"huvalo@microsoft.com"
] |
huvalo@microsoft.com
|
25af442b950800e054a759cc21320c8ce9953edf
|
172c00532812041f491ccea4a548401e9148864c
|
/feedback_form/urls.py
|
8d80bbe0d2024471abdb62d308c7f902f5008db9
|
[] |
no_license
|
henokali1/FANS-feedback-form
|
34830f5d7740f3578b13e5d8f46e2372492e0733
|
ab15cc736571ff55a06ea3e6275428077ef08fde
|
refs/heads/master
| 2020-04-09T02:29:05.240720
| 2018-12-04T03:23:32
| 2018-12-04T03:23:32
| 159,942,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 738
|
py
|
from django.contrib import admin
from django.urls import path, include
from django.views.generic.base import TemplateView
from trainee_feedback.views import feedback_view, add_feedback_view, thankyou_view, trainer_view, report_view, feedback_detail_view
urlpatterns = [
path('', TemplateView.as_view(template_name='home.html'), name='home'),
path('admin/', admin.site.urls),
path('users/', include('users.urls')),
path('users/', include('django.contrib.auth.urls')),
path('feedback/', feedback_view),
path('add_feedback/', add_feedback_view),
path('thankyou/', thankyou_view),
path('trainer/', trainer_view),
path('report/', report_view),
path('feedback_detail/<int:pk>/', feedback_detail_view),
]
|
[
"henokali1@gmail.com"
] |
henokali1@gmail.com
|
fc2d3910ce1955870202ad5072111d79369813a1
|
48ca6f9f041a1e9f563500c8a7fa04dbb18fa949
|
/tests/lib/cast/test_to_int.py
|
020a6156975f87a7f23d58cb567094342d837c05
|
[
"MIT"
] |
permissive
|
bogdanvuk/pygears
|
71404e53d4689ec9cdd9db546bfc0f229a7e02da
|
705b11ab6de79868b25753fa9d0ce7128791b346
|
refs/heads/master
| 2023-07-08T11:38:54.625172
| 2022-03-07T12:29:00
| 2022-03-07T12:29:00
| 124,890,922
| 146
| 16
|
MIT
| 2022-08-15T07:57:08
| 2018-03-12T13:10:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,605
|
py
|
import pytest
from pygears.typing import Int, Tuple, Ufixp, Uint, cast
def test_ufixp_type_cast():
assert cast(Ufixp[8, 16], Int) == Int[9]
assert cast(Ufixp[8, 16], Int[16]) == Int[16]
assert cast(Ufixp[16, 8], Int) == Int[17]
with pytest.raises(TypeError):
cast(Ufixp[-1, 16], Int)
def test_ufixp_value_cast():
assert cast(Ufixp[8, 16](2.15), Int) == Int[9](2)
assert cast(Ufixp[8, 16](2.15), Int[16]) == Int[16](2)
with pytest.raises(TypeError):
cast(Ufixp[-1, 16](0.15), Int)
assert cast(Ufixp[-1, 16](0.15), Int[16]) == Int[16](0)
with pytest.raises(TypeError):
cast(Ufixp[8, 16](56.15), Int[8])
def test_uint_type_cast():
assert cast(Uint[8], Int) == Int[9]
assert cast(Uint[8], Int[16]) == Int[16]
with pytest.raises(TypeError):
cast(Uint[8], Int[8])
assert cast(Int[8], Int) == Int[8]
assert cast(Int[8], Int[16]) == Int[16]
with pytest.raises(TypeError):
cast(Int[8], Int[4])
def test_number_value_cast():
assert cast(Uint[8](128), Int[16]) == Int[16](128)
assert cast(Int[8](127), Int[8]) == Int[8](127)
with pytest.raises(TypeError):
cast(Uint[16](128), Int[16])
with pytest.raises(TypeError):
cast(Int[16](-128), Int[8])
assert cast(2.15, Int[4]) == Int[4](2)
assert cast(7, Int[4]) == Int[4](7)
assert cast(-8, Int[4]) == Int[4](-8)
with pytest.raises(ValueError):
cast(-9, Int[4])
def test_unsupported_cast():
for t in [Tuple[Int[2], Uint[2]]]:
with pytest.raises(TypeError):
cast(t, Int)
|
[
"bogdan.vukobratovic@gmail.com"
] |
bogdan.vukobratovic@gmail.com
|
a9add2a2807b26acdea921f6e43d510209324db8
|
7b2d14f78099fde6c4a35082c9c294d1771cb163
|
/Week 12/templates/paint.py
|
e9129563cd2af70c80bb773c5f863d15f03febb8
|
[] |
no_license
|
pinardy/Digital-World
|
04c6ddb369ede7295a0891aaaa006486c557965e
|
dd0a351eb64f05b524b08c47cd0c0ad3eadd775c
|
refs/heads/master
| 2020-12-30T22:45:02.448171
| 2018-01-30T03:06:08
| 2018-01-30T03:06:08
| 80,622,946
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,281
|
py
|
from random import random
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from kivy.graphics import Color, Line, Rectangle
from kivy.uix.filechooser import FileChooserListView, FileChooserIconView
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.image import Image
class MyBackground(Widget):
def __init__(self, **kwargs):
super(MyBackground, self).__init__(**kwargs)
with self.canvas:
self.bg = Rectangle(source='water.png', pos=self.pos, size=self.size)
self.bind(pos=self.update_bg)
self.bind(size=self.update_bg)
def update_bg(self, *args):
self.bg.pos = self.pos
self.bg.size = self.size
class MyPaintWidget(Widget):
def on_touch_down(self, touch):
color = (random(), random(), random())
with self.canvas:
Color(*color)
d = 30.
touch.ud['line'] = Line(points=(touch.x, touch.y))
def on_touch_move(self, touch):
touch.ud['line'].points += [touch.x, touch.y]
class MyPaintApp(App):
def build(self):
parent = MyBackground()
painter = MyPaintWidget()
parent.add_widget(painter)
return parent
if __name__ == '__main__':
MyPaintApp().run()
|
[
"pinardy@hotmail.com"
] |
pinardy@hotmail.com
|
a56accc8bb039caa306a869de4e06b475ff2da4e
|
0669d94428c972da19346e356861bf11bd668bc9
|
/swagger_client/models/listenoire_reponse.py
|
371bc0bc9cc54f00a8cea83fb5cd140194c384cf
|
[] |
no_license
|
mlemee/iSendProPython
|
e9a0f8351e33ae7598bd1380a26c2fe0a1dacd22
|
3add878dbcd682aa41f2bd07f98d8b56c8e5f9f3
|
refs/heads/master
| 2022-06-10T02:27:12.368498
| 2020-05-04T15:48:13
| 2020-05-04T15:48:13
| 261,206,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,925
|
py
|
# coding: utf-8
"""
API iSendPro
[1] Liste des fonctionnalités : - envoi de SMS à un ou plusieurs destinataires, - lookup HLR, - récupération des récapitulatifs de campagne, - gestion des répertoires, - ajout en liste noire. - comptage du nombre de caractères des SMS [2] Pour utiliser cette API vous devez: - Créer un compte iSendPro sur https://isendpro.com/ - Créditer votre compte - Remarque: obtention d'un crédit de test possible sous conditions - Noter votre clé de compte (keyid) - Elle vous sera indispensable à l'utilisation de l'API - Vous pouvez la trouver dans le rubrique mon \"compte\", sous-rubrique \"mon API\" - Configurer le contrôle IP - Le contrôle IP est configurable dans le rubrique mon \"compte\", sous-rubrique \"mon API\" - Il s'agit d'un système de liste blanche, vous devez entrer les IP utilisées pour appeler l'API - Vous pouvez également désactiver totalement le contrôle IP # noqa: E501
OpenAPI spec version: 1.1.1
Contact: support@isendpro.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.listenoire_reponse_etat import LISTENOIREReponseEtat # noqa: F401,E501
class LISTENOIREReponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'etat': 'LISTENOIREReponseEtat'
}
attribute_map = {
'etat': 'etat'
}
def __init__(self, etat=None): # noqa: E501
"""LISTENOIREReponse - a model defined in Swagger""" # noqa: E501
self._etat = None
self.discriminator = None
if etat is not None:
self.etat = etat
@property
def etat(self):
"""Gets the etat of this LISTENOIREReponse. # noqa: E501
:return: The etat of this LISTENOIREReponse. # noqa: E501
:rtype: LISTENOIREReponseEtat
"""
return self._etat
@etat.setter
def etat(self, etat):
"""Sets the etat of this LISTENOIREReponse.
:param etat: The etat of this LISTENOIREReponse. # noqa: E501
:type: LISTENOIREReponseEtat
"""
self._etat = etat
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, LISTENOIREReponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"maxime.le.mee.checkandbang@gmail.com"
] |
maxime.le.mee.checkandbang@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.