blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b3c6f8b199115f12bd32d0061f4a571a117ca082
|
0bde5f7f09aa537ed1f4828d4e5ebee66475918f
|
/h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_mult.py
|
f2690d0cf18bb0a449c22efbf81071d98ed0caba
|
[
"Apache-2.0"
] |
permissive
|
Winfredemalx54/h2o-3
|
d69f1c07e1f5d2540cb0ce5e6073415fa0780d32
|
dfb163c82ff3bfa6f88cdf02465a9bb4c8189cb7
|
refs/heads/master
| 2022-12-14T08:59:04.109986
| 2020-09-23T08:36:59
| 2020-09-23T08:36:59
| 297,947,978
| 2
| 0
|
Apache-2.0
| 2020-09-23T11:28:54
| 2020-09-23T11:28:54
| null |
UTF-8
|
Python
| false
| false
| 1,178
|
py
|
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
import numpy as np
from h2o.utils.typechecks import assert_is_type
from h2o.frame import H2OFrame
import random
def h2o_H2OFrame_mult():
    """
    Python API test: h2o.frame.H2OFrame.mult(matrix).

    Builds a 100x1 random column, multiplies it by its own transpose via
    both H2O and numpy, and spot-checks that randomly chosen cells of the
    two 100x100 products agree to within 1e-6.
    """
    column = [[random.uniform(-10000, 10000)] for _ in range(100)]
    frame = h2o.H2OFrame(column)
    matrix = np.array(column)
    h2o_product = frame.mult(frame.transpose())
    np_product = np.dot(matrix, np.transpose(matrix))
    assert_is_type(h2o_product, H2OFrame)
    # Compare 100 randomly sampled cells of the two products.
    for _ in range(100):
        row = random.randint(0, 99)
        col = random.randint(0, 99)
        h2o_val = h2o_product[row, col]
        np_val = np_product[row][col]
        assert abs(h2o_val - np_val) < 1e-06, "check unsuccessful! h2o computed {0} and numpy computed {1}. expected " \
                                              "equal quantile values between h2o and numpy".format(h2o_val, np_val)
if __name__ == "__main__":
    # BUG FIX: the original passed `h2o_H2OFrame_mult()` — i.e. it *ran*
    # the test before the harness connected to the H2O cluster and then
    # handed its None return value to standalone_test. The harness expects
    # the function object itself and invokes it after setup.
    pyunit_utils.standalone_test(h2o_H2OFrame_mult)
else:
    h2o_H2OFrame_mult()
|
[
"noreply@github.com"
] |
Winfredemalx54.noreply@github.com
|
808270b56854e917ed75cbf6c97e1c769dfb54cd
|
bc7b5d2477ca3b0c54383c97a19d29cb7cb63bc5
|
/sdk/lusid/models/resource_list_of_order.py
|
c5bac180964942bdd13bef10b9e59403e18824e9
|
[
"MIT"
] |
permissive
|
mb-a/lusid-sdk-python-2
|
342b6e7b61ca0c93f43e72f69e572478e3a7be4f
|
0eee79c8e36188a735aaae578a9c4be2a8497aed
|
refs/heads/master
| 2023-07-15T16:15:11.878079
| 2021-08-25T20:52:27
| 2021-08-25T20:52:27
| 214,153,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,906
|
py
|
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.3430
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ResourceListOfOrder(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types:  attribute name -> attribute type
    # attribute_map:  attribute name -> json key in the API definition
    # required_map:   attribute name -> 'required' or 'optional'
    openapi_types = {
        'values': 'list[Order]',
        'href': 'str',
        'links': 'list[Link]',
        'next_page': 'str',
        'previous_page': 'str'
    }

    attribute_map = {
        'values': 'values',
        'href': 'href',
        'links': 'links',
        'next_page': 'nextPage',
        'previous_page': 'previousPage'
    }

    required_map = {
        'values': 'required',
        'href': 'optional',
        'links': 'optional',
        'next_page': 'optional',
        'previous_page': 'optional'
    }

    def __init__(self, values=None, href=None, links=None, next_page=None, previous_page=None):  # noqa: E501
        """ResourceListOfOrder - a model defined in OpenAPI

        :param values: The resources to list. (required)
        :type values: list[lusid.Order]
        :param href: The URI of the resource list.
        :type href: str
        :param links: Collection of links.
        :type links: list[lusid.Link]
        :param next_page: The next page of results.
        :type next_page: str
        :param previous_page: The previous page of results.
        :type previous_page: str
        """  # noqa: E501
        # Backing fields; assignment goes through the property setters so
        # validation (e.g. `values` must not be None) always runs.
        self._values = None
        self._href = None
        self._links = None
        self._next_page = None
        self._previous_page = None
        self.discriminator = None

        self.values = values
        self.href = href
        self.links = links
        self.next_page = next_page
        self.previous_page = previous_page

    @property
    def values(self):
        """The resources to list (required).

        :rtype: list[Order]
        """
        return self._values

    @values.setter
    def values(self, values):
        """Set the resources to list; None is rejected."""
        if values is None:
            raise ValueError("Invalid value for `values`, must not be `None`")  # noqa: E501
        self._values = values

    @property
    def href(self):
        """The URI of the resource list.

        :rtype: str
        """
        return self._href

    @href.setter
    def href(self, href):
        self._href = href

    @property
    def links(self):
        """Collection of links.

        :rtype: list[Link]
        """
        return self._links

    @links.setter
    def links(self, links):
        self._links = links

    @property
    def next_page(self):
        """The next page of results.

        :rtype: str
        """
        return self._next_page

    @next_page.setter
    def next_page(self, next_page):
        self._next_page = next_page

    @property
    def previous_page(self):
        """The previous page of results.

        :rtype: str
        """
        return self._previous_page

    @previous_page.setter
    def previous_page(self, previous_page):
        self._previous_page = previous_page

    def to_dict(self):
        """Return the model properties as a dict, recursing into any
        contained value that itself exposes to_dict()."""
        out = {}
        for name, _ in six.iteritems(self.openapi_types):
            val = getattr(self, name)
            if isinstance(val, list):
                out[name] = [item.to_dict() if hasattr(item, "to_dict") else item
                             for item in val]
            elif hasattr(val, "to_dict"):
                out[name] = val.to_dict()
            elif isinstance(val, dict):
                out[name] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                             for k, v in val.items()}
            else:
                out[name] = val
        return out

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when all attributes compare equal."""
        if not isinstance(other, ResourceListOfOrder):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
|
[
"concourse@finbourne.com"
] |
concourse@finbourne.com
|
8b4e464b1ad0ef3c918a96fd380ac7b91b8c909d
|
7f57c12349eb4046c40c48acb35b0f0a51a344f6
|
/2015/ConstructBinaryTreeFromPreorderAndInorderTraversal_v0.py
|
daaa268e1414a2c429629aa49eadb7c663488efa
|
[] |
no_license
|
everbird/leetcode-py
|
0a1135952a93b93c02dcb9766a45e481337f1131
|
b093920748012cddb77258b1900c6c177579bff8
|
refs/heads/master
| 2022-12-13T07:53:31.895212
| 2022-12-10T00:48:39
| 2022-12-10T00:48:39
| 11,116,752
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,120
|
py
|
#!/usr/bin/env python
# encoding: utf-8
# Definition for a binary tree node.
class TreeNode:
    """A binary tree node: a payload value plus left/right child links."""

    def __init__(self, x):
        # Children start out absent; the payload is stored as-is.
        self.left = None
        self.right = None
        self.val = x
class Solution:
    # @param {integer[]} preorder
    # @param {integer[]} inorder
    # @return {TreeNode}
    def buildTree(self, preorder, inorder):
        """Rebuild a binary tree from its preorder and inorder traversals.

        Consumes `preorder` destructively (pop from the front). The first
        preorder element is the current subtree's root; its index in
        `inorder` splits the remaining values into left and right subtrees.
        Returns the root TreeNode, or None for empty input.
        """
        if not inorder or not preorder:
            return None
        root_val = preorder.pop(0)
        node = TreeNode(root_val)
        split = inorder.index(root_val)
        node.left = self.buildTree(preorder, inorder[:split])
        node.right = self.buildTree(preorder, inorder[split+1:])
        return node

    def inorder(self, root):
        """Print the tree's values, one per line, in inorder (L, node, R)."""
        if not root:
            return
        self.inorder(root.left)
        # print(x) with a single argument behaves identically on Py2 and Py3,
        # unlike the original Py2-only `print root.val` statement.
        print(root.val)
        self.inorder(root.right)

    def preorder(self, root):
        """Print the tree's values, one per line, in preorder (node, L, R)."""
        if not root:
            return
        print(root.val)
        # BUG FIX: the original recursed with self.inorder here, so every
        # subtree below the root was printed inorder, not preorder.
        self.preorder(root.left)
        self.preorder(root.right)
if __name__ == '__main__':
    s = Solution()
    # Preorder [1,2,3,4,5] with inorder [2,1,4,3,5] describes:
    #        1
    #       / \
    #      2   3
    #         / \
    #        4   5
    h = s.buildTree([1,2,3,4,5], [2,1,4,3,5])
    s.inorder(h)
    # BUG FIX: `print '-' * 6` is Python-2-only syntax; the parenthesized
    # form prints the same string under both Python 2 and 3.
    print('-' * 6)
    s.preorder(h)
    h = s.buildTree([], [])
|
[
"stephen.zhuang@gmail.com"
] |
stephen.zhuang@gmail.com
|
bc0b93fd8478e0c280277ddeab4f81620aed29f5
|
6d42b5219f25cb12626c79d4ec45a0eab0b51d9c
|
/pthbldr/datasets/tests/test_datasets.py
|
38d66b8c1dd1cd81729e649848f3bb94db6e55e0
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
kastnerkyle/pthbldr
|
cc370182126784645f6a4b24af81aefb12e2adcc
|
4dd65f9973a0199b920fb7b76d9b3163b5928675
|
refs/heads/master
| 2020-12-30T13:20:07.846618
| 2017-11-06T23:45:07
| 2017-11-06T23:45:07
| 91,346,119
| 2
| 0
| null | 2017-11-06T23:45:08
| 2017-05-15T14:16:43
|
Python
|
UTF-8
|
Python
| false
| false
| 324
|
py
|
from pthbldr.datasets import load_digits
from pthbldr.datasets import load_iris
from nose.tools import assert_equal
def test_digits():
digits = load_digits()
assert_equal(len(digits["data"]), len(digits["target"]))
def test_iris():
iris = load_iris()
assert_equal(len(iris["data"]), len(iris["target"]))
|
[
"kastnerkyle@gmail.com"
] |
kastnerkyle@gmail.com
|
436d1079e865e48d975bd6ffa89a5f3ed150f80d
|
bdfd3889e1cc02f97b3e2dc0032ce0c9b59bf37e
|
/src/gork/contrib/gtag/forms.py
|
f975a8005e00e6d6fd6d68bfd55cee536a5ff863
|
[
"MIT"
] |
permissive
|
indexofire/gork
|
c85728953cfa9ab98c59b79a440d4e12212cbc4e
|
c5e172b896a51c15f358d3aabbcb66af837b54b2
|
refs/heads/master
| 2016-09-06T04:58:01.435002
| 2014-02-06T08:35:51
| 2014-02-06T08:35:51
| 9,260,830
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 767
|
py
|
# -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext as _
from gtag.utils import parse_tags, edit_string_for_tags
class TagWidget(forms.TextInput):
def render(self, name, value, attrs=None):
if value is not None and not isinstance(value, basestring):
value = edit_string_for_tags([o.tag for o in value.select_related("tag")])
return super(TagWidget, self).render(name, value, attrs)
class TagField(forms.CharField):
widget = TagWidget
def clean(self, value):
value = super(TagField, self).clean(value)
try:
return parse_tags(value)
except ValueError:
raise forms.ValidationError(_("Please provide a comma-separated list of tags."))
|
[
"indexofire@gmail.com"
] |
indexofire@gmail.com
|
72301ec7c64df86bd3500a01d59262d2037866dd
|
e59f696a96f216cdeea8d638f05b75bb0c26ef55
|
/4 Python_Programs/1 Problems on numbers/10_EvenFactors/Demo.py
|
2f5e56dcd12e00dd9d0888a7d45b09e65f1ca07a
|
[] |
no_license
|
Aditya-A-Pardeshi/Coding-Hands-On
|
6858686bdf8f4f1088f6cc2fc0035a53c4875d81
|
0d72d45e92cb0698129636412f7bf5a8d865fd2f
|
refs/heads/main
| 2023-05-29T05:35:34.052868
| 2021-06-14T18:52:57
| 2021-06-14T18:52:57
| 376,928,262
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
'''
Write a program which accept number from user and print even factors of that number
Input : 24
Output: 2 4 6 8 12
'''
def PrintEvenFactors(no):
    """Print the even factors of `no` (excluding `no` itself), space separated.

    Negative input is handled by absolute value. Example: 24 -> 2 4 6 8 12,
    matching the module docstring above.
    """
    if no < 0:
        no = -no
    # BUG FIX: the original tested every i in [2, no//2] and therefore also
    # printed odd factors (e.g. 3 for 24), contradicting the documented
    # example output. Stepping by 2 visits even candidates only.
    for i in range(2, int(no / 2) + 1, 2):
        if no % i == 0:
            print("{} ".format(i), end = " ")
def main():
    """Prompt for an integer and print its even factors."""
    number = int(input("Enter number:"))
    PrintEvenFactors(number)


if __name__ == "__main__":
    main()
|
[
"adityapardeshi0078@gmail.com"
] |
adityapardeshi0078@gmail.com
|
4030f63098bbbd19a41102250c7f615bf1a647c3
|
dd87194dee537c2291cf0c0de809e2b1bf81b5b2
|
/test/test_v1_job.py
|
2a33786b9e31d281aa3aff1f99a840af636abb55
|
[
"Apache-2.0"
] |
permissive
|
Arvinhub/client-python
|
3ea52640ab02e4bf5677d0fd54fdb4503ecb7768
|
d67df30f635231d68dc4c20b9b7e234c616c1e6a
|
refs/heads/master
| 2023-08-31T03:25:57.823810
| 2016-11-02T22:44:36
| 2016-11-02T22:44:36
| 73,865,578
| 1
| 0
|
Apache-2.0
| 2018-10-10T12:16:45
| 2016-11-15T23:47:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,310
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: unversioned
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import k8sclient
from k8sclient.rest import ApiException
from k8sclient.models.v1_job import V1Job
class TestV1Job(unittest.TestCase):
""" V1Job unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1Job(self):
"""
Test V1Job
"""
model = k8sclient.models.v1_job.V1Job()
# Allow running this test module directly via unittest's CLI runner.
if __name__ == '__main__':
    unittest.main()
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
9b6e87777212e93821e9296860eef76c187aa685
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/298/82997/submittedfiles/testes.py
|
526e6017d8abd2de623bff22ac68e85aeb35505c
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,205
|
py
|
# Section 1: read n in [1, 100] and print ((n + 2) / 10) * 2 to 4 decimals.
n = int(input('Digite um numero inteiro positivo entre 1 e 100: '))
# BUG FIX: the original loop condition was `(n<1) and (n>100)`, which can
# never be true, so invalid input was never re-prompted. `or` keeps asking
# while n is outside [1, 100].
while (n<1) or (n>100):
    print('Entrada inválida.')
    n = int(input('Digite um numero inteiro positivo entre 1 e 100: '))
# BUG FIX: range(1,100) stops at 99 and excluded 100 even though the prompt
# allows it; range(1,101) covers the whole interval.
if n in range (1,101):
    kn = float(((n + 2)/10)*2)
    print('%.4f' % kn)
#-----------------------------------------------------------------------------------------------------------------------
# Section 2: least common multiple (mmc) of three integers, found by
# listing the multiples of each up to n1*n2*n3 (an upper bound for the LCM)
# and intersecting the three lists.
n1 = int(input('Digite um número inteiro: '))
n2 = int(input('Digite um número inteiro: '))
n3 = int(input('Digite um número inteiro: '))
listan1 = []
kn1 = 1
while (kn1<=n1*n2*n3):
    multiplo1 = n1*kn1
    listan1 = listan1 + [multiplo1]
    kn1 = kn1 + 1
listan2 = []
kn2 = 1
while (kn2<=n1*n2*n3):
    multiplo2 = n2*kn2
    listan2 = listan2 + [multiplo2]
    kn2 = kn2 + 1
listan3 = []
kn3 = 1
while (kn3<=n1*n2*n3):
    multiplo3 = n3*kn3
    listan3 = listan3 + [multiplo3]
    kn3 = kn3 + 1
# The smallest value common to the three lists is the LCM.
listapreliminar = [i for i in listan1 if i in listan2]
listafinal = [i for i in listapreliminar if i in listan3]
mmc = listafinal[0]
print('\nO mmc é:')
print(mmc)
print('\n')
#---------------------------------------------------------------------------------------------------------------------------
# Section 3: greatest common divisor (mdc) of three integers, found by
# intersecting their divisor lists and taking the largest element.
b = int(input('Digite um número inteiro: '))
c = int(input('Digite um número inteiro: '))
d = int(input('Digite um número inteiro: '))
lista1=[]
k1=1
while k1<=b:
    if (b%k1)==0:
        lista1=lista1+ [k1]
    k1=k1+1
lista2=[]
k2=1
while k2<=c:
    if (c%k2)==0:
        lista2=lista2+ [k2]
    k2=k2+1
lista3=[]
k3=1
while k3<=d:
    if (d%k3)==0:
        lista3=lista3+ [k3]
    k3=k3+1
listapreliminar = [i for i in lista1 if i in lista2]
listafinal = [i for i in listapreliminar if i in lista3]
mdc = listafinal[(len(listafinal) - 1)]
print('\nMáximo divisor comum:')
print(mdc)
print('\n')
#---------------------------------------------------------------------------------------------------------------------
# Section 4: list every divisor of a and count them.
a = int(input('Digite um número inteiro: '))
lista=[]
k=1
while k<=a:
    if (a%k)==0:
        lista = lista + [k]
    k=k+1
divisores=int(len(lista))
print('\nLista de divisores:')
print(lista)
print('\nQuantidade de divisores: %i' % divisores)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
7580db354e832980cb68e6afb7c9c89485dede71
|
b0cdbad299f6174bfdb0fba173dbcf3889b82209
|
/Modules/datetime/d1.py
|
29a423c96933b4d0b2a03754743aa30a42bb5be7
|
[] |
no_license
|
deesaw/PythonD-06
|
a33e676f1e0cfc13b4ea645c8b60547b198239ac
|
3c6f065d7be2e3e10cafb6cef79d6cae9d55a7fa
|
refs/heads/master
| 2023-03-18T08:24:42.030935
| 2021-03-02T14:15:09
| 2021-03-02T14:15:09
| 343,797,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
import datetime

# Demo: decompose a date into its year/month/day components.
birthday = datetime.date(2000, 7, 26)
print(birthday)
print('Year : ', birthday.year)
print('Month : ', birthday.month)
print('Date : ', birthday.day)

# Demo: decompose a time-of-day value into its components.
moment = datetime.time(9, 25, 39, 10)
print( moment )
print( 'hour :', moment.hour )
print( 'minute:', moment.minute )
print( 'second:', moment.second )
print( 'microsecond:', moment.microsecond )
|
[
"69420960+deesaw@users.noreply.github.com"
] |
69420960+deesaw@users.noreply.github.com
|
9e7c2bcddff622777890630a880abba20a2ceb93
|
9152c6f5b692694c4cb95725319fc8dd21d30455
|
/office365/runtime/client_value_object.py
|
6d76794e0d56308f5fb685632d48f2ac654b41b2
|
[
"MIT"
] |
permissive
|
VISIN9/Office365-REST-Python-Client
|
cf3de86a6bdd2461ff5814dbfa02d4d4185917d5
|
91c07d427a76197f6eb143c6253bdc832cbb889d
|
refs/heads/master
| 2021-05-25T08:43:35.530546
| 2020-04-06T20:24:53
| 2020-04-06T20:24:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 530
|
py
|
from office365.runtime.odata.json_light_format import JsonLightFormat
from office365.runtime.odata.odata_metadata_level import ODataMetadataLevel
class ClientValueObject(object):
    """Base client value object: a plain payload container with JSON mapping."""

    def map_json(self, json):
        """Copy every key/value pair from `json` onto this instance."""
        for name in json:
            # Assign straight into __dict__, mirroring the JSON keys 1:1.
            self.__dict__[name] = json[name]

    def to_json(self):
        """Serialize to a dict, omitting attributes whose value is None."""
        return {name: value for name, value in vars(self).items() if value is not None}

    @property
    def entityTypeName(self):
        # Subclasses override this to report their OData entity type name.
        return None
|
[
"vvgrem@gmail.com"
] |
vvgrem@gmail.com
|
386494842d287ea6d6dfa1a2affa37e918c30a55
|
1a937b899af949d23e667782a7360b9de1634456
|
/SoftUni/Advanced Tree Structures - II/homework/sweep_and_prune.py
|
3f531c9cb5c36afd49d3a4e1d23949822687a1c9
|
[] |
no_license
|
stanislavkozlovski/data_structures_feb_2016
|
c498df6ea7cb65d135057a300e0d7e6106713722
|
adedac3349df249fe056bc10c11b0b51c49e24bb
|
refs/heads/master
| 2021-07-06T17:37:18.117104
| 2017-09-30T19:01:51
| 2017-09-30T19:01:51
| 75,526,414
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,930
|
py
|
OBJECT_WIDTH, OBJECT_HEIGHT = 10, 10


class BoundableObject:
    """A fixed-size axis-aligned bounding box anchored at (x1, y1).

    Ordering comparisons use the left edge (x1), which is what the
    sweep-and-prune sort relies on.
    """

    def __init__(self, name, x1, y1):
        self.name = name
        self.x1 = x1
        self.y1 = y1
        self.x2 = x1 + OBJECT_WIDTH
        self.y2 = y1 + OBJECT_HEIGHT

    def __repr__(self):
        return '{x1} {x2}'.format(x1=self.x1, x2=self.x2,)

    def __str__(self):
        return self.name

    def __gt__(self, other):
        return self.x1 > other.x1

    def __lt__(self, other):
        return self.x1 < other.x1

    def intersects(self, other):
        """True when the boxes overlap on both axes (touching edges count)."""
        overlap_x = self.x1 <= other.x2 and other.x1 <= self.x2
        overlap_y = self.y1 <= other.y2 and other.y1 <= self.y2
        return overlap_x and overlap_y

    def change_coords(self, new_x, new_y):
        """Move the box so its anchor corner sits at (new_x, new_y)."""
        self.x1 = new_x
        self.y1 = new_y
        self.x2 = new_x + OBJECT_WIDTH
        self.y2 = new_y + OBJECT_HEIGHT
def insertion_sort(arr):
    """In-place insertion sort; returns the same list for convenience."""
    for right in range(1, len(arr)):
        item = arr[right]
        slot = right
        # Shift larger elements up one position until item's slot is found.
        while slot > 0 and item < arr[slot - 1]:
            arr[slot] = arr[slot - 1]
            slot -= 1
        arr[slot] = item
    return arr
class Game:
    """Sweep-and-prune collision game driven by text commands on stdin."""

    def __init__(self):
        self.has_started = False
        self.objects = []
        self.tick_count = 1

    def game_tick(self):
        """Re-sort objects by left edge, report collisions, advance the tick."""
        self.objects = insertion_sort(self.objects)
        self.check_for_collisions()
        self.tick_count += 1

    def check_for_collisions(self):
        """Print every colliding pair; the x-sorted order lets us stop early."""
        for idx, obj in enumerate(self.objects):
            for later_idx in range(idx + 1, len(self.objects)):
                other = self.objects[later_idx]
                if not obj.intersects(other):
                    # Sorted by x1: once one object is clear, all later ones are.
                    break
                print('({tick}) - {obj1} collides with {obj2}'.format(tick=self.tick_count, obj1=obj,
                                                                      obj2=other))

    def handle_commands(self):
        """Read one command from stdin and apply it.

        Before 'start': 'add <name> <x> <y>' registers objects; 'start'
        begins the game. After 'start': 'move <name> <x> <y>' relocates an
        object, and every command triggers a game tick.
        """
        command = input()
        if not self.has_started:
            if command == 'start':
                self.has_started = True
                return
            if command.startswith('add'):
                name, x1, y1 = command.split()[1:]
                self.objects.append(BoundableObject(name, int(x1), int(y1)))
            return
        # game has started
        if command.startswith('move'):
            name, x1, y1 = command.split()[1:]
            target = self.find_object_with_name(name)
            if target is None:
                raise Exception('No such object in the array!')
            target.change_coords(int(x1), int(y1))
        self.game_tick()

    def find_object_with_name(self, name):
        """Return the first registered object with this name, else None."""
        for candidate in self.objects:
            if candidate.name == name:
                return candidate
        return None
def main():
    """Run the interactive command loop forever."""
    game = Game()
    while True:
        game.handle_commands()


if __name__ == '__main__':
    main()
|
[
"familyguyuser192@windowslive.com"
] |
familyguyuser192@windowslive.com
|
1c345f9f054da786bb4b108d6e9fe03934792328
|
0f47b8b3775e1730f92141128491b0bbfe3d89e0
|
/data_structure/graph/shortest_path/floydwarshall.py
|
e74f989ab56a38084b43fe4e47c8b53dce6094b5
|
[] |
no_license
|
hongmin0907/CS
|
1d75c38da98c6174ea19de163c850d0f3bac22e3
|
697e8e1a5bde56a7588381a12f74bbb0e3aee3e8
|
refs/heads/master
| 2020-06-23T20:10:22.051477
| 2019-07-15T00:20:09
| 2019-07-15T00:20:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,194
|
py
|
from copy import deepcopy
class ShortestPath:
    """Floyd-Warshall result: distance matrix A plus intermediate-vertex matrix."""

    def __init__(self, A, path):
        # A[i][j]: shortest distance from i to j (2-D list).
        self.A = A
        # path[i][j]: an intermediate vertex on the shortest i->j path, or None.
        self.path = path

    def print_shortest_path(self, source, dest):
        """Print the vertex sequence of the shortest path from source to dest."""
        print(source, end=" ")
        self.__print_sp(source, dest)
        print(dest, end=" ")

    def __print_sp(self, i, j):
        # No recorded intermediate vertex: nothing between i and j to print.
        if self.path[i][j] is None:
            return
        k = self.path[i][j]
        self.__print_sp(i, k)   # vertices between i and k
        print(k, end=" ")       # k itself
        self.__print_sp(k, j)   # vertices between k and j
class Graph:
    """Directed weighted graph on an adjacency matrix, with Floyd-Warshall."""

    # A number larger than any edge weight; stands in for infinity so that
    # missing edges never win a min() comparison.
    BIG_NUMBER = 2000

    def __init__(self, vnum):
        # Missing edges start at BIG_NUMBER ("infinity"); self-loops cost 0.
        self.vertex_num = vnum
        self.adjacency_matrix = [[self.BIG_NUMBER for _ in range(vnum)] for _ in range(vnum)]
        for v in range(vnum):
            self.adjacency_matrix[v][v] = 0

    def insert_edge(self, u, v, w):
        """Add (or overwrite) the directed edge u -> v with weight w."""
        self.adjacency_matrix[u][v] = w

    def floyd_warshall(self):
        """Compute all-pairs shortest paths; returns a ShortestPath(A, path)."""
        A = deepcopy(self.adjacency_matrix)
        n = self.vertex_num
        # path[i][j] records the last intermediate vertex k that improved i->j.
        path = [[None] * n for _ in range(n)]
        for k in range(n):
            for i in range(n):
                for j in range(n):
                    # A^k[i][j] = min(A^(k-1)[i][j], A^(k-1)[i][k] + A^(k-1)[k][j])
                    via = A[i][k] + A[k][j]
                    if via < A[i][j]:
                        A[i][j] = via
                        path[i][j] = k
        return ShortestPath(A, path)
# Demo driver: build a 6-vertex directed graph, run Floyd-Warshall, then
# dump the distance matrix, the path matrix and one shortest path.
if __name__=="__main__":
    # simple example
    # g=Graph(4)
    # g.insert_edge(0, 1, 12)
    # g.insert_edge(0, 2, 3)
    # g.insert_edge(1, 3, 15)
    # g.insert_edge(1, 2, 5)
    # g.insert_edge(2, 0, 7)
    # g.insert_edge(2, 1, 6)
    # g.insert_edge(2, 3, 2)
    # g.insert_edge(3, 1, 13)
    # g.insert_edge(3, 2, 6)
    # source=0
    # dest=3
    # complicated example
    g=Graph(6)
    g.insert_edge(0, 1, 5)
    g.insert_edge(0, 2, 7)
    g.insert_edge(0, 5, 9)
    g.insert_edge(1, 3, 4)
    g.insert_edge(1, 5, 2)
    g.insert_edge(2, 0, 8)
    g.insert_edge(2, 4, 6)
    g.insert_edge(3, 0, 6)
    g.insert_edge(3, 4, 2)
    g.insert_edge(3, 5, 3)
    g.insert_edge(4, 2, 3)
    g.insert_edge(4, 5, 10)
    g.insert_edge(5, 1, 7)
    g.insert_edge(5, 2, 4)
    source=2
    dest=3
    sp=g.floyd_warshall()
    # All-pairs distance matrix, right-justified in 4-character columns.
    print("A mat")
    for i in range(g.vertex_num):
        for j in range(g.vertex_num):
            print("{}".format(sp.A[i][j]).rjust(4), end="")
        print()
    print()
    # Intermediate-vertex matrix; "N" marks "no intermediate vertex recorded".
    print("path mat")
    for i in range(g.vertex_num):
        for j in range(g.vertex_num):
            if sp.path[i][j]==None:
                print("{} ".format("N").rjust(4), end="")
            else:
                print("{} ".format(sp.path[i][j]).rjust(4), end="")
        print()
    print()
    print("path from {} to {}".format(source, dest))
    sp.print_shortest_path(source, dest)
    print()
|
[
"ythwork83@gmail.com"
] |
ythwork83@gmail.com
|
41fab65078d3fb5f668f2cb47c7cc24fb333394f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03371/s845988695.py
|
4818cf4f4ba7277bd6347d6792e793a348563d0c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
# Greedy cost minimization: X units at price A, Y units at price B, where
# 2*C can substitute one A plus one B.
A, B, C, X, Y = map(int, input().split())

if 2 * C <= A + B:
    # Covering an (A, B) pair with two C's is cheaper; do that for the
    # min(X, Y) complete pairs.
    pairs = min(X, Y)
    total = 2 * C * pairs
    # The leftover demand on the larger side goes to whichever is cheaper:
    # its own price, or two more C's per unit.
    if X >= Y:
        extra = X - pairs
        total += min(A * extra, 2 * C * extra)
    else:
        extra = Y - pairs
        total += min(B * extra, 2 * C * extra)
else:
    total = A * X + B * Y
print(total)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
e3d72dcb67b7c3981d26691b7181d72ccf66814e
|
7807d8d9d109a3e272fffed91bf841201da39256
|
/trans_ALDS1_1_B/HARU55_ALDS1_1_B_kotonoha.py
|
2d3f4f61cfadb439b240ad350d2e8f32c35c3b9d
|
[] |
no_license
|
y-akinobu/AOJ_to_Kotonoha
|
0e8df43393964fcdd5df06c75545091bd6c0c2e2
|
5a694a55a3d85e3fbc4a07b57edc4374556db9a1
|
refs/heads/main
| 2023-02-05T15:33:16.581177
| 2020-12-30T16:14:44
| 2020-12-30T16:14:44
| 325,524,216
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
# Expand map(int, <input line split on whitespace>) into N and M
N, M = map(int, input().split())
# Euclid's algorithm: repeat until the remainder becomes zero
while True :
    # When N is smaller than M, swap N and M
    if N < M :
        N, M = M, N
    # Let a be the remainder of N divided by M
    a = N % M
    # When a is zero, M is the greatest common divisor: stop
    if a == 0 :
        break
    # BUG FIX: this branch was emitted as garbled generator output
    # ("else :[#Else [#Block ...]]"); restored from the embedded AST
    # annotation, which records the multi-assignment N, M = M, a.
    else :
        N, M = M, a
# Output M (the greatest common divisor)
print(M)
|
[
"sx2_vn_yuka@outlook.jp"
] |
sx2_vn_yuka@outlook.jp
|
4b9bf327874fb716a5ef0c14b6c733148ed3e614
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/FXJSMM/YW_FXJSMM_SZSJ_307.py
|
94a0758207761f5b78d6a48514fdeaef3d006e5d
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,002
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_FXJSMM_SZSJ_307(xtp_test_case):
    # Test case YW_FXJSMM_SZSJ_307: Shenzhen A-share market order using the
    # "own-side best" price type — T+0 sell followed by T+0 buy on a
    # trading day. Expects the order to fill completely.
    def test_YW_FXJSMM_SZSJ_307(self):
        title = '交易日本方最优-T+0卖→T+0买'
        # Expected values for this case.
        # Possible order states: initial, unfilled, partially filled, fully
        # filled, partial-cancel reported, partially cancelled, reported
        # awaiting cancel, cancelled, rejected, cancel-rejected, internal
        # cancel. xtp_ID and cancel_xtpID default to 0 and need no change.
        case_goal = {
            '期望状态': '全成',
            'errorID': 0,
            'errorMSG': '',
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Build the order parameters ------------------------------------
        # Args: ticker, market, security type, security status, trading
        # status, side (B buy / S sell), expected state, Api.
        stkparm = QueryStkPriceQty('001066', '2', '0', '0', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, the case fails outright.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            self.assertEqual(rs['用例测试结果'], True)
        else:
            # Order request: cash business, Shenzhen A market, sell side,
            # "forward best" price type, priced at the limit-up price.
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':2,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_FORWARD_BEST'],
                'price': stkparm['涨停价'],
                'quantity': 10000,
                'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            # Initialize service parameters, record the case in MySQL, then
            # run the order through the service and check the result.
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            self.assertEqual(rs['用例测试结果'], True)  # expect overall success
# Run this test case directly with unittest's CLI runner.
if __name__ == '__main__':
    unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
04ae437e0973bf482e71ebf5563db2e45951c6c9
|
9c32890a188dfcd949883c10c8db112aed5321d6
|
/learner/wsgi.py
|
1f2d5f930764af173595a367e93da2297a089d37
|
[] |
no_license
|
gitanjali1077/learner
|
dcb11d92b84bac0a9254a2409570261a03503945
|
fef0c5554b100216210ba8f3777bad69a9219d4f
|
refs/heads/master
| 2022-12-12T08:24:55.268350
| 2018-03-02T04:34:43
| 2018-03-02T04:34:43
| 122,777,081
| 1
| 0
| null | 2022-11-22T02:15:06
| 2018-02-24T20:28:20
|
CSS
|
UTF-8
|
Python
| false
| false
| 484
|
py
|
"""
WSGI config for learner project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
# Point Django at the project's settings module before creating the app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "learner.settings")
application = get_wsgi_application()
# Wrap the WSGI app so whitenoise can serve static files directly.
# NOTE(review): whitenoise.django.DjangoWhiteNoise was removed in
# whitenoise 4.x in favour of middleware — confirm the pinned whitenoise
# version still provides this import.
application = DjangoWhiteNoise(application)
|
[
"gitanjali1077@gmail.com"
] |
gitanjali1077@gmail.com
|
cb03689df98cce2aeec0d424a2ee39025ac09c42
|
cc3d7bd252c225bf588a8e663b2214b8ccc3b399
|
/report/make_report.py
|
cd623d6d4f3915b774a17d563e74329b25cd9405
|
[] |
no_license
|
stestagg/dict_index
|
c1ca8cac3389b5f2d22882a159ab8ea68439e4a5
|
41d06d705e28e8c52c3a9c76349c2aadfd984dff
|
refs/heads/master
| 2022-11-27T12:14:58.791348
| 2020-08-01T18:11:52
| 2020-08-01T18:11:52
| 278,656,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,737
|
py
|
import textwrap
from pathlib import Path
from datetime import datetime
import json
import click
import dateformat
from jinja2 import Environment, FileSystemLoader, select_autoescape, Markup
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
# Jinja environment that loads templates from this script's own directory.
JINJA_ENV = Environment(
    loader=FileSystemLoader([Path(__file__).parent]),
    autoescape=select_autoescape(['html', 'xml'])
)
# Pygments HTML formatter shared by every highlighted code snippet.
HTML_FORMATTER = HtmlFormatter(style="monokai")
# Chart palette: proposal series always use PROPOSAL_COLOR (green); other
# series cycle through COLORS by position.
PROPOSAL_COLOR = '#2ca02c'
COLORS = ('#1f77b4', '#ff7f0e', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf')
# Benchmark names treated as "the proposal" and highlighted accordingly.
PROPOSAL_NAMES = {'proposed', 'keys_index', 'items_index'}
def format_date(value, format='YYYY-MM-DD'):
    """Jinja filter: render a unix timestamp with a dateformat pattern."""
    return dateformat.DateFormat(format).format(datetime.utcfromtimestamp(value))
def format_code(code):
    """Jinja filter: syntax-highlight Python source and mark it HTML-safe."""
    highlighted = highlight(code.rstrip(), PythonLexer(), HTML_FORMATTER)
    return Markup(highlighted)
# Register the custom filters on the shared Jinja environment.
JINJA_ENV.filters['date'] = format_date
JINJA_ENV.filters['code'] = format_code
JINJA_ENV.filters['num'] = lambda v: "{:,}".format(v)
JINJA_ENV.filters['dedent'] = textwrap.dedent
def load_results(path):
    """Parse every ``*.json`` file directly under *path* into result dicts.

    Each dict gains a ``name`` key taken from the file's stem.
    """
    collected = []
    for entry in path.iterdir():
        if entry.suffix.lower() != ".json":
            continue
        record = json.loads(entry.read_text())
        record['name'] = entry.stem
        collected.append(record)
    return collected
def make_index(results, dest):
    """Render the landing page listing all result sets and write it to *dest*."""
    content = JINJA_ENV.get_template('index.html').render(results=results)
    dest.write_text(content)
def reshape_results_for_chart(results):
    """Convert raw benchmark results into chart-library series, one entry per class.

    For each method two series are produced: a scatter of every run and a
    spline through the per-variant minima (sorted by dict size).
    """
    chart_data = []
    for cls_name, meth_results in results.items():
        series = []
        chart_data.append({'cls': cls_name, 'series': series})
        for idx, (meth_name, variants) in enumerate(meth_results.items()):
            # Proposal implementations get a shared highlight colour and a "(*)" tag.
            if meth_name in PROPOSAL_NAMES:
                label, color = f'{meth_name}(*)', PROPOSAL_COLOR
            else:
                label, color = meth_name, COLORS[idx]
            scatter_points = []
            series.append({
                'name': f'{label}.runs',
                'color': color,
                'type': 'scatter',
                'showInLegend': False,
                'dataPoints': scatter_points,
            })
            spline_points = []
            series.append({
                'name': label,
                'color': color,
                'type': 'spline',
                'showInLegend': True,
                'dataPoints': spline_points,
            })
            for variant, times in variants.items():
                size = int(variant)
                scatter_points.extend({'x': size, 'y': t} for t in times)
                spline_points.append({'x': size, 'y': min(times)})
            spline_points.sort(key=lambda pt: pt['x'])
    return chart_data
def reshape_results_for_table(results):
    """Pivot raw results into ``{cls: {'variants': [...], 'meth': {...}}}`` rows.

    ``variants`` is the numerically sorted union of dict sizes seen for the
    class; each method row holds min(times) per size, or None where missing.
    """
    table = {}
    for cls_name, meth_raw in results.items():
        seen = set()
        for variants in meth_raw.values():
            seen.update(variants.keys())
        sizes = sorted(int(v) for v in seen)
        rows = {}
        for meth_name, variants in meth_raw.items():
            row = []
            for size in sizes:
                times = variants.get(str(size))
                row.append(min(times) if times is not None else None)
            rows[meth_name] = row
        table[cls_name] = {'variants': sizes, 'meth': rows}
    return table
def make_results_page(results, dest):
    """Render one benchmark run's detail page (charts + tables) to *dest*."""
    # Inline the Pygments CSS so the page is self-contained.
    style = HTML_FORMATTER.get_style_defs()
    chart_data = reshape_results_for_chart(results['results'])
    table_data = reshape_results_for_table(results['results'])
    content = JINJA_ENV.get_template('results.html').render(
        results=results,
        style=style,
        chart_data=chart_data,
        table_data=table_data,
        PROPOSAL_NAMES=PROPOSAL_NAMES,
    )
    dest.write_text(content)
@click.command()
@click.argument('result_dir')
@click.argument('output_dir')
def main(result_dir, output_dir):
    """Build the static report site: an index page plus one page per result file."""
    results_path = Path(result_dir)
    assert results_path.is_dir()
    output_dir = Path(output_dir)
    if not output_dir.exists():
        output_dir.mkdir()
    results = load_results(results_path)
    make_index(results, output_dir / 'index.html')
    for result in results:
        result_path = output_dir / f'{result["name"]}.html'
        make_results_page(result, result_path)
if __name__ == '__main__':
    main()
|
[
"stestagg@gmail.com"
] |
stestagg@gmail.com
|
7e23ddf115d5ed2d24c73fb7144f196d75b02cdf
|
92237641f61e9b35ff6af6294153a75074757bec
|
/Machine Learning/수업 자료/2주차_데이터 과학을 통한 자연어 처리와 통계학습/제06~09일차_정형.비정형 데이터 처리 및 결합 분석/xmlEx02.py
|
3fecb052979d01243b6104879395476414339d15
|
[] |
no_license
|
taepd/study
|
8ded115765c4f804813e255d9272b727bf41ec80
|
846d3f2a5a4100225b750f00f992a640e9287d9c
|
refs/heads/master
| 2023-03-08T13:56:57.366577
| 2022-05-08T15:24:35
| 2022-05-08T15:24:35
| 245,838,600
| 0
| 1
| null | 2023-03-05T23:54:41
| 2020-03-08T15:25:15
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,323
|
py
|
# xmlEx02.py
# Build a <members> XML tree from a dict of (name, age, gender, address) tuples.
from xml.etree.ElementTree import Element
from xml.etree.ElementTree import SubElement
from xml.etree.ElementTree import ElementTree
mydict = {'kim': ('김철수', 30, '남자', '강남구 역삼동'), 'park': ('박영희', 40, '여자', '서초구 방배동')}
print(mydict)
members = Element('members')
for key, mytuple in mydict.items():
    # Demo attributes; 'id' is then set from the dict key.
    myattrib = {'a': 'b', 'c': 'd'}
    mem = SubElement(members, 'member', attrib=myattrib)
    mem.attrib['id'] = key
    SubElement(mem, 'name').text = mytuple[0]
    SubElement(mem, 'age').text = str(mytuple[1])
    SubElement(mem, 'gender').text = mytuple[2]
    SubElement(mem, 'address').text = mytuple[3]
def indent(elem, level=0):
    """In-place pretty-printer: set newline/tab text and tails on the tree.

    Mirrors the classic ElementTree indent recipe, including the detail that
    the last child's tail ends up at the parent's indent level.
    """
    tab = '\t'
    prefix = '\n' + level * tab
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = prefix + tab
        if not elem.tail or not elem.tail.strip():
            elem.tail = prefix
        child = None
        for child in elem:
            indent(child, level + 1)
        # Pull the final child's tail back to this element's indent level.
        if not child.tail or not child.tail.strip():
            child.tail = prefix
    elif level and (not elem.tail or not elem.tail.strip()):
        elem.tail = prefix
# Pretty-print the tree, then serialize it to a UTF-8 XML file.
indent(members)
xmlFile = 'xmlEx_02.xml'
ElementTree(members).write(xmlFile, encoding='utf-8')
print(xmlFile + ' 파일 생성됨')
print('finished')
|
[
"taepd1@gmail.com"
] |
taepd1@gmail.com
|
802eca46c3e683e50f6e47f7a5d22d1b059841c1
|
b80c84125f8176d5e40ffd51af36a0aed2df9d7c
|
/roboPortal/views.py
|
df87e4b4fe81d4d9cd6354a524d49fb76790be56
|
[] |
no_license
|
bhatiaisb5300/RobotixDevelopmentv1
|
cbee6f90f06c85c8bfec308e8d35ef3a2587e263
|
6d541bdc8a9b89565c103fdff53a28373b0afb8b
|
refs/heads/master
| 2022-12-13T10:12:30.841794
| 2020-02-15T15:33:00
| 2020-02-15T15:33:00
| 223,969,445
| 0
| 1
| null | 2022-12-08T05:24:26
| 2019-11-25T14:35:35
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 7,171
|
py
|
from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404
from django.contrib.auth.models import User, auth
from django.contrib.auth.decorators import login_required
from django.core.mail import send_mail
from django.conf import settings
from .models import portalUser, UserLink, Token, Team
subject_roboPortalVerification = 'Verify your Robo Portal Email Address. '
message_verification = ' it means a world to us '
subject_robathon = 'Robo Portal Selection'
message_robathon = ' Congrats.You have been selected for the hackathon.'
email_from = settings.EMAIL_HOST_USER
import random
import string
def randomString(stringLength=10):
    """Generate a random lowercase ASCII string of the given length.

    These strings become email-verification and team-invite tokens, so
    draw from ``secrets`` (a CSPRNG) instead of ``random``, whose output
    is predictable and unsuitable for security tokens.
    """
    import secrets  # local import keeps the module's import block untouched
    letters = string.ascii_lowercase
    return ''.join(secrets.choice(letters) for _ in range(stringLength))
def home(request):
    """Team dashboard: create a team, join one by token, or show the current team."""
    if request.user.portal.joined_team == False:
        if 'create' in request.POST:
            # Create the team with the current user as admin and first member.
            name = request.POST['teamName']
            team = Team(admin = request.user,name = name)
            team.save()
            team.member.add(request.user)
            # Invite token = random string + team id (first save assigns the id).
            token = randomString(15)
            token += str(team.id)
            temp_id = team.id
            team.token = token
            team.save()
            portal_user = portalUser.objects.get(user = request.user)
            portal_user.joined_team = True
            portal_user.user_team_id = int(temp_id)
            portal_user.is_admin = True
            portal_user.save()
            message = "Team has been created"
            return render(request,'team.html',{'team':team,'message':message})
        if 'join' in request.POST:
            # Join an existing team via its invite token.
            token = request.POST['token']
            try:
                team = Team.objects.get(token = token)
                team.member.add(request.user)
                portal = portalUser.objects.get(user = request.user)
                portal.joined_team = True
                portal.user_team_id = team.id
                portal.save()
                message = "Sucessfully added to team " + team.name
                return render(request,'team.html',{'team':team,'message':message})
            except Team.DoesNotExist:
                message = "Invalid Team Token"
                return render(request,'joinCreate.html',{'message':message})
        return render(request,'joinCreate.html')
    else:
        # Already on a team: just show it.
        team = Team.objects.get(id = request.user.portal.user_team_id)
        return render(request,'team.html',{'team':team})
def register(request):
    """Create a user + portal profile and email a verification link."""
    global subject_roboPortalVerification, message_verification, email_from
    if 'register' in request.POST:
        first_name = request.POST['first_name']
        last_name = request.POST['last_name']
        username = request.POST['username']
        email = request.POST['email']
        password1 = request.POST['password1']
        password2 = request.POST['password2']
        """
        elif User.objects.filter(email = email).exists():
            return render(request,'register.html',{'error_message':"Email already taken"})
        """
        if password1 == password2:
            if User.objects.filter(username = username).exists():
                return render(request,'register.html',{'error_message':"Username already taken"})
            else:
                user= User.objects.create_user(username = username, password = password1, email = email, first_name= first_name, last_name = last_name)
                user.save()
                a= portalUser(user = user)
                a.save()
                recipient_list = [user.email,]
                # Verification token = random string + user id; stored for later lookup.
                random_string = randomString(25)
                random_string += str(user.id)
                token = Token(token = random_string)
                token.save()
                # NOTE(review): verification URL is hard-coded to localhost — confirm for production.
                email_verify_html = "<a href = 'http://127.0.0.1:8000/roboPortal/verify/" +random_string + "/"+ str(user.id) + "'> Verify</a>"
                send_mail( subject_roboPortalVerification, message_verification, email_from,recipient_list,fail_silently=False,html_message= email_verify_html )
                return redirect('/roboPortal/login/')
        else:
            return render(request,'register.html',{'error_message':"Password does not match"})
    return render(request,'register.html')
def login(request):
    """Authenticate the user; only verified accounts may log in."""
    if 'login' in request.POST:
        username = request.POST['username']
        password = request.POST['password']
        user = auth.authenticate(username = username, password = password)
        if user != None:
            if user.portal.verified == True:
                auth.login(request,user)
                return redirect('/')
            else:
                # Unverified accounts are rejected with a 404.
                raise Http404("You are not verified yet.")
        else:
            return render(request, 'login.html', {'error_message': "Invalid Credentials"})
    return render(request, 'login.html')
def email(request):
    """Debug view: attempt to send a plain test email to a hard-coded address.

    Fix: the original passed ``html_message=email_verify_html``, a name that
    is never defined in this scope, so the view raised ``NameError`` before
    sending anything.  The undefined keyword argument is removed.
    """
    subject = 'Thank you for registering to our site'
    message = ' it means a world to us '
    email_from = settings.EMAIL_HOST_USER
    recipient_list = ['tuhina840@gmail.com',]
    send_mail( subject, message, email_from, recipient_list, fail_silently=True )
    return HttpResponse("views used and email sent attempted")
def verify(request,token,id):
    """Mark the user verified when the emailed token matches a stored Token."""
    if Token.objects.filter(token = token).exists() == False:
        raise Http404("Invalid Token.")
    else:
        user = User.objects.get(id = id)
        if user.portal.verified == True:
            raise Http404("Already verified")
        else:
            a = portalUser.objects.get(user = user)
            a.verified = True
            a.save()
            # Debug output; presumably left over from development.
            print(user.portal.verified)
            return HttpResponse("You have now been verified.")
def createProfile(request):
    """Update the user's portal profile (description + optional resume upload)."""
    if 'create' in request.POST:
        portal = portalUser.objects.get(user = request.user)
        resume = None
        if 'resume' in request.FILES:
            resume = request.FILES['resume']
        description = request.POST['description']
        portal.description = description
        # NOTE(review): resume is overwritten with None when no file is uploaded — confirm intended.
        portal.resume = resume
        portal.save()
        return HttpResponse("Updated")
    return render(request,'createProfile.html')
def adminView(request):
    """List all teams; restricted to robotix club members (portal.is_member)."""
    if request.user.portal.is_member == True:
        all_team = Team.objects.all()
        return render(request,'adminView.html',{'all_team':all_team})
    else:
        raise Http404("You are not authorized as you are not robotix club member")
def profileView(request,user_id):
    """Show the public profile page for the user with primary key *user_id*."""
    user = User.objects.get(id = user_id)
    return render(request,'profile.html',{'profile_user':user})
def select(request,team_id):
    """Mark a team selected for the hackathon and email every member."""
    global subject_robathon,message_robathon
    team = Team.objects.get(id = team_id)
    team.selected = True
    team.save()
    email_from = settings.EMAIL_HOST_USER
    recipient_list = []
    for user in team.member.all():
        recipient_list.append(user.email)
    send_mail( subject_robathon, message_robathon, email_from, recipient_list ,fail_silently=False )
    return HttpResponse("ok")
"""
def create(request):
team = Team(admin = request.user)
team.save()
return HttpResponse("hiiii")
"""
|
[
"ayushganguli1769@gmail.com"
] |
ayushganguli1769@gmail.com
|
53564b0dbb1fb2ca036162b4a6dc25d45b8bdaf8
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03846/s787181482.py
|
9e79a1ae234c00a297c69bb69d20566532f2d43a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
def main():
    """Competitive-programming solution: read N and N integers from stdin,
    validate the sorted sequence against the expected parity pattern, and
    print the count of valid arrangements (2^(pairs) mod 1e9+7) or 0."""
    N=int(input())
    A=list(map(int,input().split()))
    mod=10**9+7
    ans=1
    A.sort()
    if N%2==0:
        # Even N: sorted values must come in equal pairs (i+1, i+1).
        for i in range(0,N,2):
            if A[i]!=A[i+1] or A[i]!=i+1:
                print(0)
                exit()
            ans=ans*2%mod
        print(ans)
    else:
        # Odd N: exactly one 0, then equal pairs; each pair doubles the count.
        if A[0]!=0:
            print(0)
            exit()
        ans=1
        for i in range(1,N,2):
            if A[i]!=A[i+1] or A[i]!=i+1:
                print(0)
                exit()
            ans=ans*2%mod
        print(ans)
if __name__=='__main__':
    main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
67afec9212a1433422a4bc56f2afbeca8ea194b9
|
e66fa131cff76fa3fe70e7b6649fa1332159c781
|
/ch06/Packages.py
|
8ff16ca6520d9fb4e3c32d634368017d536a0500
|
[] |
no_license
|
chc1129/python_tutorial
|
c6d97c6671a7952d8a7b838ccb8aa3c352fa6881
|
2f8b389731bafbda73c766c095d1eaadb0f99a1c
|
refs/heads/main
| 2023-08-24T07:00:43.424652
| 2021-10-28T16:07:57
| 2021-10-28T16:07:57
| 341,532,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
# Tutorial snippet (Python tutorial, "Packages"): three equivalent ways to
# import and call a function from a nested package.  The `sound` package and
# the `input`/`output` arguments are placeholders, not defined here.
import sound.effects.echo
sound.effects.echo.echofilter(input, output, delay=0.7, atten=4)
from sound.effects import echo
echo.echofilter(input, output, delay=0.7, atten=4)
from sound.effects.echo import echofilter
echofilter(input, output, delay=0.7, atten=4)
|
[
"chc1129@gmail.com"
] |
chc1129@gmail.com
|
6991dc207815ab48ae46791891d445b78c2359a0
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_recentest.py
|
f93941f4b13cefdd2333686e329e9b615cf9de18
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
#class header
class _RECENTEST():
    """Generated word-form record for "recentest" (other form of "recent")."""

    def __init__(self,):
        self.name = "RECENTEST"
        # Fix: original read `self.definitions = recent` — a bare, undefined
        # name that raised NameError on instantiation; the base word is a string.
        self.definitions = 'recent'
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['recent']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
143a2b435ce857dbd0475e94aef1e9bf00d0afb5
|
19236d9e966cf5bafbe5479d613a175211e1dd37
|
/cohesity_management_sdk/models/role_update.py
|
733ad80c72ec30c51a96e5f47fa441e5e2415f91
|
[
"MIT"
] |
permissive
|
hemanshu-cohesity/management-sdk-python
|
236c44fbd9604809027f8ddd0ae6c36e4e727615
|
07c5adee58810979780679065250d82b4b2cdaab
|
refs/heads/master
| 2020-04-29T23:22:08.909550
| 2019-04-10T02:42:16
| 2019-04-10T02:42:16
| 176,474,523
| 0
| 0
|
NOASSERTION
| 2019-03-19T09:27:14
| 2019-03-19T09:27:12
| null |
UTF-8
|
Python
| false
| false
| 1,628
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
class RoleUpdate(object):
    """Implementation of the 'Role Update.' model.

    Specifies parameters required to update a role.

    Attributes:
        description (string): Specifies a description about the role.
        privileges (list of string): Array of Privileges. Specifies the list
            of privileges to assign to the role.
    """

    # Maps model attribute names to their wire (API) property names.
    _names = {
        "description": 'description',
        "privileges": 'privileges',
    }

    def __init__(self,
                 description=None,
                 privileges=None):
        """Constructor for the RoleUpdate class"""
        self.description = description
        self.privileges = privileges

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build a RoleUpdate from a deserialized API response dict.

        Keys must match the API property names; returns None for None input.
        """
        if dictionary is None:
            return None
        return cls(
            description=dictionary.get('description'),
            privileges=dictionary.get('privileges'),
        )
|
[
"ashish@cohesity.com"
] |
ashish@cohesity.com
|
24fee577c01fbd41cd14296cf06baa7ff9dd8694
|
76e62ddbfdfba19c80b37e855a4df67672ef0808
|
/IVTa/2014/ALEKSEEV_I_S/task_12_50.py
|
ab19443ceba4961fb2b12b4689e1e91a89688863
|
[
"Apache-2.0"
] |
permissive
|
stasvorosh/pythonintask
|
9d30f3cd492e89783b7221402375c1ebe4690baa
|
8169ed26510022fe0d589f4013f11749131957df
|
refs/heads/master
| 2021-01-17T16:49:32.778063
| 2016-10-10T14:08:04
| 2016-10-10T14:08:04
| 52,255,539
| 6
| 0
| null | 2016-02-22T07:33:16
| 2016-02-22T07:33:15
| null |
UTF-8
|
Python
| false
| false
| 1,942
|
py
|
# Task 12. Variant 50
# Develop a "tic-tac-toe" game.
# (see M. Dawson, "Programming in Python", ch. 6).
# Alekseev I.S.
# 20.05.2016
# Cells start out numbered 1-9; a move replaces a cell with "X" or "O".
board = list(range(1,10))
def draw_board(board):
    """Print the 3x3 board with a dash ruler above and below every row."""
    print("-------------")
    for row in range(0, 9, 3):
        print("|", board[row], "|", board[row + 1], "|", board[row + 2], "|")
        print("-------------")
def take_input(player_token):
    """Prompt until the player picks a free cell 1-9; write the token to
    the module-level `board`.  Prompts/messages are in Russian (runtime
    strings, left unchanged)."""
    valid = False
    while not valid:
        player_answer = input("Куда поставим " + player_token+"? ")
        try:
            player_answer = int(player_answer)
        except:
            # Not a number — re-prompt.
            print( "Некорректный ввод. Вы уверены, что ввели число?")
            continue
        if player_answer >= 1 and player_answer <= 9:
            # Cell is free only while it still holds its original digit.
            if (str(board[player_answer-1]) not in "XO"):
                board[player_answer-1] = player_token
                valid = True
            else:
                print( "Эта клеточка уже занята")
        else:
            print( "Некорректный ввод. Введите число от 1 до 9 чтобы походить.")
def check_win(board):
    """Return the winning token ('X'/'O') if any line is complete, else False."""
    lines = (
        (0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
        (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
        (0, 4, 8), (2, 4, 6),              # diagonals
    )
    for a, b, c in lines:
        if board[a] == board[b] == board[c]:
            return board[a]
    return False
def main(board):
    """Game loop: alternate X and O turns, checking for a win from move 5 on."""
    counter = 0
    win = False
    while not win:
        draw_board(board)
        # X moves on even turns, O on odd turns.
        if counter % 2 == 0:
            take_input("X")
        else:
            take_input("O")
        counter += 1
        # A win is only possible once 5 moves have been made.
        if counter > 4:
            tmp = check_win(board)
            if tmp:
                print( tmp, "выиграл!")
                win = True
                break
        if counter == 9:
            print( "Ничья!")
            break
    draw_board(board)
main(board)
|
[
"stasyan.v@gmail.com"
] |
stasyan.v@gmail.com
|
5a39161daf1b0158febc6f1084a130433c1c9944
|
bffd457e17dc250c81d7bd9e25c20a483f1a1ed5
|
/pandatools/demo_hw.py
|
609861d6783d01829e0f12b67428d2acf0459b39
|
[] |
no_license
|
Harzva/gigavision
|
3121f183b8cfc66f9c89f4afe1bd0bdf1c1fe104
|
1fb1ad9b9d5aac6c18dc83184a52c484964be7fb
|
refs/heads/master
| 2023-02-20T10:34:21.735085
| 2021-01-24T10:07:30
| 2021-01-24T10:07:30
| 332,416,570
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,892
|
py
|
import json
import cv2
import numpy as np
import os
# basepath="/root/data/gvision/dataset/train_all_annos/s0.3_t0.7_all"
# load_path="/root/data/gvision/dataset/output/my_pv_train/my_inference/coco_pv_inference_results.json"
# load_path_coco="/root/data/gvision/dataset/predict/s0.5_t0.8_141517/image_annos/person_bbox_test_141517_split.json"
""" "14_OCT_Habour_IMG_14_01___0.5__1408__3072.jpg": {
"image size": {
"height": 2049,
"width": 1025
},
"image id": 18
{
"file_name": "14_OCT_Habour_IMG_14_01___0.5__704__1024.jpg",
"height": 2049,
"width": 1025,
"id": 9
},
"""
# aaas=os.listdir("/root/data/rubzz/ruby/ruby_output3/split_train_person_panda_fafaxue_3category/img")
# for i in aaas:
# print(os.path.join("/root/data/rubzz/ruby/ruby_output3/split_train_person_panda_fafaxue_3category/img",i))
# im=cv2.imread(os.path.join("/root/data/rubzz/ruby/ruby_output3/split_train_person_panda_fafaxue_3category/img",i))
# print(im.shape)
# load_path="/root/data/rubzz/ruby/ruby_output2/train_person_unsure_cell/train_person_unsure_cell_3category.json"
# print(im.shape)
# with open(load_path,'r') as load_f:
# dataset_dicts = json.load(load_f)
# # print(dataset_dicts[0:100])
# with open(load_path_coco,'r') as load_path_coco:
# coco_dataset_dicts = json.load(load_path_coco)
# for coco_images_dict in coco_dataset_dicts["images"]:
# print(coco_images_dict["id"])
# for images_dict in dataset_dicts["images"]:
# if coco_images_dict["id"]==images_dict["id"]:
# h,w=images_dict["height"],images_dict["width"]
# coco_images_dict["height"]=h
# coco_images_dict["width"]=w
# with open(output_path, 'w') as load_f:
# COCO_dataset_dicts= json.dumps(coco_dataset_dicts,indent=2)
# load_f.write(COCO_dataset_dicts)
# with open("/root/data/gvision/dataset/train_all_annos/s0.3_t0.7_all/image_annos/coco_vehicle_train_hwnoi.json",'r') as load_f:
# dataset_dicts = json.load(load_f)
# print(len(dataset_dicts["annotations"]))
# # print(dataset_dicts)#1,2
# print("type",type(dataset_dicts))
"""
450558 coco_person_train_hwnoi.json
483276 coco_pv_train_bbox_hwnoi.json coco_pv_train_hwnoi.json
32718 coco_vehicle_train_bbox_hwnoi.json coco_vehicle_train_hwnoi
"""
def coco_hw(load_path_coco, save_path,
            img_dir="/root/data/rubzz/ruby/ruby_output3/split_train_person_panda_fafaxue_3category/img"):
    """Fill in true height/width for every image entry of a COCO-style json.

    Reads the annotation file at *load_path_coco*, measures each listed image
    on disk under *img_dir* (new optional parameter; defaults to the directory
    the original hard-coded), and writes the updated json to *save_path*.

    Fixes: the output handle was opened with a bare ``open`` and never closed;
    both files are now managed by ``with`` so the json is flushed reliably.
    """
    with open(load_path_coco, 'r') as src:
        coco_dataset_dicts = json.load(src)
    for images_dict in coco_dataset_dicts["images"]:
        imagename = images_dict["file_name"]
        print(imagename)
        # cv2.imread returns an H x W x C array; shape[0:2] is (height, width).
        height, width = cv2.imread(os.path.join(img_dir, imagename)).shape[0:2]
        images_dict['height'] = height
        images_dict['width'] = width
    with open(save_path, 'w') as dst:
        dst.write(json.dumps(coco_dataset_dicts, indent=2))
coco_hw(load_path_coco="/root/data/rubzz/ruby/ruby_output3/split_train_person_panda_fafaxue_3category/split_train_person_panda_fafaxue_3category.json",
save_path="/root/data/rubzz/ruby/ruby_output3/split_train_person_panda_fafaxue_3category/split_train_person_panda_fafaxue_3category_hw.json")
# class MyEncoder(json.JSONEncoder):
# def default(self, obj):
# if isinstance(obj, np.integer):
# return int(obj)
# elif isinstance(obj, np.floating):
# return float(obj)
# elif isinstance(obj, np.ndarray):
# return obj.tolist()
# else:
# return super(MyEncoder, self).default(obj)
# load_path_coco="/root/data/gvision/dataset/d2_output/my_pv_mask/metrics.json"
# # target="/root/data/gvision/dataset/d2_output/my_pv_mask/my_predict/predict_all_0500.json"
# with open(load_path_coco,'r') as load_path_coco:
# result_list= json.load(load_path_coco)
# print(result_list)
# f=open(target,'w')
# f.write(json.dumps(result_list[0:500],cls=MyEncoder))
# a=[]
# for result_dict in result_list:
# result_dict.pop('segmentation')
# a.append(result_dict)
# f=open(target,'w')
# f.write(json.dumps(a,cls=MyEncoder))
# a=np.load("/root/data/gvision/dataset/d2_output/my_pv_mask/model_final_indexedresults.npy",allow_pickle=True)
# print(len(a))
# print(os.path.getsize("/root/data/gvision/dataset/d2_output/my_pv_mask/model_final_indexedresults.npy"))
# load_path_coco="/root/data/gvision/dataset/d2_output/my_pv_center_mask/metrics_18499.json"
# import json
# data = []
# a=[0,0]
# f=open(load_path_coco, 'r', encoding="utf-8")
# # 读取所有行 每行会是一个字符串
# loss=10
# for line,j in enumerate(f.readlines()):
# j = json.loads(j)
# if j["total_loss"]<loss:
# loss=j["total_loss"]
# a[0]=line+1
# # print(line)
# # print(loss)
# a[1]=loss
# print(a)
# img=cv2.imread("/root/data/gvision/panda_tools/panda-imgae-test.png")
# img18=img[0:238,0:423,:]
# img14=img[0:238,423:423*2,:]
# img17=img[0:238,423*2:423*3,:]
# print(img14.shape,img14.shape,img14.shape)
# cv2.imwrite("/root/data/gvision/panda_tools/test18.png",img18)
# cv2.imwrite("/root/data/gvision/panda_tools/test14.png",img14)
# cv2.imwrite("/root/data/gvision/panda_tools/test17.png",img17)
# img18=cv2.resize(img18,(423*50,238*50),interpolation=cv2.INTER_CUBIC)
# img14=cv2.resize(img14,(423*50,238*50),interpolation=cv2.INTER_CUBIC)
# img17=cv2.resize(img17,(423*50,238*50),interpolation=cv2.INTER_CUBIC)
# cv2.imwrite("/root/data/gvision/panda_tools/test_18.png",img18)
# cv2.imwrite("/root/data/gvision/panda_tools/test_14.png",img14,[int(cv2.IMWRITE_PNG_COMPRESSION), 9])
# cv2.imwrite("/root/data/gvision/panda_tools/test_17.png",img17,[int(cv2.IMWRITE_PNG_COMPRESSION), 9])
# import numpy as np
# a=[[1,2,3,4],[1,2,3,4],[1,2,3,4]]
# b=[1]
# c=[b,b,b,b]
# [old+new for old,new in zip(a,c)]
# print([old+new for old,new in zip(a,c)])
# print([1176.27, 637.9455, 1412.9817, 1139.9287] +[0.7856537])
|
[
"562282219@qq.com"
] |
562282219@qq.com
|
d74a7b8a8e2ea23d2b2855097c8f985640ed438f
|
2fac796fa58c67fb5a4a95a6e7f28cbef169318b
|
/python/copy-books.py
|
2e512fdb8dea53102e37f2f032524d38ae208b24
|
[] |
no_license
|
jwyx3/practices
|
f3fe087432e79c8e34f3af3a78dd10278b66dd38
|
6fec95b9b4d735727160905e754a698513bfb7d8
|
refs/heads/master
| 2021-03-12T20:41:59.816448
| 2019-04-14T06:47:30
| 2019-04-14T06:47:30
| 18,814,777
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,261
|
py
|
class Solution:
    # @param pages: a list of integers
    # @param k: an integer
    # @return: an integer
    def copyBooks(self, pages, k):
        """Minimum minutes for k copiers to copy consecutive runs of books.

        Binary-searches the answer between the largest single book and the
        total page count.  Fix: the midpoint used Python-2 ``/``, which under
        Python 3 produces floats and a float result; it is now ``//``.
        """
        # can k persons copy books within x minutes
        def check(x):
            total, kt = 0, 0
            for p in pages:
                # current person cannot copy any more: add one more person
                if total + p > x:
                    kt += 1
                    total = 0
                total += p
            return (kt + (0 if total == 0 else 1)) <= k

        # no books
        if not pages:
            return 0
        # has books but no person
        if pages and k <= 0:
            return -1
        left, right = 0, 0
        for p in pages:
            # lower bound: the book with the most pages
            left = max(left, p)
            # upper bound: one person copies everything
            right += p
        while left + 1 < right:
            mid = left + (right - left) // 2
            if check(mid):
                right = mid
            else:
                left = mid + 1
        if check(left):
            return left
        return right
class Solution:
    # @param pages: a list of integers
    # @param k: an integer
    # @return: an integer
    def copyBooks(self, pages, k):
        """Minimum minutes for k copiers (binary search on the answer).

        Fix: the midpoint used Python-2 ``/``; under Python 3 that yields
        floats and a float return value, so it is now floor division ``//``.
        """
        # no book
        if not pages:
            return 0
        # invalid
        if pages and k <= 0:
            return -1
        start, end = max(pages), sum(pages)
        while start + 1 < end:
            mid = start + (end - start) // 2
            # If mid is ok, then all x > mid is ok
            if self.check(pages, k, mid):
                end = mid
            else:
                start = mid
        if self.check(pages, k, start):
            return start
        return end

    # @param t: time used to copy book
    # return: boolean, whether all books can be copied within t
    @staticmethod
    def check(pages, k, t):
        total, k_tmp = 0, 0
        for page in pages:
            # this one can not read any more: add one more person
            if total + page > t:
                k_tmp += 1
                total = 0
            total += page
        if total > 0:
            k_tmp += 1
        return k_tmp <= k
|
[
"jwyx88003@gmail.com"
] |
jwyx88003@gmail.com
|
4b304a6e1a76ef2a2f7c84a60648ce425b7fb6eb
|
4ad94b71e30883d6df07a3277265bd6fb7457ba7
|
/python/examples/doc_examples/export/animation_mpeg4.py
|
7c73d2ff682be520d5d63b21b8a1722fb7631a24
|
[
"MIT"
] |
permissive
|
Tecplot/handyscripts
|
7cb1d4c80f323c785d06b0c8d37aeb0acb67f58c
|
84a89bfecff5479a0319f08eb8aa9df465283830
|
refs/heads/master
| 2023-08-22T15:29:22.629644
| 2023-08-12T01:19:59
| 2023-08-12T01:19:59
| 149,826,165
| 89
| 64
|
MIT
| 2022-01-13T01:11:02
| 2018-09-21T22:47:23
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,932
|
py
|
import argparse, os
import tecplot as tp
from tecplot.constant import *
def parse_args():
    """Parse the command-line options; run this script with ``--help``
    to see usage and help information."""
    parser = argparse.ArgumentParser()
    flag_specs = [
        (('-c', '--connect'), dict(action='store_true',
                                   help='connect to TecUtil Server')),
        (('-p', '--port'), dict(type=int, default=7600,
                                help='port to use when connecting to TecUtil Server')),
        (('-n', '--nframes'), dict(type=int, default=360,
                                   help='number of frames to produce in video')),
        (('outfile',), dict(nargs='?', default='aileron_roll.mp4',
                            help='output file name')),
    ]
    for names, options in flag_specs:
        parser.add_argument(*names, **options)
    return parser.parse_args()
def setup_plot():
    """
    Load the F-18 dataset from Tecplot 360's examples and show the
    jet surface in 3D.
    """
    tp.new_layout()
    exdir = tp.session.tecplot_examples_directory()
    datafile = os.path.join(exdir, 'SimpleData', 'F18.plt')
    ds = tp.data.load_tecplot(datafile)
    frame = tp.active_frame()
    frame.show_border = False
    plot = frame.plot(PlotType.Cartesian3D)
    plot.activate()
    # Color the surface by contouring on variable 'S'.
    plot.contour(0).variable = ds.variable('S')
    plot.show_contour = True
    return plot
def translate_view(view, x=0, y=0, z=0):
    """Shift the viewer position by (x, y, z) relative to the data."""
    old = view.position
    view.position = (old.x + x, old.y + y, old.z + z)
def create_animation(outfile, plot, nframes):
    """
    Using the tp.export.animation_mpeg4() context manager, the F-18 is
    recorded doing an "aileron roll" by rotating and translating the
    viewer with respect to the data by a small amount and capturing
    each frame of the animation with a call to ani.export_animation_frame()

    Fix: the loop previously read the global ``args.nframes`` instead of
    the ``nframes`` parameter, so the parameter was silently ignored and
    the function only worked when the script-level ``args`` global existed.
    """
    with tp.session.suspend():
        opts = dict(
            width=400,
            animation_speed=30,
            supersample=3,
        )
        view = plot.view
        translate_view(view, -15)
        #{DOC:highlight}[
        with tp.export.animation_mpeg4(outfile, **opts) as ani:
        #]
            for i in range(nframes):
                view.rotate_axes(5, (1, 0, 0))
                translate_view(view, 30 / nframes)
                #{DOC:highlight}[
                ani.export_animation_frame()
                #]
"""
This script is meant to run on the command line. Run with "--help" to see
usage and help information about the options it understands. It loads
the F-18 dataset from Tecplot 360's examples directory and produces a
video of the model doing an "aileron roll" by manipulating the viewer
position.
"""
args = parse_args()
if args.connect:
tp.session.connect(port=args.port)
plot = setup_plot()
create_animation(args.outfile, plot, args.nframes)
print('video file created:', args.outfile)
|
[
"55457608+brandonmarkham@users.noreply.github.com"
] |
55457608+brandonmarkham@users.noreply.github.com
|
826ff5fbda6157abe17679e09c88a2f5ee00718f
|
7b2d14f78099fde6c4a35082c9c294d1771cb163
|
/Week 8/class_test.py
|
53f6096a898910fbe51a1f31c0374884107660a0
|
[] |
no_license
|
pinardy/Digital-World
|
04c6ddb369ede7295a0891aaaa006486c557965e
|
dd0a351eb64f05b524b08c47cd0c0ad3eadd775c
|
refs/heads/master
| 2020-12-30T22:45:02.448171
| 2018-01-30T03:06:08
| 2018-01-30T03:06:08
| 80,622,946
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
class A:
    # x is a CLASS attribute: a single list shared by every instance of A.
    x = []
    def add(self):
        # Appends to the shared class-level list, not a per-instance one.
        self.x.append(1)
class B:
    def __init__(self):
        # x is an INSTANCE attribute: each B gets its own fresh list.
        self.x = []
    def add(self):
        self.x.append(1)
# Demo: A.x is one list shared by all A instances, while each B instance
# builds its own list in __init__.
x = A()
y = A()
x.add()
print(x.x)  # fixed: was the Python-2 statement `print x.x`
y.add()
# print("A's x:", x.x)   # would print [1, 1] — x and y share A.x
x = B()
y = B()
x.add()
y.add()
# print("B's x:", x.x)   # would print [1] — each B has its own x
|
[
"pinardy@hotmail.com"
] |
pinardy@hotmail.com
|
2d959348fb53a3f73acfd66f0441f7e8c6026727
|
813284b9dac4477f4893cb6b30ffafab8e181cc4
|
/contrib/linearize/linearize-hashes.py
|
d09065439ec8c13970e3d8289d3634fc41ad3053
|
[
"MIT"
] |
permissive
|
phoenixkonsole/xbtx
|
609809c29c32e2c4373a26204480a0e2a9f0922e
|
2f9db3d0ca34103e315a5bc9ef2fa2d42cb71810
|
refs/heads/master
| 2023-05-11T17:37:28.762478
| 2023-05-03T09:54:14
| 2023-05-03T09:54:14
| 243,993,348
| 3
| 11
|
MIT
| 2023-05-03T09:54:15
| 2020-02-29T15:30:47
|
C++
|
UTF-8
|
Python
| false
| false
| 4,698
|
py
|
#!/usr/bin/env python3
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Copyright (c) 2017 The BitcoinSubsidium Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
try: # Python 3
import http.client as httplib
except ImportError: # Python 2
import httplib
import json
import re
import base64
import sys
import os
import os.path
settings = {}
##### Switch endian-ness #####
def hex_switchEndian(s):
    """ Switches the endianness of a hex string (in pairs of hex chars) """
    pairs = [s[i:i + 2] for i in range(0, len(s), 2)]
    return ''.join(reversed(pairs))
class BitcoinSubsidiumRPC:
    """Minimal JSON-RPC-over-HTTP client for a BitcoinSubsidium node."""

    def __init__(self, host, port, username, password):
        credentials = ("%s:%s" % (username, password)).encode('utf-8')
        self.authhdr = b"Basic " + base64.b64encode(credentials)
        self.conn = httplib.HTTPConnection(host, port=port, timeout=30)

    def execute(self, obj):
        """POST a request (or batch) and return the decoded reply, or None."""
        try:
            self.conn.request('POST', '/', json.dumps(obj),
                              {'Authorization': self.authhdr,
                               'Content-type': 'application/json'})
        except ConnectionRefusedError:
            print('RPC connection refused. Check RPC settings and the server status.',
                  file=sys.stderr)
            return None

        resp = self.conn.getresponse()
        if resp is None:
            print("JSON-RPC: no response", file=sys.stderr)
            return None

        return json.loads(resp.read().decode('utf-8'))

    @staticmethod
    def build_request(idx, method, params):
        """Shape one JSON-RPC 1.1 call dict; params defaults to an empty list."""
        return {'version': '1.1',
                'method': method,
                'id': idx,
                'params': [] if params is None else params}

    @staticmethod
    def response_is_error(resp_obj):
        """True when the reply carries a non-null 'error' member."""
        return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinSubsidiumRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
if reply is None:
print('Cannot continue. Program will halt.')
return None
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
sys.exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
if settings['rev_hash_bytes'] == 'true':
resp_obj['result'] = hex_switchEndian(resp_obj['result'])
print(resp_obj['result'])
height += num_blocks
def get_rpc_cookie():
# Open the cookie file
with open(os.path.join(os.path.expanduser(settings['datadir']), '.cookie'), 'r', encoding="ascii") as f:
combined = f.readline()
combined_split = combined.split(":")
settings['rpcuser'] = combined_split[0]
settings['rpcpassword'] = combined_split[1]
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1], encoding="utf8")
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8766
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rev_hash_bytes' not in settings:
settings['rev_hash_bytes'] = 'false'
use_userpass = True
use_datadir = False
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
use_userpass = False
if 'datadir' in settings and not use_userpass:
use_datadir = True
if not use_userpass and not use_datadir:
print("Missing datadir or username and/or password in cfg file", file=sys.stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
# Force hash byte format setting to be lowercase to make comparisons easier.
settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
# Get the rpc user and pass from the cookie if the datadir is set
if use_datadir:
get_rpc_cookie()
get_block_hashes(settings)
|
[
"william.kibbler@googlemail.com"
] |
william.kibbler@googlemail.com
|
db31b8d2ff15f45a463ad6fbc60c2cb57dc1f3f5
|
31e3e0ce6d8b8cd1b286971aa1ea3c56a338ca48
|
/sunpy/util/sphinx/doctest.py
|
5a29458fbf6228f895f78a2fab1182fc8684257c
|
[
"BSD-2-Clause"
] |
permissive
|
sunpy/sunpy
|
d8df998cf7753834ffd7add6911c0e4f6828b5b8
|
edd3ea805f4540d41ce2932a0e865cab2d6a4cf5
|
refs/heads/main
| 2023-09-01T12:05:09.343909
| 2023-08-31T13:36:35
| 2023-08-31T13:36:35
| 2,165,383
| 792
| 683
|
BSD-2-Clause
| 2023-09-14T14:03:09
| 2011-08-06T15:34:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,966
|
py
|
# Licensed under the Astropy 3-clause BSD license - see licenses/ASTROPY.rst
"""
This is a set of three directives that allow us to insert metadata
about doctests into the .rst files so the testing framework knows
which tests to skip.
This is quite different from the doctest extension in Sphinx itself,
which actually does something. For astropy, all of the testing is
centrally managed from py.test and Sphinx is not used for running
tests.
"""
import re
from docutils.nodes import literal_block
from docutils.parsers.rst import Directive
class DoctestSkipDirective(Directive):
has_content = True
def run(self):
# Check if there is any valid argument, and skip it. Currently only
# 'win32' is supported in astropy.tests.pytest_plugins.
if re.match('win32', self.content[0]):
self.content = self.content[2:]
code = '\n'.join(self.content)
return [literal_block(code, code)]
class DoctestOmitDirective(Directive):
has_content = True
def run(self):
# Simply do not add any content when this directive is encountered
return []
class DoctestRequiresDirective(DoctestSkipDirective):
# This is silly, but we really support an unbounded number of
# optional arguments
optional_arguments = 64
def setup(app):
app.add_directive('doctest-requires', DoctestRequiresDirective)
app.add_directive('doctest-skip', DoctestSkipDirective)
app.add_directive('doctest-skip-all', DoctestSkipDirective)
app.add_directive('doctest', DoctestSkipDirective)
# Code blocks that use this directive will not appear in the generated
# documentation. This is intended to hide boilerplate code that is only
# useful for testing documentation using doctest, but does not actually
# belong in the documentation itself.
app.add_directive('testsetup', DoctestOmitDirective)
return {'parallel_read_safe': True,
'parallel_write_safe': True}
|
[
"stuart@cadair.com"
] |
stuart@cadair.com
|
44846d39e03d4c86d424fa57d50d5d22b76e2b30
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/73/usersdata/214/39773/submittedfiles/triangulo.py
|
69ff688cd3048f7f591723dc3954f22789a5366c
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
# -*- coding: utf-8 -*-
import math
a=int(input('Digite a:'))
b=int(input('Digite a:'))
c=int(input('Digite a:')
a>=b>=c>0
if a<b+c:
print('S')
else:
print('N')
if a+b<c:
if(a**2)==(b*2)+(c**2):
print ('Re')
if(a**2)==(b*2)+(c**2):
print ('Re')
if(a**2)==(b*2)+(c**2):
print ('Re')
if a==b==c:
print('Eq)
if b=-c1=a:
print ('is')
if (a!=b) and (b!=c):
print ('Es')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
694a32db49cb1ca6e8ff77d71cfc2dbc980fad97
|
a668806b052884b2f1faef6a28304c908a89fc68
|
/test/fixtures/LoggerFixture.py
|
fcb55b6b5af8ab5768519c8566db92db68e92c05
|
[
"MIT"
] |
permissive
|
pip-services3-python/pip-services3-elasticsearch-python
|
aa2e49a70a0debcb2e77f59aefe144baf401b4ca
|
fe2fba2aeaef7ba80c17732d0065e5bcd60fcb82
|
refs/heads/master
| 2023-01-28T14:40:18.698083
| 2023-01-24T08:08:32
| 2023-01-24T08:08:32
| 140,886,199
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,151
|
py
|
# -*- coding: utf-8 -*-
import time
from pip_services3_components.log import LogLevel
class LoggerFixture:
def __init__(self, logger):
self.__logger = logger
def test_log_level(self):
assert self.__logger.get_level() >= LogLevel.Nothing
assert self.__logger.get_level() <= LogLevel.Trace
def test_simple_logging(self):
self.__logger.set_level(LogLevel.Trace)
self.__logger.fatal(None, None, 'Fatal error message')
self.__logger.error(None, None, 'Error message')
self.__logger.warn(None, 'Warning message')
self.__logger.info(None, 'Information message')
self.__logger.debug(None, 'Debug message')
self.__logger.trace(None, 'Trace message')
self.__logger.dump()
time.sleep(1)
def test_error_logging(self):
try:
# Raise an exception
raise Exception('test')
except Exception as err:
self.__logger.fatal('123', err, 'Fatal error')
self.__logger.error('123', err, 'Recoverable error')
assert err is not None
self.__logger.dump()
time.sleep(1)
|
[
"judas.priest999@gmail.com"
] |
judas.priest999@gmail.com
|
32f57999cceed14699a94052de464465d2c5f3f6
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_mendicant.py
|
b54bd489ffe8ff7e7bd2a65fa8196b2a4fa0cc55
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
#calss header
class _MENDICANT():
def __init__(self,):
self.name = "MENDICANT"
self.definitions = [u'someone, especially a member of a religious group, who lives by asking people they do not know for money']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
9c7997be9cd575f150d0f208a7ab48236e792676
|
ffc6efca14efd126e9b0541e7b0c1f3a05ee90e1
|
/algorithms/sorts.py
|
639abd50908bf5d6bb9026c50b0d6391653e9e3c
|
[] |
no_license
|
dlopes7/python-tests
|
65b6687fcee9c6230d1fd13aa2941fef34cbaa8f
|
93c175c717499f75a2f533c2bccf4e4e0b886e25
|
refs/heads/master
| 2021-01-13T00:16:32.516708
| 2015-12-28T17:45:43
| 2015-12-28T17:45:43
| 48,704,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,112
|
py
|
import random
import pygame
from algorithms.colors import *
def draw_array(array, nome, frame):
DISPLAY.fill(BLACK)
aux_surf = DISPLAY_FONT.render(nome+ ' - ' + str(frame), True, WHITE)
aux_rect = aux_surf.get_rect()
aux_rect.topleft = (10, 10)
DISPLAY.blit(aux_surf, aux_rect)
for idx, value in enumerate(array):
x = 10 + idx * 2
pygame.draw.line(DISPLAY, WHITE, (x, WINDOW_HEIGHT-10), (x, WINDOW_HEIGHT - value - 10), 1)
CLOCK.tick(FPS)
pygame.display.update()
def selection_sort():
frame = 0
lista = list(range(0, 500))
random.shuffle(lista)
for i in range( len(lista) ):
frame += 1
draw_array(lista, 'Selection Sort', frame)
menor = i
for k in range( i + 1 , len(lista) ):
if lista[k] < lista[menor]:
menor = k
lista[menor],lista[i]=lista[i],lista[menor]
def bubble_sort():
frame = 0
badList = list(range(0, 500))
random.shuffle(badList)
length = len(badList)
for i in range(0,length):
frame += 1
draw_array(badList, 'Bubble Sort', frame)
swapped = False
for element in range(0, length-i-1):
if badList[element] > badList[element + 1]:
hold = badList[element + 1]
badList[element + 1] = badList[element]
badList[element] = hold
swapped = True
if not swapped: break
def heapsort():
frame = 0
lst = list(range(0, 501))
random.shuffle(lst)
for start in range(int((len(lst)-2)/2), -1, -1):
frame += 1
draw_array(lst, 'Heap Sort', frame)
siftdown(lst, start, len(lst)-1)
for end in range(len(lst)-1, 0, -1):
frame += 1
draw_array(lst, 'Heap Sort', frame)
lst[end], lst[0] = lst[0], lst[end]
siftdown(lst, 0, end - 1)
return lst
def siftdown(lst, start, end):
root = start
while True:
child = root * 2 + 1
if child > end: break
if child + 1 <= end and lst[child] < lst[child + 1]:
child += 1
if lst[root] < lst[child]:
lst[root], lst[child] = lst[child], lst[root]
root = child
else:
break
def gnome():
frame = 0
lista = list(range(0, 100))
random.shuffle(lista)
pivot = 0
lista_length = len(lista)
while pivot < lista_length - 1:
frame += 1
draw_array(lista, 'Gnome Sort', frame)
if lista[pivot] > lista[pivot + 1]:
lista[pivot + 1], lista[pivot] = lista[pivot], lista[pivot + 1]
if pivot > 0:
pivot -= 2
pivot += 1
if __name__ == '__main__':
pygame.init()
CLOCK = pygame.time.Clock()
WINDOW_WIDTH = 1100
WINDOW_HEIGHT = 600
DISPLAY = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
DISPLAY.fill(BLACK)
pygame.font.init()
DISPLAY_FONT = pygame.font.SysFont('couriernew', 36)
pygame.display.set_caption("Sort Tests")
FPS = 60
selection_sort()
bubble_sort()
heapsort()
gnome()
|
[
"davidribeirolopes@gmail.com"
] |
davidribeirolopes@gmail.com
|
09abbc7def9184b80f439ee054f332587bccaf68
|
03e3138f99f275d15d41a5c5bfb212f85d64d02e
|
/source/res/scripts/common/Lib/distutils/tests/test_core.py
|
2de38bab70119d63190c0ca07ab4ad94abb32f45
|
[] |
no_license
|
TrenSeP/WorldOfTanks-Decompiled
|
e428728e7901146d0b599d02c930d70532232a97
|
1faa748acec1b7e435b657fd054ecba23dd72778
|
refs/heads/1.4.1
| 2020-04-27T08:07:49.813023
| 2019-03-05T17:37:06
| 2019-03-05T17:37:06
| 174,159,837
| 1
| 0
| null | 2019-03-06T14:33:33
| 2019-03-06T14:24:36
|
Python
|
UTF-8
|
Python
| false
| false
| 2,628
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/common/Lib/distutils/tests/test_core.py
import StringIO
import distutils.core
import os
import shutil
import sys
import test.test_support
from test.test_support import captured_stdout, run_unittest
import unittest
from distutils.tests import support
setup_using___file__ = '\n__file__\n\nfrom distutils.core import setup\nsetup()\n'
setup_prints_cwd = '\nimport os\nprint os.getcwd()\n\nfrom distutils.core import setup\nsetup()\n'
class CoreTestCase(support.EnvironGuard, unittest.TestCase):
def setUp(self):
super(CoreTestCase, self).setUp()
self.old_stdout = sys.stdout
self.cleanup_testfn()
self.old_argv = (sys.argv, sys.argv[:])
def tearDown(self):
sys.stdout = self.old_stdout
self.cleanup_testfn()
sys.argv = self.old_argv[0]
sys.argv[:] = self.old_argv[1]
super(CoreTestCase, self).tearDown()
def cleanup_testfn(self):
path = test.test_support.TESTFN
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
def write_setup(self, text, path=test.test_support.TESTFN):
f = open(path, 'w')
try:
f.write(text)
finally:
f.close()
return path
def test_run_setup_provides_file(self):
distutils.core.run_setup(self.write_setup(setup_using___file__))
def test_run_setup_uses_current_dir(self):
sys.stdout = StringIO.StringIO()
cwd = os.getcwd()
os.mkdir(test.test_support.TESTFN)
setup_py = os.path.join(test.test_support.TESTFN, 'setup.py')
distutils.core.run_setup(self.write_setup(setup_prints_cwd, path=setup_py))
output = sys.stdout.getvalue()
if output.endswith('\n'):
output = output[:-1]
self.assertEqual(cwd, output)
def test_debug_mode(self):
sys.argv = ['setup.py', '--name']
with captured_stdout() as stdout:
distutils.core.setup(name='bar')
stdout.seek(0)
self.assertEqual(stdout.read(), 'bar\n')
distutils.core.DEBUG = True
try:
with captured_stdout() as stdout:
distutils.core.setup(name='bar')
finally:
distutils.core.DEBUG = False
stdout.seek(0)
wanted = 'options (after parsing config files):\n'
self.assertEqual(stdout.readlines()[0], wanted)
def test_suite():
return unittest.makeSuite(CoreTestCase)
if __name__ == '__main__':
run_unittest(test_suite())
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
7c464d2064133dba060722b62dd9afa6f8efab4a
|
7864ab2c567f5f3a98e7ab38ff38a3bd7c816fde
|
/fireplace/cards/gvg/spare_parts.py
|
eabe7299eaca68fb72038d7f15c9cb40af004331
|
[] |
no_license
|
gmagogsfm/fireplace
|
bfa1b57254b673317442518a997c635183bd3e61
|
f16ee0659310a003d54552d0660ea3eb15c4da3f
|
refs/heads/master
| 2021-01-09T09:06:35.035741
| 2015-02-09T14:30:24
| 2015-02-09T14:30:24
| 28,540,192
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
"""
Spare Parts
"""
from ..utils import *
# Armor Plating
class PART_001:
action = buffTarget("PART_001e")
class PART_001e:
Health = 1
# Time Rewinder
class PART_002:
action = bounceTarget
# Rusty Horn
class PART_003:
def action(self, target):
target.taunt = True
# Finicky Cloakfield
class PART_004:
action = buffTarget("PART_004e")
class PART_004e:
Stealth = True
def OWN_TURN_BEGIN(self):
self.destroy()
# Emergency Coolant
class PART_005:
def action(self, target):
target.frozen = True
# Reversing Switch
class PART_006:
action = buffTarget("PART_006a")
class PART_006a:
def apply(self, target):
atk = target.atk
self.setAtk(target.health)
self.setHealth(atk)
# Whirling Blades
class PART_007:
action = buffTarget("PART_007e")
class PART_007e:
Atk = 1
|
[
"jerome@leclan.ch"
] |
jerome@leclan.ch
|
6e8c6813730dff827293a1ea8bb73eac583c808b
|
32a3396cf8d879c92c09f5411af946084ed2ca3c
|
/blog/company_name_context_processor.py
|
945c41dda6a2e989d60d18204aab0c0e256507cc
|
[] |
no_license
|
mPowering/django-mpowering-healthcare
|
5ae527dd7abac8d2f9debc506b6cb197b4db0ab8
|
52cff8d864d9363f0115831963bfa43a92ee2b47
|
refs/heads/master
| 2020-12-25T18:16:32.992431
| 2014-05-23T15:52:46
| 2014-05-23T15:52:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
# Django imports
from django.conf import settings
def get_company_name(request):
return {"company": settings.COMPANY_NAME}
|
[
"gitumarkk@gmail.com"
] |
gitumarkk@gmail.com
|
9582efa970cbb8fbcb5908d20387bee0ac01cdcb
|
00b405a49ac6108d24986243c4b52fa53fb58acc
|
/0517_super_washing_machines.py
|
2f24982ec5d0bc0f3747d006eebaca662cd97192
|
[] |
no_license
|
Shin-jay7/LeetCode
|
0325983fff95bfbc43a528812582cbf9b7c0c2f2
|
953b0b19764744753f01c661da969bdab6521504
|
refs/heads/master
| 2023-07-19T07:17:21.513531
| 2023-07-15T06:05:06
| 2023-07-15T06:05:06
| 231,285,199
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
from __future__ import annotations
from typing import List
class Solution:
def findMinMoves(self, machines: List[int]) -> int:
total, n = sum(machines), len(machines)
if total % n:
return -1
target, ans, to_right = total // n, 0, 0
# to_right: num of dresses to pass to the right machine
# dresses: num of dresses in the machine
for dresses in machines:
to_right = dresses + to_right - target
ans = max(ans, abs(to_right), dresses-target)
return ans
|
[
"shin@jay7.net"
] |
shin@jay7.net
|
84d78b6245a076777dc308a6a42e78272b8479ec
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03330/s929352693.py
|
253b3802f55ad2c29dbbdab985c6ea0170a9fbee
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 712
|
py
|
n,c=map(int,input().split())
irohen=[list(map(int,input().split())) for i in range(c)]
grid=[list(map(int,input().split())) for i in range(n)]
rem0=[0]*c
rem1=[0]*c
rem2=[0]*c
for i in range(n):
for j in range(n):
if (i+j)%3==0:
rem0[grid[i][j]-1]+=1
elif (i+j)%3==1:
rem1[grid[i][j]-1]+=1
elif (i+j)%3==2:
rem2[grid[i][j]-1]+=1
ans=10**10
for i in range(c):
for j in range(c):
for h in range(c):
chk=0
if i==j or i==h or j==h:
continue
for k in range(c):
chk+=rem0[k]*irohen[k][i]+rem1[k]*irohen[k][j]+rem2[k]*irohen[k][h]
if chk < ans:ans=chk
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
c6658efc9c3b1000d0b1be621573728ac5c30b16
|
fc4f97918ac9366837cb05f51091178bbf37ac18
|
/shelve_ex.py
|
d7cf2d49cdfc05a1eac38bbb50efef5cfafaa194
|
[] |
no_license
|
zoejane/automate-python
|
ae72ef7bed291b757ee41d578844c132cd1fc192
|
9c4e8ce69da21dc58e4fc85604d9e1fc848d8c3e
|
refs/heads/master
| 2021-01-10T01:51:23.009746
| 2015-10-24T13:14:09
| 2015-10-24T13:14:09
| 43,808,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
py
|
import shelve
# list,dictionary,etc..
shelfFile =shelve.open('mydata')
shelfFile['cats']=['Pooka','Simon','Cleo']
shelfFile.close()
shelfFile =shelve.open('mydata')
print(shelfFile['cats'])
print(list(shelfFile.keys()))
print(list(shelfFile.values()))
shelfFile.close()
|
[
"dadac123@gmail.com"
] |
dadac123@gmail.com
|
7b53a6dde1bd8e550782891bc7ea6c8a288fb41e
|
b005369db9f220e8548a11fceab8543a726def3c
|
/stacked-autoencoder-pytorch/untitled.py
|
d5d81ba59073148107d09a96f751aa0269d6617a
|
[] |
no_license
|
liuyanqi/shallow_learning
|
9ec2b365986f66f2a3c07d377e3d979a07ebb2bd
|
b5fafb5b6ae5886bbd1a4ed03611eaee5481b627
|
refs/heads/master
| 2020-04-09T03:27:21.907715
| 2018-12-01T22:16:23
| 2018-12-01T22:16:23
| 159,983,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,552
|
py
|
import os
import torch
import torch._utils
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
import torchvision
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import CIFAR10
from torchvision.utils import save_image
from model3 import VAE
if not os.path.exists('./mlp_img'):
os.mkdir('./mlp_img')
def to_img(x):
x = x.view(x.size(0), 3, 32, 32)
return x
num_epochs = 10
batch_size = 128
learning_rate = 1e-3
transform = transforms.Compose([transforms.ToTensor()])
dataset = torchvision.datasets.CIFAR10(root='./data', train=True, transform=transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=False, num_workers=2)
model = VAE().cuda()
model.train()
for epoch in range(20):
for i, data in enumerate(dataloader):
img, _ = data
# noisy_img = theano_rng.binomial(size=img.shape, n=1, p=0.1, dtype=theano.config.floatX) * img
img = Variable(img).cuda()
# ===================forward=====================
output = model(img, epoch)
# ===================log========================
# print("sparsity:", torch.sum(output.data > 0.0)*100 / output.data.numel())
x_reconstructed = model.reconstruct(output)
orig = to_img(img.cpu().data)
save_image(orig, './imgs_cifar/orig_1_{}.png'.format(epoch))
pic = to_img(x_reconstructed.cpu().data)
save_image(pic, './imgs_cifar/reconstruction_1_{}.png'.format(epoch))
##fine tuning
model.eval()
classifier = nn.Sequential(nn.Linear(8*8*200, 324), nn.ReLU(), nn.Linear(324, 10), nn.Softmax())
criterion = nn.CrossEntropyLoss()
params = list(VAE.encoder.parameters()) + list(classifier.parameters())
optimizer = torch.optim.SGD(params, lr=0.1)
for epoch in range(30):
for i, data in enumerate(dataloader):
img, target = data
img = Variable(img).cuda()
target = Variable(target).cuda()
feature = VAE(img)
feature = feature.view(feature.size(0), -1)
prediction = classifier(feature)
loss = criterion(prediction, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
pred = prediction.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
# if epoch % 10 == 0:
# x = to_img(img.cpu().data)
# x_hat = to_img(output.cpu().data)
# x_noisy = to_img(noisy_img.cpu().data)
# weights = to_img(model.encoder[0].weight.cpu().data)
# save_image(x, './mlp_img/x_{}.png'.format(epoch))
# save_image(x_hat, './mlp_img/x_hat_{}.png'.format(epoch))
# save_image(x_noisy, './mlp_img/x_noisy_{}.png'.format(epoch))
# save_image(weights, './filters/epoch_{}.png'.format(epoch))
# torch.save(model.state_dict(), './sim_autoencoder.pth')
|
[
"liuyanqi@umich.edu"
] |
liuyanqi@umich.edu
|
9e6ee83397fceeb430e08c5252d3be5dfb030f62
|
c5458f2d53d02cb2967434122183ed064e1929f9
|
/sdks/python/setup.py
|
0e7640cc7b4ed0f33efa8b96d3aad2210fed2a4f
|
[] |
no_license
|
ross-weir/ergo-node-api-sdks
|
fd7a32f79784dbd336ef6ddb9702b9dd9a964e75
|
9935ef703b14760854b24045c1307602b282c4fb
|
refs/heads/main
| 2023-08-24T05:12:30.761145
| 2021-11-08T10:28:10
| 2021-11-08T10:28:10
| 425,785,912
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,087
|
py
|
"""
Ergo Node API
API docs for Ergo Node. Models are shared between all Ergo products # noqa: E501
The version of the OpenAPI document: 4.0.15
Contact: ergoplatform@protonmail.com
Generated by: https://openapi-generator.tech
"""
from setuptools import setup, find_packages # noqa: H301
NAME = "ergo-node"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = [
"urllib3 >= 1.25.3",
"python-dateutil",
]
setup(
name=NAME,
version=VERSION,
description="Ergo Node API",
author="Ergo Platform Team",
author_email="ergoplatform@protonmail.com",
url="",
keywords=["OpenAPI", "OpenAPI-Generator", "Ergo Node API"],
python_requires=">=3.6",
install_requires=REQUIRES,
packages=find_packages(exclude=["test", "tests"]),
include_package_data=True,
license="CC0 1.0 Universal",
long_description="""\
API docs for Ergo Node. Models are shared between all Ergo products # noqa: E501
"""
)
|
[
"29697678+ross-weir@users.noreply.github.com"
] |
29697678+ross-weir@users.noreply.github.com
|
c8b0402d15b859aaa402c2e24f7481605a77cd2a
|
762de1c66746267e05d53184d7854934616416ee
|
/tools/MolSurfGenService/MolSurfaceGen32/chimera/share/SimpleSession/versions/v25.py
|
ab3a358e3694974f623f33517a4eaf4975369ca4
|
[] |
no_license
|
project-renard-survey/semanticscience
|
6e74f5d475cf0ebcd9bb7be6bb9522cf15ed8677
|
024890dba56c3e82ea2cf8c773965117f8cda339
|
refs/heads/master
| 2021-07-07T21:47:17.767414
| 2017-10-04T12:13:50
| 2017-10-04T12:13:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,944
|
py
|
# --- UCSF Chimera Copyright ---
# Copyright (c) 2000 Regents of the University of California.
# All rights reserved. This software provided pursuant to a
# license agreement containing restrictions on its disclosure,
# duplication and use. This notice must be embedded in or
# attached to all copies, including partial copies, of the
# software or any revisions or derivations thereof.
# --- UCSF Chimera Copyright ---
#
# $Id: v25.py 26655 2009-01-07 22:02:30Z gregc $
from v24 import RemapDialog, reportRestoreError, restoreWindowSize, \
restoreOpenModelsAttrs, noAutoRestore, autoRestorable, \
registerAfterModelsCB, makeAfterModelsCBs, restoreModelClip, \
restoreSelections, restoreCamera, getColor, findFile, \
setSessionIDparams, sessionID, idLookup, expandSummary, init, \
beginRestore, endRestore, restoreColors, restoreSurfaces, restoreVRML, \
restorePseudoBondGroups, restoreOpenStates, restoreFontInfo
import globals # so that various version files can easily access same variables
import chimera
def restoreMolecules(molInfo, resInfo, atomInfo, bondInfo, crdInfo):
items = []
sm = globals.sessionMap
res2mol = []
atom2mol = []
openModelsArgs = {}
for ids, name, cid, display, lineWidth, pointSize, stickScale, \
pdbHeaders, surfaceOpacity, ballScale, vdwDensity, autochain, \
ribbonHidesMainchain in zip(
expandSummary(molInfo['ids']),
expandSummary(molInfo['name']),
expandSummary(molInfo['color']),
expandSummary(molInfo['display']),
expandSummary(molInfo['lineWidth']),
expandSummary(molInfo['pointSize']),
expandSummary(molInfo['stickScale']),
molInfo['pdbHeaders'],
expandSummary(molInfo['surfaceOpacity']),
expandSummary(molInfo['ballScale']),
expandSummary(molInfo['vdwDensity']),
expandSummary(molInfo['autochain']),
expandSummary(molInfo['ribbonHidesMainchain'])
):
m = chimera.Molecule()
sm[len(items)] = m
items.append(m)
m.name = name
from SimpleSession import modelMap, modelOffset
chimera.openModels.add([m],
baseId=ids[0]+modelOffset, subid=ids[1])
modelMap.setdefault(ids, []).append(m)
m.color = getColor(cid)
m.display = display
m.lineWidth = lineWidth
m.pointSize = pointSize
m.stickScale = stickScale
m.setAllPDBHeaders(pdbHeaders)
m.surfaceOpacity = surfaceOpacity
m.ballScale = ballScale
m.vdwDensity = vdwDensity
m.autochain = autochain
m.ribbonHidesMainchain = ribbonHidesMainchain
if molInfo['optional']:
for attrName, info in molInfo['optional'].items():
for a, val in zip(items, expandSummary(info)):
if val is not None:
setattr(a, attrName, val)
resStart = len(items)
for mid, name, chain, pos, insert, rcid, lcid, ss, ribbonDrawMode, \
ribbonDisplay, label in zip(
expandSummary(resInfo['molecule']),
expandSummary(resInfo['name']),
expandSummary(resInfo['chain']),
resInfo['position'],
expandSummary(resInfo['insert']),
expandSummary(resInfo['ribbonColor']),
expandSummary(resInfo['labelColor']),
expandSummary(resInfo['ss']),
expandSummary(resInfo['ribbonDrawMode']),
expandSummary(resInfo['ribbonDisplay']),
expandSummary(resInfo['label'])
):
m = idLookup(mid)
r = m.newResidue(name, chain, pos, insert)
sm[len(items)] = r
items.append(r)
r.ribbonColor = getColor(rcid)
r.labelColor = getColor(lcid)
r.isHelix, r.isStrand, r.isTurn = ss
r.ribbonDrawMode = ribbonDrawMode
r.ribbonDisplay = ribbonDisplay
r.label = label
if resInfo['optional']:
residues = items[resStart:]
for attrName, info in resInfo['optional'].items():
for a, val in zip(residues, expandSummary(info)):
if val is not None:
setattr(a, attrName, val)
atomStart = len(items)
for rid, name, element, cid, vcid, lcid, scid, drawMode, display, \
label, surfaceDisplay, surfaceCategory, surfaceOpacity, radius, vdw, \
idatmType in zip(
expandSummary(atomInfo['residue']),
expandSummary(atomInfo['name']),
expandSummary(atomInfo['element']),
expandSummary(atomInfo['color']),
expandSummary(atomInfo['vdwColor']),
expandSummary(atomInfo['labelColor']),
expandSummary(atomInfo['surfaceColor']),
expandSummary(atomInfo['drawMode']),
expandSummary(atomInfo['display']),
expandSummary(atomInfo['label']),
expandSummary(atomInfo['surfaceDisplay']),
expandSummary(atomInfo['surfaceCategory']),
expandSummary(atomInfo['surfaceOpacity']),
expandSummary(atomInfo['radius']),
expandSummary(atomInfo['vdw']),
expandSummary(atomInfo['idatmType'])
):
r = idLookup(rid)
a = r.molecule.newAtom(name, chimera.Element(element))
sm[len(items)] = a
items.append(a)
r.addAtom(a)
a.color = getColor(cid)
a.vdwColor = getColor(vcid)
a.labelColor = getColor(lcid)
a.surfaceColor = getColor(scid)
a.drawMode = drawMode
a.display = display
a.label = label
a.surfaceDisplay = surfaceDisplay
a.surfaceCategory = surfaceCategory
a.surfaceOpacity = surfaceOpacity
a.radius = radius
a.vdw = vdw
if idatmType:
a.idatmType = idatmType
if atomInfo['optional']:
atoms = items[atomStart:]
for attrName, info in atomInfo['optional'].items():
for a, val in zip(atoms, expandSummary(info)):
if val is not None:
setattr(a, attrName, val)
for atoms, drawMode, display in zip(
bondInfo['atoms'],
expandSummary(bondInfo['drawMode']),
expandSummary(bondInfo['display'])
):
a1, a2 = [idLookup(a) for a in atoms]
b = a1.molecule.newBond(a1, a2)
sm[len(items)] = b
items.append(b)
b.drawMode = drawMode
b.display = display
from chimera import Point
for mid, crdSets in crdInfo.items():
m = idLookup(mid)
active = crdSets.pop('active')
for key, crds in crdSets.items():
coordSet = m.newCoordSet(key, len(crds))
for aid, crdString in crds:
idLookup(aid).setCoord(Point(*tuple([float(c)
for c in crdString.split()])), coordSet)
if key == active:
m.activeCoordSet = coordSet
|
[
"alex.gawronski@d60594c4-dda9-11dd-87d8-31aa04531ed5"
] |
alex.gawronski@d60594c4-dda9-11dd-87d8-31aa04531ed5
|
699107beaaa0112e11b92168d41891cd13d99d01
|
996bb6bf244dded0a778b9035e34311a4ca0fbfe
|
/docs/conf.py
|
57e9c9ffe3ccd4b6a7747a216820b2bfb4ab4c67
|
[
"MIT"
] |
permissive
|
edeposit/cz-urnnbn-api
|
7eb4229c03051300ddc375030d8233e8b3b2e95f
|
2c9d36648491bfcbf0f29bedaf6f507a51805f8e
|
refs/heads/master
| 2020-05-30T17:01:38.993229
| 2015-10-12T12:38:11
| 2015-10-12T12:38:11
| 31,064,934
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,418
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Sphinx documentation configuration for the cz-urnnbn-api project.
# NOTE(review): written for Python 2 (urllib.urlopen, list-returning filter);
# porting to Python 3 would require urllib.request and list(filter(...)).
import os
import sys
import urllib
import os.path
# Make the package importable so sphinx.ext.autodoc can find it.
sys.path.insert(0, os.path.abspath('../src/'))
extensions = [
    'sphinx.ext.autodoc',
    'sphinxcontrib.napoleon',
    'sphinx.ext.intersphinx'
]
# Cross-project reference targets used by intersphinx.
intersphinx_mapping = {
    'python': ('http://docs.python.org/2.7', None),
    'amqp': ("http://edeposit-amqp.readthedocs.org/en/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
# Sorting of items
autodoc_member_order = "bysource"
# Document all methods in classes
autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cz-urnnbn-api'
copyright = u'2015 E-deposit team'
# The full version, including alpha/beta/rc tags.
try:
    # read data from CHANGES.rst
    sys.path.insert(0, os.path.abspath('../'))
    from docs import getVersion
    release = getVersion(open("../CHANGES.rst").read())
except Exception:
    # this is here specially for readthedocs, which downloads only docs, not
    # other files
    # Fallback: scrape the released version from the PyPI page title.
    fh = urllib.urlopen("https://pypi.python.org/pypi/" + project + "/")
    release = filter(lambda x: "<title>" in x, fh.read().splitlines())
    release = release[0].split(":")[0].split()[1]
# The short X.Y version.
version = ".".join(release.split(".")[:2])
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'cz-urnnbn-api'
|
[
"bystrousak@kitakitsune.org"
] |
bystrousak@kitakitsune.org
|
be8ba9e73db30402493becf02ec4687d74472442
|
a343a405ecc557a52974fa84bc0481cc11405b14
|
/33_searchRotatedSortedArray_V2.py
|
16dcda7bdf1a8844681f255d5bae37eca5ec09c1
|
[] |
no_license
|
jennyChing/leetCode
|
926c2a5ff9f6c03152e93725b64f7bad804c415a
|
f3fc71f344cd758cfce77f16ab72992c99ab288e
|
refs/heads/master
| 2020-05-21T16:42:17.325980
| 2017-03-18T01:59:15
| 2017-03-18T01:59:15
| 61,048,131
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,147
|
py
|
'''
33. Search in Rotated Sorted Array
Suppose a sorted array is rotated at some pivot unknown to you beforehand.
(i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
You are given a target value to search. If found in the array return its index, otherwise return -1.
You may assume no duplicate exists in the array.
'''
class Solution(object):
    """Search in a rotated sorted array (no duplicates assumed)."""
    def search(self, nums, target):
        """Return an index of *target* in *nums*, or -1 if absent."""
        lo, hi = 0, len(nums) - 1
        # Narrow the window until only two candidate indices remain.
        while hi - lo > 1:
            mid = (lo + hi) // 2
            if nums[mid] == target:
                return mid
            if nums[lo] <= target < nums[mid]:
                # Target lies in a sorted left half.
                hi = mid
            elif nums[mid] <= target <= nums[hi]:
                # Target lies in a sorted right half.
                lo = mid
            elif nums[mid] > nums[lo]:
                # Left half is sorted but excludes target; go right.
                lo = mid
            else:
                hi = mid
        if target == nums[lo]:
            return lo
        if target == nums[hi]:
            return hi
        return -1
# Ad-hoc smoke test for Solution.search.
if __name__ == '__main__':
    nums = [4, 4, 5, 6, 7, 0, 1, 2]
    # NOTE(review): the reassignment below makes the list above dead code,
    # and this list contains duplicates, which the algorithm does not support.
    nums = [1, 1, 3, 1]
    res = Solution().search(nums, 3)
    print(res)
|
[
"jklife3@gmail.com"
] |
jklife3@gmail.com
|
29e07ede867807108d273fe7196934df3cefeaac
|
b0cfa5e2d84057ece11f3316f82b806f1383a9df
|
/modules/standard/whois/org_list_controller.py
|
634951f4f7f98df7331e81c2e4726dc4fdd7b993
|
[] |
no_license
|
comatech/Tyrbot
|
0b862afc834ec2d587fd5f8f67926569b109b667
|
1f8648d81c0a5f089ef7aaa6581809a47657b624
|
refs/heads/master
| 2020-07-06T06:40:13.863215
| 2019-07-21T09:51:45
| 2019-07-21T09:51:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,161
|
py
|
from core.buddy_service import BuddyService
from core.chat_blob import ChatBlob
from core.command_param_types import Int, Any, Character
from core.decorators import instance, command, event
from core.dict_object import DictObject
@instance()
class OrgListController:
    """Implements the orglist command: reports online/offline status for
    every member of an org, grouped by org rank."""
    # Buddy-list tag so orglist lookups can be removed without touching
    # buddies added by other features.
    ORGLIST_BUDDY_TYPE = "orglist"
    def __init__(self):
        # State of the lookup in progress; None when no orglist is running.
        self.orglist = None
        # Rank names per governing type.
        # NOTE(review): "Counsil" is a runtime string; do not "fix" the spelling.
        self.governing_types = DictObject({
            "Anarchism": ["Anarchist"],
            "Monarchy": ["Monarch", "Counsil", "Follower"],
            "Feudalism": ["Lord", "Knight", "Vassal", "Peasant"],
            "Republic": ["President", "Advisor", "Veteran", "Member", "Applicant"],
            "Faction": ["Director", "Board Member", "Executive", "Member", "Applicant"],
            "Department": ["President", "General", "Squad Commander", "Unit Commander", "Unit Leader", "Unit Member", "Applicant"]
        })
    def inject(self, registry):
        """Resolve service dependencies from the registry."""
        self.bot = registry.get_instance("bot")
        self.db = registry.get_instance("db")
        self.util = registry.get_instance("util")
        self.text = registry.get_instance("text")
        self.pork_service = registry.get_instance("pork_service")
        self.org_pork_service = registry.get_instance("org_pork_service")
        # NOTE(review): duplicate assignment of pork_service (also set above);
        # harmless but redundant.
        self.pork_service = registry.get_instance("pork_service")
        self.buddy_service: BuddyService = registry.get_instance("buddy_service")
        self.character_service = registry.get_instance("character_service")
    @command(command="orglist", params=[Int("org_id")], access_level="all",
             description="Show online status of characters in an org")
    def orglist_cmd(self, request, org_id):
        """Handle an orglist request given a numeric org id."""
        self.start_orglist_lookup(request.reply, org_id)
    @command(command="orglist", params=[Any("character|org_name|org_id")], access_level="all",
             description="Show online status of characters in an org")
    def orglist_character_cmd(self, request, search):
        """Handle an orglist request given a character name, org name, or
        numeric org id; resolve it to an org id, then start the lookup."""
        if search.isdigit():
            org_id = int(search)
        else:
            orgs = self.pork_service.find_orgs(search)
            num_orgs = len(orgs)
            if num_orgs == 0:
                # Not an org name; try it as a character name instead.
                char_info = self.pork_service.get_character_info(search)
                if char_info:
                    if not char_info.org_id:
                        return "<highlight>%s<end> does not appear to belong to an org." % search.capitalize()
                    else:
                        org_id = char_info.org_id
                else:
                    return "Could not find character or org <highlight>%s<end>." % search
            elif num_orgs == 1:
                org_id = orgs[0].org_id
            else:
                # Ambiguous name: list clickable candidates instead.
                blob = ""
                for org in orgs:
                    blob += self.text.make_chatcmd("%s (%d)" % (org.org_name, org.org_id), "/tell <myname> orglist %d" % org.org_id) + "\n"
                return ChatBlob("Org List (%d)" % num_orgs, blob)
        self.start_orglist_lookup(request.reply, org_id)
    def start_orglist_lookup(self, reply, org_id):
        """Download the org roster and begin resolving member online status.
        Only one orglist lookup may be in progress at a time."""
        if self.orglist:
            reply("There is an orglist already in progress.")
            return
        reply("Downloading org roster for org id %d..." % org_id)
        self.orglist = self.org_pork_service.get_org_info(org_id)
        if not self.orglist:
            reply("Could not find org with ID <highlight>%d<end>." % org_id)
            return
        self.orglist.reply = reply
        # Members move from org_members -> waiting_org_members ->
        # finished_org_members as their status is resolved.
        self.orglist.waiting_org_members = {}
        self.orglist.finished_org_members = {}
        reply("Checking online status for %d members of <highlight>%s<end>..." % (len(self.orglist.org_members), self.orglist.org_info.name))
        # process all name lookups
        while self.bot.iterate():
            pass
        self.iterate_org_members()
        self.check_for_orglist_end()
    @event(event_type=BuddyService.BUDDY_LOGON_EVENT, description="Detect online buddies for orglist command", is_hidden=True)
    def buddy_logon_event(self, event_type, event_data):
        """Mark a pending member online when their logon event arrives."""
        if self.orglist and event_data.char_id in self.orglist.waiting_org_members:
            self.update_online_status(event_data.char_id, True)
            self.buddy_service.remove_buddy(event_data.char_id, self.ORGLIST_BUDDY_TYPE)
            self.check_for_orglist_end()
    @event(event_type=BuddyService.BUDDY_LOGOFF_EVENT, description="Detect offline buddies for orglist command", is_hidden=True)
    def buddy_logoff_event(self, event_type, event_data):
        """Mark a pending member offline when their logoff event arrives."""
        if self.orglist and event_data.char_id in self.orglist.waiting_org_members:
            self.update_online_status(event_data.char_id, False)
            self.buddy_service.remove_buddy(event_data.char_id, self.ORGLIST_BUDDY_TYPE)
            self.check_for_orglist_end()
    def update_online_status(self, char_id, status):
        """Move char_id from the waiting set to the finished set, recording
        the given online status."""
        self.orglist.finished_org_members[char_id] = self.orglist.waiting_org_members[char_id]
        self.orglist.finished_org_members[char_id].online = status
        del self.orglist.waiting_org_members[char_id]
    def check_for_orglist_end(self):
        """Continue processing remaining members, or send the final report
        once every member's status has been resolved."""
        if self.orglist.org_members:
            self.iterate_org_members()
            return
        if not self.orglist.waiting_org_members:
            self.orglist.reply(self.format_result())
            self.orglist = None
    def format_result(self):
        """Render the finished lookup as a ChatBlob grouped by org rank."""
        org_ranks = {}
        for rank_name in self.governing_types[self.orglist.org_info.governing_type]:
            org_ranks[rank_name] = DictObject({
                "online_members": [],
                "offline_members": []
            })
        for char_id, org_member in self.orglist.finished_org_members.items():
            if org_member.online:
                org_ranks[org_member.org_rank_name].online_members.append(org_member)
            else:
                org_ranks[org_member.org_rank_name].offline_members.append(org_member)
        blob = ""
        num_online = 0
        num_total = 0
        for rank_name, rank_info in org_ranks.items():
            rank_num_online = len(rank_info.online_members)
            rank_num_total = len(rank_info.offline_members) + rank_num_online
            blob += "<header2>%s (%d / %d)<end>\n" % (rank_name, rank_num_online, rank_num_total)
            num_online += rank_num_online
            num_total += rank_num_total
            for org_member in rank_info.online_members:
                level = org_member.level if org_member.ai_level == 0 else "%d/<green>%d<end>" % (org_member.level, org_member.ai_level)
                blob += "%s (Level <highlight>%s<end>, %s %s <highlight>%s<end>)\n" % (org_member.name, level, org_member.gender, org_member.breed, org_member.profession)
            # Offline members are only listed by name, and only for ranks
            # small enough to keep the blob readable.
            if rank_num_total < 200:
                blob += "<font color='#555555'>" + ", ".join(map(lambda x: x.name, rank_info.offline_members)) + "<end>"
                blob += "\n"
            else:
                blob += "<font color='#555555'>Offline members ommitted for brevity<end>\n"
            blob += "\n"
        return ChatBlob("Orglist for '%s' (%d / %d)" % (self.orglist.org_info.name, num_online, num_total), blob)
    def iterate_org_members(self):
        # add org_members that we don't have online status for as buddies
        for char_id, org_member in self.orglist.org_members.copy().items():
            self.orglist.waiting_org_members[char_id] = self.orglist.org_members[char_id]
            del self.orglist.org_members[char_id]
            is_online = self.buddy_service.is_online(char_id)
            if is_online is None:
                if self.character_service.resolve_char_to_id(org_member.name):
                    self.buddy_service.add_buddy(char_id, self.ORGLIST_BUDDY_TYPE)
                else:
                    # character is inactive, set as offline
                    self.update_online_status(char_id, False)
            else:
                self.update_online_status(char_id, is_online)
            # Stop once the buddy list is nearly full; the rest are handled
            # on subsequent calls as logon/logoff events free up slots.
            if not self.buddy_list_has_available_slots():
                break
    def buddy_list_has_available_slots(self):
        # Keep a safety margin of 5 slots for other buddy-list users.
        return self.buddy_service.buddy_list_size - len(self.buddy_service.buddy_list) > 5
|
[
"email1@jkbff.com"
] |
email1@jkbff.com
|
acf3f83d7f62c84c967c3097d82a174f12128cc8
|
6296d071fb9f48d8a12b1a14b371f9c4da29f98b
|
/notebook_format/formats.py
|
9c774ef6ba757215b4411ff71f8112eb2f17798c
|
[] |
no_license
|
rosdyana/programming
|
681b14d9977cca527b8d787ffbcc4322ceca4725
|
eef81128a76519c96c7dd3e236f7a3bcd7e48d71
|
refs/heads/master
| 2021-01-18T16:07:23.625612
| 2017-03-26T23:33:30
| 2017-03-26T23:33:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 701
|
py
|
import json
import warnings
import matplotlib.pyplot as plt
from IPython.core.display import HTML
def load_style(css_style='custom1.css'):
    """Configure matplotlib defaults and return notebook CSS as HTML.

    custom1.css adapted from
    https://github.com/rlabbe/ThinkBayes/blob/master/code/custom.css
    custom2.css adapted from
    https://github.com/neilpanchal/iPython-Notebook-Theme
    """
    # Recent matplotlib versions raise deprecation warnings we don't care
    # about (axes_prop_cycle), so silence them.
    warnings.filterwarnings('ignore')
    # Apply the project's default matplotlib formatting.
    with open('plot.json') as plot_file:
        plt.rcParams.update(json.load(plot_file))
    # Read the notebook stylesheet and hand it to IPython for rendering.
    with open(css_style) as css_file:
        return HTML(css_file.read())
|
[
"ethen8181@gmail.com"
] |
ethen8181@gmail.com
|
59286046da3a22e4b8f901dfac7e4065db049967
|
42a0760a051935b2e765d57c445235221a28f49e
|
/791_custom_sort_string.py
|
b4bb0b9295a5d278dee0a6d9d69b7aff5e664df3
|
[] |
no_license
|
Th3Lourde/l33tcode
|
3bea3a3e7c633a2d5a36f7d76d5d776d275d8ee3
|
eb6b11f97a022b66716cb3890cc56c58f62e8aa4
|
refs/heads/master
| 2022-12-22T19:05:04.384645
| 2022-12-18T19:38:46
| 2022-12-18T19:38:46
| 232,450,369
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,020
|
py
|
'''
S and T are strings that contain lowercase letters.
S is composed of unique chrs.
S is sorted. Sort the characters in T that are in S.
If there are characters in T not in S, they can be
put anywhere in the resulting permutation.
Idea: go through all elements of T, put them in a dictionary.
We will want to keep track of the elements that we haven't used
and append them later.
Get the keys of the dict.
Step through S. If c ∈ S is in T, append all instances of that
character to our new permutation. Also delete that character from
our dictionary.
When we have seen all elements in S, step through the remaining
elements and add them to our permutation.
"acdbf"
"aaabbbcccdddeeeefff" <-- Some random permutation.
{"a":3, "b":3, "c":3, "d":3, "e":4, "f":3}
keys = [a,b,c,d,e,f]
stepping through S
a, is a ∈ keys?
yes, ans = "aaa"
keys = [b,c,d,e,f]
c, is c ∈ keys?
yes, ans = "aaaccc"
keys = [b,d,e,f]
d, is d ∈ keys?
yes, ans = "aaacccddd"
keys = [b,e,f]
b, is b ∈ keys?
yes, ans = "aaacccdddbbb"
keys = [e,f]
f, is f ∈ keys?
yes, ans = "aaacccdddbbbfff"
keys = [e]
Step through e, append to ans.
ans = "aaacccdddbbbfffeeee"
Test cases: Vary # in S, T, overlap.
Had s,t at zero, not zero, varied
amount of overlap, looks good, let's run it.
'''
class Solution:
    """Sort the characters of T by the custom order given in S."""
    def customSortString(self, S, T):
        """Return a permutation of T ordered by S.

        Characters of T absent from S keep their first-seen order and are
        appended after all S-ordered characters.
        """
        counts = {}
        for ch in T:
            counts[ch] = counts.get(ch, 0) + 1
        pieces = []
        # Characters of T not yet consumed by the S-ordered pass.
        leftovers = list(counts.keys())
        for ch in S:
            if ch in counts:
                leftovers.remove(ch)
                pieces.append(ch * counts[ch])
        for ch in leftovers:
            pieces.append(ch * counts[ch])
        return "".join(pieces)
# Ad-hoc smoke tests: empty order string, empty input, mixed input.
if __name__ == '__main__':
    s = Solution()
    # print(s.customSortString("cba", "aaaabalaadfahdflakjdvdcd"))
    print(s.customSortString("", "aaaabalaadfahdflakjdvdcd"))
    print(s.customSortString("cba", ""))
    print(s.customSortString("bzadc", "aaaababbdbdbdbdbdlaadfahdflakjdvdcd"))
|
[
"th3sylvia.lourde@gmail.com"
] |
th3sylvia.lourde@gmail.com
|
8707126656c4925ca4d8fbc116ad308a37a5d15e
|
964b063c2461aad267ddd991fefaf7ab53b1ca94
|
/7-kyu/remove-the-minimum/python/solution.py
|
11722bbad1643eb8888c64bafc9b75e0de5391b9
|
[] |
no_license
|
lucasbflopes/codewars-solutions
|
26c4e2cd1be19db50cc8c1d9fc117c51c82a2259
|
72ef2c02dde7bd0d5a691e04e3b2a383e892f84b
|
refs/heads/master
| 2022-03-14T01:26:41.816498
| 2019-11-23T17:17:19
| 2019-11-23T17:17:19
| 114,834,447
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
def remove_smallest(numbers):
    """Remove the first occurrence of the smallest value from *numbers*.

    The list is mutated in place and returned; an empty input yields a new
    empty list (matching the original contract).

    Args:
        numbers: list of mutually comparable values (may be empty).

    Returns:
        The same list with its first minimum removed, or [] when empty.
    """
    if not numbers:
        return []
    # The original shadowed the builtin `min` and re-implemented its scan;
    # the builtin does the same single pass, and list.remove drops only the
    # first occurrence, preserving behaviour for duplicates.
    numbers.remove(min(numbers))
    return numbers
|
[
"LucasBraganca@Lucas-MacBook-Pro.local"
] |
LucasBraganca@Lucas-MacBook-Pro.local
|
d6fa72819b42cb6d6b28b3aa70ee781aee56d539
|
e93cdd365b302dcbdb0dbef8accbd61473851354
|
/core/migrations/0018_field_picture.py
|
f10c67a1c51ad0c9dc125f0fb51f1423dbc3a9ad
|
[] |
no_license
|
jonerra/radcliff
|
7bfd1004a2e1c789c4894b89df7d6408c5dc5034
|
13957e48a96175f11318187f3e180efa4fba2294
|
refs/heads/master
| 2021-01-21T13:53:45.409435
| 2016-05-16T23:21:51
| 2016-05-16T23:21:51
| 52,484,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-14 15:56
# Adds an optional image field "Picture" to the core Field model.
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('core', '0017_auto_20160414_1533'),
    ]
    operations = [
        migrations.AddField(
            model_name='field',
            name='Picture',
            # null=True lets existing rows migrate without a value.
            # NOTE(review): upload_to=b'' presumably means uploads land at
            # the MEDIA_ROOT root — confirm against Django docs.
            field=models.ImageField(null=True, upload_to=b''),
        ),
    ]
|
[
"jonerra23@aim.com"
] |
jonerra23@aim.com
|
0c056a6628998a8dee81f09c8ff8bf4f17b95073
|
ed1165acc8937976142f00009df5a942c02dbd24
|
/database.py
|
fe32b9a6206e5442caefce688ca62b803109ef8d
|
[] |
no_license
|
Kha/shaderwall
|
1cb116b41c36ef9b20e86bfe2e16aaf4bf24e164
|
afac9d484fbee345500167cfc1e2edcf5c752b5c
|
refs/heads/master
| 2021-01-15T12:30:50.704084
| 2014-12-28T21:23:54
| 2014-12-28T21:23:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,015
|
py
|
import datetime
import random
import secrets
import string

from sqlalchemy import Column, ForeignKey, Integer, String, Text, DateTime
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

from config import connection_url
def generate_authcode():
    """Return a random 32-character [a-z0-9] authorization code.

    SECURITY FIX: authcodes gate shader edits, so they must come from a
    cryptographically secure RNG — `secrets.choice` instead of the
    predictable `random.choice` Mersenne Twister.
    """
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(secrets.choice(alphabet) for _ in range(32))
Base = declarative_base()
class Shader(Base):
    # One stored shader: source text plus an edit-authorization code,
    # timestamps, and a view counter.
    __tablename__ = 'shader'
    id = Column(Integer, primary_key=True)
    source = Column(Text)  # shader source text
    authcode = Column(String(32), default=generate_authcode)  # edit token
    created = Column(DateTime, default=datetime.datetime.now)
    updated = Column(DateTime, default=datetime.datetime.now)
    views = Column(Integer, default=0)  # page-view counter
def setup_db():
    """Create the module-level engine and all tables.

    Must be called once before db_session(). pool_recycle avoids stale
    connections being reused after the server closes them.
    """
    global engine
    engine = create_engine(connection_url, pool_recycle=14400)
    Base.metadata.create_all(engine)
def db_session():
    """Return a fresh SQLAlchemy session bound to the module-level engine.

    setup_db() must have been called first to create the engine.
    """
    session_factory = sessionmaker(bind=engine)
    return session_factory()
|
[
"lukas@schauer.so"
] |
lukas@schauer.so
|
2160e87b55ae05a8679e74bdf72ae4a4de990797
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_lofts.py
|
8a42b14b52f7fdd76fb42f78e75a7c6bdbbbc98f
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
#calss header
class _LOFTS():
def __init__(self,):
self.name = "LOFTS"
self.definitions = loft
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['loft']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
3e8087f0c76fb300a58687be1be04060a5486a08
|
e66fa131cff76fa3fe70e7b6649fa1332159c781
|
/ch10/statisticsModule.py
|
457ed3ce44eb38e2f21007c4ca924d22a6b5f722
|
[] |
no_license
|
chc1129/python_tutorial
|
c6d97c6671a7952d8a7b838ccb8aa3c352fa6881
|
2f8b389731bafbda73c766c095d1eaadb0f99a1c
|
refs/heads/main
| 2023-08-24T07:00:43.424652
| 2021-10-28T16:07:57
| 2021-10-28T16:07:57
| 341,532,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
# Minimal demo of the stdlib statistics module on a fixed sample.
import statistics
data = [2.74, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5]
print(statistics.mean(data))      # arithmetic mean
print(statistics.median(data))    # middle value of the sorted sample
print(statistics.variance(data))  # sample variance (n - 1 denominator)
|
[
"chc1129@gmail.com"
] |
chc1129@gmail.com
|
f00e73727670667a1e871603bb509b79a7a90568
|
d190750d6cb34e9d86ae96724cf4b56a2f57a74a
|
/tests/r/test_saving.py
|
d5ffb1d760e2806452c3283ac3a9f4c3fa58f4c0
|
[
"Apache-2.0"
] |
permissive
|
ROAD2018/observations
|
a119f61a48213d791de0620804adb8d21c2ad9fb
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
refs/heads/master
| 2021-09-24T04:28:02.725245
| 2018-09-16T23:06:30
| 2018-09-16T23:06:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.saving import saving
def test_saving():
  """Test module saving.py by downloading saving.csv.

  Checks that the extracted data has 100 rows and 7 columns, and removes
  the temporary download directory whether or not the check passes.
  """
  test_path = tempfile.mkdtemp()
  try:
    x_train, metadata = saving(test_path)
    # The saving dataset is expected to be 100 observations x 7 variables.
    assert x_train.shape == (100, 7)
  finally:
    # BUG FIX: the original used `raise()` — which raises a TypeError and
    # masks the real assertion failure — and leaked the temp directory on
    # success. Clean up unconditionally; any exception propagates unchanged.
    shutil.rmtree(test_path)
|
[
"dustinviettran@gmail.com"
] |
dustinviettran@gmail.com
|
119d98cbfe961151ff8f55209511e83d900e5f00
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/point-mutations/80163ab58f714c95a8b2ee96815a1fcb.py
|
1d9ab6a4040ca28d3ccf3327a89a15e4fdbfb616
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
class DNA(object):
    """A DNA strand supporting Hamming-distance comparison."""
    def __init__(self, strand):
        # Nucleotide sequence this object represents.
        self.strand = strand
    def hamming_distance(self, strand):
        """Count differing positions over the shorter of the two strands."""
        return sum(1 for a, b in zip(strand, self.strand) if a != b)
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
c4a8cd354d040c15afd379aff695a191ded6cdc5
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_redistributing.py
|
7376d9ca1ac4caa9854da9d4a805a8cb3a3c1b21
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
from xai.brain.wordbase.verbs._redistribute import _REDISTRIBUTE
# class header
class _REDISTRIBUTING(_REDISTRIBUTE, ):
    """Inflected verb form "redistributing"; inherits word data from
    the base-form class _REDISTRIBUTE."""
    def __init__(self,):
        _REDISTRIBUTE.__init__(self)
        self.name = "REDISTRIBUTING"
        self.specie = 'verbs'
        self.basic = "redistribute"  # dictionary base form
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
39fa71b52214a84429c1e12d21c534e2b0f13a00
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/102/usersdata/195/49999/submittedfiles/av1_2.py
|
aa7d3b44f7edf2b0de15cd659ec5a4cc9d377f3f
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
# -*- coding: utf-8 -*-
# Reads four integers and prints 'V' (verdadeiro/true) when a != b,
# a == c and a != d all hold; otherwise prints 'F' (falso/false).
import math
a=int(input('digite a :'))
b=int(input('digte b:'))
c=int(input('digite c:'))
d=int(input('digite d:'))
if a!=b and a==c and a!=d:
    # BUG FIX: V and F were undefined bare names (NameError at runtime);
    # the intent was to print the literal letters.
    print('V')
else:
    print('F')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
6619ecbf5ce70002cdfdedb21633647ee067064e
|
3d8aae8aa43e0fbd8a8cffc4fa2cd67419059d66
|
/module_PyQt/xtest/test_echo_array.py
|
33ca00c7a33595dfb60b15d778273c5fa491ec50
|
[] |
no_license
|
onitonitonito/k_mooc_reboot
|
b8273b7e9fa3fc5958bca57c39f2f3a9108964f1
|
68c8c6a94adc99005fb0fc8c38c416f902d37888
|
refs/heads/main
| 2021-07-21T22:32:26.080330
| 2021-07-04T02:22:08
| 2021-07-04T02:22:08
| 109,581,972
| 0
| 0
| null | 2020-05-05T22:28:26
| 2017-11-05T13:30:03
|
Python
|
UTF-8
|
Python
| false
| false
| 945
|
py
|
"""
# how to stack echo file
"""
print(__doc__)
import random
import _add_syspath_root
from assets.config import dir_statics
from string import ascii_letters
SYMBOLS = [sym for sym in "!@#$%^&*()_-+=,.?/|;:{}~{}" + ascii_letters]
RANDOM_START = (0, 39)
RANDOM_END = (40, 78)
LINES = 50
REPEAT = 10
FILE_NAME = dir_statics + 'test_echo_array.txt'
def main():
    """Print REPEAT batches of random echo lines, writing each batch to disk."""
    for i in range(REPEAT):
        print(*get_echo_array(write=True))
def get_echo_array(write=False):
    """Build LINES random symbol strings; optionally persist them.

    Each entry is a newline-prefixed random slice of the shuffled SYMBOLS
    pool. When *write* is true, the joined entries overwrite FILE_NAME.
    NOTE: shuffles the module-level SYMBOLS list in place.
    """
    echo_array = []
    for i in range(LINES):
        random.shuffle(SYMBOLS)
        # Slice start in [0, 39] and end in [40, 78], so end > start always.
        x1, x2 = (random.randint(*RANDOM_START), random.randint(*RANDOM_END))
        string_shuffled = "".join(SYMBOLS)
        add_string = string_shuffled[x1:x2]
        echo_array.append(f"\n{add_string}")
    if write:
        with open(file=FILE_NAME, mode='w', encoding='utf8') as f:
            f.write("".join(echo_array))
    return echo_array
# Script entry point.
if __name__ == '__main__':
    main()
|
[
"nitt0x0@gmail.com"
] |
nitt0x0@gmail.com
|
c4ea0a8154024f7e95ffa9605300406c7e7de34f
|
e1b8fb9a5500516f28d3d7e9a5f259c49ef35f14
|
/top/api/rest/UserGetRequest.py
|
b3bd7048fb45379401d4afc382aa2472248e42f6
|
[] |
no_license
|
htom78/taobao_comet_py
|
9224dbca1a413a54bcc5569873e4c7a9fc9ba059
|
ad8b2e983a14d3ab7665244449f79dd72f390815
|
refs/heads/master
| 2020-05-17T10:47:28.369191
| 2013-08-27T08:50:59
| 2013-08-27T08:50:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
'''
Created by auto_sdk on 2013-06-16 16:36:02
'''
from top.api.base import RestApi
class UserGetRequest(RestApi):
    """Request wrapper for the taobao.user.get REST API."""
    def __init__(self,domain='gw.api.taobao.com',port=80):
        RestApi.__init__(self,domain, port)
        # API parameters; populated by the caller before sending the request.
        self.fields = None  # comma-separated result fields to return
        self.nick = None    # user nickname to look up
    def getapiname(self):
        return 'taobao.user.get'
|
[
"tomhu@ekupeng.com"
] |
tomhu@ekupeng.com
|
bf4d638465250a538af5ac031e32f9596dbd63e1
|
4b773103a5000a0a980739dd65426878c90dc098
|
/core/models.py
|
9e45dbcac258c6b24c9a9cf4ee079215ea16be1f
|
[] |
no_license
|
gpchelkin/grading_system
|
6ef693a89700fb86ce9567e33f697fb529c34297
|
e34f85fd1d9ac6bad892222d68516bbab5d7cf23
|
refs/heads/master
| 2020-06-15T07:10:27.193190
| 2016-12-20T14:12:32
| 2016-12-20T14:12:32
| 75,315,919
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,319
|
py
|
# coding=utf-8
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.models import AbstractUser
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from curriculum.models import ClassesType
SEMESTER_CHOICES = (
('1', '1'),
('2', '2'),
('3', '3'),
('4', '4'),
('5', '5'),
('6', '6'),
('7', '7'),
('8', '8'),
('9', '9'),
('10', '10'),
('11', '11'),
('12', '12'),
)
COURSE_CHOICE = (
('1', '1'),
('2', '2'),
('3', '3'),
('4', '4'),
('5', '5'),
('6', '6'),
)
class User(AbstractUser):
    """Auth user extended with role flags (student, teacher, or neither)."""
    # Verbose names are Russian runtime strings; not translated.
    is_student = models.BooleanField("Этот пользователь студент", default=False)
    is_teacher = models.BooleanField("Этот пользователь учитель", default=False)
class Group(models.Model):
    """A student group."""
    name = models.CharField(verbose_name=u'Группа', max_length=10)
    def __unicode__(self):
        # NOTE(review): __unicode__ is Python 2-only; Python 3 Django uses __str__.
        return u'{}'.format(self.name)
class Student(models.Model):
    """A student: enrollment years, group, and the linked auth user."""
    year_start = models.IntegerField(verbose_name=u'Год поступления', validators=[MaxValueValidator(3000), MinValueValidator(1970)])
    year_end = models.IntegerField(verbose_name=u'Год окончания', validators=[MaxValueValidator(3000), MinValueValidator(1970)])
    user_group_full_name = models.ForeignKey(verbose_name=u'Группа студента', to=Group)
    user_connection = models.OneToOneField(verbose_name=u'Пользователь', to=User)
    def __unicode__(self):
        # "<first> <last>" of the linked user account.
        return u'{} {}'.format(self.user_connection.first_name, self.user_connection.last_name)
class Subject(models.Model):
    """A taught subject, linked to its groups and a class type."""
    name = models.CharField(verbose_name=u'Предмет', max_length=50)
    subject_group = models.ManyToManyField(verbose_name=u'Группы', to=Group)
    subject_type = models.ForeignKey(verbose_name=u'Тип предмета', to=ClassesType)
    def __unicode__(self):
        return u'{} - {}'.format(self.name, self.subject_type)
class Teacher(models.Model):
    """A teacher: taught subjects plus the linked auth user."""
    all_subjects = models.ManyToManyField(verbose_name=u'Предметы', to=Subject)
    user_connection = models.OneToOneField(User)
    def __unicode__(self):
        return u'{} {}'.format(self.user_connection.first_name, self.user_connection.last_name)
|
[
"a.s.maratkanov@gmail.com"
] |
a.s.maratkanov@gmail.com
|
0fd11a4dfafdc5db0041929e7300c6f3c2bac9da
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/oepiudBYC7PT7TXAM_12.py
|
b4db6ac9bfd7b1baa70fc45eadc33d11817f752d
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
def parse_roman_numeral(num):
    """Convert a Roman numeral string to its integer value."""
    values = {'M': 1000, 'D': 500, 'C': 100, 'L': 50, 'X': 10, 'V': 5, 'I': 1}
    total = 0
    for i, ch in enumerate(num):
        # A symbol is subtracted when a strictly larger one follows it
        # (e.g. the I in IV); otherwise it is added.
        if i + 1 < len(num) and values[ch] < values[num[i + 1]]:
            total -= values[ch]
        else:
            total += values[ch]
    return total
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
aa9043d32112f48f44454ef11cc5e8715ec14cc7
|
f320d83c1b6f4854cb808a17a2dbb8827051636e
|
/setfreq/ParaSetTest.py
|
d9afb304b9f8f27e4bf782ec4f1758761566c8d9
|
[] |
no_license
|
JiahuiSun/Digital-Signal-Analyzer-based-on-SDR
|
f5214f2b2b36d4a24896f7d0c4a712979c236fd0
|
1e57dbb9cfcec7c0cb0a3f2335f3e68ecd2694d6
|
refs/heads/master
| 2020-04-04T19:50:16.131461
| 2018-11-06T12:51:42
| 2018-11-06T12:51:42
| 156,222,484
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 179
|
py
|
# Manual test script: pushes fixed RX/TX frequencies to the parameter server.
import ParaSetCliSock
import time
# Target frequencies (units per the SDR parameter server — presumably MHz
# or kHz; TODO confirm against ParaSetCliSock).
f1 = 1500
f2 = 1600
##while True:
ParaSetCliSock.set_param('rx_freq',f1)
##time.sleep(5)
ParaSetCliSock.set_param('tx_freq',f2)
##time.sleep(5)
|
[
"you@example.com"
] |
you@example.com
|
6cf93c00ab4fa724d6ba734a936d3ff553a95395
|
6e3f97742562ff3cdf9372f54320c78e5c72fe97
|
/apps/partidos/serializers.py
|
984cd9ae22ba0f32acb4b3a184c75e363f0e068c
|
[] |
no_license
|
desarrollosimagos/exit_poll
|
6892e9ad504691fa44eb5b599881c7fb044b260d
|
e4572e2b222cf6b5de8a221ac300ccb0062f8e41
|
refs/heads/master
| 2021-01-01T16:36:31.726898
| 2017-07-20T18:52:30
| 2017-07-20T18:52:30
| 97,868,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
from rest_framework import serializers
from .models import Partidos
class PartidosSerializer(serializers.ModelSerializer):
    """
    Serializer for the `Partidos` model; exposes the fields listed below.
    """
    class Meta:
        model = Partidos
        fields = ('id', 'n_partidos','siglas','foto_partido','nom_presidente',
                  'ape_presidente', 'correo','twitter','telefono','partido_binario',
                  'user_create','user_update','fecha_create','fecha_update',)
|
[
"ing.omar.orozco@gmail.com"
] |
ing.omar.orozco@gmail.com
|
699ab4ccd658741ab0ee7d42f5a80900e4a99ca3
|
a137466dbaa5d704cd5a15ab9dfd17907b24be04
|
/utility/aggregator.py
|
dd85c5817e5daa2bc1e2d53f3a9685154e121927
|
[
"Apache-2.0"
] |
permissive
|
xlnwel/g2rl
|
92c15b8b9d0cd75b6d2dc8df20e6717e1a621ff6
|
e1261fdd2ce70724a99ddd174616cf013917b241
|
refs/heads/master
| 2023-08-30T10:29:44.169523
| 2021-11-08T07:50:43
| 2021-11-08T07:50:43
| 422,582,891
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
class Aggregator:
    """Accumulates values and reports their running mean.

    `total` and `last` are lifetime statistics that survive reset();
    only the windowed `sum`/`count` used by average() are cleared.
    """
    def __init__(self):
        self.total = 0
        self.last = 0
        self.reset()
    def reset(self):
        """Clear the current averaging window (lifetime stats are kept)."""
        self.sum = 0.
        self.count = 0
    def average(self):
        """Mean of values added since the last reset; 0. when empty."""
        if self.count:
            return self.sum / self.count
        return 0.
    def add(self, v):
        """Record *v* in both the lifetime and the windowed accumulators."""
        self.last = v
        self.total += v
        self.sum += v
        self.count += 1
|
[
"122134545@qq.com"
] |
122134545@qq.com
|
4d5ea0573f752d71751f6d8611db7e239774bfc2
|
ea5a801283e5c8dd822d755aa8824e9fd17c9ecf
|
/nomuraholdings/spiders/nomura.py
|
9dde6136aea2930291b6ca2abd5eacd2b258ca05
|
[] |
no_license
|
daniel-kanchev/nomuraholdings
|
3a5c98c2540fac135346267504eedd8bc8375ee1
|
4cf5faeeba53cf1122b1efe7698bac71af21b8fb
|
refs/heads/main
| 2023-03-03T14:54:43.454865
| 2021-02-11T07:33:46
| 2021-02-11T07:33:46
| 337,959,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,595
|
py
|
import scrapy
from scrapy.loader import ItemLoader
from itemloaders.processors import TakeFirst
from datetime import datetime
from nomuraholdings.items import Article
class NomuraSpider(scrapy.Spider):
    """Scrapy spider for Nomura Holdings press releases.

    Crawl flow: news index -> per-year listing pages -> individual articles.
    """
    name = 'nomura'
    start_urls = ['https://www.nomuraholdings.com/news/nr/index.html']
    def parse(self, response):
        """Follow each year link from the press-release index page."""
        links = response.xpath('//table[@class="js-selectList"]//a/@href').getall()
        yield from response.follow_all(links, self.parse_year)
    def parse_year(self, response):
        """Follow every article link on a yearly listing page."""
        links = response.xpath('//p[@class="c-List-info__link"]/a/@href').getall()
        yield from response.follow_all(links, self.parse_article)
    def parse_article(self, response):
        """Extract title, date, link and body text into an Article item."""
        # PDF releases have no parseable HTML body; skip them.
        if 'pdf' in response.url:
            return
        item = ItemLoader(Article())
        item.default_output_processor = TakeFirst()
        title = response.xpath('//h1[@class="u-h1"]/text()').get()
        if title:
            title = title.strip()
        else:
            # No title means this is not a regular article page; drop it.
            return
        date = response.xpath('//div[@class="news-header__date"]/p/text()[1]').get()
        if date:
            # Normalize e.g. "February 11, 2021" to "2021/02/11".
            date = datetime.strptime(date.strip(), '%B %d, %Y')
            date = date.strftime('%Y/%m/%d')
        content = response.xpath('//p[@class="news-paragraph"]//text()').getall()
        content = [text for text in content if text.strip()]
        content = "\n".join(content).strip()
        item.add_value('title', title)
        item.add_value('date', date)
        item.add_value('link', response.url)
        item.add_value('content', content)
        return item.load_item()
|
[
"daniel.kanchev@adata.pro"
] |
daniel.kanchev@adata.pro
|
22c627fae44f0079e535d66064c91480db572937
|
7bad6ecb04b57f4a692426bb23766cf0b5916d3d
|
/microdrop/core_plugins/command_plugin/plugin.py
|
8fa27ab553a21e769406c2338ae015ca0488a243
|
[
"BSD-3-Clause"
] |
permissive
|
cfobel/microdrop
|
b943bed4a765c5419b6dead8344dbff420af283e
|
721f2c9c040406bb3c70a9928923aad10a725b96
|
refs/heads/master
| 2020-04-15T18:50:20.035858
| 2018-10-23T14:26:58
| 2018-10-23T14:31:04
| 164,927,192
| 0
| 0
|
BSD-3-Clause
| 2019-01-09T19:47:11
| 2019-01-09T19:47:10
| null |
UTF-8
|
Python
| false
| false
| 3,735
|
py
|
from multiprocessing import Process
import logging
import sys
from zmq_plugin.plugin import Plugin as ZmqPlugin
from zmq_plugin.schema import decode_content_data
import pandas as pd
from logging_helpers import _L #: .. versionadded:: 2.20
logger = logging.getLogger(__name__)
class CommandZmqPlugin(ZmqPlugin):
    '''
    API for registering commands.

    Commands are kept in a pandas DataFrame with one row per registered
    command; a command is identified by (namespace, plugin_name,
    command_name, title).
    '''
    def __init__(self, parent, *args, **kwargs):
        # Owning object (semantics defined by the caller); stored as-is.
        self.parent = parent
        self.control_board = None
        # Command registry: one row per registered command.
        self._commands = pd.DataFrame(None, columns=['namespace',
                                                     'plugin_name',
                                                     'command_name', 'title'])
        super(CommandZmqPlugin, self).__init__(*args, **kwargs)
    def on_execute__unregister_command(self, request):
        '''Remove the single command matching all four identifying fields.'''
        data = decode_content_data(request)
        commands = self._commands
        # Boolean-mask lookup of the row(s) matching every identifying field.
        ix = commands.loc[(commands.namespace == data['namespace']) &
                          (commands.plugin_name == data['plugin_name']) &
                          (commands.command_name == data['command_name']) &
                          (commands.title == data['title'])].index
        self._commands.drop(ix, inplace=True)
        # Keep the index contiguous after the drop.
        self._commands.reset_index(drop=True, inplace=True)
        return self.commands
    def on_execute__register_command(self, request):
        '''Register a command; plugin name defaults to the request sender.'''
        data = decode_content_data(request)
        plugin_name = data.get('plugin_name', request['header']['source'])
        return self.register_command(plugin_name, data['command_name'],
                                     namespace=data.get('namespace', ''),
                                     title=data.get('title'))
    def on_execute__get_commands(self, request):
        '''Return a copy of the current command registry.'''
        return self.commands
    def register_command(self, plugin_name, command_name, namespace='',
                         title=None):
        '''
        Register command.
        Each command is unique by:
            (namespace, plugin_name, command_name)
        '''
        if title is None:
            # Default title: capitalized command name with underscores as spaces.
            title = (command_name[:1].upper() +
                     command_name[1:]).replace('_', ' ')
        # Map registry column names onto the new row's values.
        row_i = dict(zip(self._commands, [namespace, plugin_name, command_name,
                                          title]))
        # NOTE(review): DataFrame.append was removed in pandas 2.0 — this code
        # requires pandas < 2; migrating would use pd.concat instead.
        self._commands = self._commands.append(row_i, ignore_index=True)
        return self.commands
    @property
    def commands(self):
        '''
        Returns
        -------
        pd.Series
            Series of command groups, where each group name maps to a series of
            commands.
        '''
        # Copy so callers cannot mutate the internal registry.
        return self._commands.copy()
def parse_args(args=None):
    """Parse command-line arguments for the ZeroMQ plugin process.

    Parameters
    ----------
    args : list of str, optional
        Argument list *excluding* the program name.  Defaults to
        ``sys.argv[1:]``.

    Returns
    -------
    argparse.Namespace
        Parsed arguments with ``hub_uri``, ``name`` and ``log_level``
        (converted to the corresponding :mod:`logging` level constant).
    """
    from argparse import ArgumentParser
    if args is None:
        args = sys.argv[1:]
    parser = ArgumentParser(description='ZeroMQ Plugin process.')
    log_levels = ('critical', 'error', 'warning', 'info', 'debug', 'notset')
    parser.add_argument('-l', '--log-level', type=str, choices=log_levels,
                        default='info')
    parser.add_argument('hub_uri')
    parser.add_argument('name', type=str)
    # Bug fix: the explicit `args` list was previously ignored —
    # `parser.parse_args()` always read `sys.argv` regardless of the caller's
    # argument.  Pass the list through so programmatic callers work.
    args = parser.parse_args(args)
    # Map the textual level (e.g. 'info') to the logging module constant.
    args.log_level = getattr(logging, args.log_level.upper())
    return args
# Script entry point: parse the CLI, configure logging and run the plugin in
# the current process.
if __name__ == '__main__':
    from zmq_plugin.bin.plugin import run_plugin
    # NOTE(review): this helper is never called, and `args=()` forwards no
    # arguments to `run_plugin`, so it would fail if it were ever invoked —
    # dead/broken code left as-is.
    def run_plugin_process(uri, name, subscribe_options, log_level):
        plugin_process = Process(target=run_plugin,
                                 args=())
        plugin_process.daemon = False
        plugin_process.start()
    args = parse_args()
    logging.basicConfig(level=args.log_level)
    # parent=None: plugin runs standalone.  The remaining positional args are
    # forwarded to ZmqPlugin — presumably (name, hub_uri, subscribe_options);
    # TODO confirm against the zmq_plugin API.
    task = CommandZmqPlugin(None, args.name, args.hub_uri, {})
    run_plugin(task, args.log_level)
|
[
"christian@fobel.net"
] |
christian@fobel.net
|
c94b34d4d6623b867a7cc91d3672366334dd307f
|
9cb6a655735b954eac4feeb006b174b8a5d759f4
|
/test/test_sighash.py
|
1e3dbf744d8ce0de53675357bfa42832889eef0d
|
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
Conio/pybitcointools
|
c68639439c5fc84694bb28213cc6d9c21198ad94
|
3025e73092789121ecf5aef4e6815be24f475735
|
refs/heads/master
| 2021-11-27T07:55:38.707876
| 2021-11-23T10:17:32
| 2021-11-23T10:17:32
| 53,937,108
| 20
| 13
| null | 2017-11-15T01:03:28
| 2016-03-15T10:55:22
|
Python
|
UTF-8
|
Python
| false
| false
| 3,375
|
py
|
import unittest
import bitcoin
from bitcoin import *
class TestTransaction(unittest.TestCase):
    """Exercises script (de)serialization, bech32 multisig signing and
    segwit transaction hashing from the `bitcoin` package."""
    @classmethod
    def setUpClass(cls):
        print("Attempting transaction creation")
    def test3(self):
        # Deserialize a 2-of-3 multisig redeem script (hex blob) and print it.
        print(
            deserialize_script('52210248905f94419795ea33cd42474e10bfaddc3ee5f0f0c66ecc29238fea6555f29c2103fde505b2f67b2c8ec17c7540bbc9aafb527366c0863d655d03a00e5f3c4bbbd121023f96141f1bec4df22465539ecd807762e2c96b75e436540d3e7654d461b62a1953ae')
        )
    def test2(self):
        # Check pubkey -> bech32 address derivation for the regtest prefix.
        pub = '029b06d73294a2fe59dd5d2156f9d7bf1cadc8e741b39fff834d39a055ab8f5c97'
        addr = 'bcrt1q8s2hkukgulyf575hakxazset8v2z5ltxnvepy8'
        self.assertEqual(pubkey_to_bech32_address(pub, prefix='bcrt'), addr)
        print(deserialize_script('00141976a9141d0f172a0ecb48aee1be1f2687d2963ae33f71a188ac'))
        print(hash160(binascii.unhexlify('025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee6357')))
    def test_multisig(self):
        # Build a 2-of-2 witness program from two deterministic keys, attach a
        # very large OP_RETURN-style output, and round-trip + sign it.
        priv1 = sha256(b'sighash_priv_key_text')
        priv2 = sha256(b'sighash_priv_key_text_2')
        pub1 = compress(privtopub(priv1))
        pub2 = compress(privtopub(priv2))
        witness_program = mk_multisig_script([pub1, pub2], 2, 2)
        addr = bech32_script_to_address(witness_program, prefix='bc')
        print('addr', addr)
        recipient = '3AbjFnwcChgaAGsPx28hnpDWF3yUobvTFT'
        amount = 0.00028295
        # Spend one segwit input, paying amount minus a 4500-satoshi fee.
        transaction_to_sign = mktx(
            {
                'output': '99911f6ddabc51290a45194f268d7e618284d7f42d79a2b57bee9bc5b11787c5:0',
                'segregated': True
            },
            [
                {'address': recipient, 'value': int(amount * 10**8) - 4500}
            ]
        )
        tx = bitcoin.deserialize(transaction_to_sign)
        """ test big opreturn size"""
        # ~1 MB of random push data to exercise large-script serialization.
        bigscript = [os.urandom(1024).hex() for _ in range(0, 1000)]
        tx['outs'].append(
            {
                'value': 0,
                'script': '00' + bitcoin.serialize_script(bigscript)
            }
        )
        # Round-trip: the deserialized script must match what was serialized.
        txs = bitcoin.serialize(tx)
        tx = bitcoin.deserialize(txs)
        s = bitcoin.deserialize_script(tx['outs'][-1]['script'])
        self.assertEqual(s[0], None)
        self.assertEqual(s[1:], bigscript)
        # Sign with both keys (one using SIGHASH_NONE|ANYONECANPAY) and apply.
        sig1 = bech32_multisign(
            transaction_to_sign, 0, priv1, int(amount * 10 ** 8),
            witness_program, hashcode=SIGHASH_NONE|SIGHASH_ANYONECANPAY
        )
        sig2 = bech32_multisign(transaction_to_sign, 0, priv2, int(amount * 10 ** 8), witness_program)
        tx = apply_bech32_multisignatures(transaction_to_sign, 0, witness_program, [sig1, sig2])
        print(tx)
    def test_hash_opreturn(self):
        # Hash a fixed segwit transaction, then re-hash it with its final
        # output removed, printing both txids for manual comparison.
        tx = '0100000000010122371ebb7a0432f0d506c35c8a78da70d29258dd50fc870426b3ced80839ebe50100000000fdffffff03983a00000000000017a9148380f47f331682e3683cc0628b04d3e1c918af8887464d00000000000017a914cc2008ff35eea6390b32dde0cf5998fd1016fcec8700000000000000005100160014636f6e696f5f66726f7a656e5f6f75747075747301010102040100000020e5eb3908d8ceb3260487fc50dd5892d270da788a5cc306d5f032047abb1e372202010008de8700000000000002020002000000000000'
        txhash = bitcoin.segwit_txhash(tx)
        print(txhash)
        des_tx = bitcoin.deserialize(tx)
        des_tx['outs'] = des_tx['outs'][:-1]
        tx2 = bitcoin.serialize(des_tx)
        txhash = bitcoin.segwit_txhash(tx2)
        print(txhash)
|
[
"guido.dassori@gmail.com"
] |
guido.dassori@gmail.com
|
09a20fc08bd4c36c320dd80bd12158ff8b3dd30e
|
0549916a0d04943a0d944a2794e103aed2d1299c
|
/docs/conf.py
|
1e9f35385b8904bab54fa8a27ed5d249e4bc4fcb
|
[
"MIT"
] |
permissive
|
stephtdouglas/thejoker
|
20d6eac36520477b0478ae84effa519fde625f2f
|
b1f2681cd72b6c04d19b24aadf818639c5f59ad0
|
refs/heads/master
| 2020-03-18T10:26:55.842576
| 2018-04-16T20:24:47
| 2018-04-16T20:24:47
| 134,612,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,806
|
py
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_sphinx" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
import datetime
import os
import sys
try:
    import astropy_helpers
except ImportError:
    # Building from inside the docs/ directory?
    if os.path.basename(os.getcwd()) == 'docs':
        a_h_path = os.path.abspath(os.path.join('..', 'astropy_helpers'))
        if os.path.isdir(a_h_path):
            sys.path.insert(1, a_h_path)
# Load all of the global Astropy configuration
from astropy_helpers.sphinx.conf import *
# Get configuration information from setup.cfg
try:
    from ConfigParser import ConfigParser
except ImportError:
    from configparser import ConfigParser
conf = ConfigParser()
conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')])
setup_cfg = dict(conf.items('metadata'))
# see if we're running on travis (CI env var is set by most CI providers)
if 'CI' in os.environ:
    ON_TRAVIS = True
else:
    ON_TRAVIS = False
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.2'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("x.y.z")` here.
# check_sphinx_version("1.2.1")
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates')
exclude_patterns.append('**.ipynb_checkpoints')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
# TODO: swap this once bugfix is in nbsphinx
# see: https://github.com/spatialaudio/nbsphinx/issues/38
# rst_epilog = ""
rst_epilog += """
.. |thejoker| replace:: *The Joker*
"""
# Add h5py to intersphinx mapping
intersphinx_mapping['h5py'] = ('http://docs.h5py.org/en/latest/', None)
# -- Project information ------------------------------------------------------
# This does not *have* to match the package name, but typically does
project = setup_cfg['package_name']
author = setup_cfg['author']
copyright = '{0}, {1}'.format(
    datetime.datetime.now().year, setup_cfg['author'])
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
__import__(setup_cfg['package_name'])
package = sys.modules[setup_cfg['package_name']]
# The short X.Y version.
version = package.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = package.__version__
# Use astropy plot style (LaTeX rendering disabled on CI, where TeX may be absent)
plot_rcparams = dict()
if not ON_TRAVIS:
    plot_rcparams['text.usetex'] = True
plot_rcparams['savefig.facecolor'] = 'none'
plot_rcparams['savefig.bbox'] = 'tight'
plot_apply_rcparams = True
plot_formats = [('png', 512)]
# -- Options for HTML output --------------------------------------------------
# A NOTE ON HTML THEMES
# The global astropy configuration uses a custom theme, 'bootstrap-astropy',
# which is installed along with astropy. A different theme can be used or
# the options for this theme can be modified by overriding some of the
# variables set in the global configuration. The variables set in the
# global configuration are listed below, commented out.
# Please update these texts to match the name of your package.
html_theme_options = {
    'logotext1': 'The',  # white,  semi-bold
    'logotext2': 'Joker',  # orange, light
    'logotext3': ':docs'   # white,  light
    }
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
#html_theme_path = []
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = None
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '_static'))
html_favicon = os.path.join(path, 'icon.ico')
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# Static files to copy after template files
html_static_path = ['_static']
html_style = 'thejoker.css'
# -- Options for LaTeX output -------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
                    author, 'manual')]
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
              [author], 1)]
# -- Options for the edit_on_github extension ---------------------------------
# NOTE(review): eval() of a config value — acceptable only because setup.cfg is
# trusted, developer-controlled input; a string comparison would be safer.
if eval(setup_cfg.get('edit_on_github')):
    extensions += ['astropy_helpers.sphinx.ext.edit_on_github']
    versionmod = __import__(setup_cfg['package_name'] + '.version')
    edit_on_github_project = setup_cfg['github_project']
    if versionmod.version.release:
        edit_on_github_branch = "v" + versionmod.version.version
    else:
        edit_on_github_branch = "master"
    edit_on_github_source_root = ""
    edit_on_github_doc_root = "docs"
# -- Resolving issue number to links in changelog -----------------------------
github_issues_url = 'https://github.com/{0}/issues/'.format(setup_cfg['github_project'])
# -- Custom --
# add nbsphinx extension
extensions += ['nbsphinx']
extensions += ['IPython.sphinxext.ipython_console_highlighting']
# try:
#     source_parsers['.ipynb'] = 'nbsphinx.NotebookParser'
# except NameError:
#     source_parsers = {'.ipynb': 'nbsphinx.NotebookParser'}
|
[
"adrian.prw@gmail.com"
] |
adrian.prw@gmail.com
|
198f417c20f548b5837d62bb3ea3650d6729a7b7
|
a66460a46611483dfbdc94c7996893f427e60d97
|
/ansible/my_env/lib/python2.7/site-packages/ansible/modules/source_control/github_hooks.py
|
df0f0f2199b9d4b047c892050d52ba99640ae995
|
[
"MIT"
] |
permissive
|
otus-devops-2019-02/yyashkin_infra
|
06b57807dde26f94f501828c07503d6bf1d70816
|
0cd0c003884155ac922e3e301305ac202de7028c
|
refs/heads/master
| 2020-04-29T02:42:22.056724
| 2019-05-15T16:24:35
| 2019-05-15T16:24:35
| 175,780,718
| 0
| 0
|
MIT
| 2019-05-15T16:24:36
| 2019-03-15T08:37:35
|
HCL
|
UTF-8
|
Python
| false
| false
| 5,874
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Phillip Gentry <phillip@cx.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: github_hooks
short_description: Manages GitHub service hooks.
description:
- Adds service hooks and removes service hooks that have an error status.
version_added: "1.4"
options:
user:
description:
- Github username.
required: true
oauthkey:
description:
- The oauth key provided by GitHub. It can be found/generated on GitHub under "Edit Your Profile" >> "Developer settings" >> "Personal Access Tokens"
required: true
repo:
description:
- >
This is the API url for the repository you want to manage hooks for. It should be in the form of: https://api.github.com/repos/user:/repo:.
Note this is different than the normal repo url.
required: true
hookurl:
description:
- When creating a new hook, this is the url that you want GitHub to post to. It is only required when creating a new hook.
required: false
action:
description:
- This tells the githooks module what you want it to do.
required: true
choices: [ "create", "cleanall", "list", "clean504" ]
validate_certs:
description:
- If C(no), SSL certificates for the target repo will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
type: bool
content_type:
description:
- Content type to use for requests made to the webhook
required: false
default: 'json'
choices: ['json', 'form']
author: "Phillip Gentry, CX Inc (@pcgentry)"
'''
EXAMPLES = '''
# Example creating a new service hook. It ignores duplicates.
- github_hooks:
action: create
hookurl: http://11.111.111.111:2222
user: '{{ gituser }}'
oauthkey: '{{ oauthkey }}'
repo: https://api.github.com/repos/pcgentry/Github-Auto-Deploy
# Cleaning all hooks for this repo that had an error on the last update. Since this works for all hooks in a repo it is probably best that this would
# be called from a handler.
- github_hooks:
action: cleanall
user: '{{ gituser }}'
oauthkey: '{{ oauthkey }}'
repo: '{{ repo }}'
delegate_to: localhost
'''
import json
import base64
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_bytes
def request(module, url, user, oauthkey, data='', method='GET'):
    """Issue an authenticated request against the GitHub API.

    :param module: AnsibleModule instance, passed through to ``fetch_url``.
    :param url: full API URL to call.
    :param user: GitHub username for HTTP Basic auth.
    :param oauthkey: OAuth token used as the Basic-auth password.
    :param data: request body (default: empty).
    :param method: HTTP verb (default: ``GET``).
    :returns: ``(response, info)`` tuple as returned by ``fetch_url``.
    """
    # Build the Basic auth header.  b64encode operates on (and returns) bytes,
    # so strip newlines with *bytes* arguments and decode the token back to
    # text for the header value.  The previous code called
    # .replace('\n', '') with str arguments on a bytes object, which raises
    # TypeError on Python 3 (and would render the header as "b'...'").
    auth = base64.b64encode(to_bytes('%s:%s' % (user, oauthkey))).replace(b'\n', b'')
    headers = {
        'Authorization': 'Basic %s' % auth.decode('ascii'),
    }
    response, info = fetch_url(module, url, headers=headers, data=data,
                               method=method)
    return response, info
def _list(module, oauthkey, repo, user):
    """Fetch all hooks for *repo*; returns a ``(flag, body)`` tuple.

    NOTE(review): both branches return ``False`` as the first element (the
    error branch just returns an empty body), so the flag carries no
    information — callers in this module only ever use index ``[1]``.
    """
    url = "%s/hooks" % repo
    response, info = request(module, url, user, oauthkey)
    if info['status'] != 200:
        return False, ''
    else:
        return False, response.read()
def _clean504(module, oauthkey, repo, user):
    """Delete every hook whose last delivery returned HTTP 504 (gateway timeout).

    Returns ``(0, raw_hooks)`` where *raw_hooks* is the hook listing as
    fetched *before* any deletions.
    """
    _, raw_hooks = _list(module, oauthkey, repo, user)
    for hook in json.loads(raw_hooks):
        if hook['last_response']['code'] == 504:
            _delete(module, oauthkey, repo, user, hook['id'])
    return 0, raw_hooks
def _cleanall(module, oauthkey, repo, user):
    """Delete every hook whose last delivery did not return HTTP 200.

    Returns ``(0, raw_hooks)`` where *raw_hooks* is the hook listing as
    fetched *before* any deletions.
    """
    _, raw_hooks = _list(module, oauthkey, repo, user)
    for hook in json.loads(raw_hooks):
        status = hook['last_response']['code']
        if status != 200:
            _delete(module, oauthkey, repo, user, hook['id'])
    return 0, raw_hooks
def _create(module, hookurl, oauthkey, repo, user, content_type):
    """Create a 'web' service hook on *repo* posting to *hookurl*.

    Returns ``(0, body)`` on success, ``(0, '[]')`` when the API call fails.
    """
    payload = json.dumps({
        "active": True,
        "name": "web",
        "config": {
            "url": "%s" % hookurl,
            "content_type": "%s" % content_type
        }
    })
    response, info = request(module, "%s/hooks" % repo, user, oauthkey,
                             data=payload, method='POST')
    if info['status'] != 200:
        return 0, '[]'
    return 0, response.read()
def _delete(module, oauthkey, repo, user, hookid):
    """Delete hook *hookid* from *repo*; returns the raw response body."""
    endpoint = "%s/hooks/%s" % (repo, hookid)
    response, _info = request(module, endpoint, user, oauthkey, method='DELETE')
    return response.read()
def main():
    """Entry point: dispatch the requested hook action and exit via Ansible.

    Exits with ``fail_json`` when the dispatched helper reports a non-zero
    first tuple element, otherwise ``exit_json`` with the raw API body.
    """
    module = AnsibleModule(
        argument_spec=dict(
            action=dict(required=True, choices=['list', 'clean504', 'cleanall', 'create']),
            hookurl=dict(required=False),
            oauthkey=dict(required=True, no_log=True),
            repo=dict(required=True),
            user=dict(required=True),
            validate_certs=dict(default='yes', type='bool'),
            content_type=dict(default='json', choices=['json', 'form']),
        )
    )
    action = module.params['action']
    hookurl = module.params['hookurl']
    oauthkey = module.params['oauthkey']
    repo = module.params['repo']
    user = module.params['user']
    content_type = module.params['content_type']
    # Dispatch on action; exactly one branch runs (choices are validated above).
    if action == "list":
        (rc, out) = _list(module, oauthkey, repo, user)
    if action == "clean504":
        (rc, out) = _clean504(module, oauthkey, repo, user)
    if action == "cleanall":
        (rc, out) = _cleanall(module, oauthkey, repo, user)
    if action == "create":
        (rc, out) = _create(module, hookurl, oauthkey, repo, user, content_type)
    # NOTE(review): _list returns False as rc, which is falsy, so the
    # failure branch below is effectively never taken for 'list'.
    if rc != 0:
        module.fail_json(msg="failed", result=out)
    module.exit_json(msg="success", result=out)
if __name__ == '__main__':
    main()
|
[
"theyashkins@gmail.com"
] |
theyashkins@gmail.com
|
7669acaa0139374e57a42a7a5e950e3fd981b1cf
|
3ca6b34676a0adeaba85a2953a8c9abf5d6ef3e4
|
/cap 5/pizza.py
|
44790867aae04d53d68213989d73d8dcd4ef7e96
|
[] |
no_license
|
giusepper11/Curso-intensivo-Python
|
34fb8e94c7c9afb09f54d8fc67136b337d0ef106
|
613cd502af3ff877dac0d62d9eb09b290d227838
|
refs/heads/master
| 2021-08-30T11:41:42.824065
| 2017-12-17T19:47:15
| 2017-12-17T19:47:15
| 114,535,941
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
# Toppings the pizzeria can actually put on a pizza.
available_toppings = ['mushrooms', 'olives', 'green pepper', 'pepperoni', 'pinapple', 'extra cheese']
# Toppings the customer asked for ('french fries' is deliberately not available).
requested_toppings = ['mushrooms', 'extra cheese', 'french fries']

# Announce each requested topping: added if available, refused otherwise.
for topping in requested_toppings:
    if topping in available_toppings:
        message = 'Adding {} as requested'.format(topping.title())
    else:
        message = 'Nao temos {}'.format(topping.title())
    print(message)
|
[
"giusepper11@gmail.com"
] |
giusepper11@gmail.com
|
a04ff5d4bae9109384d468f2375916651f0782c8
|
fe1d3a2e3b51d1440a5c431c32afc334841dcdc6
|
/view-point-server/tests/landmark_objects/perform_modeling.py
|
a351aa50fa3ffb0b0128eba0135d867485c7742d
|
[] |
no_license
|
vyzuer/view_point
|
5a9b2251880de93a6ac41058f7d05eac2f4d814d
|
3ae071f8b5eca883f5d8790ad441d8ae419e9242
|
refs/heads/master
| 2020-06-15T23:35:26.027690
| 2016-12-01T05:35:33
| 2016-12-01T05:35:33
| 75,258,808
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,189
|
py
|
import sys
# add the package
sys.path.append('/home/vyzuer/Copy/Research/Project/code/view-point/view-point-python')
import landmark_object.classify_objects as cl_obj
import landmark_object.gmm_modeling as gmm_model
import landmark_object.geo_pixel_map as gpmap
def process(cluster_model_path, dump_path, model_type):
    """Run GMM modeling for the given model type (e.g. "weather").

    Fits context and human-object models via ``gmm_model``, writing results
    under *dump_path* with a "gmm_<model_type>" extension.
    """
    # preprocess
    # cl_obj.process_dataset(cluster_model_path, dump_path)
    # perform modeling
    # model_type = "weather"
    ext = "gmm_" + model_type
    gmm_model.process_context(cluster_model_path, dump_path, ext, model_type=model_type)
    gmm_model.process_human_object(cluster_model_path, dump_path, ext, model_type=model_type)
def process_geo_pixel_map(cluster_model_path, dump_path):
    """Generate and dump the geo-pixel map for each landmark object."""
    gpmap.process_lmo(cluster_model_path, dump_path, dump_map=True)
# CLI entry point (Python 2 — note the print statements).
if __name__ == '__main__':
    if len(sys.argv) != 4:
        print "Usage : cluster_model dump_path gmm_type"
        # NOTE(review): exits with status 0 on a usage error; 1 would be
        # more conventional.
        sys.exit(0)
    cluster_model_path = sys.argv[1]
    dump_path = sys.argv[2]
    gmm_type = sys.argv[3]
    # process(cluster_model_path, dump_path, gmm_type)
    # dump the geo-pixel map for each landmark object
    process_geo_pixel_map(cluster_model_path, dump_path)
|
[
"ysrawat.cse@gmail.com"
] |
ysrawat.cse@gmail.com
|
b216d0f072c2e1c156b59d7618b849f5928627d9
|
5d5f6ba3bdcb52b4750a5f28afa8a1a1019bfc9e
|
/django/extras/miniRegisterProject/miniRegisterProject/wsgi.py
|
611c6dc7765e6df4a6c4145df88f0e835082e8e1
|
[] |
no_license
|
eDiazGtz/pythonLearning
|
06e96f2f5a6e48ac314cb815cf9fbf65d0b7c2c8
|
57d7b2292cf5d9769cce9adf765962c3c0930d6c
|
refs/heads/master
| 2023-06-18T02:16:09.293375
| 2021-05-03T18:09:52
| 2021-05-03T18:09:52
| 335,090,531
| 0
| 0
| null | 2021-05-03T18:09:53
| 2021-02-01T21:35:24
|
Python
|
UTF-8
|
Python
| false
| false
| 415
|
py
|
"""
WSGI config for miniRegisterProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'miniRegisterProject.settings')
application = get_wsgi_application()
|
[
"ediaz-gutierrez@hotmail.com"
] |
ediaz-gutierrez@hotmail.com
|
1aa5f30f75d756e2d60d09e99a08e0c7a06a8549
|
cd5746f8cc7aee1f20606a65b4fae0d5e8ee78dc
|
/Python Books/Athena/training/exercises/exercises/software_craftsmanship/code_check/code_check_solution.py
|
503dc3833b752d73aa4ecca182f07f0d07cda69b
|
[] |
no_license
|
theGreenJedi/Path
|
df24fca355590efef0c6cb5c52e7216c6b5d2464
|
b5ed2805dbb046480929e49e550bfd8af5bb4d6f
|
refs/heads/master
| 2023-07-27T14:23:37.694546
| 2021-07-16T01:38:55
| 2021-07-16T01:38:55
| 87,686,563
| 8
| 2
| null | 2023-07-11T22:49:03
| 2017-04-09T05:57:30
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,073
|
py
|
"""
Code Check
----------
This code has an assortment of bugs, and its style doesn't
conform to PEP-8. Use pyflakes and pep8 to find and fix
the code.
You may have to install pep8 with the command:
$ easy_install pep8
It might take a few iterations before pyflakes doesn't
complain about something.
"""
from math import acos, sqrt
class Vector(object):
    """A 3-D vector supporting dot product, magnitude and angle."""

    def __init__(self, x, y, z):
        """Store the three cartesian components."""
        self.x = x
        self.y = y
        self.z = z

    def dot(self, v):
        """Return the dot product of this vector with *v*."""
        return self.x * v.x + self.y * v.y + self.z * v.z

    def abs(self):
        """Return the Euclidean magnitude of this vector."""
        return sqrt(self.dot(self))

    def angle(self, v):
        """Return the angle (in radians) between this vector and *v*."""
        return acos(self.dot(v) / (self.abs() * v.abs()))

    def __repr__(self):
        return "Vector(x=%s, y=%s, z=%s)" % (self.x, self.y, self.z)
if __name__ == "__main__":
v1 = Vector(2.0, 13.0, -1.0)
print v1, " magnitude is", v1.abs()
v2 = Vector(1.0, 2.0, 3.0)
print "v1.angle(v2) =", v1.angle(v2)
|
[
"GreenJedi@protonmail.com"
] |
GreenJedi@protonmail.com
|
78294f6a8aef669474858e616f2609a6d163080a
|
bef7c41e7b51417f9cc5c3d30a7f94b59286e2b7
|
/Algorithms/subsets.py
|
eb73b6b90fe89a9c354b65f360339b14b82bdd11
|
[] |
no_license
|
algometrix/LeetCode
|
40dd6ea93c370cabe57ba672d820f261e0595cae
|
3dc885ac2a93781c36fbe2735061da29194caba4
|
refs/heads/master
| 2021-07-01T19:45:08.018784
| 2021-06-03T05:38:13
| 2021-06-03T05:38:13
| 235,496,179
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
import pprint
def generateSubset(array):
    """Return every subset of *array* (the power set) as a list of lists.

    Subsets are emitted by a depth-first walk that, for each index, first
    includes the element and then excludes it — so the full array comes
    first and the empty subset last.
    """
    subsets = []

    def build(idx, chosen):
        # Base case: an include/exclude decision has been made for every element.
        if idx == len(array):
            subsets.append(list(chosen))
            return
        # Branch 1: take array[idx].
        chosen.append(array[idx])
        build(idx + 1, chosen)
        chosen.pop()
        # Branch 2: skip array[idx].
        build(idx + 1, chosen)

    build(0, [])
    return subsets
if __name__ == "__main__":
array = [2,5,9]
result = generateSubset(array)
print('All possible permuations')
pprint.pprint(result)
|
[
"ashishnagar31@gmail.com"
] |
ashishnagar31@gmail.com
|
1def5be0b51e055f3389540b66364e0974814105
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_159/624.py
|
cf0d31f0a91c8e0ded44e2339ac0e8cf0e446e08
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,099
|
py
|
#codejam 4/17/15
import math as m
import time
#import codejam
import sys
sys.setrecursionlimit(100)#1100) #we need 1000 max
#filename = r'c:\g\1A\1-test.in.txt'
filename = r'c:\g\1A\A-large.in'
#filename = r'c:\g\A1\1-large.in'
foutname = r'c:\g\1A\1-out-large.txt'
#foutname = r'c:\g\1A\1-out-large.txt'
FILE = open(filename)
FOUT = open(foutname,"w")
T = int(FILE.readline())
def ceildiv(x, d):
    """Ceiling division for non-negative x and positive d (x // d rounded up)."""
    return -((-x) // d)
def sol1(M, dbg): #first method, given samples in array M, which is of length 2 to 1000
    """Minimum mushrooms eaten: every decrease between samples counts as eaten
    (Python 2 code — note the print statement)."""
    S = M[0] #number at start  # NOTE(review): unused
    E = 0 #total eaten
    pmj = M[0] #previous mj
    for mj in M[1:]:
        D = mj - pmj #delta
        if D>0: #more were put on plate, none eaten
            pass
        elif D<0: #some were removed, must have been eaten
            if dbg: print "D<0: D=",D,", ate",-D," so total eaten=",(E-D)
            E -= D
        else: #no change
            pass
        pmj = mj
    return E
def sol2(M, dbg): #second method, eats at constant rate
    """Mushrooms eaten assuming a constant eating rate equal to the largest
    observed decline between samples (Python 2 code)."""
    #first find minimum eating rate - largest decline
    changes = [b-a for a,b in zip(M[:-1],M[1:])]
    # NOTE(review): if all changes are positive, abs(min(changes)) is still
    # positive — presumably the intent is max(0, -min(changes)); confirm
    # against the problem statement before changing.
    R = abs(min(changes))
    E = 0 #number eaten
    if dbg: print "sol2 R=",R #minimum eating rate
    P = M[0] #number on plate at start
    pmj = M[0] #previous mj
    for mj in M[1:]:
        P2 = max(0,P - R) #she would eat down to this if none were added
        #if dbg: print "See mj=",mj,"so ate",(P-P2)," P2=",P2
        E += (P - P2)
        #if mj > P2: #more were added, assumed an instant before time sample (for minimum)
        #    pass
        #else: #some (or none) were removed
        #    pass #must have been eaten
        P = mj
        pmj = mj
    return E
# Driver: read T cases from FILE, run both solvers, write results to FOUT
# (Python 2 script — print statements throughout).
dbg=0
if dbg: print ""
if 1:
    t0 = time.time()
    sumz = 0
    for i in range(1,T+1):
        rawline = FILE.readline().split(' ')
        D = int(rawline[0]) #number of samples at 10 second intervals
        if len(rawline)>1: #trick to check known answers
            manual_ans = [int(a) for a in rawline[-2:]]
        else:
            manual_ans = None
        s = FILE.readline()
        if s[-1]<'0': s=s[:-1]#strip newline
        P = [int(ps) for ps in s.split(' ')]
        if dbg: print "Case #" + str(i)+": D=",D," ["+(' '.join([str(xp) for xp in P]))+']',("manual_ans="+str(manual_ans) if manual_ans else "")
        #if dbg and manual_ans: print "manual_ans = ",manual_ans
        z1 = sol1(P, 0)
        z2 = sol2(P, dbg)
        if dbg: print " ==> ",z1,z2
        sumz += z1
        msg = 'Case #' + str(i) + ': ' + str(z1)+' '+str(z2)
        if dbg:
            if manual_ans: print msg+ (" 1 is OK!" if manual_ans[0]==z1 else "1 DIFF!") + (" 2 is OK!" if manual_ans[1]==z2 else "2 DIFF!")
            else: print msg
        if not dbg and i%10==1: print msg
        FOUT.write(msg + "\n")
        if manual_ans!=None:
            if manual_ans[0]!=z1 or manual_ans[1]!=z2: print "...DIFFERENT! ",manual_ans," but we got: ",(z1,z2)
    if dbg: print ""
    print "finished",T,"cases,", round(time.time() - t0,3),"s, sumz:",sumz
FOUT.close()
FILE.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
2a06374a0a793b1371880df1dcb25fa45b93da2c
|
aea74a8c1d4ad17eb65b7c70da5342c01fd1a930
|
/websites_mongo/scraper_military_shop.py
|
b0d8f9173ceff9468d92e846a3b1818f303f0b09
|
[] |
no_license
|
savusebastian/angular_project
|
4e6d8b398e17ca91842d7579d8f4da8650e7a13a
|
9c28c25e4b9875abf346f7e9a7e8baa34bc3f9ee
|
refs/heads/main
| 2023-04-17T07:03:32.016850
| 2021-05-09T09:07:55
| 2021-05-09T09:07:55
| 365,710,891
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,161
|
py
|
from bs4 import BeautifulSoup
from bson.objectid import ObjectId
from pymongo import MongoClient
import pymongo
import requests
def military_shop_DB():
    """Scrape military-shop.ro category sitemaps and upsert products into MongoDB.

    For each category sitemap, every product page's schema.org JSON-LD block is
    split on '"' and scanned positionally for name/image/sku/price/brand/
    availability. Extracted records are inserted into (or replaced in) the
    ``military_shop_products`` collection, keyed by product name.
    """
    cluster = MongoClient('mongodb://localhost:27017/vrem_reduceri_db')
    db = cluster['vrem_reduceri_db']
    collection = db['military_shop_products']
    all_links = [
        'https://www.military-shop.ro/sitemap_cat_85.xml',
        'https://www.military-shop.ro/sitemap_cat_67.xml',
        'https://www.military-shop.ro/sitemap_cat_2.xml',
        'https://www.military-shop.ro/sitemap_cat_4.xml',
        'https://www.military-shop.ro/sitemap_cat_101.xml',
        'https://www.military-shop.ro/sitemap_cat_40.xml',
        'https://www.military-shop.ro/sitemap_cat_119.xml',
        'https://www.military-shop.ro/sitemap_cat_37.xml',
        'https://www.military-shop.ro/sitemap_cat_39.xml',
        'https://www.military-shop.ro/sitemap_cat_120.xml',
        'https://www.military-shop.ro/sitemap_cat_147.xml',
        'https://www.military-shop.ro/sitemap_cat_171.xml',
        'https://www.military-shop.ro/sitemap_cat_44.xml',
        'https://www.military-shop.ro/sitemap_cat_35.xml',
        'https://www.military-shop.ro/sitemap_cat_148.xml',
        'https://www.military-shop.ro/sitemap_cat_36.xml',
        'https://www.military-shop.ro/sitemap_cat_141.xml',
        'https://www.military-shop.ro/sitemap_cat_100.xml',
        'https://www.military-shop.ro/sitemap_cat_41.xml',
        'https://www.military-shop.ro/sitemap_cat_38.xml',
        'https://www.military-shop.ro/sitemap_cat_42.xml',
        'https://www.military-shop.ro/sitemap_cat_43.xml',
    ]
    for text in all_links:
        URL = text
        # e.g. 'https://www.military-shop.ro/...' -> 'military-shop'
        shop = URL.split('/')[2].split('.')[1]
        page = requests.get(URL)
        soup = BeautifulSoup(page.content, 'html.parser')
        available_data = soup.find_all('loc')
        links = [item.get_text() for item in available_data]
        # NOTE(review): the first 60 sitemap entries are skipped — confirm intended.
        for link in links[60:]:
            try:
                web_page = requests.get(link)
                web_soup = BeautifulSoup(web_page.content, 'html.parser')
                schemaorg_data = web_soup.find_all(type='application/ld+json')[0].contents[0]
                split_data = schemaorg_data.split('"')
                data = {}
                data['_id'] = ObjectId()
                i = 0
                for item in split_data:
                    # BUG FIX: this guard used to read ``data[i - 2]`` — an int
                    # key into the freshly-built dict, which always raised
                    # KeyError (silently swallowed below, so no product was ever
                    # stored). With JSON-LD split on '"', the token two places
                    # before 'name' is the '@type' value, i.e. 'Product'.
                    if item == 'name' and split_data[i - 2] == 'Product':
                        data[item] = split_data[i + 2]
                        data['slug'] = split_data[i + 2].lower().replace('"', '').replace(',', '').replace('.', '-').replace(' ', '-')
                    if item == 'image' or item == 'sku' or item == 'priceCurrency':
                        data[item] = split_data[i + 2]
                    if item == 'price':
                        # Price token keeps surrounding ':' and ',' — strip them.
                        data[item] = split_data[i + 1][1:-1]
                    if item == 'brand':
                        data[item] = split_data[i + 8]
                    if item == 'availability':
                        # e.g. 'http://schema.org/InStock' -> 'InStock'
                        data[item] = split_data[i + 2].split('/')[-1]
                    i += 1
                data['url'] = link
                data['shop'] = shop
                print(len(data))
                # Only upsert records where enough fields were extracted.
                if len(data) > 5:
                    result = collection.find_one({'name': data['name']})
                    if result is None:
                        collection.insert_one(data)
                    else:
                        # Keep the existing _id so replace_one updates in place.
                        data['_id'] = result['_id']
                        collection.replace_one({'name': data['name']}, data)
            except Exception:
                # Best-effort scraping: report the failing URL and move on.
                print(link)
    print('military_shop_DB')


if __name__ == '__main__':
    military_shop_DB()
|
[
"savusebastianf@gmail.com"
] |
savusebastianf@gmail.com
|
4e09b2610a1de447484dfa0b2a454a2e60fbe606
|
c1c7f9e400f788c296d9464117ba6cac553b03ca
|
/src/datasets/soilmoist.py
|
4985c8c3541822a678276e354a53aac02b638597
|
[
"MIT"
] |
permissive
|
nasa/RHEAS
|
1d8e0d6cb2df13713d458db07c0348fcf18eb9e1
|
27d0abcaeefd8760ce68e05e52905aea5f8f3a51
|
refs/heads/master
| 2023-08-03T23:05:47.535575
| 2023-08-01T16:55:13
| 2023-08-01T16:55:13
| 46,281,533
| 88
| 63
|
MIT
| 2023-08-01T16:55:15
| 2015-11-16T14:57:18
|
Python
|
UTF-8
|
Python
| false
| false
| 4,223
|
py
|
"""Definition for abstract soil moisture class.
.. module:: soilmoist
:synopsis: Definition of the Soilmoist class
.. moduleauthor:: Kostas Andreadis <kandread@jpl.nasa.gov>
"""
import numpy as np
import dbio
import logging
class Soilmoist(object):
    # Base class for soil-moisture observation operators used in assimilation.
    # Subclasses are expected to provide ``self.tablename`` (raster table
    # holding the observations) and ``self.stddev`` (default observation error
    # std-dev used in E()) — TODO confirm against concrete subclasses.
    #
    # NOTE(review): the ``len(x) / nens`` reshape expressions below rely on
    # Python 2 integer division; under Python 3 they produce float shapes and
    # numpy raises — confirm the target interpreter before porting.

    def __init__(self, uncert=None):
        """Initialize SMOS soil moisture object."""
        # Model state variable(s) this observation constrains.
        self.statevar = ["soil_moist"]
        self.obsvar = "soil_moist"
        # Optional callable sampling the observation error; called as
        # uncert(size=(nobs, nens)) in E().
        self.uncert = uncert

    def x(self, dt, models):
        """Retrieve state variable from database.

        Returns (data, lat, lon, gid), each reshaped to (npixels, nens),
        for model date *dt*. ``sum(val)`` aggregates over soil layers.
        """
        data = {}
        db = dbio.connect(models.dbname)
        cur = db.cursor()
        for s in self.statevar:
            # Sum over layers per pixel centroid and ensemble member.
            sql = "select ensemble,st_x(geom),st_y(geom),sum(val) from (select ensemble,layer,(ST_PixelAsCentroids(rast)).* from {0}.{1} where fdate=date '{2}-{3}-{4}') foo group by ensemble,geom order by ensemble".format(
                models.name, s, dt.year, dt.month, dt.day)
            cur.execute(sql)
            e, lon, lat, vals = zip(*cur.fetchall())
            # Map (lat, lon) centroids to model grid cell identifiers.
            gid = [models[0].lgid[(l[0], l[1])] for l in zip(lat, lon)]
            nens = max(e)
            data[s] = np.array(vals).reshape((len(vals) / nens, nens))
            lat = np.array(lat).reshape((len(lat) / nens, nens))
            lon = np.array(lon).reshape((len(lon) / nens, nens))
            gid = np.array(gid).reshape((len(gid) / nens, nens))
        cur.close()
        db.close()
        return data, lat, lon, gid

    def get(self, dt, models):
        """Retrieve observations from database for date *dt*.

        Returns (data, lat, lon) as (nobs, 1) arrays clipped to the model
        basin, or (None, None, None) when no observations exist for *dt*.
        Side effect: sets ``self.nobs`` when data are found.
        """
        db = dbio.connect(models.dbname)
        cur = db.cursor()
        sql = "select st_x(geom),st_y(geom),val from (select (st_pixelascentroids(st_clip(rast,geom))).* from {0},{1}.basin where st_intersects(rast,geom) and fdate=date '{2}-{3}-{4}') foo".format(
            self.tablename, models.name, dt.year, dt.month, dt.day)
        cur.execute(sql)
        if bool(cur.rowcount):
            lon, lat, data = zip(*cur.fetchall())
            data = np.array(data).reshape((len(data), 1))
            lat = np.array(lat).reshape((len(lat), 1))
            lon = np.array(lon).reshape((len(lon), 1))
            self.nobs = len(data)
        else:
            data = lat = lon = None
        cur.close()
        db.close()
        return data, lat, lon

    def hx(self, models, dt):
        """Retrieve observed variable from database and resample to observation resolution.

        Resamples the modeled top-layer variable onto the observation raster
        grid ('average' resampling), then converts mm of water to volumetric
        soil moisture using the top-layer depth of the nearest basin pixel.
        """
        db = dbio.connect(models.dbname)
        cur = db.cursor()
        sql = "with f as (select st_union(st_clip(rast,geom)) as rast from {0},{1}.basin where st_intersects(rast,geom) and fdate=date '{2}-{3}-{4}') select ensemble,st_x(geom),st_y(geom),val from (select ensemble,(st_pixelascentroids(st_resample(b.rast,f.rast,'average'))).* from f,{1}.{5} as b where layer=1 and fdate=date '{2}-{3}-{4}') foo order by ensemble".format(
            self.tablename, models.name, dt.year, dt.month, dt.day, self.obsvar)
        cur.execute(sql)
        e, lon, lat, data = zip(*cur.fetchall())
        nens = max(e)
        lat = np.array(lat).reshape((len(lat) / nens, nens))
        lon = np.array(lon).reshape((len(lon) / nens, nens))
        data = np.array(data).reshape((len(data) / nens, nens))
        # Nearest-neighbor lookup of soil layer depths for each pixel.
        sql = "select depths from {0}.basin order by geom <-> st_geomfromtext('POINT(%(lon)s %(lat)s)',4326) limit 1".format(
            models.name)
        for i in range(len(data) / nens):
            for e in range(nens):
                cur.execute(sql, {'lat': lat[i, e], 'lon': lon[i, e]})
                z = cur.fetchone()[0][0]
                # convert to volumetric soil moisture
                data[i, e] /= (1000.0 * z)
        cur.close()
        db.close()
        return data, lat, lon

    def E(self, nens):
        """Generate observation error vector.

        Samples from ``self.uncert`` when provided; otherwise (or when the
        callable fails) falls back to N(0, self.stddev) of shape (nobs, nens).
        """
        log = logging.getLogger(__name__)
        e = None
        if self.uncert is not None:
            try:
                e = self.uncert(size=(self.nobs, nens))
            except:
                log.warning("Error using provided parameters in observation error PDF. Reverting to default.")
        if e is None:
            e = np.random.normal(0.0, self.stddev, (self.nobs, nens))
        return e
|
[
"kandread@jpl.nasa.gov"
] |
kandread@jpl.nasa.gov
|
c09e738a65a63a9205d71eecf0d10d4efcb5b816
|
cd2aaf0097f2e244aa4a22c9da7133dd0e2f2fb8
|
/Saylani/python-code-master/23July2017/hello/first.py
|
3447c8c7a278396583b817b155ad18eece3784af
|
[] |
no_license
|
EnggQasim/SSUET-2017-Module-I
|
349ea6e9b0554fa8c55899622bf0ee97fd19b685
|
cd41ab8e768616ca56ddaa1d7662283f653674f9
|
refs/heads/master
| 2020-03-25T10:36:38.330710
| 2018-09-30T13:17:38
| 2018-09-30T13:17:38
| 143,698,684
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
# Demonstration of basic Python variables: strings, integers, and rebinding.
print("Hello New world")
name = "Mughal"
name = 'Mughal'  # same value; single and double quotes are interchangeable
age = 45
email = "zeeshanhanif@gmail.com"
print(name)
name = 67  # rebinding: the same name can later hold a different type
print(name)
name1 = "My father\"s name is M. Aslam"  # backslash-escaped quote inside a string
print(name1)
|
[
"m.qasim077@gmail.com"
] |
m.qasim077@gmail.com
|
5fb8b2e6dd69ef1453ce691638668749ee32b12b
|
29e1133741b339c2e6c4c0385a103f68baa32a11
|
/findata/gbif/gbif.py
|
94ef7024bcdaf8ce800a3eefbe841ba0e9df5a59
|
[] |
no_license
|
Gscsd8527/AllProject
|
b406935dd1e969d1f45a62f870fb409f81ba4200
|
10b56c432b6f433e3a37967b7c717840e726765c
|
refs/heads/master
| 2023-02-21T20:25:48.397668
| 2022-03-04T14:01:27
| 2022-03-04T14:01:27
| 199,461,253
| 13
| 6
| null | 2023-02-15T20:47:23
| 2019-07-29T13:45:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,903
|
py
|
import requests
from concurrent.futures import ThreadPoolExecutor, as_completed
import json
import pymongo
from loguru import logger
myclient = pymongo.MongoClient('mongodb://*********:27017/')
mydb = myclient['dataset'] # 数据库
mycol = mydb['gbif'] # 表
class Gbif:
    """Paged client for the GBIF dataset-search API."""

    def __init__(self):
        self.url = 'https://www.gbif.org/api/dataset/search?facet=type&facet=publishing_org&facet=hosting_org&facet=publishing_country&facet=project_id&facet=license&locale=en&offset={offset}'
        self.count = 54976  # total number of datasets to page through
        self.page_num = 20  # results returned per page
        self.pages = self.get_pages()

    def get_pages(self):
        """Compute (and print) how many pages are needed to cover all results."""
        full_pages, leftover = divmod(self.count, self.page_num)
        total = full_pages + 1 if leftover > 0 else full_pages
        print(total)
        return total

    def get_works(self):
        """Build one search URL per page, each carrying its own offset."""
        offsets = (page_index * self.page_num for page_index in range(self.pages))
        return [self.url.format(offset=off) for off in offsets]

    def request(self, url):
        """Fetch one page and return its 'results' list; None on a bad status."""
        response = requests.get(url)
        if response.status_code != 200:
            print('错误响应码为: ', response.status_code)
            return None
        return json.loads(response.text)['results']
def main():
    """
    https://www.gbif.org/dataset/search
    Fan out one request per result page over a thread pool and log each
    dataset title as pages complete.
    :return:
    """
    client = Gbif()
    page_urls = client.get_works()
    pool = ThreadPoolExecutor(max_workers=10)
    # Submit every page fetch up front; futures complete out of order.
    futures = [pool.submit(client.request, page_url) for page_url in page_urls]
    for future in as_completed(futures):
        for record in future.result():
            logger.info(record['title'])
            # mycol.insert_one(record)


if __name__ == '__main__':
    main()
|
[
"tan_gscsd@163.com"
] |
tan_gscsd@163.com
|
b8732654492ceef372cc4e82ca927642071ce0f8
|
47ec91bedb4ca9d69bf288fd25484b08e013a8ac
|
/themylog/config/processors.py
|
ade5bc4eb60a7c781f37f1697dc4a98080629c66
|
[] |
no_license
|
themylogin/themylog
|
23d1238866240d168cf3ce828bbb85d38276a226
|
d4de99f08f066972a06c1463a1e2440a56513bfa
|
refs/heads/master
| 2020-04-15T17:29:36.318428
| 2016-04-17T14:53:14
| 2016-04-17T14:53:14
| 14,795,831
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 626
|
py
|
# -*- coding=utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
from collections import namedtuple
import sys
from themylog.config.scripts import find_scripts
Processor = namedtuple("Processor", ["name", "process"])
def get_processors(config):
    """Load processor callables from the configured scripts directory.

    Returns an empty list when no directory is configured. Side effect:
    prepends the directory to ``sys.path`` so the scripts are importable.
    """
    directory = config.get("processors", {}).get("directory")
    if not directory:
        return []
    sys.path.insert(0, directory)
    return [
        Processor(name=script.name, process=__import__(script.name).process)
        for script in find_scripts(directory, {})
    ]
|
[
"themylogin@gmail.com"
] |
themylogin@gmail.com
|
e5f2f94778e5364a8c9c19af7062bf8a1f7f02e9
|
2734b77a68f6d7e22e8b823418ad1c59fe1a34af
|
/opengever/workspaceclient/tests/test_keys.py
|
96b2015989f56a8230f5fa71a7c7b1f05fd50952
|
[] |
no_license
|
4teamwork/opengever.core
|
5963660f5f131bc12fd0a5898f1d7c8f24a5e2b1
|
a01bec6c00d203c21a1b0449f8d489d0033c02b7
|
refs/heads/master
| 2023-08-30T23:11:27.914905
| 2023-08-25T14:27:15
| 2023-08-25T14:27:15
| 9,788,097
| 19
| 8
| null | 2023-09-14T13:28:56
| 2013-05-01T08:28:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,895
|
py
|
from contextlib import contextmanager
from ftw.builder import Builder
from ftw.builder import create
from opengever.testing import IntegrationTestCase
from opengever.workspaceclient.exceptions import ServiceKeyMissing
from opengever.workspaceclient.keys import key_registry
from plone.restapi.serializer.converters import json_compatible
import json
import shutil
import tempfile
class TestKeyRegistry(IntegrationTestCase):
    # Tests for the workspace service-key registry backed by JSON key files
    # on the filesystem.

    @contextmanager
    def temp_fs_key(self, key):
        """Temporarily install *key* as a JSON file in a fresh key directory.

        Swaps ``key_registry.key_directory``/``keys`` for the duration of the
        block, reloads the registry from disk, and yields the temp directory
        path; the original registry state is restored afterwards.
        """
        temp_dir = tempfile.mkdtemp()
        original_key_directory = key_registry.key_directory
        original_keys = key_registry.keys
        key_registry.key_directory = temp_dir
        file_ = tempfile.NamedTemporaryFile(
            dir=temp_dir, suffix=".json", delete=False)
        file_.write(json.dumps(json_compatible(key)))
        file_.close()
        try:
            key_registry.load_file_system_keys()
            yield temp_dir
        finally:
            # Always clean up the temp dir and restore the registry, even if
            # the test body raised.
            shutil.rmtree(temp_dir)
            key_registry.key_directory = original_key_directory
            key_registry.keys = original_keys

    def test_raises_an_error_if_the_key_file_not_found_for_a_specific_url(self):
        # Looking up a URL with no matching key must raise ServiceKeyMissing
        # and report both the missing URL and the keys that were found.
        service_key_client = create(Builder('workspace_token_auth_app')
                                    .uri('http://example.com/plone/'))

        with self.temp_fs_key(service_key_client) as path:
            with self.assertRaises(ServiceKeyMissing) as cm:
                key_registry.get_key_for('http://example.de/plone/')

        self.maxDiff = None
        self.assertEqual(
            "No workspace service key found for URL http://example.de/plone.\n"
            "Found keys ('http://example.com/plone',) in the folder: {}".format(path),
            str(cm.exception))

    def test_skip_fs_keys_without_a_token_uri(self):
        # Key files lacking 'token_uri' are ignored when loading from disk.
        service_key_client = create(Builder('workspace_token_auth_app')
                                    .uri('http://example.com/plone/'))
        del service_key_client['token_uri']

        with self.temp_fs_key(service_key_client):
            key_registry.load_file_system_keys()
            self.assertEqual([], key_registry.keys)

    def test_return_registered_keys_on_the_filesystem(self):
        # Loaded keys are indexed by their token URI (trailing slash stripped).
        service_key_client = create(Builder('workspace_token_auth_app')
                                    .uri('http://example.com/plone'))

        with self.temp_fs_key(service_key_client):
            self.assertEqual(
                ['http://example.com/plone'],
                key_registry.keys_by_token_uri.keys())

    def test_get_key_for(self):
        # A key registered for a URL is returned (as a superset) for that URL.
        service_key_client = create(Builder('workspace_token_auth_app')
                                    .uri('http://example.com/plone/'))

        self.assertDictContainsSubset(
            service_key_client,
            key_registry.get_key_for('http://example.com/plone/'))
|
[
"e.schmutz@4teamwork.ch"
] |
e.schmutz@4teamwork.ch
|
eb28e0f54441e884f4107a7771de1bbcac4b2f24
|
3b60e6f4bbc011003ac4929f01eb7409918deb79
|
/Analysis_v1/Simulation/Pythia/Unparticles/CP2UnparticlesPythia8fragments-2018PSWeights/UnparToGG_Spin2_du1p1_LambdaU-2000_pT70_M2000_TuneCP2_13TeV_pythia8_cfi.py
|
f5943fddccc2bb59b36b498dd9eadffdd636bb94
|
[] |
no_license
|
uzzielperez/Analyses
|
d1a64a4e8730325c94e2bc8461544837be8a179d
|
1d66fa94763d7847011ea551ee872936c4c401be
|
refs/heads/master
| 2023-02-09T04:54:01.854209
| 2020-09-07T14:57:54
| 2020-09-07T14:57:54
| 120,850,137
| 0
| 0
| null | 2020-06-17T16:48:16
| 2018-02-09T03:14:04
|
C++
|
UTF-8
|
Python
| false
| false
| 1,586
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP2Settings_cfi import *
from Configuration.Generator.PSweightsPythia.PythiaPSweightsSettings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CP2SettingsBlock,
pythia8PSweightsSettingsBlock,
processParameters = cms.vstring(
'ExtraDimensionsUnpart:ffbar2gammagamma = on',
'ExtraDimensionsUnpart:gg2gammagamma = on',
'PromptPhoton:gg2gammagamma = on',
#'PromptPhoton:ffbar2gammagamma = on',
'ExtraDimensionsUnpart:LambdaU = 2000.0',
'ExtraDimensionsUnpart:lambda = 1.0',
'ExtraDimensionsUnpart:dU = 1.1',
'ExtraDimensionsUnpart:spinU = 2',
'PhaseSpace:pTHatMin = 70',
'PhaseSpace:mHatMin = 2000',
'PhaseSpace:mHatMax = 1',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CP2Settings',
'processParameters',
'pythia8PSweightsSettings',
)
)
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"uzzie.perez@cern.ch"
] |
uzzie.perez@cern.ch
|
dc7b67abce3d12e28bf013bb0871e37d84a241c6
|
40eb57784dc62179eafcf21f796a7d0c43cf55e5
|
/calliope/constraints/planning.py
|
39e9ce39f71b7a76af050a81764360e4c2893cc3
|
[
"Apache-2.0"
] |
permissive
|
sjpfenninger/calliope
|
61b202f8519076a95ee8bad3d0d2215043e1b497
|
a4e49c3b7d37f908bafc84543510eec0b4cf5d9f
|
refs/heads/master
| 2020-06-11T01:01:36.709420
| 2016-12-06T14:47:20
| 2016-12-06T14:47:20
| 75,827,649
| 1
| 1
| null | 2016-12-07T11:01:51
| 2016-12-07T11:01:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,944
|
py
|
"""
Copyright (C) 2013-2016 Stefan Pfenninger.
Licensed under the Apache 2.0 License (see LICENSE file).
planning.py
~~~~~~~~~~~
Planning constraints.
"""
import numpy as np
import pyomo.core as po
def node_constraints_build_total(model):
    """Add a system-wide total installed-capacity constraint per technology.

    For each technology ``y``, the sum of ``e_cap`` over all locations ``x``
    is constrained to equal ``e_cap.total_equals`` (when set) or to be at most
    ``e_cap.total_max``, each scaled by ``e_cap_scale``.
    """
    m = model.m

    # Constraint rules
    def c_e_cap_total_systemwide_rule(m, y):
        total_max = model.get_option(y + '.constraints.e_cap.total_max')
        total_equals = model.get_option(y + '.constraints.e_cap.total_equals')
        scale = model.get_option(y + '.constraints.e_cap_scale')
        # No finite cap and no equality target: nothing to constrain.
        if np.isinf(total_max) and not total_equals:
            return po.Constraint.NoConstraint
        sum_expr = sum(m.e_cap[y, x] for x in m.x)
        # Equality target takes precedence over the upper bound.
        total_expr = total_equals * scale if total_equals else total_max * scale
        if total_equals:
            return sum_expr == total_expr
        else:
            return sum_expr <= total_expr

    # Constraints
    m.c_e_cap_total_systemwide = \
        po.Constraint(m.y, rule=c_e_cap_total_systemwide_rule)
def system_margin(model):
    """Add a supply-margin constraint per carrier at its peak-demand timestep.

    For each carrier ``c`` with a nonzero ``system_margin`` configured, total
    production at the maximum-demand timestep, inflated by the margin, must
    not exceed the total reference-efficiency capacity of that carrier's
    technologies.
    """
    m = model.m
    time_res = model.data['_time_res'].to_series()

    def carrier(y):
        # Carrier delivered by technology y.
        return model.get_option(y + '.carrier')

    # Constraint rules
    def c_system_margin_rule(m, c):
        # If no margin defined for a carrier, use 0 (i.e. no margin)
        margin = model.config_model.system_margin.get_key(c, default=0)
        if margin:
            # Timestep of maximum demand for this carrier.
            t = model.t_max_demand[c]
            return (sum(m.es_prod[c, y, x, t] for y in m.y for x in m.x)
                    * (1 + margin)
                    <= time_res.at[t]
                    * sum((m.e_cap[y, x] / model.get_eff_ref('e', y, x))
                          for y in m.y if carrier(y) == c
                          for x in m.x))
        else:
            return po.Constraint.NoConstraint

    # Constraints
    m.c_system_margin = po.Constraint(m.c, rule=c_system_margin_rule)
|
[
"stefan@pfenninger.org"
] |
stefan@pfenninger.org
|
a727ae60692c2636d6abd360bd56330c24e06fee
|
df2cbe914f463ad050d7ed26194424afbe3a0a52
|
/addons/hr_maintenance/models/res_users.py
|
c97a2bb60b15c15017414adf202109752bb76078
|
[
"Apache-2.0"
] |
permissive
|
SHIVJITH/Odoo_Machine_Test
|
019ed339e995be980606a2d87a63312ddc18e706
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
refs/heads/main
| 2023-07-16T16:23:14.300656
| 2021-08-29T11:48:36
| 2021-08-29T11:48:36
| 401,010,175
| 0
| 0
|
Apache-2.0
| 2021-08-29T10:13:58
| 2021-08-29T10:13:58
| null |
UTF-8
|
Python
| false
| false
| 1,195
|
py
|
from odoo import api, models, fields
class Users(models.Model):
    # Extends res.users with maintenance-equipment relations.
    _inherit = 'res.users'

    # Equipment records owned (managed) by this user.
    equipment_ids = fields.One2many('maintenance.equipment', 'owner_user_id', string="Managed Equipments")
    # Mirrors the linked employee's equipment count for display on user views.
    equipment_count = fields.Integer(related='employee_id.equipment_count', string="Assigned Equipments")

    def __init__(self, pool, cr):
        """ Override of __init__ to add access rights.
        Access rights are disabled by default, but allowed
        on some specific fields defined in self.SELF_{READ/WRITE}ABLE_FIELDS.
        """
        init_res = super(Users, self).__init__(pool, cr)
        # duplicate list to avoid modifying the original reference
        type(self).SELF_READABLE_FIELDS = type(self).SELF_READABLE_FIELDS + ['equipment_count']
        return init_res
class Employee(models.Model):
    # Extends hr.employee with the equipment assigned to the employee.
    _inherit = 'hr.employee'

    # Equipment records assigned to this employee.
    equipment_ids = fields.One2many('maintenance.equipment', 'employee_id')
    equipment_count = fields.Integer('Equipments', compute='_compute_equipment_count')

    @api.depends('equipment_ids')
    def _compute_equipment_count(self):
        # Recomputed whenever equipment_ids changes.
        for employee in self:
            employee.equipment_count = len(employee.equipment_ids)
|
[
"36736117+SHIVJITH@users.noreply.github.com"
] |
36736117+SHIVJITH@users.noreply.github.com
|
2de6c9501b1b8560c72788d40905ffe4818ba046
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/5_map/构造O(1)复杂度数组.py
|
4022012734ec37223659443e2deaa1ed6ec62b0f
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
# 设计一个特殊的数组,要求该数据结构以下三种操作的时间复杂度均为O(1)
# 1. 查询数组某个位置的元素
# 2. 将数组某个位置的元素修改为指定值
# 3. 将数组所有元素修改为指定值
from collections import defaultdict
class SpecialArray:
    """Sparse array with O(1) get, set, and set-all operations.

    Backed by a defaultdict whose default factory encodes the current
    "fill" value; setAll swaps in a fresh mapping, dropping all previous
    entries in O(1).
    """

    __slots__ = "_data"

    def __init__(self) -> None:
        # Untouched indices initially read as 0.
        self._data = defaultdict(int)

    def get(self, index: int) -> int:
        """Return the value at *index* (the current fill value if never set).

        BUG FIX: avoid defaultdict's auto-insert — plain ``self._data[index]``
        permanently stored the default on every miss, so read-only queries of
        unseen indices grew memory without bound.
        """
        data = self._data
        if index in data:
            return data[index]
        return data.default_factory()

    def set(self, index: int, value: int) -> None:
        """Store *value* at *index* in O(1)."""
        self._data[index] = value

    def setAll(self, value: int) -> None:
        """Make every index read as *value*, discarding all prior entries."""
        self._data = defaultdict(lambda: value)
|
[
"lmt2818088@gmail.com"
] |
lmt2818088@gmail.com
|
d6f0dd5c587a5205dc3e3b19517b90443f991d4e
|
97e349765284a1239580f4ae6943f597797fdc0d
|
/dingweitest/test1.py
|
88b98797ba89775967126c4b52b4988562f63047
|
[] |
no_license
|
chenhanfang/test2
|
716aa9b1f875a6c88bfc6fb45ddc9879441c3c34
|
5d9d44086815bdf514636a1fc14bcd2c1f4284a5
|
refs/heads/master
| 2021-01-20T14:22:51.885745
| 2017-05-09T01:59:34
| 2017-05-09T01:59:34
| 90,597,906
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,244
|
py
|
#coding=utf-8
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains  # class providing mouse-action events
import time
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities

# Connect to a remote Chrome instance and open Baidu.
driver=webdriver.Remote(desired_capabilities=DesiredCapabilities.CHROME)
driver.get('http://www.baidu.com/')
time.sleep(1)
driver.find_element_by_xpath('//a[@href="http://www.baidu.com/gaoji/preferences.html" and @class="pf"]').click()  # open the "Settings" menu
driver.find_element_by_xpath('//a[@class="setpref" and @href="javascript:;"]').click()  # open "Search settings"
time.sleep(1)
m=driver.find_element_by_xpath("//select[@name='NR']")  # drop-down (select) handling
m.find_element_by_xpath("//option[@value='20']").click()
time.sleep(1)
driver.find_element_by_xpath("//a[@class='prefpanelgo']").click()
time.sleep(1)
date=driver.switch_to.alert.text  # returns the text inside the alert/confirm/prompt
print(date)
driver.switch_to.alert.accept()  # accept() confirms a popup that carries an OK button
'''dissmiss 点击取消按钮,如果存在取消按钮;send_keys 输入值,这个
alert\confirm没有对话框就不能用了,不然会报错'''
cookie=driver.get_cookies()  # get the session cookies
print(cookie)
driver.find_element_by_xpath("//input[@id='kw']").send_keys('selenium')
driver.find_element_by_xpath("//input[@id='su']").click()
time.sleep(2)
js="var q=document.documentElement.scrollTop=1000"  # JS snippet to scroll the page to the bottom
driver.execute_script(js)
time.sleep(2)
# data=driver.find_element_by_xpath('//p[@id="cp"]').text
# print(data)
# driver.find_element_by_xpath('//a[@name="tj_mp3"]').click()
print(driver.title)  # print the browser window title
# driver.set_window_size(480,800)
# driver.back()  # navigate back
# time.sleep(2)
# driver.forward()  # navigate forward
'''
qqq=driver.find_element_by_xpath("///")
ActionChains(driver).context_click(qqq).perform()####鼠标右击事件
ActionChains(driver).double_click(qqq).perform()####鼠标双击事件
ppp=driver.find_element_by_xpath("///")
ActionChains(driver).drag_and_drop(qqq,ppp).perform()####鼠标拖地事件,perform()执行所有存储的行为
switch_to_frame()#####框架(frame)或者窗口(window)的定位
switch_to_window()
'''
|
[
"chenhanfang@zhixunkeji.cn"
] |
chenhanfang@zhixunkeji.cn
|
914fa86716ed865bb5eabf6824fd0f4239243ca5
|
163c66e58f04268c884335ed66461d5ddf513280
|
/hw2/quicksort.py
|
9d115b440faadc402a3c34e630c76fdcad1375f1
|
[] |
no_license
|
pz325/Coursera_ADA
|
4ca0d8273c0571b45364b951d52a5d06cbdc652c
|
b968dd6b60f73d1ebe34195ddfa7fc39df3726cd
|
refs/heads/master
| 2016-09-05T22:16:13.865655
| 2014-11-18T21:27:54
| 2014-11-18T21:27:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,269
|
py
|
def chooseFirstElementAsPivot(A, l, r):
    """Use A[l], the first element of the range, as the pivot (no reordering)."""
    return A[l]
def chooseLastElementAsPivot(A, l, r):
    """Move A[r] to the front of the range and use it as the pivot."""
    A[l], A[r] = A[r], A[l]  # tuple swap instead of a temp variable
    return A[l]
def chooseMedianOfThreeAsPivot(A, l, r):
    """Median-of-three pivot rule: move the median of A[l], A[mid], A[r]
    to A[l] and return it.

    Falls back to the first-element rule for two-element ranges.
    """
    if r - l == 1:
        return chooseFirstElementAsPivot(A, l, r)
    # BUG FIX: use floor division — the original '(r - l) / 2' is true
    # division under Python 3 and yields a float index (TypeError).
    mid = (r - l) // 2 + l
    # If A[mid] lies strictly between the other two, move it to the front.
    if (A[mid] - A[l]) * (A[mid] - A[r]) < 0:
        A[l], A[mid] = A[mid], A[l]
    # Otherwise, if A[r] is the median (checked against the possibly-updated
    # A[l]), move it to the front.
    if (A[r] - A[l]) * (A[r] - A[mid]) < 0:
        A[l], A[r] = A[r], A[l]
    return A[l]
def quicksort(A, l, r, choosePivot):
    """Sort A[l..r] in place; return the number of comparisons performed.

    *choosePivot* is a callable (A, l, r) -> pivot value that also moves the
    chosen pivot to A[l].
    """
    if r - l <= 0:
        return 0  # zero or one element: already sorted
    # Partitioning compares the pivot against every other element once.
    compares = r - l
    pivot = choosePivot(A, l, r)
    lo_l, lo_r, hi_l, hi_r = partition(A, l, r, pivot)
    compares += quicksort(A, lo_l, lo_r, choosePivot)
    compares += quicksort(A, hi_l, hi_r, choosePivot)
    return compares
def partition(A, l, r, pivot):
    """In-place partition of A[l..r] around *pivot* (assumed stored at A[l]).

    Returns (l1, r1, l2, r2): the bounds of the sub-ranges strictly left and
    right of the pivot's final position.
    """
    boundary = l + 1  # first slot not yet known to hold a value < pivot
    for scan in range(l + 1, r + 1):
        if A[scan] < pivot:
            A[scan], A[boundary] = A[boundary], A[scan]
            boundary += 1
    # Move the pivot from the front into its final sorted position.
    A[l], A[boundary - 1] = A[boundary - 1], A[l]
    return l, boundary - 2, boundary, r
def test():
    """Sanity-check quicksort on a fixed array, then the bundled sample files."""
    A = [3, 8, 2, 5, 1, 4, 7, 6]
    compares = quicksort(A, 0, 7, chooseFirstElementAsPivot)
    print(compares)
    # Compare all three pivot strategies on sample inputs of growing size.
    solution('10.txt')
    solution('100.txt')
    solution('1000.txt')
def solution(source):
    """Count quicksort comparisons on the integers in *source*, once per pivot rule.

    The file is re-read before each run because quicksort sorts A in place.
    """
    print(source)
    A = [int(l.strip()) for l in open(source).readlines()]
    compares = quicksort(A, 0, len(A)-1, chooseFirstElementAsPivot)
    print('choose 1st element', compares)
    A = [int(l.strip()) for l in open(source).readlines()]
    compares = quicksort(A, 0, len(A)-1, chooseLastElementAsPivot)
    print('choose last element', compares)
    A = [int(l.strip()) for l in open(source).readlines()]
    compares = quicksort(A, 0, len(A)-1, chooseMedianOfThreeAsPivot)
    print('choose median of three', compares)
def main():
    # Run the sanity tests first, then the assignment input file.
    test()
    solution('QuickSort.txt')


if __name__ == '__main__':
    main()
|
[
"sg71.cherub@gmail.com"
] |
sg71.cherub@gmail.com
|
9b1aec656d50ff842d5761e6a750df7afab50cad
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/nlp/Bert-text-classification_for_PyTorch/transformers/tests/bartpho/test_tokenization_bartpho.py
|
3e35ad15c1ee543473709c7f66f9c1e22cda20ae
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 2,656
|
py
|
# coding=utf-8
# Copyright 2021 HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from os.path import dirname
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from ..test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = os.path.join(dirname(dirname(os.path.abspath(__file__))), "fixtures/test_sentencepiece_bpe.model")
class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    # Exercises BartphoTokenizer against the shared SentencePiece BPE fixture
    # plus a hand-written 5-entry monolingual vocabulary file.

    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        """Write the monolingual vocab file and save a pretrained tokenizer to tmpdir."""
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                # One "<token> <id>" pair per line.
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        # Reload the tokenizer saved in setUp, merging in the special tokens.
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        # "là" is outside the 5-entry vocab, so it round-trips as <unk>.
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Pieces outside the monolingual vocab convert to the <unk> id (3)."""
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
|
[
"dongwenbo6@huawei.com"
] |
dongwenbo6@huawei.com
|
d6667c371f5635050e24804b3548edbb78015a8e
|
d2e69d4d3d1e11a87f5a377e4a423422fe0a7058
|
/FullStack/12/celery_stuff/periodic_task.py
|
5d5dcb1b71af4eff0549304787a59963e751cecf
|
[] |
no_license
|
oJacker/_python
|
6f30dd4a60c1593d27c00ac485163fc0ba77dd8c
|
8086d0cd78e156abfff9819a56384149dd431c56
|
refs/heads/master
| 2021-05-06T03:13:29.167281
| 2018-02-01T09:41:42
| 2018-02-01T09:41:42
| 114,827,084
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 773
|
py
|
from celery import Celery
from celery.schedules import crontab
app = Celery()
@app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    """Register the periodic schedule once the Celery app is configured."""
    # Calls test('hello') every 10 seconds
    sender.add_periodic_task(10.0, test.s('hello'), name='add every 10')

    # Calls test('world') every 30 seconds.
    # BUG FIX: the original read ``add_periodic_task(30.0.test.s('world'), ...)``
    # — attribute access on the float 30.0 (AttributeError at registration);
    # the interval and the signature must be separate arguments.
    sender.add_periodic_task(30.0, test.s('world'), expires=10)

    # Executes every Monday morning at 7:30 a.m.
    sender.add_periodic_task(
        crontab(hour=7, minute=30, day_of_week=1),
        test.s('Happy Mondays!'),
    )
# app.conf.beat_schedule = {
# 'add-every-30-seconds':{
# 'task': 'tasks.add',
# 'schedule': 30.0,
# 'args': (16, 16)
# },
# }
# app.conf.timezone = 'UTC'
@app.task
def test(arg):
    """Trivial task used as the periodic-schedule target: prints its argument."""
    print(arg)
|
[
"623657285@qq.com"
] |
623657285@qq.com
|
97f136b14681008e20c099f12631a94a0fc21e33
|
b7203262280b8fabcf5573ea494e8e2408d8d2b9
|
/turtle/star.py
|
9282c3b8c1bd90f12603fc067e860a8e5d21d5fd
|
[
"Apache-2.0"
] |
permissive
|
MDGSF/PythonPractice
|
1c11994a047ecb01c74b0cf0b320b6ffc570209d
|
77e81d7c965c5de1629df223cb27dd541d128eb1
|
refs/heads/master
| 2021-06-16T13:49:00.310063
| 2021-04-15T11:32:24
| 2021-04-15T11:32:24
| 177,229,019
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import turtle as t
def main():
    # Draw a filled star: red outline, yellow fill.
    t.color('red', 'yellow')
    t.begin_fill()
    # Repeatedly draw a fixed-length edge and turn 170 degrees; the path
    # closes on itself after enough turns.
    while True:
        t.forward(200)
        t.left(170)
        # abs(t.pos()) is the distance from the origin; stop once the turtle
        # is back (within 1 unit) at its starting point.
        if abs(t.pos()) < 1:
            break
    t.end_fill()
    t.done()


if __name__ == "__main__":
    main()
|
[
"1342042894@qq.com"
] |
1342042894@qq.com
|
c351193f7c13944665260375b74e52d614f9e126
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/instances/nistData/list/double/Schema+Instance/NISTXML-SV-IV-list-double-maxLength-3-3.py
|
b87e6657a0667e04224d685b68de4abfd4acfadf
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917
| 2023-07-29T17:10:13
| 2023-07-30T12:11:13
| 239,622,251
| 2
| 0
|
MIT
| 2023-07-25T14:19:04
| 2020-02-10T21:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 415
|
py
|
from output.models.nist_data.list_pkg.double.schema_instance.nistschema_sv_iv_list_double_max_length_3_xsd.nistschema_sv_iv_list_double_max_length_3 import NistschemaSvIvListDoubleMaxLength3
# Test-instance fixture: a NistschemaSvIvListDoubleMaxLength3 whose list value
# holds five large double literals.
obj = NistschemaSvIvListDoubleMaxLength3(
    value=[
        6.828163737338829e+162,
        4.3832452374445357e+167,
        4.21622419951358e+263,
        4.477423873143575e+138,
        7.653382762597696e+277,
    ]
)
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
5b89597467106b28a80cea60757167381bfd8edc
|
2aee45d23b47c6adba9eafc5a84d606a021f9300
|
/web_dashboard/models/models.py
|
fec9ca50de1abcd2af54f1f4e7a897ee9cb90ce6
|
[] |
no_license
|
charles-123456/Primoris-System
|
23b183460ea79bfa8d896556aa35d62460154567
|
0880b8266eedfd0016a3b365c9939c34ad301155
|
refs/heads/main
| 2023-08-21T06:24:42.840026
| 2021-10-25T06:10:48
| 2021-10-25T06:10:48
| 385,922,247
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
# -*- coding: utf-8 -*-
from odoo import models, api
from lxml.builder import E
class BaseModel(models.AbstractModel):
    _inherit = 'base'

    @api.model
    def _get_default_dashboard_view(self):
        """ Generates a default dashboard view containing default sub graph and
            pivot views.

            :returns: a dashboard view as an lxml document
            :rtype: etree._Element
        """
        # Build <dashboard><view type="graph"/><view type="pivot"/></dashboard>
        # using lxml's E element factory.
        dashboard = E.dashboard()
        dashboard.append(E.view(type="graph"))
        dashboard.append(E.view(type="pivot"))
        return dashboard
|
[
"charlesit333@gmail.com"
] |
charlesit333@gmail.com
|
bb6fb54135f828268b32553a317043acc288650b
|
a2ab6c23253badb3be54b19ba061e1aeaac6a8cd
|
/utils/image_annotator.py
|
6d3168d8b386e8ae48a956d41739e56d99f89255
|
[] |
no_license
|
vivek09pathak/ImageDetection_RealTime
|
0720fb4a6f35a81591f401a04ae44aa3bbea013f
|
d9e376b41a1216aecaacc9626cee59d45001695c
|
refs/heads/master
| 2022-12-26T22:04:18.328476
| 2020-09-30T10:20:15
| 2020-09-30T10:20:15
| 152,729,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,380
|
py
|
import cv2
import numpy as np
from data.videos import path as videos_path
def from_image(image):
try:
grid_interval = 25
grid_color = (200, 100, 200)
points = []
current = [0, 0]
width = image.shape[1]
height = image.shape[0]
img = image.copy()
c_x = int(width / 2)
c_y = int(height / 2)
for i in range(0, c_x + 1, grid_interval):
cv2.line(img, (i, 0), (i, height), grid_color, 1)
cv2.line(img, (width - i, 0), (width - i, height), grid_color, 1)
for i in range(0, c_y + 1, grid_interval):
cv2.line(img, (0, i), (width, i), grid_color, 1)
cv2.line(img, (0, height - i), (width, height - i), grid_color, 1)
def select_point(event, x, y, flags, param):
current[0] = x
current[1] = y
if event == cv2.EVENT_LBUTTONDBLCLK:
points.append([x, y])
winname = 'window1'
print(winname)
cv2.namedWindow(winname)
cv2.imshow(winname, image)
cv2.resizeWindow(winname, 200, 200)
cv2.setMouseCallback(winname, select_point)
cv2.moveWindow(winname, 0, 0)
while True:
temp_img = img.copy()
cv2.putText(temp_img, str(current), (current[0] + 20, current[1]), cv2.FONT_HERSHEY_PLAIN, 0.5,
(255, 255, 255), 1)
for point in points:
cv2.circle(temp_img, (point[0], point[1]), 1, (255, 0, 0), -1)
cv2.imshow(winname, temp_img)
k = cv2.waitKey(20) & 0xFF
if k == 8:
try:
points.pop()
except:
pass
if k == 27:
break
print("Here!!!")
roi = np.float32(np.array(points.copy()))
mark = 0.47 * width
temp_img = image.copy()
cv2.polylines(temp_img, [np.int32(roi)], 1, (0, 255, 0), 3)
cv2.imshow(winname, temp_img)
cv2.waitKey(0)
roi = roi.tolist()
if roi:
return roi
while(True):
k = cv2.waitKey(0)
except:
pass
if __name__ == '__main__':
cap = cv2.VideoCapture(videos_path.get()+'/ra_rafee_cabin_1.mp4')
ret = False
while not ret:
ret, frame = cap.read()
print(from_image(frame))
|
[
"anupamb266@gmail.com"
] |
anupamb266@gmail.com
|
ab2e4af17f8e39c556b8394fc307067c0fcf635b
|
da0d673da16f92ffed008b4c8b8c82c336d78122
|
/server/app.py
|
90039f57aee1197bfae1a0fe21bfe9586aac1f61
|
[] |
no_license
|
aparkalov/sc-web
|
5dcac607c42376df205c6a025dbfe076f018970b
|
43c0d79f0fefa435bb9f53b230e9b9048e000613
|
refs/heads/master
| 2021-01-17T18:21:58.247615
| 2015-10-25T20:45:11
| 2015-10-25T20:45:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,155
|
py
|
import tornado.ioloop
import tornado.web
import tornado.options
import secret
import os
from handlers.main import MainHandler
import handlers.api as api
import handlers.auth as auth
import admin.main as admin
import admin.users as admin_users
import ws, db
is_closing = False
def signal_handler(signum, frame):
global is_closing
is_closing = True
def try_exit():
global is_closing
if is_closing:
# clean up here
tornado.ioloop.IOLoop.instance().stop()
class NoCacheStaticHandler(tornado.web.StaticFileHandler):
""" Request static file handlers for development and debug only.
It disables any caching for static file.
"""
def set_extra_headers(self, path):
self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0')
def main():
tornado.options.define("static_path", default = "../client/static", help = "path to static files directory", type = str)
tornado.options.define("templates_path", default = "../client/templates", help = "path to template files directory", type = str)
tornado.options.define("sctp_port", default = 55770, help = "port of sctp server", type = int)
tornado.options.define("sctp_host", default = "localhost", help = "host of sctp server", type = str)
tornado.options.define("event_wait_timeout", default = 10, help = "time to wait commands processing", type = int)
tornado.options.define("idtf_serach_limit", default = 30, help = "number of maximum results for searching by identifier", type = int)
tornado.options.define("redis_host", default = "localhost", help = "host of redis server", type = str)
tornado.options.define("redis_port", default = 6379, help = "port of redis server", type = int)
tornado.options.define("redis_db_idtf", default = 0, help = "number of redis database to store identifiers", type = int)
tornado.options.define("redis_db_user", default = 1, help = "number of redis database to store user info", type = int)
tornado.options.define("host", default = "localhost", help = "host name", type = str)
tornado.options.define("port", default = 8000, help = "host port", type = int)
tornado.options.define("google_client_id", default = "", help = "client id for google auth", type = str)
tornado.options.define("google_client_secret", default = "", help = "client secret for google auth", type = str)
tornado.options.define("user_key_expire_time", default = 600, help = "user key expire time in seconds", type = int)
tornado.options.define("super_emails", default = "", help = "email of site super administrator (maximum rights)", type = list)
tornado.options.define("db_path", default = "data.db", help = "path to database file", type = str)
tornado.options.define("cfg", default = "server.conf", help = "path to configuration file", type = str)
tornado.options.parse_command_line()
if os.path.exists(tornado.options.options.cfg):
tornado.options.parse_config_file(tornado.options.options.cfg)
# prepare database
database = db.DataBase()
database.init()
rules = [
(r"/", MainHandler),
(r"/static/(.*)", NoCacheStaticHandler, {"path": tornado.options.options.static_path}),
# api
(r"/api/init/", api.Init),
(r"/api/context/", api.ContextMenu),
(r"/api/cmd/do/", api.CmdDo),
(r"/api/question/answer/translate/", api.QuestionAnswerTranslate),
(r"/api/link/content/", api.LinkContent),
(r"/api/link/format/", api.LinkFormat),
(r"/api/languages/", api.Languages),
(r"/api/languages/set/", api.LanguageSet),
(r"/api/idtf/find/", api.IdtfFind),
(r"/api/idtf/resolve/", api.IdtfResolve),
(r"/api/addr/resolve/", api.AddrResolve),
(r"/api/info/tooltip/", api.InfoTooltip),
(r"/api/user/", api.User),
(r"/auth/google$", auth.GoogleOAuth2LoginHandler),
(r"/auth/logout$", auth.LogOut),
(r"/admin$", admin.MainHandler),
(r"/admin/users/get$", admin_users.UsersInfo),
(r"/admin/users/set_rights$", admin_users.UserSetRights),
(r"/admin/users/list_rights$", admin_users.UserListRights),
(r"/sctp", ws.SocketHandler),
]
application = tornado.web.Application(
handlers = rules,
cookie_secret = secret.get_secret(),
login_url = "/auth/google",
template_path = tornado.options.options.templates_path,
xsrf_cookies = False,
gzip = True,
google_oauth = {"key": tornado.options.options.google_client_id,
"secret": tornado.options.options.google_client_secret
}
)
application.listen(tornado.options.options.port)
tornado.ioloop.PeriodicCallback(try_exit, 1000).start()
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
[
"denis.koronchik@gmail.com"
] |
denis.koronchik@gmail.com
|
efdf17b0386a062ae1786386cfcd575b6dfe973d
|
397e125e94f4f139f2bf5055824d81f24b8b1757
|
/ABC/165/F-1.py
|
e46b77a108de4a46bd7d12686ca0056a7ddfe1e2
|
[] |
no_license
|
tails1434/Atcoder
|
ecbab6ee238e3f225551297db961b1b502841fa4
|
e7c7fed36be46bbaaf020a70997842240ba98d62
|
refs/heads/master
| 2021-07-07T00:31:49.235625
| 2020-09-30T01:42:01
| 2020-09-30T01:42:01
| 189,009,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,035
|
py
|
import sys
import bisect
from collections import deque
sys.setrecursionlimit(4100000)
input = sys.stdin.readline
def main():
N = int(input())
A = list(map(int, input().split()))
edge = [[] for _ in range(N)]
for i in range(N-1):
u, v = map(int, input().split())
u -= 1
v -= 1
edge[u].append(v)
edge[v].append(u)
ans = [0] * (N + 1)
LIS = [-1]
def dfs(v, p = N):
if A[v] > LIS[-1]:
LIS.append(A[v])
ans[v] = ans[p] + 1
for u in edge[v]:
if u == p:
continue
dfs(u,v)
LIS.pop()
else:
ans[v] = ans[p]
idx = bisect.bisect_left(LIS, A[v])
old = LIS[idx]
LIS[idx] = A[v]
for u in edge[v]:
if u == p:
continue
dfs(u,v)
LIS[idx] = old
dfs(0)
for i in range(N):
print(ans[i])
if __name__ == "__main__":
main()
|
[
"sososo1333@gmail.com"
] |
sososo1333@gmail.com
|
e5298e2cb42519ec6fdfc02dc68398406969417c
|
cc096d321ab5c6abf54fdcea67f10e77cd02dfde
|
/flex-backend/pypy/translator/js/function.py
|
c14b8c8c19d81579e42f69e8c29f1ca539832eaa
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
limweb/flex-pypy
|
310bd8fcd6a9ddc01c0b14a92f0298d0ae3aabd2
|
05aeeda183babdac80f9c10fca41e3fb1a272ccb
|
refs/heads/master
| 2021-01-19T22:10:56.654997
| 2008-03-19T23:51:59
| 2008-03-19T23:51:59
| 32,463,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,649
|
py
|
try:
set
except NameError:
from sets import Set as set
from pypy.objspace.flow import model as flowmodel
from pypy.rpython.lltypesystem.lltype import Signed, Unsigned, Void, Bool, Float
from pypy.rpython.lltypesystem.lltype import SignedLongLong, UnsignedLongLong
from pypy.rpython.ootypesystem import ootype
from pypy.translator.oosupport.metavm import Generator,InstructionList
from pypy.translator.oosupport import function
from pypy.translator.js.log import log
from types import FunctionType
import re
class BaseGenerator(object):
def load(self, v):
if isinstance(v, flowmodel.Variable):
if v.name in self.argset:
selftype, selfname = self.args[0]
if self.is_method and v.name == selfname:
self.ilasm.load_self()
else:
self.ilasm.load_arg(v)
else:
self.ilasm.load_local(v)
elif isinstance(v, flowmodel.Constant):
self.db.load_const(v.concretetype, v.value, self.ilasm)
elif isinstance(v, str):
self.ilasm.load_const("'" + v + "'")
else:
assert False
def store(self, v):
assert isinstance(v, flowmodel.Variable)
if v.concretetype is not Void:
self.ilasm.store_local(v)
else:
self.ilasm.store_void()
def change_name(self, name, to_name):
self.ilasm.change_name(name, to_name)
def add_comment(self, text):
pass
def function_signature(self, graph):
return self.cts.graph_to_signature(graph, False)
def class_name(self, ooinstance):
return ooinstance._name
def emit(self, instr, *args):
self.ilasm.emit(instr, *args)
def call_graph(self, graph):
self.db.pending_function(graph)
func_sig = self.function_signature(graph)
self.ilasm.call(func_sig)
def call_external(self, name, args):
self.ilasm.call((name, args))
#def call_signature(self, signature):
# self.ilasm.call(signature)
def cast_to(self, lltype):
cts_type = self.cts.lltype_to_cts(lltype, False)
self.ilasm.castclass(cts_type)
def new(self, obj):
self.ilasm.new(self.cts.obj_name(obj))
def set_field(self, obj, name):
self.ilasm.set_field(obj, name)
#self.ilasm.set_field(self.field_name(obj,name))
def get_field(self, useless_stuff, name):
self.ilasm.get_field(name)
def call_method(self, obj, name):
func_name, signature = self.cts.method_signature(obj, name)
self.ilasm.call_method(obj, name, signature)
def call_external_method(self, name, arg_len):
self.ilasm.call_method(None, name, [0]*arg_len)
def instantiate(self):
self.ilasm.runtimenew()
def downcast(self, TYPE):
pass
def load_special(self, v):
# special case for loading value
# when setting builtin field we need to load function instead of None
# FIXME: we cheat here
if isinstance(v, flowmodel.Constant) and v.concretetype is ootype.Void and isinstance(v.value, FunctionType):
graph = self.db.translator.annotator.bookkeeper.getdesc(v.value).cachedgraph(None)
self.db.pending_function(graph)
name = graph.name
self.ilasm.load_str(name)
else:
self.load(v)
def cast_function(self, name, num):
self.ilasm.cast_function(name, num)
def prefix_op(self, st):
self.ilasm.prefix_op(st)
def load_str(self, s):
self.ilasm.load_str(s)
def load_void(self):
self.ilasm.load_void()
def list_setitem(self, base_obj, item, val):
self.load(base_obj)
self.load(val)
self.load(item)
self.ilasm.list_setitem()
def list_getitem(self, base_obj, item):
self.load(base_obj)
self.load(item)
self.ilasm.list_getitem()
def push_primitive_constant(self, TYPE, value):
self.db.load_const(TYPE, value, self.ilasm)
def branch_unconditionally(self, target_label):
self.ilasm.jump_block(self.block_map[target_label])
def branch_conditionally(self, exitcase, target_label):
self.ilasm.branch_if(exitcase)
self.ilasm.jump_block(self.block_map[target_label])
self.ilasm.close_branch()
class Function(function.Function, BaseGenerator):
def __init__(self, db, graph, name=None, is_method=False,
is_entrypoint=False, _class=None):
self._class = _class
super(Function, self).__init__(db, graph, name, is_method, is_entrypoint)
self._set_args()
self._set_locals()
self.order = 0
self.name = name or self.db.get_uniquename(self.graph, self.graph.name)
def _setup_link(self, link, is_exc_link = False):
target = link.target
for to_load, to_store in zip(link.args, target.inputargs):
if to_load.concretetype is not Void:
if is_exc_link and isinstance(to_load, flowmodel.Variable) and re.match("last_exc_value", to_load.name):
self.ilasm.load_str("exc")
else:
self.load(to_load)
self.store(to_store)
def _create_generator(self, ilasm):
return self
def begin_render(self):
block_map = {}
for blocknum, block in enumerate(self.graph.iterblocks()):
block_map[self._get_block_name(block)] = blocknum
self.block_map = block_map
if self.is_method:
args = self.args[1:] # self is implicit
else:
args = self.args
if self.is_method:
self.ilasm.begin_method(self.name, self._class, [i[1] for i in args])
else:
self.ilasm.begin_function(self.name, args)
self.ilasm.set_locals(",".join([i[1] for i in self.locals]))
self.ilasm.begin_for()
def render_return_block(self, block):
return_var = block.inputargs[0]
if return_var.concretetype is not Void:
self.load(return_var)
self.ilasm.ret()
else:
self.ilasm.load_void()
self.ilasm.ret()
def end_render(self):
self.ilasm.end_for()
self.ilasm.end_function()
def render_raise_block(self, block):
self.ilasm.throw(block.inputargs[1])
def end_try(self, target_label):
self.ilasm.jump_block(self.block_map[target_label])
self.ilasm.catch()
#self.ilasm.close_branch()
def record_ll_meta_exc(self, ll_meta_exc):
pass
def begin_catch(self, llexitcase):
real_name = self.cts.lltype_to_cts(llexitcase._inst.class_._INSTANCE)
s = "isinstanceof(exc, %s)"%real_name
self.ilasm.branch_if_string(s)
def end_catch(self, target_label):
""" Ends the catch block, and branchs to the given target_label as the
last item in the catch block """
self.ilasm.close_branch()
def store_exception_and_link(self, link):
self._setup_link(link, True)
self.ilasm.jump_block(self.block_map[self._get_block_name(link.target)])
def after_except_block(self):
#self.ilasm.close_branch()
self.ilasm.throw_real("exc")
self.ilasm.close_branch()
def set_label(self, label):
self.ilasm.write_case(self.block_map[label])
#self.ilasm.label(label)
def begin_try(self):
self.ilasm.begin_try()
def clean_stack(self):
self.ilasm.clean_stack()
|
[
"lucio.torre@dbd81ab4-9648-0410-a770-9b81666e587d"
] |
lucio.torre@dbd81ab4-9648-0410-a770-9b81666e587d
|
02c88c50e679434e3e4ae163ee0e5026f6e74efc
|
e489172f6e49e1239db56c047a78a29a6ffc0b36
|
/via_purchase_enhancements/stock.py
|
94ac503ae599e50b3f088a456a70e97b5fede72d
|
[] |
no_license
|
eksotama/prln-via-custom-addons
|
f05d0059353ae1de89ccc8d1625a896c0215cfc7
|
f2b44a8af0e7bee87d52d258fca012bf44ca876f
|
refs/heads/master
| 2020-03-25T19:49:08.117628
| 2015-12-01T07:29:43
| 2015-12-01T07:29:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,321
|
py
|
##############################################################################
#
# Vikasa Infinity Anugrah, PT
# Copyright (c) 2011 - 2012 Vikasa Infinity Anugrah <http://www.infi-nity.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from osv import osv, fields
class stock_move(osv.osv):
_inherit = 'stock.move'
_columns = {
'po_line_desc':fields.related('purchase_line_id', 'name', type="char", string="PO Description", readonly=True),
'po_line_note':fields.related('purchase_line_id', 'notes', type="text", string="PO Notes", readonly=True),
}
stock_move()
|
[
"aero@aero.(none)"
] |
aero@aero.(none)
|
680addf26dba5b848a8d83cd34e73d8f679a6b41
|
b67ba573498318c906968bd2c946543dbd4658fe
|
/gravityspytools/retrain_model/views.py
|
479c7bb11b8c958af3bc4fd322cdb30c88d2d2d7
|
[
"BSD-3-Clause"
] |
permissive
|
Gravity-Spy/gravityspytools
|
1f86f91a00063afdfe507f1d1bf38be5c8e1b421
|
23ef83e36ed934f7c39440bf43f4d5c7b7b4abb0
|
refs/heads/master
| 2021-05-09T08:55:12.904449
| 2020-07-14T18:38:25
| 2020-07-14T18:38:25
| 119,413,494
| 4
| 4
|
BSD-3-Clause
| 2020-07-14T18:38:27
| 2018-01-29T17:05:08
|
Python
|
UTF-8
|
Python
| false
| false
| 2,405
|
py
|
# -*- coding: utf-8 -*-
#from __future__ import unicode_literals
from django.shortcuts import render, redirect
from login.utils import make_authorization_url
from collection_to_subjectset.utils import retrieve_subjects_from_collection
from .forms import NewClassForm
from .models import NewClass
from gwpy.table import EventTable
def index(request):
if request.user.is_authenticated:
form = NewClassForm()
return render(request, 'retrain-model-form.html', {'form': form})
else:
return redirect(make_authorization_url())
def retrain_model(request):
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = NewClassForm(request.POST)
# check whether it's valid:
if form.is_valid():
collection_owner = str(form.cleaned_data['collection_owner'])
collection_name = str(form.cleaned_data['collection_name'])
new_class_name = str(form.cleaned_data['new_class_name'])
# First determine the subjects attempting to be added to the training set
subjects_in_collection, tmp = retrieve_subjects_from_collection(collection_owner, collection_name)
subjects_in_collection = [str(isubject) for isubject in subjects_in_collection]
new_subjects = list(EventTable.fetch('gravityspy',
'glitches WHERE CAST(links_subjects AS FLOAT) IN ({0})'.format(str(",".join(subjects_in_collection))),
columns=["gravityspy_id"], host='gravityspyplus.ciera.northwestern.edu')['gravityspy_id'])
requested_model, created = NewClass.objects.get_or_create(collection_owner=collection_owner,
collection_name=collection_name,
new_class_name=new_class_name,
new_subjects=new_subjects,
user=request.user)
requested_model.save()
return render(request, 'temp.html')
else:
return render(request, 'retrain-model-form.html', {'form': form})
|
[
"scottcoughlin2014@u.northwestern.edu"
] |
scottcoughlin2014@u.northwestern.edu
|
5d17e513b427415520b3fd591509b1b5542e8fb0
|
556db265723b0cc30ad2917442ed6dad92fd9044
|
/tensorflow/lite/tutorials/dataset.py
|
fdaf84c2bb43e306fe0bf9c9172c996cdcefe1c6
|
[
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
graphcore/tensorflow
|
c1669b489be0e045b3ec856b311b3139858de196
|
085b20a4b6287eff8c0b792425d52422ab8cbab3
|
refs/heads/r2.6/sdk-release-3.2
| 2023-07-06T06:23:53.857743
| 2023-03-14T13:04:04
| 2023-03-14T13:48:43
| 162,717,602
| 84
| 17
|
Apache-2.0
| 2023-03-25T01:13:37
| 2018-12-21T13:30:38
|
C++
|
UTF-8
|
Python
| false
| false
| 4,299
|
py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.data.Dataset interface to the MNIST dataset.
This is cloned from
https://github.com/tensorflow/models/blob/master/official/r1/mnist/dataset.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import shutil
import tempfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
def read32(bytestream):
"""Read 4 bytes from bytestream as an unsigned 32-bit integer."""
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
def check_image_file_header(filename):
"""Validate that filename corresponds to images for the MNIST dataset."""
with tf.gfile.Open(filename, 'rb') as f:
magic = read32(f)
read32(f) # num_images, unused
rows = read32(f)
cols = read32(f)
if magic != 2051:
raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,
f.name))
if rows != 28 or cols != 28:
raise ValueError(
'Invalid MNIST file %s: Expected 28x28 images, found %dx%d' %
(f.name, rows, cols))
def check_labels_file_header(filename):
"""Validate that filename corresponds to labels for the MNIST dataset."""
with tf.gfile.Open(filename, 'rb') as f:
magic = read32(f)
read32(f) # num_items, unused
if magic != 2049:
raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,
f.name))
def download(directory, filename):
"""Download (and unzip) a file from the MNIST dataset if not already done."""
filepath = os.path.join(directory, filename)
if tf.gfile.Exists(filepath):
return filepath
if not tf.gfile.Exists(directory):
tf.gfile.MakeDirs(directory)
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'
_, zipped_filepath = tempfile.mkstemp(suffix='.gz')
print('Downloading %s to %s' % (url, zipped_filepath))
urllib.request.urlretrieve(url, zipped_filepath)
with gzip.open(zipped_filepath, 'rb') as f_in, \
tf.gfile.Open(filepath, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(zipped_filepath)
return filepath
def dataset(directory, images_file, labels_file):
"""Download and parse MNIST dataset."""
images_file = download(directory, images_file)
labels_file = download(directory, labels_file)
check_image_file_header(images_file)
check_labels_file_header(labels_file)
def decode_image(image):
# Normalize from [0, 255] to [0.0, 1.0]
image = tf.decode_raw(image, tf.uint8)
image = tf.cast(image, tf.float32)
image = tf.reshape(image, [784])
return image / 255.0
def decode_label(label):
label = tf.decode_raw(label, tf.uint8) # tf.string -> [tf.uint8]
label = tf.reshape(label, []) # label is a scalar
return tf.to_int32(label)
images = tf.data.FixedLengthRecordDataset(
images_file, 28 * 28, header_bytes=16).map(decode_image)
labels = tf.data.FixedLengthRecordDataset(
labels_file, 1, header_bytes=8).map(decode_label)
return tf.data.Dataset.zip((images, labels))
def train(directory):
"""tf.data.Dataset object for MNIST training data."""
return dataset(directory, 'train-images-idx3-ubyte',
'train-labels-idx1-ubyte')
def test(directory):
"""tf.data.Dataset object for MNIST test data."""
return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
27175455b13f7f9029aeb649eab51391f2080bbf
|
6be8aa517e679b33b47d35f100e6590902a8a1db
|
/Math/Problem05.py
|
bd5e99bc0820fd1c4d6b55798e43ebf0508f2baa
|
[] |
no_license
|
LeeJuhae/Algorithm-Python
|
7ca4762712e5e84d1e277abecb3bf39c9cbd4e56
|
729947b4428205adfbac194a5527b0eeafe1c525
|
refs/heads/master
| 2023-04-24T01:02:36.430970
| 2021-05-23T07:17:25
| 2021-05-23T07:17:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,075
|
py
|
# https://www.acmicpc.net/problem/1722
import sys
read = sys.stdin.readline
n = int(read().strip())
cmd = list(map(int, read().strip().split()))
cmd[0] -= 1
fact = [-1 for _ in range(21)]
fact[0] = 1
for i in range(1, 21):
fact[i] = fact[i - 1] * i
def go(d, ans):
global k, arr, fact
if d == n:
print(" ".join(map(str, ans)))
sys.exit()
c = len(arr)
cnt = fact[c - 1]
for i in range(c):
if k <= (i + 1) * cnt:
ans.append(arr[i])
arr.pop(i)
k -= i * cnt
go(d + 1, ans)
def binary(arr, target):
l, r = 0, len(arr)
while l < r:
mid = (l + r) // 2
if arr[mid] < target:
l = mid + 1
else:
r = mid
return r
if cmd[0]:
# 몇 번째인지 찾기
ret = 0
arr = list(range(1, n + 1))
for e in cmd[1:]:
n -= 1
idx = binary(arr, e)
arr.pop(idx)
ret += fact[n] * idx
print(ret + 1)
else:
# 순열 찾기
k = cmd[1]
arr = list(range(1, n + 1))
go(0, [])
|
[
"gusdn0657@gmail.com"
] |
gusdn0657@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.