| Column | Type | Range / distinct values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 147 classes (nullable) |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 |
| author_id | string | length 1–132 |
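For orientation, a minimal sketch of how rows with this schema could be streamed and filtered with the Hugging Face `datasets` library; the dataset identifier `org/python-code-dump` is a placeholder, not the actual source of this dump.

import itertools
from datasets import load_dataset

# Placeholder dataset ID; substitute the actual source of this table.
ds = load_dataset("org/python-code-dump", split="train", streaming=True)

# Keep only permissively licensed files under 10 kB.
small_permissive = ds.filter(
    lambda row: row["license_type"] == "permissive" and row["length_bytes"] < 10_000
)

for row in itertools.islice(small_permissive, 3):
    print(row["repo_name"], row["path"], row["length_bytes"])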
19effaf2fd28cbfbcc5bf1197122f93d208d746b
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-res/huaweicloudsdkres/v1/model/show_res_datasource_request.py
|
174e9c77405184d91a01ce3c43989fd6fba03d2b
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,956
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowResDatasourceRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'content_type': 'str',
'workspace_id': 'str',
'datasource_id': 'str'
}
attribute_map = {
'content_type': 'Content-Type',
'workspace_id': 'workspace_id',
'datasource_id': 'datasource_id'
}
def __init__(self, content_type=None, workspace_id=None, datasource_id=None):
"""ShowResDatasourceRequest
The model defined in huaweicloud sdk
:param content_type: Content type; the value is application/json.
:type content_type: str
:param workspace_id: Workspace ID.
:type workspace_id: str
:param datasource_id: Data source ID.
:type datasource_id: str
"""
self._content_type = None
self._workspace_id = None
self._datasource_id = None
self.discriminator = None
self.content_type = content_type
self.workspace_id = workspace_id
self.datasource_id = datasource_id
@property
def content_type(self):
"""Gets the content_type of this ShowResDatasourceRequest.
Content type; the value is application/json.
:return: The content_type of this ShowResDatasourceRequest.
:rtype: str
"""
return self._content_type
@content_type.setter
def content_type(self, content_type):
"""Sets the content_type of this ShowResDatasourceRequest.
Content type; the value is application/json.
:param content_type: The content_type of this ShowResDatasourceRequest.
:type content_type: str
"""
self._content_type = content_type
@property
def workspace_id(self):
"""Gets the workspace_id of this ShowResDatasourceRequest.
Workspace ID.
:return: The workspace_id of this ShowResDatasourceRequest.
:rtype: str
"""
return self._workspace_id
@workspace_id.setter
def workspace_id(self, workspace_id):
"""Sets the workspace_id of this ShowResDatasourceRequest.
Workspace ID.
:param workspace_id: The workspace_id of this ShowResDatasourceRequest.
:type workspace_id: str
"""
self._workspace_id = workspace_id
@property
def datasource_id(self):
"""Gets the datasource_id of this ShowResDatasourceRequest.
Data source ID.
:return: The datasource_id of this ShowResDatasourceRequest.
:rtype: str
"""
return self._datasource_id
@datasource_id.setter
def datasource_id(self, datasource_id):
"""Sets the datasource_id of this ShowResDatasourceRequest.
Data source ID.
:param datasource_id: The datasource_id of this ShowResDatasourceRequest.
:type datasource_id: str
"""
self._datasource_id = datasource_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowResDatasourceRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
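# Illustrative usage, not part of the generated file; the IDs below are placeholders:
#   request = ShowResDatasourceRequest(workspace_id="my-workspace-id", datasource_id="my-datasource-id")
#   print(request)  # serialized to JSON by to_str()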
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
c810b83e4d978275269dbf2edf81ba3749d40a39
|
9edaf93c833ba90ae9a903aa3c44c407a7e55198
|
/travelport/models/general_remark_3.py
|
37e4e5426aafa26b117cdb81d48dcfa92904cffc
|
[] |
no_license
|
tefra/xsdata-samples
|
c50aab4828b8c7c4448dbdab9c67d1ebc519e292
|
ef027fe02e6a075d8ed676c86a80e9647d944571
|
refs/heads/main
| 2023-08-14T10:31:12.152696
| 2023-07-25T18:01:22
| 2023-07-25T18:01:22
| 222,543,692
| 6
| 1
| null | 2023-06-25T07:21:04
| 2019-11-18T21:00:37
|
Python
|
UTF-8
|
Python
| false
| false
| 4,367
|
py
|
from __future__ import annotations
from dataclasses import dataclass, field
from xsdata.models.datatype import XmlDate, XmlDateTime
from travelport.models.type_direction_3 import TypeDirection3
from travelport.models.type_element_status_4 import TypeElementStatus4
from travelport.models.type_product_3 import TypeProduct3
__NAMESPACE__ = "http://www.travelport.com/schema/common_v33_0"
@dataclass
class GeneralRemark3:
"""A textual remark container to hold any printable text.
(max 512 chars)
Parameters
----------
remark_data
Actual remarks data.
booking_traveler_ref
Reference to Booking Traveler.
key
category
A category to group and organize the various remarks. This is not
required, but it is recommended.
type_in_gds
supplier_type
The type of product this reservation is relative to
provider_reservation_info_ref
Provider reservation reference key.
provider_code
supplier_code
direction
Direction Incoming or Outgoing of the GeneralRemark.
create_date
The date and time that this GeneralRemark was created.
use_provider_native_mode
Will be true when terminal process required, else false
el_stat
This attribute is used to show the action results of an element.
Possible values are "A" (when elements have been added to the UR)
and "M" (when existing elements have been modified). Response only.
key_override
If a duplicate key is found where we are adding elements in some
cases like URAdd, then instead of erroring out set this attribute to
true.
"""
class Meta:
name = "GeneralRemark"
namespace = "http://www.travelport.com/schema/common_v33_0"
remark_data: None | str = field(
default=None,
metadata={
"name": "RemarkData",
"type": "Element",
"required": True,
}
)
booking_traveler_ref: list[str] = field(
default_factory=list,
metadata={
"name": "BookingTravelerRef",
"type": "Element",
"max_occurs": 999,
}
)
key: None | str = field(
default=None,
metadata={
"name": "Key",
"type": "Attribute",
}
)
category: None | str = field(
default=None,
metadata={
"name": "Category",
"type": "Attribute",
"max_length": 10,
}
)
type_in_gds: None | str = field(
default=None,
metadata={
"name": "TypeInGds",
"type": "Attribute",
"max_length": 30,
}
)
supplier_type: None | TypeProduct3 = field(
default=None,
metadata={
"name": "SupplierType",
"type": "Attribute",
}
)
provider_reservation_info_ref: None | str = field(
default=None,
metadata={
"name": "ProviderReservationInfoRef",
"type": "Attribute",
}
)
provider_code: None | str = field(
default=None,
metadata={
"name": "ProviderCode",
"type": "Attribute",
"min_length": 2,
"max_length": 5,
}
)
supplier_code: None | str = field(
default=None,
metadata={
"name": "SupplierCode",
"type": "Attribute",
"min_length": 2,
"max_length": 5,
}
)
direction: None | TypeDirection3 = field(
default=None,
metadata={
"name": "Direction",
"type": "Attribute",
}
)
create_date: None | XmlDateTime = field(
default=None,
metadata={
"name": "CreateDate",
"type": "Attribute",
}
)
use_provider_native_mode: bool = field(
default=False,
metadata={
"name": "UseProviderNativeMode",
"type": "Attribute",
}
)
el_stat: None | TypeElementStatus4 = field(
default=None,
metadata={
"name": "ElStat",
"type": "Attribute",
}
)
key_override: None | bool = field(
default=None,
metadata={
"name": "KeyOverride",
"type": "Attribute",
}
)
|
[
"chris@komposta.net"
] |
chris@komposta.net
|
30acd6fabbb86e2029fe9bdb373bcb1912239b99
|
7b4820948845f55274b211d676ab8a6253a6298b
|
/addons/plugin.video.phstreams/default.py
|
165d263833b1730268ad874343597e83e0a9e838
|
[] |
no_license
|
bopopescu/mw
|
524c57d4b859751e298b907a12e44e9711ef72a6
|
5ef2acea0fb4150578e53201463c6bc5da37be20
|
refs/heads/master
| 2021-05-30T19:33:11.750160
| 2016-01-11T05:28:46
| 2016-01-11T05:28:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,159
|
py
|
# -*- coding: utf-8 -*-
'''
Phoenix Add-on
Copyright (C) 2015 Blazetamer
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urlparse,sys
params = dict(urlparse.parse_qsl(sys.argv[2].replace('?','')))
try:
action = params['action']
except:
action = None
try:
name = params['name']
except:
name = '0'
try:
url = params['url']
except:
url = '0'
try:
playable = params['playable']
except:
playable = '0'
try:
content = params['content']
except:
content = '0'
try:
tvshow = params['tvshow']
except:
tvshow = '0'
try:
audio = params['audio']
except:
audio = '0'
try:
image = params['image']
except:
image = '0'
try:
fanart = params['fanart']
except:
fanart = '0'
if action == None:
from resources.lib.indexers import phstreams
phstreams.getCategory()
elif action == 'dmode' or action == 'ndmode':
from resources.lib.indexers import phstreams
phstreams.getDirectory(name, url, audio, image, fanart, playable, content)
elif action == 'subDirectory':
from resources.lib.indexers import phstreams
phstreams.subDirectory(name, url, audio, image, fanart, playable, tvshow, content)
elif action == 'localDirectory':
from resources.lib.indexers import phstreams
phstreams.localDirectory()
elif action == 'search':
from resources.lib.indexers import phstreams
phstreams.getSearch()
elif action == 'searchDirectory':
from resources.lib.indexers import phstreams
phstreams.searchDirectory()
elif action == 'searchDirectory2':
from resources.lib.indexers import phstreams
phstreams.searchDirectory(url)
elif action == 'clearSearch':
from resources.lib.indexers import phstreams
phstreams.clearSearch()
elif action == 'resolveUrl':
from resources.lib.indexers import phstreams
phstreams.resolveUrl(name, url, audio, image, fanart, playable, content)
elif action == 'openDialog':
from resources.lib.libraries import phdialogs
phdialogs.openDialog(url,audio)
elif action == 'openSettings':
from resources.lib.libraries import control
control.openSettings()
elif action == 'addView':
from resources.lib.libraries import views
views.addView(content)
elif action == 'downloader':
from resources.lib.libraries import downloader
downloader.downloader()
elif action == 'addDownload':
from resources.lib.libraries import downloader
downloader.addDownload(name,url,image)
elif action == 'removeDownload':
from resources.lib.libraries import downloader
downloader.removeDownload(url)
elif action == 'startDownload':
from resources.lib.libraries import downloader
downloader.startDownload()
elif action == 'startDownloadThread':
from resources.lib.libraries import downloader
downloader.startDownloadThread()
elif action == 'stopDownload':
from resources.lib.libraries import downloader
downloader.stopDownload()
elif action == 'statusDownload':
from resources.lib.libraries import downloader
downloader.statusDownload()
elif action == 'trailer':
from resources.lib.libraries import trailer
trailer.trailer().play(name)
elif action == 'clearCache':
from resources.lib.libraries import cache
cache.clear()
elif action == 'radioDirectory':
from resources.lib.indexers import phradios
phradios.radioDirectory()
elif action == 'radioResolve':
from resources.lib.indexers import phradios
phradios.radioResolve(name, url, image)
elif action == 'radio1fm':
from resources.lib.indexers import phradios
phradios.radio1fm(image, fanart)
elif action == 'radio181fm':
from resources.lib.indexers import phradios
phradios.radio181fm(image, fanart)
elif action == 'radiotunes':
from resources.lib.indexers import phradios
phradios.radiotunes(image, fanart)
elif action == 'Kickinradio':
from resources.lib.indexers import phradios
phradios.Kickinradio(image, fanart)
elif action == 'Kickinradiocats':
from resources.lib.indexers import phradios
phradios.Kickinradiocats(url, image, fanart)
elif action == 'CartoonDirectory':
from resources.lib.indexers import phtoons
phtoons.CartoonDirectory()
elif action == 'CartoonCrazy':
from resources.lib.indexers import phtoons
phtoons.CartoonCrazy(image, fanart)
elif action == 'CCsearch':
from resources.lib.indexers import phtoons
phtoons.CCsearch(url, image, fanart)
elif action == 'CCcat':
from resources.lib.indexers import phtoons
phtoons.CCcat(url, image, fanart)
elif action == 'CCpart':
from resources.lib.indexers import phtoons
phtoons.CCpart(url, image, fanart)
elif action == 'CCstream':
from resources.lib.indexers import phtoons
phtoons.CCstream(url)
elif action == 'nhlDirectory':
from resources.lib.indexers import nhlcom
nhlcom.nhlDirectory()
elif action == 'nhlScoreboard':
from resources.lib.indexers import nhlcom
nhlcom.nhlScoreboard()
elif action == 'nhlArchives':
from resources.lib.indexers import nhlcom
nhlcom.nhlArchives()
elif action == 'nhlStreams':
from resources.lib.indexers import nhlcom
nhlcom.nhlStreams(name,url)
elif action == 'nhlResolve':
from resources.lib.indexers import nhlcom
nhlcom.nhlResolve(url)
|
[
"bialagary@Garys-Mac-mini.local"
] |
bialagary@Garys-Mac-mini.local
|
59b02c1565c66a501284ae2b8e71274d82d42d8e
|
ce6cb09c21470d1981f1b459293d353407c8392e
|
/lib/jnpr/healthbot/swagger/models/command_rpc.py
|
95cb417debd31ead46f6fe1b6170628878d617e1
|
[
"Apache-2.0"
] |
permissive
|
minefuto/healthbot-py-client
|
c4be4c9c3153ef64b37e5344bf84154e93e7b521
|
bb81452c974456af44299aebf32a73abeda8a943
|
refs/heads/master
| 2022-12-04T07:47:04.722993
| 2020-05-13T14:04:07
| 2020-05-13T14:04:07
| 290,145,286
| 0
| 0
|
Apache-2.0
| 2020-08-25T07:27:54
| 2020-08-25T07:27:53
| null |
UTF-8
|
Python
| false
| false
| 8,149
|
py
|
# coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 1.0.0
Contact: healthbot-hackers@juniper.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CommandRpc(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'args': 'dict(str, str)',
'filename': 'str',
'host': 'str',
'password': 'str',
'tablename': 'str',
'target': 'str',
'username': 'str'
}
attribute_map = {
'args': 'args',
'filename': 'filename',
'host': 'host',
'password': 'password',
'tablename': 'tablename',
'target': 'target',
'username': 'username'
}
def __init__(self, args=None, filename=None, host=None, password=None, tablename=None, target=None, username=None): # noqa: E501
"""CommandRpc - a model defined in Swagger""" # noqa: E501
self._args = None
self._filename = None
self._host = None
self._password = None
self._tablename = None
self._target = None
self._username = None
self.discriminator = None
if args is not None:
self.args = args
self.filename = filename
self.host = host
self.password = password
self.tablename = tablename
if target is not None:
self.target = target
self.username = username
@property
def args(self):
"""Gets the args of this CommandRpc. # noqa: E501
Optional key/value pair arguments to table # noqa: E501
:return: The args of this CommandRpc. # noqa: E501
:rtype: dict(str, str)
"""
return self._args
@args.setter
def args(self, args):
"""Sets the args of this CommandRpc.
Optional key/value pair arguments to table # noqa: E501
:param args: The args of this CommandRpc. # noqa: E501
:type: dict(str, str)
"""
self._args = args
@property
def filename(self):
"""Gets the filename of this CommandRpc. # noqa: E501
Command-rpc table filename in which the table is defined # noqa: E501
:return: The filename of this CommandRpc. # noqa: E501
:rtype: str
"""
return self._filename
@filename.setter
def filename(self, filename):
"""Sets the filename of this CommandRpc.
Command-rpc table filename in which the table is defined # noqa: E501
:param filename: The filename of this CommandRpc. # noqa: E501
:type: str
"""
if filename is None:
raise ValueError("Invalid value for `filename`, must not be `None`") # noqa: E501
self._filename = filename
@property
def host(self):
"""Gets the host of this CommandRpc. # noqa: E501
Host name or ip-address of the device in which command will be inspected # noqa: E501
:return: The host of this CommandRpc. # noqa: E501
:rtype: str
"""
return self._host
@host.setter
def host(self, host):
"""Sets the host of this CommandRpc.
Host name or ip-address of the device in which command will be inspected # noqa: E501
:param host: The host of this CommandRpc. # noqa: E501
:type: str
"""
if host is None:
raise ValueError("Invalid value for `host`, must not be `None`") # noqa: E501
self._host = host
@property
def password(self):
"""Gets the password of this CommandRpc. # noqa: E501
Password to connect to device # noqa: E501
:return: The password of this CommandRpc. # noqa: E501
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""Sets the password of this CommandRpc.
Password to connect to device # noqa: E501
:param password: The password of this CommandRpc. # noqa: E501
:type: str
"""
if password is None:
raise ValueError("Invalid value for `password`, must not be `None`") # noqa: E501
self._password = password
@property
def tablename(self):
"""Gets the tablename of this CommandRpc. # noqa: E501
Command-rpc table name # noqa: E501
:return: The tablename of this CommandRpc. # noqa: E501
:rtype: str
"""
return self._tablename
@tablename.setter
def tablename(self, tablename):
"""Sets the tablename of this CommandRpc.
Command-rpc table name # noqa: E501
:param tablename: The tablename of this CommandRpc. # noqa: E501
:type: str
"""
if tablename is None:
raise ValueError("Invalid value for `tablename`, must not be `None`") # noqa: E501
self._tablename = tablename
@property
def target(self):
"""Gets the target of this CommandRpc. # noqa: E501
To run a command on an FPC, specify the FPC target # noqa: E501
:return: The target of this CommandRpc. # noqa: E501
:rtype: str
"""
return self._target
@target.setter
def target(self, target):
"""Sets the target of this CommandRpc.
To run a command on an FPC, specify the FPC target # noqa: E501
:param target: The target of this CommandRpc. # noqa: E501
:type: str
"""
self._target = target
@property
def username(self):
"""Gets the username of this CommandRpc. # noqa: E501
Username to connect to device # noqa: E501
:return: The username of this CommandRpc. # noqa: E501
:rtype: str
"""
return self._username
@username.setter
def username(self, username):
"""Sets the username of this CommandRpc.
Username to connect to device # noqa: E501
:param username: The username of this CommandRpc. # noqa: E501
:type: str
"""
if username is None:
raise ValueError("Invalid value for `username`, must not be `None`") # noqa: E501
self._username = username
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CommandRpc, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CommandRpc):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"nitinkr@juniper.net"
] |
nitinkr@juniper.net
|
6a1ca19e78e7528364bc0b93d19a51ccb649f761
|
f4be3422b28dda8802ea75368d665d17b634b83f
|
/babo/__init__.py
|
5d6700be06951195e3f22bed513d5b4a0e966b6e
|
[] |
no_license
|
carpedm20/babo
|
0bab8abee49058fb4c0c6ab629f174d8a85d50a7
|
1fae47214918b4a64fc305787fb6a4df07c49768
|
refs/heads/master
| 2020-04-15T16:12:10.274756
| 2014-08-15T12:00:33
| 2014-08-15T12:00:33
| 22,943,235
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
# -*- coding: utf-8 -*-
"""
babo
~~~~
The world will have a generation of idiots.. by Albert Einstein
:copyright: (c) 2014 by Taehoon Kim.
:license: BSD, see LICENSE for more details.
"""
__copyright__ = 'Copyright 2014 by Taehoon Kim'
__version__ = '0.0.1'
__license__ = 'BSD'
__author__ = 'Taehoon Kim'
__author_email__ = 'carpedm20@gmail.com'
__url__ = 'http://github.com/carpedm20/babo'
__all__ = [
]
|
[
"carpedm20@gmail.com"
] |
carpedm20@gmail.com
|
a1250d373944bf65cff70e384219809151ab23bf
|
fadf50987ab3aaefc993f00187d8a833457e9e97
|
/torchstat/model_hook.py
|
3ff8d3a0ebcaae409b34f6e8da4cdb375d8cf88d
|
[
"MIT"
] |
permissive
|
Hulalazz/torchstat
|
4cff14e2b272246d9fd7136b969eaab6165abfeb
|
b533d917ba8f2e0871a60c3ff73704e294b769eb
|
refs/heads/master
| 2020-04-04T08:59:07.626893
| 2018-11-01T09:21:35
| 2018-11-01T09:21:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,980
|
py
|
import time
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
from torchstat import compute_madd
from torchstat import compute_flops
class ModelHook(object):
def __init__(self, model, input_size):
assert isinstance(model, nn.Module)
assert isinstance(input_size, (list, tuple))
self._model = model
self._input_size = input_size
self._origin_call = dict() # sub module call hook
self._hook_model()
x = torch.rand(1, *self._input_size) # add module duration time
self._model.eval()
self._model(x)
@staticmethod
def _register_buffer(module):
assert isinstance(module, nn.Module)
if len(list(module.children())) > 0:
return
module.register_buffer('input_shape', torch.zeros(3).int())
module.register_buffer('output_shape', torch.zeros(3).int())
module.register_buffer('parameter_quantity', torch.zeros(1).int())
module.register_buffer('inference_memory', torch.zeros(1).long())
module.register_buffer('MAdd', torch.zeros(1).long())
module.register_buffer('duration', torch.zeros(1).float())
module.register_buffer('Flops', torch.zeros(1).long())
def _sub_module_call_hook(self):
def wrap_call(module, *input, **kwargs):
assert module.__class__ in self._origin_call
start = time.time()
output = self._origin_call[module.__class__](module, *input, **kwargs)
end = time.time()
module.duration = torch.from_numpy(
np.array([end - start], dtype=np.float32))
module.input_shape = torch.from_numpy(
np.array(input[0].size()[1:], dtype=np.int32))
module.output_shape = torch.from_numpy(
np.array(output.size()[1:], dtype=np.int32))
parameter_quantity = 0
# iterate through parameters and count num params
for name, p in module._parameters.items():
parameter_quantity += (0 if p is None else torch.numel(p.data))
module.parameter_quantity = torch.from_numpy(
np.array([parameter_quantity], dtype=np.long))
inference_memory = 1
for s in output.size()[1:]:
inference_memory *= s
# memory += parameters_number # exclude parameter memory
inference_memory = inference_memory * 4 / (1024 ** 2) # shown as MB unit
module.inference_memory = torch.from_numpy(
np.array([inference_memory], dtype=np.float32))
if len(input) == 1:
madd = compute_madd(module, input[0], output)
flops = compute_flops(module, input[0], output)
elif len(input) > 1:
madd = compute_madd(module, input, output)
flops = compute_flops(module, input, output)
else: # error
madd = 0
flops = 0
module.MAdd = torch.from_numpy(
np.array([madd], dtype=np.int64))
module.Flops = torch.from_numpy(
np.array([flops], dtype=np.int64))
return output
for module in self._model.modules():
if len(list(module.children())) == 0 and module.__class__ not in self._origin_call:
self._origin_call[module.__class__] = module.__class__.__call__
module.__class__.__call__ = wrap_call
def _hook_model(self):
self._model.apply(self._register_buffer)
self._sub_module_call_hook()
@staticmethod
def _retrieve_leaf_modules(model):
leaf_modules = []
for name, m in model.named_modules():
if len(list(m.children())) == 0:
leaf_modules.append((name, m))
return leaf_modules
def retrieve_leaf_modules(self):
return OrderedDict(self._retrieve_leaf_modules(self._model))
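# Illustrative usage, not part of the original module (assumes torchvision is installed):
#   from torchvision.models import resnet18
#   hook = ModelHook(resnet18(), (3, 224, 224))
#   leaf_modules = hook.retrieve_leaf_modules()
# After construction, each leaf module carries the registered buffers
# (input_shape, output_shape, parameter_quantity, MAdd, Flops, duration, inference_memory).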
|
[
"technext.jpn@gmail.com"
] |
technext.jpn@gmail.com
|
e037a3f03bb035f9294a2db24cabd7bccc5d1501
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/demo/time.py
|
56ab715a7f7ed7eb6c4c24c99973845bd00f6ad4
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,515
|
py
|
"""Demo platform that offers a fake time entity."""
from __future__ import annotations
from datetime import time
from homeassistant.components.time import TimeEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import DOMAIN
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the demo time platform."""
async_add_entities([DemoTime("time", "Time", time(12, 0, 0), "mdi:clock", False)])
class DemoTime(TimeEntity):
"""Representation of a Demo time entity."""
_attr_has_entity_name = True
_attr_name = None
_attr_should_poll = False
def __init__(
self,
unique_id: str,
device_name: str,
state: time,
icon: str,
assumed_state: bool,
) -> None:
"""Initialize the Demo time entity."""
self._attr_assumed_state = assumed_state
self._attr_icon = icon
self._attr_native_value = state
self._attr_unique_id = unique_id
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, unique_id)}, name=device_name
)
async def async_set_value(self, value: time) -> None:
"""Update the time."""
self._attr_native_value = value
self.async_write_ha_state()
|
[
"noreply@github.com"
] |
home-assistant.noreply@github.com
|
be55d9627d221ef15a7208f8625d6dac784efb54
|
64ae307de1a6966ec948662df695cd09cefd5eff
|
/Day_6/mm_project/mm_project/tests/conftest.py
|
b924ef165574f9638d0da245dcbadd78736a65fc
|
[
"BSD-3-Clause"
] |
permissive
|
janash/mm_project_sss2019
|
3a4d61c4f4dbe7eee5af401d831e7483480bb509
|
84f9da3efe335a7024213ddae6fd56113d4fda09
|
refs/heads/master
| 2020-05-19T23:03:53.143517
| 2019-07-26T23:03:08
| 2019-07-26T23:03:08
| 185,258,555
| 0
| 2
|
BSD-3-Clause
| 2019-07-09T17:33:19
| 2019-05-06T19:26:20
|
Python
|
UTF-8
|
Python
| false
| false
| 682
|
py
|
"""
Fixtures for monte carlo tests
"""
# Import package, test suite, and other packages as needed
import mm_project as mc
import numpy as np
import os
import pytest
import sys
@pytest.fixture
def nist_file():
current_directory = os.path.dirname(os.path.abspath(__file__))
nist_file = os.path.join(current_directory,'..', 'data', 'nist_sample_config1.txt')
coordinates = mc.generate_initial_coordinates(method='file', fname=nist_file)
return coordinates, nist_file
@pytest.fixture
def mc_box(nist_file):
coordinates = nist_file[0][0]
box_length = nist_file[0][1]
fname = nist_file[1]
test_box = mc.Box(box_length, coordinates)
return test_box
|
[
"ben@bennyp.org"
] |
ben@bennyp.org
|
0001b37bd0d1d6b08e473e5f1c41d7bc88ba50bd
|
48a8430d19c4d8d6fdcecf1cb9875d74b5efce6a
|
/CycleGAN/data_loader.py
|
7cf7d6a2fd0954c3313fa0ba7bc7a498ee9437a9
|
[] |
no_license
|
orange-eng/GAN
|
af00f469b763893b2e474f8adb83460164c843e0
|
7a7fafa4c6e9aac0da73791ca646b6503c39b24f
|
refs/heads/main
| 2023-02-25T20:21:54.825164
| 2021-01-25T08:34:41
| 2021-01-25T08:34:41
| 324,327,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,628
|
py
|
import cv2
from glob import glob
# glob is a file-lookup module bundled with Python; it finds files matching a pattern, much like file search on Windows
# https://www.cnblogs.com/lovemo1314/archive/2011/04/26/2029556.html
import numpy as np
import os
import sys
apath = os.path.abspath(os.path.dirname(sys.argv[0]))
# Get every path under the directory; very convenient
path = glob(apath+"/datasets/monet2photo/*")
print(path)
class DataLoader():
def __init__(self,dataset_name,img_res=(128,128)):
self.img_res = img_res
self.dataset_name = dataset_name
def load_data(self,domain,batch_size=1,is_testing = False):
data_type = "train%s"% domain if not is_testing else "test%s"% domain
path = glob(apath+"/datasets/%s/%s/*"%(self.dataset_name,data_type))
batch_images = np.random.choice(path,size=batch_size)
imgs = []
for img_path in batch_images:
img = self.imread(img_path)
img = cv2.resize(img,self.img_res) # resize the image to 128*128*3
img = np.array(img)/127.5 - 1
cv2.imshow("img",img)
cv2.waitKey(0)
imgs.append(img)
return imgs
def load_batch(self,batch_size=1,is_testing=False):
data_type = "train" if not is_testing else "val"
path_A = glob(apath +'./datasets/%s/%sA/*' % (self.dataset_name, data_type))
path_B = glob(apath +'./datasets/%s/%sB/*' % (self.dataset_name, data_type))
self.n_batches = int(min(len(path_A),len(path_B)) / batch_size )
print("min:",int(min(len(path_A),len(path_B))))
total_samples = self.n_batches * batch_size
path_A = np.random.choice(path_A, total_samples, replace=False)
path_B = np.random.choice(path_B, total_samples, replace=False)
for i in range(self.n_batches - 1):
batch_A = path_A[i*batch_size:(i+1)*batch_size]
batch_B = path_B[i*batch_size:(i+1)*batch_size]
imgs_A, imgs_B = [], []
for img_A,img_B in zip(batch_A,batch_B):
'''
a = [1, 2, 3]
b = [4, 5, 6]
a_b_zip = zip(a, b) # packs into a list of tuples; the length matches the shortest input list
print("type of a_b_zip is %s" % type(a_b_zip)) # print the type of the object returned by zip
a_b_zip = list(a_b_zip) # zip returns a zip object, so convert it to a list
print(a_b_zip)
'''
img_A = self.imread(img_A)
img_B = self.imread(img_B)
img_A = cv2.resize(img_A,self.img_res)
img_B = cv2.resize(img_B,self.img_res)
imgs_A.append(img_A)
imgs_B.append(img_B)
imgs_A = np.array(imgs_A,dtype=np.float32)/127.5 - 1
imgs_B = np.array(imgs_B,dtype=np.float32)/127.5 - 1
yield imgs_A,imgs_B
# A function containing yield is a generator rather than an ordinary function;
# the generator is driven by next(), and each next() call produces the "next" value,
# resuming from where the previous next() call stopped.
# Convert the image from BGR to RGB format
def imread(self,path):
img = cv2.imread(path)
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
return img
# if __name__ == "__main__":
# Data = DataLoader(dataset_name="monet2photo")
# for batch_i,(imgs_A,imgs_B) in enumerate(Data.load_batch(50)):
# print(batch_i)
|
[
"972353371@qq.com"
] |
972353371@qq.com
|
139ae4368f9dcc52c84dcbfbcab84a8112ca406a
|
727987094c01eaf41343464567a52fbb705f6701
|
/yabgp/message/attribute/nlri/__init__.py
|
c67e29f0a1e9105cce0eecd0e3eebb32ea38ff2a
|
[
"Apache-2.0"
] |
permissive
|
xinwu/yabgp
|
1377d11e4e42f259dd66bb08060b74d0683a1796
|
ae7cc871a4a8a67d08eef2abc82cf1397f2601c3
|
refs/heads/master
| 2021-01-13T03:05:33.031083
| 2016-12-14T16:27:23
| 2016-12-14T16:27:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,558
|
py
|
# Copyright 2016 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import struct
import binascii
import netaddr
class NLRI(object):
@classmethod
def parse(cls, *args):
raise NotImplementedError
@classmethod
def construct(cls, *args):
raise NotImplementedError
@staticmethod
def construct_prefix_v4(masklen, prefix_str):
ip_hex = struct.pack('!I', netaddr.IPNetwork(prefix_str).value)
if 16 < masklen <= 24:
ip_hex = ip_hex[0:3]
elif 8 < masklen <= 16:
ip_hex = ip_hex[0:2]
elif masklen <= 8:
ip_hex = ip_hex[0:1]
return ip_hex
@staticmethod
def construct_prefix_v6(prefix):
mask = int(prefix.split('/')[1])
prefix_hex = binascii.unhexlify(hex(netaddr.IPNetwork(prefix).ip)[2:])
offset = mask / 8
offset_re = mask % 8
if offset == 0:
return prefix_hex[0: 1]
return prefix_hex[0: offset + offset_re]
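# Illustrative example, not part of the original module: for a /20 IPv4 route
# such as 10.1.16.0/20, construct_prefix_v4 packs the address (0x0a 0x01 0x10 0x00)
# and, because 16 < 20 <= 24, keeps only the first three octets (0x0a 0x01 0x10),
# which is how BGP encodes NLRI prefixes on whole-octet boundaries.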
|
[
"xiaoquwl@gmail.com"
] |
xiaoquwl@gmail.com
|
1d71cdd16103283b54ddbfae586dbd58e635dea8
|
7c28640e152dad3843423d04c96a3a37015bd9ba
|
/Examples/Game Tutorial/Tutorial Part 6.py
|
0b571e28753411ea66a52d103d7f671bc5c1d42d
|
[] |
no_license
|
DocVaughan/Pythonista
|
251bbfd69203cf91f3d6a6bf20d478efd74a61a0
|
7d482c7db2c7b4daae10289b765f09a4f348a50c
|
refs/heads/master
| 2021-01-20T20:48:32.603993
| 2017-12-31T10:33:48
| 2017-12-31T10:33:48
| 61,178,643
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,233
|
py
|
# coding: utf-8
'''
Part 6 -- Meteors Incoming! ☄️
Collecting coins is fun, but did you notice the distinct lack of... challenge?
Let's change that now, and add some meteors to the mix. The mechanism is essentially the same as with the coins, but when the alien collides with a meteor, the game is over.
To make the game a bit harder, the speed at which coins and meteors fall to the ground now increases slightly over time.
'''
from scene import *
import sound
import random
A = Action
def cmp(a, b):
return ((a > b) - (a < b))
standing_texture = Texture('plf:AlienGreen_front')
walk_textures = [Texture('plf:AlienGreen_walk1'), Texture('plf:AlienGreen_walk2')]
# ---[1]
# Because the alien can be hit by a meteor, we need one additional texture for the unhappy alien:
hit_texture = Texture('plf:AlienGreen_hit')
class Coin (SpriteNode):
def __init__(self, **kwargs):
SpriteNode.__init__(self, 'plf:Item_CoinGold', **kwargs)
# ---[2]
# As with the coins, we use a custom subclass of SpriteNode to represent the meteors. For some variety, the texture of the meteor is chosen randomly.
class Meteor (SpriteNode):
def __init__(self, **kwargs):
img = random.choice(['spc:MeteorBrownBig1', 'spc:MeteorBrownBig2'])
SpriteNode.__init__(self, img, **kwargs)
class Game (Scene):
def setup(self):
self.background_color = '#004f82'
self.ground = Node(parent=self)
x = 0
while x <= self.size.w + 64:
tile = SpriteNode('plf:Ground_PlanetHalf_mid', position=(x, 0))
self.ground.add_child(tile)
x += 64
self.player = SpriteNode(standing_texture)
self.player.anchor_point = (0.5, 0)
self.add_child(self.player)
score_font = ('Futura', 40)
self.score_label = LabelNode('0', score_font, parent=self)
self.score_label.position = (self.size.w/2, self.size.h - 70)
self.score_label.z_position = 1
self.items = []
# ---[3]
# Because the game can end now, we need a method to restart it.
# Some of the initialization logic that was previously in `setup()` is now in `new_game()`, so it can be repeated without having to close the game first.
self.new_game()
def new_game(self):
# Reset everything to its initial state...
for item in self.items:
item.remove_from_parent()
self.items = []
self.score = 0
self.score_label.text = '0'
self.walk_step = -1
self.player.position = (self.size.w/2, 32)
self.player.texture = standing_texture
self.speed = 1.0
# ---[4]
# The game_over attribute is set to True when the alien gets hit by a meteor. We use this to stop player movement and collision checking (the update method simply does nothing when game_over is True).
self.game_over = False
def update(self):
if self.game_over:
return
self.update_player()
self.check_item_collisions()
if random.random() < 0.05 * self.speed:
self.spawn_item()
def update_player(self):
g = gravity()
if abs(g.x) > 0.05:
self.player.x_scale = cmp(g.x, 0)
x = self.player.position.x
max_speed = 40
x = max(0, min(self.size.w, x + g.x * max_speed))
self.player.position = x, 32
step = int(self.player.position.x / 40) % 2
if step != self.walk_step:
self.player.texture = walk_textures[step]
sound.play_effect('rpg:Footstep00', 0.05, 1.0 + 0.5 * step)
self.walk_step = step
else:
self.player.texture = standing_texture
self.walk_step = -1
def check_item_collisions(self):
# ---[5]
# The hit testing is essentially the same as before, but now distinguishes between coins and meteors (simply by checking the class of the item).
# When a meteor hits, the game is over (see the `player_hit()` method below).
player_hitbox = Rect(self.player.position.x - 20, 32, 40, 65)
for item in list(self.items):
if item.frame.intersects(player_hitbox):
if isinstance(item, Coin):
self.collect_item(item)
elif isinstance(item, Meteor):
self.player_hit()
elif not item.parent:
self.items.remove(item)
def player_hit(self):
# ---[6]
# This is called from `check_item_collisions()` when the alien collides with a meteor. The alien simply drops off the screen, and after 2 seconds, a new game is started.
self.game_over = True
sound.play_effect('arcade:Explosion_1')
self.player.texture = hit_texture
self.player.run_action(A.move_by(0, -150))
# Note: The duration of the `wait` action is multiplied by the current game speed, so that it always takes exactly 2 seconds, regardless of how fast the rest of the game is running.
self.run_action(A.sequence(A.wait(2*self.speed), A.call(self.new_game)))
def spawn_item(self):
if random.random() < 0.3:
# ---[7]
# Whenever a new item is created, there's now a 30% chance that it is a meteor instead of a coin.
# Their behavior is very similar to that of the coins, but instead of moving straight down, they may come in at an angle. To accomplish this, the x coordinate of the final position is simply chosen randomly.
meteor = Meteor(parent=self)
meteor.position = (random.uniform(20, self.size.w-20), self.size.h + 30)
d = random.uniform(2.0, 4.0)
actions = [A.move_to(random.uniform(0, self.size.w), -100, d), A.remove()]
meteor.run_action(A.sequence(actions))
self.items.append(meteor)
else:
coin = Coin(parent=self)
coin.position = (random.uniform(20, self.size.w-20), self.size.h + 30)
d = random.uniform(2.0, 4.0)
actions = [A.move_by(0, -(self.size.h + 60), d), A.remove()]
coin.run_action(A.sequence(actions))
self.items.append(coin)
# ---[8]
# To make things a bit more interesting, the entire game gets slightly faster whenever a new item is spawned. The `speed` attribute is essentially a multiplier for the duration of all actions in the scene. Note that this is actually an attribute of `Node`, so you could apply different speeds for different groups of nodes. Since all items are added directly to the scene in this example, we don't make use of that here though.
self.speed = min(3, self.speed + 0.005)
def collect_item(self, item, value=10):
sound.play_effect('digital:PowerUp7')
item.remove_from_parent()
self.items.remove(item)
self.score += value
self.score_label.text = str(self.score)
if __name__ == '__main__':
run(Game(), PORTRAIT, show_fps=True)
|
[
"joshua.vaughan@louisiana.edu"
] |
joshua.vaughan@louisiana.edu
|
165dab7e57e2a352300f0576717c1cdae8927d4b
|
5e2dddce9c67d5b54d203776acd38d425dbd3398
|
/spacy/lang/es/syntax_iterators.py
|
869f404e040edf4e143bf6e80dab2eaac4390688
|
[
"MIT"
] |
permissive
|
yuxuan2015/spacy_zh_model
|
8164a608b825844e9c58d946dcc8698853075e37
|
e89e00497ab3dad0dd034933e25bc2c3f7888737
|
refs/heads/master
| 2020-05-15T11:07:52.906139
| 2019-08-27T08:28:11
| 2019-08-27T08:28:11
| 182,213,671
| 1
| 0
| null | 2019-04-19T06:27:18
| 2019-04-19T06:27:17
| null |
UTF-8
|
Python
| false
| false
| 1,695
|
py
|
# coding: utf8
from __future__ import unicode_literals
from ...symbols import NOUN, PROPN, PRON, VERB, AUX
def noun_chunks(obj):
doc = obj.doc
np_label = doc.vocab.strings.add('NP')
left_labels = ['det', 'fixed', 'neg'] # ['nunmod', 'det', 'appos', 'fixed']
right_labels = ['flat', 'fixed', 'compound', 'neg']
stop_labels = ['punct']
np_left_deps = [doc.vocab.strings[label] for label in left_labels]
np_right_deps = [doc.vocab.strings[label] for label in right_labels]
stop_deps = [doc.vocab.strings[label] for label in stop_labels]
def noun_bounds(root):
left_bound = root
for token in reversed(list(root.lefts)):
if token.dep in np_left_deps:
left_bound = token
right_bound = root
for token in root.rights:
if (token.dep in np_right_deps):
left, right = noun_bounds(token)
if list(filter(lambda t: is_verb_token(t) or t.dep in stop_deps,
doc[left_bound.i: right.i])):
break
else:
right_bound = right
return left_bound, right_bound
token = doc[0]
while token and token.i < len(doc):
if token.pos in [PROPN, NOUN, PRON]:
left, right = noun_bounds(token)
yield left.i, right.i+1, np_label
token = right
token = next_token(token)
def is_verb_token(token):
return token.pos in [VERB, AUX]
def next_token(token):
try:
return token.nbor()
except:
return None
SYNTAX_ITERATORS = {
'noun_chunks': noun_chunks
}
|
[
"yuxuan2015@example.com"
] |
yuxuan2015@example.com
|
4450db57e64db6586c682bfbdf846ffb456d9e4e
|
4d718292ec9f90444eeda13d18febb10757da894
|
/mission 11/classement.py
|
e50dc9fbba32e99394191730b85603be4aa7080f
|
[] |
no_license
|
rverschuren/Info
|
b40fb04a6260dacfc95d12e63c99abd82b140e06
|
c9aa0bdc1b026c8ba8134b878b5fae7d49d75e19
|
refs/heads/master
| 2020-04-16T07:29:49.847812
| 2019-01-14T14:50:18
| 2019-01-14T14:50:18
| 165,389,281
| 1
| 2
| null | 2019-01-12T18:56:01
| 2019-01-12T13:12:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,812
|
py
|
class Classement :
"""
Une implémentation primitive de classement, non ordonnée et de capacité fixe.
@author Kim Mens
@version 02 Décembre 2018
"""
__maxcapacity = 10
def __init__(self):
"""
@pre: -
@post: un classement vide de taille 0 a été créé
"""
self.__resultats = {} # dictionnaire de résultats actuelle (clé = coureur; valeur = résultat)
self.__size = 0 # nombre de résultats actuel (initialement 0, maximum __maxcapacity)
def size(self):
"""
Méthode accesseur.
Retourne la taille de ce classement.
@pre: -
@post: Le nombre de résultats actuellement stockés dans ce classement a été retourné.
"""
return self.__size
def add(self,r):
"""
Ajoute un résultat r dans ce classement.
@pre: r est une instance de la classe Resultat
@post: Le résultat r a été inséré selon l'ordre du classement.
En cas d'ex-aequo, r est inséré après les autres résultats de même ordre.
ATTENTION : L'implémentation actuelle ne respecte pas encore la post-condition!
Le résultat est simplement ajouté à la dictionnaire, sans tenir compte de l'ordre.
Une dictionnaire ne donne pas de garanties sur l'ordre des éléments.
"""
if self.size() >= self.__maxcapacity :
raise Error("Capacity of classement exceeded")
else :
self.__size += 1
self.__resultats[r.coureur()] = r
def get(self,c):
"""
Retourne le résultat d'un coureur donné.
@pre c est un Coureur
@post retourne le premier (meilleur) Resultat r du coureur c dans le
classement. Retourne None si le coureur ne figure pas (encore)
dans le classement.
"""
return self.__resultats.get(c)
def get_position(self,c):
"""
Retourne la meilleure position d'un coureur dans ce classement.
@pre c est un Coureur
@post retourne un entier représentant la position du coureur c dans ce classement,
à partir de 1 pour la tête de ce classement. Si le coureur figure plusieurs fois
dans le classement, la première (meilleure) position est retournée.
Retourne -1 si le coureur ne figure pas dans le classement.
ATTENTION : L'implémentation actuelle ne respecte pas encore la post-condition!
Etant donné que la dictionnaire de résultats ne connaît pas de position,
pour le moment cette méthode retourne toujours "***position inconnue***".
A vous de la corriger en utilisant une liste chaînée ordonnée
comme structure de données, plutôt qu'une simple dictionnaire.
"""
return "***position inconnue***"
def remove(self,c):
"""
Retire un résultat du classement.
@pre c est un Coureur
@post retire le premier (meilleur) résultat pour le coureur c du classement.
c est comparé au sens de __eq__. Retourne c si un résultat a été retiré,
of False si c n'est pas trouvé dans la liste.
"""
self.__size -= 1
return self.__resultats.pop(c,False)
def __str__(self):
"""
Méthode magique
Retourne une représentation string de cet objet.
@pre: -
@post: Retourne une représentation de ce classement sous forme d'un string,
avec une ligne par résultat.
"""
s = ""
d = self.__resultats
for c in d:
s += " " + str(self.get_position(c)) + " > " + str(d[c]) + "\n"
return s
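# Illustrative sketch, not part of the original exercise, assuming Resultat
# objects compare with < on race time (smaller is better): add() could keep an
# ordered list and insert after ties, e.g.
#   def add(self, r):
#       i = 0
#       while i < len(self.__resultats) and not (r < self.__resultats[i]):
#           i += 1  # skip better results and ex-aequo entries
#       self.__resultats.insert(i, r)
# get_position(c) would then return the 1-based index of the first result whose
# coureur() equals c.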
|
[
"bastien.wiaux@gmail.com"
] |
bastien.wiaux@gmail.com
|
0864a55af4f109e92a6b1185d04837dc723f87a7
|
e5d130e183b5dea1b7aad23a047c703fa0d2b3bf
|
/lightbus/transports/__init__.py
|
d0a7d70c722ce0c3f6df21091b256155c18899f5
|
[
"Apache-2.0"
] |
permissive
|
adamcharnock/lightbus
|
4a86428b8203bfe98f77a32375ac961ef398ce16
|
cf892779a9a9a8f69c789ffa83c24acfb7f9a336
|
refs/heads/master
| 2023-08-26T04:19:39.395735
| 2023-08-23T11:07:44
| 2023-08-23T11:07:44
| 94,617,214
| 193
| 22
|
Apache-2.0
| 2023-08-10T21:21:51
| 2017-06-17T10:39:23
|
Python
|
UTF-8
|
Python
| false
| false
| 534
|
py
|
from lightbus.transports.base import (
RpcTransport,
ResultTransport,
EventTransport,
SchemaTransport,
Transport,
)
from lightbus.transports.debug import (
DebugRpcTransport,
DebugResultTransport,
DebugEventTransport,
DebugSchemaTransport,
)
from lightbus.transports.redis.rpc import RedisRpcTransport
from lightbus.transports.redis.result import RedisResultTransport
from lightbus.transports.redis.event import RedisEventTransport
from lightbus.transports.redis.schema import RedisSchemaTransport
|
[
"adam@adamcharnock.com"
] |
adam@adamcharnock.com
|
23f3a9b619600c2c45f879384f3a51dda94f5c3e
|
38466811d0e12a8f755bae58d7244622ef5f4d9b
|
/leetcode/200/141_linked_list_cycle.py
|
9e5262a7110cf85407e3ce7e9183543e977219f0
|
[] |
no_license
|
andysitu/algo-problems
|
4ab5a2b6591f0c0d84174b69598f30bc354ff8aa
|
35c88dc747e7afa4fdd51d538bc80c4712eb1172
|
refs/heads/master
| 2023-06-24T15:55:39.019652
| 2021-02-26T20:31:07
| 2021-02-26T20:31:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def hasCycle(self, head: ListNode) -> bool:
if head == None:
return False
forward1 = head
forward2 = head
while True:
forward2 = forward2.next
if forward2 == None:
return False
forward2 = forward2.next
if forward2 == None:
return False
forward1 = forward1.next
if forward1 == forward2:
return True
|
[
"and.situ@gmail.com"
] |
and.situ@gmail.com
|
9551d519b20dfcc4061ff956e357f5bdb2481c6d
|
7cd8ee14711eaf33cee0d9e06e78a974fc579242
|
/Linkedin/Linkedin/spiders/linkedin_distinct_12logincount.py
|
73e600bb84ee61402c91c2f94e2db905b9cf883c
|
[] |
no_license
|
Chandler-Song/pi
|
c618117dfdd9a7496a57c69f029851e94787f591
|
aebc6d65b79ed43c66e7e1bf16d6d9f31b470372
|
refs/heads/master
| 2022-03-13T02:44:30.452673
| 2019-02-19T09:38:45
| 2019-02-19T09:38:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,482
|
py
|
from linkedin_voyager_functions import *
class Companylidsurls(object):
def __init__(self, *args, **kwargs):
self.con, self.cur = get_mysql_connection(DB_HOST, 'FACEBOOK', '')
#self.qu1 = 'select profile_sk , connections_profile_url from linkedin_connections where date(modified_at)>"2017-04-17" and date(modified_at)<"2017-08-21" and member_id = "%s"'
self.qu1 = "select distinct member_id from linkedin_meta where date(modified_at) < '2017-08-20'"
self.qu2 = "select distinct member_id from linkedin_connections where date(modified_at) > '2017-08-20'"
self.query2 = "select connections_profile_url, member_id, sk from FACEBOOK.linkedin_connections where date(modified_at) >= '2017-08-20'"
self.excel_file_name = 'linkedin_connections_profiles_%s.csv'%str(datetime.datetime.now().date())
if os.path.isfile(self.excel_file_name):
os.system('rm %s'%self.excel_file_name)
oupf = open(self.excel_file_name, 'ab+')
self.todays_excel_file = csv.writer(oupf)
self.headers = ['Linkedin_Profile_url', 'member_id']
self.todays_excel_file.writerow(self.headers)
def main(self):
"""with open('duplicate_members', 'r') as f:
rows = f.readlines()
for inde, row in enumerate(rows):
row = row.strip('\n')
one_ = fetchmany(self.cur, self.qu1 % row)
pf_sk = '<>'.join([i[0] for i in one_])
pf_url = '<>'.join([i[0] for i in one_])
file("duplicate_member_info","ab+").write("%s, %s, %s\n" % (row, pf_sk, pf_url))"""
re1 = fetchall(self.cur, self.qu1)
re2 = fetchall(self.cur, self.qu2)
re2 = [str(i[0]) for i in re2]
re1 = [str(i[0]) for i in re1]
new_list = []
for i in re1:
if i in re2:
new_list.append(i)
print len(new_list)
total_distinct_list = []
total_connection_records = fetchall(self.cur, self.query2)
for tocr in total_connection_records:
linkedin_profilef, member_id, connection_sk = tocr
if member_id in new_list:
continue
total_distinct_list.append((linkedin_profilef, member_id))
print len(total_distinct_list), 'total_length'
print len(set(total_distinct_list)), 'total_distinct_lenth'
total_distinct_list = set(total_distinct_list)
for tdl in total_distinct_list:
lk_url, mem_id = tdl
values = [lk_url, mem_id]
values = [normalize(i) for i in values]
self.todays_excel_file.writerow(values)
if __name__ == '__main__':
Companylidsurls().main()
|
[
"aravind@headrun.com"
] |
aravind@headrun.com
|
a7e316b3e4294deab2c4be72af3994d2504b8d49
|
ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3
|
/python/baiduads-sdk-auto/test/test_update_creative_response_wrapper.py
|
cca6a9dbe84cb7e4f9ee1164d8a578b819dd20ce
|
[
"Apache-2.0"
] |
permissive
|
baidu/baiduads-sdk
|
24c36b5cf3da9362ec5c8ecd417ff280421198ff
|
176363de5e8a4e98aaca039e4300703c3964c1c7
|
refs/heads/main
| 2023-06-08T15:40:24.787863
| 2023-05-20T03:40:51
| 2023-05-20T03:40:51
| 446,718,177
| 16
| 11
|
Apache-2.0
| 2023-06-02T05:19:40
| 2022-01-11T07:23:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
"""
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.common.model.api_response_header import ApiResponseHeader
from baiduads.creative.model.update_creative_response_wrapper_body import UpdateCreativeResponseWrapperBody
globals()['ApiResponseHeader'] = ApiResponseHeader
globals()['UpdateCreativeResponseWrapperBody'] = UpdateCreativeResponseWrapperBody
from baiduads.creative.model.update_creative_response_wrapper import UpdateCreativeResponseWrapper
class TestUpdateCreativeResponseWrapper(unittest.TestCase):
"""UpdateCreativeResponseWrapper unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testUpdateCreativeResponseWrapper(self):
"""Test UpdateCreativeResponseWrapper"""
# FIXME: construct object with mandatory attributes with example values
# model = UpdateCreativeResponseWrapper() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"tokimekiyxp@foxmail.com"
] |
tokimekiyxp@foxmail.com
|
ebbc23d30dbea2dafb4b6a71b92a5ccb4c9bb341
|
d1c352676563b2decacfad19120001959b043f05
|
/superset/migrations/versions/a33a03f16c4a_add_extra_column_to_savedquery.py
|
07e0b05a1c0c6e035dd0a4931949130430b03579
|
[
"Apache-2.0",
"CC-BY-4.0",
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] |
permissive
|
Affirm/incubator-superset
|
c9a09a10289b4ebf8a09284a483bca93725a4b51
|
421183d3f46c48215e88e9d7d285f2dc6c7ccfe6
|
refs/heads/master
| 2023-07-06T11:34:38.538178
| 2019-05-22T23:39:01
| 2019-05-22T23:39:01
| 128,005,001
| 1
| 3
|
Apache-2.0
| 2023-03-20T19:49:14
| 2018-04-04T04:02:42
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,090
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add extra column to SavedQuery
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Revision ID: a33a03f16c4a
Revises: fb13d49b72f9
Create Date: 2019-01-14 16:00:26.344439
"""
# revision identifiers, used by Alembic.
revision = 'a33a03f16c4a'
down_revision = 'fb13d49b72f9'
from alembic import op
import sqlalchemy as sa
def upgrade():
with op.batch_alter_table('saved_query') as batch_op:
batch_op.add_column(sa.Column('extra_json', sa.Text(), nullable=True))
def downgrade():
with op.batch_alter_table('saved_query') as batch_op:
batch_op.drop_column('extra_json')
|
[
"noreply@github.com"
] |
Affirm.noreply@github.com
|
49d99d025201045a3100ae9ab2515e297887e22a
|
9b10d8482a7af9c90766747f5f2ddc343871d5fa
|
/Gemtek/AutoTest/Sprinkler-Auto-Test/appium/modules/android/main_screen.py
|
53dfab1b87c290967aa74c0b91465fb24e0c9366
|
[] |
no_license
|
DarcyChang/MyProjects
|
86d33f5cf8bdfd4b21e64922e4eb25c1afc3c135
|
47efb2dfe13ace264f8943b59b701f39f23c4c17
|
refs/heads/master
| 2021-05-12T12:43:39.255082
| 2020-09-23T06:42:03
| 2020-09-23T06:42:03
| 117,419,269
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,525
|
py
|
import unittest
from time import sleep
from appium import webdriver
import android.verify.exist
import android.verify.next_page
from appium.webdriver.common.touch_action import TouchAction
def add_device(self):
el = self.driver.find_element_by_id("com.blackloud.wetti:id/ivAddSprinkler")
self.assertIsNotNone(el)
el.click()
sleep(1)
def choose_device(self):
# TODO timeout 30 seconds
el = self.driver.find_element_by_id("com.blackloud.wetti:id/ivThum")
# "com.blackloud.wetti:id/tvName" is too.
self.assertIsNotNone(el)
action = TouchAction(self.driver)
i = 1
while(1):
try:
action.tap(el).perform()
# el.click()
sleep(1)
try:
android.verify.next_page.verify_binging_success(self)
except:
android.verify.next_page.verify_binging_network_success(self)
break
except:
sleep(1)
i += 1
if(i == 30):
print("[Gemtek] choose device TIME OUT !")
break
sleep(1)
    # TODO : 1. There are four points that can be tapped to choose the sprinkler function:
    #           two are resource-ids, the other two are classes.
    #           Maybe we can pick one at random.
    #        2. Binding two or more devices.
def my_account(self):
el = self.driver.find_element_by_id("com.blackloud.wetti:id/imvAbLeft")
self.assertIsNotNone(el)
el.click()
sleep(2)
def buy_blackloud_sprinkler(self):
el = self.driver.find_element_by_id("com.blackloud.wetti:id/tvBuyNetti")
self.assertIsNotNone(el)
el.click()
sleep(5)
def user_manual(self):
el = self.driver.find_element_by_id("com.blackloud.wetti:id/tvUserManual")
self.assertIsNotNone(el)
el.click()
sleep(5)
def feature_introduction(self):
el = self.driver.find_element_by_id("com.blackloud.wetti:id/tvTourGuide")
self.assertIsNotNone(el)
el.click()
sleep(1)
def contact_us(self):
el = self.driver.find_element_by_id("com.blackloud.wetti:id/tvContactUs")
self.assertIsNotNone(el)
el.click()
sleep(5)
def about_blackloud(self):
el = self.driver.find_element_by_id("com.blackloud.wetti:id/tvAbout")
self.assertIsNotNone(el)
el.click()
sleep(5)
def legal_and_privacy_policy(self):
el = self.driver.find_element_by_id("com.blackloud.wetti:id/tvUnderText")
self.assertIsNotNone(el)
el.click()
sleep(5)
if __name__ == '__main__':
print("[Gemtek] main_screen.py")
|
[
"cychang0916@gmail.com"
] |
cychang0916@gmail.com
|
ef7dcf27560b561e80bb4f4a68f159d63bf00127
|
bbf1ae079309eca11270422d3f0d259d1515d430
|
/numerical-tours/python/nt_solutions/ml_3_classification/exo5.py
|
3c0f3f3cda5068fd794b1a41a27c032ac538f66e
|
[
"BSD-2-Clause"
] |
permissive
|
ZichaoDi/Di_MATLABTool
|
5e6a67b613c4bcf4d904ddc47c2744b4bcea4885
|
c071291c63685c236f507b2cb893c0316ab6415c
|
refs/heads/master
| 2021-08-11T07:28:34.286526
| 2021-08-04T18:26:46
| 2021-08-04T18:26:46
| 149,222,333
| 9
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 743
|
py
|
import numpy as np
import matplotlib.pyplot as plt
# Solution snippet from the Numerical Tours notebook: kappa, theta, nablaF and the data
# (X, y, G, n, q, tmax, I, J) are defined by the surrounding notebook.
sigma_list = np.array([.1, .5, 1, 4])
niter = 4000
plt.clf()
for io in np.arange(0, np.size(sigma_list)):
sigma = sigma_list[io]
# grad descent
K = kappa(X,X,sigma)
tau = .5
    if io == 3:  # use a smaller step size for the largest sigma (io indexes 0..3)
        tau = .05
h = np.zeros((n,1))
for i in np.arange(0,niter):
h = h - tau * nablaF(h,K,y)
# evaluate on a grid
K1 = kappa(G,X,sigma)
Theta = theta( K1.dot(h) )
Theta = Theta.reshape((q,q))
# Display the classification probability.
plt.subplot(2,2,io+1)
plt.imshow(Theta.transpose(), origin="lower", extent=[-tmax, tmax, -tmax, tmax])
plt.plot(X[I,0], X[I,1], '.')
plt.plot(X[J,0], X[J,1], '.')
plt.axis('equal')
plt.axis('off')
    plt.title(r'$\sigma=' + str(sigma) + '$')
|
[
"wendydi@compute001.mcs.anl.gov"
] |
wendydi@compute001.mcs.anl.gov
|
47e266d665db77c973d48ba03cb937966bfcbd41
|
c733e6b433914a8faba256c7853f5cf2cd39c62a
|
/Python/Leetcode Daily Practice/unclassified/647. Palindromic Substrings.py
|
a75c67380d9fa7090153a83f1116d883ea245643
|
[] |
no_license
|
YaqianQi/Algorithm-and-Data-Structure
|
3016bebcc1f1356b6e5f3c3e588f3d46c276a805
|
2e1751263f484709102f7f2caf18776a004c8230
|
refs/heads/master
| 2021-10-27T16:29:18.409235
| 2021-10-14T13:57:36
| 2021-10-14T13:57:36
| 178,946,803
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
"""
Given a string, your task is to count how many palindromic substrings in this string.
The substrings with different start indexes or end indexes are counted as different
substrings even they consist of same characters.
Input: "abc"
Output: 3
Explanation: Three palindromic strings: "a", "b", "c".
"""
class Solution(object):
def countSubstrings_dp(self, s):
n = len(s)
dp = [[False for i in range(n)] for i in range(n)]
        # dp[i][j] is True iff s[i..j] is a palindrome: s[i] == s[j] and (j - i <= 2 or dp[i+1][j-1])
res = 0
for j in range(n):
for i in range(j+1):
if s[i] == s[j] and (j - i <=2 or dp[i+1][j-1]):
dp[i][j] = True
res += 1
return res
s = "aaa"
print(Solution().countSubstrings_dp(s))
|
[
"alicia.qyq@gmail.com"
] |
alicia.qyq@gmail.com
|
4084a64ffe7d52b14cb8b756e1efe29d46730493
|
8784a3a9d4054d1aca752ec742902abb51a9de80
|
/python_stack/python_OOP/arithmetic_module/main.py
|
785338b9b6b6a9481506f9e74ad051b34a087637
|
[] |
no_license
|
MichaelKirkaldyV/mean-deploy-2
|
25eaf7cc430ac095f5327c04be84b9212314c7f2
|
f30b8ea14ccbaecfe62929948f2a84191d316c22
|
refs/heads/master
| 2023-01-13T07:20:28.984728
| 2019-05-23T16:42:15
| 2019-05-23T16:42:15
| 151,123,880
| 0
| 0
| null | 2022-12-30T09:47:11
| 2018-10-01T16:54:09
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 313
|
py
|
# Imports the arithmetic module from the same folder,
# then calls its functions with dot notation, passing arguments,
# and prints the value each function returns.
import arithmetic
print(arithmetic.add(5, 8))
print(arithmetic.subtract(10, 5))
print(arithmetic.multiply(12, 6))
|
[
"vmichaelkirkaldy@live.com"
] |
vmichaelkirkaldy@live.com
|
83d88a5ed0bdcad629a6e3815dd75d21cc5a72e0
|
e61e664d95af3b93150cda5b92695be6551d2a7c
|
/vega/networks/pytorch/customs/modnas/arch_space/construct/torch/model_init.py
|
63a2eea20c488ff5f7c5cdf7026be84854afb40b
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
huawei-noah/vega
|
44aaf8bb28b45f707ed6cd4e871ba70fc0c04846
|
12e37a1991eb6771a2999fe0a46ddda920c47948
|
refs/heads/master
| 2023-09-01T20:16:28.746745
| 2023-02-15T09:36:59
| 2023-02-15T09:36:59
| 273,667,533
| 850
| 184
|
NOASSERTION
| 2023-02-15T09:37:01
| 2020-06-20T08:20:06
|
Python
|
UTF-8
|
Python
| false
| false
| 5,623
|
py
|
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model weight initializer."""
import copy
import math
import torch.nn.init as init
from modnas.registry.construct import register
def _t_init_he_normal_fout(t, gain, fan_in, fan_out):
stdv = gain / math.sqrt(fan_out)
init.normal_(t, 0, stdv)
def _t_init_he_normal_fin(t, gain, fan_in, fan_out):
stdv = gain / math.sqrt(fan_in)
init.normal_(t, 0, stdv)
def _t_init_he_uniform_fout(t, gain, fan_in, fan_out):
b = math.sqrt(3.) * gain / math.sqrt(fan_out)
init.uniform_(t, -b, b)
def _t_init_he_uniform_fin(t, gain, fan_in, fan_out):
b = math.sqrt(3.) * gain / math.sqrt(fan_in)
init.uniform_(t, -b, b)
def _t_init_xavier_uniform(t, gain, fan_in, fan_out):
b = math.sqrt(6.) * gain / math.sqrt(fan_in + fan_out)
init.uniform_(t, -b, b)
def _t_init_xavier_normal(t, gain, fan_in, fan_out):
stdv = math.sqrt(2.) * gain / math.sqrt(fan_in + fan_out)
init.normal_(t, 0, stdv)
def _t_init_uniform_fin(t, gain, fan_in, fan_out):
b = 1.0 / math.sqrt(fan_in)
init.uniform_(t, -b, b)
def _t_init_uniform_fout(t, gain, fan_in, fan_out):
b = 1.0 / math.sqrt(fan_out)
init.uniform_(t, -b, b)
def _t_init_uniform(t, gain, fan_in, fan_out):
init.uniform_(t)
def _t_init_normal(t, gain, fan_in, fan_out):
init.normal_(t)
def _t_init_zeros(t, gain, fan_in, fan_out):
init.zeros_(t)
def _t_init_ones(t, gain, fan_in, fan_out):
init.ones_(t)
def _init_tensor(init_type, t, gain, fan_in, fan_out):
init_fn = _tensor_init_fn.get(init_type)
if init_fn is None or t is None:
return
init_fn(t, gain, fan_in, fan_out)
def _m_init_conv(m, config):
init_type = config['conv']['type']
bias_init_type = config['bias']['type']
gain = config['gain']
if init_type is None:
return
rec_size = m.kernel_size[0] * m.kernel_size[1]
fan_in = rec_size * m.in_channels
fan_out = rec_size * m.out_channels
if config['conv'].get('div_groups', True):
fan_in /= m.groups
fan_out /= m.groups
_init_tensor(init_type, m.weight, gain, fan_in, fan_out)
if m.bias is not None:
_init_tensor(bias_init_type, m.bias, gain, fan_in, fan_out)
def _m_init_norm(m, config):
init_type = config['norm']['type']
bias_init_type = config['bias']['type']
momentum = config['norm'].get('momentum')
eps = config['norm'].get('eps')
gain = config['gain']
m.reset_running_stats()
if momentum is not None:
m.momentum = momentum
if eps is not None:
m.eps = eps
if not m.affine:
return
fan_in = fan_out = m.num_features
_init_tensor(init_type, m.weight, gain, fan_in, fan_out)
_init_tensor(bias_init_type, m.bias, gain, fan_in, fan_out)
def _m_init_fc(m, config):
init_type = config['fc']['type']
bias_init_type = config['bias']['type']
gain = config['gain']
if init_type is None:
return
fan_in, fan_out = m.in_features, m.out_features
_init_tensor(init_type, m.weight, gain, fan_in, fan_out)
if m.bias is None:
return
_init_tensor(bias_init_type, m.bias, gain, fan_in, fan_out)
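# Build name -> function registries from the helpers above by stripping the '_t_init_' / '_m_init_' prefixes.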
_tensor_init_fn = {k[8:]: v for (k, v) in globals().items() if k.startswith('_t_init_')}
_module_init_fn = {k[8:]: v for (k, v) in globals().items() if k.startswith('_m_init_')}
_default_init_config = {
'conv': {
'type': None,
'div_groups': True,
},
'norm': {
'type': None,
},
'fc': {
'type': None,
},
'bias': {
'type': None,
},
}
_default_module_map = {
'Conv2d': 'conv',
'BatchNorm2d': 'norm',
'GroupNorm': 'norm',
'Linear': 'fc',
}
@register
class DefaultModelInitializer():
"""Model weight initializer class."""
def __init__(self,
init_config=None,
module_init_map=None,
default_init_type=None,
neg_slope=math.sqrt(5),
nonlinear='leaky_relu'):
self.init_config = copy.deepcopy(_default_init_config)
self.init_config['gain'] = init.calculate_gain(nonlinear, neg_slope)
self.init_config.update(init_config or {})
self.module_init_map = _default_module_map.copy()
self.module_init_map.update(module_init_map or {})
self.default_init_type = default_init_type
def __call__(self, model):
"""Return initialized model."""
for m in model.modules():
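            # Modules with a mapped type use their dedicated initializer; other leaf modules fall back to per-parameter init.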
m_init_type = self.module_init_map.get(type(m).__name__)
if m_init_type is not None:
_module_init_fn[m_init_type](m, self.init_config)
elif len(list(m.children())) == 0:
for p in m.parameters():
sz = p.shape
fan_out = sz[0] if len(sz) else 1
fan_in = sz[min(1, len(sz) - 1)] if len(sz) else 1
_init_tensor(self.default_init_type, p, self.init_config['gain'], fan_in, fan_out)
return model
|
[
"zhangjiajin@huawei.com"
] |
zhangjiajin@huawei.com
|
59817d4f4915dfc4c470c6d51b0592362187ec0b
|
350d6b7246d6ef8161bdfccfb565b8671cc4d701
|
/Binary Tree Vertical Order Traversal.py
|
da22a1ddbb5aca8b4d6f3dbd14fa43d4a483c554
|
[] |
no_license
|
YihaoGuo2018/leetcode_python_2
|
145d5fbe7711c51752b2ab47a057b37071d2fbf7
|
2065355198fd882ab90bac6041c1d92d1aff5c65
|
refs/heads/main
| 2023-02-14T14:25:58.457991
| 2021-01-14T15:57:10
| 2021-01-14T15:57:10
| 329,661,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def verticalOrder(self, root):
        self.dic = {}  # column offset -> node values; reset on every call
        self.help(root, 1)
save = []
keys = sorted(self.dic.keys())
for k in keys:
save.append(self.dic[k])
return save
def help(self, root, depth):
        if root is None:
            return
        if depth not in self.dic:
            self.dic[depth] = []
self.dic[depth].append(root.val)
self.help(root.left, depth - 1)
self.help(root.right, depth + 1)
return
|
[
"yihao_guo@gwmail.gwu.edu"
] |
yihao_guo@gwmail.gwu.edu
|
0780bc486c4355eaef2a4df385fc503799cbf3eb
|
79e19819aec49b500825f82a7de149eb6a0ba81d
|
/leetcode/1018.py
|
632dc46703f709c5e2bf6b31ac1d966e91cbfa8c
|
[] |
no_license
|
seoyeonhwng/algorithm
|
635e5dc4a2e9e1c50dc0c75d9a2a334110bb8e26
|
90406ee75de69996e666ea505ff5d9045c2ad941
|
refs/heads/master
| 2023-05-03T16:51:48.454619
| 2021-05-26T00:54:40
| 2021-05-26T00:54:40
| 297,548,218
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
class Solution:
def prefixesDivBy5(self, A: List[int]) -> List[bool]:
answer = [False] * len(A)
answer[0], prev = (A[0] == 0), A[0]
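        # prev holds the integer value of the binary prefix seen so far; each step shifts it left and appends the new bit.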
for i in range(1, len(A)):
answer[i] = ((prev * 2 + A[i]) % 5 == 0)
prev = prev * 2 + A[i]
return answer
"""
- 왼쪽으로 shift = 2를 곱한다
"""
|
[
"seoyeon@nowbusking.com"
] |
seoyeon@nowbusking.com
|
b284f9b10b8c572c65a64f1f9b88cde920a8b781
|
d0cb58e1658d4b5b88bdc07e497dc8092707ae02
|
/2021/01january/24specify_data.py
|
6381a461e0645467957c5e23c467055af3ce9fb7
|
[] |
no_license
|
June-fu/python365
|
27f9b753d38ade549d59aa8f2d8bda0fb8b1e20c
|
242033a4b644a7566fbfa4dba9b60f60aa31fe91
|
refs/heads/master
| 2021-07-02T21:42:28.454091
| 2021-05-04T15:08:44
| 2021-05-04T15:08:44
| 233,629,713
| 0
| 0
| null | 2020-01-13T15:52:58
| 2020-01-13T15:36:53
| null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
#!/usr/bin/python
'''
# @ Author: june-fu
# @ Create Time: 2021-02-22 23:59:17
# @ Modified by: june-fu
# @ Modified time: 2021-02-22 23:59:19
# @ Description:arguments parse_dates
'''
import pandas as pd
from io import StringIO
data =('date,A,B,C\n'
'20090101,a,1,2\n'
'20090102,b,3,4\n'
'20090103,c,4,5')
# arguments parse_dates
df = pd.read_csv(StringIO(data), index_col=0, parse_dates=True)
print(df)
# These are Python datetime objects
print(df.index)
|
[
"fujun1990@gmail.com"
] |
fujun1990@gmail.com
|
8e4439a5213755463643b9a98d6b098eb3614207
|
92e26b93057723148ecb8ca88cd6ad755f2e70f1
|
/cov_exp/plain30_orth/plt.py
|
15145016643831e2908e2041dc913dd1c9a66851
|
[] |
no_license
|
lyuyanyii/CIFAR
|
5906ad9fbe1377edf5b055098709528e06b5ace2
|
d798834942d6a9d4e3295cda77488083c1763962
|
refs/heads/master
| 2021-08-30T20:09:52.819883
| 2017-12-19T08:37:37
| 2017-12-19T08:37:37
| 112,701,370
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 315
|
py
|
import matplotlib.pyplot as plt
import pickle
import numpy as np
import scipy.signal as signal
with open("hisloss.data", "rb") as f:
his = pickle.load(f)
his = np.array(his)
hisloss = his[:,1]
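# Median-filter the loss curve (window 9) to suppress spikes before plotting.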
hisloss = signal.medfilt(hisloss, 9)
#print(np.max(hisloss[10000:]))
plt.plot(range(len(hisloss)), hisloss)
plt.show()
|
[
"315603442@qq.com"
] |
315603442@qq.com
|
e86af748470270a3bd18fbbcd3dc8e992712cb17
|
8cf0cf9b71b7c5fbaa150e9893bf461ef661045e
|
/ownblock/ownblock/apps/accounts/views.py
|
77aad64c0c1af2bb8b440208af2f015e13b0a50a
|
[
"MIT"
] |
permissive
|
danjac/ownblock
|
676b27a5aa0d4ce2ac2cd924a632489cd6fc21ee
|
ac662fb7efb2f04567e2f85638c1250286452611
|
refs/heads/master
| 2016-08-02T21:51:56.055598
| 2015-05-02T12:54:47
| 2015-05-02T12:54:47
| 34,940,828
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,442
|
py
|
from django.db.models import Q
from rest_framework import status, viewsets, permissions
from rest_framework.response import Response
from rest_framework.views import APIView
from ..storage.models import Item
from ..parking.models import Vehicle
from ..messaging.models import Message
from ..notices.models import Notice
from ..amenities.models import Booking
from .models import User
from .serializers import (
UserSerializer,
RestrictedUserSerializer,
AuthUserSerializer,
)
class UserViewSet(viewsets.ModelViewSet):
model = User
def get_serializer_class(self):
if self.request.user.role == 'manager':
return RestrictedUserSerializer
return UserSerializer
def retrieve(self, request, *args, **kwargs):
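        # Build a detail payload: serializer output plus gravatar, the user's notices, the message history with
        # the requesting user, and (for residents) their items, vehicles and bookings.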
self.object = self.get_object()
data = self.get_serializer(self.object).data
data['gravatar'] = self.object.get_gravatar_url(size=40)
notices = Notice.objects.filter(author=self.object)
data['notices'] = []
for notice in notices.iterator():
data['notices'].append({
'id': notice.id,
'title': notice.title,
'details': notice.details,
'created': notice.created,
})
if self.object != self.request.user:
messages = Message.objects.filter(
Q(sender=self.object) | Q(recipient=self.object)).filter(
Q(sender=self.request.user) | Q(recipient=self.request.user)
).order_by('-created')
data['sent_messages'] = []
data['received_messages'] = []
for message in messages.iterator():
message_data = {
'id': message.id,
'header': message.header,
'details': message.details,
'created': message.created,
}
if message.sender_id == self.object.id:
data['sent_messages'].append(message_data)
else:
data['received_messages'].append(message_data)
if self.object.role == 'resident':
items = Item.objects.filter(
resident=self.object
).select_related('place')
data['items'] = []
for item in items.iterator():
data['items'].append({
'id': item.id,
'place_id': item.place_id,
'place_name': item.place.name,
'description': item.description,
})
vehicles = Vehicle.objects.filter(
resident=self.object
)
data['vehicles'] = []
for vehicle in vehicles.iterator():
data['vehicles'].append({
'id': vehicle.id,
'description': vehicle.description,
'registration_number': vehicle.registration_number,
})
bookings = Booking.objects.filter(
resident=self.object
).select_related('amenity')
data['bookings'] = []
for booking in bookings:
data['bookings'].append({
'id': booking.id,
'amenity': {
'id': booking.amenity.id,
'name': booking.amenity.name,
},
'reserved_from': booking.reserved_from,
'reserved_to': booking.reserved_to,
})
return Response(data)
def get_queryset(self, *args, **kwargs):
qs = super().get_queryset(*args, **kwargs).select_related(
'apartment'
).filter(is_active=True).order_by('last_name', 'first_name')
if self.request.GET.get('residents'):
return qs.filter(apartment__building=self.request.building)
elif self.request.GET.get('managers'):
return qs.filter(role='manager', site=self.request.building.site)
return qs.filter(
Q(
Q(apartment__building=self.request.building) |
Q(site=self.request.building.site)
),
)
class AuthView(APIView):
permission_classes = (permissions.IsAuthenticated, )
def get_user_response(self, request):
return Response(AuthUserSerializer(
request.user, context={'request': request}).data)
def get(self, request, *args, **kwargs):
return self.get_user_response(request)
def put(self, request, *args, **kwargs):
serializer = AuthUserSerializer(request.user, data=request.DATA)
if not serializer.is_valid():
return Response(serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
serializer.save(force_update=True)
return Response(serializer.data)
def patch(self, request, *args, **kwargs):
password = request.DATA.get('password')
if not password:
return Response('Password is missing',
status=status.HTTP_400_BAD_REQUEST)
request.user.set_password(request.DATA['password'])
request.user.save()
return Response()
|
[
"danjac354@gmail.com"
] |
danjac354@gmail.com
|
b77cd80c0c1fbc74c1487f9da2d71f3e83e1b0ec
|
54de64c1bd866c2cd1ef7f23dff20019a87ae408
|
/src/bio2bel_drugbank/patent_utils.py
|
ea41eb5ec3822be8c47b5a766041d5e8125fa9e7
|
[
"MIT"
] |
permissive
|
AspirinCode/drugbank-1
|
83fc8bfb3b275df085423ac53c698bc0a8bc9c27
|
1b842ed7a9de7904e8a11fd19ad35164ffb781bf
|
refs/heads/master
| 2020-04-07T20:29:55.925875
| 2018-11-20T18:26:38
| 2018-11-20T18:26:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,704
|
py
|
# -*- coding: utf-8 -*-
"""Utilities for downloading patents from Google.
Code modified from original work by Alexander Esser.
"""
import os
import re
from typing import Optional, Set
import requests
from bs4 import BeautifulSoup
LINK_PATTERN = r"https?://patentimages\.storage\.googleapis\.com/.+/([A-z0-9]+\.pdf)"
LINK_RE = re.compile(LINK_PATTERN, re.IGNORECASE)
prefix_map = {
'United States': 'US',
'Canada': 'CA',
}
def download_google_patents(url: str, directory: str) -> Set[str]:
"""Crawls a list of URLs at patent.google.com and downloads the attached PDF documents
:param url: The url (e.g., https://patents.google.com/patent/US5972916)
:param directory: The output directory
"""
rv = set()
try:
r = requests.get(url)
data = r.text
soup = BeautifulSoup(data, "html.parser")
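        # Check every <a> href against the patentimages PDF pattern; matching files are downloaded by _process_link.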
for link in soup.find_all("a"):
target = link.get("href")
link = _process_link(target, directory)
if link:
rv.add(link)
except Exception as e:
print("Could not download patent from {}: {}".format(url, str(e)))
return rv
def _process_link(target, directory: str) -> Optional[str]:
"""Download the link if it fits the description and return it if it works."""
m = LINK_RE.search(target)
if not m:
return
outfile = os.path.join(directory, m.group(1))
if os.path.exists(outfile):
return target
print(f"Downloading {target} to {outfile}")
r2 = requests.get(target, stream=True)
if r2.status_code != 200:
return
with open(outfile, 'wb') as f:
for chunk in r2:
f.write(chunk)
return target
|
[
"cthoyt@gmail.com"
] |
cthoyt@gmail.com
|
bfca6c0531a704417241810a33f46ee4c038afad
|
2b167e29ba07e9f577c20c54cb943861d0ccfa69
|
/numerical_analysis_backup/small-scale-multiobj/pod50_milp/throughput/runsimu11_throughput.py
|
89588a3c2132dc6081ea0222defc8c77da4d7d2d
|
[] |
no_license
|
LiYan1988/kthOld_OFC
|
17aeeed21e195d1a9a3262ec2e67d6b1d3f9ff0f
|
b1237577ea68ad735a65981bf29584ebd889132b
|
refs/heads/master
| 2021-01-11T17:27:25.574431
| 2017-01-23T05:32:35
| 2017-01-23T05:32:35
| 79,773,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,440
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 4 15:15:10 2016
@author: li
optimize throughput
"""
#import sys
#sys.path.insert(0, '/home/li/Dropbox/KTH/numerical_analysis/ILPs')
import csv
from gurobipy import *
import numpy as np
from arch4_decomposition import Arch4_decompose
from arch1 import ModelSDM_arch1
from arch2_decomposition import Arch2_decompose
from arch5_decomposition import Arch5_decompose
np.random.seed(2010)
num_cores=3
num_slots=60
n_sim = 1 # number of simulations
n_start = 11 # index of start
n_end = n_start+n_sim # index of end
time_limit_routing = 1000 # 1000
time_limit_sa = 18000
alpha = 0
beta = 0.01
result = np.zeros((n_sim, 15))
total_cnk = []
for i in range(n_start, n_end):
filename = 'traffic_matrix__matrix_'+str(i)+'.csv'
# print filename
tm = []
with open(filename) as f:
reader = csv.reader(f)
for idx, row in enumerate(reader):
if idx>11:
row.pop()
row = [int(u) for u in row]
tm.append(row)
tm = np.array(tm)*25
total_cnk.append(tm.flatten().astype(bool).sum())
result[i-n_start, 14] = tm.flatten().astype(bool).sum()
print "\n"
print total_cnk
print "\n"
#%% arch4
print "Architecture 4"
m = Arch4_decompose(tm, num_slots=num_slots, num_cores=num_cores,alpha=alpha,beta=beta)
m.create_model_routing(mipfocus=1,timelimit=time_limit_routing,mipgap=0.01)
m.create_model_sa(mipfocus=1,timelimit=time_limit_sa)
result[i-n_start, 0] = m.connections_lb
result[i-n_start, 1] = m.connections_ub
result[i-n_start, 2] = m.throughput_lb
result[i-n_start, 3] = m.throughput_ub
#%% arch1
print "Architecutre 1"
m = ModelSDM_arch1(tm, num_slots=num_slots, num_cores=num_cores,alpha=alpha,beta=beta)
m.create_model(mipfocus=1, timelimit=time_limit_routing,mipgap=0.01)
result[i-n_start, 4] = m.connections
result[i-n_start, 5] = m.throughput
#%% arch2
print "Architecture 2"
m = Arch2_decompose(tm, num_slots=num_slots, num_cores=num_cores,alpha=alpha,beta=beta)
m.create_model_routing(mipfocus=1,timelimit=time_limit_routing,mipgap=0.01)
m.create_model_sa(mipfocus=1,timelimit=time_limit_sa)
result[i-n_start, 6] = m.connections_lb
result[i-n_start, 7] = m.connections_ub
result[i-n_start, 8] = m.throughput_lb
result[i-n_start, 9] = m.throughput_ub
#%% arch5
print "Architecture 5"
m = Arch5_decompose(tm, num_slots=num_slots, num_cores=num_cores,alpha=alpha,beta=beta)
m.create_model_routing(mipfocus=1, timelimit=time_limit_routing, mipgap=0.01)
m.create_model_sa(mipfocus=1, timelimit=time_limit_sa)
result[i-n_start, 10] = m.connections_lb
result[i-n_start, 11] = m.connections_ub
result[i-n_start, 12] = m.throughput_lb
result[i-n_start, 13] = m.throughput_ub
file_name = "result_throughput_{}to{}.csv".format(n_start, n_end)
with open(file_name, 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(['arch4_connections_lb', 'arch4_connections_ub',
'arch4_throughput_lb', 'arch4_throughput_ub',
'arch1_connections', 'arch1_throughput',
'arch2_connections_lb', 'arch2_connections_ub',
'arch2_throughput_lb', 'arch2_throughput_ub',
'arch5_connections_lb', 'arch5_connections_ub',
'arch5_throughput_lb', 'arch5_throughput_ub',
'total_cnk'])
writer.writerows(result)
|
[
"li.yan.ly414@gmail.com"
] |
li.yan.ly414@gmail.com
|
8c37577beb948a84c1017887ad0ff113575583c4
|
87b7d7948aa51fdb4a27540240579788896369ea
|
/code/runs_sacred/model_4_classes/_sources/main_0d7ea3a13b62ec2b4e0ed10b9b965fe4.py
|
721ea09321b607fc28b8b2985a463f302725e990
|
[] |
no_license
|
Samuel-Levesque/Projet_GLO7030
|
6f13accd63b52107ec3e3a0b9b5f52edccda7c8d
|
557bce3235f09723900f65c6e3b44a0ed9d2b519
|
refs/heads/master
| 2022-01-16T12:49:22.884798
| 2019-05-05T18:38:35
| 2019-05-05T18:38:35
| 177,038,991
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,978
|
py
|
from sacred import Experiment
from sacred.observers import FileStorageObserver
from data_set_file import create_huge_data_set,create_encoding_deconding_dict
from model_creation import create_model
from trainning import train_model,load_model_weights,create_scheduler
from test_metrics import calcul_metric_concours
import torch
import torch.optim as optim
import torch.nn as nn
import numpy as np
import random
from torch.utils.data import DataLoader
# Sacred setup (experiment + file storage observer)
experiment_sacred=Experiment("Doodle_Boys")
experiment_sacred.observers.append(FileStorageObserver.create('runs_sacred/model_4_classes'))
#Configs
@experiment_sacred.config
def configuration():
path_data = 'D:/User/William/Documents/Devoir/Projet Deep/data/mini_train/'
path_save_model="saves_model/model_4_classes.tar"
path_load_existing_model=None
# path_load_existing_model = "saves_model/model_4_classes.tar"
path_model_weights_test = "saves_model/model_4_classes.tar"
use_gpu = True
do_training=True
do_testing=True
nb_row_per_classe=300
nb_epoch = 3
batch_size = 32
learning_rate = 0.1
type_schedule="constant"
    seed = 123  # doesn't work (torch/numpy/random are seeded manually below instead)
torch.manual_seed(123)
np.random.seed(123)
random.seed(123)
#Main
@experiment_sacred.automain
def main_program(path_data,path_save_model,path_load_existing_model,path_model_weights_test,
use_gpu,do_training,do_testing,
nb_row_per_classe,
nb_epoch,batch_size,
learning_rate,type_schedule,
seed
):
#Seed
# torch.manual_seed(123)
# np.random.seed(123)
# random.seed(123)
# Label encoding and decoding dicts
enc_dict, dec_dict = create_encoding_deconding_dict(path_data)
#Data_set
size_image_train = 224
data_train=create_huge_data_set(path_data,nb_rows=nb_row_per_classe,size_image=size_image_train,encoding_dict=enc_dict)
data_valid=create_huge_data_set(path_data,nb_rows=100,size_image=size_image_train,skip_rows=range(1,nb_row_per_classe),encoding_dict=enc_dict)
# Model
model = create_model(use_gpu)
if use_gpu:
model.cuda()
#Loss
criterion = nn.CrossEntropyLoss()
#Optimiser
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
# Scheduler LR
scheduler = create_scheduler(start_lr=learning_rate,type=type_schedule,optimizer=optimizer)
#Data loader
train_loader=DataLoader(data_train,batch_size=batch_size,shuffle=True)
valid_loader=DataLoader(data_valid,batch_size=batch_size,shuffle=True)
#Train
if do_training:
train_model(model,train_loader,valid_loader,nb_epoch,
scheduler,optimizer,criterion,use_gpu,
path_save=path_save_model,path_start_from_existing_model=path_load_existing_model)
#Test
if do_testing:
data_test = create_huge_data_set(path_data, nb_rows=100, size_image=size_image_train,
skip_rows=range(1, nb_row_per_classe + 100), encoding_dict=enc_dict)
test_loader = DataLoader(data_test, batch_size=batch_size)
model_final,history=load_model_weights(model,path_model_weights_test,type="best",use_gpu=use_gpu,get_history=True)
# history.display()
acc,loss,score_top3,conf_mat,acc_per_class=calcul_metric_concours(model_final,test_loader,use_gpu=use_gpu,show_acc_per_class=True)
print("Accuracy test: {}".format(acc))
print("Score top 3 concours: {}".format(score_top3))
print(acc_per_class)
#Log experiment
experiment_sacred.log_scalar("Test accuracy",acc)
experiment_sacred.log_scalar("Test loss", loss)
experiment_sacred.log_scalar("Test score top3", score_top3)
experiment_sacred.log_scalar("Test confusion matrix", conf_mat)
experiment_sacred.log_scalar("Test accuracy per class", acc_per_class)
|
[
"44324703+William-Bourget@users.noreply.github.com"
] |
44324703+William-Bourget@users.noreply.github.com
|
81a54439253dce29241c49928fd05e2c8db9e060
|
ac7f2369cf136cef946ee6eb89c5be1edda27769
|
/hare_turtle_algorithm/scratch_4.py
|
d209eb062e4425d63c53283db7cf6454fa6fc968
|
[] |
no_license
|
iluvjava/Silly_Python_Stuff
|
d244a94a6c8236713123815ccd1f1f6c27b1cb98
|
eb12a67c060de783e6b00d6030668f8d32630dad
|
refs/heads/master
| 2021-04-22T23:14:14.215801
| 2021-04-05T05:25:42
| 2021-04-05T05:25:42
| 249,879,410
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
import numpy as np
y = x = np.array([np.arange(0, 10)]).T
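# x and y alias the same 10x1 column vector, so x @ y.T is their 10x10 outer product.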
print(x.T.shape)
print(y.shape)
print(x@y.T)
print(np.linspace(0, 100, 400)[np.newaxis, :].T)
|
[
"victor1301166040@gmail.com"
] |
victor1301166040@gmail.com
|
8fcc9b9fcb2d3773828fcb001c5e5282e5601c8e
|
22cec5da2b1fb83dcc9cf7c888f1e2078b05b62e
|
/flora/wallet/sign_coin_solutions.py
|
e1848b04de272fc1cbdb5bc12e37e82971b93c6b
|
[
"Apache-2.0"
] |
permissive
|
JuEnPeHa/flora-blockchain
|
649d351e096e73222ab79759c71e191e42da5d34
|
656b5346752d43edb89d7f58aaf35b1cacc9a366
|
refs/heads/main
| 2023-07-18T08:52:51.353754
| 2021-09-07T08:13:35
| 2021-09-07T08:13:35
| 399,297,784
| 0
| 0
|
Apache-2.0
| 2021-08-24T01:30:45
| 2021-08-24T01:30:44
| null |
UTF-8
|
Python
| false
| false
| 2,037
|
py
|
import inspect
from typing import List, Any
import blspy
from blspy import AugSchemeMPL
from flora.types.coin_solution import CoinSolution
from flora.types.spend_bundle import SpendBundle
from flora.util.condition_tools import conditions_dict_for_solution, pkm_pairs_for_conditions_dict
async def sign_coin_solutions(
coin_solutions: List[CoinSolution],
secret_key_for_public_key_f: Any, # Potentially awaitable function from G1Element => Optional[PrivateKey]
additional_data: bytes,
max_cost: int,
) -> SpendBundle:
signatures: List[blspy.G2Element] = []
pk_list: List[blspy.G1Element] = []
msg_list: List[bytes] = []
for coin_solution in coin_solutions:
# Get AGG_SIG conditions
err, conditions_dict, cost = conditions_dict_for_solution(
coin_solution.puzzle_reveal, coin_solution.solution, max_cost
)
if err or conditions_dict is None:
error_msg = f"Sign transaction failed, con:{conditions_dict}, error: {err}"
raise ValueError(error_msg)
# Create signature
for pk, msg in pkm_pairs_for_conditions_dict(
conditions_dict, bytes(coin_solution.coin.name()), additional_data
):
pk_list.append(pk)
msg_list.append(msg)
if inspect.iscoroutinefunction(secret_key_for_public_key_f):
secret_key = await secret_key_for_public_key_f(pk)
else:
secret_key = secret_key_for_public_key_f(pk)
if secret_key is None:
e_msg = f"no secret key for {pk}"
raise ValueError(e_msg)
assert bytes(secret_key.get_g1()) == bytes(pk)
signature = AugSchemeMPL.sign(secret_key, msg)
assert AugSchemeMPL.verify(pk, msg, signature)
signatures.append(signature)
# Aggregate signatures
aggsig = AugSchemeMPL.aggregate(signatures)
assert AugSchemeMPL.aggregate_verify(pk_list, msg_list, aggsig)
return SpendBundle(coin_solutions, aggsig)
|
[
"github@floracoin.farm"
] |
github@floracoin.farm
|
970e032873598b577c478df4bda72a6d70df2593
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_328/ch45_2020_04_11_19_23_00_469501.py
|
654fc6cd7bedadc387007d4d63a90c312e9a584c
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
lista = []
while True:
    x = int(input('Digite algum número: '))
    if x > 0:
        lista.append(x)
    else:
        lista.reverse()
        print(lista)
        break
|
[
"you@example.com"
] |
you@example.com
|
ef76fce18c4d75abc69a31441786b2d3465aaad6
|
5ac40dd0907f6b5a7adff338465c7c41fffc4348
|
/src/jukeboxcore/gui/widgets/guerilla/shotcreator_ui.py
|
a94b8806cff4c0c262fcc729863f846a82ed3722
|
[] |
permissive
|
JukeboxPipeline/jukebox-core
|
8effaf675c8a3b39d043bb69e40b75e591bb4a21
|
bac2280ca49940355270e4b69400ce9976ab2e6f
|
refs/heads/master
| 2021-07-22T13:50:58.168148
| 2015-06-01T16:20:56
| 2015-06-01T16:20:56
| 24,540,320
| 2
| 0
|
BSD-3-Clause
| 2021-06-10T19:34:28
| 2014-09-27T19:06:31
|
Python
|
UTF-8
|
Python
| false
| false
| 2,282
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'h:\projects\jukebox-core\src\jukeboxcore\gui\widgets\guerilla\shotcreator.ui'
#
# Created: Tue Jan 13 18:54:57 2015
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_shotcreator_dialog(object):
def setupUi(self, shotcreator_dialog):
shotcreator_dialog.setObjectName("shotcreator_dialog")
shotcreator_dialog.resize(694, 398)
self.gridLayout = QtGui.QGridLayout(shotcreator_dialog)
self.gridLayout.setObjectName("gridLayout")
self.name_lb = QtGui.QLabel(shotcreator_dialog)
self.name_lb.setObjectName("name_lb")
self.gridLayout.addWidget(self.name_lb, 0, 0, 1, 1)
self.name_le = QtGui.QLineEdit(shotcreator_dialog)
self.name_le.setObjectName("name_le")
self.gridLayout.addWidget(self.name_le, 0, 1, 1, 1)
self.desc_lb = QtGui.QLabel(shotcreator_dialog)
self.desc_lb.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.desc_lb.setObjectName("desc_lb")
self.gridLayout.addWidget(self.desc_lb, 1, 0, 1, 1)
self.desc_pte = QtGui.QPlainTextEdit(shotcreator_dialog)
self.desc_pte.setObjectName("desc_pte")
self.gridLayout.addWidget(self.desc_pte, 1, 1, 1, 1)
self.create_pb = QtGui.QPushButton(shotcreator_dialog)
self.create_pb.setObjectName("create_pb")
self.gridLayout.addWidget(self.create_pb, 2, 1, 1, 1)
self.retranslateUi(shotcreator_dialog)
QtCore.QMetaObject.connectSlotsByName(shotcreator_dialog)
def retranslateUi(self, shotcreator_dialog):
shotcreator_dialog.setWindowTitle(QtGui.QApplication.translate("shotcreator_dialog", "Create Shot", None, QtGui.QApplication.UnicodeUTF8))
self.name_lb.setText(QtGui.QApplication.translate("shotcreator_dialog", "Name", None, QtGui.QApplication.UnicodeUTF8))
self.desc_lb.setText(QtGui.QApplication.translate("shotcreator_dialog", "Description", None, QtGui.QApplication.UnicodeUTF8))
self.create_pb.setText(QtGui.QApplication.translate("shotcreator_dialog", "Create", None, QtGui.QApplication.UnicodeUTF8))
|
[
"zuber.david@gmx.de"
] |
zuber.david@gmx.de
|
327bfffa563cbbdd3435fd1eb8bb852e1a0cf97b
|
910d4dd8e56e9437cf09dd8b9c61167673140a1f
|
/dd5_Stack2_연습문제/부분집합.py
|
b02d1c3ead323fdbff993e93b8f47ded822adf39
|
[] |
no_license
|
nopasanadamindy/Algorithms
|
10825b212395680401b200a37ab4fde9085bc61f
|
44b82d2f129c4cc6e811b651c0202a18719689cb
|
refs/heads/master
| 2022-09-28T11:39:54.630487
| 2020-05-29T09:49:56
| 2020-05-29T09:49:56
| 237,923,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 720
|
py
|
# Print every subset of {1, 2, 3}
N = 3
A = [0 for _ in range(N)] # stores whether each element is included (0 or 1)
data = [1, 2, 3]
def printSet(n):
    for i in range(n): # print the elements of this subset
        if A[i] == 1: # A[i] == 1 means data[i] is included, so print it
            print(data[i], end="")
    print()
def powerset(n, k): # n: number of elements, k: current depth
    if n == k: # Basis Part
        printSet(n)
    else: # Inductive Part
        A[k] = 1 # include element k
        powerset(n, k + 1) # decide whether to include the next element
        A[k] = 0 # exclude element k
        powerset(n, k + 1) # decide whether to include the next element
powerset(N, 0)
|
[
"iuui21@snu.ac.kr"
] |
iuui21@snu.ac.kr
|
a02bee8748891f92e694a53f4c42f3fb36df9143
|
46244bb6af145cb393846505f37bf576a8396aa0
|
/leetcode/066.plus_one.py
|
4afb73916cebf2c50af6b1e5aa64b0ec40b737b8
|
[] |
no_license
|
aoeuidht/homework
|
c4fabfb5f45dbef0874e9732c7d026a7f00e13dc
|
49fb2a2f8a78227589da3e5ec82ea7844b36e0e7
|
refs/heads/master
| 2022-10-28T06:42:04.343618
| 2022-10-15T15:52:06
| 2022-10-15T15:52:06
| 18,726,877
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 633
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
"""
"""
class Solution:
# @param digits, a list of integer digits
# @return a list of integer digits
def plusOne(self, digits):
if not digits:
return [1]
carry = 0
for i in range(len(digits)-1, -1, -1):
            print(i)
            r = digits[i] + 1
            digits[i] = (r % 10)
            carry = r // 10
if carry < 1:
break
return ([carry] + digits) if (carry > 0) else digits
if __name__ == '__main__':
s = Solution()
v = lambda st: (st, s.plusOne(st))
    print(v([0]))
|
[
"sockpuppet.lea@gmail.com"
] |
sockpuppet.lea@gmail.com
|
0fdfbfd787cc27d2b00a01b2ccef13060671930d
|
81835671049fd32f173d438ca85a8e81482bc76a
|
/src/muypicky/settings/local.py
|
233d240db6568b445a0afe234a1df87000ee4b57
|
[] |
no_license
|
laura8857/django_restaurant
|
acd344423bd71194f6763e899edaf94955bf06ce
|
f9882d3d2d8998c1e99a7ecb706be66fab8c4425
|
refs/heads/master
| 2021-09-09T07:42:25.636140
| 2017-12-31T07:03:36
| 2017-12-31T07:03:36
| 111,422,444
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,119
|
py
|
"""
Django settings for muypicky project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p!a0#ndo^-lp14=odpiw=cs@(+6a-k67#y&5hw5wnsk$px#--h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'muypicky.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'muypicky.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"luara8857@gmail.com"
] |
luara8857@gmail.com
|
ad99634d0137842d7042a688010637b825ee29ce
|
46ae8264edb9098c9875d2a0a508bc071201ec8b
|
/res/scripts/client/gui/scaleform/daapi/view/metapromopremiumigrwindowmeta.py
|
814c7e9466ff495e9f3a447d32cb6863c4598fcd
|
[] |
no_license
|
Difrex/wotsdk
|
1fc6156e07e3a5302e6f78eafdea9bec4c897cfb
|
510a34c67b8f4c02168a9830d23f5b00068d155b
|
refs/heads/master
| 2021-01-01T19:12:03.592888
| 2016-10-08T12:06:04
| 2016-10-08T12:06:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 899
|
py
|
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/PromoPremiumIgrWindowMeta.py
from gui.Scaleform.framework.entities.abstract.AbstractWindowView import AbstractWindowView
class PromoPremiumIgrWindowMeta(AbstractWindowView):
"""
DO NOT MODIFY!
Generated with yaml.
__author__ = 'yaml_processor'
@extends AbstractWindowView
"""
def as_setTitleS(self, value):
if self._isDAAPIInited():
return self.flashObject.as_setTitle(value)
def as_setTextS(self, value):
if self._isDAAPIInited():
return self.flashObject.as_setText(value)
def as_setWindowTitleS(self, value):
if self._isDAAPIInited():
return self.flashObject.as_setWindowTitle(value)
def as_setApplyButtonLabelS(self, value):
if self._isDAAPIInited():
return self.flashObject.as_setApplyButtonLabel(value)
|
[
"m4rtijn@gmail.com"
] |
m4rtijn@gmail.com
|
cfed4084fec538d16981ee31a7f600850dcf0d86
|
bc23dd0952a7235d2a63f59c83a4a283bbfa49f8
|
/backend/manage.py
|
5f19f0c6d16370616739c0eaa3ae0a8a5c26e630
|
[] |
no_license
|
crowdbotics-apps/dee-world-originals-3621
|
94e8af26153de836e9e313e84f3a7a39f21deb66
|
833a299efb37158bde7446d287ffccaf57f3c829
|
refs/heads/master
| 2020-05-25T11:09:59.009295
| 2019-05-21T06:24:38
| 2019-05-21T06:24:38
| 187,774,566
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 822
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dee_world_originals_3621.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
f665485f49d5e9f3218ee0ee9cc87b2b65a9a74d
|
8ce70bf719616200f623629e1c3fca20e9f3d369
|
/GetSharedExternallyDriveACLs.py
|
06736e36300dc2838dfc0a696d4501ec625e9bf1
|
[] |
no_license
|
glmyers/GAM-Scripts3
|
055f1f398971d2aa630372e1594aeea960f7b104
|
115e1ae811b72570c674a0b0284494f57660ad79
|
refs/heads/master
| 2023-08-02T01:41:09.853011
| 2021-09-15T19:36:37
| 2021-09-15T19:36:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,387
|
py
|
#!/usr/bin/env python3
"""
# Purpose: For a Google Drive User(s), show all drive file ACLs for files shared outside of a list of specified domains
# You specify a list of domains, DOMAIN_LIST, and indicate whether this list is exclusive/inclusive
# EXCLUSIVE_DOMAINS = True: exclude domains in DOMAIN_LIST from the output
# EXCLUSIVE_DOMAINS = False: include domains in DOMAIN_LIST in the output
# You can include/exclude shares to anyone in the ouput
# INCLUDE_ANYONE = True: include shares to anyone in the output
# INCLUDE_ANYONE = False: exclude shares to anyone from the output
# Note: This script can use Basic or Advanced GAM:
# https://github.com/jay0lee/GAM
# https://github.com/taers232c/GAMADV-XTD3
# Customize: Set DOMAIN_LIST, EXCLUSIVE_DOMAINS, INCLUDE_ANYONE
# Python: Use python or python3 below as appropriate to your system; verify that you have version 3
# $ python -V or python3 -V
# Python 3.x.y
# Usage:
# 1: Get ACLs for all files, if you don't want all users, replace all users with your user selection in the command below
# $ Basic GAM: gam all users print filelist id title permissions owners > filelistperms.csv
# $ Advanced GAM: You can have GAM do some pre-filtering
# $ EXCLUSIVE_DOMAINS = True:
# Add the following clause to the command for each domain in DOMAIN_LIST: pm not domain domainx.com em
# $ EXCLUSIVE_DOMAINS = False:
# Add the following clause to the command for each domain in DOMAIN_LIST: pm domain domainx.com em
# $ INCLUDE_ANYONE = True
# Add the following clause to the command: pm type anyone em
# $ gam config auto_batch_min 1 redirect csv ./filelistperms.csv multiprocess all users print filelist fields id,name,permissions,owners.emailaddress <pm clauses>
# 2: From that list of ACLs, output a CSV file with headers "Owner,driveFileId,driveFileTitle,permissionId,role,type,emailAddress,domain"
# that lists the driveFileIds and permissionIds for all ACLs shared with the selected domains.
# (n.b., role, type, emailAddress, domain and driveFileTitle are not used in the next step, they are included for documentation purposes)
# $ python3 GetSharedExternallyDriveACLs.py filelistperms.csv deleteperms.csv
# 3: Inspect deleteperms.csv, verify that it makes sense and then proceed
# 4: If desired, delete the ACLs
# $ gam csv ./deleteperms.csv gam user "~Owner" delete drivefileacl "~driveFileId" "~permissionId"
"""
import csv
import re
import sys
FILE_NAME = 'name'
ALT_FILE_NAME = 'title'
# Substitute your domain(s) in the list below,
# e.g., DOMAIN_LIST = ['domain.com',] or DOMAIN_LIST = ['domain1.com', 'domain2.com',]
DOMAIN_LIST = ['domain.com']
# EXCLUSIVE_DOMAINS = True: You're interested only in domains not in DOMAIN_LIST which would typically be your internal domains
# EXCLUSIVE_DOMAINS = False: You're interested only in domains in DOMAIN_LIST which would typically be external domains
# Indicate whether the list is exclusive or inclusive
EXCLUSIVE_DOMAINS = True
# Indicate whether shares to anyone should be included
INCLUDE_ANYONE = True
QUOTE_CHAR = '"' # Adjust as needed
LINE_TERMINATOR = '\n' # On Windows, you probably want '\r\n'
PERMISSIONS_N_TYPE = re.compile(r"permissions.(\d+).type")
if (len(sys.argv) > 2) and (sys.argv[2] != '-'):
outputFile = open(sys.argv[2], 'w', encoding='utf-8', newline='')
else:
outputFile = sys.stdout
outputCSV = csv.DictWriter(outputFile, ['Owner', 'driveFileId', 'driveFileTitle',
'permissionId', 'role', 'type', 'emailAddress', 'domain'],
lineterminator=LINE_TERMINATOR, quotechar=QUOTE_CHAR)
outputCSV.writeheader()
if (len(sys.argv) > 1) and (sys.argv[1] != '-'):
inputFile = open(sys.argv[1], 'r', encoding='utf-8')
else:
inputFile = sys.stdin
for row in csv.DictReader(inputFile, quotechar=QUOTE_CHAR):
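    # Walk the flattened permissions.N.* columns of this row; each matching type column is one ACL to evaluate.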
for k, v in iter(row.items()):
mg = PERMISSIONS_N_TYPE.match(k)
if mg and v:
permissions_N = mg.group(1)
if v == 'domain':
emailAddress = ''
domain = row[f'permissions.{permissions_N}.domain']
elif v in ['user', 'group']:
if row.get(f'permissions.{permissions_N}.deleted') == 'True':
continue
emailAddress = row[f'permissions.{permissions_N}.emailAddress']
domain = emailAddress[emailAddress.find('@')+1:]
else: #anyone
if not INCLUDE_ANYONE:
continue
emailAddress = ''
domain = ''
if ((row[f'permissions.{permissions_N}.role'] != 'owner') and
((v == 'anyone') or # Can only be true if INCLUDE_ANYONE = True
(EXCLUSIVE_DOMAINS and domain not in DOMAIN_LIST) or
(not EXCLUSIVE_DOMAINS and domain in DOMAIN_LIST))):
outputCSV.writerow({'Owner': row['owners.0.emailAddress'],
'driveFileId': row['id'],
'driveFileTitle': row.get(FILE_NAME, row.get(ALT_FILE_NAME, 'Unknown')),
'permissionId': f'id:{row[f"permissions.{permissions_N}.id"]}',
'role': row[f'permissions.{permissions_N}.role'],
'type': v,
'emailAddress': emailAddress,
'domain': domain})
if inputFile != sys.stdin:
inputFile.close()
if outputFile != sys.stdout:
outputFile.close()
|
[
"ross.scroggs@gmail.com"
] |
ross.scroggs@gmail.com
|
bf7de811bfea6dda3995b659cf1eefa05341ded2
|
74472ae20fa049a82b20b8ba7ea80394c43d5a01
|
/messenger/urls.py
|
14204e888b18dbed10f155dcb11b12b8c5abf853
|
[] |
no_license
|
Adelgh/Projet
|
ad2d02e92f7ab3adef4c2646ba0c0838bc2e799e
|
1e1918f5ee47312dce47e2ae384c0168ffce7664
|
refs/heads/master
| 2021-01-15T08:27:42.502063
| 2017-08-28T14:34:16
| 2017-08-28T14:34:16
| 99,567,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.inbox, name='inbox'),
url(r'^new/$', views.new, name='new_message'),
url(r'^send/$', views.send, name='send_message'),
url(r'^send1/$', views.send1, name='send_message1'),
url(r'^delete/$', views.delete, name='delete_message'),
url(r'^users/$', views.users, name='users_message'),
url(r'^check/$', views.check, name='check_message'),
url(r'^filter/$', views.filter, name='filter'),
url(r'^latest/$', views.latest, name='latest_message'),
url(r'^upload/$', views.upload, name='upload'),
url(r'^(?P<username>[^/]+)/$', views.messages, name='messages'),
]
|
[
"you@example.com"
] |
you@example.com
|
8c9e26db66935091b5ff391425547f99e9a0a6e4
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/9AMT6SC4Jz8tExihs_23.py
|
306863290370f89d947dad424359f11ee3c866c3
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
from itertools import product
def generate_nonconsecutive(n):
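    # Enumerate all n-bit binary strings and keep only those with no two consecutive 1s,
    # e.g. generate_nonconsecutive(3) -> "000 001 010 100 101".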
return " ".join([combo for combo in ["".join(list(combi)) for combi in product("01",repeat=n)] if "11" not in combo])
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
6ddca35b1612d57330fd4fc592c1f7de0f2633d3
|
fffbf9e1ac40fdbd77f5b6baf34662478da8162e
|
/library/setup.py
|
fc56d3fee0faa0c1dde3eb53f3b7cd88eb98ddf7
|
[
"MIT"
] |
permissive
|
yorkrobotlab/inky
|
aa4c41ce17e8e47c3f6b2a16368560be6c66f051
|
65f9abb7cb09e2a9d9b31e484a576d230d8c28a1
|
refs/heads/master
| 2022-10-03T04:43:19.189473
| 2020-03-11T11:21:04
| 2020-03-11T11:21:04
| 259,630,799
| 1
| 0
|
MIT
| 2020-04-28T12:29:16
| 2020-04-28T12:29:15
| null |
UTF-8
|
Python
| false
| false
| 2,147
|
py
|
#!/usr/bin/env python
"""
Copyright (c) 2017 Pimoroni.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from setuptools import setup
classifiers = [
'Development Status :: 5 - Production/Stable',
'Operating System :: POSIX :: Linux',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Topic :: System :: Hardware'
]
setup(
name='inky',
version='0.0.6',
author='Philip Howard',
author_email='phil@pimoroni.com',
description='Inky pHAT Driver',
long_description=open('README.rst').read() + '\n' + open('CHANGELOG.txt').read(),
license='MIT',
keywords='Raspberry Pi e-paper display driver',
url='http://www.pimoroni.com',
project_urls={'GitHub': 'https://www.github.com/pimoroni/inky'},
classifiers=classifiers,
py_modules=[],
packages=['inky'],
include_package_data=True,
install_requires=['numpy'],
extras_require={
'rpi-gpio-output': ['spidev', 'RPi.GPIO', 'smbus2']
}
)
|
[
"phil@gadgetoid.com"
] |
phil@gadgetoid.com
|
3a2dbda0d6edea8b04c5c326afe5c8171c834539
|
f3bd271bf00325881fb5b2533b9ef7f7448a75ec
|
/xcp2k/classes/_restart2.py
|
4de1a3a5a71bd0926db09c8a34da1ed829325acb
|
[] |
no_license
|
obaica/xcp2k
|
7f99fc9d494859e16b9b0ea8e217b0493f4b2f59
|
6e15c2c95658f545102595dc1783f5e03a9e6916
|
refs/heads/master
| 2020-07-15T17:27:43.378835
| 2019-02-11T16:32:24
| 2019-02-11T16:32:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
from xcp2k.inputsection import InputSection
from _each11 import _each11
class _restart2(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Add_last = None
self.Common_iteration_levels = None
self.Filename = None
self.Log_print_key = None
self.EACH = _each11()
self._name = "RESTART"
self._keywords = {'Common_iteration_levels': 'COMMON_ITERATION_LEVELS', 'Log_print_key': 'LOG_PRINT_KEY', 'Add_last': 'ADD_LAST', 'Filename': 'FILENAME'}
self._subsections = {'EACH': 'EACH'}
self._attributes = ['Section_parameters']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
960d2fbde5d08095542b53926dcab3915b657c1b
|
5f6019aefd4b940451ae81fb0e430e97d19626cb
|
/2016/martian/get_lines.py
|
85a7a3481b29fc18839f72a0420d73a8b1eefc05
|
[] |
no_license
|
cligs/projects
|
7cee393ccdd5fdf8477a89f07ae7a93fe78511e6
|
d8a60564d3436a207ce4d94dbdefed9bf5402a9c
|
refs/heads/master
| 2022-04-28T20:38:27.267358
| 2022-04-12T09:08:05
| 2022-04-12T09:08:05
| 42,662,737
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,207
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Filename: get_lines.py
# Authors: #cf
# 2016-05-21
import re
import os
import glob
import pandas as pd
WorkDir = "/media/christof/data/Dropbox/0-Analysen/2016/martians/diffs5/"
DiffTable = WorkDir+"DiffTable_2016-04-29.csv"
DiffedText = WorkDir+"martians_wdiffed-prep.txt"
Types = ["deletion-major", "deletion-minor", "expansion-major", "expansion-minor"]
def get_lines(DiffTable, DiffedText, Types):
"""
Collect line IDs with expansions / deletions, get lines from diffed text, write into separate file.
Author: #cf.
"""
print("get_lines...")
# Open and read the DiffTable
with open(DiffTable, "r") as InFile:
        Diffs = pd.read_csv(InFile, sep="\t", index_col=0)  # DataFrame.from_csv has been removed from pandas
with open(DiffedText, "r") as InFile:
Text = InFile.read()
Text = re.split("\n", Text)
#print(Diffs.head())
    # For each type of edit, get the line IDs
for Type in Types:
Items = Diffs.loc[Diffs['type'] == Type]
#print(Type, len(Items))
ItemIDs = Items.index.values
#print(ItemIDs)
LineIDs = []
for ItemID in ItemIDs:
LineID = int(ItemID[:-2])
LineIDs.append(LineID)
#print(len(LineIDs))
#print(LineIDs)
Lines = []
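        # For each matched line, also emit the preceding and following lines as context.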
for LineID in LineIDs:
Line = "-- " + '{:05d}'.format(LineID-1) + ": " + Text[LineID-2]
Lines.append(Line)
Line = "=> " + '{:05d}'.format(LineID) + ": " + Text[LineID-1]
Lines.append(Line)
Line = "-- " + '{:05d}'.format(LineID+1) + ": " + Text[LineID-0] + "\n"
Lines.append(Line)
Lines = "\n".join(Lines)
LinesFile = "./lines/lines_"+str(Type)+".txt"
with open(LinesFile, "w") as OutFile:
OutFile.write(Lines)
#with open(TXMFolder+Filename[:-7]+".xml", "w") as OutFile:
# OutFile.write(NewNewText)
print("Done.")
get_lines(DiffTable, DiffedText, Types)
|
[
"c.schoech@gmail.com"
] |
c.schoech@gmail.com
|
3f25fb7ce6da69d951596e88ada26bf2a14bd5d8
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_285/ch5_2019_06_06_18_54_46_963120.py
|
985b9e61a770d1502eea4003eb618e39ff03abfa
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
def eh_primo(n):
primo=True
if n<=1:
primo=False
for e in range(2,n):
if n%e==0 and e!=n:
primo=False
return primo
def maior_primo_menor_que(n):
    # Keep the list local so repeated calls do not accumulate earlier results.
    lista_primos = []
    if n < 0:
        return -1
    for i in range(0, n + 1):
        if eh_primo(i):
            lista_primos.append(i)
    if not lista_primos:  # no prime <= n (n == 0 or 1)
        return -1
    return lista_primos[-1]
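# Example usage (illustrative, not part of the original exercise):
# maior_primo_menor_que(10)  -> 7
# maior_primo_menor_que(100) -> 97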
|
[
"you@example.com"
] |
you@example.com
|
4a95b21c810a8139cdf6848ac7d6fbe6c2f553ff
|
4b3ae6048ced0d7f88a585af29fa3a7b15005749
|
/Python/Python_Fundamentals/makingTuples.py
|
826647c1102aa86bdc343efe54ec68cda094a6db
|
[] |
no_license
|
ajag408/DojoAssignments
|
a6320856466ac21d38e8387bdcbbe2a02009e418
|
03baa0ff5261aee6ffedf724657b3a8c7cdffe47
|
refs/heads/master
| 2022-12-11T15:50:46.839881
| 2021-06-07T20:57:17
| 2021-06-07T20:57:17
| 79,872,914
| 0
| 0
| null | 2022-12-08T00:35:09
| 2017-01-24T02:58:15
|
Python
|
UTF-8
|
Python
| false
| false
| 209
|
py
|
def dictToTuple(dict):
return dict.items()
# my_dict = {
# "Speros": "(555) 555-5555",
# "Michael": "(999) 999-9999",
# "Jay": "(777) 777-7777"
# }
#
# answer = dictToTuple(my_dict)
# print answer
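# Note (added for clarity): on Python 3, dict.items() returns a view object, so
# wrap it in list() -- e.g. list(my_dict.items()) -- if an actual list of
# (key, value) tuples is required.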
|
[
"akashjagannathan408@gmail.com"
] |
akashjagannathan408@gmail.com
|
518f8410f8bc49ab48576f99926b7c130acc5de7
|
177338a720f904f63926da055364cc0e2c0a850c
|
/python_stu/s11_22_pager/app01/migrations/0001_initial.py
|
e1df1b6e1002b6b8fc5e9aec9c576e3d2b84b7e1
|
[] |
no_license
|
xuefenga616/mygit
|
60ef7bf7201603e13d4621cf7a39dea8ec92e0b7
|
be3b8003fcc900ce7ca6616a9ddebb0edcbc1407
|
refs/heads/master
| 2020-09-13T11:50:55.448041
| 2017-08-27T10:59:00
| 2017-08-27T10:59:00
| 67,042,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='UserList',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('username', models.CharField(max_length=32)),
('age', models.IntegerField()),
],
),
]
|
[
"xuefeng_11@qq.com"
] |
xuefeng_11@qq.com
|
a7aaea88c780b8bd4794ae81c8be3b058b2d5c5d
|
c4b8e1e09dedbccd37ca008ecaaca4438610bbaf
|
/z3/building_a_house.py
|
9d5015a6cc2a0dd406b4c96667b91c8ae531df7d
|
[
"MIT"
] |
permissive
|
hakank/hakank
|
4806598b98cb36dd51b24b0ab688f52dadfe9626
|
c337aaf8187f15dcdc4d5b09cd2ed0dbdb2e72c2
|
refs/heads/master
| 2023-08-15T00:21:52.750270
| 2023-07-27T16:21:40
| 2023-07-27T16:21:40
| 11,933,517
| 336
| 97
|
MIT
| 2023-07-27T11:19:42
| 2013-08-06T20:12:10
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,932
|
py
|
#!/usr/bin/python -u
# -*- coding: latin-1 -*-
#
# Building a house, simple scheduling problem in Z3
#
# This model is adapted OPL model sched_intro.mod (examples).
# """
# This is a basic problem that involves building a house. The masonry,
# roofing, painting, etc. must be scheduled. Some tasks must
# necessarily take place before others, and these requirements are
# expressed through precedence constraints.
# """
#
# The OPL solution is
# """
# Masonry : 0..35
# Carpentry: 35..50
# Plumbing : 35..75
# Ceiling : 35..50
# Roofing : 50..55
# Painting : 50..60
# Windows : 55..60
# Facade : 75..85
# Garden : 75..80
# Moving : 85..90
# """
#
# With the extra objective (from the OPL model sched_time.mod) the result is
#
# masonry : [20 -- 35 --> 55]
# carpentry: [75 -- 15 --> 90]
# plumbing : [55 -- 40 --> 95]
# ceiling : [75 -- 15 --> 90]
# roofing : [90 -- 5 --> 95]
# painting : [90 -- 10 --> 100]
# windows : [95 -- 5 --> 100]
# facade : [95 -- 10 --> 105]
# garden : [95 -- 5 --> 100]
# moving : [105 -- 5 --> 110]
#
#
# This Z3 model was written by Hakan Kjellerstrand (hakank@gmail.com)
# See also my Z3 page: http://hakank.org/z3/
#
from z3_utils_hakank import *
# handle the precedences
# task x must be finished before task y begins
def prec(sol, x, y, s, d):
sol.add(s[x] + d[x] <= s[y])
sol = SolverFor("LIA")
# data
num_tasks = 10
# for the precedences
masonry,carpentry,plumbing,ceiling,roofing,painting,windows,facade,garden,moving = range(num_tasks)
tasks = [masonry,carpentry,plumbing,ceiling,roofing,painting,windows,facade,garden,moving]
tasks_s = ["masonry","carpentry","plumbing","ceiling","roofing","painting","windows","facade","garden","moving"]
duration = [35,15,40,15, 5,10, 5,10, 5, 5];
height = [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1];
total_duration = sum(duration)
# precedences
num_precedences = 14;
precedences = [
[masonry, carpentry],
[masonry, plumbing],
[masonry, ceiling],
[carpentry, roofing],
[ceiling, painting],
[roofing, windows],
[roofing, facade],
[plumbing, facade],
[roofing, garden],
[plumbing, garden],
[windows, moving],
[facade, moving],
[garden, moving],
[painting, moving]
]
# variables
start = makeIntVector(sol,"start",num_tasks, 0, total_duration)
end = makeIntVector(sol,"end",num_tasks, 0, total_duration)
limitx = makeIntVar(sol,"limitx",1,3)
makespan = makeIntVar(sol,"makespan", 0,total_duration)
# the extra objective z (see above)
z = makeIntVar(sol,"z", 0, 10000)
# select which variable we should minimize: makespan or z
min_val = makespan # (then we ignore the z part)
# min_val = z
# constraints
# This takes a long time to calculate
# print("before cumulative")
cumulative(sol, start, duration, height, limitx, 0, total_duration)
# print("after cumulative")
if min_val == z:
sol.add(z ==
400 * maximum2(sol,[end[moving]- 100, 0]) +
200 * maximum2(sol,[25 - start[masonry], 0]) +
300 * maximum2(sol,[75 - start[carpentry], 0]) +
100 * maximum2(sol,[75 - start[ceiling], 0]))
else:
sol.add(z == 0)
for t in range(num_tasks):
sol.add(end[t] == start[t] + duration[t])
# makespan is the end time of the last task
maximum(sol, makespan, end)
# precedences
for p in range(num_precedences):
prec(sol,precedences[p][0], precedences[p][1], start, duration)
# minimize makespan;
while sol.check() == sat:
mod = sol.model()
print("makespan:", mod[makespan])
if min_val == z:
print("z:", mod[z])
print("start:", [mod[start[t]] for t in range(num_tasks)])
print("end :", [mod[end[t]] for t in range(num_tasks)])
for i in range(num_tasks):
print("%-10s: %3i..(%3i)..%3i" % (tasks_s[i], mod[start[i]].as_long(), duration[i], mod[end[i]].as_long()))
print()
getLessSolution(sol,mod,min_val)
|
[
"hakank@gmail.com"
] |
hakank@gmail.com
|
6f0e96c1993a1b210e4d7c1365b69706190d11d7
|
60814a33c10069ac92f2621463bfa0acfed16f7e
|
/StarmerxSpider/pool.py
|
017a3a649a0fdb4e6467d2b191a5ff4a54083268
|
[] |
no_license
|
ijt0walle/LiuFan_Spider
|
967138c79bb4f6097fb8d898892a02c5fd6a454c
|
25c07e7d594a835d123530bb49bce77a5bd7f662
|
refs/heads/master
| 2021-01-25T13:18:28.306502
| 2017-08-15T02:32:08
| 2017-08-15T02:32:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,839
|
py
|
#!/usr/bin/python
# coding=utf8
from Queue import Queue
import threading
import contextlib
WorkerStop = object()
class ThreadPool:
workers = 0
thread_factory = threading.Thread
current_thread = staticmethod(threading.currentThread)
def __init__(self, max_threads=32, name=None):
self.queue = Queue(0)
self.max_threads = max_threads
self.name = name
        self.waiters = []  # threads currently waiting for a job
        self.working = []  # threads currently executing a job
def start(self):
need_size = self.queue.qsize()
while self.workers < min(self.max_threads, need_size):
self.start_a_worker()
def start_a_worker(self):
self.workers += 1
new_thread = self.thread_factory(target=self._worker, name='New Worker')
new_thread.start()
def call_in_thread(self, func, *args, **kwargs):
self.call_in_thread_with_callback(None, func, *args, **kwargs)
def call_in_thread_with_callback(self, on_result, func, *args, **kwargs):
job = (func, args, kwargs, on_result)
self.queue.put(job)
@contextlib.contextmanager
def _work_state(self, states, worker_thread):
assert isinstance(states, list)
        states.append(worker_thread)  # add the current thread to the given state list
try:
yield
finally:
            states.remove(worker_thread)  # remove it from the state list when done
def _worker(self):
        ct = self.current_thread()  # the worker thread currently running this loop
job = self.queue.get()
while job is not WorkerStop:
with self._work_state(self.working, ct):
func, args, kwargs, on_result = job
del job
try:
result = func(*args, **kwargs)
success = True
                except:
                    success = False
                    result = None  # ensure 'result' is defined for the callback and the del below
del func, args, kwargs
if on_result is not None:
try:
on_result(success, result)
except:
pass
del on_result, result
with self._work_state(self.waiters, ct):
job = self.queue.get()
def stop(self):
"""
Close threads
:return:
"""
while self.workers:
self.queue.put(WorkerStop)
self.workers -= 1
if __name__ == '__main__':
def show_timestamp(name):
import time
print '%s: %s' % (name, time.time())
time.sleep(1)
pool = ThreadPool(10)
for i in range(100):
pool.call_in_thread(show_timestamp, i)
print '# Before start()'
pool.start()
print '# After start()'
pool.stop()
print '# After stop()'
|
[
"liufan.dery@gmail.com"
] |
liufan.dery@gmail.com
|
440008a7a36ecaef1ea45f372d64494846621011
|
6669b132eb482f95c1f40d35ecae14a544fe9197
|
/dp/no70.py
|
f978622a8425456080815f3c7ee609f8abec503a
|
[] |
no_license
|
markdannel/leetcode
|
94dade2e5a286d04075e70e48015459ea6ac383a
|
6a2ac436599ecebc527efe0d6bfe0f6f825311fb
|
refs/heads/master
| 2021-06-06T20:56:34.868122
| 2020-10-21T12:16:56
| 2020-10-21T12:16:56
| 140,668,176
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 934
|
py
|
# Suppose you are climbing a staircase. It takes n steps to reach the top.
# Each time you can climb either 1 or 2 steps. In how many distinct ways can you climb to the top?
# Note: the given n is a positive integer.
# Example 1:
# Input: 2
# Output: 2
# Explanation: there are two ways to climb to the top.
# 1. 1 step + 1 step
# 2. 2 steps
# Example 2:
# Input: 3
# Output: 3
# Explanation: there are three ways to climb to the top.
# 1. 1 step + 1 step + 1 step
# 2. 1 step + 2 steps
# 3. 2 steps + 1 step
# Identify the "state" -> define the meaning of the dp array/function -> identify the "choices" -> identify the base case
class Solution:
def climbStairs(self, n: int) -> int:
memo = [0]*n
def dpp(n):
if n <= 2:
return n
if memo[n-1] > 0:
return memo[n-1]
memo[n-1] = dpp(n-1) + dpp(n-2)
return memo[n-1]
return dpp(n)
s=Solution()
print(s.climbStairs(5))
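# The memoised recursion above implements the Fibonacci-style recurrence
# ways(n) = ways(n-1) + ways(n-2). An equivalent iterative sketch (illustrative only):
#
# def climb_stairs_iter(n):
#     a, b = 1, 2              # ways to reach step 1 and step 2
#     for _ in range(n - 2):
#         a, b = b, a + b
#     return a if n == 1 else b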
|
[
"wistion@foxmail.com"
] |
wistion@foxmail.com
|
e73305264df6b1aea70f4552a91dc35e2b2d9d40
|
159d2b827db0ae748b739378cab43a24e1ebaa38
|
/buildtools/scons-local-3.0.0/scons-local-3.0.0/SCons/Platform/sunos.py
|
3279fb9c5b725417a732aa469ae0a7a65daf1880
|
[
"MIT",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
mqnc/c-sick
|
2ef474f5626fcf47b5ee0793220dd7693656b488
|
65b54b21d9492fae7c7cac299f56c8e6583ef555
|
refs/heads/master
| 2020-03-23T15:02:03.057094
| 2019-10-18T13:51:33
| 2019-10-18T13:51:33
| 141,716,128
| 1
| 1
|
BSD-3-Clause
| 2019-07-24T06:30:00
| 2018-07-20T13:34:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,919
|
py
|
"""engine.SCons.Platform.sunos
Platform-specific initialization for Sun systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001 - 2017 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/sunos.py rel_3.0.0:4395:8972f6a2f699 2017/09/18 12:59:24 bdbaddog"
from . import posix
def generate(env):
posix.generate(env)
# Based on sunSparc 8:32bit
# ARG_MAX=1048320 - 3000 for environment expansion
env['MAXLINELENGTH'] = 1045320
env['PKGINFO'] = 'pkginfo'
env['PKGCHK'] = '/usr/sbin/pkgchk'
env['ENV']['PATH'] = env['ENV']['PATH'] + ':/opt/SUNWspro/bin:/usr/ccs/bin'
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
[
"mirko.kunze@web.de"
] |
mirko.kunze@web.de
|
f114cc7a55d5cfd56927c7da8e0c7f5d3752c94f
|
d7016f69993570a1c55974582cda899ff70907ec
|
/sdk/eventhub/azure-mgmt-eventhub/azure/mgmt/eventhub/v2015_08_01/aio/_event_hub_management_client.py
|
b45cc43bc478f9a8629155aa12abd9c16f8499af
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
kurtzeborn/azure-sdk-for-python
|
51ca636ad26ca51bc0c9e6865332781787e6f882
|
b23e71b289c71f179b9cf9b8c75b1922833a542a
|
refs/heads/main
| 2023-03-21T14:19:50.299852
| 2023-02-15T13:30:47
| 2023-02-15T13:30:47
| 157,927,277
| 0
| 0
|
MIT
| 2022-07-19T08:05:23
| 2018-11-16T22:15:30
|
Python
|
UTF-8
|
Python
| false
| false
| 5,134
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models as _models
from ..._serialization import Deserializer, Serializer
from ._configuration import EventHubManagementClientConfiguration
from .operations import ConsumerGroupsOperations, EventHubsOperations, NamespacesOperations, Operations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class EventHubManagementClient: # pylint: disable=client-accepts-api-version-keyword
"""Azure Event Hubs client.
:ivar operations: Operations operations
:vartype operations: azure.mgmt.eventhub.v2015_08_01.aio.operations.Operations
:ivar namespaces: NamespacesOperations operations
:vartype namespaces: azure.mgmt.eventhub.v2015_08_01.aio.operations.NamespacesOperations
:ivar event_hubs: EventHubsOperations operations
:vartype event_hubs: azure.mgmt.eventhub.v2015_08_01.aio.operations.EventHubsOperations
:ivar consumer_groups: ConsumerGroupsOperations operations
:vartype consumer_groups:
azure.mgmt.eventhub.v2015_08_01.aio.operations.ConsumerGroupsOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Subscription credentials that uniquely identify a Microsoft Azure
subscription. The subscription ID forms part of the URI for every service call. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2015-08-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = EventHubManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.namespaces = NamespacesOperations(self._client, self._config, self._serialize, self._deserialize)
self.event_hubs = EventHubsOperations(self._client, self._config, self._serialize, self._deserialize)
self.consumer_groups = ConsumerGroupsOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "EventHubManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details: Any) -> None:
await self._client.__aexit__(*exc_details)
|
[
"noreply@github.com"
] |
kurtzeborn.noreply@github.com
|
24d00706810b45332650e6f2373530e74e5de2fa
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03624/s778098430.py
|
55d87c556857c9b5ceb9dae5cfda1de24006c1e7
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
s = input()
ans = 'None'
for i in range(26):
c = chr(ord('a') + i)
if c not in s:
ans = c
break
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
f08fa45d2f7184da8a83e99c2fa82e9f5560718c
|
7456c190ac67e9bf383c645839ac210f6f87f626
|
/Scattering_Analysis.py
|
17b0643b6d1db264200c9e075b5f6124f10e2277
|
[] |
no_license
|
joebatley/PythonCode
|
6a8b9d775577b4ba5b48a43b839576b1a861464e
|
de2748fdd40a0c21f7292c7188b8873f95bc759a
|
refs/heads/master
| 2021-01-11T11:03:42.894488
| 2014-09-29T13:35:02
| 2014-09-29T13:35:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,338
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 6 10:44:51 2014
@author: Joe
"""
import numpy
import pylab as plt
from scipy import interpolate
from scipy.interpolate import splrep, splev
from scipy.integrate import quad
import Stoner
from Stoner.Folders import DataFolder
import Stoner.Analysis as Analysis
import scipy.optimize
from lmfit import minimize, Parameters, Parameter, report_fit
import Stoner.PlotFormats as SPF
import Stoner.Plot as SP
from Stoner.Util import format_error
def BG(params,t,rho):
K = params['K'].value
Dt = params['Dt'].value
rho_0 = params['rho_0'].value
a=numpy.ones(len(t))
b=numpy.ones(len(t))
c=numpy.ones(len(t))
for i in range(len(t)):
func_ph = lambda x:(x**5)/((numpy.exp(x)-1)*(1-numpy.exp(-x)))#((numpy.sinh(x))**2)
func_sd = lambda x:(x**3)/((numpy.exp(x)-1)*(1-numpy.exp(-x)))
func_ee = lambda x:(x**2)/((numpy.exp(x)-1)*(1-numpy.exp(-x)))
ph = quad(func_ph,0,(Dt/t[i]))
sd = quad(func_sd,0,(Dt/t[i]))
ee = quad(func_ee,0,(Dt/t[i]))
a[i]=ph[0]
b[i]=sd[0]
c[i]=ee[0]
model3 = rho_0 + K * ((t/Dt)**5) * a + K * ((t/Dt)**3) * b + K * ((t/Dt)**2) * c
model2 = rho_0 + K * ((t/Dt)**5) * a + K * ((t/Dt)**3) * b
model1 = rho_0 + K * ((t/Dt)**5) * a
return model1-rho
################ IMPORT FILE #######################
sample = 'SC021'
datadir = '/Volumes/data/Projects/Spincurrents/Joe Batley/Measurements/SC021/Transport/Scattering Analysis/'
R = Analysis.AnalyseFile(datadir+'SC021_1_B_Cu_resistivity_vs_T.txt')
L = Analysis.AnalyseFile(datadir+'SC021_Spindiffusion_length_vs_T.txt')
################ FIT RESISTANCE DATA #######################
# create a set of Parameters
params = Parameters()
params.add('K', value= 9e-8,min=0.5e-8,max=5e-7)
params.add('Dt', value= 190,min=100,max=500)
params.add('rho_0', value= 2.9e-8,min=0.5e-8,max=10e-8)
# do fit, here with leastsq model
result = minimize(BG, params, args=(R.column('T (K)'), R.column('res')))
# calculate final result
final = R.column('res') + result.residual
R.add_column(final,column_header='BG')
# write error report
report_fit(params)
print params['K']
################ GET SCATTERING TIME #######################
rho = R.interpolate(L.column('T'))
print R.column_headers
tsf = L.column('Lam_Cu')**2*rho[:,2]*1.6e-19*1.81e28
tau = Analysis.AnalyseFile()
tau.add_column(L.column('T'),'T (K)')
tau.add_column(1/tsf,r'1/$\tau_{sf}$')
tau_err = (L.column('Lam_err')/L.column('Lam_Cu'))/tsf
tau.add_column(tau_err,'1/t_err')
################ FIT SCATTERING TIME #######################
def phonon(sc_params,t,tau):
func_ph = lambda x:(x**5)/((numpy.exp(x)-1)*(1-numpy.exp(-x)))
K = params['K'].value
Dt = params['Dt'].value
e = sc_params['epsilon'].value
i = sc_params['imp'].value
a=numpy.ones(len(t))
for j in range(len(t)):
ph = quad(func_ph,0,(Dt/t[j]))
a[j] = ph[0]
rho_ph = K * ((t/Dt)**5) * numpy.array(a)
tau_ph_sf = ((e*8.45e28*(1.6e-19**2)*rho_ph)/9.1e-31)+i
return (tau_ph_sf-tau)
# create a set of Parameters
sc_params = Parameters()
sc_params.add('epsilon', value= 9e20)
sc_params.add('imp', value= 1e9)
# do fit, here with leastsq model
q=SP.PlotFile(tau.clone)
d = Analysis.AnalyseFile(tau.clone)
d.del_rows('T (K)',lambda x,y:x<100 and x>230)
sc_result = minimize(phonon, sc_params, args=(d.column('T (K)'), d.column(r'1/$\tau_{sf}$')))
# calculate final result
sc_final = (d.column(r'1/$\tau_{sf}$')) + sc_result.residual
d.add_column(sc_final,column_header='fit')
# write error report
report_fit(sc_params)
e_ph = sc_params['epsilon'].value
e_ph_err = sc_params['epsilon'].stderr
print r'$\epsilon_ph$ = ' + str(e_ph) + '$\pm$' + str(e_ph_err)
print format_error(e_ph,e_ph_err,latex=True)
e_imp = sc_params['imp'].value*9.1e-31/(8.45e28*(1.6e-19**2)*params['rho_0'].value)
e_imp_err = e_imp*numpy.sqrt((sc_params['imp'].stderr/sc_params['imp'].value)**2 + (params['rho_0'].stderr/params['rho_0'].value)**2)
print r'$\epsilon_imp$ = ' + str(e_imp) + '$\pm$' + str(e_imp_err)
print format_error(e_imp,e_imp_err,latex=True)
################ PLOT SCATTERING DATA #######################
fit=SP.PlotFile(d.clone)
fit.template=SPF.JTBPlotStyle
t=SP.PlotFile(tau.clone)
t.template=SPF.JTBPlotStyle
BG=SP.PlotFile(R.clone)
BG.template=SPF.JTBPlotStyle
fit.figure()
t.figure()
BG.figure()
f=plt.gcf()
f.set_size_inches((6.5,3.75),forward=True) # Set for A4 - will make wrapper for this someday
plt.subplot2grid((1,2),(0,0))
l = r'$\epsilon_{ph}$ = ' + format_error(e_ph,e_ph_err,latex=True) + '\n' + r'$\epsilon_{imp}$ = ' + format_error(e_imp,e_imp_err,latex=True)
t.plot_xy('T (K)',r'1/$\tau_{sf}$',yerr='1/t_err',linestyle='',linewidth=1,marker='o')
fit.plot_xy('T (K)','fit',label=l,linestyle='-',linewidth=2,marker='')
t.ylabel = r'1/$\tau_{sf}$'
t.title = sample
################ PLOT B-G DATA #######################
#label_fit='B-G fit\n K = ' + str(params['K'].value) + '\n'+r'$\theta_D$ = ' + str(params['Dt'].value) + '\n'+r'$\rho_0$ = ' + str(params['rho_0'].value)
#label = 'data'
plt.subplot2grid((1,2),(0,1))
BG.plot_xy('T (K)','res',linestyle='',linewidth=3,marker='o',label = r'Cu spacer $\rho$')
BG.plot_xy('T (K)','BG',linestyle='-',linewidth=2,marker='',label = 'B-G fit')
BG.ylabel = r'$\rho (\Omega m)$'
plt.tight_layout()
|
[
"py07jtb@leeds.ac.uk"
] |
py07jtb@leeds.ac.uk
|
e54ff071f98fe853abfa4d2a1d3dfda418acb12f
|
55647a80c8b412af9df0ba3f50595cc2f29c25e6
|
/res/scripts/common/Lib/plat-mac/Carbon/File.py
|
c0c25c19dcfc7e1e788e857d3bcbd6b39a4c0c21
|
[] |
no_license
|
cnsuhao/WOT-0.9.17-CT
|
0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb
|
d1f932d8cabaf8aa21708622e87f83c8d24d6451
|
refs/heads/master
| 2021-06-08T18:11:07.039293
| 2016-11-19T19:12:37
| 2016-11-19T19:12:37
| null | 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 358
|
py
|
# 2016.11.19 20:00:52 Central Europe (standard time)
# Embedded file name: scripts/common/Lib/plat-mac/Carbon/File.py
from _File import *
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\common\Lib\plat-mac\Carbon\File.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 20:00:52 Central Europe (standard time)
|
[
"info@webium.sk"
] |
info@webium.sk
|
1905be51f9c00ebc3b098c76678348ffeb71035e
|
b7888fb921abeb2ad44ce6409bf62ecef77a458e
|
/src/djanban/apps/members/migrations/0019_remove_member_spent_time_factor.py
|
2ab10ee0796f693207d2b06d3d54d558aeeb0a74
|
[
"MIT"
] |
permissive
|
my-favorite-repositories/djanban
|
303ce59f821d01f727536068b83f8e8485b7d649
|
6451688d49cf235d03c604b19a6a8480b33eed87
|
refs/heads/master
| 2021-03-01T14:23:19.745085
| 2018-05-15T17:12:01
| 2018-05-15T17:12:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-02-24 23:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('members', '0018_auto_20170225_0040'),
]
operations = [
migrations.RemoveField(
model_name='member',
name='spent_time_factor',
),
]
|
[
"diegojromerolopez@gmail.com"
] |
diegojromerolopez@gmail.com
|
ddcfb84625fbf00abc35ea2d697ae18e14dca3fa
|
b9d648a7cb56412f367492f93bb9acd27ab53e84
|
/baralho_renzo.py
|
0cd602729055a5e63d8d39cf19c97486e0b24d49
|
[
"MIT"
] |
permissive
|
maribedran/oopy
|
0d9a34ab820f427f0b6738fa49e434d780e7bf27
|
3f0629afee10f60f214cff04d07a27daa2fc8208
|
refs/heads/master
| 2020-12-26T13:02:16.938449
| 2016-04-17T15:40:54
| 2016-04-17T15:40:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 783
|
py
|
from collections import namedtuple
from itertools import chain, product
from random import shuffle
Carta = namedtuple('Carta', 'valor naipe')
class Baralho():
def __init__(self):
valores = chain((str(i) for i in range(2, 11)), 'JQKA')
naipes = '♣♡♠♢'
self.cartas = [Carta(valor, naipe) for naipe, valor in product(naipes, valores)]
def __repr__(self):
return repr(self.cartas)
def __getitem__(self, item):
return self.cartas[item]
def __len__(self):
return len(self.cartas)
def __setitem__(self, key, value):
self.cartas[key] = value
baralho = Baralho()
print(baralho)
mao = baralho[:5]
print(mao)
print(len(baralho))
shuffle(baralho)
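# shuffle() can mutate a Baralho in place because the class implements __len__,
# __getitem__ and __setitem__ (the mutable sequence protocol random.shuffle relies on).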
print(baralho)
for carta in baralho:
print(carta)
|
[
"renzon@gmail.com"
] |
renzon@gmail.com
|
6a24ee0acfd7a5e70f4e6b359e81a3a6662bbc34
|
d6be053915c065fe6da71afddd28429d144fee68
|
/streamlit_tutorial/main_concepts/app_02_st-write.py
|
b0292b32740b780363ad49a71892cadeb280fa04
|
[] |
no_license
|
DrShushen/practice_py
|
61bc28f52783f8304cce1d834def4934ba6ee8e1
|
cf40ec43ccd73aa835c4e65e6a4b41408b90a3ea
|
refs/heads/master
| 2023-01-08T06:57:10.852157
| 2023-01-03T22:58:11
| 2023-01-03T22:58:11
| 211,668,464
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
import streamlit as st
import pandas as pd
st.write("Here's our first attempt at using data to create a table:")
st.write(pd.DataFrame({"first column": [1, 2, 3, 4], "second column": [10, 20, 30, 40]}))
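# st.write is Streamlit's catch-all display helper: plain strings render as Markdown
# and pandas DataFrames render as an interactive table. Run with (assuming this
# filename): streamlit run app_02_st-write.py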
|
[
"e.s.saveliev@gmail.com"
] |
e.s.saveliev@gmail.com
|
e719eb80d4457db6ea99dc3821c5929866765f80
|
e311664619d469addd2c77566ec97d24affcbfd9
|
/src/apps/sistema/admin.py
|
e984d54edcb131f638648697eb5a1205922b2c8b
|
[] |
no_license
|
danielhuamani/Proyecto-taller-base-datos
|
361dc8c915dff36a9ce96a7147c11f0af9d51227
|
5d791383f77f8042a2890db4cfd31079c6d1dc7b
|
refs/heads/master
| 2016-08-11T13:47:03.169317
| 2015-12-22T04:28:52
| 2015-12-22T04:28:52
| 46,673,349
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from .models import Usuario
@admin.register(Usuario)
class UsuarioAdmin(admin.ModelAdmin):
list_display = ('email', 'password')
|
[
"danielhuamani15@gmail.com"
] |
danielhuamani15@gmail.com
|
7e3a908b15a1ae21c5a415ad9a3bd556966e7eed
|
b21abd3873c76739ceefd1b4613a343ba2b454d1
|
/jwst/associations/__init__.py
|
d301f9c7e12a2e7fee6c0d7ec0e7ed537bfa1211
|
[
"BSD-2-Clause"
] |
permissive
|
rij/jwst
|
96a7baf95de953c51bbe67f3cdd459c114c47eef
|
1d3acecb28d9a3dcb44b993e451b69da9856187d
|
refs/heads/master
| 2020-12-24T09:56:21.784342
| 2016-06-09T19:17:01
| 2016-06-09T19:17:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
from __future__ import absolute_import
from .association import (Association, AssociationRegistry)
from .pool import AssociationPool
from .generate import generate
|
[
"jhunkeler@gmail.com"
] |
jhunkeler@gmail.com
|
4abb0b3416a912f00495bdac12ea344e0e5c4234
|
6490638f15a2dfbe0cec9725186f9784d57c92f0
|
/UnitTest/testSEGMENT.py
|
b39851c1efe3ad9480414b8fea7c6e6a7eb3a621
|
[
"MIT"
] |
permissive
|
khawatkom/SpacePyLibrary
|
af9c490ef796b9d37a13298c41df1fb5bf6b3cee
|
c94415e9d85519f345fc56938198ac2537c0c6d0
|
refs/heads/master
| 2020-05-14T21:52:39.388979
| 2019-04-17T17:06:04
| 2019-04-17T17:06:04
| 181,970,668
| 1
| 0
| null | 2019-04-17T21:26:44
| 2019-04-17T21:26:44
| null |
UTF-8
|
Python
| false
| false
| 2,419
|
py
|
#!/usr/bin/env python3
#******************************************************************************
# (C) 2018, Stefan Korner, Austria *
# *
# The Space Python Library is free software; you can redistribute it and/or *
# modify it under under the terms of the MIT License as published by the *
# Massachusetts Institute of Technology. *
# *
# The Space Python Library is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License *
# for more details. *
#******************************************************************************
# CCSDS Stack - Unit Tests *
#******************************************************************************
import CCSDS.SEGMENT, testData
#############
# functions #
#############
def test_SEGMENT_DUoperations():
"""function to test the transfer segment data units"""
tcSegment1 = CCSDS.SEGMENT.TCsegment(testData.TC_SEGMENT_01)
if tcSegment1.sequenceFlags != testData.TC_SEGMENT_01_sequenceFlags:
print("tcSegment1 sequenceFlags wrong:", tcSegment1.sequenceFlags, "- should be", testData.TC_SEGMENT_01_sequenceFlags)
return False
if tcSegment1.mapId != testData.TC_SEGMENT_01_mapId:
print("tcSegment1 mapId wrong:", tcSegment1.mapId, "- should be", testData.TC_SEGMENT_01_mapId)
return False
tcSegment2 = CCSDS.SEGMENT.TCsegment(testData.TC_SEGMENT_02)
if tcSegment2.sequenceFlags != testData.TC_SEGMENT_02_sequenceFlags:
print("tcSegment2 sequenceFlags wrong:", tcSegment2.sequenceFlags, "- should be", testData.TC_SEGMENT_02_sequenceFlags)
return False
if tcSegment2.mapId != testData.TC_SEGMENT_02_mapId:
print("tcSegment2 mapId wrong:", tcSegment2.mapId, "- should be", testData.TC_SEGMENT_02_mapId)
return False
return True
########
# main #
########
if __name__ == "__main__":
print("***** test_SEGMENT_DUoperations() start")
retVal = test_SEGMENT_DUoperations()
print("***** test_SEGMENT_DUoperations() done:", retVal)
|
[
"korner-hajek@gmx.at"
] |
korner-hajek@gmx.at
|
68b54e70382cdd4a0198bd401b8281a41aedd8bf
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03835/s488234349.py
|
e56e10d8295dca46ab608cfccce640f2972f7441
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
k,s = map(int, input().split())
ans = 0
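# Count the triples (i, j, t) with each value in 0..k and i + j + t == s:
# iterate over i and j, then check whether the remaining t = s - (i + j) is in range.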
for i in range(k+1):
for j in range(k+1):
t = s - (i + j)
if 0 <= t <= k:
ans += 1
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
706418248d2b6c25981b3d2197b0838bed81d752
|
15ed3ab4510677e6df9b11af8fd7a36fc6d826fc
|
/rado/2014-10/mc2/nice-res-2014-10-29/mc2.py
|
3188ce25fe60f46da2959be84ae326f91d4945c8
|
[] |
no_license
|
pe-ge/Computational-analysis-of-memory-capacity-in-echo-state-networks
|
929347575538de7015190d35a7c2f5f0606235f2
|
85873d8847fb2876cc8a6a2073c2d1779ea1b20b
|
refs/heads/master
| 2020-04-02T08:08:38.595974
| 2018-01-17T08:12:26
| 2018-01-17T08:12:26
| 61,425,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,473
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
mc2.py
Created 17.10.2014
Based on mc.py
Goal: Measuring Memory Capacity for random matrices
"""
from numpy import *
import matplotlib.pyplot as plt
p = 1 # one input node
q = 100 # 100 reservoir nodes
r = 200 # 200 output nodes
params = {
'MEMORY_MAX': 200,
'ITERATIONS': 2000,
'ITERATIONS_SKIPPED': 1000,
'ITERATIONS_COEF_MEASURE': 1000,
'RUNS': 1,
'NETS': 100,
}
sigma = 0.10
dist_WI = lambda: random.uniform(-0.1,0.1,[q,p])
dist_input = lambda: random.uniform(-1., 1., params['ITERATIONS']) # maybe [1,1] ?
#dist_W = lambda sigma: random.uniform(-.13, .13, [q,q])
dist_W = lambda sigma: random.normal(0., sigma, [q,q])
def memory_capacity(W, WI, params):
"""Calculates memory capacity of a NN
[given by its input weights WI and reservoir weights W].
W = q x q matrix storing hidden reservoir weights
WI = q x p matrix storing input weights
Returns: a tuple (MC, std)
MC: memory capacity for history 0..(MEMORY_MAX - 1)
[a vector of length MEMORY_MAX]
std: standard deviation for each value of MC
"""
    # load parameters to local variables for better readability
MEMORY_MAX = params['MEMORY_MAX']
ITERATIONS = params['ITERATIONS']
ITERATIONS_SKIPPED = params['ITERATIONS_SKIPPED']
ITERATIONS_MEASURED = ITERATIONS - ITERATIONS_SKIPPED
ITERATIONS_COEF_MEASURE = params['ITERATIONS_COEF_MEASURE']
RUNS = params['RUNS']
# dist_input = lambda: random.uniform(-1., 1., ITERATIONS) # maybe [1,1] ?
# vector initialization
X = zeros([q,1]) # reservoir activations, @FIXME, maybe try only q, instead of [q, 1] (performance?)
S = zeros([q,ITERATIONS_MEASURED])
# generate random input
u = dist_input() # all input; dimension: [ITERATIONS, 1]
# run 2000 iterations and fill the matrices D and S
for it in range(ITERATIONS):
X = tanh(dot(W, X) + dot(WI, u[it]))
if it >= ITERATIONS_SKIPPED:
# record the state of reservoir activations X into S
S[:, it - ITERATIONS_SKIPPED] = X[:,0]
# prepare matrix D of desired values (that is, shifted inputs)
assert MEMORY_MAX < ITERATIONS_SKIPPED
D = zeros([MEMORY_MAX, ITERATIONS_MEASURED])
for h in range(MEMORY_MAX): # fill each row
#FIXME maybe should be: 'ITERATIONS - (h+1)', it depends, whether we measure 0th iteration as well
D[h,:] = u[ITERATIONS_SKIPPED - h : ITERATIONS - h]
# calculate pseudoinverse S+ and with it, the matrix WO
S_PINV = linalg.pinv(S)
WO = dot(D, S_PINV)
# do a new run for an unbiased test of quality of our newly trained WO
# we skip MEMORY_MAX iterations to have large enough window
MC = zeros([RUNS, MEMORY_MAX]) # here we store memory capacity
for run in range(RUNS):
u = dist_input()
X = zeros([q,1])
o = zeros([MEMORY_MAX, ITERATIONS_COEF_MEASURE]) # 200 x 1000
for it in range(ITERATIONS_COEF_MEASURE + MEMORY_MAX):
X = tanh(dot(W, X) + dot(WI, u[it]))
if it >= MEMORY_MAX:
# we calculate output nodes using WO ( @FIXME maybe not a column, but a row?)
o[:, it - MEMORY_MAX] = dot(WO, X)[:,0]
# correlate outputs with inputs (shifted)
for h in range(MEMORY_MAX):
k = h + 1
cc = corrcoef(u[MEMORY_MAX - h : MEMORY_MAX + ITERATIONS_COEF_MEASURE - h], o[h, : ]) [0, 1]
MC[run, h] = cc * cc
return (average(MC, axis=0), std(MC, axis=0) / sqrt(RUNS))
def kindofvector(vec):
shp = vec.shape
if len(shp) == 1:
print('vector of length %d' % shp[0])
else:
if shp[0] == 1:
print('a long row (with %d columns)' % shp[1])
elif shp[1] == 1:
print('a long column (with %d rows)' % shp[0])
elif shp[0] > shp[1]:
print('a tall rectangle matrix (%d x %d)' % shp)
elif shp[0] < shp[1]:
print('a wide rectangle matrix (%d x %d)' % shp)
elif shp[0] == shp[1]:
print('a square matrix (%d x %d)' % shp)
else:
print('an alien matrix of shape: %s' % str(shp))
def main_plot_MCk():
# plot it
#sigma = 0.10
NETS = 100
# initial setup
MCs = zeros([NETS, params['MEMORY_MAX']])
for net in range(NETS):
W = dist_W(sigma)
WI = dist_WI()
# calculate MC for history 0..199
MC, _ = memory_capacity(W, WI, params)
MC.shape = (1, MC.size)
MCs[net, :] = MC
x = array(range(params['MEMORY_MAX']))
y = average(MCs, axis=0)
error = std(MCs, axis=0) / sqrt(NETS)
print("MC = %f" % sum(y))
# with open('cc-vs-history-values.txt', 'w') as f:
# print(y, file=f)
# with open('cc-vs-history-derivatives.txt', 'w') as f:
# print(diff(y), file=f)
plt.errorbar(x, y, yerr=(error * 3))
#plt.plot(range(params['MEMORY_MAX']-1), diff(y))
plt.grid(True)
plt.ylabel('correlation coefficient')
plt.xlabel('memory size')
plt.ylim([0,1])
plt.title('Memory capacity ($\sigma_{W^R}$ = %.3f) (confidence = $3\sigma$) (runs = %d) ' % (sigma, params['RUNS']))
plt.show()
plt.plot(range(params['MEMORY_MAX']-1), diff(y))
plt.show()
def main_plot_MC_sigma():
# 0.13s na iteraciu (tu 4000)
POINTS = 100
NETS = params['NETS']
sigmas = linspace(0.001, 0.2, POINTS)
params['RUNS'] = 1
y = zeros([NETS, POINTS])
for i, sigma in enumerate(sigmas):
for net in range(NETS):
W = dist_W(sigma)
WI = dist_WI()
MC, _ = memory_capacity(W, WI, params)
y[net, i] = sum(MC)
print("\rsigma: %.3f (%d of %d), net: (%d of %d)" % (sigma, i, POINTS, net, NETS), end="")
y, error = (average(y, axis=0), std(y, axis=0) / sqrt(NETS))
x = sigmas
plt.errorbar(x, y, yerr=(error * 3))
plt.plot(sigmas, y)
plt.show()
def main():
#main_plot_MC_sigma()
main_plot_MCk()
if __name__ == '__main__':
main()
|
[
"gergelp@gmail.com"
] |
gergelp@gmail.com
|
c6ebae4c39070dc4f956e0aa6d460198565a6724
|
8e01f8c0c6ae1ab1f2cd34408577dc8affb8288e
|
/slingsby/articles/migrations/0002_auto_20150305_2339.py
|
e7d8f839fe8f0fac077f1092d88772230280b0ed
|
[] |
no_license
|
TelemarkAlpint/slingsby
|
b8122f0a367d81c2cf8809a91827426de9e93e2c
|
e480ebf12f7d5dddeca242b1c0ed508631a6674c
|
refs/heads/master
| 2021-01-10T15:12:28.205841
| 2017-09-13T21:20:01
| 2017-09-13T21:20:25
| 8,419,417
| 2
| 0
| null | 2020-03-17T21:19:35
| 2013-02-25T22:11:16
|
Python
|
UTF-8
|
Python
| false
| false
| 512
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('articles', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='subpagearticle',
name='title',
field=models.CharField(help_text=b'Hva skal undersiden hete?', unique=True, max_length=200, verbose_name=b'tittel'),
preserve_default=True,
),
]
|
[
"git@thusoy.com"
] |
git@thusoy.com
|
86158ba972b588256136d51e4ae6672785f5eee1
|
b4afb44b8f483c048716fe12d778186ce68ac846
|
/AutoFrameworkForAppiumPy/com/qa/automation/appium/cases/ios/ffan/common/check_network.py
|
47a35ee729958f64215c24e69771b597aaf20c60
|
[] |
no_license
|
liu111xiao111/UItest
|
64309b2c85f6d2334d64bb0875ba9ced459ebb1e
|
67e2acc9a99da81022e286e8d8ec7ccb12636ff3
|
refs/heads/master
| 2021-09-01T18:30:28.044296
| 2017-12-28T04:36:46
| 2017-12-28T04:36:46
| 115,585,226
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 670
|
py
|
# -*- coding: utf-8 -*-
from subprocess import Popen, PIPE
CONNECTION = 0
NOCONNECTION = -1
NOFIND = -1
class CheckNetworkStatus:
'''
usage : Check Network status.
'''
def __init__(self):
pass
def checkNetwork(self):
cmd = 'adb shell ping -w 3 baidu.com'
ret = Popen(cmd , shell=True, stdout=PIPE, stderr=PIPE)
out, err = ret.communicate()
        if out.find(b'unknown') == NOFIND:  # Popen pipes yield bytes, so compare against bytes
            return CONNECTION
        else:
            print(err)
            return NOCONNECTION
if __name__ == '__main__':
checkNetworkStatus = CheckNetworkStatus()
checkNetworkStatus.checkNetwork()
|
[
"tl@neusoft.com"
] |
tl@neusoft.com
|
364728cd83b127d6eeb34938d1d4dd9be8dd794e
|
e993c53e4e1a52acc8279129c67feb0d3a1b9cbc
|
/catkin_ws/src/o2as_cad_matching/cfg/CadMatching.cfg
|
2ffd7a6a9e0af0082bdd6b1054b9a6e0b92447db
|
[
"MIT"
] |
permissive
|
DevwratJoshi/ur-o2as
|
134ec87d371a7d9f9b64cbeb4030b23cf114812d
|
265249c27908a79a301014168394db0c0dc2204c
|
refs/heads/master
| 2021-01-03T16:03:57.344339
| 2020-02-17T03:58:39
| 2020-02-17T03:58:39
| 240,143,319
| 0
| 0
|
MIT
| 2020-02-13T00:21:52
| 2020-02-13T00:21:51
| null |
UTF-8
|
Python
| false
| false
| 2,602
|
cfg
|
#!/usr/bin/env python
PACKAGE = "o2as_cad_matching"
from math import pi
from dynamic_reconfigure.parameter_generator_catkin import *
gen = ParameterGenerator()
object_id = gen.enum([
gen.const("01_BASE" , int_t, 1, "Base Panel"),
gen.const("02_PANEL" , int_t, 2, "Output shaft fixing plate"),
gen.const("03_PANEL2" , int_t, 3, "Motor fixing plate"),
gen.const("04_37D_GEARMOTOR_50_70" , int_t, 4, "Geared motor (Gear ratio 1:70)"),
gen.const("05_MBRFA30_2_P6" , int_t, 5, "Pulley for Round Belt (4mm) - Setscrew P.D. 30mm"),
gen.const("06_MBT4_400" , int_t, 6, "Polyurethane round belt (welded joint product) P.D. 4mm L=400mm"),
gen.const("07_SBARB6200ZZ_30" , int_t, 7, "Bearings with Housings (Double Bearings)"),
gen.const("08_KZAF1075NA4WA55GA20AA0" , int_t, 8, "Drive shaft (Straight) D10h6"),
gen.const("09_EDCS10" , int_t, 9, "End Cap for Shaft"),
gen.const("10_CLBPS10_17_4" , int_t, 10, "Bearing Spacers For Inner Ring (output pulley)"),
gen.const("11_MBRAC60_2_10" , int_t, 11, "Pulley for Round Belts Clamping Type P.D. 60mm"),
gen.const("12_CLBUS6_9_9_5" , int_t, 12, "Bearing Spacers For Inner Ring (tension pulley)"),
gen.const("13_MBGA30_2" , int_t, 13, "Idler for Round Belt - Wide"),
gen.const("14_BGPSL6_9_L30_F8" , int_t, 14, "Bearing Shaft Screw"),
gen.const("15_SLBNR6" , int_t, 15, "M6 Hex Nut (Fixing for idler shaft)"),
gen.const("16_SPWF6" , int_t, 16, "M6 Flat Washer (Fixing for idler shaft)"),
gen.const("17_SCB4_10" , int_t, 17, "10mm M4 Socket Head Cap Screw (metric coarse thread)"),
gen.const("100_robotiq_calib_marker" , int_t, 100, "Aruco calibration board for robotiq gripper"),
gen.const("102_CalTarget15" , int_t, 102, "3D calibration target")],
"An enum to set object id")
# camera = gen.enum([
# gen.const("phoxi" , int_t, 1, "PhoXi camera."),
# gen.const("b_bot_camera" , int_t, 2, "RealSense camera attached on the b_bot.")],
# "An enum to set camera name")
gen.add("object_id" , int_t , 1 << 0, "id of the object to be detected." , 7, 1, 1000, edit_method=object_id)
# gen.add("camera" , int_t , 1 << 0, "id of the camera to be used to detect object." , 1, 1, 2, edit_method=camera)
exit(gen.generate(PACKAGE, "o2as_cad_matching", "CadMatching"))
|
[
"FvDrigalski@gmail.com"
] |
FvDrigalski@gmail.com
|
96818b870c57707871eaa9aaa64013c4dddb882d
|
e483b0515cca39f4ddac19645f03fc1695d1939f
|
/google/ads/google_ads/v1/proto/errors/mutate_error_pb2.py
|
7d4341f914f9ea47ea5cab80d378555a1d3b6cf3
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
BrunoWMello/google-ads-python
|
0af63d2ca273eee96efd8a33252d27112c049442
|
9b074a037d10f0c1208a00d5d41a8e5e25405f28
|
refs/heads/master
| 2020-05-27T04:37:47.669144
| 2019-05-24T17:07:31
| 2019-05-24T17:07:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 4,602
|
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v1/proto/errors/mutate_error.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v1/proto/errors/mutate_error.proto',
package='google.ads.googleads.v1.errors',
syntax='proto3',
serialized_options=_b('\n\"com.google.ads.googleads.v1.errorsB\020MutateErrorProtoP\001ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v1/errors;errors\242\002\003GAA\252\002\036Google.Ads.GoogleAds.V1.Errors\312\002\036Google\\Ads\\GoogleAds\\V1\\Errors\352\002\"Google::Ads::GoogleAds::V1::Errors'),
serialized_pb=_b('\n7google/ads/googleads_v1/proto/errors/mutate_error.proto\x12\x1egoogle.ads.googleads.v1.errors\x1a\x1cgoogle/api/annotations.proto\"\xee\x01\n\x0fMutateErrorEnum\"\xda\x01\n\x0bMutateError\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x16\n\x12RESOURCE_NOT_FOUND\x10\x03\x12!\n\x1dID_EXISTS_IN_MULTIPLE_MUTATES\x10\x07\x12\x1d\n\x19INCONSISTENT_FIELD_VALUES\x10\x08\x12\x16\n\x12MUTATE_NOT_ALLOWED\x10\t\x12\x1e\n\x1aRESOURCE_NOT_IN_GOOGLE_ADS\x10\n\x12\x1b\n\x17RESOURCE_ALREADY_EXISTS\x10\x0b\x42\xeb\x01\n\"com.google.ads.googleads.v1.errorsB\x10MutateErrorProtoP\x01ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v1/errors;errors\xa2\x02\x03GAA\xaa\x02\x1eGoogle.Ads.GoogleAds.V1.Errors\xca\x02\x1eGoogle\\Ads\\GoogleAds\\V1\\Errors\xea\x02\"Google::Ads::GoogleAds::V1::Errorsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_MUTATEERRORENUM_MUTATEERROR = _descriptor.EnumDescriptor(
name='MutateError',
full_name='google.ads.googleads.v1.errors.MutateErrorEnum.MutateError',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RESOURCE_NOT_FOUND', index=2, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ID_EXISTS_IN_MULTIPLE_MUTATES', index=3, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INCONSISTENT_FIELD_VALUES', index=4, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MUTATE_NOT_ALLOWED', index=5, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RESOURCE_NOT_IN_GOOGLE_ADS', index=6, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RESOURCE_ALREADY_EXISTS', index=7, number=11,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=142,
serialized_end=360,
)
_sym_db.RegisterEnumDescriptor(_MUTATEERRORENUM_MUTATEERROR)
_MUTATEERRORENUM = _descriptor.Descriptor(
name='MutateErrorEnum',
full_name='google.ads.googleads.v1.errors.MutateErrorEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_MUTATEERRORENUM_MUTATEERROR,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=122,
serialized_end=360,
)
_MUTATEERRORENUM_MUTATEERROR.containing_type = _MUTATEERRORENUM
DESCRIPTOR.message_types_by_name['MutateErrorEnum'] = _MUTATEERRORENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MutateErrorEnum = _reflection.GeneratedProtocolMessageType('MutateErrorEnum', (_message.Message,), dict(
DESCRIPTOR = _MUTATEERRORENUM,
__module__ = 'google.ads.googleads_v1.proto.errors.mutate_error_pb2'
,
__doc__ = """Container for enum describing possible mutate errors.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.errors.MutateErrorEnum)
))
_sym_db.RegisterMessage(MutateErrorEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"noreply@github.com"
] |
BrunoWMello.noreply@github.com
|
1b211e2af8bd47e80d621774445befeff125077b
|
068d271e241d8cdb46dbf4243166e4b8ee7025b2
|
/day10/day10/5.进程_server.py
|
6bcdd06a147b6b6b577b373066d3137b7a7fe994
|
[] |
no_license
|
caiqinxiong/python
|
f6e226e76cb62aac970bcfbcb6c8adfc64858b60
|
9029f6c528d2cb742b600af224e803baa74cbe6a
|
refs/heads/master
| 2023-05-26T19:41:34.911885
| 2020-05-15T09:02:08
| 2020-05-15T09:02:08
| 195,261,757
| 1
| 0
| null | 2021-06-10T23:33:33
| 2019-07-04T15:01:42
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 509
|
py
|
import socket
from multiprocessing import Process
def func(conn):
while True:
        conn.send(b'hello')  # the message must be a bytes object
        message = conn.recv(1024)  # 1024 is the maximum number of bytes to receive
print(message)
if __name__ == '__main__':
sk = socket.socket()
sk.bind(('127.0.0.1',9001))
sk.listen()
while True:
        conn,addr = sk.accept()  # accept a client connection request (TCP three-way handshake)
Process(target=func,args=(conn,)).start()
conn.close()
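# A minimal matching client sketch (illustrative only, not part of the original file):
#
# import socket
# client = socket.socket()
# client.connect(('127.0.0.1', 9001))
# while True:
#     print(client.recv(1024))   # receives b'hello' from the server
#     client.send(b'hi')         # the server process prints this message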
|
[
"13269469526@163.com"
] |
13269469526@163.com
|
2d742d0514e2f6b3d504abf2415972f06a362098
|
acd41dc7e684eb2e58b6bef2b3e86950b8064945
|
/res/packages/scripts/scripts/client/gui/shared/utils/decorators.py
|
fd6a83b489698ea258530d5b9b8f79437a048a1b
|
[] |
no_license
|
webiumsk/WoT-0.9.18.0
|
e07acd08b33bfe7c73c910f5cb2a054a58a9beea
|
89979c1ad547f1a1bbb2189f5ee3b10685e9a216
|
refs/heads/master
| 2021-01-20T09:37:10.323406
| 2017-05-04T13:51:43
| 2017-05-04T13:51:43
| 90,268,530
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 5,312
|
py
|
# 2017.05.04 15:26:22 Central Europe (daylight saving time)
# Embedded file name: scripts/client/gui/shared/utils/decorators.py
import time
import adisp
import BigWorld
from debug_utils import LOG_DEBUG
from gui.Scaleform.Waiting import Waiting
from debug_utils import LOG_WARNING
from string import join
class process(object):
def __init__(self, *kargs):
self.__currentMessage = None
self.__messages = kargs
self.__messages2Show = list(self.__messages)
return
def __hideWaiting(self):
if self.__currentMessage is not None:
Waiting.hide(self.__currentMessage)
self.__currentMessage = None
return
def __nextWaiting(self):
if len(self.__messages2Show):
self.__hideWaiting()
self.__currentMessage = self.__messages2Show.pop(0)
Waiting.show(self.__currentMessage)
def __stepCallback(self, isStop):
if not isStop:
return self.__nextWaiting()
self.__hideWaiting()
self.__messages2Show = list(self.__messages)
def __call__(self, func):
def wrapper(*kargs, **kwargs):
self.__nextWaiting()
return adisp.process(func, self.__stepCallback)(*kargs, **kwargs)
return wrapper
def async(func, cbname = 'callback', cbwrapper = lambda x: x):
def wrapper(*kargs, **kwargs):
if cbname in func.func_code.co_varnames:
idx = func.func_code.co_varnames.index(cbname)
if idx >= len(kargs) and cbname not in kwargs:
return adisp.async(func, cbname, cbwrapper)(*kargs, **kwargs)
return func(*kargs, **kwargs)
return wrapper
def dialog(func):
def wrapper(*kargs, **kwargs):
Waiting.suspend()
def cbwrapper(cb):
def callback(result):
Waiting.resume()
cb(result)
return callback
return async(func, 'callback', cbwrapper)(*kargs, **kwargs)
return wrapper
def debugTime(func):
def wrapper(*args, **kwargs):
startTime = time.time()
func(*args, **kwargs)
LOG_DEBUG("Method '%s' measuring time: %.10f" % (func.__name__, time.time() - startTime))
return wrapper
IS_DEVELOPMENT = True
class _TrackFrameEnabled(object):
def __init__(self, logID):
super(_TrackFrameEnabled, self).__init__()
self.__logID = logID
def __call__(self, func):
def wrapper(*args, **kwargs):
BigWorld.PFbeginFrame(self.__logID)
func(*args, **kwargs)
BigWorld.PFendFrame()
return wrapper
class _TrackFrameDisabled(object):
def __init__(self, logID):
super(_TrackFrameDisabled, self).__init__()
def __call__(self, func):
return func
if IS_DEVELOPMENT:
trackFrame = _TrackFrameEnabled
else:
trackFrame = _TrackFrameDisabled
def makeArr(obj):
if isinstance(obj, tuple):
if len(obj) > 1:
return [obj[0], obj[1]]
else:
return [obj[0], obj[0]]
return [obj, obj]
class ReprInjector(object):
@classmethod
def withParent(cls, *argNames):
return InternalRepresenter(True, argNames)
@classmethod
def simple(cls, *argNames):
return InternalRepresenter(False, argNames)
class InternalRepresenter(object):
def __init__(self, reprParentFlag, argNames):
self.argNames = argNames
self.reprParentFlag = reprParentFlag
def __call__(self, clazz):
if '__repr__' in dir(clazz):
if hasattr(clazz, '__repr_params__') and self.reprParentFlag is not False:
clazz.__repr_params__ = tuple((arg for arg in self.argNames if arg not in clazz.__repr_params__)) + tuple((arg for arg in clazz.__repr_params__ if arg[0:2] != '__'))
else:
clazz.__repr_params__ = self.argNames
else:
clazz.__repr_params__ = self.argNames
representation = []
attrMethNames = []
for i in xrange(len(clazz.__repr_params__)):
attrMethNames.append(makeArr(clazz.__repr_params__[i]))
if attrMethNames[-1][0][:2] == '__':
if clazz.__name__[0] != '_':
attrMethNames[-1][0] = join(['_', clazz.__name__, attrMethNames[-1][0]], sep='')
else:
attrMethNames[-1][0] = join([clazz.__name__, attrMethNames[-1][0]], sep='')
representation.append('{0} = {{{1}}}'.format(attrMethNames[-1][1], i))
representation = join([clazz.__name__,
'(',
join(representation, sep=', '),
')'], sep='')
def __repr__(self):
formatedArgs = []
for attrMethName, reprName in attrMethNames:
attr = getattr(self, attrMethName, 'N/A')
if callable(attr):
attr = getattr(self, attrMethName, 'N/A')()
formatedArgs.append(attr)
return representation.format(*formatedArgs)
clazz.__repr__ = __repr__
return clazz
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\shared\utils\decorators.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:26:23 Central Europe (daylight saving time)
|
[
"info@webium.sk"
] |
info@webium.sk
|
71bae5b57bd978555acd8f94f55a75779c4e5c5a
|
a4e59c4f47873daf440374367a4fb0383194d2ce
|
/Python/129.py
|
73d145f89ea0e90f96faa9bfc9b762b6c30ecb3d
|
[] |
no_license
|
maxjing/LeetCode
|
e37cbe3d276e15775ae028f99cf246150cb5d898
|
48cb625f5e68307390d0ec17b1054b10cc87d498
|
refs/heads/master
| 2021-05-23T17:50:18.613438
| 2021-04-02T17:14:55
| 2021-04-02T17:14:55
| 253,406,966
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 617
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def sumNumbers(self, root: TreeNode) -> int:
return self.pathsSum(root, 0)
def pathsSum(self, node, pathSum):
if node is None:
return 0
#2020.05.15 totally forgot the following step
pathSum = 10 * pathSum + node.val
if node.left is None and node.right is None:
return pathSum
return self.pathsSum(node.left, pathSum) + self.pathsSum(node.right, pathSum)
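# Worked example (illustrative): for the tree 1 -> (2, 3), the root-to-leaf numbers
# are 12 and 13, so sumNumbers returns 25.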
|
[
"tvandcc@gmail.com"
] |
tvandcc@gmail.com
|
2f4db38a3a1591db3042a3d16dbd30478a312b0e
|
07b37ca45d38edea112895049acf76d96ff07eff
|
/3.Processing&UnderstadingText/recommended_dependency_parser.py
|
2ced66cfba94395977f015428833ccba515d6df6
|
[] |
no_license
|
KRBhavaniSankar/NLTK
|
e335944de346be72a01c92221b0bf58d85475fb9
|
4b228338566996fbccee72cb6afaa199a6496787
|
refs/heads/master
| 2020-03-12T23:03:59.981112
| 2018-05-11T01:15:28
| 2018-05-11T01:15:28
| 130,858,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,000
|
py
|
sentence = 'The brown fox is quick and he is jumping over the lazy dog'
#Load dependencies
from spacy.lang.en import English
parser = English()
parsed_sent = parser(unicode(sentence))
print parsed_sent, type(parsed_sent)
dependency_pattern = '{left}<---{word}[{w_type}]--->{right}\n--------'
for token in parsed_sent:
print dependency_pattern.format(word=token.orth_,
w_type=token.dep_,
left=[t.orth_
for t
in token.lefts],
right=[t.orth_
for t
in token.rights])
from nltk.parse.stanford import StanfordDependencyParser
sdp = StanfordDependencyParser(path_to_jar='/home/bhavani/work/Python/programs/NLTK/stanford-parser-full-2018-02-27/stanford-parser.jar',
path_to_models_jar='/home/bhavani/work/Python/programs/NLTK/stanford-parser-full-2018-02-27/stanford-parser-3.9.1-models.jar')
result = list(sdp.raw_parse(sentence))
#print(result[0])
#print(type(result[0]))
dep_tree = [parse.tree() for parse in result][0]
print dep_tree
#dep_tree.draw()
# generation of annotated dependency tree shown in Figure 3-4
from graphviz import Source
dep_tree_dot_repr = [parse for parse in result][0].to_dot()
source = Source(dep_tree_dot_repr, filename="dep_tree", format="png")
source.view()
#Building our own dependecny parsers
import nltk
tokens = nltk.word_tokenize(sentence)
dependency_rules = """
'fox' -> 'The' | 'brown'
'quick' -> 'fox' | 'is' | 'and' | 'jumping'
'jumping' -> 'he' | 'is' | 'dog'
'dog' -> 'over' | 'the' | 'lazy'
"""
dependency_grammar = nltk.grammar.DependencyGrammar.fromstring(dependency_rules)
print dependency_grammar
dp = nltk.ProjectiveDependencyParser(dependency_grammar)
res = [item for item in dp.parse(tokens)]
tree = res[0]
print tree
tree.draw()
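# --- Hedged Python 3 sketch (editor addition, not part of the original file) ---
# The original mixes Python 2 print statements with a Stanford parser that needs
# local jar files. The same token-level dependency printout in current spaCy,
# assuming `pip install spacy` and `python -m spacy download en_core_web_sm`:
import spacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("The brown fox is quick and he is jumping over the lazy dog")
for token in doc:
    # token.lefts / token.rights are the left- and right-hand dependents of the token
    print("{}<---{}[{}]--->{}".format(
        [t.orth_ for t in token.lefts], token.orth_, token.dep_,
        [t.orth_ for t in token.rights]))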
|
[
"krbhavanisankar@gmail.com"
] |
krbhavanisankar@gmail.com
|
d018f1d0babe1ace6fc29381446346cddfd4e2a2
|
39e1320c74bcf0bbebb855645b4f538e9ef361f4
|
/src/genui/projects/models.py
|
352d603101f8e355490bba68659b5203e196e5ba
|
[
"MIT"
] |
permissive
|
Tontolda/genui
|
4c684e08e78b848e5afa7e4333bbea46c30d9d51
|
c5b7da7c5a99fc16d34878e2170145ac7c8e31c4
|
refs/heads/master
| 2023-04-14T12:57:31.774323
| 2021-01-29T08:01:30
| 2021-01-29T08:01:30
| 344,443,814
| 0
| 0
|
NOASSERTION
| 2021-04-24T14:56:35
| 2021-03-04T11:00:54
| null |
UTF-8
|
Python
| false
| false
| 2,454
|
py
|
from django.conf import settings
from django.db import models
from polymorphic.models import PolymorphicModel
from abc import ABCMeta, abstractmethod
from django.utils import timezone
class PolymorphicAbstractModelMeta(ABCMeta, type(PolymorphicModel)):
pass
class PolymorphicAbstractModel(PolymorphicModel):
__metaclass__ = PolymorphicAbstractModelMeta
class Meta:
abstract = True
class BaseProject(PolymorphicAbstractModel):
name = models.CharField(max_length=256, blank=False)
description = models.TextField(max_length=10000, blank=True)
created = models.DateTimeField(blank=True)
updated = models.DateTimeField(blank=True, verbose_name="Last Update")
owner = models.ForeignKey(settings.AUTH_USER_MODEL, null=False, on_delete=models.CASCADE)
class Meta:
abstract = True
@abstractmethod
def update(self):
pass
class BaseDataSet(PolymorphicAbstractModel):
project = models.ForeignKey(BaseProject, on_delete=models.CASCADE)
name = models.CharField(max_length=256, blank=False)
description = models.TextField(max_length=10000, blank=True)
created = models.DateTimeField()
updated = models.DateTimeField('last_updated')
class Meta:
abstract = True
@abstractmethod
def update(self):
pass
class Project(BaseProject):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.pk is None:
self.created = timezone.now()
self.update()
def update(self):
self.updated = timezone.now()
def save(self, *args, **kwargs):
self.update()
super().save(*args, **kwargs)
def __str__(self):
return '%s object (%s)' % (self.__class__.__name__, self.name)
class DataSet(BaseDataSet):
project = models.ForeignKey(Project, on_delete=models.CASCADE, blank=False)
class Meta:
abstract = True
def __str__(self):
return '%s object (%s)' % (self.__class__.__name__, self.name)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.pk is None:
self.created = timezone.now()
self.update()
def update(self):
if self.pk is not None:
self.project.update()
self.updated = timezone.now()
def save(self, *args, **kwargs):
self.update()
self.project.save()
super().save(*args, **kwargs)
|
[
"sicho.martin@gmail.com"
] |
sicho.martin@gmail.com
|
35b0b17ca058a4213445abfdb3222aa67dceb8e9
|
2ff7e53d5e512cd762217ca54317982e07a2bb0c
|
/eve-8.51.857815/carbonui/camera/behaviors/cameraBehavior.py
|
0e12ce3324f0f6b154ae49f02a86cd143e92084a
|
[] |
no_license
|
nanxijw/Clara-Pretty-One-Dick
|
66d3d69426642b79e8fd4cc8e0bec23adeeca6d6
|
50de3488a2140343c364efc2615cf6e67f152be0
|
refs/heads/master
| 2021-01-19T09:25:07.555284
| 2015-02-17T21:49:33
| 2015-02-17T21:49:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,472
|
py
|
#Embedded file name: carbonui/camera/behaviors\cameraBehavior.py
"""
Simple base class for camera behaviors.
Contains base functionality and Corified versions of common functions needed.
"""
class CameraBehavior(object):
__guid__ = 'cameras.CameraBehavior'
def __init__(self):
self.gameWorldClient = sm.GetService('gameWorldClient')
self.gameWorld = None
self._LoadGameWorld()
def _LoadGameWorld(self):
if self.gameWorldClient.HasGameWorld(session.worldspaceid):
self.gameWorld = self.gameWorldClient.GetGameWorld(session.worldspaceid)
def ProcessCameraUpdate(self, camera, now, frameTime):
"""
Implemented in derived classes, what do I do when the camera tells me to update?
"""
pass
def _GetEntity(self, entID):
return sm.GetService('entityClient').FindEntityByID(entID)
def _GetEntityModel(self, entID):
entity = sm.GetService('entityClient').FindEntityByID(entID)
if entity and entity.HasComponent('paperdoll'):
return entity.GetComponent('paperdoll').doll.avatar
def Reset(self):
"""
Implemented in derived classes.
Used for when changing the scene and values need to be reset
"""
pass
def OnBehaviorAdded(self, camera):
"""
Implemented in derived classes.
Used for custom behavior for when this behavior is added to a camera
"""
pass
|
[
"billchang.e@gmail.com"
] |
billchang.e@gmail.com
|
da4a32962100827a0d1787dccf3a4722e5168197
|
c7bde9b78057cc60c9ccd03145b5baf55284c875
|
/blah.py
|
356e579ee69aaab75cf027fcac689390897a3106
|
[] |
no_license
|
sseaver/tic_tac_toe
|
cb38f1cd11d879ee94eb91fbcd9b40a5736aaea8
|
7607123435419b0862d7e36373263f3592eeca9e
|
refs/heads/master
| 2021-01-17T19:13:53.498545
| 2016-09-21T21:58:10
| 2016-09-21T21:58:10
| 68,764,989
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,016
|
py
|
def winning():
    column_zero = [row[0] for row in game_matrix]
    column_one = [row[1] for row in game_matrix]
    column_two = [row[2] for row in game_matrix]
    # Check the three columns, then the three rows, announcing the winner before returning.
    for line in (column_zero, column_one, column_two,
                 game_matrix[0], game_matrix[1], game_matrix[2]):
        if line == ["X", "X", "X"]:
            print ("X has won the game!")
            return True
        elif line == ["O", "O", "O"]:
            print ("O has won the game!")
            return True
    return False
########################################################
game_over = winning()
draw_board(game_matrix)
print ("X goes first!")
while not game_over:
print ("Pick a coordinate to place your 'X'")
x_move(int(input("Row: ")), int(input("Column: ")))
draw_board(game_matrix)
    game_over = winning()
    if game_over:
        break
print ("Pick a coordinate to place your 'O'")
o_move(int(input("Row: ")), int(input("Column: ")))
draw_board(game_matrix)
game_over = winning()
|
[
"sseaver321@gmail.com"
] |
sseaver321@gmail.com
|
fd8b773813e15c6655ea2f1fa0dd72bbe07d2e9c
|
2d5171ac7f2640ed73b48aebf4b96e29d5cad818
|
/ABC164/D.py
|
1d6040a7c8e43dbb4d68df474ac22cf5856aacee
|
[] |
no_license
|
kentahoriuchi/Atcorder
|
d7b8308424175f32d47f24bb15303695780e1611
|
f6449d4e9dc7d92210497e3445515fe95b74c659
|
refs/heads/master
| 2023-06-06T09:26:46.963642
| 2021-06-13T15:08:04
| 2021-06-13T15:08:04
| 255,396,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
S = input()
count = 0
l = [0]*2019
l[0] = 1
mod = 2019
tmp = 0
for i in range(1,len(S)+1):
tmp = (tmp + int(S[len(S)-i])*pow(10,i-1,mod))%mod
l[tmp] += 1
for j in range(2019):
if l[j] >= 2:
count += l[j]*(l[j]-1)//2
print(count)
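# --- Hedged cross-check sketch (editor addition, not part of the original submission) ---
# Why counting equal remainders works: two suffixes with the same remainder mod 2019
# differ by (substring value) * 10^k, and gcd(10, 2019) = 1, so the substring between
# them is itself a multiple of 2019. Wrapping the same logic in a function and
# comparing it with a brute force on a short random digit string:
import random

def count_multiples_of_2019(s):
    mod = 2019
    buckets = [0] * mod
    buckets[0] = 1  # remainder of the empty suffix
    tmp = 0
    for i in range(1, len(s) + 1):
        tmp = (tmp + int(s[len(s) - i]) * pow(10, i - 1, mod)) % mod
        buckets[tmp] += 1
    return sum(b * (b - 1) // 2 for b in buckets)

def brute_force(s):
    return sum(int(s[i:j]) % 2019 == 0
               for i in range(len(s)) for j in range(i + 1, len(s) + 1))

sample = "".join(random.choice("0123456789") for _ in range(40))
assert count_multiples_of_2019(sample) == brute_force(sample)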
|
[
"dorahori_108@yahoo.co.jp"
] |
dorahori_108@yahoo.co.jp
|
a1453f77a8fd8eb705cbce7eeabba2f607626caa
|
ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1
|
/res/packages/scripts/scripts/client/gui/prb_control/entities/sandbox/pre_queue/ctx.py
|
72525ca7d0d926af5be80b5ae0c31108efc663d7
|
[] |
no_license
|
webiumsk/WOT-0.9.20.0
|
de3d7441c5d442f085c47a89fa58a83f1cd783f2
|
811cb4e1bca271372a1d837a268b6e0e915368bc
|
refs/heads/master
| 2021-01-20T22:11:45.505844
| 2017-08-29T20:11:38
| 2017-08-29T20:11:38
| 101,803,045
| 0
| 1
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 1,055
|
py
|
# 2017.08.29 21:45:38 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/prb_control/entities/sandbox/pre_queue/ctx.py
from constants import QUEUE_TYPE
from gui.prb_control.entities.base.pre_queue.ctx import QueueCtx
from gui.shared.utils.decorators import ReprInjector
@ReprInjector.withParent(('getVehicleInventoryID', 'vInvID'), ('getWaitingID', 'waitingID'))
class SandboxQueueCtx(QueueCtx):
"""
Sandbox enqueue context
"""
def __init__(self, vInventoryID, waitingID = ''):
super(SandboxQueueCtx, self).__init__(entityType=QUEUE_TYPE.SANDBOX, waitingID=waitingID)
self.__vInventoryID = vInventoryID
def getVehicleInventoryID(self):
"""
Gets the selected vehicle inventory ID
"""
return self.__vInventoryID
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\prb_control\entities\sandbox\pre_queue\ctx.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:45:38 Střední Evropa (letní čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
78fc991a1014c50f1012814c34bcd40c58596c95
|
1a194118c60a22b0b4e039e3949403d00b2df8ea
|
/others/find_planned_roll.py
|
3083ee0ac915357f643152f702fcf8eb5377049f
|
[] |
no_license
|
tisobe/Ocat
|
645e62cbd05a4b050c6ca45966271e4108d2fe74
|
a1d66ee8163f73a23ce3964f1347365c8a4e36ae
|
refs/heads/master
| 2020-12-24T06:47:07.753060
| 2016-11-17T18:24:54
| 2016-11-17T18:24:54
| 73,399,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,737
|
py
|
#!/usr/bin/env /proj/sot/ska/bin/python
#############################################################################################################
# #
# find_planned_roll.py: find roll angle and the range from currently planned table #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# Last Update: Nov 01, 2016 #
# #
#############################################################################################################
import sys
import os
import string
import re
import copy
import math
import Cookie
import unittest
import time
from os.path import join, dirname, realpath
BASE_DIR = dirname(dirname(realpath(__file__)))
#
#--- reading directory list
#
path = '/proj/web-r2d2-v/lib/python2.7/site-packages/r2d2-v/ocatsite/static/dir_list_py'
f = open(path, 'r')
data = [line.strip() for line in f.readlines()]
f.close()
for ent in data:
atemp = re.split('::', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec "%s = %s" %(var, line)
#
#--- append path to a private folders
#
sys.path.append(base_dir)
sys.path.append(mta_dir)
import mta_common_functions as mcf
import convertTimeFormat as tcnv
#------------------------------------------------------------------------------------------------------
#-- find_planned_roll: find roll angle and the range from currently planned table --
#------------------------------------------------------------------------------------------------------
def find_planned_roll():
"""
find roll angle and the range from currently planned table
input: none but read from /proj/web-icxc/htdocs/mp/lts/lts-current.html
output: <out_dir>/mp_long_term_roll --- a list of obsid:roll:range
"""
f = open('/proj/web-icxc/htdocs/mp/lts/lts-current.html', 'r')
data = [line.strip() for line in f.readlines()]
f.close()
ofile = BASE_DIR + '/ocatsite/data_save/mp_long_term_roll'
fo = open(ofile, 'w')
for ent in data:
#
#--- after "LTS changes", the file list different information
#
mc = re.search('LTS changes', ent)
if mc is not None:
break
#
#--- find obsid
#
mc = re.search('target.cgi', ent)
if mc is not None:
atemp = re.split('target_param.cgi\?', ent)
btemp = re.split('">', atemp[1])
obsid = btemp[0]
#
#--- find the positions of roll/range information
#
btemp = re.split('\s+', ent)
bcnt = 0
for test in btemp:
mc1 = re.search('ACIS', test)
mc2 = re.search('HRC', test)
if (mc1 is not None) or (mc2 is not None):
break
else:
bcnt += 1
#
#--- count back from the instrument column to find the information needed
#
pl_roll = btemp[bcnt - 4]
pl_range = btemp[bcnt - 3]
line = obsid + ':' + pl_roll + ':' + pl_range + '\n'
fo.write(line)
fo.close()
#------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
find_planned_roll()
|
[
"isobe@head.cfa.harvard.edu"
] |
isobe@head.cfa.harvard.edu
|
b16a7d2303c6584ceaae6c79e8bd71faad1e197f
|
ba0cbdae81c171bd4be7b12c0594de72bd6d625a
|
/MyToontown/py2/toontown/effects/FireworkSparkles.pyc.py
|
7c201e409d069ab5b1d3220e537f0c082007c3e2
|
[] |
no_license
|
sweep41/Toontown-2016
|
65985f198fa32a832e762fa9c59e59606d6a40a3
|
7732fb2c27001264e6dd652c057b3dc41f9c8a7d
|
refs/heads/master
| 2021-01-23T16:04:45.264205
| 2017-06-04T02:47:34
| 2017-06-04T02:47:34
| 93,279,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,693
|
py
|
# 2013.08.22 22:19:53 Pacific Daylight Time
# Embedded file name: toontown.effects.FireworkSparkles
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect, Particles, ForceGroup
from EffectController import EffectController
from PooledEffect import PooledEffect
import random
class FireworkSparkles(PooledEffect, EffectController):
__module__ = __name__
def __init__(self):
PooledEffect.__init__(self)
EffectController.__init__(self)
model = loader.loadModel('phase_4/models/props/tt_m_efx_ext_fireworkCards')
self.card = model.find('**/tt_t_efx_ext_particleSpark_sharp')
self.cardScale = 16.0
self.setDepthWrite(0)
self.setColorScaleOff()
self.setLightOff()
self.startDelay = 0.0
self.effectScale = 1.0
self.effectColor = Vec4(1, 1, 1, 1)
self.f = ParticleEffect.ParticleEffect('Sparkles')
self.f.reparentTo(self)
self.p0 = Particles.Particles('particles-2')
self.p0.setFactory('PointParticleFactory')
self.p0.setRenderer('SpriteParticleRenderer')
self.p0.setEmitter('SphereVolumeEmitter')
self.f.addParticles(self.p0)
f0 = ForceGroup.ForceGroup('Gravity')
force0 = LinearVectorForce(Vec3(0.0, 0.0, -15.0), 1.0, 0)
force0.setVectorMasks(1, 1, 1)
force0.setActive(1)
f0.addForce(force0)
self.f.addForceGroup(f0)
self.p0.setPoolSize(64)
self.p0.setBirthRate(0.02)
self.p0.setLitterSize(10)
self.p0.setLitterSpread(0)
self.p0.setSystemLifespan(0.0)
self.p0.setLocalVelocityFlag(1)
self.p0.setSystemGrowsOlderFlag(0)
self.p0.factory.setLifespanBase(1.5)
self.p0.factory.setLifespanSpread(1.0)
self.p0.factory.setMassBase(1.0)
self.p0.factory.setMassSpread(0.0)
self.p0.factory.setTerminalVelocityBase(400.0)
self.p0.factory.setTerminalVelocitySpread(0.0)
self.p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
self.p0.renderer.setUserAlpha(1.0)
self.p0.renderer.setColorBlendMode(ColorBlendAttrib.MAdd, ColorBlendAttrib.OIncomingAlpha, ColorBlendAttrib.OOne)
self.p0.renderer.setFromNode(self.card)
self.p0.renderer.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
self.p0.renderer.setXScaleFlag(1)
self.p0.renderer.setYScaleFlag(1)
self.p0.renderer.setAnimAngleFlag(1)
self.p0.renderer.setNonanimatedTheta(0.0)
self.p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
self.p0.renderer.setAlphaDisable(0)
self.p0.renderer.getColorInterpolationManager().addLinear(0.0, 0.1, Vec4(0, 0, 0, 0), self.effectColor, 1)
self.p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
self.p0.emitter.setAmplitudeSpread(0.0)
self.p0.emitter.setOffsetForce(Vec3(0.0, 0.0, 0.0))
self.p0.emitter.setExplicitLaunchVector(Vec3(1.0, 0.0, 0.0))
self.p0.emitter.setRadiateOrigin(Point3(0.0, 0.0, 0.0))
self.setEffectScale(self.effectScale)
self.setEffectColor(self.effectColor)
def createTrack(self):
self.track = Sequence(Wait(self.startDelay), Func(self.p0.setBirthRate, 0.03), Func(self.p0.clearToInitial), Func(self.f.start, self, self), Wait(0.3), Func(self.p0.setBirthRate, 100.0), Wait(2.5), Func(self.cleanUpEffect))
def setEffectScale(self, scale):
self.effectScale = scale
self.p0.renderer.setInitialXScale(1.2 * self.cardScale * scale)
self.p0.renderer.setFinalXScale(1.5 * self.cardScale * scale)
self.p0.renderer.setInitialYScale(1.5 * self.cardScale * scale)
self.p0.renderer.setFinalYScale(1.2 * self.cardScale * scale)
self.p0.emitter.setAmplitude(25.0 * scale)
self.p0.emitter.setRadius(400.0 * scale)
def setRadius(self, radius):
self.p0.emitter.setRadius(radius)
def setEffectColor(self, color):
self.effectColor = color
self.p0.renderer.setColor(self.effectColor)
def cleanUpEffect(self):
EffectController.cleanUpEffect(self)
if self.pool and self.pool.isUsed(self):
self.pool.checkin(self)
def destroy(self):
EffectController.destroy(self)
PooledEffect.destroy(self)
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\effects\FireworkSparkles.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:19:53 Pacific Daylight Time
|
[
"sweep14@gmail.com"
] |
sweep14@gmail.com
|
6ea4fb9c6455a858013bd542634687b28ef21118
|
90c6262664d013d47e9a3a9194aa7a366d1cabc4
|
/tests/storage/cases/test_KT1CM1g1o9RKDdtDKgcBWE59X2KgTc2TcYtC_alpha.py
|
7fd7dede51a72fd1062c3b8743dc3d392f7fa5e8
|
[
"MIT"
] |
permissive
|
tqtezos/pytezos
|
3942fdab7aa7851e9ea81350fa360180229ec082
|
a4ac0b022d35d4c9f3062609d8ce09d584b5faa8
|
refs/heads/master
| 2021-07-10T12:24:24.069256
| 2020-04-04T12:46:24
| 2020-04-04T12:46:24
| 227,664,211
| 1
| 0
|
MIT
| 2020-12-30T16:44:56
| 2019-12-12T17:47:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,160
|
py
|
from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1CM1g1o9RKDdtDKgcBWE59X2KgTc2TcYtC_alpha(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/mainnet/KT1CM1g1o9RKDdtDKgcBWE59X2KgTc2TcYtC_alpha.json')
def test_storage_encoding_KT1CM1g1o9RKDdtDKgcBWE59X2KgTc2TcYtC_alpha(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1CM1g1o9RKDdtDKgcBWE59X2KgTc2TcYtC_alpha(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1CM1g1o9RKDdtDKgcBWE59X2KgTc2TcYtC_alpha(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
|
[
"mz@baking-bad.org"
] |
mz@baking-bad.org
|
ff838d42226d27d960c29df551b9804f4672fa7b
|
a3b749c69f9ed3d10e1013754428b3a07ef49137
|
/presupuestos/urls_presupuestos.py
|
6fc26f4e85dda9c6aa5b0fe7ea4920f69b929b79
|
[] |
no_license
|
adrian052/SPSD
|
7dd8b4aece3ad2e3ece34624f86d488c0f368dcf
|
f93dee58ada43abe0e3cc06ca3e4ef2d17006791
|
refs/heads/main
| 2023-05-27T16:40:09.995245
| 2021-06-08T22:32:59
| 2021-06-08T22:32:59
| 315,161,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
from django.urls import path
from presupuestos.views import lista_presupuestos, nuevo_presupuesto, \
eliminar_presupuesto, editar_presupuesto
app_name = 'presupuestos'
urlpatterns = [
path('lista/', lista_presupuestos, name='lista'),
path('nuevo/', nuevo_presupuesto, name='nuevo'),
path('eliminar/<int:anio>', eliminar_presupuesto, name='eliminar'),
path('editar/<int:anio>', editar_presupuesto, name="editar"),
]
|
[
"vagrant@ubuntu1804.localdomain"
] |
vagrant@ubuntu1804.localdomain
|
218d3ed1c58ad7f4bfc7b1ce49d8780eb94c0a57
|
ab8a34e5b821dde7b09abe37c838de046846484e
|
/twilio/sample-code-master/api/v2010/authorized_connect_app/fetch-default/fetch-default.6.x.py
|
511f053b487454f45e009737f2279a5ad002bea5
|
[] |
no_license
|
sekharfly/twilio
|
492b599fff62618437c87e05a6c201d6de94527a
|
a2847e4c79f9fbf5c53f25c8224deb11048fe94b
|
refs/heads/master
| 2020-03-29T08:39:00.079997
| 2018-09-21T07:20:24
| 2018-09-21T07:20:24
| 149,721,431
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
# Download the helper library from https://www.twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
account_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
auth_token = 'your_auth_token'
client = Client(account_sid, auth_token)
authorized_connect_app = client \
.authorized_connect_apps('CNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
.fetch()
print(authorized_connect_app.connect_app_company_name)
|
[
"sekharfly@gmail.com"
] |
sekharfly@gmail.com
|
70b09e5245552f40904f0ac5b1dabf0e8ef879b3
|
341bd2d71b6b6e3af734f16989aeb450e3e73624
|
/HMC6343A.py
|
5c8c749d819dc0cab48d0b18bb3f51cb5f52419b
|
[] |
no_license
|
ControlEverythingCommunity/CE_PYTHON_LIB
|
5c170f7e3763ab3b160a5fc33f2bb96d4798c7e2
|
736b29434a451a384c2f52490c849239c3190951
|
refs/heads/master
| 2021-01-12T00:39:25.374689
| 2017-08-30T21:54:47
| 2017-08-30T21:54:47
| 78,751,564
| 7
| 7
| null | 2017-12-15T11:08:48
| 2017-01-12T14:05:11
|
Python
|
UTF-8
|
Python
| false
| false
| 4,911
|
py
|
# Distributed with a free-will license.
# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.
# HMC6343A
# This code is designed to work with the HMC6343A_I2CS I2C Mini Module available from ControlEverything.com.
# https://www.controleverything.com/content/Accelorometer?sku=HMC6343A_I2CS#tabs-0-product_tabset-2
import smbus
import time
# Get I2C bus
bus = smbus.SMBus(1)
# I2C address of the device
HMC6343A_DEFAULT_ADDRESS = 0x19
# HMC6343A Register Map
HMC6343A_SW_VERSION = 0x02 # Software Version Number
HMC6343A_OP_MODE1 = 0x04 # Operational Mode Register 1
HMC6343A_OP_MODE2 = 0x05 # Operational Mode Register 2
HMC6343A_SN_LSB = 0x06 # Device Serial Number LSB
HMC6343A_SN_MSB = 0x07 # Device Serial Number MSB
HMC6343A_DEVIATION_LSB = 0x0A # Deviation Angle (+/-1800) in tenths of a degree LSB
HMC6343A_DEVIATION_MSB = 0x0B # Deviation Angle (+/-1800) in tenths of a degree MSB
HMC6343A_VARIATION_LSB = 0x0C # Variation Angle (+/-1800) in tenths of a degree LSB
HMC6343A_VARIATION_MSB = 0x0D # Variation Angle (+/-1800) in tenths of a degree MSB
HMC6343A_XOFFSET_LSB = 0x0E # Hard-Iron Calibration Offset for the X-axis LSB
HMC6343A_XOFFSET_MSB = 0x0F # Hard-Iron Calibration Offset for the X-axis MSB
HMC6343A_YOFFSET_LSB = 0x10 # Hard-Iron Calibration Offset for the Y-axis LSB
HMC6343A_YOFFSET_MSB = 0x11 # Hard-Iron Calibration Offset for the Y-axis MSB
HMC6343A_ZOFFSET_LSB = 0x12 # Hard-Iron Calibration Offset for the Z-axis LSB
HMC6343A_ZOFFSET_MSB = 0x13 # Hard-Iron Calibration Offset for the Z-axis MSB
HMC6343A_FILTER_LSB = 0x14 # Heading IIR Filter LSB
HMC6343A_FILTER_MSB = 0x15 # Heading IIR Filter MSB
HMC6343A_POST_ACCEL = 0x40 # Post Accel Data
HMC6343A_POST_MAG = 0x45 # Post Mag Data
# HMC6343A Operational Mode Register-1 Configuration
HMC6343A_OM1_LEVEL = 0x01 # Level Orientation Set
HMC6343A_OM1_UE = 0x02 # Upright Edge Orientation Set
HMC6343A_OM1_UF = 0x04 # Upright Front Orientation Set
HMC6343A_OM1_STDBY = 0x08 # Stand-by Mode Set
HMC6343A_OM1_RUN = 0x10 # Run Mode Set
HMC6343A_OM1_FILTER = 0x20 # IIR Heading Filter Used
HMC6343A_OM1_CAL = 0x40 # Calculating calibration offsets
HMC6343A_OM1_COMP = 0x80 # Calculating compass data
# HMC6343A Operational Mode Register-2 Configuration
HMC6343A_MR_1 = 0x00 # Measurement Rate = 1Hz
HMC6343A_MR_5 = 0x01 # Measurement Rate = 5Hz
HMC6343A_MR_10 = 0x02 # Measurement Rate = 10Hz
class HMC6343A():
def __init__(self):
self.mode_config()
self.measurement_rate_config()
def mode_config(self):
"""Select the Operational Mode Register-1 Configuration from the given provided values"""
MODE_CONFIG = (HMC6343A_OM1_LEVEL | HMC6343A_OM1_RUN)
bus.write_byte_data(HMC6343A_DEFAULT_ADDRESS, HMC6343A_OP_MODE1, MODE_CONFIG)
def measurement_rate_config(self):
"""Select the Operational Mode Register-2 Configuration from the given provided values"""
bus.write_byte_data(HMC6343A_DEFAULT_ADDRESS, HMC6343A_OP_MODE2, HMC6343A_MR_5)
def read_accl(self):
"""Read data back from HMC6343A_POST_ACCEL(0x40), 6 bytes
X-Axis Accl MSB, X-Axis Accl LSB, Y-Axis Accl MSB, Y-Axis Accl LSB, Z-Axis Accl MSB, Z-Axis Accl LSB"""
data = bus.read_i2c_block_data(HMC6343A_DEFAULT_ADDRESS, HMC6343A_POST_ACCEL, 6)
# Convert the data
xAccl = data[0] * 256 + data[1]
if xAccl > 32767 :
xAccl -= 65536
yAccl = data[2] * 256 + data[3]
if yAccl > 32767 :
yAccl -= 65536
zAccl = data[4] * 256 + data[5]
if zAccl > 32767 :
zAccl -= 65536
return {'x' : xAccl, 'y' : yAccl, 'z' : zAccl}
def read_mag(self):
"""Read data back from HMC6343A_POST_MAG(0x45), 6 bytes
X-Axis Mag MSB, X-Axis Mag LSB, Y-Axis Mag MSB, Y-Axis Mag LSB, Z-Axis Mag MSB, Z-Axis Mag LSB"""
data = bus.read_i2c_block_data(HMC6343A_DEFAULT_ADDRESS, HMC6343A_POST_MAG, 6)
# Convert the data
xMag = data[0] * 256 + data[1]
if xMag > 32767 :
xMag -= 65536
yMag = data[2] * 256 + data[3]
if yMag > 32767 :
yMag -= 65536
zMag = data[4] * 256 + data[5]
if zMag > 32767 :
zMag -= 65536
return {'x' : xMag, 'y' : yMag, 'z' : zMag}
from HMC6343A import HMC6343A
hmc6343a = HMC6343A()
while True :
hmc6343a.mode_config()
hmc6343a.measurement_rate_config()
time.sleep(0.1)
accl = hmc6343a.read_accl()
print "Acceleration in X-Axis : %d"%(accl['x'])
print "Acceleration in Y-Axis : %d"%(accl['y'])
print "Acceleration in Z-Axis : %d"%(accl['z'])
mag = hmc6343a.read_mag()
print "Magnetic field in X-Axis : %d"%(mag['x'])
print "Magnetic field in Y-Axis : %d"%(mag['y'])
print "Magnetic field in Z-Axis : %d"%(mag['z'])
print " ************************************* "
time.sleep(1)
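# --- Hedged helper sketch (editor addition, not part of the vendor file) ---
# The repeated "subtract 65536 when > 32767" blocks above are two's-complement
# conversions of big-endian 16-bit readings; they could be factored out like this:
def to_signed_16bit(msb, lsb):
    value = (msb << 8) | lsb  # same as msb * 256 + lsb
    return value - 65536 if value > 32767 else value

assert to_signed_16bit(0xFF, 0xFF) == -1
assert to_signed_16bit(0x00, 0x10) == 16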
|
[
"ryker1990@gmail.com"
] |
ryker1990@gmail.com
|
0615cccaa29d7378c2edef98d3e1ab29fa9a44ba
|
32cb0be487895629ad1184ea25e0076a43abba0a
|
/LifePictorial/top/api/rest/HanoiDocumentsGetRequest.py
|
a8388d7a45fc9eab1cb0d78258a5e7a11749c1d2
|
[] |
no_license
|
poorevil/LifePictorial
|
6814e447ec93ee6c4d5b0f1737335601899a6a56
|
b3cac4aa7bb5166608f4c56e5564b33249f5abef
|
refs/heads/master
| 2021-01-25T08:48:21.918663
| 2014-03-19T08:55:47
| 2014-03-19T08:55:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
'''
Created by auto_sdk on 2014-02-10 16:59:30
'''
from top.api.base import RestApi
class HanoiDocumentsGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.code = None
self.current_page = None
self.id = None
self.name = None
self.page_size = None
def getapiname(self):
return 'taobao.hanoi.documents.get'
|
[
"poorevil@gmail.com"
] |
poorevil@gmail.com
|
1d56cb579b4790f733317d1a79f0a8d3c8cd7f52
|
1fdc846f4e5b7bda56e8740b859c8340d9b5141a
|
/tests/test_input_biot_npbc_lagrange.py
|
cc685e4db4201cb225b7adde4304c90956861222
|
[
"BSD-3-Clause"
] |
permissive
|
olivierverdier/sfepy
|
b824fdab7d91e137a371c277901fbb807b316b02
|
83aefb7b33ea17f4acb83388ba8bc7314c77616c
|
refs/heads/master
| 2021-01-18T05:39:13.127137
| 2010-10-25T13:13:18
| 2010-10-25T17:31:37
| 1,022,869
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
input_name = '../examples/biot/biot_npbc_lagrange.py'
output_name = 'test_biot_npbc_lagrange.vtk'
from testsBasic import TestInput
class Test(TestInput):
pass
|
[
"cimrman3@ntc.zcu.cz"
] |
cimrman3@ntc.zcu.cz
|
e1eca558e5cbce67053dce1a670a4aa3069896cd
|
ff6248be9573caec94bea0fa2b1e4b6bf0aa682b
|
/StudentProblem/10.21.12.28/5/1569571946.py
|
af429088f6ad955c7b65295f7f9a893d0bbe5fc1
|
[] |
no_license
|
LennartElbe/codeEvo
|
0e41b1a7705204e934ef71a5a28c047366c10f71
|
e89b329bc9edd37d5d9986f07ca8a63d50686882
|
refs/heads/master
| 2020-12-21T17:28:25.150352
| 2020-03-26T10:22:35
| 2020-03-26T10:22:35
| 236,498,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
import functools
import typing
import string
import random
import pytest
## Solution, part 1.
def mysum(xs: list) -> int:
return sum(xs)
## Solution, part 2 (tests).
def test_mysum():
assert mysum([1,2,3]) == 6
######################################################################
|
[
"lenni.elbe@gmail.com"
] |
lenni.elbe@gmail.com
|
e83344bc5686876da9d4a28fdd9a32c2540bccf5
|
a7b5adc5f72b9ef71c0c71691492f8af8a32c868
|
/Minseo-Kim/leetcode/206_Reverse_linked_list.py
|
73148fa6b52f9be00ce91fc881e9ae477ff1fd88
|
[] |
no_license
|
mintheon/Practice-Algorithm
|
535ff607e36d1bfa9f800a28091a52c48748221c
|
3a653a1d8cc6e1438cab47a427ccd0b421a10010
|
refs/heads/master
| 2023-04-10T17:43:10.434210
| 2021-04-10T18:46:26
| 2021-04-10T18:46:26
| 347,719,297
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 610
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
# Time complexity: O(n), Space complexity: O(1)
# Runtime: 32 ms, faster than 87.13% of Python3
# Memory Usage: 15.5 MB, less than 93.25% of Python3
class Solution:
def reverseList(self, head: ListNode) -> ListNode:
node, prev = head, None
while node:
# node <- node.next
# node.next <- prev
# prev <- node(current)
node.next, node, prev = prev, node.next, node
return prev
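# --- Hedged driver sketch (editor addition, not part of the original submission) ---
# The single line `node.next, node, prev = prev, node.next, node` relies on the whole
# right-hand side being evaluated before any assignment happens. LeetCode injects
# ListNode; locally this definition has to appear before the Solution class above.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

node = Solution().reverseList(ListNode(1, ListNode(2, ListNode(3))))
values = []
while node:
    values.append(node.val)
    node = node.next
print(values)  # [3, 2, 1]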
|
[
"noreply@github.com"
] |
mintheon.noreply@github.com
|
3f53f5573681c9ce0fc51b144d84850bc70bcfb1
|
7a87b2701dedeab6ad0c237feebdb3434d28231e
|
/ui_framework/page/index_page.py
|
623efd36b370587149fe71e05467bb3cc8843856
|
[] |
no_license
|
tingyu-ui/test_dwpytest
|
002a81f897b61c1e593d0f07f973b8380a725f9c
|
4765ed74b64582453ddce6e318aa626049b133e8
|
refs/heads/master
| 2023-08-28T02:25:25.814423
| 2021-10-06T17:58:00
| 2021-10-06T17:59:02
| 375,302,284
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 655
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Page object for the Xueqiu (雪球) home page
# Inherits BasePage directly and reuses the UI operations already wrapped there
import yaml
from ui_framework.base_page import BasePage
class IndexPage(BasePage):
def goto_market(self):
        # "xpath" here is equivalent to By.xpath
        # self.find("xpath", "//*[@text='行情']").click()
        # print(data)
        # data format: {'-action': 'click', 'by': 'xpath', 'value': "//*[@text='行情']"}
        # per method name: [{'action': , 'by': , 'value': , {}, {}}]
        # use the steps helper to pull every step out of the yaml file
# steps = data.get("goto_market")
self.run_steps("../page/index_page.yaml", "goto_market")
|
[
"you@example.com"
] |
you@example.com
|
11f67c0308b87d360a68ff26de26d4697538d0de
|
0b842bcb3bf20e1ce628d39bf7e11abd7699baf9
|
/sql/register_sql.py
|
9fac3d26fe0e079d1338a20b332cb6855862c0c9
|
[] |
no_license
|
afeset/miner2-tools
|
75cc8cdee06222e0d81e39a34f621399e1ceadee
|
81bcc74fe7c0ca036ec483f634d7be0bab19a6d0
|
refs/heads/master
| 2016-09-05T12:50:58.228698
| 2013-08-27T21:09:56
| 2013-08-27T21:09:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
'''
Created on Aug 14, 2013
@author: asaf
'''
import sys
sys.path.append("/home/asaf/miner2.0")
import miner_globals
sys.path.append("/home/asaf/miner2-tools")
from sql import sql_target
miner_globals.addTargetToClassMapping("sql",None,"sql_target.sqlDump","Write To SQL DB")
|
[
"afeset@gmail.com"
] |
afeset@gmail.com
|
d6b4388855a0884a32f7d4a5d924a2e063dda428
|
03e4e75a00044df181adbebf5f44b5076d97a15f
|
/example/0_Basic_usage_of_the_library/python_motor/3_delete.py
|
944d7f2e83b32bde74bae3324bcb045b7ab6c39c
|
[
"MIT"
] |
permissive
|
RecluseXU/learning_spider
|
3820b15654bb5824b1f92c53389d24799ff2bb88
|
43831e2fbbd5de0cf729ce8c12c84d043b56e855
|
refs/heads/master
| 2023-06-08T09:36:26.307395
| 2023-05-19T02:48:08
| 2023-05-19T02:48:08
| 234,718,806
| 64
| 14
| null | 2020-01-20T11:54:29
| 2020-01-18T10:38:06
| null |
UTF-8
|
Python
| false
| false
| 820
|
py
|
# -*- encoding: utf-8 -*-
'''
@Time : 2021-06-09
@Author : EvilRecluse
@Contact : https://github.com/RecluseXU
@Desc    :   delete operations
'''
# here put the import lib
from motor.motor_asyncio import AsyncIOMotorClient
import asyncio
async def do_delete(client: AsyncIOMotorClient):
""" 删除一条记录
"""
collection = client['temp']['temp']
result = await collection.delete_one({'name': '百度'})
print(result)
async def do_delete_many(client: AsyncIOMotorClient):
""" 删除多条记录
"""
collection = client['temp']['temp']
result = await collection.delete_many({})
print(result)
client = AsyncIOMotorClient('mongodb://localhost:27017')
loop = asyncio.get_event_loop()
loop.run_until_complete(do_delete(client))
loop.run_until_complete(do_delete_many(client))
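# --- Hedged variant (editor addition, not part of the original example) ---
# Motor returns PyMongo result objects, so DeleteResult.deleted_count is available
# and is usually more informative to print than the raw object:
async def do_delete_with_count(client: AsyncIOMotorClient):
    collection = client['temp']['temp']
    result = await collection.delete_many({})
    print('deleted %d documents' % result.deleted_count)

loop.run_until_complete(do_delete_with_count(client))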
|
[
"444640050@qq.com"
] |
444640050@qq.com
|
ba5b46a31f4fb683772f0bbe00da6ac8986514ce
|
b03878679b1e07e0ec962083dd4d058d1503180f
|
/pyoxford/translator_api.py
|
50ec01c31e66fc1aef9305b12d2977aaffa05fc1
|
[
"MIT"
] |
permissive
|
jhoelzl/pyoxford
|
d3a8abfb822706fbba17792e8519ac3d2d05e36d
|
9e3f2e0130951e0ccb3c3f7fd8798219e1c36ee2
|
refs/heads/master
| 2020-12-28T12:07:27.963018
| 2015-11-28T14:33:44
| 2015-11-28T14:33:44
| 58,353,003
| 0
| 0
| null | 2016-05-09T06:17:24
| 2016-05-09T06:17:23
| null |
UTF-8
|
Python
| false
| false
| 2,222
|
py
|
import urllib.parse
from xml.etree import ElementTree
import requests
class Translator(object):
AUTH_URL = "https://datamarket.accesscontrol.windows.net/v2/OAuth2-13"
API_ROOT = "http://api.microsofttranslator.com/v2/Http.svc"
TRANSLATE_URL = "http://api.microsofttranslator.com/v2/Http.svc/Translates"
def __init__(self, client_id, client_secret):
self.__token = ""
self.authorize(client_id, client_secret)
def authorize(self, client_id, client_secret):
headers = {
"Content-type": "application/x-www-form-urlencoded"
}
params = urllib.parse.urlencode({
"grant_type": "client_credentials",
"client_id": client_id,
"client_secret": client_secret,
"scope": "http://api.microsofttranslator.com"
})
resp = requests.post(self.AUTH_URL, data=params, headers=headers)
if resp.ok:
_body = resp.json()
self.__token = _body["access_token"]
else:
resp.raise_for_status()
def detect(self, text):
params = {
"text": text
}
url = self.API_ROOT + "/Detect?" + urllib.parse.urlencode(params)
resp = requests.get(url, headers=self.__make_header())
result = {}
if resp.ok:
root = ElementTree.fromstring(resp.content)
result = root.text
else:
resp.raise_for_status()
return result
def translate(self, text, lang_to, lang_from=""):
# language codes
# https://msdn.microsoft.com/en-us/library/hh456380.aspx
params = {
"text": text,
"to": lang_to
}
if lang_from:
params["from"] = lang_from
url = self.API_ROOT + "/Translate?" + urllib.parse.urlencode(params)
resp = requests.get(url, headers=self.__make_header())
result = {}
if resp.ok:
root = ElementTree.fromstring(resp.content)
result = root.text
else:
resp.raise_for_status()
return result
def __make_header(self):
return {
"Authorization": "Bearer {0}".format(self.__token)
}
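# --- Hedged usage sketch (editor addition, not part of the original module) ---
# Placeholder credentials only; this legacy Azure DataMarket endpoint may no longer
# accept new registrations, so treat this as an illustration of the call pattern.
if __name__ == "__main__":
    translator = Translator("my-client-id", "my-client-secret")
    print(translator.detect("Guten Morgen"))           # e.g. "de"
    print(translator.translate("Good morning", "ja"))  # translate into Japanese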
|
[
"icoxfog417@yahoo.co.jp"
] |
icoxfog417@yahoo.co.jp
|
2ae95574f310aa8df7b035537d7208bc72e1225f
|
5e381364c2ab31ff3618369085afffba6caa8edb
|
/recipes/squirrel/all/test_package/conanfile.py
|
5789855d52cb28e63a00eaa87670af8ce596836e
|
[
"MIT"
] |
permissive
|
CAMOBAP/conan-center-index
|
16aea68a6d22da22831ba985773125e8eda08f00
|
67d57532bdad549fef3fa6cb8fcdfa86bc55e4f1
|
refs/heads/master
| 2023-07-30T08:58:57.285571
| 2021-10-02T14:57:54
| 2021-10-02T14:57:54
| 323,262,699
| 1
| 0
|
MIT
| 2021-05-29T13:37:04
| 2020-12-21T07:30:02
|
Python
|
UTF-8
|
Python
| false
| false
| 559
|
py
|
from conans import ConanFile, CMake, tools
import os
class TestPackageConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake", "cmake_find_package_multi"
def build(self):
cmake = CMake(self)
cmake.definitions["SQUIRREL_SHARED"] = self.options["squirrel"].shared
cmake.configure()
cmake.build()
def test(self):
if not tools.cross_building(self.settings):
bin_path = os.path.join("bin", "test_package")
self.run(bin_path, run_environment=True)
|
[
"noreply@github.com"
] |
CAMOBAP.noreply@github.com
|
23b3665560b2105af94061d1dfb27c99ee78e8e3
|
709c35fded3567512c1eec3bb7facb63e4891903
|
/mark2cure/task/ner/models.py
|
5c8d298d572b68cf492693848e88e40401c9c0fc
|
[
"MIT"
] |
permissive
|
SuLab/mark2cure
|
da8ddca9d35cc10116417451d8a4caea29f279c5
|
63d20e75b8817ad75c6766b4d8a7a8ee8207d512
|
refs/heads/master
| 2023-02-04T08:51:22.372167
| 2018-02-28T02:50:25
| 2018-02-28T02:50:25
| 54,926,527
| 17
| 14
| null | 2023-01-11T22:31:16
| 2016-03-28T21:58:29
|
Python
|
UTF-8
|
Python
| false
| false
| 790
|
py
|
from django.db import models
from .managers import EntityRecognitionAnnotationManager
from django.forms.models import model_to_dict
class EntityRecognitionAnnotation(models.Model):
# Only access through Document.Annotation.metadata.RelationAnnotation
DISEASE = 0
GENE = 1
TREATMENT = 2
TYPE_CHOICES = (
(DISEASE, 'Disease'),
(GENE, 'Gene'),
(TREATMENT, 'Treatment')
)
type_idx = models.IntegerField(choices=TYPE_CHOICES, blank=True, null=True)
text = models.TextField(blank=True, null=True)
# (WARNING) Different than BioC
# This is always the start position relative
# to the section, not the entire document
start = models.IntegerField(blank=True, null=True)
objects = EntityRecognitionAnnotationManager()
|
[
"max@maxnanis.com"
] |
max@maxnanis.com
|
93751656fda3d971cc190873ece55248d3a8d757
|
58a87e847f8c6cd5b83cbe5758e779679563cc66
|
/Exercícios complementares/ExercícioG.py
|
cceba6383e0aa814ccbfd132a91df968d153333a
|
[] |
no_license
|
suzanamfp/Atividades-complementares-Python
|
9444dec6cd952db3cdeaf26648f0eb60a89a0862
|
d1a04f3f4d70298aa8448a37ba3e4b5313ced472
|
refs/heads/master
| 2022-12-02T21:23:55.261903
| 2020-08-18T19:28:34
| 2020-08-18T19:28:34
| 288,545,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
def main():
cores = ['azul', 'vermelho', 'cinza', 'amarelo']
cores.remove('vermelho')
print(cores)
main()
|
[
"noreply@github.com"
] |
suzanamfp.noreply@github.com
|
b7559dc7812e5200464d1e7279d70aedf5f87fb3
|
d73b14bd20cfc1320e1911247b28c5109c51b5d1
|
/training/train_openpose.py
|
75a4569e1b897416ebc493dee676ee8e17f683fe
|
[] |
no_license
|
rainyucool/openpose-pytorch
|
9c7e32bdab53417fef30d81a2dc53019dc3dbd84
|
4bc9cf4c927fdb507d89198724a237800cad9b3e
|
refs/heads/master
| 2020-04-01T13:25:04.702843
| 2018-05-07T13:12:11
| 2018-05-07T13:12:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,292
|
py
|
import torch
from torch.utils.data import DataLoader
from config import cfg
from datasets.coco_dataset import CocoDataset
from models import model_openpose
from skeletons.gt_generators.gt_generator_openpose import GroundTruthGeneratorOpenPose
from skeletons.skeleton_config_openpose import SkeletonConfigOpenPose
from training.train_prod import train
from training.train_utils import get_losses, fix_layers_weights
network_model_handler = model_openpose.OpenPoseModelHandler()
network = network_model_handler.get_train_model()
network_model_handler.load_state_dict(network)
fix_layers_weights(network, "stage[2-6]_[1-9]_(joint|limb)_maps")
skeleton_config = SkeletonConfigOpenPose()
gt_generator = GroundTruthGeneratorOpenPose(network, skeleton_config)
train_dataset = CocoDataset([cfg.dataset.train_hdf5], skeleton_config, gt_generator,
network, augment=True)
sim_dataset = CocoDataset(["/media/USERNAME/Store1/sim_train_18_04_17_ITSC.h5"], skeleton_config, gt_generator,
network, augment=True)
train_sets = torch.utils.data.ConcatDataset([train_dataset, sim_dataset])
train_loader = DataLoader(train_sets, cfg.train.batch_size, shuffle=True)
train(network, train_loader, get_losses, fix_regex="stage[2-6]_[1-9]_(joint|limb)_maps")
|
[
"Dennis.Ludl@reutlingen-university.de"
] |
Dennis.Ludl@reutlingen-university.de
|
1a6e7eb51c70cb8fd43657c64d233264aef82988
|
d70a4ec35ac91c914c42611e8b0ee05525371f7a
|
/src/lwc/settings_old.py
|
392a0df76f497a6832088dac3f360cb7bffbefc8
|
[] |
no_license
|
jwilsontt/lwc
|
b662de74e47f350a732cc5e1e52a80bd4da46524
|
975a45aab16019f03880dafcd1b1ee7931613613
|
refs/heads/master
| 2021-01-21T07:53:44.348760
| 2015-08-21T04:05:36
| 2015-08-21T04:05:36
| 33,587,522
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,454
|
py
|
"""
Django settings for lwc project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd=643az*af2ts!1)stb+#5vuk1739ve&vsg&mh_j#qj&aaiz+2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'south',
'joins',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'lwc.middleware.ReferMiddleware',
)
ROOT_URLCONF = 'lwc.urls'
WSGI_APPLICATION = 'lwc.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SHARE_URL = "http://127.0.0.1:8000/?ref="
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
#BASE_DIR + "/templates/",
#'/Users/jasonwilson/Documents/ProgWork/lwc/src/templates/',
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static', 'static_root')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static', 'static_dirs'),
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'static', 'media')
MEDIA_URL = '/media/'
|
[
"jwilsontt@gmail.com"
] |
jwilsontt@gmail.com
|
46ab94c8a8e2a2f0c3ac17d32ed98651ad8589fb
|
0849923ebcde8f56a6e8550ae4f3c5ee3e2e0846
|
/desktop/core/ext-py/MySQL-python-1.2.3c1/setup_posix.py
|
5895984e6cb9da83ebb46ef3be36f4b55e2e0b0d
|
[
"GPL-2.0-only",
"GPL-1.0-or-later",
"Apache-2.0"
] |
permissive
|
thinker0/hue
|
511a5796cdfe45e0b27f1d3309557ca60ce8b13b
|
ee5aecc3db442e962584d3151c0f2eab397d6707
|
refs/heads/master
| 2022-07-10T02:37:23.591348
| 2014-03-27T20:05:00
| 2014-03-27T20:05:00
| 12,731,435
| 0
| 0
|
Apache-2.0
| 2022-07-01T17:44:37
| 2013-09-10T14:13:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,947
|
py
|
from ConfigParser import SafeConfigParser
# This dequote() business is required for some older versions
# of mysql_config
def dequote(s):
if s[0] in "\"'" and s[0] == s[-1]:
s = s[1:-1]
return s
def compiler_flag(f):
return "-%s" % f
def mysql_config(what):
from os import popen
f = popen("%s --%s" % (mysql_config.path, what))
data = f.read().strip().split()
ret = f.close()
if ret:
if ret/256:
data = []
if ret/256 > 1:
raise EnvironmentError("%s not found" % (mysql_config.path,))
return data
mysql_config.path = "mysql_config"
def get_config():
import os, sys
from setup_common import get_metadata_and_options, enabled, create_release_file
metadata, options = get_metadata_and_options()
if 'mysql_config' in options:
mysql_config.path = options['mysql_config']
extra_objects = []
static = enabled(options, 'static')
if enabled(options, 'embedded'):
libs = mysql_config("libmysqld-libs")
client = "mysqld"
elif enabled(options, 'threadsafe'):
libs = mysql_config("libs_r")
client = "mysqlclient_r"
if not libs:
libs = mysql_config("libs")
client = "mysqlclient"
else:
libs = mysql_config("libs")
client = "mysqlclient"
library_dirs = [ dequote(i[2:]) for i in libs if i.startswith(compiler_flag("L")) ]
libraries = [ dequote(i[2:]) for i in libs if i.startswith(compiler_flag("l")) ]
removable_compile_args = [ compiler_flag(f) for f in "ILl" ]
extra_compile_args = [ i.replace("%", "%%") for i in mysql_config("cflags")
if i[:2] not in removable_compile_args ]
include_dirs = [ dequote(i[2:])
for i in mysql_config('include')
if i.startswith(compiler_flag('I')) ]
if not include_dirs: # fix for MySQL-3.23
include_dirs = [ dequote(i[2:])
for i in mysql_config('cflags')
if i.startswith(compiler_flag('I')) ]
if static:
extra_objects.append(os.path.join(
library_dirs[0],'lib%s.a' % client))
name = "MySQL-python"
if enabled(options, 'embedded'):
name = name + "-embedded"
metadata['name'] = name
define_macros = [
('version_info', metadata['version_info']),
('__version__', metadata['version']),
]
create_release_file(metadata)
del metadata['version_info']
ext_options = dict(
name = "_mysql",
library_dirs = library_dirs,
libraries = libraries,
extra_compile_args = extra_compile_args,
include_dirs = include_dirs,
extra_objects = extra_objects,
define_macros = define_macros,
)
return metadata, ext_options
if __name__ == "__main__":
print """You shouldn't be running this directly; it is used by setup.py."""
|
[
"bcwalrus@cloudera.com"
] |
bcwalrus@cloudera.com
|
cc5fa549674d5f7f51c7555601bb75a5d2f426f5
|
9b0bdebe81e558d3851609687e4ccd70ad026c7f
|
/数据结构/链表/10.链表相交.py
|
f407d7d61aadf869f8133d2993fdec96d025f9a8
|
[] |
no_license
|
lizenghui1121/DS_algorithms
|
645cdad007ccbbfa82cc5ca9e3fc7f543644ab21
|
9690efcfe70663670691de02962fb534161bfc8d
|
refs/heads/master
| 2022-12-13T22:45:23.108838
| 2020-09-07T13:40:17
| 2020-09-07T13:40:17
| 275,062,257
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 977
|
py
|
"""
@Author: Li Zenghui
@Date: 2020-07-17 14:47
"""
class ListNode:
def __init__(self, val):
self.val = val
self.next = None
def getIntersectionNode(headA, headB):
def getLength(head):
count = 0
while head:
count += 1
head = head.next
return count
l1 = getLength(headA)
l2 = getLength(headB)
if l1 > l2:
gap = l1 - l2
while gap > 0:
headA = headA.next
gap -= 1
if l2 > l1:
gap = l2 - l1
while gap > 0:
headB = headB.next
gap -= 1
while headA:
if headA == headB:
return headA
headB = headB.next
headA = headA.next
return None
if __name__ == '__main__':
n0 = ListNode(5)
n1 = ListNode(3)
n2 = ListNode(4)
n3 = ListNode(2)
m1 = ListNode(4)
n0.next = n1
n1.next = n2
n2.next = n3
m1.next = n2
    print(getIntersectionNode(n0, m1).val)  # 4: the two lists meet at node n2
|
[
"954267393@qq.com"
] |
954267393@qq.com
|
175dac1813e30dfc38d2330b3134a90625c9dacc
|
3d192f5ebe208a9603460d7bc248a5e983bd49e1
|
/main_list.py
|
dc59cc6d0f399323fb087d8c5a02697e1a5038f5
|
[] |
no_license
|
535521469/list_shc
|
43e83705127b601fdfa3f1688f450d9ddae96bb9
|
0d93baec3a64a532ab95498805247784262dd684
|
refs/heads/master
| 2021-01-15T21:10:03.980779
| 2013-11-28T05:44:17
| 2013-11-28T05:44:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,202
|
py
|
# encoding=utf8
'''
Created on 2013-3-20
@author: corleone
'''
from crawler.shc.fe.const import FEConstant as const
from multiprocessing import Process
from sched import scheduler
from scrapy.cmdline import execute
from scrapy.settings import CrawlerSettings
import collections
import datetime
import time
from bot.config import configdata
from const import ListSpiderConst, ScrapyConst, AppConst
import os
class SpiderProcess(Process):
def __init__(self, city_name, configdata):
Process.__init__(self)
self.city_name = city_name
self.configdata = dict(configdata)
self.configdata[const.CURRENT_CITY] = city_name
def run(self):
feconfig = self.configdata[const.FE_CONFIG]
try:
#=======================================================================
# if the city use the default config
#=======================================================================
city_config = eval(feconfig[self.city_name])
except Exception:
city_config = {}
start_page = city_config.get(const.START_PAGE,
feconfig[const.DEFAULT_START_PAGE])
end_page = city_config.get(const.END_PAGE,
feconfig[const.DEFAULT_END_PAGE])
# values = {
# const.CONFIG_DATA:self.configdata,
# const.START_PAGE:int(start_page),
# const.END_PAGE:int(end_page),
# }
# settings = u'crawler.shc.fe.settings'
# module_import = __import__(settings, {}, {}, [''])
# settings = CrawlerSettings(module_import, values=values)
# execute(argv=["scrapy", "crawl", 'SHCSpider' ], settings=settings)
values = configdata.get(ListSpiderConst.ListSettings, {})
values.update(**{
const.CONFIG_DATA:self.configdata,
const.START_PAGE:int(start_page),
const.END_PAGE:int(end_page),
})
if ScrapyConst.Console in values:
if values[ScrapyConst.Console] == u'1':# out to console
values[ScrapyConst.LOG_FILE] = None
else:
log_dir = values.get(ScrapyConst.LOG_DIR, os.getcwd())
if ScrapyConst.LOG_FILE in values:
log_file = values[ScrapyConst.LOG_FILE]
values[ScrapyConst.LOG_FILE] = os.sep.join([log_dir , log_file])
settings_path = u'crawler.shc.fe.settings'
module_import = __import__(settings_path, {}, {}, [''])
settings = CrawlerSettings(module_import, values=values)
execute(argv=["scrapy", "crawl", 'SHCSpider' ], settings=settings)
spider_process_mapping = {}
def add_task(root_scheduler):
city_names = configdata[const.FE_CONFIG][const.FE_CONFIG_CITIES].split(u',')
processes = collections.deque()
for city_name in city_names :
p = SpiderProcess(city_name, configdata)
spider_process_mapping[city_name] = p
processes.append(p)
if len(processes):
root_scheduler.enter(1, 1, check_add_process,
(spider_process_mapping, processes,
root_scheduler, configdata))
def check_add_process(spider_process_mapping, processes,
root_scheduler, configdata):
alives = filter(Process.is_alive, spider_process_mapping.values())
if len(processes):
pool_size = int(configdata[const.FE_CONFIG].get(const.MULTI, 1))
if len(alives) < pool_size:
p = processes.popleft()
print (u'%s enqueue %s ,pool size %d , %d cities '
'waiting ') % (datetime.datetime.now(), p.city_name,
pool_size, len(processes))
root_scheduler.enter(0, 1, p.start, ())
#=======================================================================
# check to add process 10 seconds later
#=======================================================================
if not len(processes):
print (u'%s all process enqueue ...' % datetime.datetime.now())
root_scheduler.enter(5, 1, check_add_process
, (spider_process_mapping, processes,
root_scheduler, configdata))
else:
if len(alives) == 0:
print ('%s crawl finished ... ' % datetime.datetime.now())
else :
root_scheduler.enter(5, 1, check_add_process
, (spider_process_mapping, processes,
root_scheduler, configdata))
if __name__ == '__main__':
frequence = configdata[AppConst.app_config].get(AppConst.app_config_frequence, 1800)
frequence = int(frequence)
while 1:
root_scheduler = scheduler(time.time, time.sleep)
root_scheduler.enter(0, 0, add_task, (root_scheduler,))
root_scheduler.run()
print u'%s sleep %s seconds' % (datetime.datetime.now(), frequence)
time.sleep(frequence)
|
[
"535521469@qq.com"
] |
535521469@qq.com
|