blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
84c465eb719d666ab94e9a1ae1ca92ab24943c5d | 8015f1c62a2cb4efd21aa8938336913bf8117868 | /bamap/ba4184.pngMap.py | 9a4c1b02c5c0a7471335847d2b8fcc84e3042564 | [] | no_license | GamerNoTitle/Beepers-and-OLED | 675b5e3c179df0f0e27b42bf594c43860d03b9af | afe1340e5394ae96bda5f9022a8a66824368091e | refs/heads/master | 2020-04-20T00:09:47.122471 | 2019-04-29T04:59:35 | 2019-04-29T04:59:35 | 168,515,579 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,468 | py | ba4184.pngMap = [
'11111111111111111111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'11111111111111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'11111111111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'11111111111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'11111111111111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'11111111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'11111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000',
'11111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000',
'11111111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000110000000000000000',
'11111111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000',
'11111111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001110000000000000',
'11111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011100000000000000',
'11111111100000000000000000000000000000000000000000000000000000000000000000001111111111000000000000000000000000001111000000000000',
'11111111100000000000000000000000000000000000000000000000000000000000000000011111111111100000000000000000000000001111000000000000',
'11111111000000000000000000000000000000000000000000000000000000000000000000011111111111100000000000000000000000000111110000000000',
'11111111000000000000000000000000000000000000000000000000000000000000000000111111111111100000000000000000000000000111110000000000',
'11111110000000000000000000000000000000000000000000000000000000000000000000101111111111000000000000000000000000001111111000000000',
'11111100000000000000000000000000000000000000000000000000000000000000000000011111111111000000000000000000000000001111111100000000',
'11111000000000000000000000000000000000000000000000000000000000000000000000000011111110000000000000000000000000001111111100000000',
'11110100000000000000000000000000000000000000000000000000000000000000000000001011101000000000000000000000000000001111111100000000',
'11110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111100000000',
'11110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111110000000',
'11110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111000000',
'11110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111000000',
'11110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111000000',
'11100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111000000',
'11100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111111100000',
'11000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111110000',
'11000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111110000',
'11000000000000000000000000000000000000000010000000000110000000000000000000000000000000000000000000000000011111111111111111100000',
'11000000000000000000000000000000000010000111111111111111111100000000000000000000000000000000000000000011111111111111111111100000',
'11000000000000000000000000000000000011111111111111111111111100000000000000000000000000000000000000000111111111111111111111100000',
'11000000000000000000000000000000011111111111111111111111111111110000000000000000000000000000000000101111111111111111111111100000',
'11000000000000000000000000000011111111111111111111111111111111111110000000000000000000000000000010111111111111111111111111100000',
'11000000000000000000000000001111111111111111111111111111111111111111111000000000000000000000001111111111111111111111111111110000',
'11000000000000000000000000001111111111111111111111111111111111111111111100010000000000000000001111111111111111111111111111110000',
'11100000000000000000000011111111111111111111111111111111111111111111111111111111000011110000001111111111111111111111111111100000',
'11100000000000000000000011111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000000',
'11110000000000000000000011111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000000',
'11110000000000000000010011111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000000',
'11110000000000000000001111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000000',
'11110000000000000000101111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000000',
'11110000000000000001111111111111111111111010011001111111111111111111111111111111111111111111111111111111111111111111111111000000',
'11110000000000000001111111111111111111111100000000111111111111111111111111111111111111111111111111111111111111111111111110000000',
'11111000000000000000111111111111111111110000000000001011111111111111111111111111111111111111111111111111111111111111111100000000',
'11111000000000000010111111111111111111100000000000000001111111111111111111111111111111111111111111111111111111111111111100000000',
'11111100000000000000111111111111111111100000000000000000111111111111111111111111111111111111111111111111111111111111111100000000',
'11111110000000000000111111111111111111000000000000000000111111111111111111111111111111111111111111111111111111111111111000000000',
'11111111000000000001111111111111111111100000000000000000111111111111111111111111111111111111111111111111111111111111110000000000',
'11111111000000000011111111111111111111100000000000000000111111111111111111111111111111111111111111111111111111111111110000000000',
'11111111100000000011111111111111111111110000000000000001111111111111111111111111111111111111111111111111111111111111000000000000',
'11111111110000000011111111111111111111110000000000000001111111111111111111111111111111111111111111111111111111111111000000000000',
'11111111111000000010111111111111111111111100000000000111111111111111111111111111111111111111111111111111111111111100000000000000',
'11111111111100000000111111111111111111111111000110101111111111111111111111111111111111111111111111111111111111111000000000000000',
'11111111111100000000111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000000000000000',
'11111111111110000000011111111111111111111111111111111111111111111111111111111111111111111111111111111111111111110000000000000000',
'11111111111111110000111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000000000000000000',
'11111111111111110000011111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000000000000000000',
'11111111111111111000001111111111111111111111111111111111111111111111111111111111111111111111111111111111111100000000000000000000',
'11111111111111111110000000111111111111111111111111111111111111111111111111111111111111111111111111111111111000000000000000000000',
'11111111111111111110000100111111111111111111111111111111111111111111111111111111111111111111111111111111110000000000000000000000',
'11111111111111111111011010111111111111111111111111111111111111111111111111111111111111111111111111111111000000000000000000000000',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100000000000000000000000000',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111000000000000000000000000000',
]
| [
"bili33@87ouo.top"
] | bili33@87ouo.top |
ddbde340dbd330040e85b9a914a191527fc52717 | e905abd9bb7bd7017657d0a0c4d724d16e37044c | /.history/article/spiders/sciencedirect_20201230010225.py | 787c70ae8c70e5f466306bdf5a5c7e93e8e15343 | [] | no_license | tabdelbari/articles | a8b921841f84fb473f5ed1cdcda743863e6bc246 | f0e1dfdc9e818e43095933139b6379a232647898 | refs/heads/main | 2023-03-05T10:21:35.565767 | 2021-02-10T13:35:14 | 2021-02-10T13:35:14 | 325,654,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,610 | py | import scrapy
import logging
import re
from scrapy_splash import SplashRequest
from article.items import ArticleItem
class SciencedirectSpider(scrapy.Spider):
name = 'sciencedirect'
    allowed_domains = ['sciencedirect.com']
custom_settings = {
'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'}
def __init__(self, topic='', keywords='', **kwargs):
super().__init__(**kwargs)
self.start_urls = ['https://www.sciencedirect.com/search?qs=%s' %keywords]
self.topic = topic
def start_requests(self):
for url in self.start_urls:
yield SplashRequest(url, callback=self.find_articles, args={ 'wait': 4 })
def find_articles(self, response):
# logging.info(response.text)
articles_urls = response.xpath('//*/div/h2/span/a/@href').getall()
logging.info(f'{len(articles_urls)} articles found')
for article_url in articles_urls:
article_url = 'https://www.sciencedirect.com' + article_url
yield SplashRequest(article_url, callback=self.parse_article, args={ 'wait': 4 })
next_page = response.xpath('//*[@id="srp-pagination"]/li[@class="pagination-link next-link"]/a/@href').get(default='')
        if next_page != '':
            logging.info('Next page found')
            next_page = 'https://www.sciencedirect.com' + next_page
            yield SplashRequest(next_page, callback=self.find_articles)
def parse_article(self, response):
article = ArticleItem()
logging.info('Processing --> ' + response.url)
        # scrapy Items use dict-style field access; attribute assignment on a
        # scrapy.Item subclass raises AttributeError
        article['title'] = response.xpath('//*/article/h1/span').get(default='')
        authors = []
        authors_surnames = response.xpath('//*/div[@class="author-group"]/a/span/span[@class="text surname"]').getall()
        authors_givennames = response.xpath('//*/div[@class="author-group"]/a/span/span[@class="text given-name"]').getall()
        # zip() stops at the shorter list, so mismatched xpath results cannot
        # raise an IndexError here
        for surname, given_name in zip(authors_surnames, authors_givennames):
            authors.append(surname + ' ' + given_name)
        article['authors'] = '|'.join(authors)
        article['country'] = ''
        article['abstract'] = response.xpath('//*/div[@class="abstract author"]/div/p').get(default='')
        pub_info = response.xpath('//*/div[@class="Publication"]/div/div').get(default='')
        # guard against a missing publication line before indexing the split
        article['date_pub'] = pub_info.split(',')[1] if ',' in pub_info else ''
        article['journal'] = response.xpath('//*/div[@class="Publication"]/div/h2').get(default='')
        article['topic'] = self.topic
        article['latitude'] = ''
        article['longitude'] = ''
yield article
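# Minimal run sketch (assumes a Splash instance is reachable and SPLASH_URL is
# set in the project's settings.py -- both assumptions, not shown in this file):
#   scrapy crawl sciencedirect -a topic="oceanography" -a keywords="plankton"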
| [
"abdelbari1996@hotmail.com"
] | abdelbari1996@hotmail.com |
b332da6e7afdf60dcfe5a0805699a6f76bed5000 | 1f70e6c069074d848347cfb6674b1376a323aae2 | /design/observer.py | eaf9c1e098a2f388539bda2ee30218482dd76cd3 | [] | no_license | TalentBoy2333/python_study | 5b3bf172a4bb04bd0ee05c24af7a223470ff78ca | 703d2ff4d9fe18c9c5b801c3784e5e8f0845a3a7 | refs/heads/master | 2023-05-25T15:27:22.315664 | 2021-06-14T08:16:50 | 2021-06-14T08:16:50 | 357,243,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,309 | py | ''' 观察者模式
'''
from abc import ABCMeta, abstractmethod
class Observer(metaclass=ABCMeta):  # subscriber
    # update() is an ordinary instance method, so abstractmethod (not the
    # deprecated abstractclassmethod) is the right decorator
    @abstractmethod
def update(self, notice):
pass
class Notice:  # publisher
def __init__(self):
self.observers = []
def attach(self, obs):
self.observers.append(obs)
def detach(self, obs):
self.observers.remove(obs)
def notify(self):
for obs in self.observers:
obs.update(self)
class StaffNotice(Notice):
def __init__(self, company_info=None):
super().__init__()
self.__company_info = company_info
@property
def company_info(self):
return self.__company_info
@company_info.setter
def company_info(self, info):
self.__company_info = info
self.notify()
class Staff(Observer):
def __init__(self):
self.company_info = None
def update(self, notice):
self.company_info = notice.company_info
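# Demo: the expected console output of the lines below, traced by hand
# (treat as a sketch):
#   None
#   None
#   money
#   money
#   holiday
#   money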
notice = StaffNotice('init')
s1 = Staff()
s2 = Staff()
notice.attach(s1)
notice.attach(s2)
print(s1.company_info)
print(s2.company_info)
notice.company_info = 'money'
print(s1.company_info)
print(s2.company_info)
notice.detach(s2)
notice.company_info = 'holiday'
print(s1.company_info)
print(s2.company_info) | [
"957498562@qq.com"
] | 957498562@qq.com |
300dff82aea77ac86bbac47b0ee397a4503a22cc | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_20/models/replica_link_performance_replication.py | f928f8dbcd694fd51ffcb2543e1012ad8e850d8a | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 6,750 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.20
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_20 import models
class ReplicaLinkPerformanceReplication(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'bytes_per_sec_from_remote': 'int',
'bytes_per_sec_to_remote': 'int',
'bytes_per_sec_total': 'int',
'direction': 'str',
'local_pod': 'FixedReference',
'remote_pod': 'FixedReference',
'remotes': 'list[FixedReference]',
'time': 'int'
}
attribute_map = {
'bytes_per_sec_from_remote': 'bytes_per_sec_from_remote',
'bytes_per_sec_to_remote': 'bytes_per_sec_to_remote',
'bytes_per_sec_total': 'bytes_per_sec_total',
'direction': 'direction',
'local_pod': 'local_pod',
'remote_pod': 'remote_pod',
'remotes': 'remotes',
'time': 'time'
}
required_args = {
}
def __init__(
self,
bytes_per_sec_from_remote=None, # type: int
bytes_per_sec_to_remote=None, # type: int
bytes_per_sec_total=None, # type: int
direction=None, # type: str
local_pod=None, # type: models.FixedReference
remote_pod=None, # type: models.FixedReference
remotes=None, # type: List[models.FixedReference]
time=None, # type: int
):
"""
Keyword args:
bytes_per_sec_from_remote (int): The number of bytes received per second from a remote array.
bytes_per_sec_to_remote (int): The number of bytes transmitted per second to a remote array.
bytes_per_sec_total (int): Total bytes transmitted and received per second.
direction (str): The direction of replication. Valid values are `inbound` and `outbound`.
local_pod (FixedReference): Reference to a local pod.
remote_pod (FixedReference): Reference to a remote pod.
remotes (list[FixedReference]): Reference to a remote array.
time (int): Sample time in milliseconds since the UNIX epoch.
"""
if bytes_per_sec_from_remote is not None:
self.bytes_per_sec_from_remote = bytes_per_sec_from_remote
if bytes_per_sec_to_remote is not None:
self.bytes_per_sec_to_remote = bytes_per_sec_to_remote
if bytes_per_sec_total is not None:
self.bytes_per_sec_total = bytes_per_sec_total
if direction is not None:
self.direction = direction
if local_pod is not None:
self.local_pod = local_pod
if remote_pod is not None:
self.remote_pod = remote_pod
if remotes is not None:
self.remotes = remotes
if time is not None:
self.time = time
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReplicaLinkPerformanceReplication`".format(key))
if key == "bytes_per_sec_from_remote" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_sec_from_remote`, must be a value greater than or equal to `0`")
if key == "bytes_per_sec_to_remote" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_sec_to_remote`, must be a value greater than or equal to `0`")
if key == "bytes_per_sec_total" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_sec_total`, must be a value greater than or equal to `0`")
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReplicaLinkPerformanceReplication`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReplicaLinkPerformanceReplication`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReplicaLinkPerformanceReplication`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ReplicaLinkPerformanceReplication, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReplicaLinkPerformanceReplication):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"noreply@github.com"
] | PureStorage-OpenConnect.noreply@github.com |
579af1e21a4071f2e575bb86773f1afa24a2881a | d4ce80ad43b488af5b46ebb5f6af4ff656ebca7f | /wscript | e95b2dc39b53310355b242079965c17722d32058 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-mit-taylor-variant"
] | permissive | johne53/MB3Suil | 14f7b78becfa56c8e486f646f653fe40381d7f47 | 0730cc5f9a4d679131ada9e1e84e4f89f7026993 | refs/heads/master | 2021-07-03T16:47:07.544271 | 2021-06-04T14:16:56 | 2021-06-04T14:16:56 | 80,009,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,796 | #!/usr/bin/env python
from waflib import Build, Logs, Options, TaskGen
from waflib.extras import autowaf
# Semver package/library version
SUIL_VERSION = '0.10.11'
SUIL_MAJOR_VERSION = SUIL_VERSION[0:SUIL_VERSION.find('.')]
# Mandatory waf variables
APPNAME = 'suil' # Package name for waf dist
VERSION = SUIL_VERSION # Package version for waf dist
top = '.' # Source directory
out = 'build' # Build directory
# Release variables
uri = 'http://drobilla.net/sw/suil'
dist_pattern = 'http://download.drobilla.net/suil-%d.%d.%d.tar.bz2'
post_tags = ['Hacking', 'LAD', 'LV2', 'Suil']
def options(ctx):
ctx.load('compiler_c')
ctx.load('compiler_cxx')
opt = ctx.configuration_options()
opt.add_option('--gtk2-lib-name', type='string', dest='gtk2_lib_name',
default="libgtk-x11-2.0.so.0",
help="Gtk2 library name [Default: libgtk-x11-2.0.so.0]")
opt.add_option('--gtk3-lib-name', type='string', dest='gtk3_lib_name',
default="libgtk-x11-3.0.so.0",
help="Gtk3 library name [Default: libgtk-x11-3.0.so.0]")
ctx.add_flags(
opt,
{'static': 'build static library',
'no-shared': 'do not build shared library',
'no-cocoa': 'do not build support for Cocoa/Quartz',
'no-gtk': 'do not build support for Gtk',
'no-qt': 'do not build support for Qt (any version)',
'no-qt5': 'do not build support for Qt5',
'no-x11': 'do not build support for X11'})
def configure(conf):
conf.load('compiler_c', cache=True)
conf.load('compiler_cxx', cache=True)
conf.load('autowaf', cache=True)
autowaf.set_c_lang(conf, 'c99')
conf.env.BUILD_SHARED = not conf.options.no_shared
conf.env.BUILD_STATIC = conf.options.static
if not conf.env.BUILD_SHARED and not conf.env.BUILD_STATIC:
conf.fatal('Neither a shared nor a static build requested')
if conf.env.DOCS:
conf.load('sphinx')
if Options.options.strict:
# Check for programs used by lint target
conf.find_program("flake8", var="FLAKE8", mandatory=False)
conf.find_program("clang-tidy", var="CLANG_TIDY", mandatory=False)
conf.find_program("iwyu_tool", var="IWYU_TOOL", mandatory=False)
if Options.options.ultra_strict:
autowaf.add_compiler_flags(conf.env, '*', {
'gcc': [
'-Wno-padded',
'-Wno-suggest-attribute=const',
'-Wno-suggest-attribute=pure',
],
'clang': [
'-Wno-cast-qual',
'-Wno-disabled-macro-expansion',
'-Wno-padded',
]
})
autowaf.add_compiler_flags(conf.env, 'c', {
'msvc': [
'/wd4514', # unreferenced inline function has been removed
'/wd4820', # padding added after construct
'/wd4191', # unsafe function conversion
'/wd5045', # will insert Spectre mitigation for memory load
],
})
conf.env.NODELETE_FLAGS = []
if (not conf.env.MSVC_COMPILER and
conf.check(linkflags = ['-Wl,-z,nodelete'],
                   msg = 'Checking for link flags -Wl,-z,nodelete',
mandatory = False)):
conf.env.NODELETE_FLAGS = ['-Wl,-z,nodelete']
conf.check_pkg('lv2 >= 1.16.0', uselib_store='LV2')
if not conf.options.no_x11:
conf.check_pkg('x11', uselib_store='X11', system=True, mandatory=False)
def enable_module(var_name):
conf.env[var_name] = 1
if not conf.options.no_gtk:
conf.check_pkg('gtk+-2.0 >= 2.18.0',
uselib_store='GTK2',
system=True,
mandatory=False)
if not conf.env.HAVE_GTK2:
conf.check_pkg('gtk+-2.0',
uselib_store='GTK2',
system=True,
mandatory=False)
if conf.env.HAVE_GTK2:
conf.define('SUIL_OLD_GTK', 1)
if not conf.options.no_x11:
conf.check_pkg('gtk+-x11-2.0',
uselib_store='GTK2_X11',
system=True,
mandatory=False)
if not conf.options.no_cocoa:
conf.check_pkg('gtk+-quartz-2.0',
uselib_store='GTK2_QUARTZ',
system=True,
mandatory=False)
conf.check_pkg('gtk+-3.0 >= 3.14.0',
uselib_store='GTK3',
system=True,
mandatory=False)
if not conf.options.no_x11:
conf.check_pkg('gtk+-x11-3.0 >= 3.14.0',
uselib_store='GTK3_X11',
system=True,
mandatory=False)
if not conf.options.no_qt:
if not conf.options.no_qt5:
conf.check_pkg('Qt5Widgets >= 5.1.0',
uselib_store='QT5',
system=True,
mandatory=False)
if not conf.options.no_x11:
conf.check_pkg('Qt5X11Extras >= 5.1.0',
uselib_store='QT5_X11',
system=True,
mandatory=False)
if not conf.options.no_cocoa:
if conf.check_cxx(header_name = 'QMacCocoaViewContainer',
uselib = 'QT5_COCOA',
system=True,
mandatory = False):
enable_module('SUIL_WITH_COCOA_IN_QT5')
conf.check_cc(define_name = 'HAVE_LIBDL',
lib = 'dl',
mandatory = False)
conf.define('SUIL_MODULE_DIR',
conf.env.LIBDIR + '/suil-' + SUIL_MAJOR_VERSION)
conf.define('SUIL_GTK2_LIB_NAME', conf.options.gtk2_lib_name)
conf.define('SUIL_GTK3_LIB_NAME', conf.options.gtk3_lib_name)
if conf.env.HAVE_GTK2 and conf.env.HAVE_QT5:
enable_module('SUIL_WITH_GTK2_IN_QT5')
enable_module('SUIL_WITH_QT5_IN_GTK2')
if conf.env.HAVE_GTK2 and conf.env.HAVE_GTK2_X11:
enable_module('SUIL_WITH_X11_IN_GTK2')
if conf.env.HAVE_GTK3 and conf.env.HAVE_GTK3_X11:
enable_module('SUIL_WITH_X11_IN_GTK3')
if conf.env.HAVE_GTK3 and conf.env.HAVE_QT5:
enable_module('SUIL_WITH_QT5_IN_GTK3')
if conf.env.HAVE_GTK2 and conf.env.HAVE_GTK2_QUARTZ:
enable_module('SUIL_WITH_COCOA_IN_GTK2')
if conf.env.HAVE_GTK2 and conf.env.DEST_OS == 'win32':
enable_module('SUIL_WITH_WIN_IN_GTK2')
if conf.env.HAVE_QT5 and conf.env.HAVE_QT5_X11:
enable_module('SUIL_WITH_X11_IN_QT5')
if conf.env.HAVE_X11:
enable_module('SUIL_WITH_X11')
conf.run_env.append_unique('SUIL_MODULE_DIR', [conf.build_path()])
# Set up environment for building/using as a subproject
autowaf.set_lib_env(conf, 'suil', SUIL_VERSION,
include_path=str(conf.path.find_node('include')))
conf.define('SUIL_NO_DEFAULT_CONFIG', 1)
autowaf.display_summary(
conf,
{'Static library': bool(conf.env.BUILD_STATIC),
'Shared library': bool(conf.env.BUILD_SHARED)})
if conf.env.HAVE_GTK2:
autowaf.display_msg(conf, "Gtk2 Library Name",
conf.get_define('SUIL_GTK2_LIB_NAME'))
if conf.env.HAVE_GTK3:
autowaf.display_msg(conf, "Gtk3 Library Name",
conf.get_define('SUIL_GTK3_LIB_NAME'))
# Print summary message for every potentially supported wrapper
wrappers = [('cocoa', 'gtk2'),
('gtk2', 'qt5'),
('qt5', 'gtk2'),
('win', 'gtk2'),
('x11', 'gtk2'),
('x11', 'gtk3'),
('qt5', 'gtk3'),
('x11', 'qt5'),
('cocoa', 'qt5')]
for w in wrappers:
var = 'SUIL_WITH_%s_IN_%s' % (w[0].upper(), w[1].upper())
autowaf.display_msg(conf, 'Support for %s in %s' % (w[0], w[1]),
bool(conf.env[var]))
def build(bld):
# C Headers
includedir = '${INCLUDEDIR}/suil-%s/suil' % SUIL_MAJOR_VERSION
bld.install_files(includedir, bld.path.ant_glob('include/suil/*.h'))
TaskGen.task_gen.mappings['.mm'] = TaskGen.task_gen.mappings['.cc']
# Pkgconfig file
autowaf.build_pc(bld, 'SUIL', SUIL_VERSION, SUIL_MAJOR_VERSION, [],
{'SUIL_MAJOR_VERSION': SUIL_MAJOR_VERSION,
'SUIL_PKG_DEPS': 'lv2'})
cflags = []
lib = []
modlib = []
if bld.env.DEST_OS == 'win32':
modlib += ['user32']
else:
cflags += ['-fvisibility=hidden']
if bld.is_defined('HAVE_LIBDL'):
lib += ['dl']
modlib += ['dl']
module_dir = '${LIBDIR}/suil-' + SUIL_MAJOR_VERSION
# Shared Library
if bld.env.BUILD_SHARED:
bld(features = 'c cshlib',
export_includes = ['include'],
source = 'src/host.c src/instance.c',
target = 'suil-%s' % SUIL_MAJOR_VERSION,
includes = ['.', 'include'],
defines = ['SUIL_INTERNAL'],
name = 'libsuil',
vnum = SUIL_VERSION,
install_path = '${LIBDIR}',
cflags = cflags,
lib = lib,
uselib = 'LV2')
# Static library
if bld.env.BUILD_STATIC:
bld(features = 'c cstlib',
export_includes = ['include'],
source = 'src/host.c src/instance.c',
target = 'suil-%s' % SUIL_MAJOR_VERSION,
includes = ['.', 'include'],
defines = ['SUIL_STATIC', 'SUIL_INTERNAL'],
name = 'libsuil_static',
vnum = SUIL_VERSION,
install_path = '${LIBDIR}',
cflags = cflags,
lib = lib,
uselib = 'LV2')
if bld.env.SUIL_WITH_GTK2_IN_QT5:
bld(features = 'cxx cxxshlib',
source = 'src/gtk2_in_qt5.cpp',
target = 'suil_gtk2_in_qt5',
includes = ['.', 'include'],
defines = ['SUIL_INTERNAL'],
install_path = module_dir,
cxxflags = cflags,
lib = modlib,
uselib = 'GTK2 QT5 LV2')
if bld.env.SUIL_WITH_QT5_IN_GTK2:
bld(features = 'cxx cxxshlib',
source = 'src/qt5_in_gtk.cpp',
target = 'suil_qt5_in_gtk2',
includes = ['.', 'include'],
defines = ['SUIL_INTERNAL'],
install_path = module_dir,
cxxflags = cflags,
lib = modlib,
uselib = 'GTK2 QT5 LV2',
linkflags = bld.env.NODELETE_FLAGS)
if bld.env.SUIL_WITH_X11_IN_GTK2:
bld(features = 'c cshlib',
source = 'src/x11_in_gtk2.c',
target = 'suil_x11_in_gtk2',
includes = ['.', 'include'],
defines = ['SUIL_INTERNAL'],
install_path = module_dir,
cflags = cflags,
lib = modlib + ['X11'],
uselib = 'GTK2 GTK2_X11 LV2',
linkflags = bld.env.NODELETE_FLAGS)
if bld.env.SUIL_WITH_X11_IN_GTK3:
bld(features = 'c cshlib',
source = 'src/x11_in_gtk3.c',
target = 'suil_x11_in_gtk3',
includes = ['.', 'include'],
defines = ['SUIL_INTERNAL'],
install_path = module_dir,
cflags = cflags,
lib = modlib + ['X11'],
uselib = 'GTK3 GTK3_X11 LV2',
linkflags = bld.env.NODELETE_FLAGS)
if bld.env.SUIL_WITH_QT5_IN_GTK3:
bld(features = 'cxx cxxshlib',
source = 'src/qt5_in_gtk.cpp',
target = 'suil_qt5_in_gtk3',
includes = ['.', 'include'],
defines = ['SUIL_INTERNAL'],
install_path = module_dir,
cflags = cflags,
lib = modlib,
uselib = 'GTK3 QT5 LV2',
linkflags = bld.env.NODELETE_FLAGS)
if bld.env.SUIL_WITH_COCOA_IN_GTK2:
bld(features = 'cxx cshlib',
source = 'src/cocoa_in_gtk2.mm',
target = 'suil_cocoa_in_gtk2',
includes = ['.', 'include'],
defines = ['SUIL_INTERNAL'],
install_path = module_dir,
cflags = cflags,
lib = modlib,
uselib = 'GTK2 LV2',
linkflags = ['-framework', 'Cocoa'])
if bld.env.SUIL_WITH_WIN_IN_GTK2:
bld(features = 'cxx cxxshlib',
source = 'src/win_in_gtk2.cpp',
target = 'suil_win_in_gtk2',
includes = ['.', 'include'],
defines = ['SUIL_INTERNAL'],
install_path = module_dir,
cflags = cflags,
lib = modlib,
uselib = 'GTK2 LV2',
linkflags = bld.env.NODELETE_FLAGS)
if bld.env.SUIL_WITH_X11_IN_QT5:
bld(features = 'cxx cxxshlib',
source = 'src/x11_in_qt5.cpp',
target = 'suil_x11_in_qt5',
includes = ['.', 'include'],
defines = ['SUIL_INTERNAL'],
install_path = module_dir,
cflags = cflags,
lib = modlib,
uselib = 'QT5 QT5_X11 LV2 X11')
if bld.env.SUIL_WITH_COCOA_IN_QT5:
bld(features = 'cxx cxxshlib',
source = 'src/cocoa_in_qt5.mm',
target = 'suil_cocoa_in_qt5',
includes = ['.', 'include'],
defines = ['SUIL_INTERNAL'],
install_path = module_dir,
cflags = cflags,
lib = modlib,
uselib = 'QT5 QT5_COCOA LV2',
linkflags = ['-framework', 'Cocoa'])
if bld.env.SUIL_WITH_X11:
bld(features = 'c cshlib',
source = 'src/x11.c',
target = 'suil_x11',
includes = ['.', 'include'],
defines = ['SUIL_INTERNAL'],
install_path = module_dir,
cflags = cflags,
lib = modlib,
uselib = 'X11 LV2')
# Documentation
if bld.env.DOCS:
bld.recurse('doc/c')
bld.add_post_fun(autowaf.run_ldconfig)
class LintContext(Build.BuildContext):
fun = cmd = 'lint'
def lint(ctx):
"checks code for style issues"
import glob
import os
import subprocess
import sys
st = 0
if "FLAKE8" in ctx.env:
Logs.info("Running flake8")
st = subprocess.call([ctx.env.FLAKE8[0],
"wscript",
"--ignore",
"E101,E129,W191,E221,W504,E251,E241,E741"])
else:
Logs.warn("Not running flake8")
if "IWYU_TOOL" in ctx.env:
Logs.info("Running include-what-you-use")
qt_mapping_file = "/usr/share/include-what-you-use/qt5_11.imp"
extra_args = []
if os.path.exists(qt_mapping_file):
extra_args += ["--", "-Xiwyu", "--mapping_file=" + qt_mapping_file]
cmd = [ctx.env.IWYU_TOOL[0], "-o", "clang", "-p", "build"] + extra_args
output = subprocess.check_output(cmd).decode('utf-8')
if 'error: ' in output:
sys.stdout.write(output)
st += 1
else:
Logs.warn("Not running include-what-you-use")
if "CLANG_TIDY" in ctx.env and "clang" in ctx.env.CC[0]:
Logs.info("Running clang-tidy")
sources = glob.glob('src/*.c') + glob.glob('tests/*.c')
sources = list(map(os.path.abspath, sources))
procs = []
for source in sources:
cmd = [ctx.env.CLANG_TIDY[0], "--quiet", "-p=.", source]
procs += [subprocess.Popen(cmd, cwd="build")]
for proc in procs:
stdout, stderr = proc.communicate()
st += proc.returncode
else:
Logs.warn("Not running clang-tidy")
if st != 0:
sys.exit(st)
def dist(ctx):
ctx.base_path = ctx.path
ctx.excl = ctx.get_excl() + ' .gitmodules'
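# Typical waf workflow for this script (standard waf usage, assumed here rather
# than taken from project documentation):
#   ./waf configure --prefix=/usr --no-qt
#   ./waf build
#   ./waf install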
| [
"d@drobilla.net"
] | d@drobilla.net | |
57b9200cadbd23bee5cbfb3057e293fd71831270 | c46407b9924351d794a0c28f498b4c74063f9b7b | /setup.py | 081220dbbcf8b8027b461700db64f086626ca1eb | [] | no_license | cprecioso/lektor-surge | fb317ae55c92230fdd607a173d17eecd71916e82 | da7cacda356f045cb71663c90c3139ff0e4b5451 | refs/heads/master | 2021-01-13T09:35:47.945391 | 2016-10-27T00:26:06 | 2016-10-27T00:26:06 | 72,056,084 | 0 | 0 | null | 2016-10-27T00:25:41 | 2016-10-27T00:25:40 | null | UTF-8 | Python | false | false | 409 | py | from setuptools import setup
setup(
name='lektor-surge',
version='0.2+',
author=u'A. Jesse Jiryu Davis',
author_email='jesse@emptysquare.net',
license='MIT',
py_modules=['lektor_surge'],
install_requires=['Lektor'],
url='https://github.com/ajdavis/lektor-surge',
entry_points={
'lektor.plugins': [
'surge = lektor_surge:SurgePlugin',
]
}
)
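# Lektor plugins are usually added per project rather than installed globally;
# assuming a standard Lektor setup, something like:
#   lektor plugins add lektor-surge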
| [
"jesse@mongodb.com"
] | jesse@mongodb.com |
3f40a60b63fa2afebd92f23a45a3e7f418ae4644 | a3746020cf091f433beb41bde1b62818b4de569b | /past/rule_analysis/rule/text/check_lob_using.py | 7f1b4038034ddcd1a4e51f098a17ade8e31b68f3 | [] | no_license | kk71/sqlaudit | 59bab5765a67f56f1dd2f3103812051c5acbbc49 | 747aaa02573a9c2b46a9e14415d27c0ab8e6158c | refs/heads/master | 2023-02-04T18:38:46.125746 | 2020-06-05T09:49:46 | 2020-06-05T09:49:46 | 323,559,338 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # Author: kk.Fang(fkfkbill@gmail.com)
import re
from .utils import judge_if_ddl
def execute_rule(sql, db_model=None, **kwargs):
if not judge_if_ddl(sql):
return False
if not re.search(r"create\s+table", sql, re.I) and not re.search(r"alter\s+table", sql, re.I):
return False
if any([x in sql.lower() for x in ['blob', 'clob', 'bfile', 'xmltype']]):
        # return "LOB columns are not recommended on frequently accessed tables"
return True
return False
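# Illustrative behaviour (hypothetical SQL statements):
#   execute_rule("create table t (doc clob)")   -> True   (LOB column flagged)
#   execute_rule("create table t (id number)")  -> False
#   execute_rule("select * from t")             -> False  (not a DDL statement)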
| [
"fkfkbill@gmail.com"
] | fkfkbill@gmail.com |
f1115291106bbc3302ed73f4d698bd8e138e850f | bafb1c203362a9711f783115c7c573fdcd00a3d4 | /venv/Lib/site-packages/kivy/tests/test_clipboard.py | e9d8617a21f5b50010b3650d2a9d6d4bfb0df15d | [] | no_license | santokalayil/kivy_android_test_project | 0c41c40f6c8869767729cd153f4ce31ac09c0f1c | a4283ba4f4ca8961b2689ee7150297349aedb897 | refs/heads/main | 2023-04-14T08:18:40.453585 | 2021-04-27T19:15:21 | 2021-04-27T19:15:21 | 362,220,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,281 | py | from kivy.tests.common import GraphicUnitTest
class ClipboardTestCase(GraphicUnitTest):
def setUp(self):
from kivy.core.clipboard import Clipboard
self._clippy = Clipboard
clippy_types = Clipboard.get_types()
cliptype = clippy_types[0]
if 'UTF8_STRING' in clippy_types:
cliptype = 'UTF8_STRING'
self._cliptype = cliptype
super(ClipboardTestCase, self).setUp()
def test_clipboard_not_dummy(self):
clippy = self._clippy
if clippy.__class__.__name__ == 'ClipboardDummy':
self.fail('Something went wrong "dummy" clipboard is being used')
def test_clipboard_paste(self):
clippy = self._clippy
try:
clippy.paste()
        except Exception:
            self.fail(
                'Cannot get data from the clipboard')
def test_clipboard_copy(self):
clippy = self._clippy
try:
clippy.copy(u"Hello World")
        except Exception:
            self.fail(
                'Cannot put data to the clipboard')
def test_clipboard_copy_paste(self):
clippy = self._clippy
txt1 = u"Hello 1"
clippy.copy(txt1)
ret = clippy.paste()
self.assertEqual(txt1, ret)
| [
"49450970+santokalayil@users.noreply.github.com"
] | 49450970+santokalayil@users.noreply.github.com |
cfef430b6bdd42f1a86c075b9f5b7da9eb77d3f5 | cf58614c12802286e4e416ef7b651ab6431f5b68 | /src/zojax/persistentlayout/information.py | b18f07a7c83d8d58666fa61a62cd8b7b1d61296b | [
"ZPL-2.1"
] | permissive | Zojax/zojax.persistentlayout | 3a31df0ac17e5b633a681b0bcf038c951fcba1fd | 94f05af015d69a1452a4bdc7f34d90db5acabc64 | refs/heads/master | 2021-01-01T18:55:56.560619 | 2011-08-09T19:31:12 | 2011-08-09T19:31:12 | 2,026,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | ##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
$Id$
"""
from zope import interface
from interfaces import ILayoutInformation
class LayoutInformation(object):
interface.Interface(ILayoutInformation)
def __init__(self, uid, name, view, context, layer, layoutclass):
self.uid = uid
self.name = name
self.view = view
self.context = context
self.layer = layer
self.layoutclass = layoutclass
| [
"andrey.fedoseev@gmail.com"
] | andrey.fedoseev@gmail.com |
316a4bc464ee57433c6c147d9feed9d7d64b64f6 | c68aea1de91b46ae684792123c61e84c44ea0266 | /code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/distutils/command/register.py | 86343c8017bf0a891d4fbdb449dbd4a5c971576e | [
"Apache-2.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-python-cwi",
"Python-2.0"
] | permissive | Winfredemalx54/algorithm-challenger-1 | 12e23bed89ca889701db1b17ac540ce62ce86d8e | 761c2c39e041fb155f853385998d5c6318a39913 | refs/heads/master | 2022-11-22T15:03:01.548605 | 2020-07-11T12:26:31 | 2020-07-11T12:26:31 | 297,955,141 | 3 | 0 | Apache-2.0 | 2020-09-23T11:58:19 | 2020-09-23T11:58:18 | null | UTF-8 | Python | false | false | 11,733 | py | """distutils.command.register
Implements the Distutils 'register' command (register with the repository).
"""
# created 2002/10/21, Richard Jones
import os, string, getpass
import io
import urllib.parse, urllib.request
from warnings import warn
from distutils.core import PyPIRCCommand
from distutils.errors import *
from distutils import log
class register(PyPIRCCommand):
description = ("register the distribution with the Python package index")
user_options = PyPIRCCommand.user_options + [
('list-classifiers', None,
'list the valid Trove classifiers'),
('strict', None ,
'Will stop the registering if the meta-data are not fully compliant')
]
boolean_options = PyPIRCCommand.boolean_options + [
'verify', 'list-classifiers', 'strict']
sub_commands = [('check', lambda self: True)]
def initialize_options(self):
PyPIRCCommand.initialize_options(self)
self.list_classifiers = 0
self.strict = 0
def finalize_options(self):
PyPIRCCommand.finalize_options(self)
# setting options for the `check` subcommand
check_options = {'strict': ('register', self.strict),
'restructuredtext': ('register', 1)}
self.distribution.command_options['check'] = check_options
def run(self):
self.finalize_options()
self._set_config()
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
if self.dry_run:
self.verify_metadata()
elif self.list_classifiers:
self.classifiers()
else:
self.send_metadata()
def check_metadata(self):
"""Deprecated API."""
warn("distutils.command.register.check_metadata is deprecated, \
use the check command instead", PendingDeprecationWarning)
check = self.distribution.get_command_obj('check')
check.ensure_finalized()
check.strict = self.strict
check.restructuredtext = 1
check.run()
def _set_config(self):
''' Reads the configuration file and set attributes.
'''
config = self._read_pypirc()
if config != {}:
self.username = config['username']
self.password = config['password']
self.repository = config['repository']
self.realm = config['realm']
self.has_config = True
else:
if self.repository not in ('pypi', self.DEFAULT_REPOSITORY):
raise ValueError('%s not found in .pypirc' % self.repository)
if self.repository == 'pypi':
self.repository = self.DEFAULT_REPOSITORY
self.has_config = False
def classifiers(self):
''' Fetch the list of classifiers from the server.
'''
url = self.repository+'?:action=list_classifiers'
response = urllib.request.urlopen(url)
log.info(self._read_pypi_response(response))
def verify_metadata(self):
''' Send the metadata to the package index server to be checked.
'''
# send the info to the server and report the result
(code, result) = self.post_to_server(self.build_post_data('verify'))
log.info('Server response (%s): %s' % (code, result))
def send_metadata(self):
''' Send the metadata to the package index server.
Well, do the following:
1. figure who the user is, and then
2. send the data as a Basic auth'ed POST.
First we try to read the username/password from $HOME/.pypirc,
which is a ConfigParser-formatted file with a section
[distutils] containing username and password entries (both
in clear text). Eg:
[distutils]
index-servers =
pypi
[pypi]
username: fred
password: sekrit
Otherwise, to figure who the user is, we offer the user three
choices:
1. use existing login,
2. register as a new user, or
3. set the password to a random string and email the user.
'''
# see if we can short-cut and get the username/password from the
# config
if self.has_config:
choice = '1'
username = self.username
password = self.password
else:
choice = 'x'
username = password = ''
# get the user's login info
choices = '1 2 3 4'.split()
while choice not in choices:
self.announce('''\
We need to know who you are, so please choose either:
1. use your existing login,
2. register as a new user,
3. have the server generate a new password for you (and email it to you), or
4. quit
Your selection [default 1]: ''', log.INFO)
choice = input()
if not choice:
choice = '1'
elif choice not in choices:
print('Please choose one of the four options!')
if choice == '1':
# get the username and password
while not username:
username = input('Username: ')
while not password:
password = getpass.getpass('Password: ')
# set up the authentication
auth = urllib.request.HTTPPasswordMgr()
host = urllib.parse.urlparse(self.repository)[1]
auth.add_password(self.realm, host, username, password)
# send the info to the server and report the result
code, result = self.post_to_server(self.build_post_data('submit'),
auth)
self.announce('Server response (%s): %s' % (code, result),
log.INFO)
# possibly save the login
if code == 200:
if self.has_config:
# sharing the password in the distribution instance
# so the upload command can reuse it
self.distribution.password = password
else:
self.announce(('I can store your PyPI login so future '
'submissions will be faster.'), log.INFO)
self.announce('(the login will be stored in %s)' % \
self._get_rc_file(), log.INFO)
choice = 'X'
while choice.lower() not in 'yn':
choice = input('Save your login (y/N)?')
if not choice:
choice = 'n'
if choice.lower() == 'y':
self._store_pypirc(username, password)
elif choice == '2':
data = {':action': 'user'}
data['name'] = data['password'] = data['email'] = ''
data['confirm'] = None
while not data['name']:
data['name'] = input('Username: ')
while data['password'] != data['confirm']:
while not data['password']:
data['password'] = getpass.getpass('Password: ')
while not data['confirm']:
data['confirm'] = getpass.getpass(' Confirm: ')
if data['password'] != data['confirm']:
data['password'] = ''
data['confirm'] = None
print("Password and confirm don't match!")
while not data['email']:
data['email'] = input(' EMail: ')
code, result = self.post_to_server(data)
if code != 200:
log.info('Server response (%s): %s' % (code, result))
else:
log.info('You will receive an email shortly.')
log.info(('Follow the instructions in it to '
'complete registration.'))
elif choice == '3':
data = {':action': 'password_reset'}
data['email'] = ''
while not data['email']:
data['email'] = input('Your email address: ')
code, result = self.post_to_server(data)
log.info('Server response (%s): %s' % (code, result))
def build_post_data(self, action):
# figure the data to send - the metadata plus some additional
# information used by the package server
meta = self.distribution.metadata
data = {
':action': action,
'metadata_version' : '1.0',
'name': meta.get_name(),
'version': meta.get_version(),
'summary': meta.get_description(),
'home_page': meta.get_url(),
'author': meta.get_contact(),
'author_email': meta.get_contact_email(),
'license': meta.get_licence(),
'description': meta.get_long_description(),
'keywords': meta.get_keywords(),
'platform': meta.get_platforms(),
'classifiers': meta.get_classifiers(),
'download_url': meta.get_download_url(),
# PEP 314
'provides': meta.get_provides(),
'requires': meta.get_requires(),
'obsoletes': meta.get_obsoletes(),
}
if data['provides'] or data['requires'] or data['obsoletes']:
data['metadata_version'] = '1.1'
return data
def post_to_server(self, data, auth=None):
''' Post a query to the server, and return a string response.
'''
if 'name' in data:
self.announce('Registering %s to %s' % (data['name'],
self.repository),
log.INFO)
# Build up the MIME payload for the urllib2 POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = '\n--' + boundary
end_boundary = sep_boundary + '--'
body = io.StringIO()
for key, value in data.items():
# handle multiple entries for the same name
if type(value) not in (type([]), type( () )):
value = [value]
for value in value:
value = str(value)
body.write(sep_boundary)
body.write('\nContent-Disposition: form-data; name="%s"'%key)
body.write("\n\n")
body.write(value)
if value and value[-1] == '\r':
body.write('\n') # write an extra newline (lurve Macs)
body.write(end_boundary)
body.write("\n")
body = body.getvalue().encode("utf-8")
# build the Request
headers = {
'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8'%boundary,
'Content-length': str(len(body))
}
req = urllib.request.Request(self.repository, body, headers)
# handle HTTP and include the Basic Auth handler
opener = urllib.request.build_opener(
urllib.request.HTTPBasicAuthHandler(password_mgr=auth)
)
data = ''
try:
result = opener.open(req)
except urllib.error.HTTPError as e:
if self.show_response:
data = e.fp.read()
result = e.code, e.msg
except urllib.error.URLError as e:
result = 500, str(e)
else:
if self.show_response:
data = self._read_pypi_response(result)
result = 200, 'OK'
if self.show_response:
msg = '\n'.join(('-' * 75, data, '-' * 75))
self.announce(msg, log.INFO)
return result
| [
"bater.makhabel@gmail.com"
] | bater.makhabel@gmail.com |
25471751b2db8edcf8582fdedc9dee2bd88e1a36 | ab15c38891f26888e4dd4f192b42e5d171437d98 | /ch07-improving-classification-with-a-meta-algorithm-adaboost/adaboost.py | 092232466bf7224513c3b52d9ed68f6b401b53a8 | [] | no_license | zzy1120716/machine-learning-in-action | f262b1c6aea3a262c25d9a56102466d73024dd0a | a46b0b1f9e134d85f4f28bef1de30cdf329d8653 | refs/heads/master | 2020-03-28T21:07:59.224931 | 2018-10-12T08:51:15 | 2018-10-12T08:51:15 | 149,131,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,701 | py | from numpy import *
"""
Create a simple example data set
"""
def loadSimpData():
datMat = matrix([[ 1. , 2.1],
[ 2. , 1.1],
[ 1.3, 1. ],
[ 1. , 1. ],
[ 2. , 1. ]])
classLabels = [1.0, 1.0, -1.0, -1.0, 1.0]
return datMat,classLabels
"""
Decision-stump (single-level decision tree) generation functions
"""
def stumpClassify(dataMatrix,dimen,threshVal,threshIneq):#just classify the data
retArray = ones((shape(dataMatrix)[0],1))
if threshIneq == 'lt':
retArray[dataMatrix[:,dimen] <= threshVal] = -1.0
else:
retArray[dataMatrix[:,dimen] > threshVal] = -1.0
return retArray
def buildStump(dataArr,classLabels,D):
dataMatrix = mat(dataArr); labelMat = mat(classLabels).T
m,n = shape(dataMatrix)
numSteps = 10.0; bestStump = {}; bestClasEst = mat(zeros((m,1)))
minError = inf #init error sum, to +infinity
for i in range(n):#loop over all dimensions
rangeMin = dataMatrix[:,i].min(); rangeMax = dataMatrix[:,i].max();
stepSize = (rangeMax-rangeMin)/numSteps
for j in range(-1,int(numSteps)+1):#loop over all range in current dimension
for inequal in ['lt', 'gt']: #go over less than and greater than
threshVal = (rangeMin + float(j) * stepSize)
predictedVals = stumpClassify(dataMatrix,i,threshVal,inequal)#call stump classify with i, j, lessThan
errArr = mat(ones((m,1)))
errArr[predictedVals == labelMat] = 0
                # compute the weighted error rate
weightedError = D.T*errArr #calc total error multiplied by D
print("split: dim %d, thresh %.2f, thresh ineqal: %s, the weighted error is %.3f" % (i, threshVal, inequal, weightedError))
if weightedError < minError:
minError = weightedError
bestClasEst = predictedVals.copy()
bestStump['dim'] = i
bestStump['thresh'] = threshVal
bestStump['ineq'] = inequal
return bestStump,minError,bestClasEst
"""
AdaBoost training routine based on decision stumps
"""
def adaBoostTrainDS(dataArr,classLabels,numIt=40):
weakClassArr = []
m = shape(dataArr)[0]
D = mat(ones((m,1))/m) #init D to all equal
aggClassEst = mat(zeros((m,1)))
for i in range(numIt):
bestStump,error,classEst = buildStump(dataArr,classLabels,D)#build Stump
print("D:",D.T)
alpha = float(0.5*log((1.0-error)/max(error,1e-16)))#calc alpha, throw in max(error,eps) to account for error=0
bestStump['alpha'] = alpha
weakClassArr.append(bestStump) #store Stump Params in Array
print("classEst: ",classEst.T)
        # compute D for the next iteration
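        # weight update: D_i <- D_i * exp(-alpha * y_i * h(x_i)) / sum(D), so
        # misclassified samples gain weight and correct ones lose it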
expon = multiply(-1*alpha*mat(classLabels).T,classEst) #exponent for D calc, getting messy
D = multiply(D,exp(expon)) #Calc New D for next iteration
D = D/D.sum()
        # accumulate class estimates and the running error rate
#calc training error of all classifiers, if this is 0 quit for loop early (use break)
aggClassEst += alpha*classEst
print("aggClassEst: ",aggClassEst.T)
aggErrors = multiply(sign(aggClassEst) != mat(classLabels).T,ones((m,1)))
errorRate = aggErrors.sum()/m
print("total error: ",errorRate)
if errorRate == 0.0: break
return weakClassArr, aggClassEst
"""
AdaBoost classification function
"""
def adaClassify(datToClass,classifierArr):
dataMatrix = mat(datToClass)#do stuff similar to last aggClassEst in adaBoostTrainDS
m = shape(dataMatrix)[0]
aggClassEst = mat(zeros((m,1)))
for i in range(len(classifierArr)):
classEst = stumpClassify(dataMatrix, classifierArr[i]['dim'],\
classifierArr[i]['thresh'],\
classifierArr[i]['ineq'])#call stump classify
aggClassEst += classifierArr[i]['alpha']*classEst
print(aggClassEst)
return sign(aggClassEst)
"""
General-purpose data-loading function (adapts to the number of fields per line)
"""
def loadDataSet(fileName): #general function to parse tab -delimited floats
numFeat = len(open(fileName).readline().split('\t')) #get number of fields
dataMat = []; labelMat = []
fr = open(fileName)
for line in fr.readlines():
lineArr =[]
curLine = line.strip().split('\t')
for i in range(numFeat-1):
lineArr.append(float(curLine[i]))
dataMat.append(lineArr)
labelMat.append(float(curLine[-1]))
return dataMat,labelMat
"""
ROC curve plotting and AUC calculation function
"""
def plotROC(predStrengths, classLabels):
import matplotlib.pyplot as plt
cur = (1.0,1.0) #cursor
ySum = 0.0 #variable to calculate AUC
numPosClas = sum(array(classLabels)==1.0)
yStep = 1/float(numPosClas); xStep = 1/float(len(classLabels)-numPosClas)
    # get the indices sorted by predicted strength (ascending)
sortedIndicies = predStrengths.argsort()#get sorted index, it's reverse
fig = plt.figure()
fig.clf()
ax = plt.subplot(111)
#loop through all the values, drawing a line segment at each point
for index in sortedIndicies.tolist()[0]:
if classLabels[index] == 1.0:
delX = 0; delY = yStep;
else:
delX = xStep; delY = 0;
ySum += cur[1]
#draw line from cur to (cur[0]-delX,cur[1]-delY)
ax.plot([cur[0],cur[0]-delX],[cur[1],cur[1]-delY], c='b')
cur = (cur[0]-delX,cur[1]-delY)
ax.plot([0,1],[0,1],'b--')
plt.xlabel('False positive rate'); plt.ylabel('True positive rate')
plt.title('ROC curve for AdaBoost horse colic detection system')
ax.axis([0,1,0,1])
plt.show()
print("the Area Under the Curve is: ",ySum*xStep) | [
"zzy1120716@126.com"
] | zzy1120716@126.com |
3a171d3084131166e554195f7c2a2b3bca3b65f8 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/resources/v20190801/get_resource_group.py | a589cd0c39a411c4afa98dc37e3ceaceedc2c156 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 4,330 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetResourceGroupResult',
'AwaitableGetResourceGroupResult',
'get_resource_group',
]
@pulumi.output_type
class GetResourceGroupResult:
"""
Resource group information.
"""
def __init__(__self__, location=None, managed_by=None, name=None, properties=None, tags=None, type=None):
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if managed_by and not isinstance(managed_by, str):
raise TypeError("Expected argument 'managed_by' to be a str")
pulumi.set(__self__, "managed_by", managed_by)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def location(self) -> str:
"""
The location of the resource group. It cannot be changed after the resource group has been created. It must be one of the supported Azure locations.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="managedBy")
def managed_by(self) -> Optional[str]:
"""
The ID of the resource that manages this resource group.
"""
return pulumi.get(self, "managed_by")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource group.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.ResourceGroupPropertiesResponse':
"""
The resource group properties.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The tags attached to the resource group.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource group.
"""
return pulumi.get(self, "type")
class AwaitableGetResourceGroupResult(GetResourceGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetResourceGroupResult(
location=self.location,
managed_by=self.managed_by,
name=self.name,
properties=self.properties,
tags=self.tags,
type=self.type)
def get_resource_group(resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetResourceGroupResult:
"""
Use this data source to access information about an existing resource.
:param str resource_group_name: The name of the resource group to get. The name is case insensitive.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:resources/v20190801:getResourceGroup', __args__, opts=opts, typ=GetResourceGroupResult).value
return AwaitableGetResourceGroupResult(
location=__ret__.location,
managed_by=__ret__.managed_by,
name=__ret__.name,
properties=__ret__.properties,
tags=__ret__.tags,
type=__ret__.type)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
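A minimal usage sketch for the get_resource_group data source above; the import path and the resource-group name are assumptions for illustration, not taken from the record.

import pulumi
from pulumi_azure_nextgen.resources import v20190801 as resources  # assumed module path

# Look up an existing resource group and export a couple of its fields.
group = resources.get_resource_group(resource_group_name="my-rg")  # hypothetical name
pulumi.export("location", group.location)
pulumi.export("tags", group.tags)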
dd64a7d5af16dc250d7f9f0558cf36a08ff22cb4 | f7630fd6c829cb306e72472296e3a513844d99af | /lib/python3.8/site-packages/ansible_collections/check_point/mgmt/plugins/modules/cp_mgmt_add_domain.py | 90c360467afab6cc661742573a653989786f573a | [] | no_license | baltah666/automation | 6eccce20c83dbe0d5aa9a82a27937886e3131d32 | 140eb81fe9bacb9a3ed1f1eafe86edeb8a8d0d52 | refs/heads/master | 2023-03-07T10:53:21.187020 | 2023-02-10T08:39:38 | 2023-02-10T08:39:38 | 272,007,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,822 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: cp_mgmt_add_domain
short_description: Create new object
description:
- Create new object
- All operations are performed over Web Services API.
version_added: "2.9"
author: "Or Soffer (@chkp-orso)"
options:
name:
description:
- Object name.
type: str
required: True
servers:
description:
- Domain servers. When this field is provided, 'set-domain' command is executed asynchronously.
type: list
suboptions:
name:
description:
- Object name. Must be unique in the domain.
type: str
ip_address:
description:
- IPv4 or IPv6 address. If both addresses are required use ipv4-address and ipv6-address fields explicitly.
type: str
ipv4_address:
description:
- IPv4 address.
type: str
ipv6_address:
description:
- IPv6 address.
type: str
multi_domain_server:
description:
- Multi Domain server name or UID.
type: str
active:
description:
- Activate domain server. Only one domain server is allowed to be active
type: bool
skip_start_domain_server:
description:
- Set this value to be true to prevent starting the new created domain.
type: bool
type:
description:
- Domain server type.
type: str
choices: ['management server', 'log server', 'smc']
color:
description:
- Color of the object. Should be one of existing colors.
type: str
choices: ['aquamarine', 'black', 'blue', 'crete blue', 'burlywood', 'cyan', 'dark green', 'khaki', 'orchid', 'dark orange', 'dark sea green',
'pink', 'turquoise', 'dark blue', 'firebrick', 'brown', 'forest green', 'gold', 'dark gold', 'gray', 'dark gray', 'light green', 'lemon chiffon',
'coral', 'sea green', 'sky blue', 'magenta', 'purple', 'slate blue', 'violet red', 'navy blue', 'olive', 'orange', 'red', 'sienna', 'yellow']
comments:
description:
- Comments string.
type: str
details_level:
description:
- The level of detail for some of the fields in the response can vary from showing only the UID value of the object to a fully detailed
representation of the object.
type: str
choices: ['uid', 'standard', 'full']
ignore_warnings:
description:
- Apply changes ignoring warnings.
type: bool
ignore_errors:
description:
- Apply changes ignoring errors. You won't be able to publish such a changes. If ignore-warnings flag was omitted - warnings will also be ignored.
type: bool
extends_documentation_fragment: check_point.mgmt.checkpoint_commands
"""
EXAMPLES = """
- name: add-domain
cp_mgmt_add_domain:
name: domain1
    servers:
      - ip_address: 192.0.2.1
        multi_domain_server: MDM_Server
        name: domain1_ManagementServer_1
"""
RETURN = """
cp_mgmt_domain:
description: The checkpoint add-domain output.
returned: always.
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.check_point.mgmt.plugins.module_utils.checkpoint import checkpoint_argument_spec_for_commands, api_command
def main():
argument_spec = dict(
name=dict(type='str', required=True),
servers=dict(type='list', options=dict(
name=dict(type='str'),
ip_address=dict(type='str'),
ipv4_address=dict(type='str'),
ipv6_address=dict(type='str'),
multi_domain_server=dict(type='str'),
active=dict(type='bool'),
skip_start_domain_server=dict(type='bool'),
type=dict(type='str', choices=['management server', 'log server', 'smc'])
)),
color=dict(type='str', choices=['aquamarine', 'black', 'blue', 'crete blue', 'burlywood', 'cyan', 'dark green',
'khaki', 'orchid', 'dark orange', 'dark sea green', 'pink', 'turquoise', 'dark blue', 'firebrick', 'brown',
'forest green', 'gold', 'dark gold', 'gray', 'dark gray', 'light green', 'lemon chiffon', 'coral', 'sea green',
'sky blue', 'magenta', 'purple', 'slate blue', 'violet red', 'navy blue', 'olive', 'orange', 'red', 'sienna',
'yellow']),
comments=dict(type='str'),
details_level=dict(type='str', choices=['uid', 'standard', 'full']),
ignore_warnings=dict(type='bool'),
ignore_errors=dict(type='bool'),
)
argument_spec.update(checkpoint_argument_spec_for_commands)
module = AnsibleModule(argument_spec=argument_spec)
command = 'add-domain'
result = api_command(module, command)
module.exit_json(**result)
if __name__ == '__main__':
main()
| [
"baltah666@gmail.com"
] | baltah666@gmail.com |
2bf87000c434e7db266d0577faff3d8da61f2e6c | b42f4f67e71dee0f0cd95ee4ec0b781f1d27de4c | /Yr12 - to do list task, python revision.py | bbf3e7ee18240ea6e3b82e42ea335b675c47d09b | [] | no_license | Botany-Downs-Secondary-College/todo_list-Rishab-Lal | fa4e30669713fa178546597786463e98d59fc40c | 4e421cf74bb6fafecfc8c67b151a05741effcee8 | refs/heads/main | 2023-03-07T22:59:01.908938 | 2021-02-21T07:23:37 | 2021-02-21T07:23:37 | 338,934,435 | 0 | 0 | null | 2021-02-15T00:45:25 | 2021-02-15T00:45:20 | null | UTF-8 | Python | false | false | 2,022 | py | def command_operator(order):
if order == options_list[0] or order == options_list[3]:
task = input("what task would you like to add to your list?: ")
task_list.append(task)
elif order == options_list[1] or order == options_list[4]:
print("your tasks:")
tasks_in_list = len(task_list)
x = range(0, tasks_in_list, 1)
b = 0
for n in x:
b += 1
print("{}.".format(b), task_list[n])
user_name = input("greetings user, what is your name?: ")
print("hello user {}. It's nice to meet you.".format(user_name))
global task_list
global options_list
options_list = ["addtask", "viewtasks", "exitprogram","1","2","3"]
task_list = []
print("what would you like to do out of the following options {}?".format(user_name))
print("1. add a task to your to do list \n2. view current tasks in list \n3. exit the program")
order = input("to select one of the following, please enter the number of your objective or the following comands: ['add task', 'view tasks', 'exit program']: ")
loop = 0
order = str(order).strip().lower().replace(" ","")
while order != options_list[2] and order != options_list[5]:  # compare exactly; 'in' on a string matches substrings
if loop > 0:
order = input("please enter the number of your objective or the following comands: ['add task', 'view tasks', 'exit program']: ")
order = str(order).strip().lower().replace(" ","")
loop += 1
while order not in options_list:
print("command {} unrecognised".format(order))
order = input("to select one of the following, please enter the number of your objective or the following comands: ['add task', 'view tasks', 'exit program']: ")
order = str(order).strip().lower().replace(" ","")
        if order == options_list[2] or order == options_list[5]:
            break
command_operator(order)
print("bye bye.")
| [
"noreply@github.com"
] | Botany-Downs-Secondary-College.noreply@github.com |
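A hypothetical refactor sketch of the command loop above: normalize the input once and dispatch through a dict instead of comparing against parallel list entries.

tasks = []

def add_task():
    tasks.append(input("what task would you like to add to your list?: "))

def view_tasks():
    for i, task in enumerate(tasks, start=1):
        print(f"{i}. {task}")

commands = {"addtask": add_task, "1": add_task,
            "viewtasks": view_tasks, "2": view_tasks}

while True:
    order = input("command ('add task', 'view tasks', 'exit program'): ").strip().lower().replace(" ", "")
    if order in ("exitprogram", "3"):
        break
    commands.get(order, lambda: print(f"command {order} unrecognised"))()
print("bye bye.")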
46f7c876228e28be02c1f1d994e893cb19c6acb2 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnfretwork.py | 519094b3dab4508a8e7adc7fb1466263733fe977 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 104 | py | ii = [('CookGHP3.py', 1), ('WilkJMC2.py', 1), ('DibdTRL2.py', 1), ('MedwTAI2.py', 1), ('BeckWRE.py', 1)] | [
"varunwachaspati@gmail.com"
] | varunwachaspati@gmail.com |
02533f4a8fe865e8035777f87750a925e2a26d76 | 5d3c8af513c3ff3f39ee09b78b36d6a0ad3f22e8 | /day10/main.py | 62a27bac0f3619cdd65d3b40306451bb6724c006 | [] | no_license | jason9075/aoc_2020_puzzle | 9a46efb781f3878eaf93854e97e001cb1fef1d31 | dc2f4c1fa52330cd7b29911d9211cb87fdd82ceb | refs/heads/master | 2023-02-03T20:41:11.447718 | 2020-12-25T04:06:47 | 2020-12-25T04:06:47 | 319,540,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,372 | py | import numpy as np
def p1(file):
jol_num = []
with open(file) as fp:
line = fp.readline()
while line:
jol_num.append(int(line))
line = fp.readline()
# jol_num.append(0)
jol_num.sort()
jol_num.reverse()
num_of_thr = 0
num_of_one = 0
for idx, jol in enumerate(jol_num[:-1]):
diff = jol - jol_num[idx + 1]
if diff == 3:
num_of_thr += 1
elif diff == 1:
num_of_one += 1
else:
print(f"strange diff: {diff}")
num_of_thr += 1 # final +3
return num_of_thr, num_of_one, jol_num
def p2_wrong():
num_of_thr, num_of_one, jol_num = p1('input_test.txt')
print(jol_num)
print(num_of_thr)
print(num_of_one)
print(jol_num[0] + 3)
high = jol_num[0] + 3
jol_set = set(jol_num)
remain = high
    sol_set = set()  # warning: keeping full paths in this set uses enormous memory; for debugging only
sol_list = []
def find_path(path, s, r, usage_thr, usage_one):
if 1 <= usage_thr or 3 <= usage_one:
next_value = r - 3
if next_value in jol_set:
if next_value == 0:
# s.add(f'{path}-0')
sol_list.append(1)
if 1 <= usage_thr:
find_path(f'{path}-{next_value}', s, next_value, usage_thr - 1, usage_one)
else:
find_path(f'{path}-{next_value}', s, next_value, usage_thr, usage_one - 3)
if 2 <= usage_one:
next_value = r - 2
if next_value in jol_set:
if next_value == 0:
# s.add(f'{path}-0')
sol_list.append(1)
find_path(f'{path}-{next_value}', s, next_value, usage_thr, usage_one - 2)
if 1 <= usage_one:
next_value = r - 1
if next_value in jol_set:
if next_value == 0:
# s.add(f'{path}-0')
sol_list.append(1)
find_path(f'{path}-{next_value}', s, next_value, usage_thr, usage_one - 1)
find_path(f"{remain}", sol_set, remain, num_of_thr, num_of_one)
# print(f'sol_set: {sol_set}')
print(f'sol_count: {len(sol_list)}')
def p2():
_, _, jol_num = p1('input.txt')
jol_num.append(0)
print(jol_num)
jol_num.reverse()
# recursive
# def decompose(num):
# if num not in jol_num:
# return 0
# if num == 0:
# return 1
# if num == 1:
# return 1
# if num == 2:
# return 2
# return decompose(num - 3) + decompose(num - 2) + decompose(num - 1)
#
# print(decompose(jol_num[-1]))
# dp
data = np.zeros(jol_num[-1] + 1, dtype=int)
    for jol in jol_num:
        if jol == 0:
            data[jol] = 1  # exactly one way to start at the outlet
        else:
            # general recurrence; guard the lookback for jol < 3 so indices stay valid
            data[jol] = (data[jol - 1]
                         + (data[jol - 2] if jol >= 2 else 0)
                         + (data[jol - 3] if jol >= 3 else 0))
print(f'data : {data}')
print(f'answer : {data[-1]}')
if __name__ == '__main__':
# num_of_thr, num_of_one, _ = p1("input.txt")
# print(num_of_thr)
# print(num_of_one)
# print((num_of_thr) * num_of_one)
# p2_wrong()
p2()
| [
"jason9075@gmail.com"
] | jason9075@gmail.com |
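A memoized variant of the commented-out recursion in p2 above; a sketch that assumes, as p2 does, that the adapter list already includes the 0-joltage outlet.

from functools import lru_cache

def count_arrangements(jol_num):
    joltages = frozenset(jol_num)
    target = max(joltages)

    @lru_cache(maxsize=None)
    def ways(n):
        if n not in joltages:
            return 0        # no adapter at this joltage
        if n == 0:
            return 1        # one way to start at the outlet
        return ways(n - 1) + ways(n - 2) + ways(n - 3)

    return ways(target)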
8ec248e69b206d1a951f4d862f13b4e8dbc3705d | f3e5b47bb4781415f427af4c34f909ae3b67f411 | /概率/概率/资金流预测/function/概率/未命名文件夹/get_user_p.py | bcca6de5dad8132e3e108822f1ed31729df598b9 | [] | no_license | abnering/alitianchi | 83a45e4bc7d9adb06ad7451231ba241413768ba5 | 3e16a37ea4dd69c97f6d06fa8faa7ce81af23376 | refs/heads/master | 2020-05-18T15:34:30.783552 | 2015-06-28T08:58:29 | 2015-06-28T08:58:29 | 38,192,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | import cPickle as pickle
def get_week_count():
week_count = [0,0,0,0,0,0,0]
for i in range(92):
week_count[i%7] += 1
return week_count
def get_purchase_p():
week_count = get_week_count()
usr_purchase_p = {}
f1 = file("../data/get_678_purchase.pkl",'rb')
usr_purchase = pickle.load(f1)
for key in usr_purchase.keys():
if key not in usr_purchase_p.keys():
usr_purchase_p[key] = [0.0,0.0,0.0,0.0,0.0,0.0,0.0]
for day in range(7):
usr_purchase_p[key][day] = float(usr_purchase[key][day])/week_count[day]
f11 = file("../data/get_purchase_p.pkl",'wb')
pickle.dump(usr_purchase_p,f11)
f1.close()
f11.close()
def get_redeem_p():
usr_redeem_p = { }
week_count = get_week_count()
f1 = file("../data/get_678_redeem.pkl",'rb')
usr_redeem = pickle.load(f1)
for key in usr_redeem.keys():
if key not in usr_redeem_p.keys():
usr_redeem_p[key] = [0.0,0.0,0.0,0.0,0.0,0.0,0.0]
for day in range(7):
usr_redeem_p[key][day] = float(usr_redeem[key][day])/week_count[day]
f11 = file("../data/get_redeem_p.pkl","wb")
pickle.dump(usr_redeem_p,f11)
f1.close()
f11.close()
| [
"="
] | = |
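A quick check of the weekday bucketing used above: 92 days (June-August) starting at weekday index 0 split as 14 occurrences of the first weekday and 13 of each of the rest.

week_count = [0] * 7
for i in range(92):
    week_count[i % 7] += 1
print(week_count)  # [14, 13, 13, 13, 13, 13, 13]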
469053de6c14cc6c42f2b512fdb68ae8e871bf84 | 187a6558f3c7cb6234164677a2bda2e73c26eaaf | /jdcloud_sdk/services/vpc/models/OpModifyBandwidthPackageSpec.py | 34fd5427391027321fb140ad91d8fa9d90d8e030 | [
"Apache-2.0"
] | permissive | jdcloud-api/jdcloud-sdk-python | 4d2db584acc2620b7a866af82d21658cdd7cc227 | 3d1c50ed9117304d3b77a21babe899f939ae91cd | refs/heads/master | 2023-09-04T02:51:08.335168 | 2023-08-30T12:00:25 | 2023-08-30T12:00:25 | 126,276,169 | 18 | 36 | Apache-2.0 | 2023-09-07T06:54:49 | 2018-03-22T03:47:02 | Python | UTF-8 | Python | false | false | 1,462 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class OpModifyBandwidthPackageSpec(object):
def __init__(self, bandwidthMbps=None, name=None, description=None, userPin=None):
"""
        :param bandwidthMbps: (Optional) Bandwidth cap of the shared bandwidth package, in Mbps; valid range 200-5000, must be a multiple of 5, and must not be lower than the bandwidth cap of any public IP inside the package
        :param name: (Optional) Name; supports Chinese characters, digits, upper/lower-case letters, underscore "_" and hyphen "-", up to 32 characters
        :param description: (Optional) Description, up to 256 characters
        :param userPin: (Optional) User pin that owns the resource
"""
self.bandwidthMbps = bandwidthMbps
self.name = name
self.description = description
self.userPin = userPin
| [
"jdcloud-api@jd.com"
] | jdcloud-api@jd.com |
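A minimal construction sketch for the request model above; the values are illustrative and the name and pin are hypothetical.

spec = OpModifyBandwidthPackageSpec(
    bandwidthMbps=500,  # multiple of 5, within the 200-5000 range
    name="my-package",
    description="shared bandwidth package for the web tier",
    userPin="example-pin",
)
print(spec.bandwidthMbps, spec.name)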
717e46f0ffd27a18a0b9b54a3611e47516442f14 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_15746.py | 3243066645dbac93483ab2cce76bcdfcaa156fe8 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | # string.format, differing length breaks table
for i in table_data:
interface,mac,ip = i
    print '{:<20s}{:<20s}{:<20s}{:<20s}'.format(ip, mac, 'ARPA', interface)  # '{s}' was an invalid field name and raised KeyError
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
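A runnable rendering of the fixed snippet above with made-up rows (Python 3 syntax), showing the aligned columns.

table_data = [("Ethernet0/0", "aabb.cc00.0100", "192.0.2.1"),
              ("Loopback0", "aabb.cc00.0200", "198.51.100.1")]
for interface, mac, ip in table_data:
    print('{:<20s}{:<20s}{:<20s}{:<20s}'.format(ip, mac, 'ARPA', interface))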
3d17cdf60b5b1c9e6634e1185b48af2d4710f512 | 3c95032b4dfaa243a5dcf98956a233bb0b2c97b2 | /plot/swiss.py | 000063886447d17d0e6ceca70eadf629362c9d06 | [] | no_license | jhui/tf2 | b8c8736958117eca824fa83baddf6b48ebbc6b1d | 0eed446dd6252d17d23ef44140945dd8e25d06d4 | refs/heads/main | 2023-06-07T11:49:23.878736 | 2023-06-01T17:59:52 | 2023-06-01T17:59:52 | 324,392,041 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,445 | py | import matplotlib.pyplot as plt
from sklearn import manifold, datasets
sr_points, sr_color = datasets.make_swiss_roll(n_samples=8000, random_state=14)
fig = plt.figure(figsize=(12, 9))
ax = fig.add_subplot(111, projection="3d")
fig.add_axes(ax)
ax.scatter(
    sr_points[:, 0], sr_points[:, 1], sr_points[:, 2], marker=".", c=sr_color, s=4, alpha=0.8
)
ax.set_title("Swiss Roll in Ambient Space")
ax.view_init(azim=-66, elev=12)
_ = ax.text2D(0.8, 0.05, s="n_samples=8000", transform=ax.transAxes)
sr_lle, sr_err = manifold.locally_linear_embedding(
sr_points, n_neighbors=12, n_components=2
)
sr_tsne = manifold.TSNE(n_components=2, perplexity=40, random_state=0).fit_transform(
sr_points
)
fig, axs = plt.subplots(figsize=(8, 8), nrows=2)
axs[0].scatter(sr_lle[:, 0], sr_lle[:, 1], c=sr_color, s=0.5)
axs[0].set_title("LLE Embedding of Swiss Roll")
axs[1].scatter(sr_tsne[:, 0], sr_tsne[:, 1], c=sr_color, s=0.5, alpha=0.6)
_ = axs[1].set_title("t-SNE Embedding of Swiss Roll")
plt.show()
sh_points, sh_color = datasets.make_swiss_roll(
n_samples=400, hole=True, random_state=10
)
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, projection="3d")
fig.add_axes(ax)
ax.scatter(
    sh_points[:, 0], sh_points[:, 1], sh_points[:, 2], marker=".", c=sh_color, s=0.5, alpha=0.6
)
ax.set_title("Swiss-Hole in Ambient Space")
ax.view_init(azim=-66, elev=12)
_ = ax.text2D(0.8, 0.05, s="n_samples=400", transform=ax.transAxes)
| [
"jonathan@jonathanhui.com"
] | jonathan@jonathanhui.com |
36f61e485d385be8d0aa3d2a88aa722f9cd8e3f7 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/A/aspeakman/walsall_planning_applications_1.py | 1323734e160aaf1e0d9e0f38d98762082ec01f52 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,674 | py | # this is a scraper of Walsall planning applications for use by Openly Local
# works from the sequence of application numbers (in YY/NNNN/EXT format) - no date or list query
import scraperwiki
from datetime import timedelta
from datetime import date
from datetime import datetime
import re
import dateutil.parser
import urllib
base = scraperwiki.utils.swimport("openlylocal_base_scraper")
scrapemark = scraperwiki.utils.swimport("scrapemark_09")
util = scraperwiki.utils.swimport("utility_library")
class WalsallScraper(base.ListScraper):
START_SEQUENCE = 20020001 # gathering back to this record number (in YYYYNNNN format derived from the application number in this format = YY/NNNN/EXT)
MAX_ID_BATCH = 350 # max application ids to fetch in one go
MAX_UPDATE_BATCH = 200 # max application details to scrape in one go
START_POINT = (date.today().year * 10000) + 1
#ID_ORDER = 'uid desc'
ID_ORDER = "CASE uid WHEN substr(uid, 1, 2) < '50' THEN '20' || uid ELSE '19' || uid END desc"
applic_url = 'http://www2.walsall.gov.uk/dcaccess/headway/AppNumberSearchResults.asp'
search_url = 'http://www2.walsall.gov.uk/dcaccess/headway/AppNumberSearchResults.asp'
scrape_ids = """
<table class="MISresults">
{* <tr> <input type="HIDDEN" value="{{ [records].uid }}" name="AppNumber">
<input type="HIDDEN" value="{{ [records].reference }}" name="AppID">
<input type="HIDDEN" value="{{ [records].uprn }}" name="UPRN">
</tr> *}
</table>
"""
# captures HTML block encompassing all fields to be gathered
scrape_data_block = """
<div id="content"> {{ block|html }} </div>
"""
# the minimum acceptable valid dataset on an application page
scrape_min_data = """
<tr> <td> APPLICATION NUMBER </td> <td> {{ reference }} </td> </tr>
<tr> <td> DATE RECEIVED </td> <td> {{ date_received }} </td> </tr>
<tr> <td> DATE VALID </td> <td> {{ date_validated }} </td> </tr>
<tr> <td> DESCRIPTION </td> <td> {{ description }} </td> </tr>
<tr> <td> ADDRESS </td> <td> {{ address }} </td> </tr>
"""
# other optional parameters that can appear on an application page
scrape_optional_data = [
'<tr> <td> APPLICATION TYPE </td> <td> {{ application_type }} </td> </tr>',
'<tr> <td> DECISION DATE </td> <td> {{ decision_date }} </td> </tr>',
'<tr> <td> OFFICER </td> <td> {{ case_officer }} </td> </tr>',
'<tr> <td> WARD </td> <td> {{ ward_name }} </td> </tr>',
'<tr> <td> DECISION DETAIL </td> <td> {{ decision }} </td> </tr>',
'<a href="{{ comment_url }}">Click here to comment on this application</a>'
]
# NB if both are None, it's the first ever scrape
# NB if rec_to is None, rec_from is the highest existing record and it's a current scrape
# NB also if rec_to is None, the default should be to try to get at least self.MIN_RECS records
def get_id_records (self, rec_from, rec_to = None):
final_result = []
num_from = None
num_to = None
if not rec_from and not rec_to:
rec_from = self.START_SEQUENCE
rec_to = (date.today().year * 10000) + 9999 # last possible record of the current year
elif not rec_to:
rec_to = rec_from + self.MIN_RECS # set target after highest current record to get any recent records
min_rec_to = (date.today().year * 10000) + self.MIN_RECS # first possible record of the current year
if rec_to < min_rec_to: rec_to = min_rec_to
rec_from -= self.MIN_RECS
if self.DEBUG: print 'Looking for records: ', rec_from, rec_to
current_rec = rec_to
top_page = None
bot_page = None
fields = {}
while current_rec >= rec_from and len(final_result) < self.MAX_ID_BATCH:
current_page = str(current_rec)[4:8]
if int(current_page) < 2000: # only testing for max 2000 applications per year (potentially there are 9999)
current_appno = str(current_rec)[2:4] + '/' + current_page + '/'
if self.DEBUG: print 'On page:', current_appno
fields['AppNo'] = current_appno
response = util.open_url(self.br, self.search_url, fields, 'POST')
if response:
html = response.read()
url = response.geturl()
if self.DEBUG: print 'Html:', html
result = scrapemark.scrape(self.scrape_ids, html, url)
if result and result.get('records'):
if not top_page: top_page = current_appno
bot_page = current_appno
if self.DEBUG: print result
unique_refs = [ ]
unique_records = [ ]
for r in result['records']:
if r['reference'] not in unique_refs: # remove duplicates
r['url'] = self.applic_url + '?AppNo=' + urllib.quote_plus(r['uid'])
unique_refs.append(r['reference'])
unique_records.append(r)
self.clean_ids(unique_records)
final_result.extend(unique_records)
if self.DEBUG: print 'Output N: ', len(final_result)
current_rec -= 1
if final_result:
if self.DEBUG: print bot_page, top_page
num_from = int(bot_page[0:2]) + 1900
if num_from <= 1930: num_from += 100
num_to = int(top_page[0:2]) + 1900
if num_to <= 1930: num_to += 100
num_from = (num_from * 10000) + int(bot_page[3:7])
num_to = (num_to * 10000) + int(top_page[3:7])
return final_result, num_from, num_to
# NB if move_forward is true, we scrape forwards from rec_start, otherwise we scrape backwards
def get_id_records2 (self, rec_start, move_forward):
final_result = []
num_from = None
num_to = None
bad_count = 0
find_bad = True
current_rec = rec_start
fields = {}
first_good_rec = None
last_good_rec = None
while len(final_result) < self.MAX_ID_BATCH and bad_count < 20:
current_year = str(current_rec)[0:4]
current_page = str(current_rec)[4:8]
current_appno = current_year[2:4] + '/' + current_page + '/' # note lower 2 year digits only here
if self.DEBUG: print 'Record:', current_appno
fields['AppNo'] = current_appno
response = util.open_url(self.br, self.search_url, fields, 'POST')
if response:
html = response.read()
url = response.geturl()
if self.DEBUG: print 'Html:', html
result = scrapemark.scrape(self.scrape_ids, html, url)
if result and result.get('records'):
if not first_good_rec: first_good_rec = current_rec
last_good_rec = current_rec
if self.DEBUG: print result
unique_refs = [ ]
unique_records = [ ]
for r in result['records']:
if r['reference'] not in unique_refs: # remove duplicates
r['url'] = self.applic_url + '?AppNo=' + urllib.quote_plus(r['uid'])
unique_refs.append(r['reference'])
unique_records.append(r)
final_result.extend(unique_records)
bad_count = 0
find_bad = True
elif find_bad:
bad_count += 1
elif find_bad:
bad_count += 1
if move_forward:
if bad_count == 10: # try the next year if moving forward and we reach 10 errors
current_rec = (int(current_year)+1)*10000
else:
current_rec += 1
else:
if current_page == '0000': # if moving backward, swap to next year when reach zero
current_rec = ((int(current_year)-1)*10000)+2000 # expecting max 2000 applications per year (potentially 9999)
find_bad = False
else:
current_rec -= 1
if final_result:
self.clean_ids(final_result)
if move_forward:
num_from = first_good_rec
num_to = last_good_rec
else:
num_to = first_good_rec
num_from = last_good_rec
return final_result, num_from, num_to
def get_detail_from_uid (self, uid):
try:
# search by application number
fields = { 'AppNo': uid }
response = util.open_url(self.br, self.applic_url, fields)
# follow first view form if there is one
form_ok = util.setup_form(self.br)
response = util.submit_form(self.br)
html = response.read()
url = response.geturl()
if self.DEBUG: print "detail page:", html
except:
if self.DEBUG: raise
else: return None
return self.get_detail(html, url)
if __name__ == 'scraper':
scraper = WalsallScraper()
scraper.run()
#scraper.DEBUG = True
# misc tests
#print scraper.get_detail_from_uid ('12/0001/')
#result = scraper.get_id_records(20050000, 20120050)
#result = scraper.get_id_records(20120709)
#print result
#print scraper.gather_ids(None, None)
#scraper.gather_current_ids()
#util.rename_column('swdata', 'ward', 'ward_name')
#scraperwiki.sqlite.save_var('latest', 20120800)
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
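A toy illustration of the scrapemark template used by scrape_ids above; the HTML fragment is made up and the call mirrors how the scraper itself invokes scrapemark.scrape.

fragment = ('<table class="MISresults"><tr>'
            '<input type="HIDDEN" value="12/0001/" name="AppNumber">'
            '<input type="HIDDEN" value="12/0001/FL" name="AppID">'
            '<input type="HIDDEN" value="100071234" name="UPRN">'
            '</tr></table>')
result = scrapemark.scrape(WalsallScraper.scrape_ids, fragment, 'http://example.org')
# result['records'] should carry uid, reference and uprn for each matched row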
204379dedd5ca575eb34d4a3ecf6f7fb845eb022 | 80cfb5f42bc93433249bb2789be33208bb037679 | /build/lib/platin/language/__init__.py | 6e2e3196480267064d1af89f669431bd5d059f80 | [] | no_license | legibe/platin | b3960975c214b99d699fc3c8d5e94d473d173a8c | 430c4421ac2056c3041c1ac4b7f950c202334fb4 | refs/heads/master | 2020-12-24T21:28:15.740529 | 2016-05-13T00:29:53 | 2016-05-13T00:29:53 | 55,983,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | #--------------------------------------------------------------------------------
# Copyright (c) 2013, MediaSift Ltd
# All rights reserved.
# Distribution of this software is strictly forbidden under the terms of this
# license.
#
# Author: Claude Gibert
#
#--------------------------------------------------------------------------------
import jsonreader
| [
"claudegibert@Bearwood.local"
] | claudegibert@Bearwood.local |
e630acfcebf47e142561828d46fe7d83a788efc0 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/SOLOv1/tests/test_sampler.py | 8c6b401c06506371ea5f98f7873d093d3ee98610 | [
"LicenseRef-scancode-proprietary-license",
"BSD-2-Clause",
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 7,905 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from mmdet.core import MaxIoUAssigner
from mmdet.core.bbox.samplers import OHEMSampler, RandomSampler
def test_random_sampler():
assigner = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([1, 2])
gt_bboxes_ignore = torch.Tensor([
[30, 30, 40, 40],
])
assign_result = assigner.assign(
bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_labels=gt_labels)
sampler = RandomSampler(
num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_random_sampler_empty_gt():
assigner = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.empty(0, 4)
gt_labels = torch.empty(0, ).long()
assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
sampler = RandomSampler(
num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_random_sampler_empty_pred():
assigner = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.empty(0, 4)
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([1, 2])
assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
sampler = RandomSampler(
num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def _context_for_ohem():
try:
from test_forward import _get_detector_cfg
except ImportError:
# Hack: grab testing utils from test_forward to make a context for ohem
import sys
from os.path import dirname
sys.path.insert(0, dirname(__file__))
from test_forward import _get_detector_cfg
model, train_cfg, test_cfg = _get_detector_cfg(
'faster_rcnn_ohem_r50_fpn_1x.py')
model['pretrained'] = None
# torchvision roi align supports CPU
model['bbox_roi_extractor']['roi_layer']['use_torchvision'] = True
from mmdet.models import build_detector
context = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)
return context
def test_ohem_sampler():
assigner = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([1, 2])
gt_bboxes_ignore = torch.Tensor([
[30, 30, 40, 40],
])
assign_result = assigner.assign(
bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_labels=gt_labels)
context = _context_for_ohem()
sampler = OHEMSampler(
num=10,
pos_fraction=0.5,
context=context,
neg_pos_ub=-1,
add_gt_as_proposals=True)
feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]]
sample_result = sampler.sample(
assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_ohem_sampler_empty_gt():
assigner = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.empty(0, 4)
gt_labels = torch.LongTensor([])
gt_bboxes_ignore = torch.Tensor([])
assign_result = assigner.assign(
bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_labels=gt_labels)
context = _context_for_ohem()
sampler = OHEMSampler(
num=10,
pos_fraction=0.5,
context=context,
neg_pos_ub=-1,
add_gt_as_proposals=True)
feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]]
sample_result = sampler.sample(
assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_ohem_sampler_empty_pred():
assigner = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.empty(0, 4)
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_labels = torch.LongTensor([1, 2, 2, 3])
gt_bboxes_ignore = torch.Tensor([])
assign_result = assigner.assign(
bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_labels=gt_labels)
context = _context_for_ohem()
sampler = OHEMSampler(
num=10,
pos_fraction=0.5,
context=context,
neg_pos_ub=-1,
add_gt_as_proposals=True)
feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]]
sample_result = sampler.sample(
assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_random_sample_result():
from mmdet.core.bbox.samplers.sampling_result import SamplingResult
SamplingResult.random(num_gts=0, num_preds=0)
SamplingResult.random(num_gts=0, num_preds=3)
SamplingResult.random(num_gts=3, num_preds=3)
SamplingResult.random(num_gts=0, num_preds=3)
SamplingResult.random(num_gts=7, num_preds=7)
SamplingResult.random(num_gts=7, num_preds=64)
SamplingResult.random(num_gts=24, num_preds=3)
for i in range(3):
SamplingResult.random(rng=i)
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
fd7500964a5e092683adba5c3939e8b99221bfb9 | fa51b088ea761b78cf0c85837fabaa0b7035b105 | /compute/client_library/ingredients/instance-templates/create_from_instance.py | 1450cf02f213a99b15e8ff2976bf5fa433f50640 | [
"Apache-2.0"
] | permissive | manavgarg/python-docs-samples | f27307022092bc35358b8ddbd0f73d56787934d1 | 54b9cd6740b4dbc64db4d43a16de13c702b2364b | refs/heads/master | 2023-02-07T21:18:15.997414 | 2023-01-28T18:44:11 | 2023-01-28T18:44:11 | 245,290,674 | 0 | 0 | Apache-2.0 | 2020-03-05T23:44:17 | 2020-03-05T23:44:16 | null | UTF-8 | Python | false | false | 2,677 | py | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is an ingredient file. It is not meant to be run directly. Check the samples/snippets
# folder for complete code samples that are ready to be used.
# Disabling flake8 for the ingredients file, as it would fail F821 - undefined name check.
# flake8: noqa
from google.cloud import compute_v1
# <INGREDIENT create_template_from_instance>
def create_template_from_instance(
project_id: str, instance: str, template_name: str
) -> compute_v1.InstanceTemplate:
"""
Create a new instance template based on an existing instance.
This new template specifies a different boot disk.
Args:
project_id: project ID or project number of the Cloud project you use.
instance: the instance to base the new template on. This value uses
the following format: "projects/{project}/zones/{zone}/instances/{instance_name}"
template_name: name of the new template to create.
Returns:
InstanceTemplate object that represents the new instance template.
"""
disk = compute_v1.DiskInstantiationConfig()
# Device name must match the name of a disk attached to the instance you are
# basing your template on.
disk.device_name = "disk-1"
# Replace the original boot disk image used in your instance with a Rocky Linux image.
disk.instantiate_from = "CUSTOM_IMAGE"
disk.custom_image = "projects/rocky-linux-cloud/global/images/family/rocky-linux-8"
# Override the auto_delete setting.
disk.auto_delete = True
template = compute_v1.InstanceTemplate()
template.name = template_name
template.source_instance = instance
template.source_instance_params = compute_v1.SourceInstanceParams()
template.source_instance_params.disk_configs = [disk]
template_client = compute_v1.InstanceTemplatesClient()
operation = template_client.insert(
project=project_id, instance_template_resource=template
)
wait_for_extended_operation(operation, "instance template creation")
return template_client.get(project=project_id, instance_template=template_name)
# </INGREDIENT>
| [
"71398022+dandhlee@users.noreply.github.com"
] | 71398022+dandhlee@users.noreply.github.com |
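A hypothetical invocation of the ingredient above; the project, zone, and names are placeholders, and wait_for_extended_operation comes from the companion ingredient the snippet references.

template = create_template_from_instance(
    project_id="my-project",
    instance="projects/my-project/zones/us-central1-a/instances/base-vm",
    template_name="base-vm-template",
)
print(template.name)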
0027e82f0d6f818aca29541ec19d064e1b701138 | e57d7785276053332c633b57f6925c90ad660580 | /sdk/resourcehealth/azure-mgmt-resourcehealth/azure/mgmt/resourcehealth/v2015_01_01/aio/__init__.py | 0333edd9e09f5f821f552a2ee2cabccdf0696107 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | adriananeci/azure-sdk-for-python | 0d560308497616a563b6afecbb494a88535da4c5 | b2bdfe659210998d6d479e73b133b6c51eb2c009 | refs/heads/main | 2023-08-18T11:12:21.271042 | 2021-09-10T18:48:44 | 2021-09-10T18:48:44 | 405,684,423 | 1 | 0 | MIT | 2021-09-12T15:51:51 | 2021-09-12T15:51:50 | null | UTF-8 | Python | false | false | 570 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._microsoft_resource_health import MicrosoftResourceHealth
__all__ = ['MicrosoftResourceHealth']
| [
"noreply@github.com"
] | adriananeci.noreply@github.com |
037d7f026b3234c024d54712032323343ef39fe2 | ee53b0262007b2f0db0fe15b2ad85f65fafa4e25 | /Leetcode/1302. Deepest Leaves Sum.py | a2ec36969b746bee15fc4af758331ba0cc235878 | [] | no_license | xiaohuanlin/Algorithms | bd48caacb08295fc5756acdac609be78e143a760 | 157cbaeeff74130e5105e58a6b4cdf66403a8a6f | refs/heads/master | 2023-08-09T05:18:06.221485 | 2023-08-08T11:53:15 | 2023-08-08T11:53:15 | 131,491,056 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | '''
Given the root of a binary tree, return the sum of values of its deepest leaves.
Example 1:
Input: root = [1,2,3,4,5,null,6,7,null,null,null,null,8]
Output: 15
Example 2:
Input: root = [6,7,8,2,7,1,3,9,null,1,4,null,null,null,5]
Output: 19
Constraints:
The number of nodes in the tree is in the range [1, 104].
1 <= Node.val <= 100
'''
import unittest
from typing import *
# Definition for a binary tree node (uncommented so the annotations below resolve).
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
def deepestLeavesSum(self, root: Optional[TreeNode]) -> int:
q = deque([root])
while q:
size = len(q)
sum_v = 0
for _ in range(size):
node = q.popleft()
sum_v += node.val
if node.left:
q.append(node.left)
if node.right:
q.append(node.right)
return sum_v
| [
"xiaohuanlin1993@gmail.com"
] | xiaohuanlin1993@gmail.com |
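A small sanity check for the level-order sum above, building example 1 from the docstring by hand.

root = TreeNode(1,
                TreeNode(2, TreeNode(4, TreeNode(7)), TreeNode(5)),
                TreeNode(3, None, TreeNode(6, None, TreeNode(8))))
print(Solution().deepestLeavesSum(root))  # 15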
61fbd5f790574d59343a902bec9b36e3cd9d3526 | b8fed8222b41e447cd5ce83513eb4d014c01742b | /ad_account_optimization/report/account_move_line_record_xls.py | 6b8bea49e51ec23e054c329caae8de862e21f676 | [] | no_license | lajayuhniyarsyah/ERP-Supra | e993d8face6e022b6f863d1dff7cb51cda36be8d | 5a64dbb57ee40070354926700091fb9025c1350c | refs/heads/master | 2021-01-25T22:09:46.306990 | 2017-11-08T05:32:04 | 2017-11-08T05:32:04 | 23,605,825 | 0 | 10 | null | 2017-11-08T05:32:05 | 2014-09-03T03:58:28 | Python | UTF-8 | Python | false | false | 15,820 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# Copyright (c) 2009 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Jordi Esteve <jesteve@zikzakmedia.com>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import string
import time
import xlwt
from report_engine_xls import report_xls
from ad_account_optimization.generic.account_move_line_record import journal_print
from tools.translate import _
from sys import *
import random
import datetime
header_line = []
ws = []
class account_move_line_xls(report_xls):
"""def _get_start_date(self, data):
# ok
if data.get('form', False) and data['form'].get('date_from', False):
return data['form']['date_from']
return ''
def _get_end_date(self, data):
# ok
if data.get('form', False) and data['form'].get('date_to', False):
return data['form']['date_to']
return ''
def get_start_period(self, data):
if data.get('form', False) and data['form'].get('period_from', False):
return pooler.get_pool(self.cr.dbname).get('account.period').browse(self.cr,self.uid,data['form']['period_from']).name
return ''
def get_end_period(self, data):
if data.get('form', False) and data['form'].get('period_to', False):
return pooler.get_pool(self.cr.dbname).get('account.period').browse(self.cr, self.uid, data['form']['period_to']).name
return ''
def _get_target_move(self, data):
if data.get('form', False) and data['form'].get('target_move', False):
if data['form']['target_move'] == 'By date':
return _('All Entries')
return _('All Posted Entries')
return ''
def _get_filter(self, data):
if data.get('form', False) and data['form'].get('filter', False):
if data['form']['filter'] == 'filter_date':
return _('Date')
elif data['form']['filter'] == 'filter_period':
return _('Periods')
return _('No Filter')
def _display_filter(self, parser, data):
filter_mode = self._get_filter(data)
filter_string = filter_mode
if filter_mode == 'Date':
filter_string = '%s -> %s' % (parser.formatLang(self._get_start_date(data), date=True),
parser.formatLang(self._get_end_date(data), date=True))
elif filter_mode == 'Periods':
filter_string = '%s -> %s' % (self.get_start_period(data),
self.get_end_period(data))
moves_string = self._get_target_move(data)
display_acct_string = ''
if data['form']['sort_selection'] == 'date':
display_acct_string = 'By date'
elif data['form']['sort_selection'] == 'to_number(name,"999999999")':
display_acct_string = 'By entry number'
else:
display_acct_string = 'By reference number'
#fiscal_year_str = parser.get_fiscalyear_text(data['form'])
#period_date_str = parser.get_periods_and_date_text(data['form'])
print "######>>>>>>>>>>>",data['form']
return data['form'] #'Fiscal Year: %s, Period & Date By: %s' % (fiscal_year_str, period_date_str)"
def _display_fiscalyear(self, parser, data):
k = parser.get_fiscalyear_text(data)
if k:
k = 'Fiscal Year: %s' % (k)
k = "############^^^^^^^^^^^^############"
print "--------------------------->>>>>>",k
return k"""
#===============================================================================
# def _get_start_date(self, data):
# # ok
# if data.get('form', False) and data['form'].get('date_from', False):
# return data['form']['date_from']
# return ''
#
# def _get_end_date(self, data):
# # ok
# if data.get('form', False) and data['form'].get('date_to', False):
# return data['form']['date_to']
# return ''
#
# def get_start_period(self, data):
# if data.get('form', False) and data['form'].get('period_from', False):
# return pooler.get_pool(self.cr.dbname).get('account.period').browse(self.cr,self.uid,data['form']['period_from']).name
# return ''
#
# def get_end_period(self, data):
# if data.get('form', False) and data['form'].get('period_to', False):
# return pooler.get_pool(self.cr.dbname).get('account.period').browse(self.cr, self.uid, data['form']['period_to']).name
# return ''
#
# def _get_target_move(self, data):
# if data.get('form', False) and data['form'].get('target_move', False):
# if data['form']['target_move'] == 'all':
# return _('All Entries')
# return _('All Posted Entries')
# return ''
#
# def _get_filter(self, data):
# if data.get('form', False) and data['form'].get('filter', False):
# if data['form']['filter'] == 'filter_date':
# return _('Date')
# elif data['form']['filter'] == 'filter_period':
# return _('Periods')
# return _('No Filter')
#===============================================================================
#===============================================================================
# def _display_filter(self, parser, data):
# filter_mode = self._get_filter(data)
# filter_string = filter_mode
# if filter_mode == 'Date':
# filter_string = '%s -> %s' % (parser.formatLang(self._get_start_date(data), date=True),
# parser.formatLang(self._get_end_date(data), date=True))
# elif filter_mode == 'Periods':
# filter_string = '%s -> %s' % (self.get_start_period(data),
# self.get_end_period(data))
#
# moves_string = self._get_target_move(data)
# display_acct_string = ''
# if data['form']['display_account'] == 'bal_all':
# display_acct_string = 'All'
# elif data['form']['display_account'] == 'bal_movement':
# display_acct_string = 'With movements'
# else:
# display_acct_string = 'With balance is not equal to 0'
#
# fiscal_year_str = parser.get_fiscalyear_text(data['form'])
# period_date_str = parser.get_periods_and_date_text(data['form'])
#
# return 'Fiscal Year: %s, Period & Date By: %s' % (fiscal_year_str, period_date_str)
#===============================================================================
#===========================================================================
# def _display_fiscalyear(self, parser, data):
# """k = parser.get_fiscalyear_text(data)
# if k:
# k = 'Fiscal Year: %s' % (k)"""
# k = "asdfasdfasdfasdf"
# return k
#===========================================================================
## Modules Begin
def _size_col(sheet, col):
return sheet.col_width(col)
def _size_row(sheet, row):
return sheet.row_height(row)
def id_generator(self, size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
"""def create_header(self, xxx):
print "==================xxx=",xxx
judul = "Judul judulan"
return {'judul':judul}"""
def generate_xls_report(self, parser, data, obj, wb):
for a in parser.objects:
if a.company_id.name in header_line:
a.company_id.name = ''
else:
header_line.append(a.company_id.name)
if len(a.company_id.name) != 0:
c = parser.localcontext['company']
ws = wb.add_sheet(('JOURNAL LEDGER %s - %s - %s ' % (self.id_generator(), c.partner_id.ref, c.currency_id.name))[:31])
ws.panes_frozen = True
ws.remove_splits = True
ws.portrait = 0 # Landscape
ws.fit_width_to_pages = 1
judul = "JOURNAL LEDGER REPORT"
cols_specs = [
# Headers data
('Title', 8, 0, 'text',
lambda x, d, p: judul),
('Kosong', 8, 0, 'text',
lambda x, d, p: ""),
('Create Date', 8, 0, 'text',
lambda x, d, p: 'Create date: ' + p.formatLang(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),date_time = True)),
# Balance column
('Entry' , 1, 67, 'text',
lambda x, d, p: x['name_split']),
('Date' , 1, 67, 'text',
lambda x, d, p: p.formatLang(x['date'],date=True)),
('Period' , 1, 80, 'text',
lambda x, d, p: x.period_id['name']),
('Journal' , 1, 200, 'text',
lambda x, d, p: x.journal_id['code']),
('Partner' , 1, 200, 'text',
lambda x, d, p: x.partner_id['name']),
('Reference' , 1, 200, 'text',
lambda x, d, p: x['ref']),
('Debit' , 1, 80, 'number',
lambda x, d, p: abs(x['amount'])),
('Credit' , 1, 80, 'number',
lambda x, d, p: abs(x['amount'])),
# Account Total
('Blank', 3, 0, 'text',
lambda x, d, p: ""),
('Account', 1, 0, 'text',
lambda x, d, p: x.account_id['code']),
('Account Name', 1, 0, 'text',
lambda x, d, p: x.account_id['name']),
('Description', 1, 0, 'text',
lambda x, d, p: x['name']),
('Account Debit', 1, 0, 'number',
lambda x, d, p: x['debit']),
('Account Credit', 1, 0, 'number',
lambda x, d, p: x['credit']),
]
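                # Each spec above appears to read (column name, colspan, width,
                # cell type, value callback); xls_row_template then selects specs
                # by name to build one row layout.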
row_hdr0 = self.xls_row_template(cols_specs, ['Title'])
row_hdr1 = self.xls_row_template(cols_specs, ['Kosong'])
row_hdr2 = self.xls_row_template(cols_specs, ['Create Date'])
row_hdr4 = self.xls_row_template(cols_specs, ['Kosong'])
row_balance = self.xls_row_template(cols_specs,[
'Entry',
'Date',
'Period',
'Journal',
'Partner',
'Reference',
'Debit',
'Credit'
])
hdr_account_total = self.xls_row_template(cols_specs,['Blank', 'Account', 'Account Name', 'Description', 'Account Debit', 'Account Credit'])
## Style variable Begin
hdr_style = xlwt.easyxf('pattern: pattern solid, fore_color white;')
row_normal_style= xlwt.easyxf(num_format_str='#,##0.00;(#,##0.00)')
row_bold_style = xlwt.easyxf('font: bold on;')
tittle_style = xlwt.easyxf('font: height 240, name Arial Black, colour_index black, bold on; align: wrap on, vert centre, horiz center; pattern: pattern solid, fore_color white;')
subtittle_left_style = xlwt.easyxf('font: name Arial, colour_index brown, bold on, italic on; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color white;')
subtittle_right_style = xlwt.easyxf('font: height 240, name Arial, colour_index brown, bold on, italic on; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color white;')
subtittle_top_and_bottom_style = xlwt.easyxf('font: height 240, name Arial, colour_index black, bold off, italic on; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color white;')
blank_style = xlwt.easyxf('font: height 650, name Arial, colour_index brown, bold off; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color white;')
normal_style = xlwt.easyxf('font: height 240, name Arial, colour_index black, bold off; align: wrap on, vert centre, horiz left;')
total_style = xlwt.easyxf('font: height 240, name Arial, colour_index brown, bold on, italic on; align: wrap on, vert centre;', num_format_str='#,##0.00;(#,##0.00)')
## Style variable End
# Write headers
self.xls_write_row(ws, None, data, parser, 0, row_hdr0, tittle_style)
self.xls_write_row(ws, None, data, parser, 1, row_hdr1, blank_style)
self.xls_write_row(ws, None, data, parser, 2, row_hdr2, subtittle_left_style)
#self.xls_write_row(ws, None, data, parser, 3, row_hdr3, hdr_style)
self.xls_write_row(ws, None, data, parser, 3, row_hdr1, blank_style)
self.xls_write_row_header(ws, 4, row_balance, hdr_style, set_column_size=True)
self.xls_write_row_header(ws, 5, hdr_account_total, hdr_style)
row_count = 6
ws.horz_split_pos = row_count
for b in parser.objects:
r = ws.row(row_count)
self.xls_write_row(ws, b, data, parser, row_count, row_balance, row_bold_style)
row_count += 1
for c in b.line_id:
self.xls_write_row(ws, c, data, parser, row_count, hdr_account_total, row_normal_style)
row_count += 1
pass
account_move_line_xls(
'report.account.move.line.xls',
'account.journal.period',
#'account.move',
'addons/ad_account_optimization/report/account_move_line_record_h.rml',
parser=journal_print,
header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"lajayuhni@gmail.com"
] | lajayuhni@gmail.com |
fcbdc53a51fb6af5dbdd65a63ece1bb714a0861f | 50f4d2bb1b1222bcb2eb0122c48a0dd254deddfc | /Solve Algorithm/Dicevolume.py | 539f6cb16e57da789adb0d754f9c9e082760acc6 | [] | no_license | yejinee/Algorithm | 9ae1c40382e9dcd868a28d42fe1cc543b790c7f5 | 81d409c4d0ea76cf152a5f334e53a870bc0656a7 | refs/heads/master | 2023-04-13T01:29:44.579635 | 2023-04-05T06:23:26 | 2023-04-05T06:23:26 | 235,014,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | """
Taemin's hobby is collecting dice.
His dice all differ in shape and color, and in size as well.
The N dice he has collected so far were stored in such a mess
that he decided to tidy up, starting by arranging the N dice in order of size.
Once sorted, he noticed that the edge lengths run from 1 through N, one of each.
Fascinated by this, Taemin wondered what the sum of the volumes of these dice would be.
What is the total volume of all the dice Taemin currently owns? Answer his question!
"""
# SIGMA 1-N (N^3)
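# Standard identity: 1^3 + 2^3 + ... + N^3 = (1 + 2 + ... + N)^2 = (N*(N+1)/2)^2,
# so the answer can be computed in O(1) modulo 1000000007.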
n = int(input())
# Brute force with a for loop -> takes too long for large N
"""
sum=0
for i in range(1,n+1):
sum=sum+(i*i*i)
print(sum%1000000007)
"""
# Use the closed-form sigma formula instead
total = (n * (n + 1) // 2) % 1000000007  # // keeps this exact; / yields a float and loses precision for large n
answer = (total * total) % 1000000007
print("%d" % answer) | [
"kimyj9609@gmail.com"
] | kimyj9609@gmail.com |
0821ebaacd10da4c7d9f3837197682eb6f0d5430 | 1c61f90e32431a0bf813aa6ace88f7fa2627ee6f | /leadmanager/leads/serializers.py | 8e9d07756880747a16828975e4a37bf14d7cf81e | [] | no_license | WilliamOtieno/LeadManager_ReactDjango | 4c2b24072192c233ed9ab12521554272491f89bf | 209dc549651cebb7445f040bc38bdde275c67de9 | refs/heads/master | 2023-05-25T18:44:34.445294 | 2021-05-29T22:29:19 | 2021-05-29T22:29:19 | 371,958,224 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | from .models import Lead
from rest_framework import serializers
class LeadSerializer(serializers.ModelSerializer):
class Meta:
model = Lead
fields = "__all__"
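# Illustrative usage (not part of the original file):
#   LeadSerializer(Lead.objects.first()).data        -> dict of all Lead fields
#   s = LeadSerializer(data=request.data); s.is_valid()   # in a DRF view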
| [
"jimmywilliamotieno@gmail.com"
] | jimmywilliamotieno@gmail.com |
2b37db3d071a1c49eb259947fc325cbdc7cc9c67 | 3b7b6648b72910046b6a227db30f71aeee2cba9c | /2020-12-10-deepergooglenet/config/tiny_imagenet_config.py | 0741ab950f8790d2664a0b6bcd3f28c8091fa011 | [] | no_license | ken2190/deep-learning-study | f2abeb1cd302e405a15bbb52188ae44ffb414e2f | f2998be89d0c931176f158ae5f48ca562786e171 | refs/heads/main | 2023-04-02T05:07:08.504212 | 2021-04-11T15:11:22 | 2021-04-11T15:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,464 | py | import os
def path_join(*arr):
return os.path.sep.join([*arr])
def cd(path):
    # NOTE: this runs in a throwaway subshell, so it does NOT change the Python
    # process's working directory; os.chdir(path) would be needed for that.
    return os.system("cd {}".format(path))
def start(path):
os.system("start {}".format(path))
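# Illustrative: path_join("output", "checkpoints") -> "output" + os.path.sep + "checkpoints";
# start() opens a path with its associated application (Windows 'start' command).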
PROJECT_DIR = os.path.sep.join(os.path.realpath(__file__).split(os.path.sep)[:-2])
# DATASET_DIR = path_join("d:", "datasets", "tiny-imagenet-200")
# hdf5 conversion is much quicker if dataset lives in our SSD:
DATASET_DIR = path_join("C:", "Users", "user", "Repos", "Python", "DeepLearning",
"deep-learning-study", "datasets", "tiny-imagenet-200")
TRAIN_IMAGES_DIR = path_join(DATASET_DIR, "train")
VAL_IMAGES_DIR = path_join(DATASET_DIR, "val")
VAL_MAPPING_TXT = path_join(DATASET_DIR, "val", "val_annotations.txt")
WORDNET_IDS = path_join(DATASET_DIR, "wnids.txt")
WORD_LABELS = path_join(DATASET_DIR, "words.txt")  # wnid -> human-readable labels
NUM_CLASSES = 200
NUM_TEST_IMAGES = 50 * NUM_CLASSES
TRAIN_HDF5_PATH = path_join(PROJECT_DIR, "hdf5", "train.hdf5")
VAL_HDF5_PATH = path_join(PROJECT_DIR, "hdf5", "val.hdf5")
TEST_HDF5_PATH = path_join(PROJECT_DIR, "hdf5", "test.hdf5")
DATASET_MEAN = path_join(PROJECT_DIR, "output", "tiny-image-net-200-mean.json")
MODEL_PATH = path_join(PROJECT_DIR, "output", "checkpoints", "model_epoch_70.hdf5")
CHECKPOINT_DIR = path_join(PROJECT_DIR, "output", "checkpoints")
FIG_PATH = path_join(PROJECT_DIR, "output", "deepergooglenet_tinyimagenet.png")
JSON_PATH = path_join(PROJECT_DIR, "output", "deepergooglenet_tinyimagenet.json")
| [
"machingclee@gmail.com"
] | machingclee@gmail.com |
1266c0ee1674e1b6a510166f34ca4890175e4235 | f487b2f8086fcf97311a61f79b7d01382b7c04b4 | /anomaly detection/cat.py | 8b94702fa88412761c7aa0857ad9122b27d1ccbb | [] | no_license | vinayakumarr/extreme-learning-machine-for-security | 7c6386accc4c663e50ef5844388cd805ec396f50 | c888e9d8cf64f623e810d783f1c49a7ce57ad4be | refs/heads/master | 2021-05-09T21:19:01.985634 | 2018-01-24T06:53:47 | 2018-01-24T06:53:47 | 118,724,306 | 7 | 4 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | from __future__ import print_function
import pandas as pd
from keras.utils.np_utils import to_categorical
import numpy as np
print("Loading")
testlabel = pd.read_csv('data/corrected.csv', header=None)
Y1 = testlabel.iloc[:,0]
y_test1 = np.array(Y1)
y_test= to_categorical(y_test1)
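# to_categorical one-hot encodes the integer labels, e.g. 2 -> [0., 0., 1., ...]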
np.savetxt('data/correctedonehot.csv', y_test, fmt='%01d')
| [
"noreply@github.com"
] | vinayakumarr.noreply@github.com |
eeb0149de58ec4a390b835b3bb2a4b3341edca3c | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/eventgrid/azure-mgmt-eventgrid/azure/mgmt/eventgrid/aio/_event_grid_management_client.py | cda928965cc0af906415b9d662acd77e43c82655 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 7,322 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import EventGridManagementClientConfiguration
from .operations import DomainsOperations
from .operations import DomainTopicsOperations
from .operations import EventSubscriptionsOperations
from .operations import SystemTopicEventSubscriptionsOperations
from .operations import Operations
from .operations import TopicsOperations
from .operations import PrivateEndpointConnectionsOperations
from .operations import PrivateLinkResourcesOperations
from .operations import SystemTopicsOperations
from .operations import ExtensionTopicsOperations
from .operations import TopicTypesOperations
from .. import models
class EventGridManagementClient(object):
"""Azure EventGrid Management Client.
:ivar domains: DomainsOperations operations
:vartype domains: azure.mgmt.eventgrid.aio.operations.DomainsOperations
:ivar domain_topics: DomainTopicsOperations operations
:vartype domain_topics: azure.mgmt.eventgrid.aio.operations.DomainTopicsOperations
:ivar event_subscriptions: EventSubscriptionsOperations operations
:vartype event_subscriptions: azure.mgmt.eventgrid.aio.operations.EventSubscriptionsOperations
:ivar system_topic_event_subscriptions: SystemTopicEventSubscriptionsOperations operations
:vartype system_topic_event_subscriptions: azure.mgmt.eventgrid.aio.operations.SystemTopicEventSubscriptionsOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.eventgrid.aio.operations.Operations
:ivar topics: TopicsOperations operations
:vartype topics: azure.mgmt.eventgrid.aio.operations.TopicsOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections: azure.mgmt.eventgrid.aio.operations.PrivateEndpointConnectionsOperations
:ivar private_link_resources: PrivateLinkResourcesOperations operations
:vartype private_link_resources: azure.mgmt.eventgrid.aio.operations.PrivateLinkResourcesOperations
:ivar system_topics: SystemTopicsOperations operations
:vartype system_topics: azure.mgmt.eventgrid.aio.operations.SystemTopicsOperations
:ivar extension_topics: ExtensionTopicsOperations operations
:vartype extension_topics: azure.mgmt.eventgrid.aio.operations.ExtensionTopicsOperations
:ivar topic_types: TopicTypesOperations operations
:vartype topic_types: azure.mgmt.eventgrid.aio.operations.TopicTypesOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Subscription credentials that uniquely identify a Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = EventGridManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.domains = DomainsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.domain_topics = DomainTopicsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.event_subscriptions = EventSubscriptionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.system_topic_event_subscriptions = SystemTopicEventSubscriptionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.topics = TopicsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_link_resources = PrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.system_topics = SystemTopicsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.extension_topics = ExtensionTopicsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.topic_types = TopicTypesOperations(
self._client, self._config, self._serialize, self._deserialize)
async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "EventGridManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
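# Illustrative usage (not part of the generated client); assumes the azure-identity package:
#     from azure.identity.aio import DefaultAzureCredential
#     async with EventGridManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#         ...  # e.g. client.topics / client.domains operations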
| [
"noreply@github.com"
] | catchsrinivas.noreply@github.com |
c43915e05bf79a467cc796b6ded3f746d250c066 | 2bf75b04b6a80d17a40170fb5e3e6998dc58981b | /l2tdevtools/build_helpers/source.py | ba428d24dc6eb092383430c66a999b946efd81dd | [
"Apache-2.0"
] | permissive | kiddinn/l2tdevtools | 53534a3c3f9d2fb3b6c585611daf0fc5a336d84c | 0e55b449a5fc19dbab68980df47d9073b7f0618c | refs/heads/master | 2020-03-25T19:07:15.219418 | 2018-11-26T05:26:09 | 2018-11-26T05:26:09 | 144,065,721 | 0 | 0 | Apache-2.0 | 2018-08-08T20:45:29 | 2018-08-08T20:45:29 | null | UTF-8 | Python | false | false | 3,309 | py | # -*- coding: utf-8 -*-
"""Helper for building projects from source."""
from __future__ import unicode_literals
import logging
import os
import subprocess
import sys
from l2tdevtools.build_helpers import interface
class SourceBuildHelper(interface.BuildHelper):
"""Helper to build projects from source."""
class ConfigureMakeSourceBuildHelper(SourceBuildHelper):
"""Helper to build projects from source using configure and make."""
def Build(self, source_helper_object):
"""Builds the source.
Args:
source_helper_object (SourceHelper): source helper.
Returns:
bool: True if successful, False otherwise.
"""
source_filename = source_helper_object.Download()
if not source_filename:
logging.info('Download of: {0:s} failed'.format(
source_helper_object.project_name))
return False
source_directory = source_helper_object.Create()
if not source_directory:
logging.error(
'Extraction of source package: {0:s} failed'.format(source_filename))
return False
logging.info('Building source of: {0:s}'.format(source_filename))
if self._project_definition.patches:
# TODO: add self._ApplyPatches
pass
log_file_path = os.path.join('..', self.LOG_FILENAME)
command = './configure > {0:s} 2>&1'.format(log_file_path)
exit_code = subprocess.call('(cd {0:s} && {1:s})'.format(
source_directory, command), shell=True)
if exit_code != 0:
logging.error('Running: "{0:s}" failed.'.format(command))
return False
command = 'make >> {0:s} 2>&1'.format(log_file_path)
exit_code = subprocess.call('(cd {0:s} && {1:s})'.format(
source_directory, command), shell=True)
if exit_code != 0:
logging.error('Running: "{0:s}" failed.'.format(command))
return False
return True
def Clean(self, unused_source_helper_object):
"""Cleans the source.
Args:
source_helper_object (SourceHelper): source helper.
"""
# TODO: implement.
return
class SetupPySourceBuildHelper(SourceBuildHelper):
"""Helper to build projects from source using setup.py."""
def Build(self, source_helper_object):
"""Builds the source.
Args:
source_helper_object (SourceHelper): source helper.
Returns:
bool: True if successful, False otherwise.
"""
source_filename = source_helper_object.Download()
if not source_filename:
logging.info('Download of: {0:s} failed'.format(
source_helper_object.project_name))
return False
source_directory = source_helper_object.Create()
if not source_directory:
logging.error(
'Extraction of source package: {0:s} failed'.format(source_filename))
return False
logging.info('Building source of: {0:s}'.format(source_filename))
if self._project_definition.patches:
# TODO: add self._ApplyPatches
pass
log_file_path = os.path.join('..', self.LOG_FILENAME)
command = '{0:s} setup.py build > {1:s} 2>&1'.format(
sys.executable, log_file_path)
exit_code = subprocess.call('(cd {0:s} && {1:s})'.format(
source_directory, command), shell=True)
if exit_code != 0:
logging.error('Running: "{0:s}" failed.'.format(command))
return False
return True
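# Illustrative flow (assumption about the caller, which is not shown in this module):
#   helper = SetupPySourceBuildHelper(...)   # constructor provided by interface.BuildHelper
#   helper.Build(source_helper_object)       # downloads, extracts, then runs setup.py build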
| [
"joachim.metz@gmail.com"
] | joachim.metz@gmail.com |
a91d27053468eaf6f98de68ee62bcfe05d192e2b | 3cda2dc11e1b7b96641f61a77b3afde4b93ac43f | /nni/nas/evaluator/__init__.py | 5c14415483187674a4095a7419b65bde73c68a95 | [
"MIT"
] | permissive | Eurus-Holmes/nni | 6da51c352e721f0241c7fd26fa70a8d7c99ef537 | b84d25bec15ece54bf1703b1acb15d9f8919f656 | refs/heads/master | 2023-08-23T10:45:54.879054 | 2023-08-07T02:39:54 | 2023-08-07T02:39:54 | 163,079,164 | 3 | 2 | MIT | 2023-08-07T12:35:54 | 2018-12-25T12:04:16 | Python | UTF-8 | Python | false | false | 250 | py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from nni.common.framework import shortcut_framework
from .evaluator import *
from .functional import FunctionalEvaluator
shortcut_framework(__name__)
del shortcut_framework
| [
"noreply@github.com"
] | Eurus-Holmes.noreply@github.com |
f448589acd1f8fb150cb19cdefe3bf63485cd106 | 7f66c66eb82b480e8a23ecbfb8613aae02cb50f7 | /web3/vns.py | 1dc1c9f87373767d7c8def6d0229dcf3d974a0e1 | [
"MIT"
] | permissive | y19818/web3.py | 03ddedcfdbd4dde2c1a458b31f5e796509b3c7c6 | 32a85a287ab63220d1e0c06d77be74de595ff02f | refs/heads/master | 2021-06-25T00:30:50.312173 | 2019-12-02T08:21:40 | 2019-12-02T08:21:40 | 225,276,093 | 0 | 0 | MIT | 2019-12-02T03:20:47 | 2019-12-02T03:20:47 | null | UTF-8 | Python | false | false | 15,259 | py | from vns_account import (
Account,
)
from vns_utils import (
apply_to_return_value,
is_checksum_address,
is_string,
)
from hexbytes import (
HexBytes,
)
from web3._utils.blocks import (
select_method_for_block_identifier,
)
from web3._utils.empty import (
empty,
)
from web3._utils.encoding import (
to_hex,
)
from web3._utils.filters import (
BlockFilter,
LogFilter,
TransactionFilter,
)
from web3._utils.threads import (
Timeout,
)
from web3._utils.toolz import (
assoc,
merge,
)
from web3._utils.transactions import (
assert_valid_transaction_params,
extract_valid_transaction_params,
get_buffered_gas_estimate,
get_required_transaction,
replace_transaction,
wait_for_transaction_receipt,
)
from web3.contract import (
Contract,
)
from web3.exceptions import (
BlockNotFound,
TimeExhausted,
TransactionNotFound,
)
from web3.iban import (
Iban,
)
from web3.module import (
Module,
)
class Bbbbbbbb(Module):
account = Account()
defaultAccount = empty
defaultBlock = "latest"
defaultContractFactory = Contract
iban = Iban
gasPriceStrategy = None
def namereg(self):
raise NotImplementedError()
def icapNamereg(self):
raise NotImplementedError()
@property
def protocolVersion(self):
return self.web3.manager.request_blocking("vns_protocolVersion", [])
@property
def syncing(self):
return self.web3.manager.request_blocking("vns_syncing", [])
@property
def coinbase(self):
return self.web3.manager.request_blocking("vns_coinbase", [])
@property
def mining(self):
return self.web3.manager.request_blocking("vns_mining", [])
@property
def hashrate(self):
return self.web3.manager.request_blocking("vns_hashrate", [])
@property
def gasPrice(self):
return self.web3.manager.request_blocking("vns_gasPrice", [])
@property
def accounts(self):
return self.web3.manager.request_blocking("vns_accounts", [])
@property
def blockNumber(self):
return self.web3.manager.request_blocking("vns_blockNumber", [])
@property
def chainId(self):
return self.web3.manager.request_blocking("vns_chainId", [])
def getBalance(self, account, block_identifier=None):
if block_identifier is None:
block_identifier = self.defaultBlock
return self.web3.manager.request_blocking(
"vns_getBalance",
[account, block_identifier],
)
def getStorageAt(self, account, position, block_identifier=None):
if block_identifier is None:
block_identifier = self.defaultBlock
return self.web3.manager.request_blocking(
"vns_getStorageAt",
[account, position, block_identifier]
)
def getCode(self, account, block_identifier=None):
if block_identifier is None:
block_identifier = self.defaultBlock
return self.web3.manager.request_blocking(
"vns_getCode",
[account, block_identifier],
)
def getBlock(self, block_identifier, full_transactions=False):
"""
`vns_getBlockByHash`
`vns_getBlockByNumber`
"""
method = select_method_for_block_identifier(
block_identifier,
if_predefined='vns_getBlockByNumber',
if_hash='vns_getBlockByHash',
if_number='vns_getBlockByNumber',
)
result = self.web3.manager.request_blocking(
method,
[block_identifier, full_transactions],
)
if result is None:
raise BlockNotFound(f"Block with id: {block_identifier} not found.")
return result
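    # Illustrative (assuming this module is mounted on a Web3 instance as `w3.vns`):
    #   w3.vns.getBlock('latest')
    #   w3.vns.getBlock(12345, full_transactions=True)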
def getBlockTransactionCount(self, block_identifier):
"""
`vns_getBlockTransactionCountByHash`
`vns_getBlockTransactionCountByNumber`
"""
method = select_method_for_block_identifier(
block_identifier,
if_predefined='vns_getBlockTransactionCountByNumber',
if_hash='vns_getBlockTransactionCountByHash',
if_number='vns_getBlockTransactionCountByNumber',
)
result = self.web3.manager.request_blocking(
method,
[block_identifier],
)
if result is None:
raise BlockNotFound(f"Block with id: {block_identifier} not found.")
return result
def getUncleCount(self, block_identifier):
"""
`vns_getUncleCountByBlockHash`
`vns_getUncleCountByBlockNumber`
"""
method = select_method_for_block_identifier(
block_identifier,
if_predefined='vns_getUncleCountByBlockNumber',
if_hash='vns_getUncleCountByBlockHash',
if_number='vns_getUncleCountByBlockNumber',
)
result = self.web3.manager.request_blocking(
method,
[block_identifier],
)
if result is None:
raise BlockNotFound(f"Block with id: {block_identifier} not found.")
return result
def getUncleByBlock(self, block_identifier, uncle_index):
"""
`vns_getUncleByBlockHashAndIndex`
`vns_getUncleByBlockNumberAndIndex`
"""
method = select_method_for_block_identifier(
block_identifier,
if_predefined='vns_getUncleByBlockNumberAndIndex',
if_hash='vns_getUncleByBlockHashAndIndex',
if_number='vns_getUncleByBlockNumberAndIndex',
)
result = self.web3.manager.request_blocking(
method,
[block_identifier, uncle_index],
)
if result is None:
raise BlockNotFound(
f"Uncle at index: {uncle_index} of block with id: {block_identifier} not found."
)
return result
def getTransaction(self, transaction_hash):
result = self.web3.manager.request_blocking(
"vns_getTransactionByHash",
[transaction_hash],
)
if result is None:
raise TransactionNotFound(f"Transaction with hash: {transaction_hash} not found.")
return result
def getTransactionFromBlock(self, block_identifier, transaction_index):
"""
Alias for the method getTransactionByBlock
        Deprecated to maintain naming consistency with the json-rpc API
"""
raise DeprecationWarning("This method has been deprecated as of EIP 1474.")
def getTransactionByBlock(self, block_identifier, transaction_index):
"""
`vns_getTransactionByBlockHashAndIndex`
`vns_getTransactionByBlockNumberAndIndex`
"""
method = select_method_for_block_identifier(
block_identifier,
if_predefined='vns_getTransactionByBlockNumberAndIndex',
if_hash='vns_getTransactionByBlockHashAndIndex',
if_number='vns_getTransactionByBlockNumberAndIndex',
)
result = self.web3.manager.request_blocking(
method,
[block_identifier, transaction_index],
)
if result is None:
raise TransactionNotFound(
f"Transaction index: {transaction_index} "
f"on block id: {block_identifier} not found."
)
return result
def waitForTransactionReceipt(self, transaction_hash, timeout=120):
try:
return wait_for_transaction_receipt(self.web3, transaction_hash, timeout)
except Timeout:
raise TimeExhausted(
"Transaction {} is not in the chain, after {} seconds".format(
transaction_hash,
timeout,
)
)
def getTransactionReceipt(self, transaction_hash):
result = self.web3.manager.request_blocking(
"vns_getTransactionReceipt",
[transaction_hash],
)
if result is None:
raise TransactionNotFound(f"Transaction with hash: {transaction_hash} not found.")
return result
def getTransactionCount(self, account, block_identifier=None):
if block_identifier is None:
block_identifier = self.defaultBlock
return self.web3.manager.request_blocking(
"vns_getTransactionCount",
[account, block_identifier],
)
def replaceTransaction(self, transaction_hash, new_transaction):
current_transaction = get_required_transaction(self.web3, transaction_hash)
return replace_transaction(self.web3, current_transaction, new_transaction)
def modifyTransaction(self, transaction_hash, **transaction_params):
assert_valid_transaction_params(transaction_params)
current_transaction = get_required_transaction(self.web3, transaction_hash)
current_transaction_params = extract_valid_transaction_params(current_transaction)
new_transaction = merge(current_transaction_params, transaction_params)
return replace_transaction(self.web3, current_transaction, new_transaction)
def sendTransaction(self, transaction):
# TODO: move to middleware
if 'from' not in transaction and is_checksum_address(self.defaultAccount):
transaction = assoc(transaction, 'from', self.defaultAccount)
# TODO: move gas estimation in middleware
if 'gas' not in transaction:
transaction = assoc(
transaction,
'gas',
get_buffered_gas_estimate(self.web3, transaction),
)
return self.web3.manager.request_blocking(
"vns_sendTransaction",
[transaction],
)
def sendRawTransaction(self, raw_transaction):
return self.web3.manager.request_blocking(
"vns_sendRawTransaction",
[raw_transaction],
)
def sign(self, account, data=None, hexstr=None, text=None):
message_hex = to_hex(data, hexstr=hexstr, text=text)
return self.web3.manager.request_blocking(
"vns_sign", [account, message_hex],
)
def signTransaction(self, transaction):
return self.web3.manager.request_blocking(
"vns_signTransaction", [transaction],
)
@apply_to_return_value(HexBytes)
def call(self, transaction, block_identifier=None):
# TODO: move to middleware
if 'from' not in transaction and is_checksum_address(self.defaultAccount):
transaction = assoc(transaction, 'from', self.defaultAccount)
# TODO: move to middleware
if block_identifier is None:
block_identifier = self.defaultBlock
return self.web3.manager.request_blocking(
"vns_call",
[transaction, block_identifier],
)
def estimateGas(self, transaction, block_identifier=None):
# TODO: move to middleware
if 'from' not in transaction and is_checksum_address(self.defaultAccount):
transaction = assoc(transaction, 'from', self.defaultAccount)
if block_identifier is None:
params = [transaction]
else:
params = [transaction, block_identifier]
return self.web3.manager.request_blocking(
"vns_estimateGas",
params,
)
def filter(self, filter_params=None, filter_id=None):
if filter_id and filter_params:
raise TypeError(
"Ambiguous invocation: provide either a `filter_params` or a `filter_id` argument. "
"Both were supplied."
)
if is_string(filter_params):
if filter_params == "latest":
filter_id = self.web3.manager.request_blocking(
"vns_newBlockFilter", [],
)
return BlockFilter(self.web3, filter_id)
elif filter_params == "pending":
filter_id = self.web3.manager.request_blocking(
"vns_newPendingTransactionFilter", [],
)
return TransactionFilter(self.web3, filter_id)
else:
raise ValueError(
"The filter API only accepts the values of `pending` or "
"`latest` for string based filters"
)
elif isinstance(filter_params, dict):
_filter_id = self.web3.manager.request_blocking(
"vns_newFilter",
[filter_params],
)
return LogFilter(self.web3, _filter_id)
elif filter_id and not filter_params:
return LogFilter(self.web3, filter_id)
else:
raise TypeError("Must provide either filter_params as a string or "
"a valid filter object, or a filter_id as a string "
"or hex.")
def getFilterChanges(self, filter_id):
return self.web3.manager.request_blocking(
"vns_getFilterChanges", [filter_id],
)
def getFilterLogs(self, filter_id):
return self.web3.manager.request_blocking(
"vns_getFilterLogs", [filter_id],
)
def getLogs(self, filter_params):
return self.web3.manager.request_blocking(
"vns_getLogs", [filter_params],
)
def submitHashrate(self, hashrate, node_id):
return self.web3.manager.request_blocking(
"vns_submitHashrate", [hashrate, node_id],
)
def submitWork(self, nonce, pow_hash, mix_digest):
return self.web3.manager.request_blocking(
"vns_submitWork", [nonce, pow_hash, mix_digest],
)
def uninstallFilter(self, filter_id):
return self.web3.manager.request_blocking(
"vns_uninstallFilter", [filter_id],
)
def contract(self,
address=None,
**kwargs):
ContractFactoryClass = kwargs.pop('ContractFactoryClass', self.defaultContractFactory)
ContractFactory = ContractFactoryClass.factory(self.web3, **kwargs)
if address:
return ContractFactory(address)
else:
return ContractFactory
def setContractFactory(self, contractFactory):
self.defaultContractFactory = contractFactory
def getCompilers(self):
raise DeprecationWarning("This method has been deprecated as of EIP 1474.")
def getWork(self):
return self.web3.manager.request_blocking("vns_getWork", [])
def generateGasPrice(self, transaction_params=None):
if self.gasPriceStrategy:
return self.gasPriceStrategy(self.web3, transaction_params)
def setGasPriceStrategy(self, gas_price_strategy):
self.gasPriceStrategy = gas_price_strategy
| [
"y19818@gmail.com"
] | y19818@gmail.com |
7050101a01ba0b7f9d3b8222b5590e6ce9bb1653 | f2575444e57696b83ce6dcec40ad515b56a1b3a9 | /Python/Introduction/WriteAFunction.py | 308e029c6747ab642c896a925f68087dcd10765e | [] | no_license | abhi10010/Hackerrank-Solutions | 046487d79fc5bf84b4df5ef2117578d29cb19243 | da2a57b8ebfcc330d94d104c1755b8c62a9e3e65 | refs/heads/master | 2021-07-24T09:41:49.995295 | 2020-07-12T09:31:58 | 2020-07-12T09:31:58 | 195,647,097 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | def is_leap(year):
leap = False
if (year%4==0 and year%100!=0):
leap = True
if year%400==0:
leap = True
return leap
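# Quick sanity checks (illustrative, not part of the HackerRank stub):
#   is_leap(2000) -> True, is_leap(1900) -> False, is_leap(2016) -> True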
| [
"noreply@github.com"
] | abhi10010.noreply@github.com |
0eba26bcfcd9f5e9a4db59f3d9390ad11e96df73 | b92c73ac2fca8a1f16388cd553dafa0f167bda93 | /Unit 1/linear search.py | 26e1396a7e0eb697b0dee26b5592835914ced8f4 | [] | no_license | DamoM73/Digital-Solutions-old | deb8d0fd7c256113fd7fad56b4658896de9f1cba | 750b76d847e1d1c1661c3f1bbf7d56a72666f094 | refs/heads/master | 2023-08-16T05:35:03.641402 | 2021-10-05T02:36:34 | 2021-10-05T02:36:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | find = 10
found = False
number_list = [3,5,2,9,6,1,8,7]
length = len(number_list)
counter = 0
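# Linear search: walk the list one position at a time until the target value
# is found or the end of the list is reached.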
while found == False and counter < length:
if number_list[counter] == find:
found = True
print("Found at position", counter)
else:
counter = counter + 1
if found == False:
print("Number not in list") | [
"damomurtagh@gmail.com"
] | damomurtagh@gmail.com |
b5e24e6323440a416a551612ebd2d5789e7abce6 | f99a83f3d538a121184de88bff19ce396be6e3d5 | /stayclean-2022-november/checkin.py | 0035f9707815a1c2e971d997cf08e9fb7dca0582 | [
"MIT"
] | permissive | foobarbazblarg/stayclean | c38deddd971b795af58ae389b9e65914dea08d2d | 384a8261b1164797d6726166a6e40f323e5bd6dd | refs/heads/master | 2023-02-21T09:48:57.907540 | 2023-01-02T15:32:35 | 2023-01-02T15:32:35 | 45,186,602 | 1 | 0 | MIT | 2023-02-16T03:49:00 | 2015-10-29T13:59:33 | Python | UTF-8 | Python | false | false | 491 | py | #!/usr/bin/env python3
import sys
from participantCollection import ParticipantCollection
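# Illustrative usage: ./checkin.py alice bob   (marks each named participant as checked in)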
if __name__ == "__main__":
names = sys.argv[1::]
participants = ParticipantCollection()
for name in names:
if participants.hasParticipantNamed(name):
participants.participantNamed(name).hasCheckedIn = True
print(f"just checked in {name}")
else:
print(f"*** WARNING: {name} is not present in participants.txt")
participants.save()
| [
"foobarbazblarg@gmail.com"
] | foobarbazblarg@gmail.com |
2a7f0cc94cfa60d0fbb656abc845e8140d33fd55 | 3f15b2aac6cc0d9d8c85174a85aa2e7130f50fe1 | /memo.py | 8bf98c8737e0e42e3496f87c0e1237c2a8af72ea | [] | no_license | dfoderick/bitshovel-memo | 811bb9cb1ba961bb00a5f7c31ec901e4d9576d04 | 138019988a637a56a3f0bbdc1db9808084902118 | refs/heads/master | 2020-04-14T20:58:16.248551 | 2019-01-07T06:35:15 | 2019-01-07T06:35:15 | 164,112,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,680 | py | #naive implement memo.cash op_return protocol
#see https://memo.cash/protocol
#example: redis-cli publish memo.send <action> <value>
#redis-cli publish memo.send "Hello from BitShovel"
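# More examples, derived from the command handlers below (illustrative):
#   redis-cli publish memo.send "setname Alice"          -> OP_RETURN prefix 0x6d01
#   redis-cli publish memo.send "post Hello world"       -> OP_RETURN prefix 0x6d02
#   redis-cli publish memo.send "posttopic mytopic hi"   -> OP_RETURN prefix 0x6d0c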
import sys
import redis
RED = redis.Redis(host="127.0.0.1", port="6379", decode_responses=True)  # decode so event["data"] arrives as str, not bytes
BUS = RED.pubsub()
def main():
#listen for anyone who wants to send to memo
    # subscribe() returns None; only the subscription side effect matters here
    BUS.subscribe("memo.send")
for event in BUS.listen():
process_event(event)
def process_event(event):
try:
if event["type"] == "message" :
process_message(event["data"])
if event["type"] == "subscribe":
print('subscribed to {}'.format(event["channel"]))
except Exception as ex:
print(ex)
def process_message(data):
remainder = data
#default action is to post the data
prefix = "0x6d02"
command = "post"
if " " in data:
parsed = data.split(' ',1)
command = parsed[0].lower()
if len(parsed) > 1:
remainder = parsed[1]
if command == "setname":
prefix = "0x6d01"
elif command == "post":
prefix = "0x6d02"
elif command == "posttopic":
prefix = "0x6d0c"
else:
#no matches so dont eat the first word
remainder = data
send(command, prefix, remainder)
def send(command, prefix, remainder):
op_stuff = '{0} "{1}"'.format(prefix, remainder).lstrip()
RED.publish("bitshovel.send", op_stuff)
print('Send to BitShovel > {}'.format(op_stuff))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Shutting down...')
BUS.unsubscribe()
sys.exit()
| [
"dfoderick@gmail.com"
] | dfoderick@gmail.com |
c676d154e348d5ded75002372f6e2ba4570d6cb3 | 40b42ccf2b6959d6fce74509201781be96f04475 | /tools/data/textrecog/ilst_converter.py | 6a3a8852bef7880b6b83cf9ead3dc0c95a7b00a9 | [
"Apache-2.0"
] | permissive | xdxie/WordArt | 2f1414d8e4edaa89333353d0b28e5096e1f87263 | 89bf8a218881b250d0ead7a0287526c69586c92a | refs/heads/main | 2023-05-23T02:04:22.185386 | 2023-03-06T11:51:43 | 2023-03-06T11:51:43 | 515,485,694 | 106 | 12 | null | null | null | null | UTF-8 | Python | false | false | 8,527 | py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import json
import os
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
from mmocr.datasets.pipelines.crop import crop_img
from mmocr.utils.fileio import list_to_file
def collect_files(img_dir, gt_dir):
"""Collect all images and their corresponding groundtruth files.
Args:
img_dir (str): The image directory
gt_dir (str): The groundtruth directory
Returns:
files (list): The list of tuples (img_file, groundtruth_file)
"""
assert isinstance(img_dir, str)
assert img_dir
assert isinstance(gt_dir, str)
assert gt_dir
ann_list, imgs_list = [], []
for img_file in os.listdir(img_dir):
ann_path = osp.join(gt_dir, img_file.split('.')[0] + '.xml')
if os.path.exists(ann_path):
ann_list.append(ann_path)
imgs_list.append(osp.join(img_dir, img_file))
files = list(zip(imgs_list, ann_list))
assert len(files), f'No images found in {img_dir}'
print(f'Loaded {len(files)} images from {img_dir}')
return files
def collect_annotations(files, nproc=1):
"""Collect the annotation information.
Args:
files (list): The list of tuples (image_file, groundtruth_file)
nproc (int): The number of process to collect annotations
Returns:
images (list): The list of image information dicts
"""
assert isinstance(files, list)
assert isinstance(nproc, int)
if nproc > 1:
images = mmcv.track_parallel_progress(
load_img_info, files, nproc=nproc)
else:
images = mmcv.track_progress(load_img_info, files)
return images
def load_img_info(files):
"""Load the information of one image.
Args:
files (tuple): The tuple of (img_file, groundtruth_file)
Returns:
img_info (dict): The dict of the img and annotation information
"""
assert isinstance(files, tuple)
img_file, gt_file = files
assert osp.basename(gt_file).split('.')[0] == osp.basename(img_file).split(
'.')[0]
# read imgs while ignoring orientations
img = mmcv.imread(img_file, 'unchanged')
try:
img_info = dict(
file_name=osp.join(osp.basename(img_file)),
height=img.shape[0],
width=img.shape[1],
segm_file=osp.join(osp.basename(gt_file)))
except AttributeError:
print(f'Skip broken img {img_file}')
return None
if osp.splitext(gt_file)[1] == '.xml':
img_info = load_xml_info(gt_file, img_info)
else:
raise NotImplementedError
return img_info
def load_xml_info(gt_file, img_info):
"""Collect the annotation information.
The annotation format is as the following:
<annotations>
...
<object>
<name>SMT</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>157</xmin>
<ymin>294</ymin>
<xmax>237</xmax>
<ymax>357</ymax>
</bndbox>
<object>
Args:
gt_file (str): The path to ground-truth
img_info (dict): The dict of the img and annotation information
Returns:
img_info (dict): The dict of the img and annotation information
"""
obj = ET.parse(gt_file)
root = obj.getroot()
anno_info = []
    for obj_node in root.iter('object'):  # avoid shadowing the builtin 'object'
        word = obj_node.find('name').text
        x1 = int(obj_node.find('bndbox').find('xmin').text)
        y1 = int(obj_node.find('bndbox').find('ymin').text)
        x2 = int(obj_node.find('bndbox').find('xmax').text)
        y2 = int(obj_node.find('bndbox').find('ymax').text)
x = max(0, min(x1, x2))
y = max(0, min(y1, y2))
w, h = abs(x2 - x1), abs(y2 - y1)
bbox = [x, y, x + w, y, x + w, y + h, x, y + h]
anno = dict(bbox=bbox, word=word)
anno_info.append(anno)
img_info.update(anno_info=anno_info)
return img_info
def split_train_val_list(full_list, val_ratio):
"""Split list by val_ratio.
Args:
full_list (list): List to be splited
val_ratio (float): Split ratio for val set
return:
list(list, list): Train_list and val_list
"""
n_total = len(full_list)
offset = int(n_total * val_ratio)
if n_total == 0 or offset < 1:
return [], full_list
val_list = full_list[:offset]
train_list = full_list[offset:]
return [train_list, val_list]
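# Illustrative: split_train_val_list([1, 2, 3, 4], 0.25) -> [[2, 3, 4], [1]]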
def generate_ann(root_path, image_infos, preserve_vertical, val_ratio, format):
"""Generate cropped annotations and label txt file.
Args:
root_path (str): The root path of the dataset
image_infos (list[dict]): A list of dicts of the img and
annotation information
preserve_vertical (bool): Whether to preserve vertical texts
val_ratio (float): Split ratio for val set
format (str): Using jsonl(dict) or str to format annotations
"""
assert val_ratio <= 1.
if val_ratio:
image_infos = split_train_val_list(image_infos, val_ratio)
splits = ['training', 'val']
else:
image_infos = [image_infos]
splits = ['training']
for i, split in enumerate(splits):
dst_image_root = osp.join(root_path, 'crops', split)
ignore_image_root = osp.join(root_path, 'ignores', split)
dst_label_file = osp.join(root_path, f'{split}_label.{format}')
os.makedirs(dst_image_root, exist_ok=True)
lines = []
for image_info in image_infos[i]:
index = 1
src_img_path = osp.join(root_path, 'imgs', image_info['file_name'])
image = mmcv.imread(src_img_path)
src_img_root = image_info['file_name'].split('.')[0]
for anno in image_info['anno_info']:
word = anno['word']
dst_img = crop_img(image, anno['bbox'], 0, 0)
h, w, _ = dst_img.shape
dst_img_name = f'{src_img_root}_{index}.png'
index += 1
# Skip invalid annotations
if min(dst_img.shape) == 0:
continue
# Skip vertical texts
if not preserve_vertical and h / w > 2 and split == 'training':
dst_img_path = osp.join(ignore_image_root, dst_img_name)
else:
dst_img_path = osp.join(dst_image_root, dst_img_name)
mmcv.imwrite(dst_img, dst_img_path)
filename = f'{osp.basename(dst_image_root)}/{dst_img_name}'
if format == 'txt':
lines.append(f'{filename} ' f'{word}')
elif format == 'jsonl':
lines.append(
json.dumps({
'filename': filename,
'text': word
},
ensure_ascii=False))
else:
raise NotImplementedError
list_to_file(dst_label_file, lines)
def parse_args():
parser = argparse.ArgumentParser(
description='Generate training and val set of ILST ')
parser.add_argument('root_path', help='Root dir path of ILST')
parser.add_argument(
'--preserve-vertical',
help='Preserve samples containing vertical texts',
action='store_true')
parser.add_argument(
'--val-ratio', help='Split ratio for val set', default=0., type=float)
parser.add_argument(
'--nproc', default=1, type=int, help='Number of processes')
parser.add_argument(
'--format',
default='jsonl',
help='Use jsonl or string to format annotations',
choices=['jsonl', 'txt'])
args = parser.parse_args()
return args
def main():
args = parse_args()
root_path = args.root_path
with mmcv.Timer(print_tmpl='It takes {}s to convert ILST annotation'):
files = collect_files(
osp.join(root_path, 'imgs'), osp.join(root_path, 'annotations'))
image_infos = collect_annotations(files, nproc=args.nproc)
# filter broken images
image_infos = list(filter(None, image_infos))
generate_ann(root_path, image_infos, args.preserve_vertical,
args.val_ratio, args.format)
if __name__ == '__main__':
main()
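# Illustrative invocation (paths are assumptions about your local layout):
#   python ilst_converter.py data/IIIT-ILST --val-ratio 0.2 --nproc 4 --format jsonl
# where data/IIIT-ILST contains 'imgs' and 'annotations' subdirectories.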
| [
"xudongxie77@gmail.com"
] | xudongxie77@gmail.com |
40ba102f4f4aa53184eb28b5f4f40e5b7c9db825 | 63efeff58299f3ca66c7be0aa80d636ade379ebf | /2019/july/shape_test.py | fca26e372a82f7855aa73e817427a95a6685dd4b | [] | no_license | gosch/Katas-in-python | 0eb6bafe2d6d42dac64c644c2fd48f90bdcef22b | f89ee2accdde75222fa1e4e0ca8b4f8e27b7b760 | refs/heads/master | 2021-07-24T23:50:26.268217 | 2020-04-14T23:53:15 | 2020-04-14T23:53:15 | 137,545,678 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | # importing pandas module
import pandas as pd
import math
# making data frame
data = pd.read_csv("https://cdncontribute.geeksforgeeks.org/wp-content/uploads/nba.csv")
# dataframe.size
size = data.size
# dataframe.shape
shape = data.shape
# dataframe.ndim
df_ndim = data.ndim
# series.ndim
series_ndim = data["Salary"].ndim
# printing size and shape
# print("Size = {}\nShape ={}\nShape[0] x Shape[1] = {}".
# format(size, shape, shape[0] * shape[1]))
# assumes a frame 'df' with numeric columns 'Z7' and 'VFACT' exists (it is not the nba frame above)
df['V'] = df['Z7'] - [math.exp(x) for x in df['VFACT']]
# printing ndim
# print("ndim of dataframe = {}\nndim of series ={}".
# format(df_ndim, series_ndim)) | [
"francisco.gosch@ge.com"
] | francisco.gosch@ge.com |
5734512409b516a061b0050f422b34b7ca2d1f69 | d185832a16690d4f8a84311e5083f95541d9105c | /tracker/model/cvegrouppackage.py | e83aeeb1d96482bef0dfa222dea89e049fa0ff1d | [
"MIT"
] | permissive | dukebarman/arch-security-tracker | 5eaf074215f567e11b128e5c9f58c5f1f6e91edc | 569efa7c6b509c96339baa151c82a7398eb79743 | refs/heads/master | 2020-04-17T13:15:00.724114 | 2019-01-24T10:41:08 | 2019-01-24T10:41:08 | 166,607,334 | 0 | 0 | MIT | 2019-01-24T10:41:09 | 2019-01-20T00:17:37 | Python | UTF-8 | Python | false | false | 656 | py | from tracker import db
class CVEGroupPackage(db.Model):
id = db.Column(db.Integer(), index=True, unique=True, primary_key=True, autoincrement=True)
group_id = db.Column(db.Integer(), db.ForeignKey('cve_group.id', ondelete="CASCADE"), nullable=False)
pkgname = db.Column(db.String(64), nullable=False)
group = db.relationship("CVEGroup", back_populates="packages")
__tablename__ = 'cve_group_package'
__table_args__ = (db.Index('cve_group_package__group_pkgname_idx', group_id, pkgname, unique=True),)
def __repr__(self):
return '<CVEGroupPackage %r from %r referencing %r>' % (self.id, self.group_id, self.pkgname)
| [
"levente@leventepolyak.net"
] | levente@leventepolyak.net |
51ab28d92dbcae0a65a1fae287c6b9e11d8a3168 | f2b172f7c1dcf0ac28fe7465b5844b48facade18 | /12/1208/capitals.py | a134c5e6f06169f662925732217f9bc809346ab1 | [] | no_license | 0gravity000/IntroducingPython | 2fde12485d0597e72a7da801a08d5048a47f2ff5 | 5d3281dbe37ed1a08d71cb6a36841781f9ac0ccf | refs/heads/master | 2023-07-19T02:53:23.081806 | 2021-09-30T01:51:44 | 2021-09-30T01:51:44 | 403,935,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | def process_cities(filename):
with open(filename, 'rt') as file:
for line in file:
line = line.strip()
if 'quit' == line.lower():
return
country, city = line.split(',')
city = city.strip()
country = country.strip()
print(city.title(), country.title(), sep=',')
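# Input format: one "Country,City" pair per line; a line reading "quit" (any case) stops processing.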
if __name__ == '__main__':
import sys
process_cities(sys.argv[1])
| [
"0gravity000@gmail.com"
] | 0gravity000@gmail.com |
321c8151d99bfc35e1562fe05644e8a08ecc1a87 | 881041fab1b4d05f1c5371efed2f9276037eb609 | /tasks/nyc-permitted-event-information/depositor.py | 476281f4fd7af0a89bd394682d4f9c09cebab620 | [] | no_license | ResidentMario/urban-physiology-nyc-catalog | b568f3b6ee1a887a50c4df23c488f50c92e30625 | cefbc799f898f6cdf24d0a0ef6c9cd13c76fb05c | refs/heads/master | 2021-01-02T22:43:09.073952 | 2017-08-06T18:27:22 | 2017-08-06T18:27:22 | 99,377,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | import requests
r = requests.get("https://data.cityofnewyork.us/api/views/tvpp-9vvx/rows.csv?accessType=DOWNLOAD")
with open("/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/nyc-permitted-event-information/data.csv", "wb") as f:
f.write(r.content)
outputs = ["/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/nyc-permitted-event-information/data.csv"]
| [
"aleksey.bilogur@gmail.com"
] | aleksey.bilogur@gmail.com |
4ac2241cd9990e5eccfa76732cc50add2d0b77d1 | 8792797d97e7a3916a50579d625a5b56b748e1eb | /ppRECO/pPbreco2016/specialSequence/ReconstructionPPb_cff.py | e5a3fe3ad1d52aea59fca445f7cb11100cb70f30 | [] | no_license | samborsol/Centrality | ffe95864f782151bfec4e77d1adc1f303b473626 | 26546f37f308b3d9b6e1277a4c8d98332bd1ea6c | refs/heads/master | 2021-01-25T04:35:40.684031 | 2017-02-21T15:13:02 | 2017-02-21T15:13:02 | 93,451,545 | 1 | 0 | null | 2017-06-05T22:09:48 | 2017-06-05T22:09:48 | null | UTF-8 | Python | false | false | 11,449 | py | import FWCore.ParameterSet.Config as cms
### Beginning of Copy from Reconstruction_cff.py
from Configuration.StandardSequences.Eras import eras  # needed: 'eras' is used below (eras.ctpps_2016, eras.trackingLowPU, ...)
from RecoLuminosity.LumiProducer.lumiProducer_cff import *
from RecoLuminosity.LumiProducer.bunchSpacingProducer_cfi import *
from RecoLocalMuon.Configuration.RecoLocalMuon_cff import *
from RecoLocalCalo.Configuration.RecoLocalCalo_cff import *
from RecoTracker.Configuration.RecoTracker_cff import *
from RecoParticleFlow.PFClusterProducer.particleFlowCluster_cff import *
from TrackingTools.Configuration.TrackingTools_cff import *
from RecoTracker.MeasurementDet.MeasurementTrackerEventProducer_cfi import *
from RecoPixelVertexing.PixelLowPtUtilities.siPixelClusterShapeCache_cfi import *
siPixelClusterShapeCachePreSplitting = siPixelClusterShapeCache.clone(
src = 'siPixelClustersPreSplitting'
)
# Global reco
from RecoEcal.Configuration.RecoEcal_cff import *
from RecoJets.Configuration.CaloTowersRec_cff import *
from RecoMET.Configuration.RecoMET_cff import *
from RecoMuon.Configuration.RecoMuon_cff import *
# Higher level objects
from RecoVertex.Configuration.RecoVertex_cff import *
from RecoEgamma.Configuration.RecoEgamma_cff import *
from RecoPixelVertexing.Configuration.RecoPixelVertexing_cff import *
from RecoJets.Configuration.RecoJetsGlobal_cff import *
from RecoMET.Configuration.RecoPFMET_cff import *
from RecoBTag.Configuration.RecoBTag_cff import *
#
# note: the division into "global" and "highlevel" here is artificial
#
#local reconstruction
from RecoLocalTracker.Configuration.RecoLocalTracker_cff import *
from RecoParticleFlow.Configuration.RecoParticleFlow_cff import *
from RecoCTPPS.TotemRPLocal.totemRPLocalReconstruction_cff import *
#
# new tau configuration
#
from RecoTauTag.Configuration.RecoPFTauTag_cff import *
# Also BeamSpot
from RecoVertex.BeamSpotProducer.BeamSpot_cff import *
from RecoLocalCalo.CastorReco.CastorSimpleReconstructor_cfi import *
# Cosmic During Collisions
from RecoTracker.SpecialSeedGenerators.cosmicDC_cff import *
localreco = cms.Sequence(bunchSpacingProducer+trackerlocalreco+muonlocalreco+calolocalreco+castorreco)
localreco_HcalNZS = cms.Sequence(trackerlocalreco+muonlocalreco+calolocalrecoNZS+castorreco)
_ctpps_2016_localreco = localreco.copy()
_ctpps_2016_localreco += totemRPLocalReconstruction
eras.ctpps_2016.toReplaceWith(localreco, _ctpps_2016_localreco)
_ctpps_2016_localreco_HcalNZS = localreco_HcalNZS.copy()
_ctpps_2016_localreco_HcalNZS += totemRPLocalReconstruction
eras.ctpps_2016.toReplaceWith(localreco_HcalNZS, _ctpps_2016_localreco_HcalNZS)
#
# temporarily switching off recoGenJets; since this are MC and wil be moved to a proper sequence
#
from RecoLocalCalo.Castor.Castor_cff import *
from RecoLocalCalo.Configuration.hcalGlobalReco_cff import *
globalreco_tracking = cms.Sequence(offlineBeamSpot*
MeasurementTrackerEventPreSplitting* # unclear where to put this
siPixelClusterShapeCachePreSplitting* # unclear where to put this
standalonemuontracking*
trackingGlobalReco*
vertexreco)
_globalreco_tracking_LowPU_Phase1PU70 = globalreco_tracking.copy()
_globalreco_tracking_LowPU_Phase1PU70.replace(trackingGlobalReco, recopixelvertexing+trackingGlobalReco)
eras.trackingLowPU.toReplaceWith(globalreco_tracking, _globalreco_tracking_LowPU_Phase1PU70)
eras.trackingPhase1PU70.toReplaceWith(globalreco_tracking, _globalreco_tracking_LowPU_Phase1PU70)
globalreco = cms.Sequence(globalreco_tracking*
hcalGlobalRecoSequence*
particleFlowCluster*
ecalClusters*
caloTowersRec*
egammaGlobalReco*
jetGlobalReco*
muonGlobalReco*
pfTrackingGlobalReco*
muoncosmicreco*
CastorFullReco)
globalreco_plusPL= cms.Sequence(globalreco*ctfTracksPixelLess)
reducedRecHits = cms.Sequence ( reducedEcalRecHitsSequence * reducedHcalRecHitsSequence )
highlevelreco = cms.Sequence(egammaHighLevelRecoPrePF*
particleFlowReco*
egammaHighLevelRecoPostPF*
muoncosmichighlevelreco*
muonshighlevelreco *
particleFlowLinks*
jetHighLevelReco*
metrecoPlusHCALNoise*
btagging*
recoPFMET*
PFTau*
reducedRecHits*
cosmicDCTracksSeq
)
## Beginning of PPb reco
# PPb Event Characterization
from RecoHI.HiCentralityAlgos.HiCentrality_cfi import *
from RecoHI.HiCentralityAlgos.CentralityBin_cfi import *
from RecoHI.HiEvtPlaneAlgos.HiEvtPlane_cfi import *
# PPb Reco Sequence
ppbreco = cms.Sequence(hiCentrality
* centralityBin
* hiEvtPlane
)
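# ppbreco layers pPb event characterization (centrality determination plus
# event-plane reconstruction) on top of the standard reconstruction chain.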
## End of PPb reco
from FWCore.Modules.logErrorHarvester_cfi import *
# "Export" Section
## add "ppbreco" to "reconstruction"
reconstruction = cms.Sequence(localreco*globalreco*highlevelreco*ppbreco*logErrorHarvester)
reconstruction_trackingOnly = cms.Sequence(localreco*globalreco_tracking)
#need a fully expanded sequence copy
modulesToRemove = list() # copy does not work well
noTrackingAndDependent = list()
noTrackingAndDependent.append(siPixelClustersPreSplitting)
noTrackingAndDependent.append(siStripZeroSuppression)
noTrackingAndDependent.append(siStripClusters)
noTrackingAndDependent.append(initialStepSeedLayersPreSplitting)
noTrackingAndDependent.append(initialStepSeedsPreSplitting)
noTrackingAndDependent.append(initialStepTrackCandidatesPreSplitting)
noTrackingAndDependent.append(initialStepTracksPreSplitting)
noTrackingAndDependent.append(firstStepPrimaryVerticesPreSplitting)
noTrackingAndDependent.append(initialStepTrackRefsForJetsPreSplitting)
noTrackingAndDependent.append(caloTowerForTrkPreSplitting)
noTrackingAndDependent.append(ak4CaloJetsForTrkPreSplitting)
noTrackingAndDependent.append(jetsForCoreTrackingPreSplitting)
noTrackingAndDependent.append(siPixelClusterShapeCachePreSplitting)
noTrackingAndDependent.append(siPixelClusters)
noTrackingAndDependent.append(clusterSummaryProducer)
noTrackingAndDependent.append(siPixelRecHitsPreSplitting)
noTrackingAndDependent.append(MeasurementTrackerEventPreSplitting)
noTrackingAndDependent.append(PixelLayerTriplets)
noTrackingAndDependent.append(pixelTracks)
noTrackingAndDependent.append(pixelVertices)
modulesToRemove.append(dt1DRecHits)
modulesToRemove.append(dt1DCosmicRecHits)
modulesToRemove.append(csc2DRecHits)
modulesToRemove.append(rpcRecHits)
#modulesToRemove.append(ecalGlobalUncalibRecHit)
modulesToRemove.append(ecalMultiFitUncalibRecHit)
modulesToRemove.append(ecalDetIdToBeRecovered)
modulesToRemove.append(ecalRecHit)
modulesToRemove.append(ecalCompactTrigPrim)
modulesToRemove.append(ecalTPSkim)
modulesToRemove.append(ecalPreshowerRecHit)
modulesToRemove.append(selectDigi)
modulesToRemove.append(hbheprereco)
modulesToRemove.append(hbhereco)
modulesToRemove.append(hfreco)
modulesToRemove.append(horeco)
modulesToRemove.append(hcalnoise)
modulesToRemove.append(zdcreco)
modulesToRemove.append(castorreco)
##it's OK according to Ronny modulesToRemove.append(CSCHaloData)#needs digis
reconstruction_fromRECO = reconstruction.copyAndExclude(modulesToRemove+noTrackingAndDependent)
noTrackingAndDependent.append(siPixelRecHitsPreSplitting)
noTrackingAndDependent.append(siStripMatchedRecHits)
noTrackingAndDependent.append(pixelTracks)
noTrackingAndDependent.append(ckftracks)
reconstruction_fromRECO_noTrackingTest = reconstruction.copyAndExclude(modulesToRemove+noTrackingAndDependent)
##requires generalTracks trajectories
noTrackingAndDependent.append(trackerDrivenElectronSeeds)
noTrackingAndDependent.append(ecalDrivenElectronSeeds)
noTrackingAndDependent.append(uncleanedOnlyElectronSeeds)
noTrackingAndDependent.append(uncleanedOnlyElectronCkfTrackCandidates)
noTrackingAndDependent.append(uncleanedOnlyElectronGsfTracks)
noTrackingAndDependent.append(uncleanedOnlyGeneralConversionTrackProducer)
noTrackingAndDependent.append(uncleanedOnlyGsfConversionTrackProducer)
noTrackingAndDependent.append(uncleanedOnlyPfTrackElec)
noTrackingAndDependent.append(uncleanedOnlyGsfElectronCores)
noTrackingAndDependent.append(uncleanedOnlyPfTrack)
noTrackingAndDependent.append(uncleanedOnlyGeneralInOutOutInConversionTrackMerger)#can live with
noTrackingAndDependent.append(uncleanedOnlyGsfGeneralInOutOutInConversionTrackMerger)#can live with
noTrackingAndDependent.append(uncleanedOnlyAllConversions)
noTrackingAndDependent.append(uncleanedOnlyGsfElectrons)#can live with
noTrackingAndDependent.append(electronMergedSeeds)
noTrackingAndDependent.append(electronCkfTrackCandidates)
noTrackingAndDependent.append(electronGsfTracks)
noTrackingAndDependent.append(generalConversionTrackProducer)
noTrackingAndDependent.append(generalInOutOutInConversionTrackMerger)
noTrackingAndDependent.append(gsfGeneralInOutOutInConversionTrackMerger)
noTrackingAndDependent.append(ecalDrivenGsfElectrons)
noTrackingAndDependent.append(gsfConversionTrackProducer)
noTrackingAndDependent.append(allConversions)
noTrackingAndDependent.append(gsfElectrons)
reconstruction_fromRECO_noTracking = reconstruction.copyAndExclude(modulesToRemove+noTrackingAndDependent)
reconstruction_noTracking = reconstruction.copyAndExclude(noTrackingAndDependent)
#sequences with additional stuff
reconstruction_withPixellessTk = cms.Sequence(localreco *globalreco_plusPL*highlevelreco*logErrorHarvester)
reconstruction_HcalNZS = cms.Sequence(localreco_HcalNZS*globalreco *highlevelreco*logErrorHarvester)
#sequences without some stuffs
#
reconstruction_woCosmicMuons = cms.Sequence(localreco*globalreco*highlevelreco*logErrorHarvester)
# define a standard candle. please note I am picking up individual
# modules instead of sequences
#
reconstruction_standard_candle = cms.Sequence(localreco*globalreco*vertexreco*recoJetAssociations*btagging*electronSequence*photonSequence)
### End of Copy from Reconstruction_cff.py
#HF cleaning for data in the new design
# adding q tests for those lines
#particleFlowRecHitHCAL.LongShortFibre_Cut = 30.
#particleFlowRecHitHCAL.ApplyPulseDPG = True
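# The loop below extends the parallel maxSeverities / cleaningThresholds / flags
# lists of the HCAL-channel quality test so HFDigi-flagged hits are cleaned too.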
for qTest in particleFlowRecHitHF.producers[0].qualityTests:
if qTest.name == 'PFRecHitQTestHCALChannel':
qTest.maxSeverities.append(9)
qTest.cleaningThresholds.append(30.)
qTest.flags.append('HFDigi')
import RecoLocalCalo.HcalRecAlgos.RemoveAddSevLevel as HcalRemoveAddSevLevel
HcalRemoveAddSevLevel.AddFlag(hcalRecAlgos,"HFDigiTime",11)
HcalRemoveAddSevLevel.AddFlag(hcalRecAlgos,"HBHEFlatNoise",12)
HcalRemoveAddSevLevel.AddFlag(hcalRecAlgos,"HBHENegativeNoise",12)
CSCHaloData.ExpectedBX = cms.int32(3)
from JetMETCorrections.Configuration.JetCorrectors_cff import ak4PFCHSResidualCorrector, ak4PFCHSL1FastL2L3ResidualCorrector, ak4PFCHSL1FastL2L3ResidualCorrectorChain
jetCorrectorsForReco.replace(ak4PFCHSL1FastL2L3CorrectorChain, ak4PFCHSL1FastL2L3ResidualCorrectorChain)
| [
"shengquan.tuo@cern.ch"
] | shengquan.tuo@cern.ch |
3163746233dd0e092d7b43a3d6001c45455a58c2 | 19cad9cc067f117a096af8dbf233f6fff3115e4b | /tools/src2/xml_processor.py | d5c531706bcbaada05d679c7225d005d32201ae2 | [
"MIT"
] | permissive | mattpbooth/oxygine-framework | b28339b77dd26c69029ee80b18ccd0b1895b0ab4 | 14a3026a67a0b8df0a3c094566aae89bf66c3ffd | refs/heads/master | 2020-04-01T13:52:14.227020 | 2014-11-25T21:35:21 | 2014-11-25T21:35:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,313 | py | from xml.dom import minidom
import os
import shutil
import process_atlas
import process_font
import process_starling_atlas
import oxygine_helper
class XmlWalker:
def __init__(self, src, folder, scale_factor, node, meta_node, scale_quality):
self.base = folder
self.path = folder
self.scale_factor = scale_factor
self.root = node
self.last = None
self.root_meta = meta_node
self.last_meta = None
self.src = src
self.scale_quality = scale_quality
self.checkSetAttributes()
def getType(self):
return self.root
def getPath(self, attr):
path = self.root.getAttribute(attr)
if path.startswith("./") or path.startswith(".\\"):
path = path[2:len(path)]
return self.path + path
def setSrcFullPath(self, path):
return self.src + path
def checkSetAttributes(self):
self._checkSetAttributes(self.root)
def _checkSetAttributes(self, node):
path = node.getAttribute("path")
if path:
if 0:
path = ""
if path.startswith("./") or path.startswith(".\\"):
path = self.base + path[2:len(path)]
self.path = path + "/"
scale_factor = node.getAttribute("scale_factor")
if scale_factor:
self.scale_factor = float(scale_factor)
scale_quality = node.getAttribute("scale_quality")
if scale_quality:
self.scale_quality = float(scale_quality)
def next(self):
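        # Advance to the next sibling element, skipping text and comment nodes;
        # "set" nodes are consumed here, updating the path/scale state in place.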
while True:
if not self.last:
if len(self.root.childNodes) == 0:
return None
self.last = self.root.childNodes[0]
else:
self.last = self.last.nextSibling
if not self.last:
return None
if self.last.nodeType == self.last.TEXT_NODE:
continue
if self.last.nodeType == self.last.COMMENT_NODE:
continue
meta = self.root_meta.ownerDocument.createElement(self.last.nodeName)
self.root_meta.appendChild(meta)
self.last_meta = meta
if self.last.nodeName == "set":
self._checkSetAttributes(self.last)
continue
break
return XmlWalker(self.src, self.path, self.scale_factor, self.last, self.last_meta, self.scale_quality)
class XmlProcessor:
def __init__(self, args):
self.src_data = args.src_data + "/"
self.dest_data = args.dest_data + "/"
self.compression = args.compression.lower()
#self.etc1tool = args.android_sdk + "\\tools\\etc1tool.exe "
#if self.compression == "etc1":
# if not os.path.exists(self.etc1tool):
# raise Exception("can't find etc1tool. please pass correct path to android_sdk")
self.path_xml = args.xml
self.xml_name = os.path.split(self.path_xml)[1]
self.atlas_group_id = 0
self.args = args
self.verbosity = args.verbosity
self.warnings = 0
self.errors = 0
#self.scale_factor = 1.0
#self.scale_quality = 1.0
self.scale = args.scale
self.debug = args.debug
self.processors = {}
#self.path_current = ""
self._meta_doc = None
#self._meta_element = None
self.helper = oxygine_helper.helper(os.path.split(__file__)[0] + "/../../")
self.register_processor(process_font.bmfc_font_Processor())
self.register_processor(process_font.font_Processor())
self.register_processor(process_atlas.atlas_Processor())
self.register_processor(process_starling_atlas.starling_atlas_Processor())
self._current_processor = None
def register_processor(self, processor):
self.processors[processor.node_id] = processor
def get_apply_scale(self, applyScaleFactor, walker):
"""
        Return the scale that should be applied to the image.
"""
v = self.scale * walker.scale_quality
if applyScaleFactor:
v *= walker.scale_factor
return v
"""
def add_meta(self, node_id = ""):
if not node_id:
node_id = self._current_processor.node_id
meta = self._meta_doc.createElement(node_id)
self._meta_element.appendChild(meta)
return meta
def get_meta_doc(self):
return self._meta_doc
"""
"""
def _process_set(self, el):
path = el.getAttribute("path")
if path:
if path.startswith(".\\") or path.startswith("./"):
path = self.path_current + path
path = os.path.normpath(path) + "/"
self.path_current = path
scale_factor = el.getAttribute("scale_factor")
if scale_factor:
self.scale_factor = float(scale_factor)
scale_quality = el.getAttribute("scale_quality")
if scale_quality:
self.scale_quality = float(scale_quality)
self.add_meta("set");
"""
def _open_xml(self, path):
with open(path, "r") as file:
font_doc = minidom.parse(file)
return font_doc.documentElement
def _get_src_path(self, local_path):
return self.src_data + local_path
def _get_dest_path(self, local_path):
return self.dest_data + local_path
def _get_meta_xml_path(self, local_path):
return self._get_dest_path(self.xml_name) + ".ox" + "/" + local_path
"""
def get_current_src_path(self, local = ""):
return self._get_src_path(self.path_current + local)
"""
def get_inner_dest(self, inner_local_path = ""):
return self._get_meta_xml_path(self._current_processor.node_id + "/" + inner_local_path)
def log(self, st):
print st
def warning(self, st):
if self.args.warnings:
print "warning: " + st
def error(self, st):
print "error: " + st
def process(self):
#print self.path_data
#print self.path_xml
#print self.path_atlasses
try:
nm = self._get_src_path(self.path_xml)
file = open(nm, "r")
except IOError:
print "can't open file: " + nm
return
doc = minidom.parse(file)
del file
self._meta_doc = minidom.Document()
meta_element = self._meta_doc.createElement("resources")
self._meta_doc.appendChild(meta_element)
totalAtlasses = 0
folder = self._get_meta_xml_path("")
shutil.rmtree(folder, True)
try:
os.makedirs(folder)
except OSError:
pass
xml_folder = os.path.split(self.path_xml)[0] + "/"
walker = XmlWalker(self.src_data, xml_folder, 1.0, doc.documentElement, meta_element, 1.0)
while True:
next = walker.next();
if not next:
break
name = next.root.nodeName
if name in self.processors:
proc = self.processors[name]
self._current_processor = proc
try:
if proc.create_folder:
os.makedirs(self.get_inner_dest(""))
except OSError:
pass
proc.process(self, next)
"""
for el in doc.documentElement.childNodes:
name = el.nodeName
if name in self.processors:
proc = self.processors[name]
self._current_processor = proc
try:
if proc.create_folder:
os.makedirs(self.get_inner_dest(""))
except OSError:
pass
proc.process(self, el)
if name == "set":
self._process_set(el)
if name == "sdfont":
self._process_sdf_font(el)
"""
path_ox_dest = self._get_meta_xml_path("meta.xml")
file = open(path_ox_dest, "w")
if self.verbosity > 1:
print "saving ox file: \n" + os.path.normpath(path_ox_dest)
if self.args.debug:
meta_element.writexml(file, "\t", "\t", "\n")
else:
meta_element.writexml(file)
#if self.verbosity > 1:
# print "created %d atlasses" % (totalAtlasses, )
if self.warnings or self.errors:
print "warnings %d, errors %d" % (self.warnings, self.errors) | [
"frankinshtein85@gmail.com"
] | frankinshtein85@gmail.com |
8da7371fc0333a1219c82545e88fc239fe9137f0 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_to_M_Gk3_no_pad/wiColorJ/pyr_Tcrop255_p60_j15/Add2Loss/Sob_k21_s001_Bce_s001/pyr_1s/L3/step10_a.py | 526d45d6ec62d423f84189f8d28903e1c483f9a1 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,320 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
code_exe_path = os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path
code_exe_path_element = code_exe_path.split("\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2") ### 找出 kong_model2 在第幾層
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### 定位出 kong_model2 的 dir
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer ### 中間 -1 是為了長度轉index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:] ### [7:] 是為了去掉 step1x_, 後來覺得好像改有意義的名字不去掉也行所以 改 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] ### [5:] 是為了去掉 mask_ ,前面的 mask_ 是為了python 的 module 不能 數字開頭, 隨便加的這樣子, 後來覺得 自動排的順序也可以接受, 所以 改0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print(" template_dir:", template_dir) ### 舉例: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_1side_L3 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################################################################################################################
'''
exp_dir is the name of the "parent" folder that determines result_dir; a nested exp_dir is fine too.
For example, with exp_dir = "6_mask_unet/your_own_name", every result_dir lives under:
6_mask_unet/your_own_name/result_a
6_mask_unet/your_own_name/result_b
6_mask_unet/your_own_name/...
'''
use_db_obj = type8_blender_kong_doc3d_in_I_gt_MC
use_loss_obj = [G_bce_s001_sobel_k21_s001_loss_info_builder.set_loss_target("UNet_Mask").copy()]  ### the z, y, x ordering follows step07_b_0b_Multi_UNet
#############################################################
### Build an empty Exp_builder so result_analyze can draw blank figures
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
ch032_1side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
print("build exps cost time:", time.time() - start_time)
if len(sys.argv) < 2:
############################################################################################################
        ### Press F5 directly, or run python step10_b1_exp_obj_load_and_train_and_test.py with nothing after it, so we don't fall through to the code below meant for step10_b_subprocess.py
ch032_1side_1.build().run()
# print('no argument')
sys.exit()
    ### The lines below are for step10_b_subprocess.py, equivalent to running from cmd: python step10_b1_exp_obj_load_and_train_and_test.py some_exp.build().run()
eval(sys.argv[1])
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
bcd6a3fac9732cbc1890b685ff96be4072b34044 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2064/60769/243881.py | 6745299c74805287f773ea018a445a74748503e3 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | a = input()
dict = {"IV": 4, "IX": 9, "XL": 40, "XC": 90, "CD": 400, "CM": 900,
"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
res = 0
jmp = False
for i in range(len(a)):
if jmp:
jmp = False
continue
if i < len(a) - 1:
if dict[a[i]] >= dict[a[i + 1]]:
res += dict[a[i]]
else:
            res += dict[a[i:i + 2]]
            jmp = True  # reassigning i has no effect on a range() loop; jmp skips the next char
else:
res += dict[a[i]]
print(res) | [
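# e.g. input "MCMXCIV" -> 1000 + 900 + 90 + 4 = 1994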
"1069583789@qq.com"
] | 1069583789@qq.com |
bd0a17a144b476d54d6ec4c36018bac20e5578a4 | 08cb6c716f24ad0e5c3fe8fb2c292e81b63fc518 | /python/problem17a.py | 5843761bced645fe35cd1765502fc11ee322c31b | [
"MIT"
] | permissive | amyreese/euler | 170ada3909500cdc1e3394406b57529d9d5fe839 | 0e2a809620cb02367120c0fbfbf9b419edd42c6e | refs/heads/master | 2022-09-01T16:53:13.423566 | 2015-12-30T05:30:04 | 2015-12-30T05:30:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py |
onetonine = len("onetwothreefourfivesixseveneightnine")
onetoten = onetonine + len("ten")
eleventotwenty = len("eleventwelvethirteenfourteenfifteensixteenseventeeneighteennineteen")
twenties = len("twenty")*10 + onetonine
thirties = len("thirty")*10 + onetonine
forties = len("forty")*10 + onetonine
fifties = len("fifty")*10 + onetonine
sixties = len("sixty")*10 + onetonine
seventies = len("seventy")*10 + onetonine
eighties = len("eighty")*10 + onetonine
nineties = len("ninety")*10 + onetonine
hundred = onetoten + eleventotwenty + twenties + thirties + forties + fifties + sixties + seventies + eighties + nineties
onehundreds = len("onehundredand") * 100 - 3 + hundred
twohundreds = len("twohundredand") * 100 - 3 + hundred
threehundreds = len("threehundredand") * 100 - 3 + hundred
fourhundreds = len("fourhundredand") * 100 - 3 + hundred
fivehundreds = len("fivehundredand") * 100 - 3 + hundred
sixhundreds = len("sixhundredand") * 100 - 3 + hundred
sevenhundreds = len("sevenhundredand") * 100 - 3 + hundred
eighthundreds = len("eighthundredand") * 100 - 3 + hundred
ninehundreds = len("ninehundredand") * 100 - 3 + hundred
thousands = len("onethousand") + ninehundreds + eighthundreds + sevenhundreds +\
sixhundreds + fivehundreds + fourhundreds + threehundreds + twohundreds +\
onehundreds + hundred
print thousands
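# Prints the total letter count for one through one thousand (Project Euler 17: 21124).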
| [
"john@noswap.com"
] | john@noswap.com |
dec6b3e4df26199d84d883efb11adff47ef6e9cb | df60bc5a9c27b54b95568a9f04102785c7bc12c2 | /samples/resnet-cmle/resnet-train-pipeline.py | b709051693e09c6c968564be030ee8dbd2486eb4 | [
"Apache-2.0"
] | permissive | Anthonymcqueen21/pipelines | 1d9d0ae20ebd43b590b88d3f9cbb398f21488be4 | f0c8432748e5c013451dcac8e1ee3aee0bb415cc | refs/heads/master | 2020-06-14T09:45:10.246821 | 2019-07-03T00:11:13 | 2019-07-03T00:11:13 | 194,972,318 | 1 | 0 | Apache-2.0 | 2019-07-03T03:22:36 | 2019-07-03T03:22:36 | null | UTF-8 | Python | false | false | 5,903 | py | #!/usr/bin/env python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp.dsl as dsl
import kfp.gcp as gcp
import kfp.components as comp
import datetime
import json
import os
dataflow_python_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/2e52e54166795d20e92d287bde7b800b181eda02/components/gcp/dataflow/launch_python/component.yaml')
cloudml_train_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/2e52e54166795d20e92d287bde7b800b181eda02/components/gcp/ml_engine/train/component.yaml')
cloudml_deploy_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/2e52e54166795d20e92d287bde7b800b181eda02/components/gcp/ml_engine/deploy/component.yaml')
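# The Dataflow and Cloud ML Engine steps are loaded from pinned, reusable
# component.yaml definitions so the pipeline stays reproducible.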
def resnet_preprocess_op(project_id: 'GcpProject', output: 'GcsUri', staging_dir: 'GcsUri', train_csv: 'GcsUri[text/csv]',
validation_csv: 'GcsUri[text/csv]', labels, train_size: 'Integer', validation_size: 'Integer',
step_name='preprocess'):
return dataflow_python_op(
python_file_path='gs://ml-pipeline-playground/samples/ml_engine/resnet-cmle/preprocess/preprocess.py',
project_id=project_id,
requirements_file_path='gs://ml-pipeline-playground/samples/ml_engine/resnet-cmle/preprocess/requirements.txt',
staging_dir=staging_dir,
args=json.dumps([
'--train_csv', str(train_csv),
'--validation_csv', str(validation_csv),
'--labels', str(labels),
'--output_dir', str(output),
'--train_size', str(train_size),
'--validation_size', str(validation_size)
])
)
def resnet_train_op(project_id, data_dir, output: 'GcsUri', region: 'GcpRegion', depth: int, train_batch_size: int,
eval_batch_size: int, steps_per_eval: int, train_steps: int, num_train_images: int,
num_eval_images: int, num_label_classes: int, tf_version, step_name='train'):
return cloudml_train_op(
project_id=project_id,
region='us-central1',
python_module='trainer.resnet_main',
package_uris=json.dumps(
['gs://ml-pipeline-playground/samples/ml_engine/resnet-cmle/trainer/trainer-1.0.tar.gz']),
job_dir=output,
args=json.dumps([
'--data_dir', str(data_dir),
'--model_dir', str(output),
'--use_tpu', 'True',
'--resnet_depth', str(depth),
'--train_batch_size', str(train_batch_size),
'--eval_batch_size', str(eval_batch_size),
'--steps_per_eval', str(steps_per_eval),
'--train_steps', str(train_steps),
'--num_train_images', str(num_train_images),
'--num_eval_images', str(num_eval_images),
'--num_label_classes', str(num_label_classes),
'--export_dir', '{}/export'.format(str(output))
]),
runtime_version=tf_version,
training_input=json.dumps({
'scaleTier': 'BASIC_TPU'
})
)
def resnet_deploy_op(model_dir, model, version, project_id: 'GcpProject', region: 'GcpRegion',
tf_version, step_name='deploy'):
# TODO(hongyes): add region to model payload.
return cloudml_deploy_op(
model_uri=model_dir,
project_id=project_id,
model_id=model,
version_id=version,
runtime_version=tf_version,
replace_existing_version='True'
)
@dsl.pipeline(
name='ResNet_Train_Pipeline',
description='Demonstrate the ResNet50 predict.'
)
def resnet_train(
project_id,
output,
region='us-central1',
model='bolts',
version='beta1',
tf_version='1.12',
train_csv='gs://bolts_image_dataset/bolt_images_train.csv',
validation_csv='gs://bolts_image_dataset/bolt_images_validate.csv',
labels='gs://bolts_image_dataset/labels.txt',
depth=50,
train_batch_size=1024,
eval_batch_size=1024,
steps_per_eval=250,
train_steps=10000,
num_train_images=218593,
num_eval_images=54648,
num_label_classes=10):
output_dir = os.path.join(str(output), '{{workflow.name}}')
preprocess_staging = os.path.join(output_dir, 'staging')
preprocess_output = os.path.join(output_dir, 'preprocessed_output')
train_output = os.path.join(output_dir, 'model')
preprocess = resnet_preprocess_op(project_id, preprocess_output, preprocess_staging, train_csv,
validation_csv, labels, train_batch_size, eval_batch_size).apply(gcp.use_gcp_secret())
train = resnet_train_op(project_id, preprocess_output, train_output, region, depth, train_batch_size,
eval_batch_size, steps_per_eval, train_steps, num_train_images, num_eval_images,
num_label_classes, tf_version).apply(gcp.use_gcp_secret())
train.after(preprocess)
export_output = os.path.join(str(train.outputs['job_dir']), 'export')
deploy = resnet_deploy_op(export_output, model, version, project_id, region,
tf_version).apply(gcp.use_gcp_secret())
if __name__ == '__main__':
import kfp.compiler as compiler
compiler.Compiler().compile(resnet_train, __file__ + '.zip')
| [
"k8s-ci-robot@users.noreply.github.com"
] | k8s-ci-robot@users.noreply.github.com |
ce8d7ce0b7077bb3fb89194e7b211332cc888836 | 533931c3d15020c6d1aab46f07a602b8257dada3 | /cs3311/ass3/bio | b8c136726899aaf277beacafbc72fb4bec47bd7a | [] | no_license | JoeZhao527/database-system | 9076b1ce6a978b8dfb461bb1d1da69ab887ff68e | 1bfdd9f536f1d67f487ed187aa77687e15192560 | refs/heads/main | 2023-03-24T13:09:56.117135 | 2021-03-19T01:47:15 | 2021-03-19T01:47:15 | 300,784,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,266 | #!/usr/bin/python3
# COMP3311 20T3 Ass3 ... get Name's biography/filmography
import sys
import psycopg2
#from helpers import whatever, functions, you, need
# define any local helper functions here
# set up some globals
usage = "Usage: bio 'NamePattern' [Year]"
db = None
pattern = None
year = None
input = None
# process command-line args
argc = len(sys.argv)
if argc == 2:
pattern = sys.argv[1]
input = '\''+pattern+'\''
elif argc == 3 and (sys.argv[2]).isdigit():
pattern = sys.argv[1]
year = sys.argv[2]
input = '\''+pattern+'\''+' '+str(year)
else:
print(usage)
exit()
# manipulate database
select_names1 = """
select name, birth_year, death_year, id
from Names n
where n.name ~* %s
order by name, birth_year, n.id;
"""
select_names2 = """
select name, birth_year, death_year, id
from Names n
where n.name ~* %s and birth_year = %s
order by name, birth_year, n.id;
"""
select_movies = """
select m.title, m.start_year, m.id
from Movies m
join Principals p on (m.id = p.movie_id)
where p.name_id = %s
order by m.start_year, m.title
"""
select_actors = """
select n.name, a.played
from Movies m
join Acting_roles a on (m.id = a.movie_id)
join Principals p on (m.id = p.movie_id and a.name_id = p.name_id)
join Names n on (n.id = a.name_id)
where m.id = %s and n.id = %s
order by p.ordering, a.played
"""
select_crews = """
select n.name, c.role
from Movies m
join Crew_roles c on (m.id = c.movie_id)
join Principals p on (m.id = p.movie_id and c.name_id = p.name_id)
join Names n on (n.id = c.name_id)
where m.id = %s and n.id = %s
order by p.ordering, c.role
"""
try:
db = psycopg2.connect("dbname=imdb")
# ... add your code here ...
cur = db.cursor()
    # Execute the matching query once and buffer the rows instead of running it twice.
    if year is None:
        cur.execute(select_names1, [pattern])
    else:
        cur.execute(select_names2, (pattern, year))
    name_rows = cur.fetchall()
    num_names = len(name_rows)
if num_names == 0:
print('No name matching '+input)
elif num_names == 1:
name_id = None
        for names in name_rows:
if names[1] == None:
print('Filmography for '+names[0], '(???)')
elif names[2] == None:
print('Filmography for '+names[0], '('+str(names[1])+'-)')
else:
print('Filmography for '+names[0], '('+str(names[1])+'-'+str(names[2])+')')
name_id = names[3]
print('===============')
cur.execute(select_movies, [name_id])
for movies in cur.fetchall():
print(movies[0], '('+str(movies[1])+')')
cur.execute(select_actors, (movies[2], name_id))
for acting in cur.fetchall():
print(' playing', acting[1])
cur.execute(select_crews, (movies[2], name_id))
for crewing in cur.fetchall():
print(' as', (crewing[1].capitalize()).replace('_', ' '))
else:
print('Names matching '+input)
print('===============')
        for names in name_rows:
if names[1] == None:
print(names[0], '(???)')
elif names[2] == None:
print(names[0], '('+str(names[1])+'-)')
else:
print(names[0], '('+str(names[1])+'-'+str(names[2])+')')
except psycopg2.Error as err:
print("DB error: ", err)
finally:
if db:
db.close()
| [
"email@example.com"
] | email@example.com | |
63a8b851714d15cf5ac6f0ef5c8c7df8979f7929 | 0bb474290e13814c2498c086780da5096453da05 | /tenka1-2017/D/main.py | 471f525f95101f269499fbde1e4d56de0f16212b | [] | no_license | ddtkra/atcoder | 49b6205bf1bf6a50106b4ae94d2206a324f278e0 | eb57c144b5c2dbdd4abc432ecd8b1b3386244e30 | refs/heads/master | 2022-01-25T15:38:10.415959 | 2020-03-18T09:22:08 | 2020-03-18T09:22:08 | 208,825,724 | 1 | 0 | null | 2022-01-21T20:10:20 | 2019-09-16T14:51:01 | Python | UTF-8 | Python | false | false | 754 | py | #!/usr/bin/env python3
import sys
def solve(N: int, K: int, A: "List[int]", B: "List[int]"):
return
# Generated by 1.1.4 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
N = int(next(tokens)) # type: int
K = int(next(tokens)) # type: int
A = [int()] * (N) # type: "List[int]"
B = [int()] * (N) # type: "List[int]"
for i in range(N):
A[i] = int(next(tokens))
B[i] = int(next(tokens))
solve(N, K, A, B)
if __name__ == '__main__':
main()
| [
"deritefully@gmail.com"
] | deritefully@gmail.com |
132e6dd5345800e39acd1da16a86ddfc6215c444 | b47e438f1be149c5b339eb0d7e114d98fb986ad0 | /week-07/profiling/examples/pygame/swarm.py | c2afb0544dd9b3dbbc117f8ac3f424ce0d827e07 | [] | no_license | kstager/Python300-SystemDevelopmentWithPython-Spring-2014 | fb97ac425b09df34c00dc480ed3c263742d47d7f | ed37a50a8ea7c308a081bc87c8bea71520221a5e | refs/heads/master | 2021-01-22T15:22:07.197138 | 2014-07-22T13:13:43 | 2014-07-22T13:13:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,929 | py | import math
import pygame
import random
import sys
# from meliae import scanner
# scanner.dump_all_objects("meliae.dump") # you can pass a file-handle if you prefer
NUMBER_OF_SPHERES = 150
size = width, height = 800, 600
pygame.init()
black = 0, 0, 0
screen = pygame.display.set_mode(size)
class Sphere(object):
def __init__(self):
self.ball = pygame.image.load("ball.gif")
self.x = random.random() * width
self.y = random.random() * height
vx = 150*(random.random() - .5)
vy = 150*(random.random() - .5)
self.v = [vx, vy]
def update_v(self, other ):
"""update v with gravitational force of other"""
d = math.sqrt( (self.x - other.x)**2 + (self.y - other.y)**2)
v = ((other.x - self.x), (other.y - self.y))
f = map(lambda x: 200 * x / (d*d), v)
self.v = [self.v[0] + f[0], self.v[1] + f[1]]
def move(self, speed):
self.x = self.x + self.v[0] * speed
self.y = self.y + self.v[1] * speed
def draw(self):
screen.blit(self.ball, (self.x, self.y))
class Sun(Sphere):
def __init__(self):
self.ball = pygame.image.load("sun.gif")
self.x = width / 2.0
self.y = height / 2.0
self.v = [0,0]
if __name__ == "__main__":
sun = Sun()
titlebar = pygame.Rect(0,0,200, 100)
clock = pygame.time.Clock()
spheres = [Sphere() for i in xrange(NUMBER_OF_SPHERES)]
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT: sys.exit()
screen.fill(black)
dt = clock.tick(40)
fps = clock.get_fps()
speed = 1 / float(dt)
for sphere in spheres:
sphere.update_v(sun)
sphere.move(speed)
sphere.draw()
sun.draw()
pygame.draw.rect(screen, (0,0,0), titlebar)
# screen.blit(label, (10, 10))
pygame.display.flip()
| [
"joseph.sheedy@gmail.com"
] | joseph.sheedy@gmail.com |
1ad6aa2c13b4180254111deb127f4c13e2d27af4 | f91474e528ca517f9e81b9dbb50894f2f958f213 | /party.py | def8619f9f28b52d7caa00ba4d9c66de562763db | [] | no_license | daminiamin/Testing-Balloonicorn-s-After-Party-Unit-Test | 8d537af37806486ebdb19f7b7347c2a75d7ba323 | 03fb2160743fbabf45d9a69aed8bcc4b38ed0dd1 | refs/heads/master | 2020-03-30T22:04:12.660895 | 2018-10-05T00:40:21 | 2018-10-05T00:40:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,017 | py | """Flask site for Balloonicorn's Party."""
from flask import Flask, session, render_template, request, flash, redirect
from flask_debugtoolbar import DebugToolbarExtension
from model import Game, connect_to_db
app = Flask(__name__)
app.secret_key = "SECRETSECRETSECRET"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
@app.route("/")
def homepage():
"""Show homepage."""
return render_template("homepage.html")
@app.route("/rsvp", methods=['POST'])
def rsvp():
"""Register for the party."""
name = request.form.get("name")
email = request.form.get("email")
session['RSVP'] = True
flash("Yay!")
return redirect("/")
@app.route("/games")
def games():
    if not session.get('RSVP'):
        flash("Please RSVP first!")
        return redirect("/")
    games = Game.query.all()
    return render_template("games.html", games=games)
if __name__ == "__main__":
app.debug = True
app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
DebugToolbarExtension(app)
connect_to_db(app, "postgresql:///games")
app.run()
| [
"no-reply@hackbrightacademy.com"
] | no-reply@hackbrightacademy.com |
aef71039dde801227a1aaf2d7d3319adaa324b95 | ab6015247185ad2f7440d57aa1215ff25842a996 | /account/models.py | 4dee82653791f8061b1a8cb306186ce1f6af311b | [] | no_license | Johnson-xie/rurality | 7fc5c7c5b941989f1c50cef944f3f4d94ac39fe7 | cc35c8b0610c097db17ed1de554171737466e7b6 | refs/heads/master | 2023-02-20T15:48:46.233989 | 2021-01-23T11:04:16 | 2021-01-23T11:04:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,934 | py | from django.db import models
from django.core.signing import TimestampSigner
from django.contrib.auth.hashers import make_password
from django.contrib.auth.hashers import check_password
from base.models import BaseModel
class UserModel(BaseModel):
'''
    User table
'''
model_name = '用户'
model_sign = 'user'
TYP_NORMAL = 10
TYP_LDAP = 20
TYP_CHOICES = (
(TYP_NORMAL, '标准用户'),
(TYP_LDAP, 'LDAP用户'),
)
ST_NORMAL = 10
ST_FORBIDDEN = 20
ST_CHOICES = (
(ST_NORMAL, '正常'),
(ST_FORBIDDEN, '禁用'),
)
username = models.CharField('账户', max_length=128, db_index=True)
password = models.CharField('密码', max_length=256, null=True, default=None)
name = models.CharField('姓名', max_length=128, default='')
email = models.CharField('邮箱', max_length=128, null=True, default='')
phone = models.CharField('联系方式', max_length=64, null=True, default='')
status = models.IntegerField('状态', choices=ST_CHOICES, default=ST_NORMAL)
typ = models.SmallIntegerField('类型', choices=TYP_CHOICES, default=TYP_NORMAL)
class Meta:
db_table = 'user'
def to_dict(self):
'''
        User info; the password is not returned
'''
data = super().to_dict()
data.pop('password')
return data
def set_password(self, password):
'''
        Set the password
'''
self.password = make_password(password)
self.save()
def check_password(self, password):
'''
        Check the password
'''
return check_password(password, self.password)
def gen_token(self):
'''
        Generate a token for API authentication
'''
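        # TimestampSigner output looks like "<id>:<timestamp>:<signature>",
        # letting the API later verify both authenticity and token age.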
signer = TimestampSigner()
token = signer.sign(self.id)
return token
class RoleModel(BaseModel):
'''
    Role table
'''
model_name = '角色'
model_sign = 'role'
TYP_SYSTEM = 10
TYP_NORMAL = 20
TYP_CHOICES = (
(TYP_SYSTEM, '系统角色'),
(TYP_NORMAL, '普通角色'),
)
name = models.CharField('角色名', max_length=32)
typ = models.IntegerField('类型', choices=TYP_CHOICES, default=TYP_NORMAL)
sign = models.CharField('标识', max_length=32)
class Meta:
db_table = 'role'
class RoleUserModel(BaseModel):
'''
    Role-user relation table
'''
model_name = '角色关联用户'
model_sign = 'role_user'
role = models.ForeignKey(RoleModel, on_delete=models.CASCADE)
user = models.ForeignKey(UserModel, on_delete=models.CASCADE)
class Meta:
db_table = 'role_user'
class DepartmentModel(BaseModel):
'''
    Department
'''
model_name = '部门'
model_sign = 'department'
name = models.CharField('名称', max_length=32)
sign = models.CharField('标识', max_length=32)
class Meta:
db_table = 'department'
class DepartmentUserModel(BaseModel):
'''
    Department-user relation
'''
model_name = '部门关联用户'
model_sign = 'department_user'
TYP_MANAGER = 10
TYP_MEMBER = 20
TYP_CHOICES = (
(TYP_MANAGER, '部门负责人'),
(TYP_MEMBER, '普通成员'),
)
user = models.ForeignKey(UserModel, on_delete=models.CASCADE)
department = models.ForeignKey(DepartmentModel, on_delete=models.CASCADE)
typ = models.SmallIntegerField('类型', choices=TYP_CHOICES, default=TYP_MEMBER)
class Meta:
db_table = 'department_user'
class ModModel(BaseModel):
'''
    Module table
'''
model_name = '模块'
model_sign = 'mod'
name = models.CharField('模块名', max_length=32)
sign = models.CharField('唯一标识', max_length=32)
rank = models.IntegerField('排序')
class Meta:
db_table = 'mod'
class PermissionModel(BaseModel):
'''
    Permission
'''
model_name = '权限'
model_sign = 'permission'
TYP_OP = 10
TYP_DATA = 20
TYP_CHOICES = (
(TYP_OP, '操作权限'),
(TYP_DATA, '数据权限'),
)
mod = models.ForeignKey(ModModel, on_delete=models.CASCADE, null=True)
name = models.CharField('权限名', max_length=128)
typ = models.SmallIntegerField('类型', choices=TYP_CHOICES)
sign = models.CharField('唯一标识', max_length=128)
rank = models.IntegerField('排序')
class Meta:
db_table = 'permission'
class RoleModModel(BaseModel):
'''
    Role-module relation
'''
model_name = '角色关联模块'
model_sign = 'role_mod'
role = models.ForeignKey(RoleModel, on_delete=models.CASCADE)
mod = models.ForeignKey(ModModel, on_delete=models.CASCADE)
class Meta:
db_table = 'role_mod'
class RolePermissionModel(BaseModel):
'''
    Role-permission relation
'''
model_name = '角色关联权限'
model_sign = 'role_permission'
role = models.ForeignKey(RoleModel, on_delete=models.CASCADE)
permission = models.ForeignKey(PermissionModel, on_delete=models.CASCADE)
class Meta:
db_table = 'role_permission'
class LdapConfigModel(BaseModel):
'''
    LDAP configuration
'''
model_name = 'LDAP服务配置'
model_sign = 'ldap_config'
# 类似这样格式:ldap://ldap.oldb.top:389
host = models.CharField('地址', max_length=128)
# ldap管理员账号DN:类似这样cn=admin,dc=oldb,dc=top
admin_dn = models.CharField('管理员DN', max_length=128)
admin_password = models.CharField('管理员密码', max_length=128)
# 所有用户在此节点下
member_base_dn = models.CharField('用户基础DN', max_length=128)
class Meta:
db_table = 'ldap_config'
@classmethod
def none_to_dict(cls):
'''
        Content returned when no config exists
'''
data = {
'host': '',
'admin_dn': '',
'admin_password': '',
'member_base_dn': '',
}
return data
| [
"boxingxing@limikeji.com"
] | boxingxing@limikeji.com |
f8ff31230d296d8d52bafc61867337d61507ab7e | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_airfares.py | 1310f1c3cdc33bc1f52a56002ac21d34a724d64c | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py |
from xai.brain.wordbase.nouns._airfare import _AIRFARE
# class header
class _AIRFARES(_AIRFARE, ):
def __init__(self,):
_AIRFARE.__init__(self)
self.name = "AIRFARES"
self.specie = 'nouns'
self.basic = "airfare"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
aaa7a305b09f42ba724822ff74db0d1d28f9b1a9 | 2b42b40ae2e84b438146003bf231532973f1081d | /spec/mgm4456289.3.spec | e4fd503864c25acf155653f1567ea71821f0b96a | [] | no_license | MG-RAST/mtf | 0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a | e2ddb3b145068f22808ef43e2bbbbaeec7abccff | refs/heads/master | 2020-05-20T15:32:04.334532 | 2012-03-05T09:51:49 | 2012-03-05T09:51:49 | 3,625,755 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,942 | spec | {
"id": "mgm4456289.3",
"metadata": {
"mgm4456289.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 12024,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 307,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 726,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/100.preprocess.removed.fna.gz"
},
"100.preprocess.removed.fna.stats": {
"compression": null,
"description": null,
"size": 305,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/100.preprocess.removed.fna.stats"
},
"205.screen.h_sapiens_asm.info": {
"compression": null,
"description": null,
"size": 446,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/205.screen.h_sapiens_asm.info"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 579,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 11217,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 199,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 307,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/299.screen.passed.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 2968,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 305,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 22792,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 46,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 569,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 2810,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 78005,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/450.rna.sims.gz"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 38,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 347,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 43,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 536,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 286138,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 79,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 44,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 12,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 54,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 65,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 56,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 18,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 18931,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 78,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 99,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456289.3/file/999.done.species.stats"
}
},
"id": "mgm4456289.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4456289.3"
}
},
"raw": {
"mgm4456289.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4456289.3"
}
}
} | [
"jared.wilkening@gmail.com"
] | jared.wilkening@gmail.com |
aeb7b0b5e01210faa7989db07f6777d5519f089a | dd116fe1e94191749ab7a9b00be25bfd88641d82 | /cairis/cairis/VulnerabilityEnvironmentProperties.py | a7407040d88f84b7fe623b7bdd058f5ff946d5d7 | [
"Apache-2.0"
] | permissive | RobinQuetin/CAIRIS-web | fbad99327707ea3b995bdfb4841a83695989e011 | 4a6822db654fecb05a09689c8ba59a4b1255c0fc | HEAD | 2018-12-28T10:53:00.595152 | 2015-06-20T16:53:39 | 2015-06-20T16:53:39 | 33,935,403 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,181 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from EnvironmentProperties import EnvironmentProperties
class VulnerabilityEnvironmentProperties(EnvironmentProperties):
def __init__(self,environmentName,severity,assets):
EnvironmentProperties.__init__(self,environmentName)
self.theSeverity = severity
self.theAssets = assets
def severity(self): return self.theSeverity
def assets(self): return self.theAssets
| [
"shamal.faily@googlemail.com"
] | shamal.faily@googlemail.com |
ed6b7a8c5f2057dacf6492198f9233cc8714b552 | 6aa406767612c3eec15c9dcd26ce49841c1555bb | /ADK_6.2.43/audio/kalimba/kymera/tools/KCSMaker/downloadFile.py | 88d7254055bcb3d572c7b9762266ccc7f436cbba | [] | no_license | hongshui3000/BluetoothEarbud | 0bb483ca3c19f9c4b317a6ef1f4e8a9712313dd1 | b173ec3666c9e02d115d52d301b74fcd2d08cb47 | refs/heads/master | 2021-04-06T12:42:57.286099 | 2018-03-06T01:55:33 | 2018-03-06T01:55:33 | 124,361,091 | 1 | 7 | null | 2018-03-08T08:30:02 | 2018-03-08T08:30:01 | null | UTF-8 | Python | false | false | 2,569 | py | ############################################################################
# CONFIDENTIAL
#
# Copyright (c) 2015 - 2017 Qualcomm Technologies International, Ltd.
#
############################################################################
import types
class downloadFile(list):
"""Container format that extends the built-in list to inclue comment strings
for the output kdc file."""
def append(self, value, comment=""):
"""Extend the built-in list append method to include the comment.
Note the behaviour of this method is different to the built-in method
when handling lists. In this case the append method behaves as extend
does."""
if type(value) == types.ListType:
list.append(self, (value[0], comment))
self.extend(value[1:])
else:
list.append(self, (value, comment))
def extend(self, value, comment=""):
"Extend the built-in list extend method to include the comment."
if len(value) == 0:
return
self.append(value[0], comment)
list.extend( self, map( lambda x: (x, ""), value[1:] ) )
def dumpToTextFile(self, fileT, write_mode="w", index=0):
with open(fileT, write_mode) as f:
for i in range(len(self)):
if self[i][1] != "":
f.write("%06X %04X # %s\n" % (index + i, self[i][0], self[i][1]))
else:
f.write("%06X %04X\n" % (index + i, self[i][0]))
return index + len(self)
def dumpToBinaryFile(self, fileB, write_mode="wb"):
with open(fileB, write_mode) as f:
f.write( "".join( map( lambda (k,s): "%c%c" % (k>>8, k&0xFF), self ) ) )
def dumpToDRAMFile(self, fileD, write_mode="w"):
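        # Pairs of 16-bit words are packed into 32-bit hex values, written after
        # a "@40000000 <count>" header; any unpaired trailing word is returned.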
with open(fileD, write_mode) as f:
word32 = ""
words16bit = map( lambda (k,s): "%04x" % (k), self )
words32bit = []
wordready = False
for word16 in words16bit:
word32 = word32 + word16
if wordready == True:
words32bit.append("0x" + word32)
wordready = False
word32 = ""
else:
wordready = True
size = len(words32bit)
words32bit.insert(0, "@40000000 " + str(size))
f.write( "\n".join( words32bit ) )
f.write("\n")
# return whether we left a 16 bit word behind
return wordready, word16, len(words32bit) | [
"chaw.meng@geortek.com"
] | chaw.meng@geortek.com |
806c0d4bfb02f750662a45711cbf31d002796237 | 205861f3dc228f78eb6544ef5ed987175938eb84 | /celery/wait_rabbitmq.py | eb3b23a6c7d7cd6ca05112587be942d710b9cd5a | [] | no_license | Muzque/nicetomeetyou | c2e0e4e9fdccdf4969b1b721ad95654e486da84b | baf2db0d2a60ab51fce956f70c9acc55a3181bf9 | refs/heads/master | 2020-04-05T06:25:24.316156 | 2018-11-11T11:08:01 | 2018-11-11T11:08:01 | 156,637,397 | 0 | 0 | null | 2018-11-08T02:16:50 | 2018-11-08T02:16:49 | null | UTF-8 | Python | false | false | 1,169 | py | import argparse
import pika
import time
parser = argparse.ArgumentParser(description='Check connection.')
parser.add_argument('--server', default='rabbitmq')
parser.add_argument('--virtual_host', default='celery_vhost')
parser.add_argument('--ssl', action='store_true')
parser.add_argument('--port', type=int, default='5672')
parser.add_argument('--username', default='celery')
parser.add_argument('--password', default='pw123456')
args = vars(parser.parse_args())
credentials = pika.PlainCredentials(args['username'], args['password'])
parameters = pika.ConnectionParameters(host=args['server'],
port=args['port'],
virtual_host=args['virtual_host'],
credentials=credentials,
ssl=args['ssl'])
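# Poll until the broker accepts a connection; pika raises while RabbitMQ is
# still starting up, so we just log, sleep, and retry.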
while True:
try:
connection = pika.BlockingConnection(parameters)
if connection.is_open:
print("RabbitMQ successful connected.")
connection.close()
break
except Exception as e:
print("RabbitMQ not responds... :{}".format(e))
time.sleep(1.0)
| [
"zerosky1943@gmail.com"
] | zerosky1943@gmail.com |
a7cc0d1ab36a3ee26d3b0a4230bda54f1ea0db08 | 246e9200a834261eebcf1aaa54da5080981a24ea | /ctci/arrays-and-strings/1-unique-characters-in-string.py | 73a46444484f9c5f684bb78e31fca563eff94d32 | [] | no_license | kalsotra2001/practice | db435514b7b57ce549b96a8baf64fad8f579da18 | bbc8a458718ad875ce5b7caa0e56afe94ae6fa68 | refs/heads/master | 2021-12-15T20:48:21.186658 | 2017-09-07T23:01:56 | 2017-09-07T23:01:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | import sys
def unique(s):
char = [False] * 128
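    # Pigeonhole: a string longer than 128 chars must repeat an ASCII character.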
if len(s) > 128:
return False
for c in s:
if char[ord(c)] == True:
return False
else:
char[ord(c)] = True
return True
s = sys.stdin.readline().strip()
if unique(s):
print "Unique."
else:
print "Not unique." | [
"jacquelineluo95@gmail.com"
] | jacquelineluo95@gmail.com |
aa686c33f11968bbdc3d862cf0fce99117109547 | b76e39e535499704368eddc26237dc0016ef7d06 | /LF/fn_ProportionValve_V3.py | 5557424e4d8b523e9634e2cbfadd48247997997f | [] | no_license | BUBAIMITRA2018/castersimulation | 0532e53df7d346c2824e577cc91cd0ac2ce4694c | eca5fddff5c0f33f785168f6b1e9f572c1622be0 | refs/heads/master | 2022-12-10T02:45:04.207196 | 2020-09-09T05:35:54 | 2020-09-09T05:35:54 | 260,110,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,700 | py | import time
from event_V2 import *
from clientcomm_v1 import *
from readgeneral_v2 import *
from writegeneral_v2 import *
import logging
import gc
logger = logging.getLogger("main.log")
__all__ = ['FN_ProportionalValve']
class FN_ProportionalValve(Eventmanager):
def __init__(self,com,df,idxNo,filename):
self._idxNo =idxNo
self.filename = filename
self.gen = com
self._positionsp = 0.0
self.df = df
self.devicename = df.iloc[self._idxNo, 0]
self.setup()
self.initilizedigitalinput()
super().__init__(lambda: self.Proportionalprocess())
def setup(self):
try:
for tag,col in self.readalltags():
if col==3:
self.areatag = str(tag)
if col == 4:
self.possetpointtag = str(tag)
if col == 5:
self.upposlimitswtag = str(tag)
if col == 6:
self.downposlimitswtag =str(tag)
except Exception as e:
level = logging.ERROR
messege = "FN_ProportionalValve" + self.devicename + " Error messege(setup)" + str(e.args)
logger.log(level, messege)
log_exception(e)
def initilizedigitalinput(self):
try:
self.Proportionalprocess()
except Exception as e:
level = logging.ERROR
messege = "FN_ProportionalValve" + self.devicename + " Error messege(initilization)" + str(e.args)
logger.log(level, messege)
def Proportionalprocess(self):
try:
client = Communication()
sta_con_plc = client.opc_client_connect(self.filename)
readgeneral = ReadGeneral(sta_con_plc)
writegeneral = WriteGeneral(sta_con_plc)
self.currentvalue = readgeneral.readsymbolvalue(self.possetpointtag, 'S7WLWord', 'PA')
print("proportional valve start")
print("current value is ", self.currentvalue)
if self.currentvalue == 8294:
writegeneral.writesymbolvalue(self.upposlimitswtag, 0, 'S7WLBit')
time.sleep(1)
writegeneral.writesymbolvalue(self.downposlimitswtag, 1, 'S7WLBit')
level = logging.WARNING
messege = self.devicename + ":" + self.downposlimitswtag + " value is" + "1"
logger.log(level, messege)
if self.currentvalue == 19353:
writegeneral.writesymbolvalue(self.downposlimitswtag, 0, 'S7WLBit')
time.sleep(5)
writegeneral.writesymbolvalue(self.upposlimitswtag, 1, 'S7WLBit')
level = logging.WARNING
messege = self.devicename + ":" + self.downposlimitswtag + " value is" + "1"
logger.log(level, messege)
sta_con_plc.disconnect()
gc.collect()
except Exception as e:
level = logging.ERROR
messege = "FN_ProportionalValve" + self.devicename + " Error messege(process)" + str(e.args)
logger.log(level, messege)
log_exception(e)
@property
def PosSetpoint(self):
return self._positionsp
@PosSetpoint.setter
def PosSetpoint(self, value):
if value != self._positionsp:
super().fire()
self._positionsp = value
@property
def areaname(self):
return self.areatag
def readalltags(self):
n = 3
row, col = self.df.shape
print(col)
while n < col:
data = self.df.iloc[self._idxNo, n]
yield data,n
n = n + 1
| [
"subrata.mitra@sms-group.com"
] | subrata.mitra@sms-group.com |
860b79c446c1239bedd54c874410e6b544f97def | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/-57053121/win32net.py | ff9e6a1b782e3fcae5c883d127212d2b12f61488 | [] | no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,797 | py | # encoding: utf-8
# module win32net
# from C:\Users\Doly\Anaconda3\lib\site-packages\win32\win32net.pyd
# by generator 1.147
""" A module encapsulating the Windows Network API. """
# imports
from pywintypes import error
# Variables with simple values
SERVICE_SERVER = 'LanmanServer'
SERVICE_WORKSTATION = 'LanmanWorkstation'
USE_FORCE = 1
USE_LOTS_OF_FORCE = 2
USE_NOFORCE = 0
# functions
def NetFileClose(*args, **kwargs): # real signature unknown
pass
def NetFileEnum(*args, **kwargs): # real signature unknown
pass
def NetFileGetInfo(*args, **kwargs): # real signature unknown
pass
def NetGetAnyDCName(*args, **kwargs): # real signature unknown
pass
def NetGetDCName(*args, **kwargs): # real signature unknown
pass
def NetGetJoinInformation(*args, **kwargs): # real signature unknown
pass
def NetGroupAdd(*args, **kwargs): # real signature unknown
pass
def NetGroupAddUser(*args, **kwargs): # real signature unknown
pass
def NetGroupDel(*args, **kwargs): # real signature unknown
pass
def NetGroupDelUser(*args, **kwargs): # real signature unknown
pass
def NetGroupEnum(*args, **kwargs): # real signature unknown
pass
def NetGroupGetInfo(*args, **kwargs): # real signature unknown
pass
def NetGroupGetUsers(*args, **kwargs): # real signature unknown
pass
def NetGroupSetInfo(*args, **kwargs): # real signature unknown
pass
def NetGroupSetUsers(*args, **kwargs): # real signature unknown
pass
def NetLocalGroupAdd(*args, **kwargs): # real signature unknown
pass
def NetLocalGroupAddMembers(*args, **kwargs): # real signature unknown
pass
def NetLocalGroupDel(*args, **kwargs): # real signature unknown
pass
def NetLocalGroupDelMembers(*args, **kwargs): # real signature unknown
pass
def NetLocalGroupEnum(*args, **kwargs): # real signature unknown
pass
def NetLocalGroupGetInfo(*args, **kwargs): # real signature unknown
pass
def NetLocalGroupGetMembers(*args, **kwargs): # real signature unknown
pass
def NetLocalGroupSetInfo(*args, **kwargs): # real signature unknown
pass
def NetLocalGroupSetMembers(*args, **kwargs): # real signature unknown
pass
def NetMessageBufferSend(*args, **kwargs): # real signature unknown
pass
def NetMessageNameAdd(*args, **kwargs): # real signature unknown
pass
def NetMessageNameDel(*args, **kwargs): # real signature unknown
pass
def NetMessageNameEnum(*args, **kwargs): # real signature unknown
pass
def NetServerComputerNameAdd(*args, **kwargs): # real signature unknown
pass
def NetServerComputerNameDel(*args, **kwargs): # real signature unknown
pass
def NetServerDiskEnum(*args, **kwargs): # real signature unknown
pass
def NetServerEnum(*args, **kwargs): # real signature unknown
pass
def NetServerGetInfo(*args, **kwargs): # real signature unknown
pass
def NetServerSetInfo(*args, **kwargs): # real signature unknown
pass
def NetSessionDel(*args, **kwargs): # real signature unknown
pass
def NetSessionEnum(*args, **kwargs): # real signature unknown
pass
def NetSessionGetInfo(*args, **kwargs): # real signature unknown
pass
def NetShareAdd(*args, **kwargs): # real signature unknown
pass
def NetShareCheck(*args, **kwargs): # real signature unknown
pass
def NetShareDel(*args, **kwargs): # real signature unknown
pass
def NetShareEnum(*args, **kwargs): # real signature unknown
""" Obsolete Function,Level 1 call """
pass
def NetShareGetInfo(*args, **kwargs): # real signature unknown
pass
def NetShareSetInfo(*args, **kwargs): # real signature unknown
pass
def NetStatisticsGet(*args, **kwargs): # real signature unknown
pass
def NetUseAdd(*args, **kwargs): # real signature unknown
pass
def NetUseDel(*args, **kwargs): # real signature unknown
pass
def NetUseEnum(*args, **kwargs): # real signature unknown
pass
def NetUseGetInfo(*args, **kwargs): # real signature unknown
pass
def NetUserAdd(*args, **kwargs): # real signature unknown
pass
def NetUserChangePassword(*args, **kwargs): # real signature unknown
pass
def NetUserDel(*args, **kwargs): # real signature unknown
pass
def NetUserEnum(*args, **kwargs): # real signature unknown
pass
def NetUserGetGroups(*args, **kwargs): # real signature unknown
""" Updated - New Behavior """
pass
def NetUserGetInfo(*args, **kwargs): # real signature unknown
pass
def NetUserGetLocalGroups(*args, **kwargs): # real signature unknown
""" Updated - New Behavior """
pass
def NetUserModalsGet(*args, **kwargs): # real signature unknown
pass
def NetUserModalsSet(*args, **kwargs): # real signature unknown
pass
def NetUserSetInfo(*args, **kwargs): # real signature unknown
pass
def NetValidateName(*args, **kwargs): # real signature unknown
pass
def NetValidatePasswordPolicy(*args, **kwargs): # real signature unknown
pass
def NetWkstaGetInfo(*args, **kwargs): # real signature unknown
pass
def NetWkstaSetInfo(*args, **kwargs): # real signature unknown
pass
def NetWkstaTransportAdd(*args, **kwargs): # real signature unknown
pass
def NetWkstaTransportDel(*args, **kwargs): # real signature unknown
pass
def NetWkstaTransportEnum(*args, **kwargs): # real signature unknown
pass
def NetWkstaUserEnum(*args, **kwargs): # real signature unknown
pass
# no classes
# variables with complex values
__loader__ = None # (!) real value is '<_frozen_importlib_external.ExtensionFileLoader object at 0x000001DF95825470>'
__spec__ = None # (!) real value is "ModuleSpec(name='win32net', loader=<_frozen_importlib_external.ExtensionFileLoader object at 0x000001DF95825470>, origin='C:\\\\Users\\\\Doly\\\\Anaconda3\\\\lib\\\\site-packages\\\\win32\\\\win32net.pyd')"
| [
"qinkunpeng2015@163.com"
] | qinkunpeng2015@163.com |
ab763d211a9c24f4b40133a63293a7644e5bc85f | 19ab1499f904fc7065cf76eb518a457d1cad08c9 | /accounts/models.py | 6a9b925e0f4b3cfd73ab40258c5cce0c4c4d91d5 | [] | no_license | mathemartins/moli | 5a2637c94ec8953b42fba369b2fed5bdd3a83be6 | c945b90920659025013fe6133dfd5a0abac61788 | refs/heads/master | 2021-05-03T06:27:21.472267 | 2018-03-04T18:50:18 | 2018-03-04T18:50:18 | 120,595,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,327 | py | from django.db import models
from django.db.models.signals import pre_save, post_save
from django.contrib.auth import get_user_model
from django.utils.text import slugify
from django.utils.safestring import mark_safe
from django.utils import timezone
# Create your models here.
from imagekit.models import ProcessedImageField
from imagekit.processors import ResizeToFill
from accounts.utils import user_code_generator
User = get_user_model()
def upload_location(instance, filename):
return "%s/%s" %(instance.id, filename)
gender_ = (
('Male', 'Male'),
('Female', 'Female'),
('Other', 'Other'),
)
years = (
("1 - 5 years", "1 - 5 years"),
("6 - 13 years", "6 - 13 years"),
("14 - 20 years", "14 - 20 years"),
("20 years and above", "20 years and above"),
)
UserType = (
("Investor", "Investor"),
("StartUp", "StartUp"),
)
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
user_code = models.CharField(max_length=5)
user_type = models.CharField(choices=UserType, blank=True, null=True, max_length=100)
image = ProcessedImageField(upload_to=upload_location, processors=[ResizeToFill(150, 150)],
format='JPEG', options={'quality':100}, null=True, blank=True)
is_member = models.BooleanField(default=False, verbose_name="Premium Account")
skill_set = models.CharField(default="I don't have any.", max_length=100)
years_of_experience = models.CharField(choices=years, blank=True, null=True, max_length=100)
mobile_number = models.CharField(max_length=11)
street = models.CharField(max_length=100, blank=True, null=True)
city = models.CharField(max_length=100, blank=True, null=True)
state = models.CharField(max_length=100, blank=True, null=True)
zip_code = models.CharField(max_length=100, blank=True, null=True)
country = models.CharField(max_length=100, blank=True, null=True)
gender = models.CharField(choices=gender_, max_length=100)
slug = models.SlugField(null=True, blank=True)
updated = models.DateTimeField(auto_now_add=False, auto_now=True)
timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
# objects = ProfileManager()
class Meta:
ordering = ["-timestamp", "-updated"]
def __str__(self):
return str(self.user)
def new_user_signal(sender, instance, created, *args, **kwargs):
    # stub handler; a Profile (with a code from user_code_generator) would
    # typically be created here for a newly saved user
    pass
post_save.connect(new_user_signal, sender=User)  # run the handler whenever a User is saved
| [
"mathegeniuse@gmail.com"
] | mathegeniuse@gmail.com |
58aede70705c5b0dd846bea68d5953645bf246f6 | 65f9576021285bc1f9e52cc21e2d49547ba77376 | /cdsp_proc/core/securemsm/cryptodrivers/prng/build/SConscript | ac11a7dc6ab6c6c833edf3300ae6c0378f6e7fc7 | [] | no_license | AVCHD/qcs605_root_qcom | 183d7a16e2f9fddc9df94df9532cbce661fbf6eb | 44af08aa9a60c6ca724c8d7abf04af54d4136ccb | refs/heads/main | 2023-03-18T21:54:11.234776 | 2021-02-26T11:03:59 | 2021-02-26T11:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,839 | #===============================================================================
#
# Crypto Driver Libraries
#
# GENERAL DESCRIPTION
# build script
#
# Copyright (c) 2012 by QUALCOMM, Incorporated.
# All Rights Reserved.
# QUALCOMM Proprietary/GTDR
#
#-------------------------------------------------------------------------------
#
# $Header: //source/qcom/qct/core/bsp/config/msm7x30/main/latest/securemsm/build/SConscript#4 $
# $DateTime: 2009/10/07 11:43:48 $
# $Author: wduembeg $
# $Change: 1047079 $
# EDIT HISTORY FOR FILE
#
# This section contains comments describing changes made to the module.
# Notice that changes are listed in reverse chronological order.
#
# when who what, where, why
# -------- --- ---------------------------------------------------------
# 07/04/12 nk Initial version
#===============================================================================
Import('env')
env = env.Clone()
env.Replace(BUILDPATH = env.subst('${BUILDPATH}/${BUILD_ID}'))
CLEAN_SOURCES = env.FindFiles("*.h", "${BUILD_ROOT}/core/securemsm/cryptodrivers/prng/environment/")
env.CleanPack(['CORE_QDSP6_SW'], CLEAN_SOURCES)
CLEAN_SOURCES = env.FindFiles("*.h", "${BUILD_ROOT}/core/securemsm/cryptodrivers/prng/chipset/")
env.CleanPack(['CORE_QDSP6_SW'], CLEAN_SOURCES)
CLEAN_SOURCES = env.FindFiles("*", "${BUILD_ROOT}/core/securemsm/cryptodrivers/prng/test")
for x in env.FindFiles(['*.o', '*.lib', 'SConscript'], "${BUILD_ROOT}/core/securemsm/cryptodrivers/prng/test/build"):
CLEAN_SOURCES.remove(x)
env.CleanPack(['CORE_QDSP6_SW'],
CLEAN_SOURCES)
#-------------------------------------------------------------------------------
# Load sub scripts
#-------------------------------------------------------------------------------
env.LoadSoftwareUnits()
| [
"jagadeshkumar.s@pathpartnertech.com"
] | jagadeshkumar.s@pathpartnertech.com | |
5b49340e964b98109030cd66ae3252c3ba525436 | d2ad4a42770cd3b5c8738cdc6f11a3b63689123a | /examples/DeepQNetwork/common.py | 5e53e5c9192924e6f3562c328dcf02a1bdee57e0 | [
"Apache-2.0"
] | permissive | cpehle/tensorpack | bcdc55418e2e07779fd08f03742e9080a0854651 | 7e91eb48ce59b303b580140abbbfa42dc0319b21 | refs/heads/master | 2021-01-11T15:35:03.578953 | 2017-01-29T06:48:50 | 2017-01-29T06:48:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,462 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.predict import get_predict_func
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
global get_player
get_player = None
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
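        # with small probability take a random action instead of the greedy one,
        # presumably to keep evaluation from getting stuck in deterministic loops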
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg):
player = get_player(viz=0.01)
predfunc = get_predict_func(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predict_funcs, nr_eval):
class Worker(StoppableThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
player = get_player(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print "Score, ", score
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predict_funcs]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
except:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval):
func = get_predict_func(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Callback):
def __init__(self, nr_eval, input_names, output_names):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predict_func(
self.input_names, self.output_names)] * NR_PROC
def _trigger_epoch(self):
t = time.time()
mean, max = eval_with_funcs(self.pred_funcs, nr_eval=self.eval_episode)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.add_scalar_summary('mean_score', mean)
self.trainer.add_scalar_summary('max_score', max)
| [
"ppwwyyxxc@gmail.com"
] | ppwwyyxxc@gmail.com |
17cc2d6112cbf07ea283510a6a4186bda20bb213 | 8246092010e656920e7199f889f9cbf54b83a729 | /pycoin/wallet/SQLite3Persistence.py | 23c29337a9bc3cb77f61dce4e3bb11dd047b7512 | [
"MIT"
] | permissive | richardkiss/pycoin | 5717411a11445773ac922c1d1c1b7dbe4835cd77 | b41ad7d02e52d9869a8c9f0dbd7d3b2b496c98c0 | refs/heads/main | 2023-08-07T12:14:04.974934 | 2023-04-18T02:27:15 | 2023-04-18T02:27:15 | 10,917,677 | 1,306 | 489 | MIT | 2023-06-03T23:24:50 | 2013-06-24T19:17:52 | Python | UTF-8 | Python | false | false | 7,582 | py | from pycoin.encoding.hexbytes import b2h, h2b, b2h_rev, h2b_rev
from pycoin.key.BIP32Node import BIP32Node
class SQLite3Persistence(object):
def __init__(self, sqlite3_db):
self.db = sqlite3_db
self._init_tables()
def _exec_sql(self, sql, *args):
c = self.db.cursor()
c.execute(sql, args)
return c
def commit(self):
self.db.commit()
def rollback(self):
self.db.rollback()
def _init_tables(self):
self._init_table_bip32key()
self._init_table_bip32node()
self._init_table_spendable()
self._init_table_globals()
self._init_other_tables()
def _init_other_tables(self):
pass
def _init_table_bip32key(self):
SQL = """create table if not exists BIP32Key (
id integer primary key,
slug text not null unique,
as_text text not null
);"""
self._exec_sql(SQL)
self.db.commit()
def bip32node_for_slug(self, slug):
c = self._exec_sql("select id, as_text from BIP32Key where slug=?", slug)
r = c.fetchone()
if r is None:
return None
bip32_node = BIP32Node.from_hwif(r[1])
bip32_node.id = r[0]
return bip32_node
def create_bip32node(self, slug, random_bytes):
bip32_node = BIP32Node.from_master_secret(random_bytes)
bip32_text = bip32_node.as_text(as_private=True)
self._exec_sql("insert into BIP32Key (slug, as_text) values (?, ?)", slug, bip32_text)
return self.bip32node_for_slug(slug)
def _init_table_bip32node(self):
SQL = """create table if not exists BIP32Node (
path text not null,
key_id integer,
address text unique,
unique(path, key_id)
);"""
self._exec_sql(SQL)
self.db.commit()
def add_bip32_path(self, bip32_node, path):
address = bip32_node.subkey_for_path(path).address()
key_id = bip32_node.id
self._exec_sql("insert or ignore into BIP32Node values (?, ?, ?)", path, key_id, address)
self.db.commit()
return address
def interesting_addresses(self):
c = self._exec_sql("select address from BIP32Node")
return (r[0] for r in c)
def secret_exponent_for_address(self, bip32_node, address):
c = self._exec_sql("select path from BIP32Node where key_id = ? and address = ?", bip32_node.id, address)
r = c.fetchone()
if r is None:
return r
path = r[0]
return bip32_node.subkey_for_path(path).secret_exponent()
def _init_table_globals(self):
SQL = """create table if not exists Global (
slug text primary key,
data text
);"""
self._exec_sql(SQL)
self.db.commit()
def set_global(self, slug, value):
self._exec_sql("insert or replace into Global values (?, ?)", slug, value)
def get_global(self, slug):
c = self._exec_sql("select data from Global where slug = ?", slug)
r = c.fetchone()
if r is None:
return r
return r[0]
def slugs(self):
for r in self._exec_sql("select slug from Global"):
yield r[0]
def _init_table_spendable(self):
SQL = ["""create table if not exists Spendable (
tx_hash text,
tx_out_index integer,
coin_value integer,
script text,
block_index_available integer,
does_seem_spent boolean,
block_index_spent integer,
unique(tx_hash, tx_out_index)
);""",
"create index if not exists Spendable_cv on Spendable (coin_value);",
"create index if not exists Spendable_bia on Spendable (block_index_available);",
"create index if not exists Spendable_bis on Spendable (block_index_spent);"]
for sql in SQL:
self._exec_sql(sql)
self.db.commit()
def save_spendable(self, spendable):
tx_hash = b2h_rev(spendable.tx_hash)
script = b2h(spendable.script)
self._exec_sql("insert or replace into Spendable values (?, ?, ?, ?, ?, ?, ?)", tx_hash,
spendable.tx_out_index, spendable.coin_value, script,
spendable.block_index_available, spendable.does_seem_spent,
spendable.block_index_spent)
def delete_spendable(self, tx_hash, tx_out_index):
self._exec_sql("delete from Spendable where tx_hash = ? and tx_out_index = ?",
b2h_rev(tx_hash), tx_out_index)
def spendable_for_hash_index(self, tx_hash, tx_out_index, spendable_class):
tx_hash_hex = b2h_rev(tx_hash)
SQL = ("select coin_value, script, block_index_available, "
"does_seem_spent, block_index_spent from Spendable where "
"tx_hash = ? and tx_out_index = ?")
c = self._exec_sql(SQL, tx_hash_hex, tx_out_index)
r = c.fetchone()
if r is None:
return r
return spendable_class(coin_value=r[0], script=h2b(r[1]), tx_hash=tx_hash,
tx_out_index=tx_out_index, block_index_available=r[2],
does_seem_spent=r[3], block_index_spent=r[4])
@staticmethod
def spendable_for_row(r, spendable_class):
return spendable_class(coin_value=r[2], script=h2b(r[3]), tx_hash=h2b_rev(r[0]), tx_out_index=r[1],
block_index_available=r[4], does_seem_spent=r[5], block_index_spent=r[6])
def all_spendables(self, spendable_class, qualifier_sql=""):
SQL = ("select tx_hash, tx_out_index, coin_value, script, block_index_available, "
"does_seem_spent, block_index_spent from Spendable " + qualifier_sql)
c1 = self._exec_sql(SQL)
while 1:
r = next(c1)
yield self.spendable_for_row(r, spendable_class)
def unspent_spendables(self, last_block, spendable_class, confirmations=0):
# we fetch spendables "old enough"
# we alternate between "biggest" and "smallest" spendables
SQL = ("select tx_hash, tx_out_index, coin_value, script, block_index_available, "
"does_seem_spent, block_index_spent from Spendable where "
"block_index_available > 0 and does_seem_spent = 0 and block_index_spent = 0 "
"%s order by coin_value %s")
if confirmations > 0:
prior_to_block = last_block + 1 - confirmations
t1 = "and block_index_available <= %d " % prior_to_block
else:
t1 = ""
c1 = self._exec_sql(SQL % (t1, "desc"))
c2 = self._exec_sql(SQL % (t1, "asc"))
seen = set()
while 1:
r = next(c2)
s = self.spendable_for_row(r, spendable_class)
name = (s.tx_hash, s.tx_out_index)
if name not in seen:
yield s
seen.add(name)
r = next(c1)
s = self.spendable_for_row(r, spendable_class)
name = (s.tx_hash, s.tx_out_index)
if name not in seen:
yield s
seen.add(name)
def unspent_spendable_count(self):
SQL = ("select count(*) from Spendable where does_seem_spent = 0"
" and block_index_available > 0 and block_index_spent = 0")
c = self._exec_sql(SQL)
r = c.fetchone()
return r[0]
def rewind_spendables(self, block_index):
SQL1 = ("update Spendable set block_index_available = 0 where block_index_available > ?")
self._exec_sql(SQL1, block_index)
SQL2 = ("update Spendable set block_index_spent = 0 where block_index_spent > ?")
self._exec_sql(SQL2, block_index)
| [
"him@richardkiss.com"
] | him@richardkiss.com |
0271004bd53c6495bea1b7d09dd2f9cd713b5bab | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_348/ch28_2020_04_01_16_19_34_337220.py | 510b48ff825567842eb0ccd1c5f6ae918874c9eb | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | contador = 0
numero = 0
while contador < 99:
numero = numero + (1/2**contador)
contador = contador + 1
print(numero)
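# the loop accumulates sum_{k=0}^{98} (1/2)**k = 2 - 2**-98, so this prints ~2.0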
| [
"you@example.com"
] | you@example.com |
7dc694c928307813f2ccde47e1ff6de0f120e660 | 33d77a6caf1813fcb378671fd89e5bb1dec7f6f8 | /tsai/callback/core.py | 449505e7e55f4945889568ef0a8510b4d9be93d9 | [
"Apache-2.0"
] | permissive | kusumy/tsai | 6a1f3d4137131084062d1a5f942e8f7d23662abd | d6994896dd804cfed441adbb2b8dd4836b1dac4a | refs/heads/master | 2023-02-25T14:13:08.275441 | 2021-01-29T20:44:21 | 2021-01-29T20:44:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,198 | py | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/010_callback.core.ipynb (unless otherwise specified).
__all__ = ['GamblersCallback', 'TransformScheduler', 'ShowGraph', 'ShowGraphCallback2', 'UBDAug']
# Cell
from ..imports import *
from ..utils import *
from ..data.preprocessing import *
from ..data.transforms import *
from ..models.layers import *
from fastai.callback.all import *
# Cell
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
# Cell
class GamblersCallback(Callback):
"A callback to use metrics with gambler's loss"
def after_loss(self): self.learn.pred = self.learn.pred[..., :-1]
# Cell
class TransformScheduler(Callback):
"A callback to schedule batch transforms during training based on a function (sched_lin, sched_exp, sched_cos (default), etc)"
def __init__(self, schedule_func:callable, show_plot:bool=False):
self.schedule_func,self.show_plot = schedule_func,show_plot
self.mult = []
def before_fit(self):
for pct in np.linspace(0, 1, len(self.dls.train) * self.n_epoch): self.mult.append(self.schedule_func(pct))
# get initial magnitude values and update initial value
self.mag = []
self.mag_tfms = []
for t in self.dls.after_batch:
if hasattr(t, 'magnitude'):
self.mag.append(t.magnitude)
t.magnitude *= self.mult[0]
self.mag_tfms.append(t)
def after_batch(self):
if self.training and len(self.mag_tfms)>0 and self.train_iter < len(self.mult):
# set values for next batch
for t,m in zip(self.mag_tfms, self.mag):
t.magnitude = m * self.mult[self.train_iter]
def after_fit(self):
if self.show_plot and self.mult != [] and len(self.mag_tfms)>0:
print()
plt.plot(self.mult)
plt.title('Scheduled tfms')
plt.show()
print()
self.show_plot = False
# set values to initial values
for t,m in zip(self.mag_tfms, self.mag): t.magnitude = m
def __repr__(self):
return f'{self.__class__.__name__}({self.schedule_func})'
# Cell
class ShowGraph(ShowGraphCallback):
"(Modified) Update a graph of training and validation loss"
def after_epoch(self):
"Plot validation loss in the pbar graph"
if not self.nb_batches: return
rec = self.learn.recorder
iters = range_of(rec.losses)
val_losses = [v[1] for v in rec.values]
x_bounds = (0, (self.n_epoch - len(self.nb_batches)) * self.nb_batches[0] + len(rec.losses))
y_min = min((min(rec.losses), min(val_losses)))
y_max = max((max(rec.losses), max(val_losses)))
margin = (y_max - y_min) * .05
y_bounds = (y_min - margin, y_max + margin)
self.progress.mbar.update_graph([(iters, rec.losses), (self.nb_batches, val_losses)], x_bounds, y_bounds)
ShowGraphCallback2 = ShowGraph
# Cell
class UBDAug(Callback):
r"""A callback to implement the uncertainty-based data augmentation."""
def __init__(self, batch_tfms:list, N:int=2, C:int=4, S:int=1):
r'''
Args:
batch_tfms: list of available transforms applied to the combined batch. They will be applied in addition to the dl tfms.
N: # composition steps (# transforms randomly applied to each sample)
C: # augmented data per input data (# times N transforms are applied)
S: # selected data points used for training (# augmented samples in the final batch from each original sample)
'''
self.C, self.S = C, min(S, C)
self.batch_tfms = L(batch_tfms)
self.n_tfms = len(self.batch_tfms)
self.N = min(N, self.n_tfms)
def before_fit(self):
assert hasattr(self.loss_func, 'reduction'), "You need to pass a loss_function with a 'reduction' attribute"
self.red = self.loss_func.reduction
def before_batch(self):
if self.training:
with torch.no_grad():
setattr(self.loss_func, 'reduction', 'none')
for i in range(self.C):
idxs = np.random.choice(self.n_tfms, self.N, False)
x_tfm = compose_tfms(self.x, self.batch_tfms[idxs], split_idx=0)
loss = self.loss_func(self.learn.model(x_tfm), self.y).reshape(-1,1)
if i == 0:
x2 = x_tfm.unsqueeze(1)
max_loss = loss
else:
losses = torch.cat((max_loss, loss), dim=1)
x2 = torch.cat((x2, x_tfm.unsqueeze(1)), dim=1)
x2 = x2[np.arange(x2.shape[0]).reshape(-1,1), losses.argsort(1)[:, -self.S:]]
max_loss = losses.max(1)[0].reshape(-1,1)
setattr(self.loss_func, 'reduction', self.red)
x2 = x2.reshape(-1, self.x.shape[-2], self.x.shape[-1])
if self.S > 1: self.learn.yb = (torch_tile(self.y, 2),)
self.learn.xb = (x2,)
def __repr__(self): return f'UBDAug({[get_tfm_name(t) for t in self.batch_tfms]})' | [
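# Usage sketch (tfm_a/tfm_b stand in for tsai batch transforms; Learner is fastai's):
#   ubda = UBDAug([tfm_a, tfm_b], N=2, C=4, S=2)
#   learn = Learner(dls, model, cbs=ubda, loss_func=CrossEntropyLossFlat())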
"“oguiza@gmail.com”"
] | “oguiza@gmail.com” |
9c4e3df78a6a526942f1747c2c3ab68138a6105d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_retracting.py | 7a0b51d0632af5dd821d4f03d4dee4fe35e094e4 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
# class header
class _RETRACTING():
    def __init__(self,):
        self.name = "RETRACTING"
        self.definitions = ['retract']  # quoted; a bare name here raises NameError on import
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['retract']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
97de807d29418219e7285b76b289be1bafcc58c8 | c609c9e9da9b716dde810334fe32cb65a3ddcff9 | /ddsc/remotestore.py | f5c98790fe650c115cff91c020eda36391e45622 | [
"MIT"
] | permissive | erichhuang/DukeDSClient | 16f858e743b7823f1797214b9ede1e520f73af4a | feccdbd0caed524e0728268be29a3987121712fa | refs/heads/master | 2020-12-24T21:45:24.371369 | 2016-02-22T18:30:06 | 2016-02-22T18:30:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,257 | py | from localstore import LocalContent, HashUtil
class RemoteContentFetch(object):
def __init__(self, data_service):
self.data_service = data_service
def fetch_remote_project(self, project_name, path_list):
project = self._get_my_project(project_name)
if project:
self._add_project_children(project)
return project
def _get_my_project(self, project_name):
response = self.data_service.get_projects().json()
for project in response['results']:
if project['name'] == project_name:
return RemoteProject(project)
return None
def _add_project_children(self, project):
response = self.data_service.get_project_children(project.id, '').json()
for child in response['results']:
self._add_child_recur(project, child)
def _add_child_recur(self, parent, child):
kind = child['kind']
if kind == 'dds-folder':
parent.add_child(self._read_folder(child))
elif kind == 'dds-file':
parent.add_child(RemoteFile(child))
else:
raise ValueError("Unknown child type {}".format(kind))
def _read_folder(self, folder_json):
folder = RemoteFolder(folder_json)
response = self.data_service.get_folder_children(folder.id, '').json()
for child in response['results']:
self._add_child_recur(folder, child)
return folder
class RemoteProject(object):
"""
Project data from a remote store projects request.
Represents the top of a tree.
"""
def __init__(self, json_data):
self.id = json_data['id']
self.kind = json_data['kind']
self.name = json_data['name']
self.description = json_data['description']
self.is_deleted = json_data['is_deleted']
self.children = []
def add_child(self, child):
self.children.append(child)
def get_paths(self):
paths = set()
for child in self.children:
paths.update(child.get_paths(''))
return paths
def __str__(self):
return 'project: {} id:{} {}'.format(self.name, self.id, self.children)
class RemoteFolder(object):
"""
Folder data from a remote store project_id_children or folder_id_children request.
Represents a leaf or branch in a project tree.
"""
def __init__(self, json_data):
self.id = json_data['id']
self.kind = json_data['kind']
self.name = json_data['name']
self.is_deleted = json_data['is_deleted']
self.children = []
def add_child(self, child):
self.children.append(child)
def get_paths(self, parent):
paths = set()
my_path = parent + '/' + self.name
paths.add(my_path)
for child in self.children:
paths.update(child.get_paths(my_path))
return paths
def __str__(self):
return 'folder: {} id:{} {}'.format(self.name, self.id, self.children)
class RemoteFile(object):
"""
File data from a remote store project_id_children or folder_id_children request.
Represents a leaf in a project tree.
"""
def __init__(self, json_data):
self.id = json_data['id']
self.kind = json_data['kind']
self.name = json_data['name']
self.is_deleted = json_data['is_deleted']
self.size = json_data['upload']['size']
def get_paths(self, parent):
paths = set()
paths.add(parent + '/' + self.name)
return paths
def __str__(self):
return 'file: {} id:{} size:{}'.format(self.name, self.id, self.size)
class FileOnDisk(object):
"""Return a chunks lazily."""
def __init__(self, dsa, local_file):
self.dsa = dsa
self.local_file = local_file
self.filename = local_file.path
self.content_type = local_file.mimetype
self.chunk_num = 0
self.upload_id = None
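    # Upload flow as driven by the methods below: create the upload record,
    # stream the file chunk by chunk to signed external URLs, mark the upload
    # complete, then attach it to a new file under the given parent.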
def upload(self, project_id, parent_kind, parent_id):
size = self.local_file.size
(hash_alg, hash_value) = self.local_file.get_hashpair()
name = self.local_file.name
resp = self.dsa.create_upload(project_id, name, self.content_type, size, hash_value, hash_alg)
self.upload_id = resp.json()['id']
self._send_file_chunks()
self.dsa.complete_upload(self.upload_id)
result = self.dsa.create_file(parent_kind, parent_id, self.upload_id)
return result.json()['id']
def _send_file_chunks(self):
self.local_file.process_chunks(self.dsa.bytes_per_chunk, self.process_chunk)
def process_chunk(self, chunk, chunk_hash_alg, chunk_hash_value):
resp = self.dsa.create_upload_url(self.upload_id, self.chunk_num, len(chunk),
chunk_hash_value, chunk_hash_alg)
if resp.status_code == 200:
self._send_file_external(resp.json(), chunk)
self.chunk_num += 1
else:
raise ValueError("Failed to retrieve upload url status:" + str(resp.status_code))
def _send_file_external(self, url_json, chunk):
http_verb = url_json['http_verb']
host = url_json['host']
url = url_json['url']
http_headers = url_json['http_headers']
resp = self.dsa.send_external(http_verb, host, url, http_headers, chunk)
if resp.status_code != 200 and resp.status_code != 201:
raise ValueError("Failed to send file to external store. Error:" + str(resp.status_code))
class RemoteContentSender(object):
def __init__(self, data_service, project_id, project_name, watcher):
self.data_service = data_service
self.project_id = project_id
self.project_name = project_name
self.watcher = watcher
def visit_project(self, item, parent):
if not item.remote_id:
self.watcher.sending_item(item)
result = self.data_service.create_project(self.project_name, self.project_name)
item.remote_id = result.json()['id']
item.sent_to_remote = True
self.project_id = item.remote_id
def visit_folder(self, item, parent):
if not item.remote_id:
self.watcher.sending_item(item)
result = self.data_service.create_folder(item.name, parent.kind, parent.remote_id)
item.remote_id = result.json()['id']
item.sent_to_remote = True
def visit_file(self, item, parent):
# Always sending files right, no way to know if different without downloading.
self.watcher.sending_item(item)
file_on_disk = FileOnDisk(self.data_service, item)
item.remote_id = file_on_disk.upload(self.project_id, parent.kind, parent.remote_id)
item.sent_to_remote = True
| [
"johnbradley2008@gmail.com"
] | johnbradley2008@gmail.com |
d022fe38183e5587f2f636e2c586849ef8486f56 | 8b5e08a5c2ae9779a362e4a1eb15e9205585288b | /mysite/account/views.py | a8780cbdccf21ad701a253bed72a0ae7507e6d5c | [] | no_license | wuchunlongcom/admin-upimg | 2b6dcc453e91040f0e9154d6255afdcf9aaa4e62 | 35e0dd0584d7e090d8922de704cd26ff0543679e | refs/heads/master | 2022-05-02T02:59:18.691844 | 2020-02-21T10:23:17 | 2020-02-21T10:23:17 | 240,830,450 | 0 | 0 | null | 2022-04-22T23:02:11 | 2020-02-16T04:21:17 | JavaScript | UTF-8 | Python | false | false | 805 | py | # -*- coding: utf-8 -*-
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from .models import Image
from myAPI.pageAPI import djangoPage, PAGE_NUM
# http://localhost:8000/
#@login_required
def index(request):
    meg = 'Minimal code for image file upload. <br>Log in to the Admin backend, ' \
          'upload an image file and view it on the front end.<br>Username/password: admin/admin'
return render(request, 'account/index.html', context=locals())
# show uploaded images  http://localhost:8000/upload/pic/
def upload_pic(request, page):
pic_list = Image.objects.all()
pic_list, pageList, num_pages, page = djangoPage(pic_list, page, PAGE_NUM)
offset = PAGE_NUM * (page - 1)
return render(request,'account/image.html', context=locals()) | [
"wcl6005@163.com"
] | wcl6005@163.com |
a0cfe1555eae52ff7fc71807259a1992ae6af19e | 8163d8f03aea22cb4fa1e60d809781049fff4bb4 | /MODEL11/first/urls.py | 64c7bb7d97725ed170ff93fe6248f7b4ddae533c | [] | no_license | shubham454/Django-Devlopment | 694b973d31a82d2ded11f95138bd766130d7d3c9 | 43a2c3b98dbe9f582f2394fcfb3beb133c37b145 | refs/heads/master | 2022-12-04T14:34:05.093402 | 2020-08-13T18:35:33 | 2020-08-13T18:35:33 | 287,353,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | from django.urls import path
from first import views
urlpatterns = [
path('list/',views.GetBankData.as_view()),
path('add/', views.AddBankData.as_view()),
path('<int:ano>/', views.DeleteView.as_view()),
path('update/<int:ano>/', views.UpdateView.as_view()),
]
| [
"moreshubham203@gmail.com"
] | moreshubham203@gmail.com |
fae3195bd3e588f5f3ea49692059b133decd7c8e | 086c199b617f304f5edcbb3481a82119b9cec99d | /build/catkin_generated/generate_cached_setup.py | dcc8823bb39338e0486ff1fd5c925f02d20b3f92 | [] | no_license | canveo/catkin_ws | 59634bee66aa9f5ed593acd85e6bd4e2e2eaab01 | 3c931df576529ad7e1e48dc0e69ba131914d18dc | refs/heads/master | 2023-02-28T13:23:43.653964 | 2021-02-03T23:00:45 | 2021-02-03T23:00:45 | 325,388,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/noetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/noetic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in '/home/canveo/catkin_ws/devel;/opt/ros/noetic'.split(';'):
python_path = os.path.join(workspace, 'lib/python3/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/canveo/catkin_ws/devel/env.sh')
output_filename = '/home/canveo/catkin_ws/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
# print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"canveo@gmail.com"
] | canveo@gmail.com |
0df4506dacd545abd91b163d9de4b2880bf748cb | 753938feae47c1ce8edeaa35099c634fbafa3b99 | /store/store_convergence.py | 569d2102921bf041a6e060c55c2a75cb8a7ce3c3 | [
"MIT"
] | permissive | dayoladejo/SwarmOptimization | c11cc799ec5a16a3a341ea0f2b5b9df1ea5f65d9 | 5445b6f90ab49339ca0fdb71e98d44e6827c95a8 | refs/heads/main | 2023-07-17T09:14:16.266657 | 2021-09-10T17:10:56 | 2021-09-10T17:10:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,228 | py | #
# file: store_convergence.py
#
# Plot store convergence as function of iteration.
#
# RTK, 24-Sep-2020
# Last update: 24-Sep-2020
#
################################################################
import pickle
import numpy as np
import matplotlib.pylab as plt
def geny(m,gbest,giter):
y = np.zeros(m)
for i in range(len(giter)):
y[giter[i]:] = -gbest[i]
return y
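# geny expands the sparse (best value, iteration) pairs into a dense per-iteration curve;
# the sign flip assumes each optimizer minimized negative revenue, so -gbest is revenue.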
# DE
d = pickle.load(open("results/de_results.pkl","rb"))
gbest, giter, miter = d["gbest"], d["giter"], d["max_iter"]
x,y = range(miter), geny(miter, gbest, giter)
plt.plot(x[::200],y[::200], marker="P", linestyle='none', color='k', label="DE")
plt.plot(x,y, color='k')
# PSO
d = pickle.load(open("results/pso_results.pkl","rb"))
gbest, giter, miter = d["gbest"], d["giter"], d["max_iter"]
x,y = range(miter), geny(miter, gbest, giter)
plt.plot(x[::200],y[::200], marker="s", linestyle='none', color='k', label="PSO")
plt.plot(x,y, color='k')
# GWO
d = pickle.load(open("results/gwo_results.pkl","rb"))
gbest, giter, miter = d["gbest"], d["giter"], d["max_iter"]
x,y = range(miter), geny(miter, gbest, giter)
plt.plot(x[::200],y[::200], marker="<", linestyle='none', color='k', label="GWO")
plt.plot(x,y, color='k')
# Jaya
d = pickle.load(open("results/jaya_results.pkl","rb"))
gbest, giter, miter = d["gbest"], d["giter"], d["max_iter"]
x,y = range(miter), geny(miter, gbest, giter)
plt.plot(x[::200],y[::200], marker="*", linestyle='none', color='k', label="Jaya")
plt.plot(x,y, color='k')
# GA
d = pickle.load(open("results/ga_results.pkl","rb"))
gbest, giter, miter = d["gbest"], d["giter"], d["max_iter"]
x,y = range(miter), geny(miter, gbest, giter)
plt.plot(x[::200],y[::200], marker=">", linestyle='none', color='k', label="GA")
plt.plot(x,y, color='k')
# RO
d = pickle.load(open("results/ro_results.pkl","rb"))
gbest, giter, miter = d["gbest"], d["giter"], d["max_iter"]
x,y = range(miter), geny(miter, gbest, giter)
plt.plot(x[::200],y[::200], marker="o", linestyle='none', color='k', label="RO")
plt.plot(x,y, color='k')
plt.legend(loc="lower right")
plt.xlabel("Iteration")
plt.ylabel("Revenue")
plt.ylim((185,255))
plt.tight_layout(pad=0, w_pad=0, h_pad=0)
plt.savefig("store_convergence.png", dpi=300)
plt.show()
| [
"oneelkruns@hotmail.com"
] | oneelkruns@hotmail.com |
03139a6a7f7c39063ba8cd75ec2ed4dc60c632a8 | 5619eb14e8c46a70ba228f96c4afcebbae557189 | /url_tester/migrations/0004_auto_20190107_1552.py | cb82a2213547b9a90915b99fee210e96dd484ea7 | [] | no_license | eduarde/URLTester | 3d7b6be6f43886559264189f70f126c3ee74678d | 32c158943b4892b7f00d328ed26d63d813287106 | refs/heads/master | 2020-04-15T05:36:25.856274 | 2019-02-06T13:54:29 | 2019-02-06T13:54:29 | 164,430,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | # Generated by Django 2.1.5 on 2019-01-07 13:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('url_tester', '0003_auto_20190107_1551'),
]
operations = [
migrations.AlterField(
model_name='url',
name='name',
field=models.CharField(blank=True, max_length=200, null=True),
),
]
| [
"eduard.erja@gmail.com"
] | eduard.erja@gmail.com |
e6643bc565b34a1eaed02f17519df6cbdb69e7a0 | 23fddc940a266c2d1d0e0b1687c36cdbcc9d54d9 | /shared/data_collection/scribeutil.py | 94a572426b9ecabc72323e87e3a64c7180ca6ad3 | [] | no_license | Cuick/traversing | 210fcfb1c780037de59343fffeb4fa4d3f2eae32 | c78982580af7f63c8bff4dcb37005b7f7c682b5b | refs/heads/master | 2021-01-10T17:38:37.899460 | 2016-11-18T06:06:55 | 2016-11-18T06:06:55 | 55,397,540 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,918 | py | # -*- coding:utf-8 -*-
"""
created by server on 14-8-27 4:16 PM.
"""
try:
from scribe import scribe
from thrift.transport import TTransport, TSocket
from thrift.protocol import TBinaryProtocol
    SCRIBE_AVAILABLE = True  # check that the scribe libraries import cleanly; a failure must not break the business system
except ImportError, e:
print '<------->'
print e
SCRIBE_AVAILABLE = False
class Singleton(type):
    '''this is a meta class for Singleton, just omit it'''
    def __init__(cls, name, bases, dic):
        super(Singleton, cls).__init__(name, bases, dic)
        cls._singleton = None  # cache slot; a distinct name so it cannot shadow an attribute named "instance" on user classes
    def __call__(cls, *args, **kwargs):  # @NoSelf
        if cls._singleton is None:
            cls._singleton = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._singleton
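# Behavior sketch: any class built with this metaclass hands back one shared object;
# constructor arguments are only honored on the first call (Python 2 syntax, as below):
#   class Foo(object):
#       __metaclass__ = Singleton
#       def __init__(self, x): self.x = x
#   assert Foo(1) is Foo(2)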
class _Transport(object):
'''
use this class as a raw socket
'''
def __init__(self, host, port, timeout=None, unix_socket=None):
self.host = host
self.port = port
self.timeout = timeout # ms
self._unix_socket = unix_socket
self._socket = TSocket.TSocket(self.host, self.port, self._unix_socket)
self._transport = TTransport.TFramedTransport(self._socket)
def __del__(self):
self._socket.close()
def connect(self):
try:
if self.timeout:
self._socket.settimeout(self.timeout)
if not self._transport.isOpen():
self._transport.open()
else:
pass
except Exception, e:
self.close()
def isOpen(self):
return self._transport.isOpen()
def get_trans(self):
return self._transport
def close(self):
self._transport.close()
import time
class ScribeClient(object):
'''a simple scribe client'''
__metaclass__ = Singleton
def __init__(self, host, port, timeout=None, unix_socket=None):
self._transObj = _Transport(host, port, timeout=timeout, unix_socket=unix_socket)
self._protocol = TBinaryProtocol.TBinaryProtocol(trans=self._transObj.get_trans(), strictRead=False, strictWrite=False)
self.client = scribe.Client(iprot=self._protocol, oprot=self._protocol)
self._transObj.connect()
def log(self, category, message):
'''specify a category and send the message'''
message = time.strftime('%H:%M:%S') + '\t' + message # add timestamp before log
log_entry = scribe.LogEntry(category=category, message=message)
try:
self.client.Log([log_entry])
except Exception, e:
self._transObj.close()
self._transObj.connect()
if self._transObj.isOpen():
self.client.Log([log_entry])
else:
pass
    @classmethod
    def instance(cls, *args, **kwargs):
        '''return the shared Scribe client (host and port are required on first construction)'''
        # the Singleton metaclass caches the first instance, so this simply delegates to it
        return cls(*args, **kwargs)
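# Usage sketch (host/port are placeholders for a real scribe endpoint):
#   client = ScribeClient('127.0.0.1', 1463, timeout=5000)
#   client.log('my_category', 'hello scribe')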
| [
"zxzxck@163.com"
] | zxzxck@163.com |
bd9632f645cbdef8be36b04fe17ef06be5f9b4d1 | 280079e18b506ec7ed85a49587e10db795947922 | /accounting/libs/templatetags/introspection_filters.py | d7e06cce56fa0b40cd5dd3cd0442d8fda47ee202 | [
"MIT"
] | permissive | Abdur-rahmaanJ/django-accounting | 6a4d30fd0070f4b11b50490a801b0dece58dd474 | 4ec094544873843722db964ea9283a7947c4ba32 | refs/heads/master | 2020-03-22T17:02:47.120624 | 2018-07-10T10:49:19 | 2018-07-10T10:49:19 | 140,368,614 | 0 | 0 | MIT | 2018-07-10T10:49:20 | 2018-07-10T02:52:28 | Python | UTF-8 | Python | false | false | 920 | py | from django import template
from django.forms import ModelForm, BaseFormSet
from django.db.models import Model
from django_select2.fields import (
AutoModelSelect2Field,
AutoModelSelect2MultipleField)
register = template.Library()
@register.filter
def get_model_verbose_name(instance):
if isinstance(instance, Model):
return instance._meta.verbose_name.title()
return '<unknown>'
@register.filter
def get_form_model_verbose_name(instance):
if isinstance(instance, ModelForm):
return instance._meta.model._meta.verbose_name.title()
if isinstance(instance, BaseFormSet):
return instance.model._meta.verbose_name_plural.title()
return '<unknown>'
@register.filter
def is_select2_field(form, field):
select2_classes = (AutoModelSelect2Field, AutoModelSelect2MultipleField)
res = any(isinstance(field.field, cls) for cls in select2_classes)
return res
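# Template usage sketch (after `{% load introspection_filters %}`; names are illustrative):
#   {{ instance|get_model_verbose_name }}
#   {% if form|is_select2_field:field %} ... {% endif %}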
| [
"dulacpier@gmail.com"
] | dulacpier@gmail.com |
f8619e156fdfa736e2afaac94ee691bc3ff1978b | e473f04f5dd4e40393bc1047e6f326e589204530 | /law/notification.py | 880ccec0117a02ee59dc937376dc76f77458dd42 | [
"BSD-3-Clause"
] | permissive | yrath/law | bc9848aa1238538af7cb9a335e0fa9fad90bbecf | 807306d6b2113e6c546c01fcaa134bba551b4759 | refs/heads/master | 2020-07-15T11:01:30.517171 | 2019-12-12T11:56:41 | 2019-12-12T11:56:41 | 205,548,161 | 0 | 0 | BSD-3-Clause | 2019-08-31T13:29:23 | 2019-08-31T13:29:22 | null | UTF-8 | Python | false | false | 1,203 | py | # coding: utf-8
"""
Notification functions.
"""
__all__ = ["notify_mail"]
import logging
from law.config import Config
from law.util import send_mail
logger = logging.getLogger(__name__)
def notify_mail(title, message, recipient=None, sender=None, smtp_host=None, smtp_port=None,
**kwargs):
"""
Mail notification method taking a *title* and a string *message*. *recipient*, *sender*,
*smtp_host* and *smtp_port* default to the configuration values in the [notifications] section.
"""
cfg = Config.instance()
if not recipient:
recipient = cfg.get_expanded("notifications", "mail_recipient")
if not sender:
sender = cfg.get_expanded("notifications", "mail_sender")
if not smtp_host:
smtp_host = cfg.get_expanded("notifications", "mail_smtp_host")
if not smtp_port:
smtp_port = cfg.get_expanded("notifications", "mail_smtp_port")
if not recipient or not sender:
logger.warning("cannot send mail notification, recipient ({}) or sender ({}) empty".format(
recipient, sender))
return False
return send_mail(recipient, sender, title, message, smtp_host=smtp_host, smtp_port=smtp_port)
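# Usage sketch (the recipient address is a placeholder; omitted arguments fall back
# to the [notifications] config section as described in the docstring):
#   notify_mail("law task", "task finished", recipient="me@example.com")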
| [
"marcelrieger@me.com"
] | marcelrieger@me.com |
fc7769537197f0cb473bfbe371228ddeb601c337 | 3fbda19ca67e6fe8dfd25cd3f9e220b67c2f1260 | /python网络数据采集/my_爬虫_进阶_之路/scrapy框架/my_spiders/电商项目集合/阿里1688_淘宝_天猫_京东_折800_卷皮_拼多多_蜜芽_蘑菇街_楚楚街_唯品会_聚美优品/my_flask_server/pinduoduo_parse.py | 55eceba39cb3b7fab6e469439a871c226149f745 | [] | no_license | backchenlin/python | dae4f79a84b12c638a48a700060fdc771c21a912 | f6bc9dc5698ab273f8bda3c3f6669f0989fcc2c0 | refs/heads/master | 2020-03-22T16:12:19.667931 | 2018-07-09T09:11:15 | 2018-07-09T09:11:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,300 | py | # coding:utf-8
'''
@author = super_fazai
@File : pinduoduo_parse.py
@Time : 2017/11/24 14:58
@connect : superonesfazai@gmail.com
'''
"""
拼多多页面采集系统(官网地址: http://mobile.yangkeduo.com/)
由于拼多多的pc站,官方早已放弃维护,专注做移动端,所以下面的都是基于移动端地址进行的爬取
直接requests开始时是可以的,后面就只返回错误的信息,估计将我IP过滤了
"""
import time
from random import randint
import json
import requests
import re
from pprint import pprint
from decimal import Decimal
from time import sleep
import datetime
import gc
import pytz
from json import dumps
from settings import HEADERS
from selenium import webdriver
import selenium.webdriver.support.ui as ui
from settings import PHANTOMJS_DRIVER_PATH
from my_ip_pools import MyIpPools
from my_phantomjs import MyPhantomjs
from my_requests import MyRequests
from my_utils import get_shanghai_time
from my_items import GoodsItem
# path to the phantomjs driver
EXECUTABLE_PATH = PHANTOMJS_DRIVER_PATH
class PinduoduoParse(object):
def __init__(self):
self._set_headers()
self.result_data = {}
        # self.set_cookies_key_api_uid()  # set the api_uid value in the cookie
self.my_phantomjs = MyPhantomjs()
def _set_headers(self):
self.headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Encoding:': 'gzip',
'Accept-Language': 'zh-CN,zh;q=0.8',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Host': 'mobile.yangkeduo.com',
            'User-Agent': HEADERS[randint(0, 34)],  # pick a random request header
            # 'Cookie': 'api_uid=rBQh+FoXerAjQWaAEOcpAg==;',  # analysis showed this cookie value is required
}
def get_goods_data(self, goods_id):
        '''
        Build the mobile URL that carries the goods data and fetch it
        :param goods_id:
        :return: data as a dict
        '''
        if goods_id == '':
            self.result_data = {}  # reset so stale data does not leak into the next crawl
return {}
else:
tmp_url = 'http://mobile.yangkeduo.com/goods.html?goods_id=' + str(goods_id)
            print('------>>>| mobile-site URL for this goods: ', tmp_url)
            '''
            1. requests: frequently returns a broken body (requests.get gives back empty html), so pass
            '''
            # body = MyRequests.get_url_body(url=tmp_url, headers=self.headers, had_referer=True)
            '''
            2. fetch with phantomjs instead
            '''
            body = self.my_phantomjs.use_phantomjs_to_get_url_body(url=tmp_url)
            if body == '':
                print('re-matched data in body is empty!')
                self.result_data = {}  # reset so stale data does not leak into the next crawl
return {}
            data = re.compile(r'window.rawData= (.*?);</script>').findall(body)  # match every occurrence in the body
if data != []:
data = data[0]
try:
data = json.loads(data)
except Exception:
                self.result_data = {}  # reset so stale data does not leak into the next crawl
return {}
# pprint(data)
try:
data['goods'].pop('localGroups')
data['goods'].pop('mallService')
                data.pop('reviews')  # review content and related stats
except:
pass
# pprint(data)
'''
            convert detailGallery into HTML the page can render
'''
detail_data = data.get('goods', {}).get('detailGallery', [])
tmp_div_desc = ''
if detail_data != []:
for index in range(0, len(detail_data)):
                    if index == 0:  # skip Pinduoduo's own notice image
pass
else:
tmp = ''
tmp_img_url = detail_data[index].get('url')
tmp = r'<img src="{}" style="height:auto;width:100%;"/>'.format(tmp_img_url)
tmp_div_desc += tmp
detail_data = '<div>' + tmp_div_desc + '</div>'
else:
detail_data = ''
# print(detail_data)
try:
                data['goods'].pop('detailGallery')  # drop the image/text intro data; it has no further use
except:
pass
data['div_desc'] = detail_data
# pprint(data)
self.result_data = data
return data
else:
                print('data is empty!')
                self.result_data = {}  # reset so stale data does not leak into the next crawl
return {}
def deal_with_data(self):
'''
        process result_data and return the fields we need
        :return: dict
'''
data = self.result_data
if data != {}:
            # shop name
            if data.get('mall') is not None:
                shop_name = data.get('mall', {}).get('mallName', '')
            else:
                shop_name = ''
            # shopkeeper
            account = ''
            # goods name
            title = data.get('goods', {}).get('goodsName', '')
            # subtitle
            sub_title = ''
            # goods stock
            # values of each sku attribute
            # names of the sku attributes
            if data.get('goods', {}).get('skus', []) == []:
                detail_name_list = []
            else:
                if data.get('goods', {}).get('skus', [])[0].get('specs') == []:
                    detail_name_list = []
                else:
                    detail_name_list = [{'spec_name': item.get('spec_key')} for item in data.get('goods', {}).get('skus', [])[0].get('specs')]
# print(detail_name_list)
            # per-spec price and stock that will be stored
            skus = data.get('goods', {}).get('skus', [])
            # pprint(skus)
            price_info_list = []
            if skus != []:  # ** note: skus is non-empty even when a Pinduoduo goods has only one spec **
                for index in range(0, len(skus)):
                    tmp = {}
                    price = skus[index].get('groupPrice', '')  # group-buy price
                    normal_price = skus[index].get('normalPrice', '')  # price when bought alone
                    spec_value = [item.get('spec_value') for item in data.get('goods', {}).get('skus', [])[index].get('specs')]
                    spec_value = '|'.join(spec_value)
                    img_url = skus[index].get('thumbUrl', '')
                    rest_number = skus[index].get('quantity', 0)  # remaining stock
                    is_on_sale = skus[index].get('isOnSale', 0)  # 1: on special offer, 0: sell at normal_price
tmp['spec_value'] = spec_value
tmp['detail_price'] = price
tmp['normal_price'] = normal_price
tmp['img_url'] = img_url
if rest_number <= 0:
tmp['rest_number'] = 0
else:
tmp['rest_number'] = rest_number
tmp['is_on_sale'] = is_on_sale
price_info_list.append(tmp)
if price_info_list == []:
                print('price_info_list is empty')
                return {}
            # goods price and taobao price
            tmp_price_list = sorted([round(float(item.get('detail_price', '')), 2) for item in price_info_list])
            price = tmp_price_list[-1]  # goods price (highest)
            taobao_price = tmp_price_list[0]  # taobao price (lowest)
            if detail_name_list == []:
                print('## detail_name_list is empty ##')
                price_info_list = []
            # print('highest price: ', price)
            # print('lowest price: ', taobao_price)
# print(len(price_info_list))
# pprint(price_info_list)
            # all sample image URLs
            all_img_url = [{'img_url': item} for item in data.get('goods', {}).get('topGallery', [])]
            # print(all_img_url)
            # detail label names and their values
            tmp_p_value = re.compile(r'\n').sub('', data.get('goods', {}).get('goodsDesc', ''))
            tmp_p_value = re.compile(r'\t').sub('', tmp_p_value)
            tmp_p_value = re.compile(r' ').sub('', tmp_p_value)
            p_info = [{'p_name': '商品描述', 'p_value': tmp_p_value}]
            # print(p_info)
            # total sales
            all_sell_count = data.get('goods', {}).get('sales', 0)
            # div_desc
            div_desc = data.get('div_desc', '')
            # sale time window of the goods
            schedule = [{
                'begin_time': self.timestamp_to_regulartime(data.get('goods', {}).get('groupTypes', [])[0].get('startTime')),
                'end_time': self.timestamp_to_regulartime(data.get('goods', {}).get('groupTypes', [])[0].get('endTime')),
            }]
            # pprint(schedule)
            # flag for whether the goods has been delisted
is_delete = 0
result = {
                'shop_name': shop_name,  # shop name
                'account': account,  # shopkeeper
                'title': title,  # goods name
                'sub_title': sub_title,  # subtitle
                # 'shop_name_url': shop_name_url,  # shop homepage URL
                'price': price,  # goods price
                'taobao_price': taobao_price,  # taobao price
                # 'goods_stock': goods_stock,  # goods stock
                'detail_name_list': detail_name_list,  # sku attribute names
                # 'detail_value_list': detail_value_list,  # values of each sku attribute
                'price_info_list': price_info_list,  # per-spec price and stock to store
                'all_img_url': all_img_url,  # all sample image URLs
                'p_info': p_info,  # detail label names and their values
                'div_desc': div_desc,  # div_desc
                'schedule': schedule,  # start and end time of the sale
                'all_sell_count': all_sell_count,  # total sales of the goods
                'is_delete': is_delete  # flag for whether the goods has been delisted
}
# pprint(result)
# print(result)
# wait_to_send_data = {
# 'reason': 'success',
# 'data': result,
# 'code': 1
# }
# json_data = json.dumps(wait_to_send_data, ensure_ascii=False)
# print(json_data)
return result
else:
            print('the data to process is an empty dict; the goods may have been moved or delisted')
return {}
def to_right_and_update_data(self, data, pipeline):
data_list = data
tmp = GoodsItem()
        tmp['goods_id'] = data_list['goods_id']  # official goods id
        now_time = get_shanghai_time()
        tmp['modify_time'] = now_time  # modify time
        tmp['shop_name'] = data_list['shop_name']  # company name
        tmp['title'] = data_list['title']  # goods name
        tmp['sub_title'] = data_list['sub_title']  # goods subtitle
        tmp['link_name'] = ''  # seller name
        tmp['account'] = data_list['account']  # shopkeeper name
        # set the highest price as price, the lowest as taobao_price
        tmp['price'] = Decimal(data_list['price']).__round__(2)
        tmp['taobao_price'] = Decimal(data_list['taobao_price']).__round__(2)
        tmp['price_info'] = []  # price info
        tmp['detail_name_list'] = data_list['detail_name_list']  # sku attribute names
        """
        build the sku_map
        """
        tmp['price_info_list'] = data_list.get('price_info_list')  # per-spec price and stock
        tmp['all_img_url'] = data_list.get('all_img_url')  # all sample image URLs
        tmp['p_info'] = data_list.get('p_info')  # detail info
        tmp['div_desc'] = data_list.get('div_desc')  # bottom div
        tmp['schedule'] = data_list.get('schedule')
        tmp['is_delete'] = data_list.get('is_delete')  # soft delete: 0 = active, 1 = deleted
tmp['my_shelf_and_down_time'] = data_list.get('my_shelf_and_down_time')
tmp['delete_time'] = data_list.get('delete_time')
tmp['all_sell_count'] = str(data_list.get('all_sell_count'))
tmp['is_price_change'] = data_list.get('_is_price_change')
tmp['price_change_info'] = data_list.get('_price_change_info')
params = self._get_db_update_params(item=tmp)
        # SQL that also updates the price
# sql_str = r'update dbo.GoodsInfoAutoGet set ModfiyTime = %s, ShopName=%s, Account=%s, GoodsName=%s, SubTitle=%s, LinkName=%s, Price=%s, TaoBaoPrice=%s, PriceInfo=%s, SKUName=%s, SKUInfo=%s, ImageUrl=%s, PropertyInfo=%s, DetailInfo=%s, SellCount=%s, MyShelfAndDownTime=%s, delete_time=%s, IsDelete=%s, Schedule=%s, IsPriceChange=%s, PriceChangeInfo=%s where GoodsID = %s'
# 不改价格的sql语句
sql_str = r'update dbo.GoodsInfoAutoGet set ModfiyTime = %s, ShopName=%s, Account=%s, GoodsName=%s, SubTitle=%s, LinkName=%s, PriceInfo=%s, SKUName=%s, SKUInfo=%s, ImageUrl=%s, PropertyInfo=%s, DetailInfo=%s, SellCount=%s, MyShelfAndDownTime=%s, delete_time=%s, IsDelete=%s, Schedule=%s, IsPriceChange=%s, PriceChangeInfo=%s where GoodsID = %s'
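# NOTE: the %s placeholder order above must stay in sync with the tuple built in _get_db_update_params below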
pipeline._update_table(sql_str=sql_str, params=params)
def insert_into_pinduoduo_xianshimiaosha_table(self, data, pipeline):
data_list = data
tmp = {}
tmp['goods_id'] = data_list['goods_id'] # official goods id
tmp['spider_url'] = data_list['spider_url'] # product url
tmp['username'] = data_list['username'] # operator username
now_time = get_shanghai_time()
tmp['deal_with_time'] = now_time # processing time
tmp['modfiy_time'] = now_time # modification time
tmp['shop_name'] = data_list['shop_name'] # company name
tmp['title'] = data_list['title'] # product name
tmp['sub_title'] = data_list['sub_title'] # product subtitle
# set the highest price (price) and the lowest price (taobao_price)
tmp['price'] = round(Decimal(data_list['price']), 2)
tmp['taobao_price'] = round(Decimal(data_list['taobao_price']), 2)
tmp['detail_name_list'] = data_list['detail_name_list'] # label/attribute names
"""
Build the sku_map
"""
tmp['price_info_list'] = data_list.get('price_info_list') # price and stock for every spec
tmp['all_img_url'] = data_list.get('all_img_url') # all sample image urls
tmp['p_info'] = data_list.get('p_info') # detail info
tmp['div_desc'] = data_list.get('div_desc') # description div below the page
tmp['schedule'] = data_list.get('schedule')
tmp['stock_info'] = data_list.get('stock_info')
tmp['miaosha_time'] = data_list.get('miaosha_time')
tmp['miaosha_begin_time'] = data_list.get('miaosha_begin_time')
tmp['miaosha_end_time'] = data_list.get('miaosha_end_time')
# data collection source
tmp['site_id'] = 16 # source site id (the original comment referenced Juanpi flash-sale goods)
tmp['is_delete'] = data_list.get('is_delete') # logical delete: 0 = not deleted, 1 = deleted
# print('is_delete=', tmp['is_delete'])
# print('------>>>| data to be stored: |', tmp)
print('------>>>| data to be stored: ', tmp.get('goods_id'))
params = self._get_db_insert_miaosha_params(item=tmp)
sql_str = r'insert into dbo.pinduoduo_xianshimiaosha(goods_id, goods_url, username, create_time, modfiy_time, shop_name, goods_name, sub_title, price, taobao_price, sku_name, sku_info, all_image_url, property_info, detail_info, schedule, stock_info, miaosha_time, miaosha_begin_time, miaosha_end_time, site_id, is_delete) values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'
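# NOTE: 22 columns and 22 %s placeholders; the matching tuple comes from _get_db_insert_miaosha_params below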
pipeline._insert_into_table(sql_str=sql_str, params=params)
def to_update_pinduoduo_xianshimiaosha_table(self, data, pipeline):
data_list = data
tmp = {}
tmp['goods_id'] = data_list['goods_id'] # official goods id
now_time = get_shanghai_time()
tmp['modfiy_time'] = now_time # modification time
tmp['shop_name'] = data_list['shop_name'] # company name
tmp['title'] = data_list['title'] # product name
tmp['sub_title'] = data_list['sub_title']
# set the highest price (price) and the lowest price (taobao_price)
tmp['price'] = round(Decimal(data_list['price']), 2)
tmp['taobao_price'] = round(Decimal(data_list['taobao_price']), 2)
tmp['detail_name_list'] = data_list['detail_name_list'] # label/attribute names
"""
Build the sku_map
"""
tmp['price_info_list'] = data_list.get('price_info_list') # price and stock for every spec
tmp['all_img_url'] = data_list.get('all_img_url') # all sample image urls
tmp['p_info'] = data_list.get('p_info') # detail info
tmp['div_desc'] = data_list.get('div_desc') # description div below the page
tmp['schedule'] = data_list.get('schedule')
tmp['stock_info'] = data_list.get('stock_info')
tmp['miaosha_time'] = data_list.get('miaosha_time')
tmp['miaosha_begin_time'] = data_list.get('miaosha_begin_time')
tmp['miaosha_end_time'] = data_list.get('miaosha_end_time')
tmp['is_delete'] = data_list.get('is_delete') # logical delete: 0 = not deleted, 1 = deleted
# print('is_delete=', tmp['is_delete'])
# print('------>>> | data to be stored: |', tmp)
print('------>>>| data to be stored: |', tmp.get('goods_id'))
params = self._get_db_update_miaosha_params(item=tmp)
sql_str = r'update dbo.pinduoduo_xianshimiaosha set modfiy_time = %s, shop_name=%s, goods_name=%s, sub_title=%s, price=%s, taobao_price=%s, sku_name=%s, sku_info=%s, all_image_url=%s, property_info=%s, detail_info=%s, is_delete=%s, schedule=%s, stock_info=%s, miaosha_time=%s, miaosha_begin_time=%s, miaosha_end_time=%s where goods_id = %s'
pipeline._update_table(sql_str=sql_str, params=params)
def _get_db_update_params(self, item):
'''
Build the parameter tuple to be stored in the db
:param item:
:return:
'''
params = (
item['modify_time'],
item['shop_name'],
item['account'],
item['title'],
item['sub_title'],
item['link_name'],
# item['price'],
# item['taobao_price'],
dumps(item['price_info'], ensure_ascii=False),
dumps(item['detail_name_list'], ensure_ascii=False),
dumps(item['price_info_list'], ensure_ascii=False),
dumps(item['all_img_url'], ensure_ascii=False),
dumps(item['p_info'], ensure_ascii=False),
item['div_desc'],
item['all_sell_count'],
dumps(item['my_shelf_and_down_time'], ensure_ascii=False),
item['delete_time'],
item['is_delete'],
dumps(item['schedule'], ensure_ascii=False),
item['is_price_change'],
dumps(item['price_change_info'], ensure_ascii=False),
item['goods_id'],
)
return params
def _get_db_insert_miaosha_params(self, item):
params = (
item['goods_id'],
item['spider_url'],
item['username'],
item['deal_with_time'],
item['modfiy_time'],
item['shop_name'],
item['title'],
item['sub_title'],
item['price'],
item['taobao_price'],
dumps(item['detail_name_list'], ensure_ascii=False), # lists must be serialized to JSON before insertion (ensure_ascii=False keeps the Chinese text readable)
dumps(item['price_info_list'], ensure_ascii=False),
dumps(item['all_img_url'], ensure_ascii=False),
dumps(item['p_info'], ensure_ascii=False), # stored into PropertyInfo
item['div_desc'], # stored into DetailInfo
dumps(item['schedule'], ensure_ascii=False),
dumps(item['stock_info'], ensure_ascii=False),
dumps(item['miaosha_time'], ensure_ascii=False),
item['miaosha_begin_time'],
item['miaosha_end_time'],
item['site_id'],
item['is_delete'],
)
return params
def _get_db_update_miaosha_params(self, item):
params = (
item['modfiy_time'],
item['shop_name'],
item['title'],
item['sub_title'],
item['price'],
item['taobao_price'],
dumps(item['detail_name_list'], ensure_ascii=False),
dumps(item['price_info_list'], ensure_ascii=False),
dumps(item['all_img_url'], ensure_ascii=False),
dumps(item['p_info'], ensure_ascii=False),
item['div_desc'],
item['is_delete'],
dumps(item['schedule'], ensure_ascii=False),
dumps(item['stock_info'], ensure_ascii=False),
dumps(item['miaosha_time'], ensure_ascii=False),
item['miaosha_begin_time'],
item['miaosha_end_time'],
item['goods_id'],
)
return params
def set_cookies_key_api_uid(self):
'''
Add a cookie named api_uid to the request headers
:return:
'''
# set the proxy ip
ip_object = MyIpPools()
self.proxies = ip_object.get_proxy_ip_from_ip_pool() # {'http': ['xx', 'yy', ...]}
# pick a random proxy from the pool (the original indexed with len(self.proxies), the dict length, which always selected the first entry)
self.proxy = self.proxies['http'][randint(0, len(self.proxies['http']) - 1)]
tmp_proxies = {
'http': self.proxy,
}
# fetch the value of the cookie key named api_uid
host_url = 'http://mobile.yangkeduo.com'
try:
response = requests.get(host_url, headers=self.headers, proxies=tmp_proxies, timeout=10) # when passing data through requests, note that any &xxx= outside the url also has to be constructed in the headers first
api_uid = response.cookies.get('api_uid')
# print(response.cookies.items())
# if api_uid is None:
# api_uid = 'rBQh+FoXerAjQWaAEOcpAg=='
self.headers['Cookie'] = 'api_uid=' + str(api_uid) + ';'
# print(api_uid)
except Exception:
print('requests.get() request timed out....')
pass
def timestamp_to_regulartime(self, timestamp):
'''
Convert a unix timestamp into a formatted time string
'''
# localtime() turns the timestamp into a struct_time,
# then strftime() reformats it
# convert to localtime
time_local = time.localtime(timestamp)
# convert to the new time format (2016-05-05 20:28:54)
dt = time.strftime("%Y-%m-%d %H:%M:%S", time_local)
return dt
def get_goods_id_from_url(self, pinduoduo_url):
'''
Extract the goods_id from a product url
:param pinduoduo_url:
:return: goods_id (type str)
'''
is_pinduoduo_url = re.compile(r'http://mobile.yangkeduo.com/goods.html.*?').findall(pinduoduo_url)
if is_pinduoduo_url != []:
if re.compile(r'http://mobile.yangkeduo.com/goods.html\?.*?goods_id=(\d+).*?').findall(pinduoduo_url) != []:
tmp_pinduoduo_url = re.compile(r'http://mobile.yangkeduo.com/goods.html\?.*?goods_id=(\d+).*?').findall(pinduoduo_url)[0]
if tmp_pinduoduo_url != '':
goods_id = tmp_pinduoduo_url
else: # only needed when testing inside pycharm; can be omitted
pinduoduo_url = re.compile(r';').sub('', pinduoduo_url)
goods_id = re.compile(r'http://mobile.yangkeduo.com/goods.html\?.*?goods_id=(\d+).*?').findall(pinduoduo_url)[0]
print('------>>>| extracted pinduoduo goods_id:', goods_id)
return goods_id
else:
pass
else:
print('invalid pinduoduo product url; expected a url starting with http://mobile.yangkeduo.com/goods.html ...')
return ''
def __del__(self):
try:
del self.my_phantomjs
except:
pass
gc.collect()
if __name__ == '__main__':
pinduoduo = PinduoduoParse()
while True:
pinduoduo_url = input('Enter the pinduoduo product url to crawl: ')
pinduoduo_url = pinduoduo_url.strip('\n').strip(';') # str.strip returns a new string, so the result must be reassigned
goods_id = pinduoduo.get_goods_id_from_url(pinduoduo_url)
data = pinduoduo.get_goods_data(goods_id=goods_id)
pinduoduo.deal_with_data() | [
"superonesfazai@gmail.com"
] | superonesfazai@gmail.com |
fc087e51acf67938797c340946b8cda20e017f72 | e65a4dbfbfb0e54e59787ba7741efee12f7687f3 | /archivers/py-brotli/files/patch-setup.py | 9f97287a706fdfc4c9aa658f8632f588dd3bf945 | [
"BSD-2-Clause"
] | permissive | freebsd/freebsd-ports | 86f2e89d43913412c4f6b2be3e255bc0945eac12 | 605a2983f245ac63f5420e023e7dce56898ad801 | refs/heads/main | 2023-08-30T21:46:28.720924 | 2023-08-30T19:33:44 | 2023-08-30T19:33:44 | 1,803,961 | 916 | 918 | NOASSERTION | 2023-09-08T04:06:26 | 2011-05-26T11:15:35 | null | UTF-8 | Python | false | false | 3,962 | py | --- setup.py.orig 2020-08-27 06:24:08 UTC
+++ setup.py
@@ -25,7 +25,7 @@ CURR_DIR = os.path.abspath(os.path.dirname(os.path.rea
def get_version():
""" Return BROTLI_VERSION string as defined in 'common/version.h' file. """
- version_file_path = os.path.join(CURR_DIR, 'c', 'common', 'version.h')
+ version_file_path = os.path.join(CURR_DIR, 'common', 'version.h')
version = 0
with open(version_file_path, 'r') as f:
for line in f:
@@ -181,92 +181,17 @@ EXT_MODULES = [
'_brotli',
sources=[
'python/_brotli.cc',
- 'c/common/constants.c',
- 'c/common/context.c',
- 'c/common/dictionary.c',
- 'c/common/platform.c',
- 'c/common/transform.c',
- 'c/dec/bit_reader.c',
- 'c/dec/decode.c',
- 'c/dec/huffman.c',
- 'c/dec/state.c',
- 'c/enc/backward_references.c',
- 'c/enc/backward_references_hq.c',
- 'c/enc/bit_cost.c',
- 'c/enc/block_splitter.c',
- 'c/enc/brotli_bit_stream.c',
- 'c/enc/cluster.c',
- 'c/enc/command.c',
- 'c/enc/compress_fragment.c',
- 'c/enc/compress_fragment_two_pass.c',
- 'c/enc/dictionary_hash.c',
- 'c/enc/encode.c',
- 'c/enc/encoder_dict.c',
- 'c/enc/entropy_encode.c',
- 'c/enc/fast_log.c',
- 'c/enc/histogram.c',
- 'c/enc/literal_cost.c',
- 'c/enc/memory.c',
- 'c/enc/metablock.c',
- 'c/enc/static_dict.c',
- 'c/enc/utf8_util.c',
],
depends=[
- 'c/common/constants.h',
- 'c/common/context.h',
- 'c/common/dictionary.h',
- 'c/common/platform.h',
- 'c/common/transform.h',
- 'c/common/version.h',
- 'c/dec/bit_reader.h',
- 'c/dec/huffman.h',
- 'c/dec/prefix.h',
- 'c/dec/state.h',
- 'c/enc/backward_references.h',
- 'c/enc/backward_references_hq.h',
- 'c/enc/backward_references_inc.h',
- 'c/enc/bit_cost.h',
- 'c/enc/bit_cost_inc.h',
- 'c/enc/block_encoder_inc.h',
- 'c/enc/block_splitter.h',
- 'c/enc/block_splitter_inc.h',
- 'c/enc/brotli_bit_stream.h',
- 'c/enc/cluster.h',
- 'c/enc/cluster_inc.h',
- 'c/enc/command.h',
- 'c/enc/compress_fragment.h',
- 'c/enc/compress_fragment_two_pass.h',
- 'c/enc/dictionary_hash.h',
- 'c/enc/encoder_dict.h',
- 'c/enc/entropy_encode.h',
- 'c/enc/entropy_encode_static.h',
- 'c/enc/fast_log.h',
- 'c/enc/find_match_length.h',
- 'c/enc/hash.h',
- 'c/enc/hash_composite_inc.h',
- 'c/enc/hash_forgetful_chain_inc.h',
- 'c/enc/hash_longest_match64_inc.h',
- 'c/enc/hash_longest_match_inc.h',
- 'c/enc/hash_longest_match_quickly_inc.h',
- 'c/enc/hash_rolling_inc.h',
- 'c/enc/hash_to_binary_tree_inc.h',
- 'c/enc/histogram.h',
- 'c/enc/histogram_inc.h',
- 'c/enc/literal_cost.h',
- 'c/enc/memory.h',
- 'c/enc/metablock.h',
- 'c/enc/metablock_inc.h',
- 'c/enc/params.h',
- 'c/enc/prefix.h',
- 'c/enc/quality.h',
- 'c/enc/ringbuffer.h',
- 'c/enc/static_dict.h',
- 'c/enc/static_dict_lut.h',
- 'c/enc/utf8_util.h',
- 'c/enc/write_bits.h',
],
include_dirs=[
- 'c/include',
+ '%%LOCALBASE%%/include',
+ ],
+ libraries=[
+ 'brotlicommon', 'brotlidec', 'brotlienc',
+ ],
+ library_dirs=[
+ '%%LOCALBASE%%/lib',
],
language='c++'),
]
| [
"sunpoet@FreeBSD.org"
] | sunpoet@FreeBSD.org |
257fdf4bf5c379f7a73f67693f5512683c6c3822 | 1a6cbe035adb81fea66615323a836327d06f9e72 | /year2020/run.py | 9cf6233087187489d27412c4e5c11f9fc5271ba4 | [] | no_license | ecurtin2/advent-of-code | a2607d857408d722b07d4cfc66855edcd019cda7 | 216db926c5bab9bf1ec3cac2aa912c1a2ff70d6c | refs/heads/main | 2022-12-15T10:06:51.202608 | 2022-12-14T17:28:15 | 2022-12-14T17:28:15 | 160,612,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,876 | py | import importlib
from dataclasses import dataclass
from pathlib import Path
from statistics import mean, stdev
from time import time
from typing import Any, List, Optional, Tuple, get_type_hints
import cattr
import click
AVAILABLE_DAYS: List[int] = sorted(
int(str(p.name).replace("d", "").replace(".py", ""))
for p in Path(__file__).parent.rglob("d*.py")
)
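# discover the available solution modules (files named d<day>.py) and sort them by day number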
cattr.register_structure_hook(List[int], lambda s, _: [int(l) for l in s.splitlines()])
cattr.register_structure_hook(List[str], lambda s, _: s.splitlines())
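# cattr structure hooks: they define how the raw puzzle input text is parsed into the
# typed argument each solver expects (List[int] -> one int per line, List[str] -> raw lines)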
def timeit(f, *args, **kwargs) -> Tuple[float, float]:
times = []
for _ in range(1): # bump the range for more samples per measurement
begin = time()
f(*args, **kwargs)
times.append(time() - begin)
# stdev() raises StatisticsError on fewer than two samples, so fall back to 0.0 for a single run
return mean(times), stdev(times) if len(times) > 1 else 0.0
@dataclass
class Run:
day: int
part: int
result: Optional[Any] = None
mean_duration_ms: Optional[float] = None
std_duration_ms: Optional[float] = None
def execute(self, timed: bool = False):
module = importlib.import_module(f"d{self.day}")
input_path = Path(f"inputs/d{self.day}p{self.part}.txt")
if (not input_path.is_file()) and (self.part != 1):
# fallback since sometimes same input reused.
input_path = Path(f"inputs/d{self.day}p1.txt")
try:
raw_str = input_path.read_text()
except FileNotFoundError:
raise FileNotFoundError(
f"No data file found for day {self.day} part {self.part}. Expected {input_path}"
)
func = getattr(module, f"part{self.part}")
typ = list(get_type_hints(func).values())[0]
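# the annotation of the solver's first parameter selects which cattr hook parses the raw input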
inp = cattr.structure(raw_str, typ)
self.result = func(inp)
if timed:
m, s = timeit(func, inp)
self.mean_duration_ms = m * 1000
self.std_duration_ms = s * 1000
@click.command()
@click.option(
"--day",
"-d",
type=click.Choice([str(d) for d in AVAILABLE_DAYS] + ["all"]),
required=False,
default="all",
)
@click.option("--part", "-p", type=click.Choice(["1", "2"]), required=False)
@click.option("--timed/--no-timed", default=False)
def cli(day: int, part: Optional[int], timed: bool):
if part is None:
parts = [1, 2]
else:
parts = [part]
if day == "all":
runs = [Run(day=d, part=p) for d in AVAILABLE_DAYS for p in parts]
else:
runs = [Run(day=int(day), part=p) for p in parts]
last_day = -1
for run in runs:
run.execute(timed=timed)
if run.day != last_day:
print(f"\nDay {run.day}\n--------------------------------")
last_day = run.day
print(f"Part {run.part}:\n Result: {run.result}")
if run.mean_duration_ms:
print(
f" Time: {run.mean_duration_ms:07.4f} +/- {run.std_duration_ms:07.4f}ms"
)
if __name__ == "__main__":
cli()
| [
"ecurtin2@illinois.edu"
] | ecurtin2@illinois.edu |
b3066063ef59fcb92ab7a88089fc8ce41b881999 | bd1db30fd3c593e8dc4f2e21de630668456ed28f | /educational-resources/robotics/gym-gazebo-master/examples/turtlebot/circuit2_turtlebot_lidar_sarsa.py | ed521957b28cc965d5848020afaef371190c0b89 | [
"MIT",
"GPL-3.0-only"
] | permissive | vicb1/miscellaneous-notes | c50d759729b4d0067b3c4cb51a69350db5a941b9 | eb63641a8156a4dcd78924b5d0f6f0618479ceaf | refs/heads/master | 2023-05-11T06:04:03.254582 | 2023-05-09T12:34:18 | 2023-05-09T12:34:18 | 227,648,115 | 1 | 0 | MIT | 2022-12-11T17:00:50 | 2019-12-12T16:20:38 | Python | UTF-8 | Python | false | false | 2,709 | py | #!/usr/bin/env python
import gym
from gym import wrappers
import gym_gazebo
import time
import numpy
import random
from functools import reduce # needed for the "Best 100 score" summary below (reduce is not a builtin in Python 3)
import liveplot
import sarsa
if __name__ == '__main__':
env = gym.make('GazeboCircuit2TurtlebotLidar-v0')
outdir = '/tmp/gazebo_gym_experiments'
env = gym.wrappers.Monitor(env, outdir, force=True)
plotter = liveplot.LivePlot(outdir)
last_time_steps = numpy.ndarray(0)
sarsa = sarsa.Sarsa(actions=range(env.action_space.n),
epsilon=0.9, alpha=0.2, gamma=0.9)
initial_epsilon = sarsa.epsilon
epsilon_discount = 0.9986
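# epsilon is decayed each episode (down to the 0.05 floor checked inside the loop),
# shifting the agent from exploration toward exploitation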
start_time = time.time()
total_episodes = 10000
highest_reward = 0
for x in range(total_episodes):
done = False
cumulated_reward = 0 #Should going forward give more reward then L/R ?
observation = env.reset()
if sarsa.epsilon > 0.05:
sarsa.epsilon *= epsilon_discount
#render() #defined above, not env.render()
state = ''.join(map(str, observation))
for i in range(1500):
# Pick an action based on the current state
action = sarsa.chooseAction(state)
# Execute the action and get feedback
observation, reward, done, info = env.step(action)
cumulated_reward += reward
if highest_reward < cumulated_reward:
highest_reward = cumulated_reward
nextState = ''.join(map(str, observation))
nextAction = sarsa.chooseAction(nextState)
#sarsa.learn(state, action, reward, nextState)
sarsa.learn(state, action, reward, nextState, nextAction)
env._flush(force=True)
if not(done):
state = nextState
else:
last_time_steps = numpy.append(last_time_steps, [int(i + 1)])
break
if x%100==0:
plotter.plot(env)
m, s = divmod(int(time.time() - start_time), 60)
h, m = divmod(m, 60)
print ("EP: "+str(x+1)+" - [alpha: "+str(round(sarsa.alpha,2))+" - gamma: "+str(round(sarsa.gamma,2))+" - epsilon: "+str(round(sarsa.epsilon,2))+"] - Reward: "+str(cumulated_reward)+" Time: %d:%02d:%02d" % (h, m, s))
#Github table content
print ("\n|"+str(total_episodes)+"|"+str(sarsa.alpha)+"|"+str(sarsa.gamma)+"|"+str(initial_epsilon)+"*"+str(epsilon_discount)+"|"+str(highest_reward)+"| PICTURE |")
l = last_time_steps.tolist()
l.sort()
#print("Parameters: a="+str)
print("Overall score: {:0.2f}".format(last_time_steps.mean()))
print("Best 100 score: {:0.2f}".format(reduce(lambda x, y: x + y, l[-100:]) / len(l[-100:])))
env.close()
| [
"vbajenaru@gmail.com"
] | vbajenaru@gmail.com |
21fcf9c6612d4c6ab1f4b9d492db8700a0d3e51e | 1bb3c20ce5a53889eac280826d7d54194c7db33f | /string/1152.py | cba6aa371197ec76347749417139c13963d48e74 | [] | no_license | yejiiha/BaekJoon_step | bd2c040597766613985ae8d3a943999cb35d6671 | 3eaedbb832f14c51f3fb990e7e140f00d732df1e | refs/heads/master | 2023-03-24T06:36:28.851139 | 2021-03-21T14:15:40 | 2021-03-21T14:15:40 | 285,825,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | string = list(input().split())
print(len(string))
| [
"1126yezy@gmail.com"
] | 1126yezy@gmail.com |
6d2738154c6822ac0a29f7528ce4dbd6d67416dc | 9db0c5f0a655ec142a318a906c5272845f7d928a | /blog/blog/views.py | 2f251ce19c493fd923c1f6b164b08db8d7a27349 | [] | no_license | SeriousMarc/my-blog | 0f0b3a1538d42219defd3e61286d919736928663 | aeeef2e9c6fe617d39464c569244327c8e8f995f | refs/heads/master | 2021-04-29T21:17:59.924803 | 2018-02-15T20:51:58 | 2018-02-15T20:51:58 | 121,612,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | from django.shortcuts import render
def homepage(request):
return render(request, 'base_layout.html')
| [
"medivhbox@gmail.com"
] | medivhbox@gmail.com |
52cff6065e5fb915053715a160ce650b4c2235d8 | 923f9270a12be35fdd297d8f27e522c601e94eab | /doc/slides/src/decay/src-decay/decay_mod_unittest.py | 807c314773b07bb8b1fb368b785a75891694aec8 | [] | no_license | t-bltg/INF5620 | a06b6e06b6aba3bc35e933abd19c58cd78584c1f | d3e000462302839b49693cfe06a2f2df924c5027 | refs/heads/master | 2021-05-31T00:41:41.624838 | 2016-03-22T09:29:00 | 2016-03-22T09:29:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,015 | py | import numpy as np
import matplotlib.pyplot as plt
import sys
from math import log
def solver(I, a, T, dt, theta):
"""
Solve u'=-a*u, u(0)=I, for t in (0,T] with steps of dt.
>>> u, t = solver(I=0.8, a=1.2, T=4, dt=0.5, theta=0.5)
>>> for t_n, u_n in zip(t, u):
... print 't=%.1f, u=%.14f' % (t_n, u_n)
t=0.0, u=0.80000000000000
t=0.5, u=0.43076923076923
t=1.0, u=0.23195266272189
t=1.5, u=0.12489758761948
t=2.0, u=0.06725254717972
t=2.5, u=0.03621291001985
t=3.0, u=0.01949925924146
t=3.5, u=0.01049960113002
t=4.0, u=0.00565363137770
"""
dt = float(dt) # avoid integer division
Nt = int(round(T/dt)) # no of time intervals
T = Nt*dt # adjust T to fit time step dt
u = np.zeros(Nt+1) # array of u[n] values
t = np.linspace(0, T, Nt+1) # time mesh
u[0] = I # assign initial condition
for n in range(0, Nt): # n=0,1,...,Nt-1
u[n+1] = (1 - (1-theta)*a*dt)/(1 + theta*dt*a)*u[n]
return u, t
def exact_solution(t, I, a):
return I*np.exp(-a*t)
def explore(I, a, T, dt, theta=0.5, makeplot=True):
"""
Run a case with the solver, compute error measure,
and plot the numerical and exact solutions (if makeplot=True).
>>> for theta in 0, 0.5, 1:
... E = explore(I=1.9, a=2.1, T=5, dt=0.1, theta=theta,
... makeplot=False)
... print '%.10E' % E
...
7.3565079236E-02
2.4183893110E-03
6.5013039886E-02
"""
u, t = solver(I, a, T, dt, theta) # Numerical solution
u_e = exact_solution(t, I, a)
e = u_e - u
E = np.sqrt(dt*np.sum(e**2))
if makeplot:
plt.figure() # create new plot
t_e = np.linspace(0, T, 1001) # very fine mesh for u_e
u_e = exact_solution(t_e, I, a)
plt.plot(t, u, 'r--o') # red dashes w/circles
plt.plot(t_e, u_e, 'b-') # blue line for u_e
plt.legend(['numerical', 'exact'])
plt.xlabel('t')
plt.ylabel('u')
plt.title('Method: theta-rule, theta=%g, dt=%g' % \
(theta, dt))
theta2name = {0: 'FE', 1: 'BE', 0.5: 'CN'}
plt.savefig('%s_%g.png' % (theta2name[theta], dt))
plt.show()
return E
def define_command_line_options():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--I', '--initial_condition', type=float,
default=1.0, help='initial condition, u(0)',
metavar='I')
parser.add_argument('--a', type=float,
default=1.0, help='coefficient in ODE',
metavar='a')
parser.add_argument('--T', '--stop_time', type=float,
default=3.0, help='end time of simulation',
metavar='T')
parser.add_argument('--makeplot', action='store_true',
help='display plot or not')
parser.add_argument('--dt', '--time_step_values', type=float,
default=[0.5], help='time step values',
metavar='dt', nargs='+', dest='dt_values')
return parser
def read_command_line(use_argparse=True):
"""
Read parameters from the command line and return their
values as the sequence I, a, T, makeplot, dt_values.
>>> sys.argv[1:] = '1.2 0.9 4 True 0.1 0.05'.split()
>>> prms = read_command_line(use_argparse=False)
>>> print(prms)
(1.2, 0.9, 4.0, True, [0.1, 0.05])
>>> sys.argv[1:] = '--I 1.2 --a 0.9 --T 4 --makeplot '\
'--dt 0.1 0.05'.split()
>>> prms = read_command_line(use_argparse=True)
>>> print(prms)
(1.2, 0.9, 4.0, True, [0.1, 0.05])
"""
if use_argparse:
parser = define_command_line_options()
args = parser.parse_args()
return args.I, args.a, args.T, args.makeplot, args.dt_values
else:
if len(sys.argv) < 6:
print('Usage: %s I a on/off dt1 dt2 dt3 ...' % sys.argv[0])
sys.exit(1)
I = float(sys.argv[1])
a = float(sys.argv[2])
T = float(sys.argv[3])
makeplot = sys.argv[4] in ('on', 'True')
dt_values = [float(arg) for arg in sys.argv[5:]]
return I, a, T, makeplot, dt_values
def main():
I, a, T, makeplot, dt_values = read_command_line()
r = {}
for theta in 0, 0.5, 1:
E_values = []
for dt in dt_values:
E = explore(I, a, T, dt, theta, makeplot=False)
E_values.append(E)
# Compute convergence rates
m = len(dt_values)
r[theta] = [log(E_values[i-1]/E_values[i])/
log(dt_values[i-1]/dt_values[i])
for i in range(1, m, 1)]
for theta in r:
print('\nPairwise convergence rates for theta=%g:' % theta)
print(' '.join(['%.2f' % r_ for r_ in r[theta]]))
return r
if __name__ == '__main__':
main()
| [
"hpl@simula.no"
] | hpl@simula.no |
0545fda410ccd767130446d92bd5540e4debe9be | c4c9fd6bb97f8d0d16de36f67365b76278c93959 | /proj/proj03/tests/q1_10.py | fa4a5049dd509f6c35ecf0afb423b3be88f203d0 | [] | no_license | ds-connectors/DATA-88-EconModels-sp20 | d71795dad299031add94e52a60206b5840b144c7 | 6a8297cf20f8afe8ef52e4bfa93aca3cfff1afc5 | refs/heads/master | 2023-08-30T03:47:51.417760 | 2020-04-27T19:35:16 | 2020-04-27T19:35:16 | 231,190,728 | 1 | 0 | null | 2023-08-14T21:55:19 | 2020-01-01T08:09:44 | Jupyter Notebook | UTF-8 | Python | false | false | 447 | py | test = { 'name': 'q1_10',
'points': 1,
'suites': [ { 'cases': [ {'code': '>>> -4000 <= default_beta <= -3000\nTrue', 'hidden': False, 'locked': False},
{'code': '>>> 51000 <= default_alpha <= 52000\nTrue', 'hidden': False, 'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| [
"cpyles@berkeley.edu"
] | cpyles@berkeley.edu |
3a0dcd089e8fecfa5fd81aa3848aa8220b1e2507 | 0e820627e68413aebe27fbc32dde66a3c99651d3 | /flamingo/core/parser.py | 186d7d11e2fd1b1da243823e9c6d051bca7a2bdc | [
"Apache-2.0"
] | permissive | ejoerns/flamingo | 17de116008561a4a64613b2b6528eb6d52706281 | 1d61c99c9ad34dd0a2a652f80783226051e07238 | refs/heads/master | 2020-04-11T02:53:47.295752 | 2018-12-14T11:17:23 | 2018-12-14T11:37:39 | 161,460,547 | 0 | 0 | Apache-2.0 | 2018-12-12T09:00:34 | 2018-12-12T09:00:34 | null | UTF-8 | Python | false | false | 2,022 | py | from configparser import ConfigParser, Error as ConfigParserError
from io import StringIO
import os
from flamingo.core.errors import FlamingoError
class ParsingError(FlamingoError):
pass
class ContentParser:
FILE_EXTENSIONS = []
def __init__(self):
self.configparser = ConfigParser(interpolation=None)
def parse_meta_data(self, fp, content):
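# metadata is an INI-style header parsed via ConfigParser under a synthetic [meta]
# section; two consecutive blank lines terminate it and the remainder is the body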
meta_data_buffer = StringIO('[meta]\n')
meta_data_buffer.read()
empty_lines = 0
while True:
line = fp.readline()
if not line: # eof
break
if not line.strip():
empty_lines += 1
else:
empty_lines = 0
if empty_lines == 2:
break
meta_data_buffer.write(line)
meta_data_buffer.seek(0)
self.configparser.clear()
self.configparser.read_file(meta_data_buffer)
for option in self.configparser.options('meta'):
content[option] = self.configparser.get('meta', option)
def parse(self, fp, content):
self.parse_meta_data(fp, content)
content['content_body'] = fp.read().strip()
class FileParser:
def __init__(self):
self._parsers = []
def add_parser(self, parser):
self._parsers.append(parser)
def find_parser(self, extension):
for parser in self._parsers:
if extension in parser.FILE_EXTENSIONS:
return parser
def get_extensions(self):
return sum([i.FILE_EXTENSIONS for i in self._parsers], [])
def parse(self, path, content):
extension = os.path.splitext(path)[1][1:]
parser = self.find_parser(extension)
if not parser:
raise ParsingError(
"file extension '{}' is not supported".format(extension))
try:
with open(path, 'r') as f: # FIXME: chardet
parser.parse(f, content)
except ConfigParserError:
raise ParsingError('Metadata seems to be broken')
| [
"f.scherf@pengutronix.de"
] | f.scherf@pengutronix.de |
c29e057c72aa360392571f06201dd466d58bf1fa | 79f1e7932c27eb01483f8764720c672242052e1f | /training_horovod_single_aa.py | 4d7e93d81b64226a254dda09ec2813e09755c295 | [] | no_license | pk-organics/uniparc_modeling | 3b16ae5b85dc178fdcab4be3b4ddbdab02c80897 | ab9faaad00c20416ea2ac86f6f91b83f86ffb7a4 | refs/heads/master | 2023-02-13T19:58:29.841889 | 2019-12-05T18:53:05 | 2019-12-05T18:53:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,457 | py | import os
import argparse
parser = argparse.ArgumentParser(description='BERT model training')
parser.add_argument('--modelName', default='bert', help='model name for directory saving')
parser.add_argument('--batchSize', type=int, default=20, help='batch size per gpu')
parser.add_argument('--stepsPerEpoch', type=int, default=10000, help='steps per epoch')
parser.add_argument('--warmup', type=int, default=16000, help='warmup steps')
arguments = parser.parse_args()
import numpy as np
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
from bert.dataset import create_masked_input_dataset
from bert.layers import (PositionEmbedding, Attention, Transformer, TokenEmbedding, Bias,
gelu, masked_sparse_cross_entropy_loss, InverseSquareRootSchedule,
initializer, Projection)
import horovod.tensorflow.keras as hvd
# Horovod: initialize Horovod.
hvd.init()
# Print runtime config on head node
if hvd.rank() == 0:
print(arguments)
# Horovod: pin GPU to be used to process local rank (one GPU per process)
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
# import tensorflow_addons as tfa
from tensorflow.keras import layers
vocab_size = 22
max_seq_len = 1024
def encode(line_tensor):
line = line_tensor.numpy().decode('utf8')
if len(line) > max_seq_len:
offset = np.random.randint(
low=0, high=len(line) - max_seq_len + 1)
line = line[offset:(offset + max_seq_len)]
vocab = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K',
'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V',
'W', 'Y']
replacement_dict = {key: i + 2 for i, key in enumerate(vocab)}
return np.asarray([replacement_dict[item] for item in line])
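# token ids 2..21 cover the 20 amino acids; 0 is reserved for padding (mask_zero=True below)
# and 4 is the mask token (mask_index=4 in the dataset builders)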
def encode_tf(line_tensor):
return tf.py_function(encode, inp=[line_tensor], Tout=[tf.int32,])
training_data = create_masked_input_dataset(
encode_fn=encode_tf,
sequence_path='/projects/bpms/pstjohn/uniparc/sequences_train.txt',
max_sequence_length=max_seq_len,
batch_size=arguments.batchSize,
buffer_size=1024,
vocab_size=vocab_size,
mask_index=4,
vocab_start=5,
fix_sequence_length=True,
shard_num_workers=hvd.size(),
shard_worker_index=hvd.rank())
training_data = training_data.repeat().prefetch(tf.data.experimental.AUTOTUNE) # reassign: tf.data transformations return a new dataset
valid_data = create_masked_input_dataset(
encode_fn=encode_tf,
sequence_path='/projects/bpms/pstjohn/uniparc/sequences_valid.txt',
max_sequence_length=max_seq_len,
batch_size=arguments.batchSize,
buffer_size=1024,
vocab_size=vocab_size,
mask_index=4,
vocab_start=5,
fix_sequence_length=True,
shard_num_workers=hvd.size(),
shard_worker_index=hvd.rank())
valid_data = valid_data.repeat().prefetch(tf.data.experimental.AUTOTUNE)
embedding_dimension = 128
model_dimension = 768
transformer_dimension = 4 * model_dimension
num_attention_heads = model_dimension // 64
num_transformer_layers = 12
# embedding_dimension = 32
# model_dimension = 64
# num_attention_heads = model_dimension // 16
# num_transformer_layers = 4
dropout_rate = 0.
# Horovod: adjust learning rate based on number of GPUs.
learning_rate = 1E-4
inputs = layers.Input(shape=(max_seq_len,), dtype=tf.int32, batch_size=None)
input_mask = layers.Input(shape=(max_seq_len,), dtype=tf.bool, batch_size=None)
token_embedding_layer = TokenEmbedding(
vocab_size, embedding_dimension, embeddings_initializer=initializer(), mask_zero=True)
token_embeddings = token_embedding_layer(inputs)
position_embeddings = PositionEmbedding(
max_seq_len + 1, embedding_dimension, embeddings_initializer=initializer(),
mask_zero=True)(inputs)
embeddings = layers.Add()([token_embeddings, position_embeddings])
embeddings = Projection(model_dimension, dropout_rate, use_residual=False)(embeddings)
transformer = Transformer(num_attention_heads, transformer_dimension, dropout=dropout_rate)
for i in range(num_transformer_layers):
embeddings = transformer(embeddings)
out = layers.Dense(embedding_dimension, activation=gelu, kernel_initializer=initializer())(embeddings)
out = token_embedding_layer(out, transpose=True)
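# calling the token embedding with transpose=True ties the output projection to the input embedding matrix (weight tying)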
out = Bias()([out, input_mask])
model = tf.keras.Model([inputs, input_mask], [out], name='model')
if hvd.rank() == 0:
model.summary()
# Horovod: add Horovod DistributedOptimizer.
# opt = tfa.optimizers.AdamW(weight_decay=0.01, learning_rate=learning_rate)
opt = tf.optimizers.Adam(learning_rate=learning_rate)
opt = hvd.DistributedOptimizer(opt)
from tensorflow.python.keras.metrics import MeanMetricWrapper
def exponentiated_sparse_categorical_crossentropy(*args, **kwargs):
return tf.exp(tf.losses.sparse_categorical_crossentropy(*args, **kwargs))
class ExponentiatedSparseCategoricalCrossentropy(MeanMetricWrapper):
def __init__(self,
name='exponentiated_sparse_categorical_crossentropy',
dtype=None,
from_logits=False,
axis=-1):
super(ExponentiatedSparseCategoricalCrossentropy, self).__init__(
exponentiated_sparse_categorical_crossentropy,
name,
dtype=dtype,
from_logits=from_logits,
axis=axis)
# Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow
# uses hvd.DistributedOptimizer() to compute gradients.
model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy(),
ExponentiatedSparseCategoricalCrossentropy(from_logits=True)],
optimizer=opt,
experimental_run_tf_function=False)
model_name = arguments.modelName
checkpoint_dir = f'{model_name}_checkpoints'
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{epoch}.h5")
callbacks = [
# Horovod: broadcast initial variable states from rank 0 to all other processes.
# This is necessary to ensure consistent initialization of all workers when
# training is started with random weights or restored from a checkpoint.
hvd.callbacks.BroadcastGlobalVariablesCallback(0),
# Horovod: average metrics among workers at the end of every epoch.
# Note: This callback must be in the list before the ReduceLROnPlateau,
# TensorBoard or other metrics-based callbacks.
hvd.callbacks.MetricAverageCallback(),
# Horovod: using `lr = 1.0 * hvd.size()` from the very beginning leads to worse final
# accuracy. Scale the learning rate `lr = 1.0` ---> `lr = 1.0 * hvd.size()` during
# the first three epochs. See https://arxiv.org/abs/1706.02677 for details.
InverseSquareRootSchedule(learning_rate=learning_rate, warmup_updates=arguments.warmup),
]
# Horovod: save checkpoints only on worker 0 to prevent other workers from corrupting them.
if hvd.rank() == 0:
callbacks.append(tf.keras.callbacks.CSVLogger(f'{checkpoint_dir}/log.csv'))
callbacks.append(tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_prefix))
# Horovod: write logs on worker 0.
verbose = 1 if hvd.rank() == 0 else 0
model.fit(training_data, steps_per_epoch=arguments.stepsPerEpoch, epochs=500,
verbose=verbose, validation_data=valid_data, validation_steps=100,
callbacks=callbacks)
| [
"peterc.stjohn@gmail.com"
] | peterc.stjohn@gmail.com |
e312d6d3314dc67c99a82a7d258f106f69278143 | c70aa626cb96e5a62b8f4789c6bb885229375134 | /ProjectOne/card.py | 4cf7f030878e1e681a71fdbc247088b3836ae994 | [] | no_license | BaranAkcakaya/PythonProject | 279b62907ba9598199fdf1f7642bfb3af9aa98be | 486389196b22df7134b02190de636c949fbb08a1 | refs/heads/main | 2023-01-08T14:51:14.735863 | 2020-11-02T10:34:15 | 2020-11-02T10:34:15 | 309,336,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | #SUIT
HEARTS = 0 #RED
DIAMONDS = 1 #RED
CLUBS = 2 #BLACK
SPADES = 3 #BLACk
#RANK
TWO = 0
THREE = 1
FOUR = 2
FIVE = 3
SIX = 4
SEVEN = 5
EIGHT = 6
NINE = 7
TEN = 8
JACK = 9
QUEEN = 10
KING = 11
ACE = 12
def get_suit(card):
return (card - 1) % 4 # cards are numbered from 1, so shift to 0-based first; mod 4 then gives the suit (the original (card % 4) - 1 returned -1 for multiples of 4)
def get_rank(card):
return (card - 1) // 4 # integer division by 4 tells which group of four the card falls into, i.e. its rank
def same_suit(card1, card2):
if(get_suit(card1) == get_suit(card2)):
return True
else:
return False
def same_rank(card1, card2):
if(get_rank(card1) == get_rank(card2)):
return True
else:
return False
def same_color_suit(card1, card2):
suit1 = get_suit(card1) # reuse the helper functions defined above
suit2 = get_suit(card2)
if(suit1 == suit2):
return True
else:
if(suit1<2 and suit2<2):
return True
elif(suit1>1 and suit2>1):
return True
else:
return False
print(get_suit(25))
print(get_rank(19))
print(same_rank(11, 32))
print(same_suit(17, 33))
print(same_color_suit(35, 7)) | [
"noreply@github.com"
] | BaranAkcakaya.noreply@github.com |
ab34f34aceab1369242bf0c820e0d61023b7378e | 84c7ad9116bfe7a81541520cdf493d9788881605 | /storedata_ita2.py | 5a7cc14dfb7c9932251030ba349b92bcff519c32 | [] | no_license | igorsowa9/trialsites | 3b32edc81fa03631cb864226a137f3b80f3aa9e0 | 900439e188386a34253fdc176d74b473fad4c176 | refs/heads/master | 2018-10-19T09:22:35.012649 | 2018-09-26T10:43:11 | 2018-09-26T10:43:11 | 119,725,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,013 | py | import sys
import paho.mqtt.client as mqttcli
import paho.mqtt.publish as publish
import time
import sys, os
from datetime import datetime
import psycopg2
import json
import logging
import shutil
from multiprocessing import Pool
from settings import *
def db_connection(dbname):
try:
conn = psycopg2.connect("dbname='" + dbname + "' user='postgres' host="+db_ip2+" password='postgres'") # the connection is returned to the caller, so no global is needed
if log_inf:
logging.info(" When: " + str(datetime.now()) + " --- " + " DB: " + dbname + " connected.")
return conn
except psycopg2.Error:
logging.error(" When: " + str(datetime.now()) + " --- " + "I am unable to connect to the database.")
print("I am unable to connect to the database. STOP.")
def sqlquery_make(tablename, labels, values):
labels_string = ""
for l in range(len(labels)):
label = labels[l]
if l == 0:
labels_string += "(" + str(label) + ", "
elif l == len(labels)-1:
labels_string += str(label) + ") "
else:
labels_string += str(label) + ", "
values_string = ""
for v in range(len(values)):
value = values[v]
if v == 0:
values_string += "('" + str(value) + "', "
elif v == len(values)-1:
values_string += "'" + str(value) + "')"
else:
values_string += "'" + str(value) + "', "
SQLtext = ""
SQLtext += "INSERT INTO public." + str(tablename) + str(labels_string) + "VALUES " + values_string + ";"
return SQLtext
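# NOTE: values are interpolated directly into the SQL string here; with untrusted input,
# passing parameters separately (cursor.execute(sql, params)) would be the safer pattern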
# def sqlquery_make_string(tablename, value):
# SQLtext = ""
# SQLtext += "INSERT INTO public." + str(tablename) + " (message_string, lab_ts) " + \
# "VALUES ('" + value + "' , '" + str(datetime.utcnow()) + "');"
# return SQLtext
def on_message_writetodb(client, userdata, message):
# print("Received: " + str(userdata))
lab_ts_utc = datetime.utcnow()
json1_str = message.payload.decode("utf-8")
try:
json1_data = json.loads(json1_str)
except json.decoder.JSONDecodeError as e:
logging.error(" When: " + str(datetime.now()) + " --- " + 'Json decode error: ' + str(e))
json1_data = None
if userdata == "irl001":
trialsite_settings = irl001_settings
json_to_sqlreq_irl(json1_data, trialsite_settings, lab_ts_utc)
elif userdata == "ita005":
trialsite_settings = ita005_settings
json_to_sqlreq_ita(json1_data, trialsite_settings, lab_ts_utc)
elif userdata == "ita006":
trialsite_settings = ita006_settings
json_to_sqlreq_ita(json1_data, trialsite_settings, lab_ts_utc)
elif userdata == "ita007":
trialsite_settings = ita007_settings
json_to_sqlreq_ita(json1_data, trialsite_settings, lab_ts_utc)
elif userdata == "ita008":
trialsite_settings = ita008_settings
json_to_sqlreq_ita(json1_data, trialsite_settings, lab_ts_utc)
else:
logging.warning(" When: " + str(datetime.now()) + " --- " + "Unrecognized userdata! :" + str(userdata))
return
def json_to_sqlreq_irl(json1_data, trialsite_settings, lab_ts_utc):
# calculate latency
smx_ts = json1_data["SMXtimestamp"]
# lab_ts_dt = datetime.strptime(str(lab_ts), '%Y-%m-%d %H:%M:%S.%f')
smx_ts_dt = datetime.strptime(str(smx_ts), '%Y/%m/%d %H:%M:%S:%f')
latency = lab_ts_utc - smx_ts_dt
latency_sec = latency.total_seconds() # .microseconds alone drops whole seconds (and 10e5 is 1e6 written confusingly)
if 'wallya1' in json1_data:
which_wally = 'wallya1'
wally_idx = 0
elif 'wallya2' in json1_data:
which_wally = 'wallya2'
wally_idx = 1
else:
logging.warning(" When: " + str(datetime.now()) + " --- " +
"No wallyXX label detected in the irish data.")
return
SQLtext = ""
msg_label = trialsite_settings['msg_labels'][wally_idx]
values = []
for l in range(len(msg_label)):
key = msg_label[l]
if key == 'SMXtimestamp' or key == 'SysDateTimeUTC':
value = json1_data[key]
else:
value = json1_data[which_wally][key]['value']
values.append(value)
values.append(str(lab_ts_utc))
values.append(str(latency_sec))
SQLtext += sqlquery_make(trialsite_settings['db_tables'][wally_idx], trialsite_settings['db_labels'][wally_idx], values)
# the request below to store the whole received string as backup
# SQLtext += sqlquery_make_string(trialsite_settings['db_tables_string'][ml], str(json1_str))
conn = db_connection(trialsite_settings['db_name'])
cursor = conn.cursor()
try:
cursor.execute(SQLtext)
conn.commit()
except psycopg2.OperationalError as e:
logging.warning(" When: " + str(datetime.now()) + " --- " + "Unable to execute query! " + format(e))
finally:
conn.close()
if log_inf: logging.info(" When: " + str(datetime.now()) + " --- " + 'Data written in DB.')
print("Data (" + trialsite_settings['name'] + ") written in DB. SMX ts=" + str(smx_ts))
return
def json_to_sqlreq_ita(json1_data, trialsite_settings, lab_ts_utc):
# calculate latency
smx_ts = json1_data[".SERVER_TIME"]
# lab_ts_dt = datetime.strptime(str(lab_ts), '%Y-%m-%d %H:%M:%S.%f')
smx_ts_dt = datetime.strptime(str(smx_ts), '%Y-%m-%d %H:%M:%S')
latency = lab_ts_utc - smx_ts_dt
latency_sec = latency.total_seconds() # total_seconds() keeps whole seconds as well as the fractional part
meter_idx = 0
SQLtext = ""
msg_label = trialsite_settings['msg_labels'][meter_idx]
values = []
for l in range(len(msg_label)):
key = msg_label[l]
if key[0] == ".":
value = json1_data[key]
else:
value = json1_data[key]['value']
values.append(value)
values.append(str(lab_ts_utc))
values.append(str(latency_sec))
SQLtext += sqlquery_make(trialsite_settings['db_tables'][meter_idx], trialsite_settings['db_labels'][meter_idx], values)
# SQLtext += sqlquery_make_string(trialsite_settings['db_tables_string'][meter_idx], str(json1_str))
conn = db_connection(trialsite_settings['db_name'])
cursor = conn.cursor()
try:
cursor.execute(SQLtext)
conn.commit()
except psycopg2.OperationalError as e:
logging.warning(" When: " + str(datetime.now()) + " --- " + "Unable to execute query! " + format(e))
finally:
conn.close()
if log_inf: logging.info(" When: " + str(datetime.now()) + " --- " + 'Data written in DB.')
print("Data (" + trialsite_settings['name'] + ") written in DB. SMX ts=" + str(smx_ts))
return
def storedataAttempt(trialsite_settings):
# print("storeAttempt: " + str(trialsite_settings['name']))
if trialsite_settings['name'] == 'irl001':
vm = mqttcli.Client(userdata="irl001")
elif trialsite_settings['name'] == 'ita005':
vm = mqttcli.Client(userdata="ita005")
elif trialsite_settings['name'] == 'ita006':
vm = mqttcli.Client(userdata="ita006")
elif trialsite_settings['name'] == 'ita007':
vm = mqttcli.Client(userdata="ita007")
elif trialsite_settings['name'] == 'ita008':
vm = mqttcli.Client(userdata="ita008")
else:
logging.warning(" When: " + str(datetime.now()) + " --- " + "Unrecognized trialsite settings! :")
return
# vm.on_message = on_message_writetodb
vm.connect(trialsite_settings["ip"])
vm.loop_start()
vm.subscribe(trialsite_settings["mqtt_topics"])
for topic in trialsite_settings["mqtt_topics"]:
vm.message_callback_add(topic[0], on_message_writetodb)
# print("Waiting for data...")
if log_inf: logging.info(" When: " + str(datetime.now()) + " --- " + "Waiting for data...")
time.sleep(2)
vm.loop_stop()
def storedataOnce():
while True:
try:
# pool = Pool(processes=4)
# pool.apply_async(storedataAttempt, [irl001_settings])
# pool.apply_async(storedataAttempt, [ita005_settings])
# pool.apply_async(storedataAttempt, [ita006_settings])
# pool.apply_async(storedataAttempt, [ita007_settings])
# storedataAttempt(irl001_settings)
# storedataAttempt(ita005_settings)
storedataAttempt(ita006_settings)
# storedataAttempt(ita007_settings)
# storedataAttempt(ita008_settings)
except:
print("Unexpected error:", sys.exc_info())
logging.error(" When: " + str(datetime.now()) + " --- " + "Error in storedataOnce(): ", sys.exc_info())
pass
else:
break
def storedataRepeatedly():
while True:
storedataOnce()
time.sleep(0.1)
# archive_name = "logarchive_" + str(datetime.now().isoformat()) + ".log"
# shutil.copy("logfile.log", archive_name)
#shutil.make_archive(archive_name, "zip")
#os.remove(archive_name)
# logging.basicConfig(filename='logfile.log', level=logging.INFO)
# logging.warning(" When: " + str(datetime.now()) + " --- " + "Initiate logfile")
storedataRepeatedly()
| [
"igorsowa9@gmail.com"
] | igorsowa9@gmail.com |
701c7372e4483add670bc094a28089806a2fd902 | 0dfa97730b9ad9c077868a045d89cc0d4b09f433 | /tests/integration/goldens/redis/samples/generated_samples/redis_generated_redis_v1_cloud_redis_create_instance_sync.py | d2e83c8ed0211cc443a3c114985748bbc9997b66 | [
"Apache-2.0"
] | permissive | anukaal/gapic-generator-python | 546c303aaf2e722956133b07abb0fb1fe581962f | e3b06895fa179a2038ee2b28e43054e1df617975 | refs/heads/master | 2023-08-24T23:16:32.305652 | 2021-10-09T15:12:14 | 2021-10-09T15:12:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,773 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateInstance
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-redis
# [START redis_generated_redis_v1_CloudRedis_CreateInstance_sync]
from google.cloud import redis_v1
def sample_create_instance():
"""Snippet for create_instance"""
# Create a client
client = redis_v1.CloudRedisClient()
# Initialize request argument(s)
instance = redis_v1.Instance()
instance.name = "name_value"
instance.tier = "STANDARD_HA"
instance.memory_size_gb = 1499
request = redis_v1.CreateInstanceRequest(
parent="projects/{project}/locations/{location}",
instance_id="instance_id_value",
instance=instance,
)
# Make the request
operation = client.create_instance(request=request)
print("Waiting for operation to complete...")
response = operation.result()
print(response)
# [END redis_generated_redis_v1_CloudRedis_CreateInstance_sync]
| [
"noreply@github.com"
] | anukaal.noreply@github.com |
6062e7d3554b25aae792e7e5493c0fe5f810af21 | f8da830331428a8e1bbeadf23345f79f1750bd98 | /msgraph-cli-extensions/beta/security_beta/azext_security_beta/vendored_sdks/security/operations/_security_ti_indicator_operations.py | c12d0da4351c6731bc5f09bbd3d7e8106d09835e | [
"MIT"
] | permissive | ezkemboi/msgraph-cli | e023e1b7589461a738e42cbad691d9a0216b0779 | 2ceeb27acabf7cfa219c8a20238d8c7411b9e782 | refs/heads/main | 2023-02-12T13:45:03.402672 | 2021-01-07T11:33:54 | 2021-01-07T11:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,263 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SecurityTIIndicatorOperations(object):
"""SecurityTIIndicatorOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~security.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def delete_ti_indicator(
self,
value=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> List["models.MicrosoftGraphResultInfo"]
"""Invoke action deleteTiIndicators.
Invoke action deleteTiIndicators.
:param value:
:type value: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of MicrosoftGraphResultInfo, or the result of cls(response)
:rtype: list[~security.models.MicrosoftGraphResultInfo]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["models.MicrosoftGraphResultInfo"]]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.PathsO3Cp3MSecurityTiindicatorsMicrosoftGraphDeletetiindicatorsPostRequestbodyContentApplicationJsonSchema(value=value)
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.delete_ti_indicator.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_body, 'PathsO3Cp3MSecurityTiindicatorsMicrosoftGraphDeletetiindicatorsPostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[MicrosoftGraphResultInfo]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_ti_indicator.metadata = {'url': '/Security/tiIndicators/microsoft.graph.deleteTiIndicators'} # type: ignore
def delete_ti_indicator_by_external_id(
self,
value=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> List["models.MicrosoftGraphResultInfo"]
"""Invoke action deleteTiIndicatorsByExternalId.
Invoke action deleteTiIndicatorsByExternalId.
:param value:
:type value: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of MicrosoftGraphResultInfo, or the result of cls(response)
:rtype: list[~security.models.MicrosoftGraphResultInfo]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["models.MicrosoftGraphResultInfo"]]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.Paths1Gb7He6SecurityTiindicatorsMicrosoftGraphDeletetiindicatorsbyexternalidPostRequestbodyContentApplicationJsonSchema(value=value)
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.delete_ti_indicator_by_external_id.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_body, 'Paths1Gb7He6SecurityTiindicatorsMicrosoftGraphDeletetiindicatorsbyexternalidPostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[MicrosoftGraphResultInfo]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_ti_indicator_by_external_id.metadata = {'url': '/Security/tiIndicators/microsoft.graph.deleteTiIndicatorsByExternalId'} # type: ignore
def submit_ti_indicator(
self,
value=None, # type: Optional[List["models.MicrosoftGraphTIIndicator"]]
**kwargs # type: Any
):
# type: (...) -> List["models.MicrosoftGraphTIIndicator"]
"""Invoke action submitTiIndicators.
Invoke action submitTiIndicators.
:param value:
:type value: list[~security.models.MicrosoftGraphTIIndicator]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of MicrosoftGraphTIIndicator, or the result of cls(response)
:rtype: list[~security.models.MicrosoftGraphTIIndicator]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["models.MicrosoftGraphTIIndicator"]]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.Paths1I03ShnSecurityTiindicatorsMicrosoftGraphSubmittiindicatorsPostRequestbodyContentApplicationJsonSchema(value=value)
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.submit_ti_indicator.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_body, 'Paths1I03ShnSecurityTiindicatorsMicrosoftGraphSubmittiindicatorsPostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[MicrosoftGraphTIIndicator]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
submit_ti_indicator.metadata = {'url': '/Security/tiIndicators/microsoft.graph.submitTiIndicators'} # type: ignore
def update_ti_indicator(
self,
value=None, # type: Optional[List["models.MicrosoftGraphTIIndicator"]]
**kwargs # type: Any
):
# type: (...) -> List["models.MicrosoftGraphTIIndicator"]
"""Invoke action updateTiIndicators.
Invoke action updateTiIndicators.
:param value:
:type value: list[~security.models.MicrosoftGraphTIIndicator]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of MicrosoftGraphTIIndicator, or the result of cls(response)
:rtype: list[~security.models.MicrosoftGraphTIIndicator]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["models.MicrosoftGraphTIIndicator"]]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.PathsS86SndSecurityTiindicatorsMicrosoftGraphUpdatetiindicatorsPostRequestbodyContentApplicationJsonSchema(value=value)
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_ti_indicator.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_body, 'PathsS86SndSecurityTiindicatorsMicrosoftGraphUpdatetiindicatorsPostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[MicrosoftGraphTIIndicator]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_ti_indicator.metadata = {'url': '/Security/tiIndicators/microsoft.graph.updateTiIndicators'} # type: ignore
| [
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
4b52610a596acae2c84caeeeab48b5fe380dafce | c4a8f3200add74f4c42fe34b2f3b284d6249a481 | /sciwx/demo/mesh2_mesh_demo.py | 076d254d04a5dd678a223fd6c388ca83397f2f19 | [
"BSD-2-Clause"
] | permissive | pengguanjun/imagepy | 1908e83a7ec2a6472524f443aefeaade12c2b649 | d96ef98c2c3e93d368131fd2753bce164e1247cd | refs/heads/master | 2022-12-15T11:48:04.841436 | 2020-09-14T11:59:49 | 2020-09-14T11:59:49 | 275,398,356 | 1 | 0 | NOASSERTION | 2020-06-27T15:18:59 | 2020-06-27T15:18:58 | null | UTF-8 | Python | false | false | 1,249 | py | import sys, wx
sys.path.append('../../')
from sciwx.mesh import Canvas3D, MCanvas3D, MeshSet
from sciapp.util import surfutil
from sciapp.object import Surface
from sciwx.mesh import Canvas3DFrame, Canvas3DNoteBook, Canvas3DNoteFrame
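# build_ball(center, radius, color) is assumed here to return the vertex,
# face, normal, and color arrays that Surface consumes below.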
vts, fs, ns, cs = surfutil.build_ball((100,100,100),50, (1,0,0))
def add_with_para():
cnf = Canvas3DFrame(None)
surf = Surface(vts, fs, ns, cs, mode='grid')
cnf.add_surf('gridball', surf)
cnf.Show()
def mesh_obj_test():
cnf = Canvas3DFrame(None)
meshes = MeshSet()
vts, fs, ns, cs = surfutil.build_ball((100,100,100),50, (1,0,0))
redball = Surface(vts, fs, ns, cs)
meshes.add_surf('redball', redball)
vts, fs, ns, cs = surfutil.build_ball((300,100,100),50, (1,1,0))
yellowball = Surface(vts, fs, ns, cs, mode='grid')
meshes.add_surf('yellowball', yellowball)
    vts, fs, ns, cs = surfutil.build_ball((300,-300,100),50, (0,1,0))
    hideball = Surface(vts, fs, ns, cs, visible=False)
    hideball = meshes.add_surf('hideball', hideball)
meshes.background = (0, 0, 0.3)
cnf.set_mesh(meshes)
cnf.Show()
if __name__ == '__main__':
app = wx.App()
add_with_para()
mesh_obj_test()
app.MainLoop()
| [
"imagepy@sina.com"
] | imagepy@sina.com |
c996297347ac933fba624d954a3d78dd294d33ff | 75ec986d34d5391d46d6469c513626f69f5d978d | /Incepator/cycles/cycles3.py | e733dc4092d410e8ac117846f3fbe5203f2b7e44 | [] | no_license | CatrunaMarius/python | d9f8dc221458e4b65c3f801daf3b59aa2b946358 | d063bffb4eafa56ac1e205c2d39fc893ab50e992 | refs/heads/master | 2020-04-24T05:23:22.756002 | 2020-01-06T11:56:12 | 2020-01-06T11:56:12 | 171,703,482 | 0 | 0 | null | 2019-02-20T16:12:39 | 2019-02-20T15:59:08 | null | UTF-8 | Python | false | false | 150 | py | #multiplication table (while)
i=1
while i<10:
j=1
while j<10:
print("%4d" % (i*j), end="")
j+=1
print()
i+=1
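# An equivalent sketch using for-loops over range(1, 10) (same 9x9 table):
#
#   for i in range(1, 10):
#       for j in range(1, 10):
#           print("%4d" % (i * j), end="")
#       print()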
| [
"noreply@github.com"
] | CatrunaMarius.noreply@github.com |
bc89c88a357f13ea1706d544aa08b81f6fb6036f | 1bad7d2b7fc920ecf2789755ed7f44b039d4134d | /A other/エイシング プログラミング コンテスト 2020/B.py | 77e2b68e5bde2a0361da7ee0c237f62e8c1262a4 | [] | no_license | kanekyo1234/AtCoder_solve | ce95caafd31f7c953c0fc699f0f4897dddd7a159 | e5ea7b080b72a2a2fd3fcb826cd10c4ab2e2720e | refs/heads/master | 2023-04-01T04:01:15.885945 | 2021-04-06T04:03:31 | 2021-04-06T04:03:31 | 266,151,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | n = int(input())
a = list(map(int, input().split()))
ans = 0
for i in range(n):
if (i+1) % 2 == 1 and a[i] % 2 == 1:
ans += 1
print(ans)
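# The loop counts odd values sitting at odd 1-based positions; an equivalent
# one-liner sketch:
#
#   print(sum(1 for i, x in enumerate(a) if i % 2 == 0 and x % 2 == 1))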
| [
"kanekyohunter.0314@softbank.ne.jp"
] | kanekyohunter.0314@softbank.ne.jp |
dffe88ae437ac28f4a3898604f43f1694690b380 | 8ee8fe3c2acea497a85428bfb3dfde19e58b2bc3 | /test-examples/range_one_image.py | 5b8657935e4171ac7ebfc64c064995200cabb8e0 | [
"BSD-3-Clause"
] | permissive | sofroniewn/image-demos | a6e46f08fd4ce621aa96d6b6378b50f63ac2b381 | 2eeeb23f34a47798ae7be0987182724ee3799eb8 | refs/heads/master | 2022-11-02T23:50:23.098830 | 2022-10-30T04:38:19 | 2022-10-30T04:38:19 | 179,378,745 | 11 | 1 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | """
Test adding an image with a range one dimensions.
There should be no slider shown for the axis corresponding to the range
one dimension.
"""
import numpy as np
from skimage import data
import napari
with napari.gui_qt():
viewer = napari.view_image(np.random.random((4, 4, 1, 30, 40)))
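    # With shape (4, 4, 1, 30, 40), the last two axes are displayed, so the
    # viewer should show sliders for the first two axes only; the size-1
    # axis should get no slider.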
| [
"sofroniewn@gmail.com"
] | sofroniewn@gmail.com |
d15b12f713424c1f85485dd0dbc9c96a2b451d2c | 63f443df12960b3eda3ac23e4a2c343b2c8a04f8 | /text/1.py | 1debf0c03aa5d4f7cbddfe0b3a77a13991bcc068 | [] | no_license | persontianshuang/crapy500m | 7c30e101da6f578c475a63117fa062000ce9460f | 0e725f037a460fc01926000e023d4f5dbb6c1f4e | refs/heads/master | 2021-08-09T02:54:49.102719 | 2017-11-12T00:52:37 | 2017-11-12T00:52:37 | 102,925,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | import os
file="C:\\Users\Administrator\Desktop\不侵权\沙拉盘不侵权-38.xls"
with open(file, 'r',encoding='gbk') as f:
lines = f.readlines()
for line in lines:
line = line
print(line)
| [
"mengyouhan@gmail.com"
] | mengyouhan@gmail.com |
cecd22f5838eed05a65114d0526a5383ebf2af83 | fb7efe44f4d9f30d623f880d0eb620f3a81f0fbd | /build/config/linux/pkg-config.py | 32068ada80b0b4a41d1a52a4c379f1fec68a3456 | [
"BSD-3-Clause"
] | permissive | wzyy2/chromium-browser | 2644b0daf58f8b3caee8a6c09a2b448b2dfe059c | eb905f00a0f7e141e8d6c89be8fb26192a88c4b7 | refs/heads/master | 2022-11-23T20:25:08.120045 | 2018-01-16T06:41:26 | 2018-01-16T06:41:26 | 117,618,467 | 3 | 2 | BSD-3-Clause | 2022-11-20T22:03:57 | 2018-01-16T02:09:10 | null | UTF-8 | Python | false | false | 7,689 | py | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import subprocess
import sys
import re
from optparse import OptionParser
# This script runs pkg-config, optionally filtering out some results, and
# returns the result.
#
# The result will be [ <includes>, <cflags>, <libs>, <lib_dirs>, <ldflags> ]
# where each member is itself a list of strings.
#
# You can filter out matches using "-v <regexp>" where all results from
# pkgconfig matching the given regular expression will be ignored. You can
# specify more than one regular expression my specifying "-v" more than once.
#
# You can specify a sysroot using "-s <sysroot>" where sysroot is the absolute
# system path to the sysroot used for compiling. This script will attempt to
# generate correct paths for the sysroot.
#
# When using a sysroot, you must also specify the architecture via
# "-a <arch>" where arch is either "x86" or "x64".
#
# CrOS systemroots place pkgconfig files at <systemroot>/usr/share/pkgconfig
# and one of <systemroot>/usr/lib/pkgconfig or <systemroot>/usr/lib64/pkgconfig
# depending on whether the systemroot is for a 32 or 64 bit architecture. They
# specify the 'lib' or 'lib64' of the pkgconfig path by defining the
# 'system_libdir' variable in the args.gn file. pkg_config.gni communicates this
# variable to this script with the "--system_libdir <system_libdir>" flag. If no
# flag is provided, then pkgconfig files are assumed to come from
# <systemroot>/usr/lib/pkgconfig.
#
# Additionally, you can specify the option --atleast-version. This will skip
# the normal outputting of a dictionary and instead print true or false,
# depending on the return value of pkg-config for the given package.
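#
# Example invocation (hypothetical paths, shown for illustration only):
#
#   pkg-config.py -s /path/to/sysroot -a x64 --system_libdir lib64 gtk+-3.0
#
# This prints a GN-compatible JSON list of the form:
#
#   [[<includes>], [<cflags>], [<libs>], [<lib_dirs>], [<ldflags>]]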
def SetConfigPath(options):
"""Set the PKG_CONFIG_LIBDIR environment variable.
This takes into account any sysroot and architecture specification from the
options on the given command line.
"""
sysroot = options.sysroot
assert sysroot
# Compute the library path name based on the architecture.
arch = options.arch
if sysroot and not arch:
print "You must specify an architecture via -a if using a sysroot."
sys.exit(1)
libdir = sysroot + '/usr/' + options.system_libdir + '/pkgconfig'
libdir += ':' + sysroot + '/usr/share/pkgconfig'
os.environ['PKG_CONFIG_LIBDIR'] = libdir
return libdir
def GetPkgConfigPrefixToStrip(options, args):
"""Returns the prefix from pkg-config where packages are installed.
This returned prefix is the one that should be stripped from the beginning of
directory names to take into account sysroots.
"""
# Some sysroots, like the Chromium OS ones, may generate paths that are not
# relative to the sysroot. For example,
# /path/to/chroot/build/x86-generic/usr/lib/pkgconfig/pkg.pc may have all
# paths relative to /path/to/chroot (i.e. prefix=/build/x86-generic/usr)
# instead of relative to /path/to/chroot/build/x86-generic (i.e prefix=/usr).
# To support this correctly, it's necessary to extract the prefix to strip
# from pkg-config's |prefix| variable.
prefix = subprocess.check_output([options.pkg_config,
"--variable=prefix"] + args, env=os.environ)
  if prefix[-4:] == '/usr':
    return prefix[:-4]
return prefix
def MatchesAnyRegexp(flag, list_of_regexps):
"""Returns true if the first argument matches any regular expression in the
given list."""
for regexp in list_of_regexps:
if regexp.search(flag) != None:
return True
return False
def RewritePath(path, strip_prefix, sysroot):
"""Rewrites a path by stripping the prefix and prepending the sysroot."""
if os.path.isabs(path) and not path.startswith(sysroot):
if path.startswith(strip_prefix):
path = path[len(strip_prefix):]
path = path.lstrip('/')
return os.path.join(sysroot, path)
else:
return path
def main():
# If this is run on non-Linux platforms, just return nothing and indicate
# success. This allows us to "kind of emulate" a Linux build from other
# platforms.
if "linux" not in sys.platform:
print "[[],[],[],[],[]]"
return 0
parser = OptionParser()
parser.add_option('-d', '--debug', action='store_true')
parser.add_option('-p', action='store', dest='pkg_config', type='string',
default='pkg-config')
parser.add_option('-v', action='append', dest='strip_out', type='string')
parser.add_option('-s', action='store', dest='sysroot', type='string')
parser.add_option('-a', action='store', dest='arch', type='string')
parser.add_option('--system_libdir', action='store', dest='system_libdir',
type='string', default='lib')
parser.add_option('--atleast-version', action='store',
dest='atleast_version', type='string')
parser.add_option('--libdir', action='store_true', dest='libdir')
(options, args) = parser.parse_args()
# Make a list of regular expressions to strip out.
strip_out = []
if options.strip_out != None:
for regexp in options.strip_out:
strip_out.append(re.compile(regexp))
if options.sysroot:
libdir = SetConfigPath(options)
if options.debug:
sys.stderr.write('PKG_CONFIG_LIBDIR=%s\n' % libdir)
prefix = GetPkgConfigPrefixToStrip(options, args)
else:
prefix = ''
if options.atleast_version:
# When asking for the return value, just run pkg-config and print the return
# value, no need to do other work.
if not subprocess.call([options.pkg_config,
"--atleast-version=" + options.atleast_version] +
args):
print "true"
else:
print "false"
return 0
if options.libdir:
cmd = [options.pkg_config, "--variable=libdir"] + args
if options.debug:
sys.stderr.write('Running: %s\n' % cmd)
try:
libdir = subprocess.check_output(cmd)
except:
print "Error from pkg-config."
return 1
sys.stdout.write(libdir.strip())
return 0
cmd = [options.pkg_config, "--cflags", "--libs"] + args
if options.debug:
sys.stderr.write('Running: %s\n' % ' '.join(cmd))
try:
flag_string = subprocess.check_output(cmd)
except:
sys.stderr.write('Could not run pkg-config.\n')
return 1
# For now just split on spaces to get the args out. This will break if
# pkgconfig returns quoted things with spaces in them, but that doesn't seem
# to happen in practice.
all_flags = flag_string.strip().split(' ')
sysroot = options.sysroot
if not sysroot:
sysroot = ''
includes = []
cflags = []
libs = []
lib_dirs = []
ldflags = []
for flag in all_flags[:]:
if len(flag) == 0 or MatchesAnyRegexp(flag, strip_out):
      continue
if flag[:2] == '-l':
libs.append(RewritePath(flag[2:], prefix, sysroot))
elif flag[:2] == '-L':
lib_dirs.append(RewritePath(flag[2:], prefix, sysroot))
elif flag[:2] == '-I':
includes.append(RewritePath(flag[2:], prefix, sysroot))
elif flag[:3] == '-Wl':
ldflags.append(flag)
elif flag == '-pthread':
# Many libs specify "-pthread" which we don't need since we always include
# this anyway. Removing it here prevents a bunch of duplicate inclusions
# on the command line.
pass
else:
cflags.append(flag)
# Output a GN array, the first one is the cflags, the second are the libs. The
# JSON formatter prints GN compatible lists when everything is a list of
# strings.
print json.dumps([includes, cflags, libs, lib_dirs, ldflags])
return 0
if __name__ == '__main__':
sys.exit(main())
| [
"jacob-chen@iotwrt.com"
] | jacob-chen@iotwrt.com |
69743a42fb64f06773a238d89bfac39069777e78 | 39d4504ec1da8975fac526d6801b94f4348b6b61 | /research/object_detection/dataset_tools/create_oid_tf_record.py | c5f409af4da470ff0c9da20f6a8793306a1016f3 | [
"Apache-2.0"
] | permissive | vincentcheny/models | fe0ff5888e6ee00a0d4fa5ee14154acdbeebe7ad | afb1a59fc1bc792ac72d1a3e22e2469020529788 | refs/heads/master | 2020-07-23T21:38:24.559521 | 2019-11-15T07:50:11 | 2019-11-15T07:50:11 | 207,712,649 | 1 | 0 | Apache-2.0 | 2019-09-11T03:12:31 | 2019-09-11T03:12:31 | null | UTF-8 | Python | false | false | 5,315 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Creates TFRecords of Open Images dataset for object detection.
Example usage:
python object_detection/dataset_tools/create_oid_tf_record.py \
--input_box_annotations_csv=/path/to/input/annotations-human-bbox.csv \
--input_image_label_annotations_csv=/path/to/input/annotations-label.csv \
--input_images_directory=/path/to/input/image_pixels_directory \
--input_label_map=/path/to/input/labels_bbox_545.labelmap \
--output_tf_record_path_prefix=/path/to/output/prefix.tfrecord
CSVs with bounding box annotations and image metadata (including the image URLs)
can be downloaded from the Open Images GitHub repository:
https://github.com/openimages/dataset
This script will include every image found in the input_images_directory in the
output TFRecord, even if the image has no corresponding bounding box annotations
in the input_annotations_csv. If input_image_label_annotations_csv is specified,
it will add image-level labels as well. Note that the information of whether a
label is positivelly or negativelly verified is NOT added to tfrecord.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import contextlib2
import pandas as pd
import tensorflow as tf
from object_detection.dataset_tools import oid_tfrecord_creation
from object_detection.dataset_tools import tf_record_creation_util
from object_detection.utils import label_map_util
tf.flags.DEFINE_string('input_box_annotations_csv', None,
'Path to CSV containing image bounding box annotations')
tf.flags.DEFINE_string('input_images_directory', None,
'Directory containing the image pixels '
'downloaded from the OpenImages GitHub repository.')
tf.flags.DEFINE_string('input_image_label_annotations_csv', None,
'Path to CSV containing image-level labels annotations')
tf.flags.DEFINE_string('input_label_map', None, 'Path to the label map proto')
tf.flags.DEFINE_string(
'output_tf_record_path_prefix', None,
'Path to the output TFRecord. The shard index and the number of shards '
'will be appended for each output shard.')
tf.flags.DEFINE_integer('num_shards', 100, 'Number of TFRecord shards')
FLAGS = tf.flags.FLAGS
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
required_flags = [
'input_box_annotations_csv', 'input_images_directory', 'input_label_map',
'output_tf_record_path_prefix'
]
for flag_name in required_flags:
if not getattr(FLAGS, flag_name):
raise ValueError('Flag --{} is required'.format(flag_name))
label_map = label_map_util.get_label_map_dict(FLAGS.input_label_map)
all_box_annotations = pd.read_csv(FLAGS.input_box_annotations_csv)
if FLAGS.input_image_label_annotations_csv:
all_label_annotations = pd.read_csv(FLAGS.input_image_label_annotations_csv)
all_label_annotations.rename(
columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)
else:
all_label_annotations = None
all_images = tf.gfile.Glob(
os.path.join(FLAGS.input_images_directory, '*.jpg'))
all_image_ids = [os.path.splitext(os.path.basename(v))[0] for v in all_images]
all_image_ids = pd.DataFrame({'ImageID': all_image_ids})
all_annotations = pd.concat(
[all_box_annotations, all_image_ids, all_label_annotations])
tf.logging.log(tf.logging.INFO, 'Found %d images...', len(all_image_ids))
with contextlib2.ExitStack() as tf_record_close_stack:
output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
tf_record_close_stack, FLAGS.output_tf_record_path_prefix,
FLAGS.num_shards)
for counter, image_data in enumerate(all_annotations.groupby('ImageID')):
tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000,
counter)
image_id, image_annotations = image_data
# In OID image file names are formed by appending ".jpg" to the image ID.
image_path = os.path.join(FLAGS.input_images_directory, image_id + '.jpg')
with tf.gfile.Open(image_path) as image_file:
encoded_image = image_file.read()
tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
image_annotations, label_map, encoded_image)
if tf_example:
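        # OID image IDs are hexadecimal strings (hence the base-16 parse
        # below), which yields a deterministic, roughly uniform shard
        # assignment across the output TFRecord files.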
shard_idx = int(image_id, 16) % FLAGS.num_shards
output_tfrecords[shard_idx].write(tf_example.SerializeToString())
if __name__ == '__main__':
tf.app.run()
| [
"1155107977@link.cuhk.edu.hk"
] | 1155107977@link.cuhk.edu.hk |