blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
32094bfec7d1f73bf53f88c454aa658b21ff5eb7 | 44d70e38ffc6dad4021e8e72363893a290a64b7a | /mwatch/main.py | 94b6b983d7c32dba5025d844f0f747d292a71a21 | [
"Apache-2.0"
] | permissive | rick446/mongo-watch | 55353363174bfea9f22abf23060099f7bf1401a7 | a218a1c397f89dbfc90100fa650fa4b51958e281 | refs/heads/master | 2021-01-12T02:52:08.717843 | 2018-04-23T14:26:40 | 2018-04-23T14:26:40 | 78,122,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,690 | py | import logging
from itertools import chain
from pprint import pformat
import pymongo
try:
from mongoquery import Query, QueryError
except ImportError:
Query = QueryError = None
log = logging.getLogger(__name__)
class Watcher(object):
    """Tail the local replica-set oplog and dispatch entries to registered watches.

    Iterating a Watcher yields matching oplog documents. Iteration stops when
    a stateful watch reports that the oplog query must be rebuilt (e.g. a new
    matching document appeared); the caller then simply iterates again.
    """

    def __init__(self, cli, await_=False):
        """
        :param cli: pymongo client connected to a replica-set member
        :param await_: use a TAILABLE_AWAIT cursor (server blocks for new data)
        """
        self.oplog = cli.local.oplog.rs
        self.watches = {}  # dict[id] = watch (so we can remove watches later)
        if await_:
            self._cursor_type = pymongo.CursorType.TAILABLE_AWAIT
        else:
            self._cursor_type = pymongo.CursorType.TAILABLE
        self._last_ts = self._get_last_ts()

    def watch_query(self, collection, qspec=None, check_inserts=False):
        """Watch inserts/updates/deletes of documents matching qspec."""
        res = QueryWatch(self, collection, qspec, check_inserts)
        self.watches[id(res)] = res
        return res

    def watch_inserts(self, collection, qspec=None):
        """Watch inserts into a collection (optionally filtered by qspec)."""
        res = InsertWatch(self, collection, qspec)
        self.watches[id(res)] = res
        return res

    def watch_updates(self, collection, ids=None):
        """Watch updates, optionally restricted to the given document ids."""
        res = UpdateWatch(self, collection, ids)
        self.watches[id(res)] = res
        return res

    def watch_deletes(self, collection, ids=None):
        """Watch deletes, optionally restricted to the given document ids."""
        res = DeleteWatch(self, collection, ids)
        self.watches[id(res)] = res
        return res

    def _get_last_ts(self):
        """Return the timestamp of the newest entry currently in the oplog."""
        final_entry = self.oplog.find().sort('$natural', -1).limit(1).next()
        log.debug('final_entry: %s', final_entry)
        return final_entry['ts']

    def _get_cursor(self):
        """Build a tailable oplog cursor covering all registered watches."""
        branches = list(chain(*[
            w.oplog_branches() for w in self.watches.values()]))
        assert branches, 'Nothing to watch'
        if len(branches) == 1:
            spec = branches[0]
        else:
            spec = {'$or': branches}
        # Only entries newer than the last one we have already processed.
        spec['ts'] = {'$gt': self._last_ts}
        log.debug('Query oplog with\n%s', pformat(spec))
        return self.oplog.find(
            spec,
            cursor_type=self._cursor_type,
            oplog_replay=True)

    def __iter__(self):
        curs = self._get_cursor()
        stateful_watches = [
            w for w in self.watches.values()
            if hasattr(w, 'process_entry')]
        needs_restart = False
        for doc in curs:
            for w in stateful_watches:
                # Bug fix: the previous `needs_restart or w.process_entry(doc)`
                # short-circuited once a restart was pending, so remaining
                # watches never saw the entry and their id sets went stale.
                if w.process_entry(doc):
                    needs_restart = True
            self._last_ts = doc['ts']
            yield doc
            if needs_restart:
                # Stop so the caller re-iterates with rebuilt branches.
                break
class Watch:
    """Base class for all watch handles registered with a Watcher."""

    def __init__(self, watcher):
        # Keep a reference back to the owning Watcher for deregistration.
        self.watcher = watcher

    def unwatch(self):
        """Deregister this watch from its Watcher; safe to call repeatedly."""
        registry = self.watcher.watches
        registry.pop(id(self), None)
class QueryWatch(Watch):
    """Insert/update/delete watch for a query (stateful).

    Tracks the set of document ids currently matching ``qspec`` so that the
    update/delete oplog branches can be restricted to those documents.
    ``process_entry`` keeps that id set in sync with the oplog stream and
    reports when the oplog cursor must be rebuilt with fresh branches.
    """
    def __init__(
            self, watcher, collection, qspec=None, check_inserts=False):
        # check_inserts: re-check inserted documents client-side with
        # mongoquery. Useful because the oplog-level insert branch only
        # expresses flat 'o.<field>' equality, and entries matched by OTHER
        # watches' branches are also fed to this watch's process_entry.
        super().__init__(watcher)
        self.collection = collection
        self.qspec = qspec
        self.check_inserts = check_inserts
        if check_inserts:
            assert Query is not None, 'Cannot check inserts without mongoquery'
            self._mquery = Query(qspec)
        self._ns = '{}.{}'.format(
            collection.database.name,
            collection.name)
        if qspec:
            # Seed the id set with the documents matching the query right now.
            self._ids = set(
                doc['_id'] for doc in self.collection.find(qspec, {'_id': 1}))
        else:
            # No query: watch the whole collection, no id tracking needed.
            # NOTE(review): an empty-dict qspec is treated like None here but
            # not in oplog_branches() below -- confirm that edge is intended.
            self._ids = None
    def __repr__(self):
        return '<QueryWatch {} {}>'.format(self._ns, self.qspec)
    def oplog_branches(self):
        # Delegate branch construction to the three specialized watch types.
        if self.qspec is None:
            # Whole collection: one branch covers inserts/updates/deletes.
            yield {'ns': self._ns, 'op': {'$in': ['i', 'u', 'd']}}
            return
        ins_watch = InsertWatch(self.watcher, self.collection, self.qspec)
        if self._ids:
            watches = [
                ins_watch,
                UpdateWatch(self.watcher, self.collection, list(self._ids)),
                DeleteWatch(self.watcher, self.collection, list(self._ids))]
        else:
            # Nothing matches yet; only inserts can make documents relevant.
            watches = [ins_watch]
        for w in watches:
            yield from w.oplog_branches()
    def process_entry(self, entry):
        """Return true if the oplog query needs to be restarted."""
        if not self.qspec:
            # no need to track IDs
            return False
        if entry['ns'] != self._ns:
            # not my collection
            return False
        if entry['op'] == 'i':
            if self.check_inserts and not self._mquery.match(entry['o']):
                # I don't watch that doc
                return False
            # New matching document: request a restart so the update/delete
            # branches include its id.
            self._ids.add(entry['o']['_id'])
            return True
        elif entry['op'] == 'd':
            # Deleted document: drop its id. The stale id in the running
            # cursor's branches is harmless, so no restart is requested
            # (this path implicitly returns None, which is falsy).
            self._ids.discard(entry['o']['_id'])
        else:
            return False
class InsertWatch(Watch):
    """Watch for documents inserted into a collection (optionally filtered).

    The optional ``qspec`` is a flat field->value mapping matched against the
    inserted document through the oplog's 'o.<field>' dotted notation.
    """

    def __init__(self, watcher, collection, qspec=None):
        super().__init__(watcher)
        self._ns = '{}.{}'.format(
            collection.database.name,
            collection.name)
        self.qspec = qspec

    def __repr__(self):
        return '<InsertWatch {} {}>'.format(self._ns, self.qspec)

    def oplog_branches(self):
        """Yield the single oplog query branch selecting matching inserts.

        Bug fix: the qspec translation used to run before the falsy check,
        so a watch created with qspec=None crashed with AttributeError on
        ``None.items()`` as soon as branches were requested.
        """
        if self.qspec:
            prefixed = {
                'o.{}'.format(k): v
                for k, v in self.qspec.items()}
            yield {'op': 'i', 'ns': self._ns, **prefixed}
        else:
            yield {'op': 'i', 'ns': self._ns}
class UpdateWatch(Watch):
    """Watch for updates, optionally restricted to a set of document ids."""

    def __init__(self, watcher, collection, ids=None):
        super().__init__(watcher)
        self._ns = '{}.{}'.format(
            collection.database.name,
            collection.name)
        self._ids = ids

    def __repr__(self):
        return '<UpdateWatch {} {}>'.format(self._ns, self._ids)

    def oplog_branches(self):
        """Yield oplog branches for updates ('u'); the updated id is 'o2._id'.

        Bug fix: a single-id watch used to yield BOTH the scalar branch and
        the equivalent '$in' branch (the two ifs overlapped); now exactly one
        branch is produced. An empty id list still yields nothing.
        """
        if self._ids is None:
            yield {'op': 'u', 'ns': self._ns}
            return
        ids = list(self._ids)
        if len(ids) == 1:
            yield {'op': 'u', 'ns': self._ns, 'o2._id': ids[0]}
        elif ids:
            yield {'op': 'u', 'ns': self._ns, 'o2._id': {'$in': ids}}

    def unwatch(self, id):
        """Stop watching a single document id.

        NOTE(review): intentionally shadows the base ``unwatch`` (which
        deregisters the whole watch) with a different signature.
        """
        self._ids.remove(id)
class DeleteWatch(Watch):
    """Watch for deletes, optionally restricted to a set of document ids."""

    def __init__(self, watcher, collection, ids=None):
        super().__init__(watcher)
        self._ns = '{}.{}'.format(
            collection.database.name,
            collection.name)
        self._ids = ids

    def __repr__(self):
        return '<DeleteWatch {} {}>'.format(self._ns, self._ids)

    def oplog_branches(self):
        """Yield oplog branches for deletes ('d'); the deleted id is 'o._id'.

        Bug fix: a single-id watch used to yield BOTH the scalar branch and
        the equivalent '$in' branch (the two ifs overlapped); now exactly one
        branch is produced. An empty id list still yields nothing.
        """
        if self._ids is None:
            yield {'op': 'd', 'ns': self._ns}
            return
        ids = list(self._ids)
        if len(ids) == 1:
            yield {'op': 'd', 'ns': self._ns, 'o._id': ids[0]}
        elif ids:
            yield {'op': 'd', 'ns': self._ns, 'o._id': {'$in': ids}}

    def unwatch(self, id):
        """Stop watching a single document id.

        NOTE(review): intentionally shadows the base ``unwatch`` (which
        deregisters the whole watch) with a different signature.
        """
        self._ids.remove(id)
| [
"rick@arborian.com"
] | rick@arborian.com |
0c8e2bf1927d6dfb77056648dbdb1d8d9b79192b | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/pubsub/v1/pubsub-v1-py/google/pubsub_v1/__init__.py | e4f49d6e9d87d97f720f363967d71bd48ff32afd | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,857 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.publisher import PublisherClient
from .services.publisher import PublisherAsyncClient
from .services.schema_service import SchemaServiceClient
from .services.schema_service import SchemaServiceAsyncClient
from .services.subscriber import SubscriberClient
from .services.subscriber import SubscriberAsyncClient
from .types.pubsub import AcknowledgeRequest
from .types.pubsub import CreateSnapshotRequest
from .types.pubsub import DeadLetterPolicy
from .types.pubsub import DeleteSnapshotRequest
from .types.pubsub import DeleteSubscriptionRequest
from .types.pubsub import DeleteTopicRequest
from .types.pubsub import DetachSubscriptionRequest
from .types.pubsub import DetachSubscriptionResponse
from .types.pubsub import ExpirationPolicy
from .types.pubsub import GetSnapshotRequest
from .types.pubsub import GetSubscriptionRequest
from .types.pubsub import GetTopicRequest
from .types.pubsub import ListSnapshotsRequest
from .types.pubsub import ListSnapshotsResponse
from .types.pubsub import ListSubscriptionsRequest
from .types.pubsub import ListSubscriptionsResponse
from .types.pubsub import ListTopicSnapshotsRequest
from .types.pubsub import ListTopicSnapshotsResponse
from .types.pubsub import ListTopicsRequest
from .types.pubsub import ListTopicsResponse
from .types.pubsub import ListTopicSubscriptionsRequest
from .types.pubsub import ListTopicSubscriptionsResponse
from .types.pubsub import MessageStoragePolicy
from .types.pubsub import ModifyAckDeadlineRequest
from .types.pubsub import ModifyPushConfigRequest
from .types.pubsub import PublishRequest
from .types.pubsub import PublishResponse
from .types.pubsub import PubsubMessage
from .types.pubsub import PullRequest
from .types.pubsub import PullResponse
from .types.pubsub import PushConfig
from .types.pubsub import ReceivedMessage
from .types.pubsub import RetryPolicy
from .types.pubsub import SchemaSettings
from .types.pubsub import SeekRequest
from .types.pubsub import SeekResponse
from .types.pubsub import Snapshot
from .types.pubsub import StreamingPullRequest
from .types.pubsub import StreamingPullResponse
from .types.pubsub import Subscription
from .types.pubsub import Topic
from .types.pubsub import UpdateSnapshotRequest
from .types.pubsub import UpdateSubscriptionRequest
from .types.pubsub import UpdateTopicRequest
from .types.schema import CreateSchemaRequest
from .types.schema import DeleteSchemaRequest
from .types.schema import GetSchemaRequest
from .types.schema import ListSchemasRequest
from .types.schema import ListSchemasResponse
from .types.schema import Schema
from .types.schema import ValidateMessageRequest
from .types.schema import ValidateMessageResponse
from .types.schema import ValidateSchemaRequest
from .types.schema import ValidateSchemaResponse
from .types.schema import Encoding
from .types.schema import SchemaView
__all__ = (
'PublisherAsyncClient',
'SchemaServiceAsyncClient',
'SubscriberAsyncClient',
'AcknowledgeRequest',
'CreateSchemaRequest',
'CreateSnapshotRequest',
'DeadLetterPolicy',
'DeleteSchemaRequest',
'DeleteSnapshotRequest',
'DeleteSubscriptionRequest',
'DeleteTopicRequest',
'DetachSubscriptionRequest',
'DetachSubscriptionResponse',
'Encoding',
'ExpirationPolicy',
'GetSchemaRequest',
'GetSnapshotRequest',
'GetSubscriptionRequest',
'GetTopicRequest',
'ListSchemasRequest',
'ListSchemasResponse',
'ListSnapshotsRequest',
'ListSnapshotsResponse',
'ListSubscriptionsRequest',
'ListSubscriptionsResponse',
'ListTopicSnapshotsRequest',
'ListTopicSnapshotsResponse',
'ListTopicSubscriptionsRequest',
'ListTopicSubscriptionsResponse',
'ListTopicsRequest',
'ListTopicsResponse',
'MessageStoragePolicy',
'ModifyAckDeadlineRequest',
'ModifyPushConfigRequest',
'PublishRequest',
'PublishResponse',
'PublisherClient',
'PubsubMessage',
'PullRequest',
'PullResponse',
'PushConfig',
'ReceivedMessage',
'RetryPolicy',
'Schema',
'SchemaServiceClient',
'SchemaSettings',
'SchemaView',
'SeekRequest',
'SeekResponse',
'Snapshot',
'StreamingPullRequest',
'StreamingPullResponse',
'SubscriberClient',
'Subscription',
'Topic',
'UpdateSnapshotRequest',
'UpdateSubscriptionRequest',
'UpdateTopicRequest',
'ValidateMessageRequest',
'ValidateMessageResponse',
'ValidateSchemaRequest',
'ValidateSchemaResponse',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
6e15139c4557ef381a727beb2930c858fa210a14 | dfab6798ece135946aebb08f93f162c37dd51791 | /timber/aokuang.timber/aokuang.timber/actors/formselectorfield/events.py | ac42bdb2d1eb95d7fe1639ac26c31dab2b19fe40 | [] | no_license | yxqd/luban | 405f5f7dcf09015d214079fe7e23d644332be069 | 00f699d15c572c8bf160516d582fa37f84ac2023 | refs/heads/master | 2020-03-20T23:08:45.153471 | 2012-05-18T14:52:43 | 2012-05-18T14:52:43 | 137,831,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,700 | py | # -*- Python -*-
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2006-2011 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
import luban
from ....DemoPanelActor import Actor as base
class Actor(base):

    title='A form with one selector field'
    description = [
        'The selector field is attached with event hanlders for ',
        '"change", "focus", and "blur" events',
        ]

    def createDemoPanel(self, **kwds):
        """Build the demo document: a login form whose gender selector logs
        its change/focus/blur events into a side log panel."""
        doc = luban.e.document()
        form = doc.form(title='login')
        gender = form.selector(
            label='gender',
            entries=[('male', 'Male'), ('female', 'Female')],
            selection='female')
        log = doc.document(id='log', Class='log', title='log')
        # "change" round-trips to the server (handled by self.onchange below);
        # focus/blur are handled client-side by appending into the log panel.
        gender.onchange = luban.a.load(
            actor=self.name, routine='onchange',
            old=luban.event.old, new=luban.event.new)
        gender.onfocus = luban.a.select(element=log).append(
            newelement=luban.e.paragraph(text="focused"))
        gender.onblur = luban.a.select(element=log).append(
            newelement=luban.e.paragraph(text="blured"))
        return doc

    def onchange(self, old=None, new=None, **kwds):
        """Server-side change handler: append a description of the value
        change to the log panel."""
        note = luban.e.paragraph(
            text="value changed from %r to %r" % (old, new))
        return luban.a.select(id='log').append(newelement=note)
# End of file
| [
"linjiao@caltech.edu"
] | linjiao@caltech.edu |
32fd38b52799830d21b7091490311c7c83f30a60 | 967056372d123ad5a86705156aea928d7352fe6a | /python基础/src/modules/my.py | 0ad230e316beccd863f6dd702a4f103130deffea | [] | no_license | lxy39678/Python | ea179ef929eb9ddddb2460656aad07880ae67f84 | aba0434bc8ca7a2abdaa3ced3c4d84a8de819c61 | refs/heads/master | 2020-04-18T22:05:11.683134 | 2019-01-27T07:49:09 | 2019-01-27T07:49:09 | 167,783,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | #!/usr/bin/env python
#coding=utf-8
import time
from datetime import datetime
import sys
# Python 2 script. A bare `print` statement emits an empty line.
print
# sys.setdefaultencoding is deleted by site.py at interpreter startup;
# reloading the module restores it. NOTE(review): this is a well-known
# anti-pattern -- it changes encoding behavior process-wide and can mask
# real encoding bugs.
reload(sys)
sys.setdefaultencoding('UTF-8')
print sys.getfilesystemencoding()
# Round-trip the timezone name: decode from the filesystem encoding, then
# re-encode as UTF-8 bytes for printing.
print time.strftime("%Z").decode(sys.getfilesystemencoding()).encode('utf-8')
print "中国".encode('utf-8').decode('utf-8')
"895902857@qq.com"
] | 895902857@qq.com |
cde49f25158ed056ea012c0cbf5dd02e5061df9b | 2fdf33eff3f22a4f2e0337f065646de8fe6cc01f | /mq/utils.py | ec63e584dd00aa2c4cdd4436bde69e7ac89a83b4 | [
"MIT"
] | permissive | apnarm/python-mq | 007d978fe6a23b0d65555909ad34f2a21df5c5d5 | 14037cf86abc2393c4f8d791fd76bcca7a781607 | refs/heads/master | 2020-04-15T10:30:05.739270 | 2014-06-09T20:58:47 | 2014-06-09T20:58:47 | 20,659,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | import sys
import time
from contextlib import contextmanager
def chunk_list(sequence, size):
    """Split *sequence* into a list of consecutive chunks of at most *size*.

    The final chunk is shorter when len(sequence) is not a multiple of size.
    Works on any sliceable sequence (lists, strings, tuples, ...).

    Fix: the original used the Python-2-only ``xrange`` inside a needless
    nested generator; ``range`` behaves correctly on both Python 2 and 3.
    """
    return [sequence[start: start + size]
            for start in range(0, len(sequence), size)]
@contextmanager
def time_elapsed(name=''):
    """
    A context manager for timing blocks of code.

    Writes "<name> took <elapsed> ms" to stderr when the block exits;
    sub-millisecond times are shown with 4 decimal places.
    From https://gist.github.com/raymondbutcher/5168588

    Fix: the timing is now reported in a ``finally`` clause, so it is also
    printed when the timed block raises an exception (the exception still
    propagates).
    """
    start = time.time()
    try:
        yield
    finally:
        elapsed = (time.time() - start) * 1000
        if name:
            sys.stderr.write('%s took ' % name)
        if elapsed < 1:
            sys.stderr.write('%.4f ms\n' % elapsed)
        else:
            sys.stderr.write('%d ms\n' % elapsed)
"randomy@gmail.com"
] | randomy@gmail.com |
d828594adc6f7eb9cf70397564962feb6b86a16a | 796f96e8c01db0bb9493c4871be66d92689b73ab | /5097_회전/sol1.py | 6e38169fa762c0448805a506e36bde655123aa0d | [] | no_license | swhan9404/swExpertAcademy | cf82b957b7ea6231d1f4b95f77f74e7717d2de0d | dea4176229121983c6daed80811de20de6da5ff6 | refs/heads/master | 2023-04-19T22:57:37.111757 | 2021-05-02T14:58:44 | 2021-05-02T14:58:44 | 337,338,058 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | import sys
sys.stdin = open("input.txt")
T = int(input())
for tc in range(1, T+1):
N, M = map(int, input().split())
# list로 queue 구현
inp_arr= list(map(int, input().split()))
for _ in range(M) :
front = inp_arr.pop(0)
inp_arr.append(front)
result = inp_arr[0]
print("#{} {}".format(tc, result))
| [
"swhan9404@naver.com"
] | swhan9404@naver.com |
db422a8166459af5fa7fb6aefad431847328f65f | fab72028e5aa12c0a1a1cf1b550212c017979f78 | /test_app/models.py | cd0321fab47456961ba3a922b9260af7557b4e50 | [] | no_license | jpic/test_input_relation | 0e43c64e659005472d3e7210ed0a1c8f65a05bd4 | 24a485670b2945430657ee4b998e5aaa756329c3 | refs/heads/master | 2021-01-02T08:19:08.771571 | 2015-05-19T23:43:38 | 2015-05-19T23:43:38 | 35,902,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | from django.db import models
class TestModel(models.Model):
    # Test-app model (repo: test_input_relation) -- presumably exercises
    # widget/relation rendering; confirm against the app's forms/views.
    name = models.CharField(max_length=200, null=True, blank=True)
    # Self-referential foreign key. NOTE(review): Django >= 2.0 requires an
    # explicit on_delete argument here -- this code targets an older Django.
    relation = models.ForeignKey('self', null=True, blank=True)
    name0 = models.CharField(max_length=200, null=True, blank=True)
    name1 = models.CharField(max_length=200, null=True, blank=True)
    name2 = models.CharField(max_length=200, null=True, blank=True)
    # Choice-restricted field; renders as a select widget in model forms.
    name3 = models.CharField(max_length=200, null=True, blank=True,
        choices=[('a', 'a')])
"jamespic@gmail.com"
] | jamespic@gmail.com |
c5c3040710b66ad3829cacb41d49fd4ffee9f79b | 5966449d2e29c9b64351895db2932f94f9de42da | /catkin_ws/build/calibration_msgs/catkin_generated/pkg.develspace.context.pc.py | c5aed50da0a8c9acf6a127e9060b081e8582ea96 | [] | no_license | godaeseong/GoHriProject | 8cbce6934485b8ba3253fc7b6c5b5b59397b4518 | 425e70b7c91b6215f5477fc2250d2b0ac96577be | refs/heads/master | 2021-05-11T22:11:56.099580 | 2018-01-15T02:20:43 | 2018-01-15T02:20:43 | 117,484,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Machine-generated catkin package context (see header comment); do not edit
# by hand -- regenerate via the catkin build instead.
CATKIN_PACKAGE_PREFIX = ""
# Include dirs: the ';'-separated template value split into a list (empty
# string would mean no include dirs).
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/hri/catkin_ws/devel/.private/calibration_msgs/include;/home/hri/catkin_ws/src/calibration_toolkit/calibration_msgs/include".split(';') if "/home/hri/catkin_ws/devel/.private/calibration_msgs/include;/home/hri/catkin_ws/src/calibration_toolkit/calibration_msgs/include" != "" else []
# Catkin run dependencies as a space-separated string.
PROJECT_CATKIN_DEPENDS = "message_runtime;sensor_msgs;actionlib_msgs;calibration_common;eigen_conversions".replace(';', ' ')
# Linker flags for this package's libraries.
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lcalibration_msgs".split(';') if "-lcalibration_msgs" != "" else []
PROJECT_NAME = "calibration_msgs"
PROJECT_SPACE_DIR = "/home/hri/catkin_ws/devel/.private/calibration_msgs"
PROJECT_VERSION = "1.0.0"
| [
"bigdream0129@naver.com"
] | bigdream0129@naver.com |
0082adaaf92b0c63edfb3eb67e82c9f23b7aaff1 | c6d852e5842cf6f74123445d20ff03876377ae26 | /lemon/python22/lemon_25_191023_充值接口_用例关联/test_practice_01_25/middler_ware/db_handler.py | 965b6da45635f25ac85180de1363a2f57a60577d | [] | no_license | songyongzhuang/PythonCode_office | 0b3d35ca5d58bc305ae90fea8b1e8c7214619979 | cfadd3132c2c7c518c784589e0dab6510a662a6c | refs/heads/master | 2023-02-13T14:06:10.610935 | 2021-01-14T09:11:32 | 2021-01-14T09:11:32 | 327,183,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py | # --*-- coding :utf-8 --*--
# Project :python22
# Current file :db_handler.py
# Author :Administrator
# Create time :2019/10/24 15:23
# IDE :PyCharm
# TODO 成长很苦, 进步很甜, 加油!
from pymysql.cursors import DictCursor
from lemon_25_191023_充值接口_用例关联.test_practice_01_25.common.db_handler_shujuku \
import DBHandler # 数据库
from lemon_25_191023_充值接口_用例关联.test_practice_01_25.common.config_handler_peizhiwenjian \
import config # 配置文件
class MyDBHandler(DBHandler):
    """Database handler preconfigured from the [db] section of the config file."""
    def __init__(self, **kw):  # rows come back as dicts (see cursorclass); the driver default is tuples
        super().__init__(
            host=config.read('db', 'host'),
            # eval() converts the config string to an int.
            # NOTE(review): int(...) would be safer than eval() on config input.
            port=eval(config.read('db', 'port')),
            user=config.read('db', 'user'),
            password=config.read('db', 'password'),
            charset=config.read('db', 'charset'),
            database=config.read('db', 'database'),
            cursorclass=DictCursor,  # default cursor yields tuples; DictCursor yields dicts
            **kw
        )
| [
"songyongzhuang9@163.com"
] | songyongzhuang9@163.com |
9d85792ee3739689520034f7a9100b57d97a01e6 | 3b56423a34de9b4adae13d0fee4905609a9e2410 | /contrib/linearize/linearize-hashes.py | 99af17d0cc38195bef52e9e3482702549581d36a | [
"MIT"
] | permissive | mulecore/mule | 5302db1cb6596475f9496495dba12b4d1cfd0d2c | 9b2d9bf0ffc47963b5c0ce9b760cfff757961533 | refs/heads/master | 2023-03-27T02:25:49.455321 | 2021-03-26T10:31:13 | 2021-03-26T10:31:13 | 351,247,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,667 | py | #!/usr/bin/env python3
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Copyright (c) 2017-2020 The Mule Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
try: # Python 3
import http.client as httplib
except ImportError: # Python 2
import httplib
import json
import re
import base64
import sys
import os
import os.path
settings = {}
##### Switch endian-ness #####
def hex_switchEndian(s):
    """ Switches the endianness of a hex string (in pairs of hex chars) """
    # Walk the pair start offsets back-to-front and re-join the pairs.
    return b''.join(
        s[i:i + 2].encode()
        for i in reversed(range(0, len(s), 2))).decode()
class MuleRPC:
    """Minimal JSON-RPC-over-HTTP client with HTTP basic-auth credentials."""

    def __init__(self, host, port, username, password):
        credentials = ("%s:%s" % (username, password)).encode('utf-8')
        self.authhdr = b"Basic " + base64.b64encode(credentials)
        self.conn = httplib.HTTPConnection(host, port=port, timeout=30)

    def execute(self, obj):
        """POST a JSON-RPC payload; return the decoded reply, or None on failure."""
        headers = {
            'Authorization': self.authhdr,
            'Content-type': 'application/json',
        }
        try:
            self.conn.request('POST', '/', json.dumps(obj), headers)
        except ConnectionRefusedError:
            print('RPC connection refused. Check RPC settings and the server status.',
                  file=sys.stderr)
            return None
        resp = self.conn.getresponse()
        if resp is None:
            print("JSON-RPC: no response", file=sys.stderr)
            return None
        return json.loads(resp.read().decode('utf-8'))

    @staticmethod
    def build_request(idx, method, params):
        """Assemble a JSON-RPC 1.1 request object with the given batch index."""
        return {
            'version': '1.1',
            'method': method,
            'id': idx,
            'params': [] if params is None else params,
        }

    @staticmethod
    def response_is_error(resp_obj):
        """True when the reply dict carries a non-null 'error' member."""
        return resp_obj.get('error') is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
    """Print the hash of every block from min_height..max_height, in order.

    Hashes are fetched with batched 'getblockhash' JSON-RPC calls of at most
    ``max_blocks_per_call`` requests each. Exits the process on an RPC error;
    returns None early if the server cannot be reached.
    """
    rpc = MuleRPC(settings['host'], settings['port'],
               settings['rpcuser'], settings['rpcpassword'])
    height = settings['min_height']
    while height < settings['max_height']+1:
        # Last batch may be smaller than max_blocks_per_call.
        num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
        batch = []
        for x in range(num_blocks):
            batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
        reply = rpc.execute(batch)
        if reply is None:
            print('Cannot continue. Program will halt.')
            return None
        for x,resp_obj in enumerate(reply):
            if rpc.response_is_error(resp_obj):
                print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
                sys.exit(1)
            assert(resp_obj['id'] == x) # assume replies are in-sequence
            if settings['rev_hash_bytes'] == 'true':
                # Optionally emit hashes byte-reversed (little-endian form).
                resp_obj['result'] = hex_switchEndian(resp_obj['result'])
            print(resp_obj['result'])
        height += num_blocks
def get_rpc_cookie():
    """Load RPC credentials from the datadir's .cookie file into settings.

    The cookie file holds a single "user:password" line written by the node.
    """
    cookie_path = os.path.join(os.path.expanduser(settings['datadir']), '.cookie')
    with open(cookie_path, 'r', encoding="ascii") as cookie_file:
        line = cookie_file.readline()
    fields = line.split(":")
    settings['rpcuser'] = fields[0]
    settings['rpcpassword'] = fields[1]
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-hashes.py CONFIG-FILE")
        sys.exit(1)
    # Parse the config file: '#' comments are skipped, key=value pairs are
    # collected into the module-level settings dict.
    f = open(sys.argv[1], encoding="utf8")
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()
    # Defaults for any settings the config file omitted.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 9652
    if 'min_height' not in settings:
        settings['min_height'] = 0
    if 'max_height' not in settings:
        settings['max_height'] = 313000
    if 'rev_hash_bytes' not in settings:
        settings['rev_hash_bytes'] = 'false'
    # Credentials may come either from rpcuser/rpcpassword or from the
    # node's cookie file under datadir; one of the two must be available.
    use_userpass = True
    use_datadir = False
    if 'rpcuser' not in settings or 'rpcpassword' not in settings:
        use_userpass = False
    if 'datadir' in settings and not use_userpass:
        use_datadir = True
    if not use_userpass and not use_datadir:
        print("Missing datadir or username and/or password in cfg file", file=sys.stderr)
        sys.exit(1)
    # Numeric settings arrive as strings from the config parser.
    settings['port'] = int(settings['port'])
    settings['min_height'] = int(settings['min_height'])
    settings['max_height'] = int(settings['max_height'])
    # Force hash byte format setting to be lowercase to make comparisons easier.
    settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
    # Get the rpc user and pass from the cookie if the datadir is set
    if use_datadir:
        get_rpc_cookie()
    get_block_hashes(settings)
| [
"mule@muleda.com"
] | mule@muleda.com |
40fcffde1d8ae8851c4f31733e22a88f8701a626 | ea9f38468c32efc07b090ff65ae0e3369486fb8b | /update/organizations.py | d7ca0b95ea56884cfbf5989ae4130373c1226511 | [] | no_license | KohoVolit/activities | f8942e9480a1d0a20bfa1727d53f49f4df9f0bb8 | 1569e8df19c9deb718e08e17ca17baa6bbedcbb0 | refs/heads/master | 2021-01-10T16:46:46.104425 | 2016-03-30T01:53:07 | 2016-03-30T01:53:07 | 53,628,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,753 | py | # insert and update organizations
# chamber and political groups
import json
import api
import authentication
import scrapeutils
url = 'http://www.psp.cz/eknih/cdrom/opendata/poslanci.zip'
unlfile = 'organy.unl'
api.login(authentication.email,authentication.password)
zfile = scrapeutils.download(url,zipped=True)
organy = scrapeutils.zipfile2rows(zfile,unlfile)
# chamber:
for row in organy:
if row[2] == '11': #chamber
term = row[3][3:]
org = {
"name": row[4].strip(),
'classification': 'chamber',
'id': int(row[0].strip()),
'founding_date': scrapeutils.cs2iso(row[6].strip()),
'attributes': {
"abbreviation": "PSP",
"term": int(term)
}
}
if (row[7].strip() != ''):
org["dissolution_date"] = scrapeutils.cs2iso(row[7].strip())
params = {'id': "eq.%s" % (org['id'])}
r = api.get("organizations", params)
rdata = r.json()
if len(rdata) == 0:
r = api.post("organizations",org)
else:
o = r.json()[0]
try:
z = o['attributes'].copy()
z.update(org['attributes'])
org['attributes'] = z
except:
nothing = None
r = api.patch("organizations", params, org)
# political groups:
# Upsert political group organizations (record type 1); each group links to
# its parent chamber (row[1]) and inherits the chamber's term number.
for row in organy:
    if row[2] == '1':  # political group
        params = {
            "id": "eq." + row[1].strip()
        }
        parent = api.get_one("organizations", params)
        org = {
            "name": row[4].strip(),
            'classification': 'political group',
            'id': int(row[0].strip()),
            'founding_date': scrapeutils.cs2iso(row[6].strip()),
            'attributes': {
                "abbreviation": row[3].strip(),
                "parent_id": parent['id'],
                "term": parent['attributes']['term']
            }
        }
        if row[7].strip() != '':
            org["dissolution_date"] = scrapeutils.cs2iso(row[7].strip())
        # Insert when the organization is unknown, otherwise merge the
        # existing attributes with the new ones and patch the record.
        params = {'id': "eq.%s" % (org['id'])}
        r = api.get("organizations", params)
        rdata = r.json()
        if len(rdata) == 0:
            r = api.post("organizations", org)
        else:
            o = rdata[0]
            try:
                z = o['attributes'].copy()
                z.update(org['attributes'])
                org['attributes'] = z
            except (AttributeError, KeyError, TypeError):
                # Existing record has no usable attributes dict; keep ours.
                pass
            r = api.patch("organizations", params, org)
# regions
# Upsert region organizations: type 75 (current regions) and type 8 (old
# regions, all of which were dissolved on 31.05.2002).
# (Removed a leftover unused `params` dict that was copy-pasted from the
# political-groups loop and immediately overwritten.)
for row in organy:
    if row[2] == '75' or row[2] == '8':  # regions
        org = {
            "name": row[4].strip(),
            'classification': 'region',
            'id': int(row[0].strip()),
            'founding_date': scrapeutils.cs2iso(row[6].strip()),
            'attributes': {
                "abbreviation": row[3].strip()
            }
        }
        if row[2] == '8':  # old regions: force the known dissolution date
            row[7] = '31.05.2002'
        if row[7].strip() != '':
            org["dissolution_date"] = scrapeutils.cs2iso(row[7].strip())
        # Insert when the organization is unknown, otherwise merge the
        # existing attributes with the new ones and patch the record.
        params = {'id': "eq.%s" % (org['id'])}
        r = api.get("organizations", params)
        rdata = r.json()
        if len(rdata) == 0:
            r = api.post("organizations", org)
        else:
            o = rdata[0]
            try:
                z = o['attributes'].copy()
                z.update(org['attributes'])
                org['attributes'] = z
            except (AttributeError, KeyError, TypeError):
                # Existing record has no usable attributes dict; keep ours.
                pass
            r = api.patch("organizations", params, org)
# electoral lists
# Upsert electoral list organizations (record type 6).
# (Removed a leftover unused `params` dict that was copy-pasted from the
# political-groups loop and immediately overwritten.)
for row in organy:
    if row[2] == '6':  # electoral list
        org = {
            "name": row[4].strip(),
            'classification': 'electoral list',
            'id': int(row[0].strip()),
            'founding_date': scrapeutils.cs2iso(row[6].strip()),
            'attributes': {
                "abbreviation": row[3].strip()
            }
        }
        if row[7].strip() != '':
            org["dissolution_date"] = scrapeutils.cs2iso(row[7].strip())
        # Insert when the organization is unknown, otherwise merge the
        # existing attributes with the new ones and patch the record.
        params = {'id': "eq.%s" % (org['id'])}
        r = api.get("organizations", params)
        rdata = r.json()
        if len(rdata) == 0:
            r = api.post("organizations", org)
        else:
            o = rdata[0]
            try:
                z = o['attributes'].copy()
                z.update(org['attributes'])
                org['attributes'] = z
            except (AttributeError, KeyError, TypeError):
                # Existing record has no usable attributes dict; keep ours.
                pass
            r = api.patch("organizations", params, org)
# governments
# Upsert government organizations (record type 5). The old inline comment
# wrongly said "electoral list" -- the classification written is 'government'.
# (Removed a leftover unused `params` dict that was copy-pasted from the
# political-groups loop and immediately overwritten.)
for row in organy:
    if row[2] == '5':  # government
        org = {
            "name": row[4].strip(),
            'classification': 'government',
            'id': int(row[0].strip()),
            'founding_date': scrapeutils.cs2iso(row[6].strip()),
            'attributes': {
                "abbreviation": row[3].strip()
            }
        }
        if row[7].strip() != '':
            org["dissolution_date"] = scrapeutils.cs2iso(row[7].strip())
        # Insert when the organization is unknown, otherwise merge the
        # existing attributes with the new ones and patch the record.
        params = {'id': "eq.%s" % (org['id'])}
        r = api.get("organizations", params)
        rdata = r.json()
        if len(rdata) == 0:
            r = api.post("organizations", org)
        else:
            o = rdata[0]
            try:
                z = o['attributes'].copy()
                z.update(org['attributes'])
                org['attributes'] = z
            except (AttributeError, KeyError, TypeError):
                # Existing record has no usable attributes dict; keep ours.
                pass
            r = api.patch("organizations", params, org)
| [
"michal.skop@kohovolit.eu"
] | michal.skop@kohovolit.eu |
5e660b6bf487e557b8c33745d5b66cea959e7c4e | cbd2eee46663fad5b5375b13c8c21b1b06eb4c6b | /hrjk/engine/kafka_engine.py | b0e5773f5a18a78305aeb9dc4ba4f5cc70279331 | [] | no_license | 1026237416/Python | ef474ee40d7efcd6dabb6fb0ecba81b4dcfc7e14 | ffa8f9ffb8bfec114b0ca46295db05c4213c4c30 | refs/heads/master | 2021-07-05T00:57:00.456886 | 2019-04-26T10:13:46 | 2019-04-26T10:13:46 | 114,510,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,099 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version: 1.0
@author: li
@license: Apache Licence
@contact: 1026237416@qq.com
@site:
@software: PyCharm
@file: kafka_engine.py
@time: 2018/7/17 17:58
"""
import json
from config import KAFKA_HOST, KAFKA_PORT, conductor_recv_topic
from common import case_log
def default_call_back(data, **kwargs):
    """Fallback consumer callback: dump the received message to stdout.

    Accepts arbitrary keyword arguments because ``on_message`` invokes
    callbacks as ``call_back(value, key=...)``; the old single-parameter
    signature raised TypeError on that call. The parenthesized prints also
    make the function valid on both Python 2 and 3 (output unchanged).
    """
    print("******************Receive New Data******************")
    print(type(data))
    print(data)
    print("****************************************************")
def on_message(messages, call_back):
    """Iterate Kafka messages, validate each as JSON, then invoke call_back.

    call_back receives the raw message value plus ``key=<message key>``.
    Messages that are not valid JSON are logged and skipped.
    """
    for message in messages:
        if message is not None:
            case_log.info("Recv new message: %s" % message.value)
            try:
                # Parse purely to validate; the callback gets the raw value.
                # (Previously the parse result was bound to an unused local.)
                json.loads(message.value)
                call_back(message.value, key=message.key)
            except ValueError as e:
                # NOTE(review): e.message is Python-2-only, consistent with
                # the rest of this module's Python 2 code.
                case_log.warning(
                    "Receive an illegal message: %s" % e.message)
class KafkaEngineClientByPyKafka(object):
    """Kafka client backed by the pykafka library (alternative backend)."""
    def __init__(self):
        # Imported lazily so the module loads even when pykafka is absent.
        from pykafka import KafkaClient
        kafka_host = "%s:%s" % (KAFKA_HOST, str(KAFKA_PORT))
        self.client = KafkaClient(hosts=kafka_host)
        self.topic = self.client.topics[conductor_recv_topic]
    def get_all_topic(self):
        # Return the broker's topic mapping as exposed by pykafka.
        return self.client.topics
    def create_topic(self, topic_name):
        # Not implemented (stub).
        pass
    def recv_msg(self, call_back):
        # Consume from the configured topic and hand each message to
        # call_back via the module-level on_message dispatcher (blocks).
        messages = self.topic.get_simple_consumer(consumer_group='case_engine',
                                                  auto_commit_enable=True,
                                                  consumer_id='case_engine')
        on_message(messages=messages, call_back=call_back)
class KafkaEngineClientByKafkaPython(object):
def __init__(self):
self.kafka_host = "%s:%s" % (KAFKA_HOST, str(KAFKA_PORT))
def recv_msg(self, call_back):
from kafka import KafkaConsumer
messages = KafkaConsumer(conductor_recv_topic,
bootstrap_servers=[self.kafka_host])
on_message(messages=messages, call_back=call_back)
def send_msg(self, topic_name, msg, key):
from kafka import KafkaProducer
producer = KafkaProducer(
bootstrap_servers=self.kafka_host,
value_serializer=lambda v: json.dumps(v).encode('utf-8'),
acks="all"
)
producer.send(topic=topic_name,
key=key,
value=msg
)
case_log.info(
"Send message to Kafka, using topic: %s, key: %s, value: %s" % (
topic_name, key, msg
))
producer.flush()
def kafka_engine():
kafka_client = KafkaEngineClientByKafkaPython()
return kafka_client
if __name__ == '__main__':
client = kafka_engine()
msg = {'requestinfo': '0000000', 'processName': '201807182248',
'requestway': 'run'}
client.send_msg(topic_name="EVENT.conductor.ack",
key=b"35b71d7d-fdca-4070-8940-85f1f1fd82c1",
msg=msg
)
| [
"1026237416@qq.com"
] | 1026237416@qq.com |
96efef66f4e59be983e051f8f19e13fff585a4dc | da0ec71df76ea5071a1481ef5b54cafd6d74d720 | /python/minesweeper/minesweeper.py | 84edda519d12bb338bbe9d7f8204a1bbc09c5e6f | [] | no_license | apalala/exercism | 6bd4926c0c78790c366b3a95c1c594669f871410 | 9d295a1ba1f6950c1e03e9de6fa10ccb392ffa22 | refs/heads/master | 2021-01-18T15:56:51.685366 | 2017-03-31T23:40:29 | 2017-03-31T23:40:29 | 86,697,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | ARROUND = [
(-1,-1), (-1, 0), (-1, 1),
(0, -1), (0, 1),
(1, -1), (1, 0), (1, 1)
]
def board(matrix):
def adjust(value):
if value == ' ':
return '1'
elif value.isdigit():
return str(int(value) + 1)
else:
return value
if not matrix:
raise ValueError('Empty board')
m = len(matrix)
n = len(matrix[0])
horz_border = '+' + '-' * (n-2) + '+'
if matrix[0] != horz_border or matrix[m - 1] != horz_border:
raise ValueError('Corrupt border')
matrix = [list(row) for row in matrix]
for i in range(1, m - 1):
if len(matrix[i]) != n:
raise ValueError('Rows not of same length')
if matrix[i][0] != '|' or matrix[i][n - 1] != '|':
raise ValueError('Corrupt border')
for j in range(1, n - 1):
c = matrix[i][j]
if c not in '* 012345678':
raise ValueError('Unknown symbol in matrix')
elif c != '*':
continue
else:
for x, y in ARROUND:
matrix[i + x][j + y] = adjust(matrix[i + x][j + y])
return [''.join(matrix) for matrix in matrix]
| [
"apalala@gmail.com"
] | apalala@gmail.com |
eafa72b01bb3f0d3be4213af4e7b7bf50022defd | 0d0afd1dce972b4748ce8faccd992c019794ad9e | /integra/construtora/__openerp__.py | 44b6587f79173aa9f793c38484bb52a60caab17f | [] | no_license | danimaribeiro/odoo-erp | e2ca2cfe3629fbedf413e85f7c3c0453fd16941e | d12577bf7f5266b571cbedeb930720d653320e96 | refs/heads/master | 2020-01-23T21:32:16.149716 | 2016-11-05T15:35:40 | 2016-11-05T15:35:40 | 67,892,809 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,212 | py | # -*- coding: utf-8 -*-
#from __future__ import division, print_function, unicode_literals
{
'name': 'Construcao',
'version': '1.0',
'category': 'Integra',
'description': u'Construção civil',
'author': 'Integra',
'maintainer': 'Integra',
'website': 'http://www.ERPIntegra.com.br',
'depends': [
'base',
'sale',
'sped_base',
'sped',
'project',
'account',
'project_long_term',
'finan',
'finan_modelo_lancamento',
'finan_contrato',
'finan_contrato_checklist',
'finan_cheque',
'purchase',
'compras',
'sped_purchase',
'integra_libreoffice',
'share',
],
'update_xml': [
'security/groups.xml',
'views/mail_compose_message_negociacao_view.xml',
'views/project_task.xml',
'views/project_etapa.xml',
'views/project_orcamento.xml',
'views/project_orcamento_planejamento.xml',
'views/purchase_orcamento_planejamento.xml',
'views/purchase_orcamento_item.xml',
'views/project_orcamento_medicao.xml',
'views/res_partner_address_view.xml',
'views/res_cep_view.xml',
'views/partner_view.xml',
'views/res_partner_corretor_view.xml',
'views/res_partner_contato_view.xml',
'views/crm_lead_prospecto_view.xml',
'views/imovel_view.xml',
'views/imovel_casa.xml',
'views/imovel_apartamento.xml',
'views/imovel_terreno.xml',
'views/imovel_chacara.xml',
'views/imovel_fazenda.xml',
'views/imovel_area.xml',
'views/imovel_galpao.xml',
'views/imovel_predio.xml',
'views/imovel_sala_comercial.xml',
'views/imovel_loja.xml',
'views/imovel_outros.xml',
'views/hr_employee_view.xml',
'views/product_view.xml',
'views/p_view.xml',
'views/purchase_view.xml',
'views/purchase_cotacao.xml',
'views/cotacao_supplier_info.xml',
'views/purchase_solicitacao_cotacao.xml',
'views/purchase_solicitacao_item.xml',
'views/purchase_order.xml',
'views/purchase_order_line_view.xml',
#'views/finan_tabela_venda.xml',
'views/imovel_crm.xml',
'views/finan_contrato_etapa.xml',
'views/finan_contrato_condicao_pagamento_view.xml',
'views/finan_contrato_condicao_pagamento_renegociacao_view.xml',
'views/finan_contrato_proposta.xml',
'views/finan_contrato_analise_financeiro.xml',
'views/finan_contrato_analise_juridico.xml',
'views/finan_contrato_receber_view.xml',
'views/sale_dashboard_view.xml',
'views/crm_lead_prospecto_dashboard.xml',
'views/finan_contrato_proposta_dashboard.xml',
'views/finan_centrocusto_view.xml',
'views/finan_pagar_view.xml',
'views/finan_receber_view.xml',
'views/finan_documento_view.xml',
'views/project_view.xml',
'views/const_zoneamento.xml',
'views/finan_comissao.xml',
'views/sped_documento_nfe_recebida.xml',
'views/sped_documento_nfe_recebida_manual.xml',
'views/sped_documento_nfse_recebida.xml',
'views/sped_documento_ecf_recebido.xml',
'views/lo_modelo.xml',
'wizard/relatorio_projeto_tarefa.xml',
'wizard/relatorio_projeto_orcamento.xml',
'wizard/relatorio_projeto_orcamento_compras.xml',
'wizard/relatorio_projeto_orcamento_medicao.xml',
'wizard/relatorio_orcamento_prefeitura.xml',
'wizard/relatorio_cronograma_desembolso.xml',
'wizard/relatorio_cronograma_fisico_gantt.xml',
'wizard/finan_relatorio_pagar.xml',
'wizard/finan_relatorio_diario_fornecedor_rateio.xml',
'wizard/finan_relatorio_diario_cliente_rateio.xml',
'wizard/finan_pagar_sintetico_rateio_view.xml',
'wizard/finan_relatorio_movimentacao.xml',
'wizard/res_users_imovel_wizard.xml',
'views/bi_view.xml',
'views/finan_contrato_dashboard_view.xml',
],
'init_xml': [],
'installable': True,
'application': True,
'auto_install': False,
}
| [
"danimaribeiro@gmail.com"
] | danimaribeiro@gmail.com |
620b2eb456cd0bacb38145e96666ec13851dbbd2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03393/s159130267.py | fab972600880229cd01c0c174bf16684d93f78dd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | S = input()
alp = 'abcdefghijklmnopqrstuvwxyz'
if len(S) < 26:
for char in alp:
if not char in S:
print(S+char)
exit()
else:
for i in range(1,26)[::-1]:
if S[i-1] < S[i]:
for j in range(i,26)[::-1]:
if S[j] > S[i-1]:
print(S[:i-1]+S[j])
exit()
print(-1) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e982cd970fcc733ffec8bc5132aa59a74bc15347 | e3fd35a8443aaf2f293ae03a5f6c819046a4dd21 | /leetcode-python/medium/_875_koko_eating_bananas/solution.py | 8386f90d8a84f00fa8f75457e852eec954a432c3 | [] | no_license | hieutran106/leetcode-ht | 2223ea6bcd459c2cdbc33344c0ff69df7f8a3c7f | 8332eb20e613f82cda2e326218154c7803a32403 | refs/heads/main | 2023-08-09T02:52:41.360360 | 2023-07-27T10:12:28 | 2023-07-27T10:12:28 | 234,890,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | from typing import List
import math
class Solution:
def minEatingSpeed(self, piles: List[int], h: int) -> int:
"""
We need to search k in range [1..max(piles]
Bruce force is one option, but we can optimize by using binary search
"""
left = 1
right = max(piles)
while left <= right:
mid = (left + right) // 2
can_finish = self.can_finish_banana_before_guard(mid, piles, h)
# if koko can finish, we need to search to the left to find minimum value of k
if can_finish:
right = mid - 1
else:
# koko cannot finish, need to eat at faster speed
left = mid + 1
return left
def can_finish_banana_before_guard(self, speed, piles, returning_hour):
hour = 0
for banana in piles:
hour += math.ceil(banana/speed)
return hour <= returning_hour
| [
"hieutran106@gmail.com"
] | hieutran106@gmail.com |
a868b212e954ecde9a6e2ddbb97f02b3e2926de7 | 7229c0e24c02839963a3ac744b8b03dffc1a1463 | /setup.py | ee1a52ace29003f083e5dda5f8a8ec9ae1bc4141 | [
"MIT"
] | permissive | bio2bel/hippie | a4589bc912df305852fa472ef29aebad22dd2a98 | 7b21ae56a372e9972153811be9e869f6614472ad | refs/heads/master | 2020-03-29T18:47:32.021576 | 2019-08-26T13:35:17 | 2019-08-26T13:35:17 | 150,231,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | # -*- coding: utf-8 -*-
"""Setup module for Bio2BEL HIPPIE."""
import setuptools
if __name__ == '__main__':
setuptools.setup()
| [
"cthoyt@gmail.com"
] | cthoyt@gmail.com |
12f03eb4d7c67b7384a949715d2a876250691b6e | e3bdb7844f634efd89109079d22cade713c4899d | /openapi_client/models/soft_descriptor.py | 6e955d44a5031c57cec38929bb14019ee69a59d9 | [] | no_license | pc-coholic/Python | 5170c27da09b066c353e09539e404961f7ad50b7 | b7251c31339b579f71fb7ee9db05be51e9e43361 | refs/heads/master | 2023-04-19T02:42:02.914726 | 2021-04-26T16:07:37 | 2021-04-26T16:07:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,796 | py | # coding: utf-8
"""
Payment Gateway API Specification.
The documentation here is designed to provide all of the technical guidance required to consume and integrate with our APIs for payment processing. To learn more about our APIs please visit https://docs.firstdata.com/org/gateway. # noqa: E501
The version of the OpenAPI document: 21.2.0.20210406.001
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class SoftDescriptor(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'dynamic_merchant_name': 'str',
'customer_service_number': 'str',
'mcc': 'str',
'dynamic_address': 'Address'
}
attribute_map = {
'dynamic_merchant_name': 'dynamicMerchantName',
'customer_service_number': 'customerServiceNumber',
'mcc': 'mcc',
'dynamic_address': 'dynamicAddress'
}
def __init__(self, dynamic_merchant_name=None, customer_service_number=None, mcc=None, dynamic_address=None): # noqa: E501
"""SoftDescriptor - a model defined in OpenAPI""" # noqa: E501
self._dynamic_merchant_name = None
self._customer_service_number = None
self._mcc = None
self._dynamic_address = None
self.discriminator = None
self.dynamic_merchant_name = dynamic_merchant_name
if customer_service_number is not None:
self.customer_service_number = customer_service_number
if mcc is not None:
self.mcc = mcc
if dynamic_address is not None:
self.dynamic_address = dynamic_address
@property
def dynamic_merchant_name(self):
"""Gets the dynamic_merchant_name of this SoftDescriptor. # noqa: E501
Store \"doing-business-as\" name. # noqa: E501
:return: The dynamic_merchant_name of this SoftDescriptor. # noqa: E501
:rtype: str
"""
return self._dynamic_merchant_name
@dynamic_merchant_name.setter
def dynamic_merchant_name(self, dynamic_merchant_name):
"""Sets the dynamic_merchant_name of this SoftDescriptor.
Store \"doing-business-as\" name. # noqa: E501
:param dynamic_merchant_name: The dynamic_merchant_name of this SoftDescriptor. # noqa: E501
:type: str
"""
if dynamic_merchant_name is None:
raise ValueError("Invalid value for `dynamic_merchant_name`, must not be `None`") # noqa: E501
if dynamic_merchant_name is not None and not re.search(r'^(?!\s*$).+', dynamic_merchant_name): # noqa: E501
raise ValueError(r"Invalid value for `dynamic_merchant_name`, must be a follow pattern or equal to `/^(?!\s*$).+/`") # noqa: E501
self._dynamic_merchant_name = dynamic_merchant_name
@property
def customer_service_number(self):
"""Gets the customer_service_number of this SoftDescriptor. # noqa: E501
Customer service phone number information that is passed to the issuer (it may appear on the cardholder’s statement) or if merchant wants to pass information that differs from the information stored on our master File. # noqa: E501
:return: The customer_service_number of this SoftDescriptor. # noqa: E501
:rtype: str
"""
return self._customer_service_number
@customer_service_number.setter
def customer_service_number(self, customer_service_number):
"""Sets the customer_service_number of this SoftDescriptor.
Customer service phone number information that is passed to the issuer (it may appear on the cardholder’s statement) or if merchant wants to pass information that differs from the information stored on our master File. # noqa: E501
:param customer_service_number: The customer_service_number of this SoftDescriptor. # noqa: E501
:type: str
"""
if customer_service_number is not None and len(customer_service_number) > 10:
raise ValueError("Invalid value for `customer_service_number`, length must be less than or equal to `10`") # noqa: E501
if customer_service_number is not None and not re.search(r'^[0-9]+$', customer_service_number): # noqa: E501
raise ValueError(r"Invalid value for `customer_service_number`, must be a follow pattern or equal to `/^[0-9]+$/`") # noqa: E501
self._customer_service_number = customer_service_number
@property
def mcc(self):
"""Gets the mcc of this SoftDescriptor. # noqa: E501
The 4-digit merchant category code (MCC). The merchant might be associated with multiple MCCs. In that case the MCC provided here will be the one that better describes the current transaction. # noqa: E501
:return: The mcc of this SoftDescriptor. # noqa: E501
:rtype: str
"""
return self._mcc
@mcc.setter
def mcc(self, mcc):
"""Sets the mcc of this SoftDescriptor.
The 4-digit merchant category code (MCC). The merchant might be associated with multiple MCCs. In that case the MCC provided here will be the one that better describes the current transaction. # noqa: E501
:param mcc: The mcc of this SoftDescriptor. # noqa: E501
:type: str
"""
if mcc is not None and len(mcc) > 4:
raise ValueError("Invalid value for `mcc`, length must be less than or equal to `4`") # noqa: E501
self._mcc = mcc
@property
def dynamic_address(self):
"""Gets the dynamic_address of this SoftDescriptor. # noqa: E501
:return: The dynamic_address of this SoftDescriptor. # noqa: E501
:rtype: Address
"""
return self._dynamic_address
@dynamic_address.setter
def dynamic_address(self, dynamic_address):
"""Sets the dynamic_address of this SoftDescriptor.
:param dynamic_address: The dynamic_address of this SoftDescriptor. # noqa: E501
:type: Address
"""
self._dynamic_address = dynamic_address
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SoftDescriptor):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"emargules@bluepay.com"
] | emargules@bluepay.com |
f7fcfd90159532e1747d9597ff8f8ac95b02b0b6 | 149df7dee4b00a65fd7edd143e6d5a8791b0f05f | /plotlib.py | 1589c324f8266ce95d327f744e20f95db8530943 | [] | no_license | PiotrDabkowski/Py3D | 4a65c4344884255996ea4c02cda1af8b25bc8f54 | cd383f47d22de28171be59690defe66e838d08cb | refs/heads/master | 2020-04-06T06:46:41.424810 | 2014-11-09T20:20:39 | 2014-11-09T20:20:39 | 26,407,756 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,531 | py | from PIL import Image
from PIL import ImageDraw
import guif
import math
class Plot:
def __init__(self, size=(600,400), pos=(0,0), scale_x=1, scale_y=1, centre=True):
self.__vid = guif.Video(size)
self.__size = size
self.__im = Image.new('RGBA', self.__size, 'white')
self.__draw = ImageDraw.Draw(self.__im)
self.d = self.__draw
self.__pos = pos
if centre:
self.__pos = -size[0]/2, -size[1]/2
self.__scale_x = float(scale_x)
self.__scale_y = float(scale_y)
self.update()
def update(self):
self.__vid.change_frame(self.__im)
def clear(self):
self.__im = Image.new('RGBA', self.__size, 'white')
self.__draw = ImageDraw.Draw(self.__im)
def line(self, p1, p2, color='black', thickness=1):
self.__draw.line(list(self.__point_transform(p1))+list(self.__point_transform(p2)), color, thickness)
def polygon(self, points, color='black', fill=False, thickness=1):
if fill: # Fill inside
points = [self.__point_transform(p) for p in points]
self.__draw.polygon(points, color)
else:
last = points[-1]
for point in points:
self.line(last, point, color, thickness)
last = point
def circle(self, centre, radius, color='black', full=False, thickness=1):
(x, y), r= self.__point_transform(centre), radius
if full:
self.__draw.ellipse((x-r, y-r, x+r, y+r), color) #Fix scale!
else:
n=36
step = 2*math.pi/n
points = [(round(centre[0]+radius*math.cos(s*step)), round(centre[1]+radius*math.sin(s*step))) for s in xrange(n)]
self.polygon(points, color, False, thickness)
def graph(self, func, x_domain, y_domain):
pass
def text(self, text, pos, color='black'):
self.__draw.text(self.__point_transform(pos), text, color)
self.__update()
def change_view(self, left_bottom_corner, right_top_corner):
sx, sy = self.__size
self.__pos = left_bottom_corner
self.__scale_x = abs(left_bottom_corner[0]-right_top_corner[0])/float(sx)
self.__scale_y = abs(left_bottom_corner[1]-right_top_corner[1])/float(sy)
def __point_transform(self, point):
#return point[0], self.__size[1]-point[1]
return (point[0]-self.__pos[0])/self.__scale_x, (self.__size[1]-point[1]+self.__pos[1])/self.__scale_y
| [
"piodrus@gmail.com"
] | piodrus@gmail.com |
3d2e1e76aeff9fb853b71d131c8b95d2b0916654 | ef54d37f8a3303013ca7469871a320d303957ed7 | /robo4.2/4.2/lib/python2.7/site-packages/RoboGalaxyLibrary/keywords/native.py | 276bc2b15f011103f08073a5398ba2f918fd3560 | [] | no_license | richa92/Jenkin_Regression_Testing | d18badfcf16bda682dfe7bcbbd66f54a9a27a58d | 24a74926170cbdfafa47e972644e2fe5b627d8ff | refs/heads/master | 2020-07-12T10:01:59.099137 | 2019-08-27T12:14:53 | 2019-08-27T12:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | import os
if os.name == 'nt':
from ntnative import NativeOsKeywords
elif os.name == 'posix':
from posixnative import NativeOsKeywords
| [
"akul@SAC0MKUVCQ.asiapacific.hpqcorp.net"
] | akul@SAC0MKUVCQ.asiapacific.hpqcorp.net |
a6149cc5912615ead7a405dd66d41c846a8e853d | 38d1fda1533b1ee545e4f507a77e405114ca2a51 | /tests/__init__.py | c1af8ee0dad556b017986378cedbfeb13c12ca02 | [
"WTFPL"
] | permissive | ActivKonnect/castor | 6d5f13715a78fac0b503688da95afb0130cf3929 | a6837389c23eaba9c581a194689c30587820c9e8 | refs/heads/develop | 2020-12-24T15:22:29.845970 | 2016-03-04T14:12:41 | 2016-03-04T14:12:41 | 38,937,755 | 6 | 1 | null | 2016-03-04T14:14:30 | 2015-07-11T19:26:26 | Python | UTF-8 | Python | false | false | 159 | py | # vim: fileencoding=utf-8 tw=100 expandtab ts=4 sw=4 :
#
# Castor
# (c) 2015 ActivKonnect
# Rémy Sanchez <remy.sanchez@activkonnect.com>
from .repo import *
| [
"remy.sanchez@hyperthese.net"
] | remy.sanchez@hyperthese.net |
97bfe0b12d6ac454218f101c5824d8b48fc8d518 | 4331b28f22a2efb12d462ae2a8270a9f666b0df1 | /.history/dvdstore/webapp/urls_20190914174439.py | 3a5fcfde3376715beebb709d88261bd656548217 | [] | no_license | ZiyaadLakay/csc312.group.project | ba772a905e0841b17478eae7e14e43d8b078a95d | 9cdd9068b5e24980c59a53595a5d513c2e738a5e | refs/heads/master | 2020-07-26T23:30:22.542450 | 2019-09-16T11:46:41 | 2019-09-16T11:46:41 | 200,703,160 | 0 | 0 | null | 2019-08-05T17:52:37 | 2019-08-05T17:52:37 | null | UTF-8 | Python | false | false | 1,490 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='home'),
path('clerk/', views.clerk, name='clerk'),
path('clerk/register2',views.register2, name='register2'),
path('clerk/register3',views.register3, name='register3'),
path('clerk/model_form_upload',views.model_form_upload, name='model_form_upload'),
path('transactions/register2',views.register2, name='register2'),
path('transactions/register3',views.register3, name='register3'),
path('transactions/model_form_upload',views.model_form_upload, name='model_form_upload'),
path('booking',views.booking, name='booking'),
path('clerk/checkout',views.checkout, name='checkout'),
path('clerk/checkin',views.checkin, name='checkin'),
path('transactions/', views.transactions, name='transactions'),
path('userstbl/', views.userstbl, name='userstbl'),
path('clerk/deleteMovie',views.deleteMovie, name='deleteMovie'),
path('transactions/deleteTransaction',views.deleteTransaction, name='deleteTransaction'),
path('userstbl/deleteUser',views.deleteUser, name='deleteUser'),
path('user_detail/', views.user_detail, name='user_detail'),
path('accounts/registerCustomer',views.registerCustomer, name='registerCustomer'),
path('user_detail/updateCustomer',views.updateCustomer, name='updateCustomer'),
path('user_detail/updateUser',views.updateUser, name='updateUser'),
]
| [
"uzairjoneswolf@gmail.com"
] | uzairjoneswolf@gmail.com |
c80e313bd12a3aacf520301551236756fa08a96a | 0f2e7d7c4323fb8607d5a1d709cb36f8de13078c | /0x02-python-import_modules/2-args.py | 60fb770e2d178d9b997f88c47f765de76722a66d | [] | no_license | nelsfichera/holbertonschool-higher_level_programming | ab031450f5ebc4401a255187fad82ad8b8fd9c8b | 4f3e2155746ad8e8c4cb56443c6750466d66d346 | refs/heads/main | 2023-08-18T21:31:59.425903 | 2021-09-22T16:52:32 | 2021-09-22T16:52:32 | 361,780,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | #!/usr/bin/python3
if __name__ == "__main__":
import sys
n_args = len(sys.argv)
if n_args == 2:
print("1 argument:".format(n_args - 1))
print("1: {:s}".format(sys.argv[1]))
elif n_args > 2:
print("{:d} arguments".format(n_args - 1))
count = 1
while count < n_args:
print("{:d}: {:s}".format(count, sys.argv[count]))
count += 1
else:
print("0 arguments.")
| [
"nelsfichera@gmail.com"
] | nelsfichera@gmail.com |
9ee29ecd7bf3848994ee9db3b3493e2ba6053189 | 8b2435044491c4f1887bcce6fdd3989b2f55be88 | /meddet/data/datasets/base.py | 6fdee7f869ce73b45b3d54a5ab17ceb0fc970800 | [] | no_license | JoeeYF/MedDetection | ed169c481ff628a771966ba5e5290f799ac2323b | 8c183d8bf632fe6bf54841ac20db19955331f336 | refs/heads/main | 2023-06-12T00:26:13.537943 | 2021-07-06T02:32:42 | 2021-07-06T02:32:42 | 382,782,483 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,236 | py |
from abc import abstractmethod
from torch.utils.data import Dataset
import json
from ..pipelines import ForwardCompose, LoadPrepare
class BasicDataset(Dataset):
N_FOLD = 5
dataset_format = ""
def __init__(self, dataset_file, image_prefix, pipeline, infer_mode=False, task=None, fold=None):
self._TASK = task
self._FOLD = fold
self.dataset = json.load(open(dataset_file))
self.check_dataset_format(self.dataset)
self.dataset_file = dataset_file
self.image_prefix = image_prefix
self.infer_mode = infer_mode
self.pairs = self.dataset['pairs']
self.pre_pipeline = LoadPrepare()
self.pipeline = ForwardCompose(pipeline)
def __len__(self):
return len(self.pairs)
@abstractmethod
def check_dataset_format(self, dataset) -> bool:
return False
@abstractmethod
def run_training_strategy(self):
pass
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += '(dataset_file={}, image_prefix={},infer_mode={})'.format(
self.dataset_file, self.image_prefix, self.infer_mode)
repr_str += '\nPipeline: \n{}'.format(self.pipeline)
return repr_str | [
"qiao_yuanfang@163.com"
] | qiao_yuanfang@163.com |
12150465282ffc30111b9f36e79907253eb9a7f4 | 11fa6e6506076faea2c2411143cc53ee3852a676 | /dl/pytorch/tutorial/1.2_data_loading/data_load.py | bf6a515b77d3c1c1c5eb176bf8d95ed5f51232a2 | [] | no_license | ZhangXinNan/LearnPractice | 5f0403ebe589018b7c2dd4f349228dd83ab5c60f | 992679f8697923712e42f8a5e68fbfedbeeda82d | refs/heads/master | 2023-08-04T11:46:51.673750 | 2023-07-22T06:37:50 | 2023-07-22T06:37:50 | 60,957,100 | 18 | 7 | null | 2022-11-02T08:11:56 | 2016-06-12T08:51:41 | Shell | UTF-8 | Python | false | false | 2,795 | py | from __future__ import print_function, division
import os
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
plt.ion() # interactive mode
root_dir = 'd:/data_public/face/faces/'
csv_file = os.path.join(root_dir, 'face_landmarks.csv')
landmarks_frame = pd.read_csv(csv_file)
n = 65
img_name = landmarks_frame.iloc[n, 0]
landmarks = landmarks_frame.iloc[n, 1:].as_matrix()
landmarks = landmarks.astype('float').reshape(-1, 2)
print('Image name: {}'.format(img_name))
print('Landmarks shape: {}'.format(landmarks.shape))
print('First 4 Landmarks: {}'.format(landmarks[:4]))
def show_landmarks(image, landmarks):
"""Show image with landmarks"""
plt.imshow(image)
plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')
plt.pause(0.001) # pause a bit so that plots are updated
plt.figure()
show_landmarks(io.imread(os.path.join(root_dir, img_name)),
landmarks)
plt.show()
class FaceLandmarksDataset(Dataset):
"""Face Landmarks dataset."""
def __init__(self, csv_file, root_dir, transform=None):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.landmarks_frame = pd.read_csv(csv_file)
self.root_dir = root_dir
self.transform = transform
def __len__(self):
return len(self.landmarks_frame)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
img_name = os.path.join(self.root_dir,
self.landmarks_frame.iloc[idx, 0])
image = io.imread(img_name)
landmarks = self.landmarks_frame.iloc[idx, 1:]
landmarks = np.array([landmarks])
landmarks = landmarks.astype('float').reshape(-1, 2)
sample = {'image': image, 'landmarks': landmarks}
if self.transform:
sample = self.transform(sample)
return sample
face_dataset = FaceLandmarksDataset(csv_file=csv_file,
root_dir=root_dir)
fig = plt.figure()
for i in range(len(face_dataset)):
sample = face_dataset[i]
print(i, sample['image'].shape, sample['landmarks'].shape)
ax = plt.subplot(1, 4, i + 1)
plt.tight_layout()
ax.set_title('Sample #{}'.format(i))
ax.axis('off')
show_landmarks(**sample)
if i == 3:
plt.show()
plt.savefig('faces.png')
break
| [
"zhangxin19870504@163.com"
] | zhangxin19870504@163.com |
854658cf13f4ecfb539041685c704bf876be67dd | d60c06ce1cf676752c4d2331315a5fa4a18389b0 | /package/3xpath/05xpath解析基础.py | 088ae63f47c39b3bce44a03337c76af68d51a4b9 | [
"MIT"
] | permissive | HuangCongQing/Spider | f9bab6462ba7e2525a3297fceb0a0bc6f2b5e61a | f0204b8bc60ad4f78f1606a9e5c4c157094fea9b | refs/heads/master | 2023-07-05T23:19:00.272380 | 2023-07-04T12:29:27 | 2023-07-04T12:29:27 | 134,965,705 | 14 | 13 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | '''
Description:
Author: HCQ
Company(School): UCAS
Email: 1756260160@qq.com
Date: 2021-01-01 13:56:16
LastEditTime: 2021-01-04 10:34:44
FilePath: /Spider/package/3xpath/05xpath解析基础.py
'''
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from lxml import etree
if __name__ == "__main__":
#实例化好了一个etree对象,且将被解析的源码加载到了该对象中
tree = etree.parse('test.html') # 在线网页 用 : etree.HTML(page_text)
r = tree.xpath('/html/body/div') # 3个Element对象 [<Element div at 0x7fcb3819b4c8>, <Element div at 0x7fcb3819b5c8>, <Element div at 0x7fcb3819b608>]
# r = tree.xpath('/html//div') # 等价于上面/html/body/div
# r = tree.xpath('//div') # # 等价于上面
# r = tree.xpath('//div[@class="song"]')
# r = tree.xpath('//div[@class="tang"]//li[5]/a/text()')[0]
r = tree.xpath('//li[7]//text()') # ['度蜜月']
# r = tree.xpath('//div[@class="tang"]//text()')
# r = tree.xpath('//div[@class="song"]/img/@src')
print(r)
| [
"1756260160@qq.com"
] | 1756260160@qq.com |
136345666dd80f0d00f9787130e307f038c2ef90 | eb61d62ca1f6f0123e3771105f5dfbbd6115138d | /.history/leccion_20210910222316.py | dc35f2abc2ad2289ec975569ef538dca51980b08 | [] | no_license | Alopezm5/CORRECTO-2 | e0f14bcc3a88c0e222d10e3261e68532008bc42e | 223613f1fb04dce3fac9f82f243cb2f22fe100f3 | refs/heads/main | 2023-07-29T06:52:48.147424 | 2021-09-12T20:33:27 | 2021-09-12T20:33:27 | 388,995,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | # frase=input("ingresas frase")
# c=frase.read()
# if c>1 and c<=5:
# print(frase)
# else:
# print("ingrese bien la frase")
from datetime import date
class Calculos:
def ant(self,fecha):
hoy=date.today()
if hoy<fecha:
re | [
"85761855+Alopezm5@users.noreply.github.com"
] | 85761855+Alopezm5@users.noreply.github.com |
4011259abc726756075d513fe7b6d65e19716f62 | 1c27972511fcf83b8050f9412714e8c029296a38 | /timedisplay2/timedisplay2/wsgi.py | ee6e16e7a853335f135755348dd2effe40b2f57d | [] | no_license | enbaba/ninja_1 | 406ab7e8b115f08194b95b76be6aa80f5a49bb09 | 9c63c851da4847eb3e858983f53cbfd7121e81fa | refs/heads/master | 2022-12-05T14:33:38.336436 | 2020-08-29T00:48:09 | 2020-08-29T00:48:09 | 291,173,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for timedisplay2 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'timedisplay2.settings')
application = get_wsgi_application()
| [
"enbaba10@gmail.com"
] | enbaba10@gmail.com |
92a503fbafa9676d2472155a5bba906a947d0358 | a8b37bd399dd0bad27d3abd386ace85a6b70ef28 | /airbyte-ci/connectors/qa-engine/tests/test_inputs.py | 295dd8a484f90d3ff29746c323413a6e4167bcd2 | [
"LicenseRef-scancode-free-unknown",
"MIT",
"Elastic-2.0"
] | permissive | thomas-vl/airbyte | 5da2ba9d189ba0b202feb952cadfb550c5050871 | 258a8eb683634a9f9b7821c9a92d1b70c5389a10 | refs/heads/master | 2023-09-01T17:49:23.761569 | 2023-08-25T13:13:11 | 2023-08-25T13:13:11 | 327,604,451 | 1 | 0 | MIT | 2021-01-07T12:24:20 | 2021-01-07T12:24:19 | null | UTF-8 | Python | false | false | 6,629 | py | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from importlib.resources import files
from unittest.mock import MagicMock, call
import pandas as pd
import pytest
import requests
from qa_engine import constants, inputs
@pytest.mark.parametrize("catalog_url", [constants.OSS_CATALOG_URL, constants.CLOUD_CATALOG_URL])
def test_fetch_remote_catalog(catalog_url):
    """Smoke test against the live OSS and Cloud catalogs (requires network access)."""
    catalog = inputs.fetch_remote_catalog(catalog_url)
    assert isinstance(catalog, pd.DataFrame)
    expected_columns = ["connector_type", "connector_definition_id"]
    assert all(expected_column in catalog.columns for expected_column in expected_columns)
    assert set(catalog.connector_type.unique()) == {"source", "destination"}
def test_fetch_adoption_metrics_per_connector_version(mocker):
    """Stub BigQuery (pd.read_gbq) and check the shape of the adoption-metrics frame.

    Asserts that columns not in the expected set (e.g. "unexpected_column") are
    dropped, and that the packaged SQL is issued against the expected project.
    """
    fake_bigquery_results = pd.DataFrame(
        [
            {
                "connector_definition_id": "abcdefgh",
                "connector_version": "0.0.0",
                "number_of_connections": 10,
                "number_of_users": 2,
                "succeeded_syncs_count": 12,
                "failed_syncs_count": 1,
                "total_syncs_count": 3,
                "sync_success_rate": 0.99,
                "unexpected_column": "foobar",
            }
        ]
    )
    mocker.patch.object(inputs.pd, "read_gbq", mocker.Mock(return_value=fake_bigquery_results))
    # "unexpected_column" from the fake query result must not appear here.
    expected_columns = {
        "connector_definition_id",
        "connector_version",
        "number_of_connections",
        "number_of_users",
        "succeeded_syncs_count",
        "failed_syncs_count",
        "total_syncs_count",
        "sync_success_rate",
    }
    expected_sql_query = files("qa_engine").joinpath("connector_adoption.sql").read_text()
    expected_project_id = "airbyte-data-prod"
    adoption_metrics_per_connector_version = inputs.fetch_adoption_metrics_per_connector_version()
    assert isinstance(adoption_metrics_per_connector_version, pd.DataFrame)
    assert set(adoption_metrics_per_connector_version.columns) == expected_columns
    inputs.pd.read_gbq.assert_called_with(expected_sql_query, project_id=expected_project_id)
# Cases: newest entry success=True -> SUCCESS; newest success=False -> FAILURE;
# HTTP 404 or a null `success` value -> NOT_FOUND.
@pytest.mark.parametrize(
    "connector_name, mocked_json_payload, mocked_status_code, expected_status",
    [
        (
            "connectors/source-pokeapi",
            [
                {
                    "connector_version": "0.3.0",
                    "success": True,
                    "gha_workflow_run_url": "https://github.com/airbytehq/airbyte/actions/runs/5222619538",
                    "date": "2023-06-09T06:50:04",
                },
                {
                    "connector_version": "0.3.0",
                    "success": False,
                    "gha_workflow_run_url": "https://github.com/airbytehq/airbyte/actions/runs/5220000547",
                    "date": "2023-06-09T01:42:46",
                },
            ],
            200,
            inputs.BUILD_STATUSES.SUCCESS,
        ),
        (
            "connectors/source-pokeapi",
            [
                {
                    "connector_version": "0.3.0",
                    "success": False,
                    "gha_workflow_run_url": "https://github.com/airbytehq/airbyte/actions/runs/5222619538",
                    "date": "2023-06-09T06:50:04",
                },
                {
                    "connector_version": "0.3.0",
                    "success": True,
                    "gha_workflow_run_url": "https://github.com/airbytehq/airbyte/actions/runs/5220000547",
                    "date": "2023-06-09T01:42:46",
                },
            ],
            200,
            inputs.BUILD_STATUSES.FAILURE,
        ),
        ("connectors/source-pokeapi", None, 404, inputs.BUILD_STATUSES.NOT_FOUND),
        (
            "connectors/source-pokeapi",
            [
                {
                    "connector_version": "0.3.0",
                    "success": None,
                    "gha_workflow_run_url": "https://github.com/airbytehq/airbyte/actions/runs/5222619538",
                    "date": "2023-06-09T06:50:04",
                }
            ],
            200,
            inputs.BUILD_STATUSES.NOT_FOUND,
        ),
        ("connectors/source-pokeapi", None, 404, inputs.BUILD_STATUSES.NOT_FOUND),
    ],
)
def test_fetch_latest_build_status_for_connector(mocker, connector_name, mocked_json_payload, mocked_status_code, expected_status):
    """The latest (first) payload entry decides the reported build status."""
    # Mock the api call to get the latest build status for a connector version
    mock_response = MagicMock()
    mock_response.json.return_value = mocked_json_payload
    mock_response.status_code = mocked_status_code
    mock_get = mocker.patch.object(requests, "get", return_value=mock_response)
    connector_name = connector_name.replace("connectors/", "")
    assert inputs.fetch_latest_build_status_for_connector(connector_name) == expected_status
    assert mock_get.call_args == call(f"{constants.CONNECTOR_TEST_SUMMARY_URL}/{connector_name}/index.json")
def test_fetch_latest_build_status_for_connector_invalid_status(mocker, caplog):
    """An unrecognized `success` value degrades to NOT_FOUND and logs an error."""
    connector_name = "connectors/source-pokeapi"
    mocked_json_payload = [
        {
            "connector_version": "0.3.0",
            "success": "unknown_outcome_123",
            "gha_workflow_run_url": "https://github.com/airbytehq/airbyte/actions/runs/5222619538",
            "date": "2023-06-09T06:50:04",
        },
        {
            "connector_version": "0.3.0",
            "success": False,
            "gha_workflow_run_url": "https://github.com/airbytehq/airbyte/actions/runs/5220000547",
            "date": "2023-06-09T01:42:46",
        },
        {
            "connector_version": "0.3.0",
            "success": True,
            "gha_workflow_run_url": "https://github.com/airbytehq/airbyte/actions/runs/5212578854",
            "date": "2023-06-08T07:46:37",
        },
        {
            "connector_version": "0.3.0",
            "success": True,
            "gha_workflow_run_url": "https://github.com/airbytehq/airbyte/actions/runs/5198665885",
            "date": "2023-06-07T03:05:40",
        },
    ]
    # Mock the api call to get the latest build status for a connector version
    mock_response = MagicMock()
    mock_response.json.return_value = mocked_json_payload
    mock_response.status_code = 200
    mocker.patch.object(requests, "get", return_value=mock_response)
    assert inputs.fetch_latest_build_status_for_connector(connector_name) == inputs.BUILD_STATUSES.NOT_FOUND
    assert "Error: Unexpected build status value: unknown_outcome_123 for connector connectors/source-pokeapi" in caplog.text
| [
"noreply@github.com"
] | thomas-vl.noreply@github.com |
9cf14aedda368785b0835c31f60b9e5131cf77a1 | 1936f515b46c93face431709dcf485c8b7987d08 | /python/venv/bin/easy_install-3.6 | b45fd6eb73baf83cee4a4b7aae7ffa11e1ca9d23 | [] | no_license | NicolasLagaillardie/TremplinConnecte | 216be2c7078ff3b0ba5ea081da4aeaada5ef780c | dbd46445a13d48a6bc806d8e5e685279d5551b17 | refs/heads/master | 2020-04-27T01:08:38.293591 | 2019-03-06T15:26:48 | 2019-03-06T15:26:48 | 173,956,849 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | 6 | #!/home/lag/.aws/python/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" wrapper suffix so argv[0] matches the command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Delegate to the console_scripts entry point; its return value becomes the exit code.
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.6')()
    )
| [
"lagaillardie.nicolas@live.fr"
] | lagaillardie.nicolas@live.fr |
55f96f25d8159f07b2d0ec344980cf26cb305796 | e9e3f7c7a8c5e3029232327fb129967226e70c7c | /configatron/nodes/comment.py | 6ea96bafdea326f6eca5ceedda529b51283128f1 | [] | no_license | vtemian/configatron | 6ae8804d485597c732f6e8dbbb0b49156352677d | 863e73e983157bcf54e7fc2331831496ce5ba8d3 | refs/heads/master | 2023-02-02T09:05:58.361463 | 2020-12-09T18:05:44 | 2020-12-09T18:05:44 | 319,440,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | import re
from .base import Node
class Comment(Node):
    """Grammar node for a blank line or a ``;``-style comment line.

    A line matches when it is either:
      * a single newline character, or
      * optional leading whitespace, optionally followed by ``;`` and the
        rest of the line (so whitespace-only lines also count as comments).
    """
    # Alternation: "^(\n)" matches a bare newline; "(\s*(;.*)?)$" matches
    # optional whitespace plus an optional ";comment" tail to end of line.
    REGEX = re.compile("^(\n)|(\s*(;.*)?)$")
| [
"vladtemian@gmail.com"
] | vladtemian@gmail.com |
09cfc90b2f7570592ff9034e0952e3030cd5ae53 | 9e66d474c0bf3be77fe5c327d5e501f632154fa4 | /src/marketplace/forms.py | 11b179b2bd0066faa0881b5eef3f50226ee9b2ef | [] | no_license | rcmiskin10/university-social-network | 9064c7e952c18e445f3e32592eeedf0a0215b963 | d73d61e46b96561521a35777be363d2276617fc0 | refs/heads/master | 2021-01-17T13:04:49.069656 | 2017-01-11T03:22:23 | 2017-01-11T03:22:23 | 60,044,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,100 | py | from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from .models import Product
class ProductForm(forms.ModelForm):
    """Product-listing form rendered with crispy-forms (submit button attached)."""

    def __init__(self, data=None, files=None, **kwargs):
        # Forward the remaining keyword arguments as keywords. The original
        # passed the `kwargs` dict positionally, which bound it to ModelForm's
        # third positional parameter (auto_id) and silently dropped the
        # intended keyword arguments (e.g. instance, initial, prefix).
        super(ProductForm, self).__init__(data, files, **kwargs)
        self.helper = FormHelper()
        self.helper.form_show_labels = True
        self.helper.add_input(Submit('submit', 'Add Product', css_class='btn btn-primary'))

    # Placeholder text stands in for labels (label='' on every field).
    title = forms.CharField(label='', required=False,
            widget=forms.TextInput( attrs={'placeholder': 'Title: 50 character limit.'} ))
    description = forms.CharField(label='', required=False,
            widget=forms.TextInput( attrs={'placeholder': 'Description: 200 character limit.'} ))
    price = forms.CharField(label='', required=True,
            widget=forms.TextInput( attrs={'placeholder': 'Price'} ))
    image = forms.ImageField(required=False)

    class Meta:
        model = Product
        fields = ['image', 'category', 'title', 'description', 'price']
"rcmiskin@gmail.com"
] | rcmiskin@gmail.com |
f99da47b22a56aad16ca6fff173571222c2fd023 | 81d2815060bdf51e59f40366df72954ad28b2398 | /3rd_hw/blog/views.py | 10d159a9fa4b366b317ec75d748ca1377c8d86d6 | [] | no_license | ningpop/LikeLion_7th_HW | 6016604427e335250f2e3daeec27f17731612b47 | b2c65a0b7a9a928a45cf07b67cd9ed18fb86d799 | refs/heads/master | 2020-06-30T18:08:54.024617 | 2019-12-30T16:17:03 | 2019-12-30T16:17:03 | 200,902,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | from django.shortcuts import render, get_object_or_404, redirect
from .models import Blog
from django.utils import timezone
from django.core.paginator import Paginator
# Create your views here.
def home(request):
    """Blog index page: all posts, paginated three per page (?page=N).

    NOTE(review): `blogs` passes the raw manager to the template while `posts`
    is the paginated page object — confirm whether the template still uses
    `blogs` at all.
    """
    blogs = Blog.objects
    blog_list=Blog.objects.all()
    paginator = Paginator(blog_list,3)
    page = request.GET.get('page')
    posts = paginator.get_page(page)
    return render(request,'home.html',{'blogs':blogs,'posts':posts})
def detail(request, blog_id):
    """Show a single blog post; respond with 404 when the id does not exist."""
    post = get_object_or_404(Blog, pk=blog_id)
    context = {'blog': post}
    return render(request, 'detail.html', context)
def new(request):
    """Render the empty new-post form."""
    return render(request, 'new.html')
def create(request):
    """Create a post from query-string parameters, then jump to its detail page.

    NOTE(review): a mutating action driven by request.GET (matching the form
    method used in new.html, presumably) — switching to POST + CSRF protection
    would be safer.
    """
    blog = Blog()
    blog.title = request.GET['title']
    blog.body = request.GET['body']
    blog.pub_date = timezone.datetime.now()
    blog.save()
    return redirect('/' + str(blog.id))
def notice(request):
    """Render the static notice page."""
    return render(request, 'notice.html')
"craft1933@naver.com"
] | craft1933@naver.com |
7afd0e247bdfbc9411cf0e82498b396382fe3f4c | 959b410bf72bef851f9367ae9be42f654d7b0c94 | /setup.py | 52331ce330c06e458f72c28b9dc736b15908e384 | [] | no_license | boberstarosta/Watermark | 742c9e1233e7e2cd33cd6f28adb8a750c5bf9d5c | 56849afe85da11fdd604bcc35392f7a0498f4d26 | refs/heads/master | 2021-07-01T04:30:37.387931 | 2017-09-17T21:23:47 | 2017-09-17T21:23:47 | 103,862,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | from distutils.core import setup
#This is a list of files to install, and where
#(relative to the 'root' dir, where setup.py is)
#You could be more specific.
files = ["data/*"]
# NOTE(review): distutils is deprecated (removed from the stdlib in Python
# 3.12) — consider migrating to setuptools; kept as-is to preserve behavior.
setup(
    name = "watermark",
    version = "1.0",
    description = "Adding watermarks to images",
    author = "Bober",
    #Name the folder where your packages live:
    #(If you have other packages (dirs) or modules (py files) then
    #put them into the package directory - they will be found
    #recursively.)
    packages = ['watermark'],
    package_data = {"watermark" : files },
    scripts = ["run.py", "run.pyw"],
    long_description = """Really long text here."""
    )
| [
"boberstarosta@gmail.com"
] | boberstarosta@gmail.com |
97ac98b56d768380cf9b9c1ede253568585d7513 | 67d8173a716da10a7350213d98938aae9f2115ce | /ProgrammingCourses/CS61A/project/scheme/tests/11.py | dbc9107332c77b54238276f874348b76991872ab | [] | no_license | jxie0755/Learning_Python | 94490d41bdf93acf8396f843328e38b6da310b0f | 143422321cbc3715ca08f6c3af8f960a55887ced | refs/heads/master | 2021-11-02T22:47:35.790239 | 2021-09-26T04:26:23 | 2021-09-26T04:26:23 | 101,445,132 | 0 | 2 | null | 2019-02-19T15:48:44 | 2017-08-25T22:00:16 | Python | UTF-8 | Python | false | false | 2,690 | py | test = {
"name": "Problem 11",
"points": 2,
"suites": [
{
"cases": [
{
"code": r"""
>>> frame = global_frame.make_child_frame(Pair("a", Pair("b", Pair("c", nil))), Pair(1, Pair(2, Pair(3, nil))))
>>> global_frame.lookup("a") # Type SchemeError if you think this errors
SchemeError
>>> frame.lookup("a") # Type SchemeError if you think this errors
1
>>> frame.lookup("b") # Type SchemeError if you think this errors
2
>>> frame.lookup("c") # Type SchemeError if you think this errors
3
""",
"hidden": False,
"locked": False
},
{
"code": r"""
>>> frame = global_frame.make_child_frame(nil, nil)
>>> frame.parent is global_frame
True
""",
"hidden": False,
"locked": False
},
{
"code": r"""
>>> first = Frame(global_frame)
>>> second = first.make_child_frame(nil, nil)
>>> second.parent is first
True
""",
"hidden": False,
"locked": False
}
],
"scored": True,
"setup": r"""
>>> from scheme import *
>>> global_frame = create_global_frame()
""",
"teardown": "",
"type": "doctest"
},
{
"cases": [
{
"code": r"""
>>> # More argument values than formal parameters
>>> global_frame.make_child_frame(Pair("a", nil), Pair(1, Pair(2, Pair(3, nil))))
SchemeError
""",
"hidden": False,
"locked": False
},
{
"code": r"""
>>> # More formal parameters than argument values
>>> global_frame.make_child_frame(Pair("a", Pair("b", Pair("c", nil))), Pair(1, nil))
SchemeError
""",
"hidden": False,
"locked": False
},
{
"code": r"""
>>> # Values can be pairs.
>>> frame = global_frame.make_child_frame(Pair("a", Pair("b", nil)), Pair(Pair(1, nil), Pair(Pair(2, nil), nil)))
>>> frame.lookup("a")
Pair(1, nil)
>>> frame.lookup("b")
Pair(2, nil)
>>> frame2 = frame.make_child_frame(nil, nil) # Bind parents correctly
>>> frame2.lookup("a")
Pair(1, nil)
""",
"hidden": False,
"locked": False
}
],
"scored": True,
"setup": r"""
>>> from scheme import *
>>> global_frame = create_global_frame()
""",
"teardown": "",
"type": "doctest"
}
]
}
| [
"30805062+jxie0755@users.noreply.github.com"
] | 30805062+jxie0755@users.noreply.github.com |
9f48c60e618932f8fb6ccf0625f40cbc02f27339 | 990f13d54c09ebc217c5fcc509dab2f01f020d16 | /setup.py | 8ea30fe5ee3a5490078354b68d0ee2059bf02f16 | [] | no_license | jamesblunt/python-tclip | 5ade53b7185907e72257f8e9f6bea9fb1e71b836 | 301ca16adeb56ad791cf3b6827e8f2d4f4eecf9b | refs/heads/master | 2021-01-15T08:35:40.462261 | 2013-12-07T11:06:35 | 2013-12-07T11:06:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | from setuptools import setup
# Distribution metadata for the single-module package "tclip".
setup(
    name="python-tclip",
    author="Jiangge Zhang",
    author_email="tonyseek@gmail.com",
    version="0.1.0",
    zip_safe=False,  # presumably because the cffi-backed module cannot load from a zip — verify
    url="https://github.com/tonyseek/python-tclip",
    py_modules=["tclip"],
    install_requires=["cffi"],
)
| [
"tonyseek@gmail.com"
] | tonyseek@gmail.com |
666d936289216c750dad4e20c1392402bebc9ad9 | 4553d9a87fa134976b1622179dd2077f0f8142a0 | /apps/photos/models.py | cb1f3508bc4041860102d1f87737ee4832bc9f9d | [] | no_license | wd5/mamochkam.com | 27f07cd692ad46049ae5b8c0cd7425448fc62b66 | d554f2890e5dbc42b7124aef108938bb73481898 | refs/heads/master | 2021-01-23T21:03:17.772465 | 2011-06-05T21:56:05 | 2011-06-05T21:56:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,413 | py | # -*- coding: utf-8 -*-
from datetime import datetime
from PIL import Image
from django.contrib.auth.models import User
from django.conf import settings
from django.db import models
from mamochkam.apps.common.models import Entity
from mamochkam.apps.search.models import Tag
from mamochkam.apps.utils import images
#GALLERY MODEL
class Gallery(models.Model):
    """A named photo gallery; photos reference it via Photo.gallery."""
    title = models.CharField(max_length=50)
    description = models.CharField(max_length=255)
    slug = models.CharField(max_length=50)
    #STRING REPRESENTATION
    def __unicode__(self):
        return self.title
    '''
    #ADMIN
    class Admin:
        prepopulated_fields = {'slug': ('title',)}
    '''
    #META
    class Meta:
        db_table = 'gallery'
        verbose_name = u'Галлерея'
        verbose_name_plural = u'Галлереи'
#COMMENTS MODEL
class PhotoComment(models.Model):
    """A user comment attached to photos through Photo.comments (M2M)."""
    user = models.ForeignKey(User)
    pub_date = models.DateTimeField(default=datetime.now)
    text = models.CharField(max_length=255)
    #STRING REPRESENTATION
    def __unicode__(self):
        return self.text
    #META
    class Meta:
        # Oldest comment first.
        ordering = ['pub_date',]
        db_table = 'photo_comment'
        verbose_name = u'Комментарий'
        verbose_name_plural = u'Комментарии'
#MAIN PHOTO MODEL
class Photo(models.Model, Entity):
    """User-uploaded photo; saving also resizes the file and writes a 100x100 thumbnail."""
    user = models.ForeignKey(User, related_name='photos')
    pub_date = models.DateTimeField(default=datetime.now)
    gallery = models.ForeignKey(Gallery)
    photo = models.ImageField(upload_to='upload/photos')
    title = models.CharField(max_length=50)
    publish = models.BooleanField('Publish on site', default=False)
    comments = models.ManyToManyField(PhotoComment, blank=True)
    tags = models.ManyToManyField(Tag, related_name='photos', db_table='photo_tag', blank=True)
    #STRING REPRESENTATION
    def __unicode__(self):
        return self.title
    # Save the photo, then resize it in place and generate its thumbnail.
    def save(self, *args, **kwargs):
        # Accept and forward Django's save() arguments (force_insert, using, ...)
        # instead of discarding them as the original zero-argument override did.
        super(Photo, self).save(*args, **kwargs)
        images.resize(self.photo.path)
        images.generate_thumb(self.photo.path, (100, 100))
    #PREPARE URL
    def thumb_url(self):
        """Return the thumbnail URL, or '' when no file is associated."""
        try:
            return self.photo.url+'_thumb'
        except (KeyError, ValueError):
            # FieldFile.url raises ValueError when no file is set; the original
            # caught only KeyError, which .url does not raise.
            return ''
    #META
    class Meta:
        db_table = 'photo'
        verbose_name = u'Изображение'
        verbose_name_plural = u'Изображения'
| [
"nide@inbox.ru"
] | nide@inbox.ru |
e3f1939c28d697c0ca181b82b6da1242428ed978 | 3f4210b6a092c8a7cc43820e8d79f495018d4e68 | /starter_code/config.py | 1942d945533b930b9677779fccd1ec2f9034480b | [] | no_license | yopi1838/fyyur_yopiprabowo | a19e35722b9f2a29b44630fb8dd17972d210c824 | 557c0628417dd7be752f6e28afc8a5f510a4e1b6 | refs/heads/master | 2022-08-03T11:48:27.175178 | 2020-06-02T15:07:19 | 2020-06-02T15:07:19 | 268,832,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | import os
# NOTE(review): regenerated on every import, so sessions/signed cookies are
# invalidated each time the process restarts.
SECRET_KEY = os.urandom(32)
# Grabs the folder where the script runs.
basedir = os.path.abspath(os.path.dirname(__file__))
# Enable debug mode.
DEBUG = True
# Connect to the database
# TODO IMPLEMENT DATABASE URL
SQLALCHEMY_DATABASE_URI = 'postgres://yopiprabowooktiovan@localhost:5432/fyyur'
SQLALCHEMY_TRACK_MODIFICATIONS=False
"yopi1838@gmail.com"
] | yopi1838@gmail.com |
5ee277f18cbfac9784e9029f3e68f1925cf426b2 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4079/codes/1668_1396.py | 74195d25358994a69e2ee3eed0ec42b47ccd773c | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | conta_restaurante=float(input("valor))
if (gorjeta<=300):
print(gorjeta-round(gorjeta*0.10))
else:
gorjeta(gorjeta-(gorjeta*0.06)
print(conta_restaurante,2) | [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
d4673bf29ad447112ef749f137da86ca1e1456d5 | 25e40b56430fa44538aea8f77258d59899d69ff6 | /bank/context_processors.py | 12feb24860c0afb1c372305f2d10a04b0da5b429 | [] | no_license | Galaxylive/bankcenter | 207e96714fbfddc508596aad911ea85ce779bc32 | e4a2fe0e3e8b4e8b63b055aa61a39fb8cb20c12b | refs/heads/master | 2021-09-12T12:13:51.086481 | 2018-04-16T18:01:14 | 2018-04-16T18:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | from .models import Bank, Location
def required_context(request):
    """Template context processor exposing every Bank and Location to all pages."""
    return {
        'bank_list': Bank.objects.all(),
        'location_list': Location.objects.all(),
    }
| [
"akshar@agiliq.com"
] | akshar@agiliq.com |
756b2e0419e746d5f2ba7d94893571724ac1e41f | 81357d11785eb03ec9abceb93e545e5fd9bcc156 | /shop/urls.py | 41b553a2f380016c52011588f46c52fa86ce87ee | [] | no_license | ytshaha/my-first-blog | f63dcc8372aac8cd0b1bfad47a67200b2b19772d | 7ee8c893e8c98cd0c290a1121b94f34110018525 | refs/heads/master | 2023-04-19T15:31:29.996220 | 2021-05-11T18:22:12 | 2021-05-11T18:22:12 | 328,199,497 | 0 | 0 | null | 2021-04-02T07:48:22 | 2021-01-09T16:39:27 | HTML | UTF-8 | Python | false | false | 864 | py | from django.urls import path
from . import views
import django.contrib.auth.views as django_views
# URL namespace: reverse these as 'shop:<name>'.
app_name = 'shop'
urlpatterns = [
    path('', views.index, name='index'),
    path('moum/', views.introduction, name='introduction'),
    # path('ticket/', views.buying_ticket, name='buying_ticket'),
    path('ticket/', views.buying_ticket, name='buying_ticket'),
    path('ticket/result/<int:pk>', views.BuyingTicketResultView.as_view(), name='buying_ticket_result'),
    # path('shop_login/', django_views.LoginView.as_view(), name='shop_login'),
    # Built-in logout view; returns to the shop index afterwards.
    path('accounts/logout/', django_views.LogoutView.as_view(next_page='/shop/'), name='logout'),
    path('event/1/', views.open_event, name='event_1'),
    path('privacy/', views.privacy, name='privacy'),
    path('terms/', views.terms, name='terms'),
    path('policy/', views.policy, name='policy'),
]
"ytshaha@naver.com"
] | ytshaha@naver.com |
7c9dd7e6e35c09d734608c28e1631e8577429162 | 595c4816196017a36850770ca2e39b64a2fb7590 | /landsat/msi_landsat5.py | d8a67eba7f3bac42ba368f2d1d58a2a4149bf579 | [
"Apache-2.0"
] | permissive | XingyuXu-cuhk/Landuse_DL | 6feb546b26400733caaadcd4b420ac8557b8fe31 | a993988727387be22e7a88f6f5790de8a88dccd6 | refs/heads/master | 2023-06-06T09:23:22.323195 | 2021-06-18T09:03:16 | 2021-06-18T09:03:16 | 269,514,918 | 0 | 0 | Apache-2.0 | 2020-06-05T02:43:25 | 2020-06-05T02:43:24 | null | UTF-8 | Python | false | false | 4,215 | py | #!/usr/bin/env python
# Filename: ndvi_landsat5
"""
introduction: calculate multispectral indices of Landsat 5,
including Brightness, Greenness, Wetness, NDVI, NDWI, NDMI
The input image is download from Google Eerth Engine
For comparison, we will stack the NDVI of each image and give a name consiting with image date, pathrow, and 'NDVI'
authors: Huang Lingcao
email:huanglingcao@gmail.com
add time: 26 March, 2019
"""
import sys,os
from optparse import OptionParser
import rasterio
import numpy as np
HOME = os.path.expanduser('~')
codes_dir2 = HOME + '/codes/PycharmProjects/DeeplabforRS'
sys.path.insert(0, codes_dir2)
import basic_src.io_function as io_function
import datetime
import struct
# from basic_src.RSImage import RSImageclass
# from msi_landsat8 import get_band_names # get_band_names (img_path):
# from msi_landsat8 import get_band_name # get_band_name (img_path,pro_name):
from msi_landsat8 import cal_two_band # cal_two_band (img_path,band1_name,band2_name,pro_name)
from msi_landsat8 import cal_tasselled_cap # cal_tasselled_cap(img_path,coefficents,pro_name):
# from msi_landsat8 import save_oneband_list # save_oneband_list(save_path,ndvi_list, band_name_list, org_img):
from msi_landsat8 import batch_cal_msi # batch_cal_msi(img_file_list, output_name, cal_function):
# Normalized-difference indices, each delegating to msi_landsat8.cal_two_band.
# Band pairs use Landsat 5 TM band names as downloaded from Google Earth Engine
# (B3 red, B4 NIR, B5 SWIR1 per USGS band designations — verify against the
# actual band names in the downloaded stacks).
def cal_ndvi_landsat5(img_path):
    return cal_two_band(img_path, 'B4','B3','NDVI')
def cal_ndwi_landsat5(img_path):
    return cal_two_band(img_path, 'B2','B4','NDWI')
def cal_ndmi_landsat5(img_path):
    return cal_two_band(img_path, 'B4','B5','NDMI')
## coefficents are from paper: Crist, E. P. (1985).
# A TM tasseled cap equivalent transformation for reflectance factor data.
# Remote Sensing of Environment, 17(3), 301-306.
def cal_brightness_landsat5(img_path):
    # Six weights, one per TM reflective band, applied by cal_tasselled_cap.
    brightness_coeff = np.array([0.2043, 0.4158, 0.5524, 0.5741, 0.3124, 0.2303])
    return cal_tasselled_cap(img_path,brightness_coeff,'brightness')
def cal_greenness_landsat5(img_path):
    greenness_coeff = np.array([-0.1603, -0.2819, -0.4934, 0.794, -0.0002, -0.1446])
    return cal_tasselled_cap(img_path, greenness_coeff, 'greenness')
def cal_wetness_landsat5(img_path):
    wetness_coeff = np.array([0.0315, 0.2021, 0.3102, 0.1594, -0.6806, -0.6109])
    return cal_tasselled_cap(img_path, wetness_coeff, 'wetness')
def main(options, args):
    """Compute all Landsat 5 multispectral index stacks for a folder of GeoTIFFs.

    args[0] is the folder of images downloaded from Google Earth Engine
    (per the usage string in the __main__ block).
    """
    # Use the folder given on the command line; a hard-coded local debug path
    # had been left in place of the commented-out args[0].
    img_folder = args[0]
    img_file_list = io_function.get_file_list_by_ext('.tif', img_folder, bsub_folder=False)
    # img_file_list = img_file_list[:2] # for test

    satellite = 'landsat5'

    # normalized-difference indices
    batch_cal_msi(img_file_list, satellite+'_ndvi.tif', cal_ndvi_landsat5)
    batch_cal_msi(img_file_list, satellite+'_ndwi.tif', cal_ndwi_landsat5)
    batch_cal_msi(img_file_list, satellite+'_ndmi.tif', cal_ndmi_landsat5)
    # tasselled-cap components
    batch_cal_msi(img_file_list, satellite+'_brightness.tif', cal_brightness_landsat5)
    batch_cal_msi(img_file_list, satellite+'_greenness.tif', cal_greenness_landsat5)
    batch_cal_msi(img_file_list, satellite+'_wetness.tif', cal_wetness_landsat5)
if __name__ == "__main__":
    usage = "usage: %prog [options] image_folder "
    parser = OptionParser(usage=usage, version="1.0 2019-3-26")
    parser.description = 'Introduction: calculate MSI from the image downloaded from Google Earth Engine'
    parser.add_option("-o", "--output",
                      action="store", dest="output",
                      help="the output file path")
    # parser.add_option("-p", "--para",
    #                   action="store", dest="para_file",
    #                   help="the parameters file")
    (options, args) = parser.parse_args()
    # Require at least the image_folder positional argument.
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(2)
    ## set parameters files
    # if options.para_file is None:
    #     print('error, no parameters file')
    #     parser.print_help()
    #     sys.exit(2)
    # else:
    #     parameters.set_saved_parafile_path(options.para_file)
    main(options, args)
| [
"huanglingcao@gmail.com"
] | huanglingcao@gmail.com |
5ee65cca03a834f446911ce6e22c47e2f86c728d | ebb71d8710ac1445dc00b1c2a65a1e42979e2718 | /25-introduction-to-time-series-analysis-in-python/4-moving-average-ma-and-arma-models/03-estimating-an-ma-model.py | 642e3f491204bf86580507648592c948dc4a5af6 | [] | no_license | 0ashu0/datacamp-1 | 6473d83afc0ae00dc43116c1889bf065fb923ce4 | 9f0f64427ff07ff5f132886a5f44e19c5045c705 | refs/heads/master | 2020-12-13T07:56:18.687044 | 2019-01-09T07:18:02 | 2019-01-09T07:18:02 | 234,354,666 | 1 | 0 | null | 2020-01-16T15:49:48 | 2020-01-16T15:49:47 | null | UTF-8 | Python | false | false | 1,760 | py | '''
Estimating an MA Model
You will estimate the MA(1) parameter, <theta>, of one of the simulated series that you generated in the earlier exercise. Since the parameters are known for a simulated series, it is a good way to understand the estimation routines before applying it to real data.
For simulated_data_1 with a true <theta> of -0.9, you will print out the estimate of <theta>. In addition, you will also print out the entire output that is produced when you fit a time series, so you can get an idea of what other tests and summary statistics are available in statsmodels.
'''
import numpy as np
from statsmodels.tsa.arima_process import ArmaProcess

# Simulate 1000 observations of an MA(1) process with theta = -0.9.
# ArmaProcess takes lag polynomials, hence the leading 1 in each array.
# NOTE(review): the sample is not seeded, so estimates vary run to run.
ar1 = np.array([1])
ma1 = np.array([1, -0.9])
MA_object1 = ArmaProcess(ar1, ma1)
simulated_data_1 = MA_object1.generate_sample(nsample=1000)
'''
INSTRUCTIONS
* Import the class ARMA in the module statsmodels.tsa.arima_model.
* Create an instance of the ARMA class called mod using the simulated data simulated_data_1 and the order (p,q) of the model (in this case, for an MA(1)), is order=(0,1).
* Fit the model mod using the method .fit() and save it in a results object called res.
* Print out the entire summmary of results using the .summary() method.
* Just print out an estimate of the constant and phi parameter using the .params attribute (no arguments).
'''
# Import the ARMA module from statsmodels
from statsmodels.tsa.arima_model import ARMA
# Fit an MA(1) model to the first simulated data
mod = ARMA(simulated_data_1, order=(0,1))
res = mod.fit()
# Print out summary information on the fit
print(res.summary())
# Print out the estimate for the constant and for theta
# (typo "consant" fixed in the printed message below)
print("When the true theta=-0.9, the estimate of theta (and the constant) are:")
print(res.params)
| [
"sashakrasnov.nfo@gmail.com"
] | sashakrasnov.nfo@gmail.com |
94d4170d3d0d54d449799c097408c38eeade2496 | f9f1f887629855bbf12ecb0b7358fed5946b3caa | /.history/app_blog_forum/views_20201117210223.py | ff14f3fac7843008b6d907ae22f9c8a8e7ca936e | [] | no_license | hibamohi5/blog_forum | 4f687cee3ca6bdb1d0302b3657a77c01945404b3 | d6380eb7149355c79276b738da7da94c2ee03570 | refs/heads/main | 2023-01-14T18:33:53.043754 | 2020-11-20T01:52:22 | 2020-11-20T01:52:22 | 314,417,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,303 | py | from django.shortcuts import render, redirect
from .models import *
from django.contrib import messages
def index(request):
    """Render the landing page template."""
    return render(request, "index.html")
def register_new_user(request):
    """Validate and create a new account from POST data, then start a session.

    NOTE(review): the password is stored in plain text and `print` is used for
    debugging — hash passwords (e.g. Django's auth hashers) and use logging
    before shipping this.
    """
    errors = User.objects.user_registration_validator(request.POST)
    if len(errors) > 0:
        # Surface every validation problem as a flash message, then bounce back.
        for key, value in errors.items():
            error_msg = key + ' - ' + value
            messages.error(request, error_msg)
        return redirect("/")
    else:
        first_name_from_post = request.POST['first_name']
        last_name_from_post = request.POST['last_name']
        email_from_post = request.POST['email']
        password_from_post = request.POST['password']
        new_user = User.objects.create(
            first_name=first_name_from_post,
            last_name=last_name_from_post,
            email=email_from_post,
            password=password_from_post
        )
        print(new_user.id)
        # Log the new user in by stashing their id in the session.
        request.session['user_id'] = new_user.id
        return redirect('/register/view')
def login(request):
    """Check posted credentials against the User table and start a session.

    NOTE(review): passwords are compared in plain text, duplicate emails are
    possible (first match wins), and the debug `print` should go — see inline
    comments.
    """
    # user did provide email/password, now lets check database
    email_from_post = request.POST['email']
    password_from_post = request.POST['password']
    # this will return all users that have the email_from_post
    # in future we should require email to be unique
    users = User.objects.filter(email=email_from_post)
    if len(users) == 0:
        messages.error(request, "email/password does not exist")
        return redirect("/")
    user = users[0]
    print(user)
    if (user.password != password_from_post):
        messages.error(request, "email/password does not exist")
        return redirect("/")
    request.session['user_id'] = user.id
    return redirect("/login")
def logout(request):
    """Clear all session data and return to the landing page."""
    request.session.clear()
    return redirect("/")
def view_home(request):
    """Render the home page for the signed-in user; bounce anonymous visitors."""
    if 'user_id' not in request.session:
        return redirect("/")
    current_user = User.objects.get(id=request.session['user_id'])
    return render(request, "view_home.html", {"user": current_user})
def view_articles(request):
    """Render the articles page for the signed-in user; bounce anonymous visitors."""
    if 'user_id' not in request.session:
        return redirect('/')
    user = User.objects.get(id=request.session['user_id'])
    context = {
        'user':user
    }
    # The original returned render(request, "view") — a truncated template name —
    # and dropped the context it had just built. Template name inferred from the
    # function name and the view_home pattern above; confirm the template exists.
    return render(request, "view_articles.html", context)
| [
"hibamohi5@gmail.com"
] | hibamohi5@gmail.com |
cf134c811fb3713e81e175427e899b6ebd34c10f | 00ee6a3c859362bbc20342c568a27ea2a493e427 | /src/x007007007/djapp/localnet/nameserver/models/domain.py | 35a784009909f91f382019ba5f13b41bf6ab4811 | [
"MIT"
] | permissive | x007007007/raspberrypi | 7721b1fde2763fd28db579ca65217b81ee2193ae | 9dfe49666c029b8bb617830a5c5a873a6106d853 | refs/heads/master | 2022-10-04T04:51:29.974216 | 2022-10-03T16:36:00 | 2022-10-03T16:36:00 | 56,951,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | from x007007007.djapp import _models
class DomainQuerySet(_models.QuerySet):
    """QuerySet for DomainModel with a shortcut for enabled domains."""
    def available(self):
        # Only domains explicitly switched on (enable defaults to False).
        return self.filter(enable=True)
class DomainModel(_models.Model):
    """A domain name record that can be toggled on/off via `enable`."""
    objects = DomainQuerySet.as_manager()
    # max_length=254 presumably mirrors the DNS name length limit — confirm.
    name = _models.CharField(max_length=254)
    enable = _models.BooleanField(default=False)
    def __str__(self):
        return f'<Domain: ({self.pk}) .{self.name}>'
"x007007007@hotmail.com"
] | x007007007@hotmail.com |
aa7b589d699b8db00da9ad7ac174df285adf2b18 | fa688be812459dee92b16defaf3446103f7e557c | /models.py | 45775bda2d4e3c55506cd174fe4a979b84e5f298 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | NextFloor/paste | ed042ebb2a711f2438e3aa1e534a540cd572b411 | 829f7dae7f30c621def9378b82b62d19b2ede043 | refs/heads/master | 2023-02-04T00:20:59.229854 | 2020-04-17T06:59:58 | 2020-04-17T06:59:58 | 97,236,204 | 7 | 1 | MIT | 2023-02-02T06:24:20 | 2017-07-14T13:24:01 | Python | UTF-8 | Python | false | false | 3,755 | py | import botocore
import random
import uuid
from datetime import datetime, timedelta
from passlib.hash import argon2
from pygments.lexers import guess_lexer
from sqlalchemy.sql import exists
from flask import abort
from flask import current_app as app
from flask_boto3 import Boto3
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
boto3 = Boto3()
class Paste(db.Model):
    """A stored paste: either inline source text or an S3-hosted resource.

    ``slug`` is a 4-character random identifier chosen in ``__init__``;
    ``expire_at`` (optional) marks when the paste should disappear, which
    is enforced lazily in :meth:`get_or_404`.
    """
    __tablename__ = 'paste'
    # 4-character random identifier generated in __init__.
    slug = db.Column(db.String(4), primary_key=True)
    # Inline paste text, or (when is_resource is True) the S3 object key.
    source = db.Column(db.Text, nullable=False)
    # Pygments lexer alias used for syntax highlighting.
    lexer = db.Column(db.String(32), nullable=False)
    title = db.Column(db.String(64))
    # Argon2 hash (see _validate_password); NULL/empty means no password.
    password = db.Column(db.String(128))
    # True when ``source`` names an uploaded S3 object instead of text.
    is_resource = db.Column(db.Boolean, default=False)
    # NOTE(review): view_count is never incremented in this module —
    # confirm the counter is updated elsewhere.
    view_count = db.Column(db.Integer, nullable=False, default=0)
    created_at = db.Column(db.DateTime, default=datetime.now)
    expire_at = db.Column(db.DateTime)
    def __init__(self, source, highlight, expiration, title, password, is_resource):
        """Create a paste and pick a free random slug.

        :param source: paste text or S3 key; must be non-empty.
        :param highlight: lexer alias, or 'auto' to guess from the source.
        :param expiration: lifetime in minutes; <= 0 means never expires.
        :raises ValueError: if ``source`` is empty.
        :raises RuntimeError: if no free slug is found in 5 attempts.
        """
        expiration = int(expiration)
        if not source:
            raise ValueError()
        self.source = source
        self.is_resource = is_resource
        if title:
            self.title = title
        if password:
            self.password = password
        if expiration > 0:
            self.expire_at = datetime.now() + timedelta(minutes=expiration)
        if highlight == 'auto':
            # Let Pygments pick the most likely lexer for the pasted text.
            self.lexer = guess_lexer(source).aliases[0]
        else:
            self.lexer = highlight
        # Try up to 5 random slugs; the for/else raises when all collide.
        for _ in range(5):
            slug = self._generate_random_slug()
            if not db.session.query(exists().where(Paste.slug == slug)).scalar():
                self.slug = slug
                break
        else:
            raise RuntimeError()
    # NOTE(review): relies on ``db.validates`` delegating to
    # sqlalchemy.orm.validates — confirm this flask_sqlalchemy version
    # exposes it.
    @db.validates('password')
    def _validate_password(self, key, password):
        """Store only the Argon2 hash of any assigned password."""
        return argon2.hash(password)
    def verify_password(self, password):
        """True if the paste has no password or *password* matches the hash."""
        return (not self.password) or argon2.verify(password, self.password)
    def generate_presigned_resource_url(self):
        """Return a 60-second presigned S3 GET URL for this resource paste."""
        s3 = boto3.clients['s3']
        url = s3.generate_presigned_url('get_object', {
            'Bucket': app.config['AWS_S3_BUCKET'],
            'Key': self.source,
        }, ExpiresIn=60)
        return url
    @classmethod
    def get_or_404(cls, slug):
        """Fetch a paste by slug, lazily deleting it (and its S3 object)
        once expired; aborts with 404 if missing or expired."""
        paste = Paste.query.get_or_404(slug)
        if paste.expire_at and (paste.expire_at <= datetime.now()):
            if paste.is_resource:
                # Expired resource pastes also remove their S3 object.
                s3 = boto3.clients['s3']
                s3.delete_object(
                    Bucket=app.config['AWS_S3_BUCKET'],
                    Key=paste.source,
                )
            db.session.delete(paste)
            db.session.commit()
            abort(404)
        return paste
    @staticmethod
    def _generate_random_slug():
        """4 chars drawn from an alphabet that skips look-alike characters."""
        return ''.join(random.choice('ACDEFGHJKLNPQRSTXYabcdefghijknopqrstxyz3456789') for _ in range(4))
    @staticmethod
    def generate_random_resource_key():
        """Return a UUID key not already present in the S3 bucket.

        A 404 ClientError from head_object proves the key is free; any
        other error propagates.  RuntimeError after 5 collisions.
        """
        s3 = boto3.clients['s3']
        for _ in range(5):
            key = str(uuid.uuid4())
            try:
                s3.head_object(
                    Bucket=app.config['AWS_S3_BUCKET'],
                    Key=key,
                )
            except botocore.exceptions.ClientError as e:
                error_code = int(e.response['Error']['Code'])
                if error_code == 404:
                    return key
                else:
                    raise
        else:
            raise RuntimeError()
    @staticmethod
    def upload_file(key, fs):
        """Upload *fs* (an object with .read() and .filename) to S3 under
        *key*, preserving the original filename via Content-Disposition."""
        s3 = boto3.clients['s3']
        s3.put_object(
            Body=fs.read(),
            Bucket=app.config['AWS_S3_BUCKET'],
            ContentDisposition='attachment; filename="{}"'.format(fs.filename),
            Key=key,
        )
| [
"devunt@gmail.com"
] | devunt@gmail.com |
3e0e8742d55a9c6fc96366fb5ebe8ab2bab27bff | bfd7de2bf935e969ef64431c3760369525ea9db5 | /백준/1차원배열/OX퀴즈.py | 6eadfa9df4e06df0e2644a435e26bb342aab4aa1 | [] | no_license | greenhelix/AlgorithmStudy | 29923de1c2bf4c484b6ea7070af1868b14c9acf1 | 04019cba5c2de2e1ce472420401952ed96087c96 | refs/heads/master | 2023-05-11T10:51:13.521785 | 2023-05-10T15:05:20 | 2023-05-10T15:05:20 | 238,429,809 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | # 백준 8958번
# OOXXOXXOOO 같이 문제의 결과가 있다.
# 연속되는 0의 등장하면 점수를 연속된 0의 수만큼 계산
# input>>
# 5
# OOXXOXXOOO
# OOXXOOXXOO
# OXOXOXOXOXOXOX
# OOOOOOOOOO
# OOOOXOOOOXOOOOX
# output >>
# 10
# 9
# 7
# 55
# 30
# Baekjoon 8958 (OX quiz): consecutive 'O' answers score 1, 2, 3, ... points,
# and an 'X' resets the streak.  Read all quiz strings first, then score each.
case_count = int(input())
quizzes = [str(input()) for _ in range(case_count)]
for quiz in quizzes:
    total = 0
    streak = 0
    for answer in quiz:
        if answer == 'O':
            streak += 1
            total += streak
        else:
            # An 'X' contributes nothing and breaks the streak.
            streak = 0
    print(total)
| [
"dlrghks4444@gmail.com"
] | dlrghks4444@gmail.com |
b0b95374a9a79730f59536a506870227180732e9 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1_neat/16_0_1_Pyranja_counting_sheep.py | 91081f1789d16088e1118c785a1fc6233bb1685b | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 727 | py | #!/usr/bin/env python3
import sys, logging
"""codejam 2016 - counting sheep"""
def solve(n):
    """Return the last number counted before every digit 0-9 has appeared
    among the multiples of *n*, or 'INSOMNIA' when n == 0 (never ends)."""
    if n == 0:
        return 'INSOMNIA'
    remaining = set('0123456789')
    multiple = 0
    while remaining:
        multiple += n
        remaining -= set(str(multiple))
    return multiple
# ========================================= boilerplate ========================================== #
def main(cases):
    """Solve every input line and join the formatted answers with newlines."""
    answers = []
    for index, case in enumerate(cases, 1):
        answers.append(formatCase(index, solve(int(case))))
    return '\n'.join(answers)
def formatCase(idx, answer):
    """Render one answer in the judge's 'Case #i: answer' format."""
    return 'Case #' + str(idx) + ': ' + str(answer)
if __name__ == '__main__':
logging.basicConfig(level=logging.WARN)
print(main(sys.stdin.readlines()[1:]))
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
c37c536fc92fc60bf9634767a43b3d98a1823423 | a995e950a202251be3c674f4b7df3fc0111b0c9b | /isur2/urls.py | 3681e1f549407dbfc8d61c91b00c841a97ae6509 | [] | no_license | asmuratbek/isurlessons | 83188f1ba3469cb92274501fe466bbafc07281e9 | 8fd17b6f57ee16c6cee3361808256f58618fd5ce | refs/heads/master | 2021-01-20T13:17:10.068541 | 2017-05-13T14:17:43 | 2017-05-13T14:17:43 | 90,466,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,164 | py | """isur2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
import comments
import news.urls
from isur2 import settings
from social.views import *
# URL routing table: Jet admin skin first, then the Django admin, then the
# site views (index, blog, news, comments) and the CKEditor uploader.
urlpatterns = [
    url(r'^jet/', include('jet.urls', 'jet')),
    url(r'^jet/dashboard/', include('jet.dashboard.urls', 'jet-dashboard')),
    url(r'^admin/', admin.site.urls),
    url(r'^$', index, name='index'),
    url(r'^blog/$', BlogListView.as_view(), name='blog'),
    url(r'^thanks/news/', include('news.urls', namespace='create_news')),
    url(r'^thanks/blog/', include('blog.urls', namespace='create_blog')),
    url(r'^news/$', NewsListView.as_view(), name='news'),
    url(r'^news/create/', NewsCreateView.as_view(), name='news_create'),
    url(r'^blog/create/', BlogCreateView.as_view(), name='blog_create'),
    url(r'^blog/(?P<pk>[0-9]+)/update/$', BlogUpdateView.as_view(), name='blog_update'),
    # url(r'^news/get/(?P<pk>[0-9]+)$', NewsDetailView.as_view(), name='get_news'),
    url(r'^news/(?P<id>[0-9]+)$', add_news_comment, name='get_news'),
    url(r'^blog/get/(?P<id>[0-9]+)$', get_blog, name='get_blog'),
    url(r'^comments/add$', comments_add, name='comments_add'),
    url(r'^comments/$', comments_all, name='comments_all'),
    url(r'^ckeditor/', include('ckeditor_uploader.urls')),
]
# Serve uploaded media and collected static files (development-style serving).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# urlpatterns += staticfiles_urlpatterns()
"asmuratbek@gmail.com"
] | asmuratbek@gmail.com |
52178c5004f54c1e21eb8c03f81c52949282cbcb | 7002368b209d45dc6f076cab4064fecb1d2cb28d | /openimu/utils.py | 70ba7f1214eed1487d289fc1195e70ec63398c4f | [] | no_license | klaird/python-openimu | 970269769b6246256f45b8c4ed4a7b9cd9b6da66 | 936942491f02d377d152cf19032b380273dadef2 | refs/heads/master | 2022-04-17T17:28:37.192828 | 2020-03-25T04:57:09 | 2020-03-25T04:57:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | import sys
import os
import pkgutil
def is_in_bundle():
    """True when running from a PyInstaller one-file bundle
    (``sys.frozen`` set and truthy, and ``sys._MEIPASS`` present)."""
    return getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS')
def get_executor_path():
    """Return the directory used for application files, creating it if needed.

    Inside a PyInstaller bundle this is the executable's own directory;
    otherwise it is ``~/openimu`` (created on first use).
    """
    if is_in_bundle():
        path = os.path.abspath(os.path.dirname(sys.executable))
    else:
        path = os.path.join(os.path.expanduser('~'), 'openimu')  # sys.path[0]
        if not os.path.isdir(path):
            try:
                os.makedirs(path)
            except OSError:
                # Another process may have created the directory between the
                # isdir() check and makedirs(); only re-raise if it is still
                # missing (fixes the check-then-create race).
                if not os.path.isdir(path):
                    raise
    return path
def get_content_from_bundle(package, path):
    """Load a data file shipped with the 'openimu' package.

    Frozen builds read straight from the bundled *package*; source
    checkouts prefix the relative path with the top-level module name.
    """
    if is_in_bundle():
        return pkgutil.get_data(package, path)
    return pkgutil.get_data('openimu', os.path.join(package, path))
| [
"ywsong@aceinna.com"
] | ywsong@aceinna.com |
78cbe639071b36c9dc723169f358e8c6f3a4a7e5 | dba16143d8fa6aa73ca1d4df7bcfaca42824412c | /src/year2022/day04b.py | e6f81b1ff03c0c75c6dad604b168d793845c54e3 | [
"Unlicense"
] | permissive | lancelote/advent_of_code | 84559bf633189db3c3e4008b7777b1112f7ecd30 | 4b8ac6a97859b1320f77ba0ee91168b58db28cdb | refs/heads/master | 2023-02-03T14:13:07.674369 | 2023-01-24T20:06:43 | 2023-01-24T20:06:43 | 47,609,324 | 11 | 0 | null | 2019-10-07T07:06:42 | 2015-12-08T08:35:51 | Python | UTF-8 | Python | false | false | 435 | py | """2022 - Day 4 Part 2: Camp Cleanup."""
from src.year2022.day04a import Pair
from src.year2022.day04a import process_data
def overlap(pair: Pair) -> bool:
    """Return True when the two assignment ranges share any section."""
    first, second = pair
    a1, b1 = first
    a2, b2 = second
    second_starts_inside_first = a1 <= a2 <= b1
    first_starts_inside_second = a2 <= a1 <= b2
    return second_starts_inside_first or first_starts_inside_second
def solve(task: str) -> int:
    """Count assignment pairs whose section ranges overlap at all."""
    return sum(1 for pair in process_data(task) if overlap(pair))
"lancelote.du.lac@gmail.com"
] | lancelote.du.lac@gmail.com |
78a54bb8303ec0f808abbef1226b492cd124a644 | 1564d12d61f669ce9f772f3ef7563167f7fe13bf | /codechef/august/lunchtime/chefAndTrip.py | 6b92dc5e388dbab98b60015f6c662a736547c0ea | [] | no_license | sakshamk6999/codingPractice | 73ec4873defb0f0d2e47173150a589ee12e5e0a1 | f727aac6d87448b19fc9d48660dc6978fe5edc14 | refs/heads/master | 2020-12-01T20:22:36.299535 | 2020-02-04T05:55:53 | 2020-02-04T05:55:53 | 230,757,937 | 0 | 0 | null | 2020-02-12T20:38:12 | 2019-12-29T14:00:22 | Python | UTF-8 | Python | false | false | 1,583 | py | from collections import defaultdict
for _ in range(int(input())):
n, k = map(int, input().split())
a = list(map(int, input().split()))
record = defaultdict(int)
for i in range(1, n + 1):
record[i] = 1
l = []
i = 0
temp = []
tl = 0
while i < n:
if a[i] == -1:
temp.append(-1)
i += 1
tl += 1
else:
temp.append(a[i])
l.append([temp, tl + 1])
tl = 1
temp = [a[i]]
i += 1
if a[n - 1] == -1:
l.append([temp, tl])
for i in l:
nTemp = i[1]
tempList = i[0]
if tempList[0] != -1:
record[tempList[0]] = 0
if tempList[0] != -1:
record[tempList[nTemp - 1]] = 0
initial = -1
later = -1
for i in range(1, tempList[0]):
if record[i] == 1:
initial = i
record[i] = 0
break
if initial == -1:
for i in range(tempList[0] + 1, k + 1):
if record[i] == 1:
initial = i
record[i] = 0
break
for i in range(1, tempList[0]):
if record[i] == 1:
later = i
record[i] = 0
break
if later == -1:
for i in range(tempList[0] + 1, k + 1):
if record[i] == 1:
later = i
record[i] = 0
break
if later == -1:
for j in range(nTemp):
| [
"sakshamkhatwani@gmail.com"
] | sakshamkhatwani@gmail.com |
0006254dc5982808aec74a2e9542824225d415d7 | 7e9dbc8dddc3f7e7a74adadabc39c6bc31f5208d | /in_place_rev_linked_list/rev_linked_list.py | e92141ed672ca20d465ca926f70fd25196917a55 | [] | no_license | ashish-bisht/ds_algo_handbook | 113bdb20d40e4b885791218d125aaae957243ded | a1847c8c4cc995995bc4791d093a5a7496e4b15b | refs/heads/master | 2023-01-19T02:07:42.622813 | 2020-11-25T04:16:38 | 2020-11-25T04:16:38 | 309,753,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py |
class Node:
    # Singly linked list node: ``value`` holds the payload, ``next`` points
    # to the successor node (None marks the end of the list).
    def __init__(self, value, next=None):
        self.value = value
        self.next = next
def display(head):
    """Print each node's value from *head* to the tail, one per line."""
    node = head
    while node is not None:
        print(node.value)
        node = node.next
def reverse(head):
    """Reverse the list in place and return the new head (the old tail)."""
    previous = None
    while head is not None:
        # Tuple assignment: relink the node, advance both cursors.
        head.next, previous, head = previous, head, head.next
    return previous
# Build the demo list 2 -> 4 -> 6 -> 8 -> 10 and print it.
head = Node(2)
head.next = Node(4)
head.next.next = Node(6)
head.next.next.next = Node(8)
head.next.next.next.next = Node(10)
display(head)
# Reverse in place; note 'head' now refers to the tail of the reversed list.
reversed_lst = reverse(head)
display(reversed_lst)
| [
"ashishbisht723@gmail.com"
] | ashishbisht723@gmail.com |
6e8fcc257405350e2e5a799997d018555f82b433 | c25a17f0f82c2eebca55bbe180f4c2ccbbf00292 | /03_Data_Science/1_Collection/CSV_Handle/csv_test_01.03.py | 0b0b7e514f9d843c28239b3be0f4ca1ec2808d51 | [] | no_license | superbeom97/jumpjump | a0a4da6f0df0483ef0cef9833b5fe0402ec63c9c | fc45efce2a2b00c614aa5aa54b36be1572ed40ce | refs/heads/master | 2021-09-15T09:35:16.903857 | 2018-05-30T00:00:59 | 2018-05-30T00:00:59 | 111,883,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,496 | py | import csv
import math
def get_csv_rowInstance(row_name):
    """Collect the column named *row_name* from every data row of the
    module-level ``data`` table (the header row is excluded)."""
    column_position = data[0].index(row_name)
    return [record[column_position] for record in data[1:]]
def get_csv_columnInstance(primary_key):
    """Return the first data row whose primary key (first field) matches
    *primary_key*; raises IndexError when no row matches (as before)."""
    matches = [record for record in data[1:] if record[0] == primary_key]
    return matches[0]
def row_Print(row_instance):
    """Print every element of the row on its own line."""
    for value in row_instance:
        print(value)
def column_Print(element_instance):
    """Print every element of the column on its own line."""
    for value in element_instance:
        print(value)
def element_row_Print(row_instance):
    """Print the selected access key's elements on one space-separated line."""
    print("<<선택하신 access key값의 요소는 다음과 같습니다>>")
    for value in row_instance:
        print(value, end=" ")
    print("")
def my_Sum(row_instance):
    """Print the elements and then their sum (as floats)."""
    element_row_Print(row_instance)
    total = sum(float(value) for value in row_instance)
    print("총합 : %g" % total)
def my_Average(row_instance):
    """Print the elements and then their arithmetic mean."""
    element_row_Print(row_instance)
    mean = sum(float(value) for value in row_instance) / len(row_instance)
    print("평균 : %g" % mean)
def my_Max(row_instance):
    """Print the elements and then the largest value."""
    element_row_Print(row_instance)
    largest = max(float(value) for value in row_instance)
    print("최댓값 : %g" % largest)
def my_Min(row_instance):
    """Print the elements and then the smallest value.

    Bug fix: the label previously read "최댓값" (maximum) even though the
    minimum was printed; it now reads "최솟값" (minimum).
    """
    element_row_Print(row_instance)
    min_row = []
    for element_row in row_instance:
        min_row.append(float(element_row))
    print("최솟값 : %g" % min(min_row))
def my_Deviation(row_instance):
    """Print each element next to its deviation from the sample mean."""
    element_row_Print(row_instance)
    mean = sum(float(value) for value in row_instance) / len(row_instance)
    print("표본 편차")
    for value in row_instance:
        number = float(value)
        # %-3g left-aligns within 3 columns; %3g right-aligns.
        print("%-3g %3g" % (number, number - mean))
def my_Variance(row_instance):
    """Print the elements and return the population variance,
    computed as E[x^2] - (E[x])^2."""
    element_row_Print(row_instance)
    values = [float(value) for value in row_instance]
    count = len(values)
    mean_of_squares = sum(v * v for v in values) / count
    mean = sum(values) / count
    return mean_of_squares - mean * mean
def my_Variance_Print(variance_row):
    """Print the variance value.

    Bug fix: the label previously read "편차" (deviation) although the
    printed value is the variance; it now reads "분산", matching menu
    option 8 ("분산").
    """
    print("분산 : %g" % (variance_row))
def my_Standard_Deviation(variance_row):
    """Print the standard deviation (square root of the given variance)."""
    std = math.sqrt(variance_row)
    print("표준편차 : %g" % std)
def my_Cendant(row_instance):
    """Print the elements and return them as floats sorted ascending."""
    element_row_Print(row_instance)
    return sorted(float(value) for value in row_instance)
def my_Ascendant(cendant_row):
    """Print the (already ascending) values as one space-separated line."""
    print("<<오름차순 정렬>>")
    for value in cendant_row:
        print("%g" % value, end=" ")
    print("")
def my_Descendant(cendant_row):
    """Reverse the list and print it as a descending line.

    Note: reverses the caller's list in place, as before.
    """
    cendant_row.reverse()
    print("<<내림차순 정렬>>")
    for value in cendant_row:
        print("%g" % value, end=" ")
    print("")
# Load the whole CSV into memory: data[0] is the header row, data[1:] the
# records, all fields kept as strings.
with open("Demographic_Statistics_By_Zip_Code.csv", newline="") as infile:
    data = list(csv.reader(infile))
# Interactive menu loop: options 1-11 dispatch to the helpers above,
# option 12 exits.  Any other number silently re-prompts.
while True:
    number = int(input("<<원하는 서비스를 선택하시오>> \n열:1, 행:2, 총합:3, 평균:4, 최댓값:5, 최솟값:6, 편차:7, 분산:8, 표준편차:9, 오름차순:10, 내림차순:11, 종료:12\n=> "))
    if number == 1:
        access_key = input("구하고자 하는 열의 access key값을 입력하시오: ")
        column_Print(get_csv_columnInstance("%s" %access_key))
    elif number == 2:
        access_key = input("구하고자 하는 행의 access key값을 입력하시오: ")
        row_Print(get_csv_rowInstance("%s" % access_key))
    elif number == 3:
        access_key = input("총합을 원하는 행의 access key값을 입력하시오: ")
        my_Sum(get_csv_rowInstance("%s" % access_key))
    elif number == 4:
        access_key = input("평균을 원하는 행의 access key값을 입력하시오: ")
        my_Average(get_csv_rowInstance("%s" % access_key))
    elif number == 5:
        access_key = input("최댓값을 원하는 행의 access key값을 입력하시오: ")
        my_Max(get_csv_rowInstance("%s" % access_key))
    elif number == 6:
        access_key = input("최솟값을 원하는 행의 access key값을 입력하시오: ")
        my_Min(get_csv_rowInstance("%s" % access_key))
    elif number == 7:
        access_key = input("편차를 원하는 행의 access key값을 입력하시오: ")
        my_Deviation(get_csv_rowInstance("%s" % access_key))
    elif number == 8:
        access_key = input("분산을 원하는 행의 access key값을 입력하시오: ")
        my_Variance_Print(my_Variance(get_csv_rowInstance("%s" % access_key)))
    elif number == 9:
        access_key = input("표준편차를 원하는 행의 access key값을 입력하시오: ")
        my_Standard_Deviation(my_Variance(get_csv_rowInstance("%s" % access_key)))
    elif number == 10:
        access_key = input("오름차순을 원하는 행의 access key값을 입력하시오: ")
        my_Ascendant(my_Cendant(get_csv_rowInstance("%s" % access_key)))
    elif number == 11:
        access_key = input("내림차순을 원하는 행의 access key값을 입력하시오: ")
        my_Descendant(my_Cendant(get_csv_rowInstance("%s" % access_key)))
    elif number == 12:
        print("이용해 주셔서 감사합니다!!")
        break
"beom9790@naver.com"
] | beom9790@naver.com |
3d8e86b94f8c6ea393d43c5404233f19c3dff896 | 9d39f6ec24ea355ee82adfd4487453172953dd37 | /tao_detection_release/configs/transferred/faster_rcnn_r50_fpn_1x_lvis_reweighthead_bf.py | eeab0d968e0f56d946a0e730ab9ff8489f0f65f3 | [
"Apache-2.0"
] | permissive | feiaxyt/Winner_ECCV20_TAO | d69c0efdb1b09708c5d95c3f0a38460dedd0e65f | dc36c2cd589b096d27f60ed6f8c56941b750a0f9 | refs/heads/main | 2023-03-19T14:17:36.867803 | 2021-03-16T14:04:31 | 2021-03-16T14:04:31 | 334,864,331 | 82 | 6 | null | null | null | null | UTF-8 | Python | false | false | 5,791 | py | # model settings
model = dict(
type='FasterRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='ReweightBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
reweight_cfg=dict(
cls_weight='./data/lvis/cls_weight_bf.pt',
),
roi_feat_size=7,
num_classes=1231,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
score_thr=0.0,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=300)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
)
# dataset settings
dataset_type = 'LvisDataset'
data_root = 'data/lvis/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'lvis_v0.5_train.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'lvis_v0.5_val.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'lvis_v0.5_val.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/faster_rcnn_r50_fpn_1x_lvis_reweighthead_bs'
load_from = './work_dirs/faster_rcnn_r50_fpn_1x_lvis/latest.pth'
# load_from = './data/download_models/faster_rcnn_r50_fpn_2x_20181010-443129e1.pth'
resume_from = None
workflow = [('train', 1)]
# Train which part, 0 for all, 1 for cls, 2 for bbox_head
selectp = 1
| [
"feiaxyt@163.com"
] | feiaxyt@163.com |
1345c47a293a6a346ffd0dbc4a78ec9fb339dfcc | 01d4967b9f8605c2954a10ed7b0e1d7936022ab3 | /components/ownership.gypi | 46a7a51093e7e858d0092d1d676f77bd9e00cf3a | [
"BSD-3-Clause"
] | permissive | tmpsantos/chromium | 79c4277f98c3977c72104ecc7c5bda2f9b0295c2 | 802d4aeeb33af25c01ee5994037bbf14086d4ac0 | refs/heads/master | 2021-01-17T08:05:57.872006 | 2014-09-05T13:39:49 | 2014-09-05T13:41:43 | 16,474,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | gypi | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [{
'target_name': 'ownership',
'type': '<(component)',
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/crypto/crypto.gyp:crypto',
],
'defines': [
'OWNERSHIP_IMPLEMENTATION',
],
'sources': [
'ownership/mock_owner_key_util.cc',
'ownership/mock_owner_key_util.h',
'ownership/owner_key_util.cc',
'ownership/owner_key_util.h',
'ownership/owner_key_util_impl.cc',
'ownership/owner_key_util_impl.h',
],
}],
}
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
6a896d793698e10203bfb57202bdfca826183a3f | 7a2b4aca5ae841cb873e6bced7298c7884eb7e9d | /partOne.py | 6b1dbfe8a1fd8dadd50584cb64e554f06cba38c4 | [] | no_license | SunilKumar-ugra/Nueral_Networks_Tutorials | 73e297fa2df604c74c79eed1fdb2d891f9bd7fb0 | 8fa2d7f8c8bc3c6611b06a3d7860f8bfa11d5795 | refs/heads/master | 2020-08-02T22:48:04.176970 | 2019-09-28T16:53:04 | 2019-09-28T16:53:04 | 211,532,609 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | # Neural Networks Demystified
# Part 1: Data + Architecture
#
# Supporting code for short YouTube series on artificial neural networks.
#
# Stephen Welch
# @stephencwelch
import numpy as np
# X = (hours sleeping, hours studying), y = Score on test
X = np.array(([3,5], [5,1], [10,2],[11,2]), dtype=float)
# Per-feature maxima (axis=0) and per-sample maxima (axis=1) for inspection.
x = np.amax(X, axis=0)
xx = np.amax(X, axis=1)
# NOTE(review): X holds 4 samples but y holds only 3 scores — confirm
# whether the fourth row of X is intentional.
y = np.array(([75], [82], [93]), dtype=float)
print(X)
# Normalize
X = X/np.amax(X, axis=0)
y = y/100 #Max test score is 100
print(x)
print(xx)
"sk7899132595@gmail.com"
] | sk7899132595@gmail.com |
4e8f79161ea5ee145ef469ba6b36cf2c363e42c1 | af82475dc7eb45c478414372c222e7b6016359d4 | /python书籍/Python For Finance Code/Code of Python For Finance/4375OS_08_Code/4375OS_08_17_date_var.py | f569bdb0c198e340e01ec30e63c0fd18a5fff006 | [] | no_license | enfangzhong/PythonBaseCode | 8f58c8b817eb9f4b0f0a5be437a52d5b5fab3433 | 9ab4a578b2692fdbb6aeeacb310251d51f72e953 | refs/heads/master | 2020-05-17T16:26:02.598344 | 2019-04-27T20:49:40 | 2019-04-27T20:49:40 | 183,817,172 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | """
Name : 4375OS_08_17_date_var.py
Book : Python for Finance
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 12/26/2013
email : yany@canisius.edu
paulyxy@hotmail.com
"""
import pandas as pd
# Yahoo Finance CSV endpoint for IBM daily prices.
# NOTE(review): the chart.yahoo.com table API has been retired — confirm a
# replacement data source before relying on this download.
url='http://chart.yahoo.com/table.csv?s=IBM'
# First column (the date) becomes the index, parsed as datetimes.
x=pd.read_csv(url,index_col=0,parse_dates=True)
| [
"944727327@qq.com"
] | 944727327@qq.com |
42eefd3a6d0d2cec8f98979cc4dc82356db3b8bb | e780a5bd72f98ca2513c993d64a85b08578166a6 | /buildout-cache/eggs/Zope2-2.13.26-py2.7.egg/Testing/ZopeTestCase/zopedoctest/testWarningsTest.py | 8dd208bceaed33f910d7d572212d8302ed3bc1e6 | [] | no_license | vedantc98/Plone-test | 023246597ffe848e2a49b9f65742ff49127b190b | 9fd520fc78481e2c0b9b7ec427821e7f961c777e | refs/heads/master | 2021-03-30T22:14:33.368739 | 2018-03-11T19:22:58 | 2018-03-11T19:22:58 | 124,671,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 839 | py | ##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Example doctest
"""
from unittest import TestSuite
from Testing.ZopeTestCase import ZopeDocFileSuite
def test_suite():
    """Assemble the doctest suite for WarningsTest.txt."""
    suite = TestSuite()
    suite.addTest(ZopeDocFileSuite('WarningsTest.txt'))
    return suite
| [
"vedantc98@gmail.com"
] | vedantc98@gmail.com |
615ad3d1352d00f1e5667b7258d5c581fde14184 | 89670ba42a5087a3965cadb678a9f87b6d8286cf | /huati/migrations/0011_auto_20171111_1211.py | 2e77a6d316d0ae1c993e9a7bf58b897a1c3513c4 | [] | no_license | qimengmeng/django_lei_zhihu | f56dd08229bf66e183f3f5b82145d5e183a4d161 | 296a7ee1435efcb2492e0ca078a1d9b5c153e41e | refs/heads/master | 2020-04-08T15:11:35.696655 | 2018-01-06T11:43:08 | 2018-01-06T11:43:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-11 04:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11 (see header); alters Hua.fenlei to a
    # ManyToManyField with related_name='fenlei'.  Generated migrations
    # should not be hand-edited.
    dependencies = [
        ('huati', '0010_auto_20171111_1202'),
    ]
    operations = [
        migrations.AlterField(
            model_name='hua',
            name='fenlei',
            field=models.ManyToManyField(related_name='fenlei', to='huati.Huafen'),
        ),
    ]
| [
"leileili126@163.com"
] | leileili126@163.com |
8b58710838901f702b0d87c2013b13497abe0572 | 6bae32e5ad8f198e31d6da864b0a9f44d54c5ec6 | /src/monteur/egginfo/write.py | e427ef7edb14d7221532778e5b91c5b7fc054297 | [] | no_license | thefunny42/Zeam-Setup | 42dcce867a947fb82e6b17cebd47f92285187a9e | 0c27945b87b0150ee462b493cf646111822e8867 | refs/heads/master | 2020-04-15T15:37:40.153075 | 2013-03-15T10:42:36 | 2013-03-15T10:42:36 | 445,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,474 | py |
import logging
import os
from monteur.utils import create_directory
logger = logging.getLogger('monteur')
def write_pkg_info(path, package):
    """Write a Metadata-Version 1.0 PKG-INFO file for *package* into *path*.

    Optional headers (Summary, Author, ...) are skipped when falsy; the
    Platform line is always written, defaulting to UNKNOWN.

    Fix: the file is now opened with a ``with`` block so the handle is
    closed even if a write fails.
    """
    with open(os.path.join(path, 'PKG-INFO'), 'w') as pkg_info:
        def write_option(key, value):
            # Optional headers are omitted entirely when empty.
            if value:
                pkg_info.write('%s: %s\n' % (key, value))

        pkg_info.write('Metadata-Version: 1.0\n')
        pkg_info.write('Name: %s\n' % package.name)
        pkg_info.write('Version: %s\n' % package.version)
        write_option('Summary', package.summary)
        write_option('Author', package.author)
        write_option('Author-email', package.author_email)
        write_option('License', package.license)
        pkg_info.write('Platform: %s\n' % (package.platform or 'UNKNOWN'))
def write_requires(path, package):
    """Write requires.txt: base requirements, then one [extra] section each.

    Nothing is written at all when the package has no base requirements,
    even if extras exist (preserves the historical behaviour).

    Fix: the file is now opened with a ``with`` block so the handle is
    closed even if a write fails.
    """
    requirements = package.requirements
    if not requirements:
        return
    with open(os.path.join(path, 'requires.txt'), 'w') as req_file:
        for requirement in requirements:
            req_file.write(str(requirement) + '\n')
        for extra, extra_requirements in package.extras.items():
            req_file.write('\n\n[%s]\n' % extra)
            for requirement in extra_requirements:
                req_file.write(str(requirement) + '\n')
def write_missing_setuptool_files(path, package):
    """Create the setuptools marker files this tool does not generate itself
    (each contains a single newline)."""
    for filename in ('dependency_links.txt', 'not-zip-safe'):
        with open(os.path.join(path, filename), 'w') as marker:
            marker.write('\n')
def write_entry_points(path, package):
    """Write entry_points.txt in INI form, one section per entry-point group.

    No file is created when the package declares no entry points.

    Fix: the file is now opened with a ``with`` block so the handle is
    closed even if the write fails.
    """
    if not package.entry_points:
        return
    formatted_points = ''
    for section, entries in package.entry_points.items():
        formatted_points += '[%s]\n' % section
        for name, module in entries.items():
            formatted_points += '%s = %s\n' % (name, module)
        formatted_points += '\n'
    with open(os.path.join(path, 'entry_points.txt'), 'w') as entry_points:
        entry_points.write(formatted_points)
def write_egg_info(package, writers=(write_pkg_info,
                                     write_missing_setuptool_files,
                                     write_entry_points,
                                     write_requires), package_path=None):
    """Create an EGG-INFO directory for *package* and run each writer on it.

    Each writer is called as ``writer(egg_info_path, package)``.
    *package_path* defaults to ``package.path``.

    Fix: the default ``writers`` sequence is now a tuple instead of a list,
    so the shared default can never be mutated between calls (callers may
    still pass their own list).
    """
    if package_path is None:
        package_path = package.path
    logger.info('Writing EGG-INFO in %s for %s' % (package_path, package.name))
    path = os.path.join(package_path, 'EGG-INFO')
    create_directory(path)
    for writer in writers:
        writer(path, package)
| [
"thefunny@gmail.com"
] | thefunny@gmail.com |
96411aeaf2d788d02c078f263691bf88afb267b8 | 657c80336bce1cc6158cd349ce208c5e680a4d0d | /pyglet/canvas/cocoa.py | cd44673204ca093754a7e6678941b655c1cfa070 | [
"BSD-3-Clause"
] | permissive | Xinmudotmoe/pyglet | b37628618647bf3b1e3d7db28202a5e14c60450c | 144257c365ca85528c6a4c5bed8141e683d7a9b6 | refs/heads/master | 2021-05-29T22:05:40.676643 | 2015-10-24T05:55:49 | 2015-10-24T05:55:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,972 | py | """
"""
from ctypes import *
from ctypes import util
from pyglet import app
from .base import Display, Screen, ScreenMode, Canvas
from pyglet.libs.darwin.cocoapy import *
class CocoaDisplay(Display):
    """Display implementation backed by Quartz Display Services."""

    def get_screens(self):
        """Return a CocoaScreen for every currently active display ID."""
        MAX_DISPLAYS = 256
        display_ids = (CGDirectDisplayID * MAX_DISPLAYS)()
        display_count = c_uint32()
        quartz.CGGetActiveDisplayList(
            MAX_DISPLAYS, display_ids, byref(display_count))
        active_ids = list(display_ids)[:display_count.value]
        return [CocoaScreen(self, display_id) for display_id in active_ids]
class CocoaScreen(Screen):
    """Screen backed by a Quartz CGDirectDisplayID.

    Remembers the display mode active at construction time so
    :meth:`restore_mode` can return to it after :meth:`set_mode`.
    """
    def __init__(self, display, displayID):
        bounds = quartz.CGDisplayBounds(displayID)
        # FIX ME:
        # Probably need to convert the origin coordinates depending on context:
        # http://www.cocoabuilder.com/archive/cocoa/233492-ns-cg-rect-conversion-and-screen-coordinates.html
        x, y = bounds.origin.x, bounds.origin.y
        width, height = bounds.size.width, bounds.size.height
        super().__init__(
            display, int(x), int(y), int(width), int(height))
        self._cg_display_id = displayID
        # Save the default mode so we can restore to it.
        self._default_mode = self.get_mode()

    # FIX ME:
    # This method is needed to get multi-monitor support working properly.
    # However the NSScreens.screens() message currently sends out a warning:
    # "*** -[NSLock unlock]: lock (<NSLock: 0x...> '(null)') unlocked when not locked"
    # on Snow Leopard and apparently causes python to crash on Lion.
    #
    # def get_nsscreen(self):
    #     """Returns the NSScreen instance that matches our CGDirectDisplayID."""
    #     NSScreen = ObjCClass('NSScreen')
    #     # Get a list of all currently active NSScreens and then search through
    #     # them until we find one that matches our CGDisplayID.
    #     screen_array = NSScreen.screens()
    #     count = screen_array.count()
    #     for i in range(count):
    #         nsscreen = screen_array.objectAtIndex_(i)
    #         screenInfo = nsscreen.deviceDescription()
    #         displayID = screenInfo.objectForKey_(get_NSString('NSScreenNumber'))
    #         displayID = displayID.intValue()
    #         if displayID == self._cg_display_id:
    #             return nsscreen
    #     return None

    def get_matching_configs(self, template):
        """Match *template* against a throwaway canvas for this screen."""
        canvas = CocoaCanvas(self.display, self, None)
        return template.match(canvas)
    def get_modes(self):
        """Return every display mode this screen supports.

        The copied CFArray is released here; each CocoaScreenMode retains
        its own cgmode reference.
        """
        cgmodes = c_void_p(
            quartz.CGDisplayCopyAllDisplayModes(self._cg_display_id, None))
        modes = [CocoaScreenMode(self, cgmode)
                 for cgmode in cfarray_to_list(cgmodes)]
        cf.CFRelease(cgmodes)
        return modes
    def get_mode(self):
        """Return the screen's current display mode."""
        cgmode = c_void_p(quartz.CGDisplayCopyDisplayMode(self._cg_display_id))
        mode = CocoaScreenMode(self, cgmode)
        # Balance the Copy above; CocoaScreenMode holds its own retain.
        quartz.CGDisplayModeRelease(cgmode)
        return mode
    def set_mode(self, mode):
        """Capture the display and switch it to *mode* (must be one of ours),
        updating this screen's width/height to match."""
        assert mode.screen is self
        quartz.CGDisplayCapture(self._cg_display_id)
        quartz.CGDisplaySetDisplayMode(self._cg_display_id, mode.cgmode, None)
        self.width = mode.width
        self.height = mode.height
    def restore_mode(self):
        """Switch back to the mode saved at construction and release the
        display captured by set_mode()."""
        quartz.CGDisplaySetDisplayMode(
            self._cg_display_id, self._default_mode.cgmode, None)
        quartz.CGDisplayRelease(self._cg_display_id)
    def capture_display(self):
        """Capture the display for exclusive use (e.g. fullscreen)."""
        quartz.CGDisplayCapture(self._cg_display_id)
    def release_display(self):
        """Release a previously captured display."""
        quartz.CGDisplayRelease(self._cg_display_id)
class CocoaScreenMode(ScreenMode):
    """Wraps a Quartz CGDisplayMode, retaining it for this object's lifetime."""

    def __init__(self, screen, cgmode):
        super().__init__(screen)
        # Retain so the mode stays valid even after the caller releases it;
        # the matching release happens in __del__.
        quartz.CGDisplayModeRetain(cgmode)
        self.cgmode = cgmode
        self.width = int(quartz.CGDisplayModeGetWidth(cgmode))
        self.height = int(quartz.CGDisplayModeGetHeight(cgmode))
        self.depth = self.getBitsPerPixel(cgmode)
        self.rate = quartz.CGDisplayModeGetRefreshRate(cgmode)
    def __del__(self):
        # Balance the retain taken in __init__.
        quartz.CGDisplayModeRelease(self.cgmode)
        self.cgmode = None
    def getBitsPerPixel(self, cgmode):
        """Return the mode's color depth in bits (8/16/32), or 0 if unknown."""
        # from
        # /System/Library/Frameworks/IOKit.framework/Headers/graphics/IOGraphicsTypes.h
        IO8BitIndexedPixels = "PPPPPPPP"
        IO16BitDirectPixels = "-RRRRRGGGGGBBBBB"
        IO32BitDirectPixels = "--------RRRRRRRRGGGGGGGGBBBBBBBB"
        # Pixel encoding is reported as a CFString; copy, decode, release.
        cfstring = c_void_p(quartz.CGDisplayModeCopyPixelEncoding(cgmode))
        pixelEncoding = cfstring_to_string(cfstring)
        cf.CFRelease(cfstring)
        if pixelEncoding == IO8BitIndexedPixels:
            return 8
        if pixelEncoding == IO16BitDirectPixels:
            return 16
        if pixelEncoding == IO32BitDirectPixels:
            return 32
        return 0
class CocoaCanvas(Canvas):
    """Canvas bound to a screen and (optionally) an NSView."""

    def __init__(self, display, screen, nsview):
        super().__init__(display)
        self.screen = screen
        # May be None for a headless canvas used only for config matching.
        self.nsview = nsview
| [
"leif.theden@gmail.com"
] | leif.theden@gmail.com |
6a4d8a465d39c66733cc610bd65c7d3bd8d6ee32 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-sblp/sblp_ut=3.5_rd=1_rw=0.06_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=41/sched.py | e32c511497a8def6c0ab223964299fde320c376c | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | -S 0 -X RUN -Q 0 -L 2 99 400
-S 0 -X RUN -Q 0 -L 2 67 200
-S 0 -X RUN -Q 0 -L 2 67 200
-S 1 -X RUN -Q 0 -L 2 44 250
-S 1 -X RUN -Q 0 -L 2 43 200
-S 1 -X RUN -Q 0 -L 2 43 250
-S 2 -X RUN -Q 1 -L 1 39 250
-S 2 -X RUN -Q 1 -L 1 39 400
-S 2 -X RUN -Q 1 -L 1 35 200
-S 2 -X RUN -Q 1 -L 1 35 250
-S 2 -X RUN -Q 1 -L 1 29 300
-S 3 -X RUN -Q 2 -L 1 28 175
-S 3 -X RUN -Q 2 -L 1 28 175
-S 3 -X RUN -Q 2 -L 1 26 250
-S 3 -X RUN -Q 2 -L 1 21 125
-S 3 -X RUN -Q 2 -L 1 20 100
-S 4 -X RUN -Q 3 -L 1 19 200
-S 4 -X RUN -Q 3 -L 1 18 175
-S 4 -X RUN -Q 3 -L 1 17 150
-S 4 -X RUN -Q 3 -L 1 15 125
-S 4 -X RUN -Q 3 -L 1 13 100
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
f14b9015107fd96344bbe692fb7204178aa721d2 | 3d0ae7c8693463faa11bacad8e6ea9d0d70b9eb1 | /nlp/src/chunkerScorer.py | 57c8e2a06f10c06d1990556f4473081946c9d041 | [] | no_license | stefie10/slu_hri | a76f79094bd1740676fec5d889411ba3b1d9dc26 | 50753379953e1ff822162eeab094cffe4a30f3e1 | refs/heads/master | 2022-12-14T01:07:51.522258 | 2020-08-31T00:50:12 | 2020-08-31T00:50:12 | 291,386,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,720 | py | from routeDirectionCorpusReader import readSession
from chunker import makeTagger, tokenize
class ConfusionMatrix:
    """Accumulates binary-classification counts and derives summary metrics.

    Counts are stored as floats so every derived ratio is a float division
    (the surrounding file is Python 2, where int/int truncates).
    """

    def __init__(self):
        self.TP = 0.0
        self.FP = 0.0
        self.TN = 0.0
        self.FN = 0.0

    @property
    def numberOfExamples(self):
        """Total number of scored examples."""
        return self.TP + self.FP + self.TN + self.FN

    @property
    def accuracy(self):
        """Fraction of examples classified correctly: (TP + TN) / total.

        Fixed: the previous formula used (TP + FP), which counts *positive
        predictions*, not correct ones.
        """
        if self.numberOfExamples == 0:
            return 0.0
        return float(self.TP + self.TN) / self.numberOfExamples

    @property
    def precision(self):
        """TP / (TP + FP); 0.0 when no positive predictions were made.

        Guarding on the denominator (not on numberOfExamples) avoids a
        ZeroDivisionError when only TN/FN counts are non-zero.
        """
        if self.TP + self.FP == 0:
            return 0.0
        return float(self.TP) / (self.TP + self.FP)

    @property
    def recall(self):
        """TP / (TP + FN); 0.0 when there are no positive examples."""
        if self.TP + self.FN == 0:
            return 0.0
        return float(self.TP) / (self.TP + self.FN)

    @property
    def f1(self):
        """Harmonic mean of precision and recall; 0.0 when both are zero."""
        denom = self.precision + self.recall
        if denom == 0:
            return 0.0
        return 2.0 * self.precision * self.recall / denom
def findMatch(testAnnotation, groundTruthAnnotations, matchFunction):
    """Return (index, annotation) of the first ground-truth annotation for
    which matchFunction(testAnnotation, annotation) is truthy, or
    (None, None) when nothing matches."""
    candidates = (
        (position, candidate)
        for position, candidate in enumerate(groundTruthAnnotations)
        if matchFunction(testAnnotation, candidate)
    )
    return next(candidates, (None, None))
def ppMatch(x, y):
    """True when both annotations share the same spatial-relation span
    and the same landmark span."""
    same_relation = x.spatialRelation.range == y.spatialRelation.range
    same_landmark = x.landmark.range == y.landmark.range
    return same_relation and same_landmark
def npMatch(x, y):
    """True when both annotations refer to the same landmark span."""
    lhs, rhs = x.landmark.range, y.landmark.range
    return lhs == rhs
def score(groundTruthSessions, testSessions):
    # Score chunker output against hand-labelled ground truth using
    # noun-phrase (landmark-span) matching; prints per-instruction decisions
    # and the final precision/recall/F1.  (Python 2 print statements.)
    tagger = makeTagger()
    cm = ConfusionMatrix()
    for groundTruth in groundTruthSessions:
        testSession = testSessions[groundTruth]
        for instructionIdx, instruction in enumerate(groundTruth.routeInstructions):
            groundTruthAnnotations = groundTruth.routeAnnotations[instructionIdx]
            # Tokenize and POS-tag purely for the diagnostic printout below.
            indexes, tokens = tokenize(instruction)
            print "tokens", tokens
            tags = tagger.tag(tokens)
            print " ".join(["%s/%s" % (word, tag)
                            for word, tag in tags])
            # Track which ground-truth annotations were claimed by some
            # test annotation; the rest become false negatives.
            matchedIndexes = [False for g in groundTruthAnnotations]
            if len(groundTruthAnnotations) != 0:
                print "considering", groundTruth.key, "instruction", instructionIdx
            for testAnnotation in testSession.routeAnnotations[instructionIdx]:
                idx, groundTruthMatch = findMatch(testAnnotation,
                                                  groundTruthAnnotations,
                                                  npMatch)
                if groundTruthMatch is None:
                    # Test annotation with no ground-truth counterpart.
                    print "fp", testAnnotation
                    cm.FP += 1
                else:
                    print "tp", testAnnotation
                    print "\tmatched", groundTruthMatch
                    cm.TP += 1
                    matchedIndexes[idx] = True
            # Any ground-truth annotation left unmatched is a miss.
            for i, hasMatch in enumerate(matchedIndexes):
                if not hasMatch:
                    cm.FN += 1
                    print "fn", groundTruthAnnotations[i]
            #else:
            # what to do with true negatives
    print "precision", cm.precision
    print "recall", cm.recall
    print "f1", cm.f1
if __name__ == "__main__":
    # Compare the hand-labelled annotations ("stefie10") against the
    # automatically produced ones ("regexp_chunker") for the same spreadsheet.
    fname = "data/Direction understanding subjects Floor 1 (Final).ods"
    #fname = "data/Direction understanding subjects Floor 1.ods"
    groundTruthSessions = readSession(fname, "stefie10")
    testSessions = readSession(fname, "regexp_chunker")
    score(groundTruthSessions, testSessions)
| [
"stefie10@alum.mit.edu"
] | stefie10@alum.mit.edu |
b4bdb3c591598f57390240cf90a2ad80f0bde29b | 60e34c75afec810f4b1c2c82495d8d3017f32d33 | /09概率组合数学/03jump.py | b9ec6a464063c03077318da1157013605b35d5c4 | [] | no_license | ares5221/Data-Structures-and-Algorithms | af97c6b34b810c37f152af595846870a7b9b304b | 7c51eee0c375136f995cc063ffc60d33a520d748 | refs/heads/master | 2021-07-17T21:18:46.556958 | 2018-12-03T07:30:13 | 2018-12-03T07:30:13 | 144,227,642 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
'''
给定非负整数数组,初始时在数组起始位置放置一机器人,数组的每个元素表示在当前
位置机器人最大能够跳跃的数目。它的目的是用最少的步数到达数组末端。例如:给定
数组A=[2,3,1,1,2],最少跳步数目是2,对应的跳法是:232。
不能简单使用贪心。
初始步数step赋值为0;
记当前步的控制范围是[i,j],则用k遍历i到j
计算A[k]+k的最大值,记做j2;
step++;继续遍历[j+1,j2]
'''
def Jump(a, n):
    """Minimum number of jumps needed to reach index n-1 of `a`, starting
    at index 0, where a[k] is the farthest one may jump from index k.
    Returns -1 when the end is unreachable, 0 for a single-element array.

    Greedy level-by-level scan: each "step" widens the reachable window
    [window_start, window_end] to the farthest index any position inside
    the current window can reach.
    """
    if n == 1:
        return 0
    steps = 0
    window_start = window_end = 0
    while window_end < n:
        steps += 1
        farthest = window_end
        for idx in range(window_start, window_end + 1):
            farthest = max(farthest, idx + a[idx])
        if farthest >= n - 1:
            return steps
        window_start, window_end = window_end + 1, farthest
        # The window failed to advance: no position can move forward.
        if window_end < window_start:
            return -1
    return steps
if __name__ == '__main__':
    # Demo: the minimum number of jumps for this array is 5.
    sa = [2, 3, 1, 1, 2, 4, 1, 1, 6, 1, 7]
    res = Jump(sa, len(sa))
    print('最小步数', res)
| [
"674361437@qq.com"
] | 674361437@qq.com |
b4632e7af892ed9695871d7307a6e394648aaa00 | 49c4d5ddda86f05c15587c13cda11f9a40e4c4f1 | /yggdrasil/metaschema/datatypes/ContainerMetaschemaType.py | 8878fb0740b5e018c8d83165dd2bda0b017ec49a | [
"BSD-3-Clause"
] | permissive | ritviksahajpal/yggdrasil | 816c314db9fa48d5e8effbe498c014c7efd063ec | 777549413719918ba208d73018da4df678a1754e | refs/heads/master | 2020-05-17T12:58:44.339879 | 2019-04-24T21:21:56 | 2019-04-24T21:21:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,876 | py | from yggdrasil.metaschema.datatypes import (
get_type_class, complete_typedef, encode_data, encode_data_readable)
from yggdrasil.metaschema.datatypes.MetaschemaType import MetaschemaType
class ContainerMetaschemaType(MetaschemaType):
    r"""Type associated with a container of subtypes."""

    name = 'container'
    description = 'A container of other types.'
    # Native Python classes that map onto this metaschema type; subclasses
    # set this to e.g. (list,) or (dict,).
    python_types = []
    # Callable returning an empty native container (e.g. list or dict);
    # must be set by subclasses.
    _container_type = None
    # JSON schema type string for the container (e.g. 'array', 'object').
    _json_type = None
    # Schema property holding per-element type definitions
    # (e.g. 'items' for arrays, 'properties' for objects).
    _json_property = None

    def __init__(self, *args, **kwargs):
        # Per-element MetaschemaType instances, keyed like the container.
        self._typecls = self._container_type()
        super(ContainerMetaschemaType, self).__init__(*args, **kwargs)

    @classmethod
    def _iterate(cls, container):
        r"""Iterate over the contents of the container. Each element returned
        should be a tuple including an index and a value.

        Args:
            container (obj): Object to be iterated over.

        Returns:
            iterator: Iterator over elements in the container.

        """
        raise NotImplementedError("This must be overwritten by the subclass.")

    @classmethod
    def _assign(cls, container, index, value):
        r"""Assign an element in the container to the specified value.

        Args:
            container (obj): Object that element will be assigned to.
            index (obj): Index in the container object where element will be
                assigned.
            value (obj): Value that will be assigned to the element in the
                container object.

        """
        raise NotImplementedError("This must be overwritten by the subclass.")

    @classmethod
    def _has_element(cls, container, index):
        r"""Check to see if an index is in the container.

        Args:
            container (obj): Object that should be checked for index.
            index (obj): Index that should be checked for.

        Returns:
            bool: True if the index is in the container.

        """
        raise NotImplementedError("This must be overwritten by the subclass.")

    @classmethod
    def _get_element(cls, container, index, default):
        r"""Get an element from the container if it exists, otherwise return
        the default.

        Args:
            container (obj): Object that should be returned from.
            index (obj): Index of element that should be returned.
            default (obj): Default that should be returned if the index is not
                in the container.

        Returns:
            object: Container contents at specified element.

        """
        out = default
        if cls._has_element(container, index):
            out = container[index]
        return out

    @classmethod
    def encode_data(cls, obj, typedef):
        r"""Encode an object's data.

        Args:
            obj (object): Object to encode.
            typedef (dict): Type definition that should be used to encode the
                object.

        Returns:
            string: Encoded object.

        """
        container = cls._container_type()
        for k, v in cls._iterate(obj):
            # Use the element-specific typedef when one is provided.
            vtypedef = None
            if cls._json_property in typedef:
                vtypedef = cls._get_element(typedef[cls._json_property], k, None)
            vbytes = encode_data(v, typedef=vtypedef)
            cls._assign(container, k, vbytes)
        return container

    @classmethod
    def encode_data_readable(cls, obj, typedef):
        r"""Encode an object's data in a readable format.

        Args:
            obj (object): Object to encode.
            typedef (dict): Type definition that should be used to encode the
                object.

        Returns:
            string: Encoded object.

        """
        container = cls._container_type()
        for k, v in cls._iterate(obj):
            if cls._json_property in typedef:
                vtypedef = cls._get_element(typedef[cls._json_property], k, None)
            else:
                vtypedef = None
            vbytes = encode_data_readable(v, typedef=vtypedef)
            cls._assign(container, k, vbytes)
        return container

    @classmethod
    def decode_data(cls, obj, typedef):
        r"""Decode an object.

        Args:
            obj (string): Encoded object to decode.
            typedef (dict): Type definition that should be used to decode the
                object.

        Returns:
            object: Decoded object.

        """
        container = cls._container_type()
        for k, v in cls._iterate(obj):
            # Each element is decoded by the class registered for its type.
            vtypedef = cls._get_element(typedef[cls._json_property], k, {})
            vcls = get_type_class(vtypedef['type'])
            cls._assign(container, k, vcls.decode_data(v, vtypedef))
        return container

    @classmethod
    def extract_typedef(cls, metadata):
        r"""Extract the minimum typedef required for this type from the provided
        metadata.

        Args:
            metadata (dict): Message metadata.

        Returns:
            dict: Encoded type definition with unncessary properties removed.

        """
        out = super(ContainerMetaschemaType, cls).extract_typedef(metadata)
        if cls._json_property in out:
            contents = out[cls._json_property]
            if isinstance(contents, cls.python_types):
                # Recursively strip each element's typedef as well.
                for k, v in cls._iterate(contents):
                    if 'type' in v:
                        vcls = get_type_class(v['type'])
                        cls._assign(contents, k, vcls.extract_typedef(v))
                out[cls._json_property] = contents
        return out

    def update_typedef(self, **kwargs):
        r"""Update the current typedef with new values.

        Args:
            **kwargs: All keyword arguments are considered to be new type
                definitions. If they are a valid definition property, they
                will be copied to the typedef associated with the instance.

        Returns:
            dict: A dictionary of keyword arguments that were not added to the
                type definition.

        """
        # NOTE: `map` shadows the builtin here; kept for byte-compatibility.
        map = kwargs.get(self._json_property, None)
        map_out = self._container_type()
        if isinstance(map, self.python_types):
            for k, v in self._iterate(map):
                v_typedef = complete_typedef(v)
                if self._has_element(self._typecls, k):
                    # Element type already exists: update it and collect any
                    # leftover (rejected) properties in map_out.
                    self._assign(map_out, k,
                                 self._typecls[k].update_typedef(**v_typedef))
                else:
                    # First definition for this element: instantiate its type.
                    self._assign(self._typecls, k,
                                 get_type_class(v_typedef['type'])(**v_typedef))
                self._assign(map, k, self._typecls[k]._typedef)
            kwargs[self._json_property] = map
        out = super(ContainerMetaschemaType, self).update_typedef(**kwargs)
        if map_out:
            out[self._json_property] = map_out
        return out
| [
"langmm.astro@gmail.com"
] | langmm.astro@gmail.com |
87bfbb8dd54b0bf8bbc656327512c7e6baf6580c | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/monitor/azure-monitor-query/samples/sample_batch_query.py | 50d45f0e0b24bde23a6296c242aa30f492e9df3a | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 2,506 | py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
FILE: sample_batch_query.py
DESCRIPTION:
This sample demonstrates querying multiple queries in a batch.
USAGE:
python sample_batch_query.py
Set the environment variables with your own values before running the sample:
1) LOGS_WORKSPACE_ID - The The first (primary) workspace ID.
This example uses DefaultAzureCredential, which requests a token from Azure Active Directory.
For more information on DefaultAzureCredential, see https://docs.microsoft.com/python/api/overview/azure/identity-readme?view=azure-python#defaultazurecredential.
**Note** - Although this example uses pandas to print the response, it's optional and
isn't a required package for querying. Alternatively, native Python can be used as well.
"""
from datetime import datetime, timedelta, timezone
import os
import pandas as pd
from azure.monitor.query import LogsQueryClient, LogsBatchQuery, LogsQueryStatus
from azure.identity import DefaultAzureCredential
credential = DefaultAzureCredential()
client = LogsQueryClient(credential)

# [START send_query_batch]
# Three queries in one batch: a valid query, an intentionally invalid one
# (to demonstrate FAILURE handling), and one that returns partial results.
requests = [
    LogsBatchQuery(
        query="AzureActivity | summarize count()",
        timespan=timedelta(hours=1),
        workspace_id= os.environ['LOGS_WORKSPACE_ID']
    ),
    LogsBatchQuery(
        query= """bad query""",
        timespan=timedelta(days=1),
        workspace_id= os.environ['LOGS_WORKSPACE_ID']
    ),
    LogsBatchQuery(
        query= """let Weight = 92233720368547758;
        range x from 1 to 3 step 1
        | summarize percentilesw(x, Weight * 100, 50)""",
        workspace_id= os.environ['LOGS_WORKSPACE_ID'],
        timespan=(datetime(2021, 6, 2, tzinfo=timezone.utc), datetime(2021, 6, 5, tzinfo=timezone.utc)), # (start, end)
        include_statistics=True
    ),
]
results = client.query_batch(requests)

# Results come back in request order; each entry's status tells which
# result type it is.
for res in results:
    if res.status == LogsQueryStatus.FAILURE:
        # this will be a LogsQueryError
        print(res)
    elif res.status == LogsQueryStatus.PARTIAL:
        ## this will be a LogsQueryPartialResult
        print(res.partial_error)
        for table in res.partial_data:
            df = pd.DataFrame(table.rows, columns=table.columns)
            print(df)
    elif res.status == LogsQueryStatus.SUCCESS:
        ## this will be a LogsQueryResult
        table = res.tables[0]
        df = pd.DataFrame(table.rows, columns=table.columns)
        print(df)
# [END send_query_batch]
| [
"noreply@github.com"
] | openapi-env-test.noreply@github.com |
241673ed21e518b4c60725b37b18b79c68d3dc85 | 0dc8ddc02b9efc07f16ccd0e15cda4eb9c773763 | /fjfundo/mensalidades/tests/test_model_fundo.py | 7087a90a029e79b37a813fdc4912d09e3934746d | [] | no_license | juniorcarvalho/fjfundo | 04a8913e945101c7e47b6be1663af03c47149445 | 44e3e15c69e8648d7330859f9edf9e62655fe8f6 | refs/heads/master | 2020-09-17T15:34:31.674213 | 2016-10-27T16:19:34 | 2016-10-27T16:19:34 | 66,687,455 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | from django.test import TestCase
from datetime import date
from fjfundo.mensalidades.models import Fundo
class FundoModelTest(TestCase):
    """Unit tests for the Fundo model: creation and string representation."""

    def setUp(self):
        # One Fundo row shared by every test in this case.
        self.fundo = Fundo.objects.create(
            nome_fundo='fundo de formatura',
            data_inicial=date(2016, 1, 1),
            data_final=date(2016, 12, 31),
            cnpj='00000000000000'
        )
    def test_create(self):
        # setUp must have persisted at least one Fundo.
        self.assertTrue(Fundo.objects.exists())
    def test_str(self):
        # str(fundo) should be the fund's name.
        self.assertEqual('fundo de formatura', str(self.fundo))
| [
"joseadolfojr@gmail.com"
] | joseadolfojr@gmail.com |
945f254e902ef50da52591ba46d2709f73a6b1b0 | e5d74b142a03d7cccc4acd5fdcdc2af7c47dd728 | /dynamo_engine/fields.py | e1b34a3012e7b72e586163ca66c3de019507e5aa | [
"MIT"
] | permissive | eshandas/dynamo_engine | 6f10d33a0d118dbb6bae3d896690698fd63428bd | a6c245ae2618459370ee68e43b4b824ba454f0b4 | refs/heads/master | 2021-01-25T04:22:05.655529 | 2017-06-06T17:58:20 | 2017-06-06T17:58:20 | 93,432,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | from base.fields import (
BaseField,
STRING,
)
# Names of the field classes usable as table keys (hash/range).
AVAILABLE_KEY_FIELDS = (
    'HashKeyField', 'RangeKeyField', )
# Names of the plain (non-key) attribute field classes.
AVAILABLE_FIELDS = (
    'StringField', )
class HashKeyField(BaseField):
    """DynamoDB partition (HASH) key field."""
    KEY_TYPE = 'HASH'
    def __init__(self, type):
        # `type` is one of the attribute-type constants (e.g. STRING).
        self.ATTRIBUTE_TYPE = type
class RangeKeyField(BaseField):
    """DynamoDB sort (RANGE) key field."""
    KEY_TYPE = 'RANGE'
    def __init__(self, type):
        # `type` is one of the attribute-type constants (e.g. STRING).
        self.ATTRIBUTE_TYPE = type
class StringField(BaseField):
    """Plain (non-key) attribute stored as a DynamoDB string."""
    ATTRIBUTE_TYPE = STRING
| [
"eshandasnit@gmail.com"
] | eshandasnit@gmail.com |
c19a50a6473c9a473814ca3bfb3b895e067a35e1 | 88e8e28b58092d5ba051582930c156872b9565a5 | /ABC/ABC.py | 4969b12f1de51c97fc10514845edcb0e14a9e838 | [] | no_license | dorahero/crawlers | b8a4a1c2592e817b365d56a87bee021d29598810 | 88e134fdd2493330622848f931638aabd6c906fe | refs/heads/master | 2023-02-19T07:54:54.945144 | 2021-01-23T09:13:42 | 2021-01-23T09:13:42 | 276,884,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,222 | py | import requests
import json
import os
from bs4 import BeautifulSoup
import re
# Fetch the full brand list from the ABC car API; BrandID maps the numeric
# brand id to its display name.
ss = requests.session()
url = 'https://www.abccar.com.tw/abcapi/car/GetSearchCarBrand'
useragent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'
headers = {'User-Agent': useragent}
res_1 = requests.post(url, headers=headers)
json_c = json.loads(res_1.text)
print(json_c)
BrandID = {j['BrandID']: j['Name'] for j in json_c}
print(BrandID)
# useragent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'
# headers = {'User-Agent': useragent}
# data = {'BrandID': '75'}
# res = requests.post(url, headers=headers, json=data)
# json_c = json.loads(res.text)
# print(json_c)
# SeriesID = []
# for j in json_c:
#     SeriesID.append(j['SeriesID'])
# print(SeriesID)
# Crawl every brand: page through the search API (40 listings per page)
# and collect the car IDs, tagged with their brand name.
count = 0    # total listings collected
img_url = []
cid = set()  # entries look like "<7-digit car id>_<brand name>"
cars = 0     # sum of listing counts the API reports per brand
for f, brand in enumerate(BrandID):
    # Fresh cookies per brand so pagination state does not leak between brands.
    ss.cookies.clear()
    print(f, brand, BrandID[brand])
    url = 'https://www.abccar.com.tw//abcapi/search'
    useragent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'
    data = {'brand': '{}'.format(brand),
            'tab': '1',
            'SearchType': '1',
            'BrandKey': '{}'.format(BrandID[brand][0].upper())
            }
    headers = {'User-Agent': useragent}
    # First request only to learn the total listing count for this brand.
    res_2 = requests.post(url, headers=headers, data=data)
    json_c = json.loads(res_2.text)
    page_num = int(json_c['Total'])
    cars += page_num
    print(page_num, '輛車')
    print(int(page_num / 40) + 1, '總頁數')
    for t in range(int(page_num / 40) + 1):
        print(t+1, 'page')
        data = {'brand': '{}'.format(brand),
                'tab': '1',
                'SearchType': '1',
                'BrandKey': '{}'.format(BrandID[brand][0].upper()),
                'page': '{}'.format(t + 1),
                'series': '0',
                'category': '0',
                'SeriesGroup': '0',
                'Dealer': '0'
                }
        headers = {'User-Agent': useragent}
        res_3 = ss.post(url, headers=headers, data=data)
        try:
            # The listing HTML is embedded in the JSON 'List' field.
            json_c = json.loads(res_3.text)
            soup = BeautifulSoup(json_c['List'], 'html.parser')
            car_id = soup.select('a[class="abc-article__link"]')
            print(len(car_id), '幾輛車')
            for c in car_id:
                cid.add(str(c['car-id']) + '_' + BrandID[brand])
                count += 1
        except Exception as e:
            # Best-effort crawl: a malformed page is logged and skipped.
            print(e)
print(cid)
print(len(cid))
print(cars)
# Regroup the collected IDs per brand name and persist the mapping.
# (Assumes car IDs are exactly 7 characters — TODO confirm.)
cid_dict = {}
for b in BrandID:
    tmp = []
    for c in cid:
        if c[8:] == BrandID[b]:
            tmp.append(c[:7])
    cid_dict[BrandID[b]] = tmp
print(cid_dict)
with open('./abc_kind.txt', 'w', encoding='utf-8') as f:
    f.write(str(cid_dict))
# cars_num = 0
# for c in cid:
# url = 'https://www.abccar.com.tw/car/{}?car_source=index-top-dealer'.format(c)
# useragent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'
# headers = {'User-Agent': useragent}
#
# res = requests.get(url, headers=headers)
# soup = BeautifulSoup(res.text, 'html.parser')
# r = re.compile(r'<[^>]*>')
# try:
# j = r.sub('', str(soup.select('script[type="application/ld+json"]')[0]))
# # 轉換非法字元
# json_car = json.loads(j)
# img_url.append(json_car['image'])
# cars_num += len(json_car['image'])
# except Exception as e:
# with open('./jsonerror.txt', 'a', encoding='utf-8') as f:
# f.write(str(e) + str(c) + '\n')
#
# print(cars_num)
# for x in range(len(soup.select('script'))):
# j = r.sub('', str(soup.select('script')[x]))
# try:
# json_car = json.loads(j)
# if 'makesOffer' not in j:
# continue
# else:
# img_url.append(json_car['makesOffer']['itemOffered']['image'])
# count += 1
# print(count)
# break
# except Exception as e:
# print(e)
# print("Not json")
# url = 'https://www.abccar.com.tw/abcapi/car/GetCarModelBrandSeriesCategory'
# useragent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'
# headers = {'User-Agent': useragent}
# res = requests.get(url, headers=headers)
# json_c = json.loads(res.text)
# count = 0
# for j in json_c:
# if 'SeriesID' in j:
# if j['SeriesID'] in SeriesID:
# count += 1
# print(j)
# print(count)
# print(75, 610, 3347, 1411044)
#
# url = 'https://www.abccar.com.tw/car/1411388?car_source=index-top-dealer'
# useragent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'
# headers = {'User-Agent': useragent}
#
# res = requests.get(url, headers=headers)
# soup = BeautifulSoup(res.text, 'html.parser')
# print(len(soup.select('script')))
# r = re.compile(r'<[^>]*>')
# j = r.sub('', str(soup.select('script')[17]))
#
# json_car = json.loads(j)
# print(json_car['makesOffer']['itemOffered']['image'])
| [
"dorahero2727@gmail.com"
] | dorahero2727@gmail.com |
82eff230df94326a35032a2aab368a7418a28af3 | 10e3e350526641bedc7455b545463324020b8f4f | /gs12/api/serializers.py | e97d84e60c148a3482345f18134b7c33afc70382 | [] | no_license | shivamdattapurkayastha99/django-rest-studentdetail | da31566d6f289490f0b30a670df5325a44ad6523 | 2fac55908cd50e43c8c0feaee3e32e942cc28fb2 | refs/heads/master | 2023-02-18T01:23:39.433530 | 2021-01-15T17:53:38 | 2021-01-15T17:53:38 | 329,984,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | from rest_framework import serializers
from .models import Student
class StudentSerializer(serializers.ModelSerializer):
    """Serializes Student instances (id, name, roll, city) for the REST API."""
    class Meta:
        model=Student
        fields=['id','name','roll','city']
| [
"shivamdatta465@gmail.com"
] | shivamdatta465@gmail.com |
b0e90db28ecf6add940a8671196edd2086bb23bb | 6679ab23bf4f0100eb07cf13be21a8c1b1ae4c1f | /Python_Team_Notes/Graph_Theory/Topology_Sort.py | 80c74e3505ee3644db096c34335d5e856ada6d2a | [] | no_license | gimquokka/problem-solving | 1c77e0ad1828fa93ebba360dcf774e38e157d7b6 | f3c661241d3e41adee330d19db3a66e20d23cf50 | refs/heads/master | 2023-06-28T10:19:07.230366 | 2021-07-29T11:29:26 | 2021-07-29T11:29:26 | 365,461,737 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,909 | py | """
- 위상 정렬(Topology Sort)
: 방향 그래프의 모든 노드를 '방향성에 거스르지 않도록 순서대로 나열하는 것
- 전략
1. 진입차수가 0인 노드를 큐에 넣는다
2. 큐가 빌 때까지 다음의 과정을 반복한다.
(1) 큐에서 원소를 꺼내 해당 노드에서 출발하는 간선을 그래프에서 제거하낟.
(2) 새롭게 진입차수가 0이 된 노드를 큐에 넣는다.
- Time Complexity
O(V+E) # 모든 간선과 노드를 확인함으로
"""
from collections import deque
import sys
input = sys.stdin.readline  # fast input: rebind input() to sys.stdin.readline
# Read the number of nodes (n) and edges (m).
n, m = map(int, input().split())
# In-degree of every node, initialised to 0 (index 0 unused; nodes are 1..n).
indegree = [0]*(n+1)
# Adjacency list for the directed graph.
graph = [[] for _ in range(n+1)]
# Read every directed edge of the graph.
for i in range(m):
    a, b = map(int, input().split())
    graph[a].append(b) # edge a -> b: can move from node a to node b
    # The edge into b raises b's in-degree by 1.
    indegree[b] += 1
# Topological sort (Kahn's algorithm).
def topology_sort():
    """Return the vertices 1..n in topological order.

    Reads the module-level globals ``n``, ``graph`` and ``indegree`` and
    consumes (mutates) ``indegree``.  If the graph contains a cycle the
    queue empties early and the returned list holds fewer than n vertices.
    """
    order = []
    # Seed the queue with every vertex that has no incoming edge.
    # (Vertices are numbered from 1; index 0 is unused.)
    ready = deque(v for v in range(1, n + 1) if indegree[v] == 0)
    while ready:
        vertex = ready.popleft()
        order.append(vertex)
        # Removing `vertex` drops one incoming edge from each successor.
        for successor in graph[vertex]:
            indegree[successor] -= 1
            if indegree[successor] == 0:
                ready.append(successor)
    return order
print(*topology_sort())
| [
"gimquokka@gmail.com"
] | gimquokka@gmail.com |
17062f8eac0ac19c8d035d5cc0e5b3e4cdd6a5af | 87227a9153cda47b720227b3e7e1930936550f7c | /matting/torchscript_resnet50_fp32/code/__torch__/torch/nn/modules/conv/___torch_mangle_56.py | 22b1f3c5fcfaad896bb9b6203f0968e4f011fe4c | [] | no_license | gkyAiLab/Style_Transfer_Matting | 488e7a65d262893fc4b42c4c90544e2f9aee64e4 | 2f461fe8c206c5adade09ae29166d238439c09b2 | refs/heads/master | 2023-07-18T12:15:55.625543 | 2021-08-12T07:50:38 | 2021-08-12T07:50:38 | 390,946,717 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | class Conv2d(Module):
__parameters__ = ["weight", "bias", ]
__buffers__ = []
weight : Tensor
bias : Optional[Tensor]
training : bool
transposed : bool
_reversed_padding_repeated_twice : Tuple[int, int, int, int]
out_channels : Final[int] = 24
kernel_size : Final[Tuple[int, int]] = (3, 3)
in_channels : Final[int] = 42
output_padding : Final[Tuple[int, int]] = (0, 0)
padding_mode : Final[str] = "zeros"
stride : Final[Tuple[int, int]] = (1, 1)
dilation : Final[Tuple[int, int]] = (1, 1)
padding : Final[Tuple[int, int]] = (0, 0)
groups : Final[int] = 1
def forward(self: __torch__.torch.nn.modules.conv.___torch_mangle_56.Conv2d,
input: Tensor) -> Tensor:
_0 = (self)._conv_forward(input, self.weight, )
return _0
def _conv_forward(self: __torch__.torch.nn.modules.conv.___torch_mangle_56.Conv2d,
input: Tensor,
weight: Tensor) -> Tensor:
_1 = torch.conv2d(input, weight, self.bias, [1, 1], [0, 0], [1, 1], 1)
return _1
| [
"694813183@qq.com"
] | 694813183@qq.com |
bf2188086aa03fc72778fae06fef0ee0e83af51e | 8a1144dd38388992c7e35a4cc84002e381f2cf1f | /python/django_fundamentals/disappearing_ninjas/apps/disappearingninjas/urls.py | bc2fb6f77ea81d293c029bf67a660d788e235393 | [] | no_license | vin792/dojo_assignments | 18472e868610bacbd0b5141a5322628f4afefb5b | 449b752f92df224285bfd5d03901a3692a98562e | refs/heads/master | 2021-01-20T00:20:09.896742 | 2017-05-26T17:37:09 | 2017-05-26T17:37:09 | 82,735,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | from django.conf.urls import url, include
from . import views
urlpatterns = [
    url(r'^$', views.index),  # landing page
    url(r'^ninjas$', views.display_all),  # show all ninjas
    url(r'^ninjas/(?P<color>[a-zA-Z]+$)', views.turtles),  # filter by color
]
"vin792@gmail.com"
] | vin792@gmail.com |
726806c4033a9b0cacc59d787e6a56e2e4e0ae1c | fd173195d07b5a5ce229a0c1a20ee61884d8c8a1 | /python_practice/Dictionary_programs/10_check_order_of_char_ordered_dict.py | a35adb04cec3b877f1e5ff092c1c41e1fa084e34 | [] | no_license | anitrajpurohit28/PythonPractice | f7e71946144e04b7f9cb9682087e5d4f79839789 | 8b75b67c4c298a135a5f8ab0b3d15bf5738859f1 | refs/heads/master | 2023-04-12T07:04:12.150646 | 2021-04-24T19:52:24 | 2021-04-24T19:52:24 | 293,912,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,072 | py | # 10 Python | Check order of character in string using OrderedDict( )
"""
Input:
string = "engineers rock"
pattern = "er";
Output: true
Explanation:
All 'e' in the input string are before all 'r'.
Input:
string = "engineers rock"
pattern = "egr";
Output: false
Explanation:
There are two 'e' after 'g' in the input string.
Input:
string = "engineers rock"
pattern = "gsr";
Output: false
Explanation:
There are one 'r' before 's' in the input string.
"""
from collections import OrderedDict
def check_order_ordered_dict(input_str, pattern):
    """Return True iff the pattern characters are strictly ordered in input_str.

    Per the module's spec, "ordered" means: for every consecutive pair
    (x, y) in `pattern`, every occurrence of x in `input_str` comes before
    every occurrence of y (last(x) <= first(y)), and all pattern characters
    are present.  An empty pattern is trivially ordered.

    Fixed: the previous version walked OrderedDict.fromkeys(input_str),
    which only compares *first* occurrences, so it wrongly returned True
    for ("engineers rock", "egr") even though two 'e's occur after the 'g'.
    It also raised IndexError on an empty pattern.
    """
    if not pattern:
        return True
    # Map each character to its (first, last) occurrence index, preserving
    # first-seen order via OrderedDict.
    positions = OrderedDict()
    for idx, ch in enumerate(input_str):
        first, _ = positions.get(ch, (idx, idx))
        positions[ch] = (first, idx)
    # Every pattern character must occur at least once.
    if any(ch not in positions for ch in pattern):
        return False
    # Last occurrence of each char must precede the first of the next char.
    for x, y in zip(pattern, pattern[1:]):
        if positions[x][1] > positions[y][0]:
            return False
    return True
def check_order_naive(input_str, pattern):
    """Return True iff the pattern characters are strictly ordered in input_str.

    Naive check: for every consecutive pattern pair (x, y), the last
    occurrence of x must come before the first occurrence of y.

    Fixed: the original used str.rindex/str.index, which raise ValueError
    when a character is absent — making its `== -1` guards unreachable dead
    code.  rfind/find return -1 instead, so a missing character now yields
    False as the guards intended.
    """
    for i in range(len(pattern) - 1):
        x_last = input_str.rfind(pattern[i])
        y_first = input_str.find(pattern[i + 1])
        # Both characters must be present, and all x must precede all y.
        if x_last == -1 or y_first == -1 or x_last > y_first:
            return False
    return True
# Demo: run both implementations on the three examples from the module spec.
input_str = 'engineers rock'
pattern = 'er'
print(f"input_str: {input_str}\n"
      f"pattern: {pattern}\n"
      f"result:")
print("Ordered dict:", check_order_ordered_dict(input_str, pattern))
print("Naive:", check_order_naive(input_str, pattern))
print()
input_str = 'engineers rock'
pattern = 'egr'
print(f"input_str: {input_str}\n"
      f"pattern: {pattern}\n"
      f"result:")
print("Ordered dict:", check_order_ordered_dict(input_str, pattern))
print("Naive:", check_order_naive(input_str, pattern))
print()
input_str = 'engineers rock'
pattern = 'gsr'
print(f"input_str: {input_str}\n"
      f"pattern: {pattern}\n"
      f"result:")
print("Ordered dict:", check_order_ordered_dict(input_str, pattern))
print("Naive:", check_order_naive(input_str, pattern))
print()
| [
"anitrajpurohit28@gmail.com"
] | anitrajpurohit28@gmail.com |
9663c4f2964c0b5f8e7d8cb59cbcaa361449d8cc | f7c0b3bbd9409f76fa8c74c8f19b89cdc1800c4b | /msnmetrosim/controllers/base/__init__.py | dc87ccb66af510afdf4be4d70687004037101dee | [] | no_license | RaenonX/Madison-Metro-Sim | 4ae320d7ebcca6e050562bfc5624165a47867e53 | 24c31a1afd5241feebc38b8ddd10d1f3b7e228ef | refs/heads/master | 2023-02-24T01:14:30.269160 | 2020-12-20T15:51:26 | 2020-12-20T16:07:40 | 297,050,693 | 2 | 2 | null | 2020-11-01T21:31:38 | 2020-09-20T10:20:34 | Jupyter Notebook | UTF-8 | Python | false | false | 158 | py | """Controller base classes."""
from .fromcsv import CSVLoadableController
from .holder import DataListHolder
from .locational import LocationalDataController
| [
"raenonx0710@gmail.com"
] | raenonx0710@gmail.com |
76f4de44efb33da80730e9422dd9244c582aeae6 | 4202a7c678e0ec25ab2065c4c2804b0296f94480 | /VCFS/fix_vcf_headers.py | 89123a315d7c75faaf1eb3c36ebb9db2ec2680e5 | [] | no_license | kaiyaprovost/whole_genome_pipeline | f1c479536560c5b8c68fe3a5ba0917140fbb0793 | 8e605d855c9f0cd6e11e1b73a97260e0d4aa3fae | refs/heads/master | 2023-04-22T20:51:01.344297 | 2023-04-06T19:12:11 | 2023-04-06T19:12:11 | 237,044,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | import sys
import os
import glob
try:
filepath = sys.argv[1]
print("\tFile is: ",filepath)
except:
print("Filename not given, quitting")
#filepath = "/Users/kprovost/Documents/Dissertation/CHAPTER2_GENOMES/ASSEMBLY/genomeresequencingFromLucas/for_AMN_245109/sedtest.txt"
exit()
split = filepath.split("/")
filename = split[-1]
print(split)
splitfile = filename.split(".")
prefix = splitfile[0]
print(splitfile)
print(prefix)
to_replace = "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t*"
replacement ="#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t"+prefix
# Read in the file
with open(filepath, 'r') as file :
filedata = file.read()
print("read")
# Replace the target string
filedata = filedata.replace(to_replace, replacement)
print("replaced 1")
to_replace = '##fileformat=VCFv4.0\n#CHROM'
replacement = '##fileformat=VCFv4.0\n##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n#CHROM'
# Replace the target string
filedata = filedata.replace(to_replace, replacement)
print("replaced 2")
# Write the file out again
with open(filepath, 'w') as file:
file.write(filedata)
print("wrote") | [
"17089935+kaiyaprovost@users.noreply.github.com"
] | 17089935+kaiyaprovost@users.noreply.github.com |
532990c39ecc04b056fad86a473af7e2f6c8637b | 453d2e699d218fdb3bc1e535a707988194ac6717 | /lib/pector/profile.py | cd69c6d93d8fc98a82b0aa1837cceccf54d5701a | [
"MIT"
] | permissive | defgsus/thegame | d54ffcd343c7e1805d2c11e24cd38b02243e73d4 | 38a627d9108f1418b94b08831fd640dd87fbba83 | refs/heads/master | 2023-07-23T06:32:40.297591 | 2022-04-11T12:02:32 | 2022-04-11T12:02:32 | 127,875,178 | 1 | 0 | MIT | 2023-07-06T22:07:07 | 2018-04-03T08:21:31 | Python | UTF-8 | Python | false | false | 6,662 | py | from .vec3 import vec3
import random
def rnd_vec3(mi=-1., ma=1.):
return vec3((random.uniform(mi, ma),
random.uniform(mi, ma),
random.uniform(mi, ma)))
def nbody_case(nbodies, nframes):
pos = [rnd_vec3() for i in range(nbodies)]
imp = [rnd_vec3() for i in range(nbodies)]
for it in range(nframes):
for i in range(len(pos)):
for j in range(i+1, len(pos)):
d = (pos[j] - pos[i])
l = d.length()
d /= l
a = 0.02 * l * d
imp[i] += a
imp[j] -= a
for i in range(len(pos)):
pos[i] += imp[i]
imp[i] *= 0.99
# TODO: i get
# File "/usr/lib/python3.4/cProfile.py", line 22, in <module>
# run.__doc__ = _pyprofile.run.__doc__
# AttributeError: 'module' object has no attribute 'run'
# without these:
def run(): pass
def runctx(): pass
if __name__ == "__main__":
def print_stats(prof):
stats = sorted(prof.getstats(), key=lambda t: -t[3]/t[1])
fmt = "%10s | %20s | %s"
print(fmt % ("time", "time per M calls", "name"))
for pe in stats:
print(fmt % (str(round(pe[3],8)), str(pe[3]/pe[1]*1.e+6), pe[0]))
def do_profile(code):
print("------ %s ------" % code)
import cProfile
prof = cProfile.Profile()
prof.run(code)
print_stats(prof)
do_profile("nbody_case(nbodies=32, nframes=50)")
"""
------ nbody_case(nbodies=32, nframes=50) ------
time | time per M calls | name
1.293539 | 1293539.0 | <built-in method exec>
1.293508 | 1293507.9999999998 | <code object <module> at 0x7ff78da22ed0, file "<string>", line 1>
1.293499 | 1293499.0 | <code object nbody_case at 0x7ff78d9f34b0, file "/home/defgsus/prog/python/dev/pector/pector/profile.py", line 9>
0.000322 | 321.99999999999994 | <code object <listcomp> at 0x7ff78d9f3390, file "/home/defgsus/prog/python/dev/pector/pector/profile.py", line 10>
0.000255 | 254.99999999999997 | <code object <listcomp> at 0x7ff78d9f3420, file "/home/defgsus/prog/python/dev/pector/pector/profile.py", line 11>
0.36807 | 14.841532258064516 | <code object __sub__ at 0x7ff78d9fe930, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 102>
0.566942 | 11.430282258064516 | <code object _binary_operator at 0x7ff78da01780, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 143>
0.22724 | 9.162903225806451 | <code object __rmul__ at 0x7ff78d9fedb0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 115>
0.239938 | 9.088560606060605 | <code object __iadd__ at 0x7ff78d9fe810, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 98>
0.216015 | 8.710282258064517 | <code object __isub__ at 0x7ff78d9feb70, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 108>
0.000554 | 8.65625 | <code object rnd_vec3 at 0x7ff78da6cf60, file "/home/defgsus/prog/python/dev/pector/pector/profile.py", line 4>
0.520338 | 6.705386597938144 | <code object _binary_operator_inplace at 0x7ff78da01810, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 150>
0.279042 | 5.618596971649485 | <code object __init__ at 0x7ff78d9f38a0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 11>
0.259388 | 5.222857603092784 | <code object set at 0x7ff78da019c0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 163>
0.09277 | 3.7407258064516125 | <code object __itruediv__ at 0x7ff78da01270, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 128>
0.005101 | 3.1881249999999994 | <code object __imul__ at 0x7ff78d9feed0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 118>
0.073217 | 2.9522983870967736 | <code object <listcomp> at 0x7ff78da016f0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 148>
0.05166 | 2.0830645161290318 | <code object length at 0x7ff78da01ae0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 196>
0.207934 | 1.654682327476445 | <code object check_float_sequence at 0x7ff78da0c1e0, file "/home/defgsus/prog/python/dev/pector/pector/tools.py", line 18>
0.030468 | 1.2285483870967742 | <code object <listcomp> at 0x7ff78da01660, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 146>
0.166587 | 0.9418932060792473 | <code object is_number at 0x7ff78da0c0c0, file "/home/defgsus/prog/python/dev/pector/pector/tools.py", line 2>
0.02533 | 0.5100273840206185 | <code object <listcomp> at 0x7ff78da018a0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 187>
0.011345 | 0.4574596774193548 | <built-in method sum>
0.01087 | 0.4383064516129032 | <code object <listcomp> at 0x7ff78da01a50, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 205>
8.2e-05 | 0.42708333333333337 | <code object uniform at 0x7ff78da0c930, file "/usr/lib/python3.4/random.py", line 342>
0.022075 | 0.2904605263157894 | <code object __iter__ at 0x7ff78d9f3e40, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 51>
0.15862 | 0.2854828839854577 | <built-in method len>
0.006613 | 0.2666532258064516 | <built-in method sqrt>
0.031618 | 0.13867543859649123 | <code object __getitem__ at 0x7ff78d9f3ed0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 54>
0.010909 | 0.137739898989899 | <code object <lambda> at 0x7ff78d9fe780, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 99>
0.010068 | 0.13532258064516126 | <code object <lambda> at 0x7ff78da011e0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 129>
0.009718 | 0.13061827956989247 | <code object <lambda> at 0x7ff78d9feae0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 109>
0.009641 | 0.12958333333333336 | <code object <lambda> at 0x7ff78d9fe8a0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 103>
0.009471 | 0.1272983870967742 | <code object <lambda> at 0x7ff78d9fed20, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 116>
0.000546 | 0.11374999999999998 | <code object <lambda> at 0x7ff78d9fee40, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 119>
1.5e-05 | 0.078125 | <method 'random' of '_random.Random' objects>
0.03102 | 0.07660621147463252 | <code object __len__ at 0x7ff78d9f3db0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 48>
0.0 | 0.0 | <method 'disable' of '_lsprof.Profiler' objects>
""" | [
"s.berke@netzkolchose.de"
] | s.berke@netzkolchose.de |
980e6fd73b204353dd08d97307bfc46b97061467 | 0b4957de738dd05f964ea838016b4b811feca970 | /ultron8/api/depends/get_jwt.py | dbdd7d50c59f3efd6c143787aef51a5f133e840d | [
"MIT",
"Apache-2.0"
] | permissive | bossjones/ultron8 | bdb5db72ba58b80645ae417cdf97287cfadd325d | 09d69c788110becadb9bfaa7b3d2a2046f6b5a1c | refs/heads/master | 2023-01-13T06:52:45.679582 | 2023-01-03T22:25:54 | 2023-01-03T22:25:54 | 187,934,920 | 0 | 0 | Apache-2.0 | 2023-01-03T22:25:56 | 2019-05-22T00:44:03 | Python | UTF-8 | Python | false | false | 1,459 | py | # SOURCE: https://github.com/bergran/fast-api-project-template
import re
from fastapi import Header, HTTPException
from starlette import status
from starlette.requests import Request
# from apps.token.constants.jwt import JWT_REGEX
from ultron8.api import settings
from ultron8.constants.jwt import JWT_REGEX
# it will regex always Authorization header with the header config that you set it or default JWT. If header does not exist or has not ^{header} [A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*$ format then will raise HTTPException and response with status code 400.
def get_jwt(
request: Request, authorization: str = Header("", alias="Authorization")
) -> str:
"""Uses regex to test existence of header in specified format. If the correct header does not exist, it will raise HTTPException and response with status code 400.
Arguments:
request {Request} -- [description]
Keyword Arguments:
authorization {str} -- [description] (default: {Header('', alias='Authorization')})
Raises:
HTTPException: [description]
Returns:
str -- [description]
"""
# config = request.state.config
regex = JWT_REGEX.format(settings.JWT_AUTH_HEADER_PREFIX)
if not re.match(regex, authorization):
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Authorization has wrong format",
)
return authorization.split(" ")[-1]
| [
"noreply@github.com"
] | bossjones.noreply@github.com |
b3406f02bf858fa60820c26b9706ea1879d18919 | 9e8a754f62ab172043ca978c0fcce20687377498 | /pper.py | 6b4b8bb5df63a9a4f4dc692e7efdeebd6d1259fd | [
"MIT"
] | permissive | luyang93/ROSALIND | 41a5835ed634668742a7155e162ce14f1875b7a0 | f6e5a099a2c47203a14370cfe97ba41db1ae8429 | refs/heads/master | 2020-04-02T22:40:20.640063 | 2019-02-28T07:53:09 | 2019-02-28T07:53:09 | 154,840,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : pper.py
# @Date : 2019-02-17
# @Author : luyang(luyang@novogene.com)
from math import factorial
def main():
file = 'input/rosalind_pper.txt'
with open(file) as f:
n, k = map(int, f.readline().strip().split())
print(int(factorial(n) / factorial(n - k) % 1000000))
if __name__ == "__main__":
main()
| [
"510426762@qq.com"
] | 510426762@qq.com |
ce413e69ce72c2f5c0aae1812a035fff9118ef11 | 55647a80c8b412af9df0ba3f50595cc2f29c25e6 | /res/scripts/client/gui/Scaleform/daapi/view/meta/ContactsTreeComponentMeta.py | 80bd939e51a3a9be5b3602defe190b7a9f38487d | [] | no_license | cnsuhao/WOT-0.9.17-CT | 0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb | d1f932d8cabaf8aa21708622e87f83c8d24d6451 | refs/heads/master | 2021-06-08T18:11:07.039293 | 2016-11-19T19:12:37 | 2016-11-19T19:12:37 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,413 | py | # 2016.11.19 19:51:17 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/ContactsTreeComponentMeta.py
from gui.Scaleform.framework.entities.BaseDAAPIComponent import BaseDAAPIComponent
class ContactsTreeComponentMeta(BaseDAAPIComponent):
"""
DO NOT MODIFY!
Generated with yaml.
__author__ = 'yaml_processor'
@extends BaseDAAPIComponent
"""
def onGroupSelected(self, mainGroup, groupData):
self._printOverrideError('onGroupSelected')
def searchLocalContact(self, flt):
self._printOverrideError('searchLocalContact')
def hasDisplayingContacts(self):
self._printOverrideError('hasDisplayingContacts')
def as_updateInfoMessageS(self, enableSearchInput, title, message, warn):
if self._isDAAPIInited():
return self.flashObject.as_updateInfoMessage(enableSearchInput, title, message, warn)
def as_getMainDPS(self):
if self._isDAAPIInited():
return self.flashObject.as_getMainDP()
def as_setInitDataS(self, val):
if self._isDAAPIInited():
return self.flashObject.as_setInitData(val)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\Scaleform\daapi\view\meta\ContactsTreeComponentMeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:51:17 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
b5e5a434aefbfbd80790b0c8b37c6fe48d9555e8 | 4910c0f3d03935fc8ee03f1e9dc20dfdb2c7c04b | /Codigos estudiantes por lenguaje/PY/Bryann Valderrama/Matemática/combinatories.py | 39b502005bfe3a301a93155655641484f1b875f2 | [] | no_license | roca12/gpccodes | ab15eeedc0cadc0735651262887b44f1c2e65b93 | aa034a3014c6fb879ec5392c51f9714bdc5b50c2 | refs/heads/master | 2023-02-01T13:49:27.563662 | 2023-01-19T22:50:58 | 2023-01-19T22:50:58 | 270,723,328 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,282 | py | from sys import stdout
wr = stdout.write
cont1 = 0
cont2 = 0
def combUtil(arr, data, start, end, index, r):
global cont1
if index == r:
for j in range(r):
wr(f'{data[j]} ')
wr('\n')
cont1 += 1
return
i = start
while i <= end and end - i + 1 >= r - index:
data[index] = arr[i]
combUtil(arr, data, i+1, end, index+1, r)
i += 1
def combinationRepetitionUtil(chosen, arr, index, r, start, end):
global cont2
if index == r:
for i in range(r):
wr(f'{arr[chosen[i]]} ')
wr('\n')
cont2 += 1
return
for i in range(start, end):
chosen[index] = i
combinationRepetitionUtil(chosen, arr, index+1, r, i, end)
return
def printComb(arr, n, r):
data = [0 for x in range(r)]
combUtil(arr, data, 0, n-1, 0, r)
def combinationRepetition(arr, n, r):
chosen = [0 for x in range(r+1)]
combinationRepetitionUtil(chosen, arr, 0, r, 0, n-1)
arrint1 = [1, 2, 3, 4, 5]
r1 = 3
n1 = len(arrint1)
printComb(arrint1, n1, r1)
wr(f'Hay {str(cont1)} Combinaciones Sin Repetición\n')
arrint2 = [1, 2, 3, 4, 5]
r2 = 2
n2 = len(arrint2)
combinationRepetition(arrint2, n2, r2)
wr(f'Hay {str(cont2)} Combinaciones Con Repetición')
| [
"noreply@github.com"
] | roca12.noreply@github.com |
276ae40a2e543da3e05e27e93b5d1815d60013fa | 162e0e4791188bd44f6ce5225ff3b1f0b1aa0b0d | /examples/preprocessing/plot_function_transformer.py | f2793846b5276e252b98eef912dc6bad871fcef9 | [] | no_license | testsleeekGithub/trex | 2af21fa95f9372f153dbe91941a93937480f4e2f | 9d27a9b44d814ede3996a37365d63814214260ae | refs/heads/master | 2020-08-01T11:47:43.926750 | 2019-11-06T06:47:19 | 2019-11-06T06:47:19 | 210,987,245 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,981 | py | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principle component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from mrex.model_selection import train_test_split
from mrex.decomposition import PCA
from mrex.pipeline import make_pipeline
from mrex.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
lw = 0
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, lw=lw)
plt.figure()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
lw=lw,
s=60
)
plt.show()
| [
"shkolanovaya@gmail.com"
] | shkolanovaya@gmail.com |
6af4bfdf7815e6a889dc3e4d43982356bec92e10 | 768058e7f347231e06a28879922690c0b6870ed4 | /venv/lib/python3.7/site-packages/numba/tests/npyufunc/test_dufunc.py | 7906221ae4cc9a43b5b7d94b034fe27a77edd570 | [] | no_license | jciech/HeisenbergSpinChains | 58b4238281d8c158b11c6c22dd0da82025fd7284 | e43942bbd09f6675e7e2ff277f8930dc0518d08e | refs/heads/master | 2022-12-18T08:04:08.052966 | 2020-09-29T12:55:00 | 2020-09-29T12:55:00 | 258,476,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,533 | py | import pickle
import numpy as np
from numba import njit, vectorize
from numba.tests.support import MemoryLeakMixin
import unittest
from numba.np.ufunc import dufunc
def pyuadd(a0, a1):
return a0 + a1
class TestDUFunc(MemoryLeakMixin, unittest.TestCase):
def nopython_dufunc(self, pyfunc):
return dufunc.DUFunc(pyfunc, targetoptions=dict(nopython=True))
def test_frozen(self):
duadd = self.nopython_dufunc(pyuadd)
self.assertFalse(duadd._frozen)
duadd._frozen = True
self.assertTrue(duadd._frozen)
with self.assertRaises(ValueError):
duadd._frozen = False
with self.assertRaises(TypeError):
duadd(np.linspace(0, 1, 10), np.linspace(1, 2, 10))
def test_scalar(self):
duadd = self.nopython_dufunc(pyuadd)
self.assertEqual(pyuadd(1, 2), duadd(1, 2))
def test_npm_call(self):
duadd = self.nopython_dufunc(pyuadd)
@njit
def npmadd(a0, a1, o0):
duadd(a0, a1, o0)
X = np.linspace(0, 1.9, 20)
X0 = X[:10]
X1 = X[10:]
out0 = np.zeros(10)
npmadd(X0, X1, out0)
np.testing.assert_array_equal(X0 + X1, out0)
Y0 = X0.reshape((2, 5))
Y1 = X1.reshape((2, 5))
out1 = np.zeros((2, 5))
npmadd(Y0, Y1, out1)
np.testing.assert_array_equal(Y0 + Y1, out1)
Y2 = X1[:5]
out2 = np.zeros((2, 5))
npmadd(Y0, Y2, out2)
np.testing.assert_array_equal(Y0 + Y2, out2)
def test_npm_call_implicit_output(self):
duadd = self.nopython_dufunc(pyuadd)
@njit
def npmadd(a0, a1):
return duadd(a0, a1)
X = np.linspace(0, 1.9, 20)
X0 = X[:10]
X1 = X[10:]
out0 = npmadd(X0, X1)
np.testing.assert_array_equal(X0 + X1, out0)
Y0 = X0.reshape((2, 5))
Y1 = X1.reshape((2, 5))
out1 = npmadd(Y0, Y1)
np.testing.assert_array_equal(Y0 + Y1, out1)
Y2 = X1[:5]
out2 = npmadd(Y0, Y2)
np.testing.assert_array_equal(Y0 + Y2, out2)
out3 = npmadd(1.0, 2.0)
self.assertEqual(out3, 3.0)
def test_ufunc_props(self):
duadd = self.nopython_dufunc(pyuadd)
self.assertEqual(duadd.nin, 2)
self.assertEqual(duadd.nout, 1)
self.assertEqual(duadd.nargs, duadd.nin + duadd.nout)
self.assertEqual(duadd.ntypes, 0)
self.assertEqual(duadd.types, [])
self.assertEqual(duadd.identity, None)
duadd(1, 2)
self.assertEqual(duadd.ntypes, 1)
self.assertEqual(duadd.ntypes, len(duadd.types))
class TestDUFuncPickling(MemoryLeakMixin, unittest.TestCase):
def check(self, ident, result_type):
buf = pickle.dumps(ident)
rebuilt = pickle.loads(buf)
# Check reconstructed dufunc
r = rebuilt(123)
self.assertEqual(123, r)
self.assertIsInstance(r, result_type)
# Try to use reconstructed dufunc in @jit
@njit
def foo(x):
return rebuilt(x)
r = foo(321)
self.assertEqual(321, r)
self.assertIsInstance(r, result_type)
def test_unrestricted(self):
@vectorize
def ident(x1):
return x1
self.check(ident, result_type=(int, np.integer))
def test_restricted(self):
@vectorize(["float64(float64)"])
def ident(x1):
return x1
self.check(ident, result_type=float)
if __name__ == "__main__":
unittest.main()
| [
"jan@multiply.ai"
] | jan@multiply.ai |
0314afce70df3d153a8efc40f409c5cc3922c701 | f4ad721b7158ff2605be6f7e4bde4af6e0e11364 | /vt_manager_kvm/src/python/vt_manager_kvm/controller/drivers/KVMDriver.py | 99551db2acfc0c494510c3c8be8a19887b9f590f | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | ict-felix/stack | 3fb4222a0538c0dbbe351ccc3da1bafa9ca37057 | 583ccacf067b9ae6fc1387e53eaf066b4f3c0ade | refs/heads/master | 2021-01-10T10:16:29.851916 | 2016-06-22T15:11:11 | 2016-06-22T15:11:11 | 51,439,714 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,371 | py | from vt_manager_kvm.controller.drivers.VTDriver import VTDriver
from vt_manager_kvm.models.KVMServer import KVMServer
from vt_manager_kvm.models.KVMVM import KVMVM
from vt_manager_kvm.models.VTServer import VTServer
from vt_manager_kvm.utils.HttpUtils import HttpUtils
import threading
import logging
class KVMDriver(VTDriver):
logger = logging.getLogger("KVMDriver")
# def __init__(self):
# self.ServerClass = eval('KVMServer')
# self.VMclass = eval('KVMVM')
@staticmethod
def getInstance():
return KVMDriver()
def deleteVM(self, vm):
KVMDriver.logger.debug("deleteVM start")
try:
vm.Server.get().deleteVM(vm)
except:
raise
def getServerAndCreateVM(self,action):
try:
Server = KVMServer.objects.get(uuid = action.server.uuid )
VMmodel = Server.createVM(*KVMDriver.kvmVMtoModel(action.server.virtual_machines[0],threading.currentThread().callBackURL, save = True))
return Server, VMmodel
except Exception as e:
raise e
@staticmethod
def createOrUpdateServerFromPOST(request, instance):
#return KVMServer.constructor(server.getName(),server.getOSType(),server.getOSDistribution(),server.getOSVersion(),server.getAgentURL(),save=True)
server = KVMServer.objects.get(uuid = instance.getUUID())
if server:
return server.updateServer(HttpUtils.getFieldInPost(request,VTServer, "name"),
HttpUtils.getFieldInPost(request,VTServer, "operatingSystemType"),
HttpUtils.getFieldInPost(request,VTServer, "operatingSystemDistribution"),
HttpUtils.getFieldInPost(request,VTServer, "operatingSystemVersion"),
HttpUtils.getFieldInPost(request,VTServer, "numberOfCPUs"),
HttpUtils.getFieldInPost(request,VTServer, "CPUFrequency"),
HttpUtils.getFieldInPost(request,VTServer, "memory"),
HttpUtils.getFieldInPost(request,VTServer, "discSpaceGB"),
HttpUtils.getFieldInPost(request,VTServer, "agentURL"),
save=True)
else:
return KVMServer.constructor(HttpUtils.getFieldInPost(request,VTServer, "name"),
HttpUtils.getFieldInPost(request,VTServer, "operatingSystemType"),
HttpUtils.getFieldInPost(request,VTServer, "operatingSystemDistribution"),
HttpUtils.getFieldInPost(request,VTServer, "operatingSystemVersion"),
HttpUtils.getFieldInPost(request,VTServer, "numberOfCPUs"),
HttpUtils.getFieldInPost(request,VTServer, "CPUFrequency"),
HttpUtils.getFieldInPost(request,VTServer, "memory"),
HttpUtils.getFieldInPost(request,VTServer, "discSpaceGB"),
HttpUtils.getFieldInPost(request,VTServer, "agentURL"),
save=True)
def crudServerFromInstance(self,instance):
server = KVMServer.objects.filter(uuid = instance.getUUID())
if len(server)==1:
server = server[0]
return server.updateServer(instance.getName(),
instance.getOSType(),
instance.getOSDistribution(),
instance.getOSVersion(),
instance.getNumberOfCPUs(),
instance.getCPUFrequency(),
instance.getMemory(),
instance.getDiscSpaceGB(),
instance.getAgentURL(),
instance.getAgentPassword(),
save = True)
elif len(server)==0:
return KVMServer.constructor(instance.getName(),
instance.getOSType(),
instance.getOSDistribution(),
instance.getOSVersion(),
instance.getNumberOfCPUs(),
instance.getCPUFrequency(),
instance.getMemory(),
instance.getDiscSpaceGB(),
instance.getAgentURL(),
instance.getAgentPassword(),
save=True)
else:
raise Exception("Trying to create a server failed")
@staticmethod
def kvmVMtoModel(VMxmlClass, callBackURL, save):
name = VMxmlClass.name
uuid = VMxmlClass.uuid
projectId = VMxmlClass.project_id
projectName = VMxmlClass.project_name
sliceId = VMxmlClass.slice_id
sliceName = VMxmlClass.slice_name
osType = VMxmlClass.operating_system_type
osVersion = VMxmlClass.operating_system_version
osDist = VMxmlClass.operating_system_distribution
memory = VMxmlClass.xen_configuration.memory_mb
# XXX
callBackUrl = callBackURL
hdSetupType = VMxmlClass.xen_configuration.hd_setup_type
hdOriginPath = VMxmlClass.xen_configuration.hd_origin_path
virtSetupType = VMxmlClass.xen_configuration.virtualization_setup_type
return name,uuid,projectId,projectName,sliceId,sliceName,osType,osVersion,osDist,memory,None,None,callBackUrl,hdSetupType,hdOriginPath,virtSetupType,save
| [
"jenkins@localhost"
] | jenkins@localhost |
d1a417856e0372940c7300b1fec9088b1beff141 | 8411d44bb4c1316755311beaab8cc4c3ec78475e | /dungeonpath.py | b7c46d4d7423584870852dcb4f55db1d221a3a03 | [] | no_license | jrecuero/rpg3 | 45b40908ff39b692e4a68c958383db0946d9e306 | ee961023841f79c22d21c8a4c7a92225d5525c7a | refs/heads/master | 2021-01-19T07:30:46.996853 | 2014-11-25T08:20:06 | 2014-11-25T08:20:06 | 16,552,124 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,093 | py | #!/usr/bin/env python
"""dungeonpath.py class required for the dungeonpath.
:author: Jose Carlos Recuero
:version: 0.1
:since: 10/21/2014
"""
__docformat__ = 'restructuredtext en'
###############################################################################
## _ _
## (_)_ __ ___ _ __ ___ _ __| |_ ___
## | | '_ ` _ \| '_ \ / _ \| '__| __/ __|
## | | | | | | | |_) | (_) | | | |_\__ \
## |_|_| |_| |_| .__/ \___/|_| \__|___/
## |_|
###############################################################################
#
# import std python modules
#
#
# import dungeonpath python modules
#
import objecto
import dungeonstep
#import player
###############################################################################
##
## ___ ___ _ __ ___| |_ __ _ _ __ | |_ ___
## / __/ _ \| '_ \/ __| __/ _` | '_ \| __/ __|
## | (_| (_) | | | \__ \ || (_| | | | | |_\__ \
## \___\___/|_| |_|___/\__\__,_|_| |_|\__|___/
##
###############################################################################
#
FORWARD = 'forward'
BACKWARD = 'backward'
STOP = 'stop'
RIGHT_TURN = 'right turn'
LEFT_TURN = 'left turn'
UPSIDE = 'upside'
DOWNSIDE = 'downside'
###############################################################################
## _ _ _
## ___ _ _| |__ _ __ ___ _ _| |_(_)_ __ ___ ___
## / __| | | | '_ \| '__/ _ \| | | | __| | '_ \ / _ \/ __|
## \__ \ |_| | |_) | | | (_) | |_| | |_| | | | | __/\__ \
## |___/\__,_|_.__/|_| \___/ \__,_|\__|_|_| |_|\___||___/
##
###############################################################################
#
###############################################################################
## _ _ __ _ _ _ _
## ___| | __ _ ___ ___ __| | ___ / _(_)_ __ (_) |_(_) ___ _ __ ___
## / __| |/ _` / __/ __| / _` |/ _ \ |_| | '_ \| | __| |/ _ \| '_ \/ __|
## | (__| | (_| \__ \__ \ | (_| | __/ _| | | | | | |_| | (_) | | | \__ \
## \___|_|\__,_|___/___/ \__,_|\___|_| |_|_| |_|_|\__|_|\___/|_| |_|___/
##
###############################################################################
#
#
#------------------------------------------------------------------------------
class DungeonPath(objecto.Objecto):
"""
Dungeon path is composed of dungeon steps.
Position in the dungeon path uses x coordination for the dungeon step and
the y coordinate for the dungeon cell in the given dungeon step.
"""
#--------------------------------------------------------------------------
def __init__(self, theName=None):
""" Initialize DungeonPath instance
:type theName: str
:param theName: DungeonPath name
"""
super(DungeonPath, self).__init__(theName)
self.path = self.baseLinearPath()
self.players = []
#--------------------------------------------------------------------------
def baseLinearPath(self, theLen=100, theWide=3):
""" Create a basic linear path.
:type theLen: int
:param theLen: length of the path
:type theWide: int
:param theWide: wide for every path step
:rtype: list
:return: list the newly created path
"""
path = []
for x in xrange(theLen):
path.append(dungeonstep.DungeonStep(theWide))
return path
#--------------------------------------------------------------------------
def addPlayer(self, thePlayer, thePathPos=None):
""" Add a player to the path.
:type thePlayer: player.Player
:param thePlayer: player instance to be added to the path
:type thePathPos: point
:param thePathPos: position in the path
:rtype: bool
:return: True if player was added to the path, else False
"""
if thePlayer not in self.players:
thePlayer.dungeonpath = {'path': self,
'pathpos': thePathPos,
'pathmove': 1,
'pathdir': FORWARD}
self.players.append(thePlayer)
return True
else:
self.logger.error('Player %s was already in the path' % (thePlayer.name, ))
return False
#--------------------------------------------------------------------------
def removePlayer(self, thePlayer):
""" Remove a player from the path.
:type thePlayer: player.Player
:param thePlayer: player instance to be removed from the path
:rtype: bool
:return: True if player was removed from the path, else False
"""
reto = True
try:
thePlayer.dungeonpath = None
self.players.remove(thePlayer)
except ValueError:
self.logger.error('Player %s was not in the path' % (thePlayer.name, ))
reto = False
finally:
return reto
#--------------------------------------------------------------------------
def placePlayer(self, thePlayer, thePathPos):
""" Set the player in a given position
:type thePlayer: player.Player
:param thePlayer: player instance to be added to the path
:type thePathPos: point
:param thePathPos: position in the path
:rtype: bool
:return: True if player was added to the path, else False
"""
if thePlayer in self.players:
thePlayer.dungeonpath['pathpos'] = thePathPos
return True
else:
self.logger.error('Player %s was not in the path' % (thePlayer.name, ))
return False
#--------------------------------------------------------------------------
def movePlayer(self, thePlayer):
""" Move a player in the path
:type thePlayer: player.Player
:param thePlayer: player to move
"""
posX, posY = thePlayer.dungeonpath['pathpos']
if thePlayer.dungeonpath['pathdir'] == FORWARD:
posX += thePlayer.dungeonpath['pathmove']
elif thePlayer.dungeonpath['pathdir'] == BACKWARD:
posX -= thePlayer.dungeonpath['pathmove']
elif thePlayer.dungeonpath['pathdir'] == STOP:
pass
thePlayer.dungeonpath['pathpos'] == (posX, posY)
# Now reset to default player movement data.
thePlayer.dungeonpath['pathmove'] = 1
thePlayer.dungeonpath['pathdir'] = FORWARD
#--------------------------------------------------------------------------
def movePath(self):
""" Move all players in the dungeon path.
"""
map(self.movePlayer, self.players)
###############################################################################
## _
## _ __ ___ __ _(_)_ __
## | '_ ` _ \ / _` | | '_ \
## | | | | | | (_| | | | | |
## |_| |_| |_|\__,_|_|_| |_|
##
###############################################################################
#
if __name__ == '__main__':
import doctest
doctest.testmod()
| [
"jose.recuero@gmail.com"
] | jose.recuero@gmail.com |
8bb4e54a6ffca03f781beddb041de88e048ab536 | d0765d7c9977f892f0fd4c623107303ff3582485 | /kaggle/facial-keypoints-detection/testcustom.py | 31654fb9eae296e5d9fe7e0ba1442c0b7be43952 | [] | no_license | Lizonghang/Neural-Network | dc82cfa96d1eafcc192374645b6113774cd1cbf2 | be6ddff70022e1933c1c9ad4b04ef0ac3fcf2f70 | refs/heads/master | 2021-01-01T18:39:06.416372 | 2017-12-26T15:33:09 | 2017-12-26T15:33:09 | 98,390,467 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from keras.models import load_model
def load_image(filepath):
im = Image.open(filepath)
im = im.convert('L')
im = im.resize((96, 96))
return np.array(im).reshape((1, 96, 96, 1)) / 255.0
def display(X, y_pred):
plt.figure()
plt.imshow(X.reshape((96, 96)), cmap='gray')
plt.axis('off')
y_pred = y_pred.clip(0, 1)
plt.scatter(y_pred[0::2] * 96.0, y_pred[1::2] * 96.0, c='r', marker='x')
plt.show()
if __name__ == '__main__':
model = load_model('ckpt/model.h5')
X = load_image('test1.png')
y_pred = model.predict(X).reshape((30,))
display(X, y_pred)
| [
"870644199@qq.com"
] | 870644199@qq.com |
b6aa7142f269f0ba1dd4f2de043759be68235d84 | fbe7e4cad853f2bbabadc4597cdb9bb96d27bbbf | /Simulation/17143_boj_낚시왕(Simulation)/17143_boj_fishing.py | 2fea7ae7ada0e3ff5fcc083104971d99c861aa80 | [] | no_license | edugieun/Algorithm-Solving | 67e649b894aede10e4f61ebf30d0ddbac67dd4db | a925657b893cc9877c8dbf1b986323e474872204 | refs/heads/master | 2020-09-08T20:02:14.085427 | 2020-04-15T13:30:31 | 2020-04-15T13:30:31 | 221,231,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,056 | py | import sys, time
sys.stdin = open('input.txt', 'r')
start = time.time()
def shark_change():
    """Advance every shark one time step, then resolve same-cell fights.

    Operates on module globals: ``sharks`` maps an id string to
    [row, col, speed, direction, size] (direction: 1=up, 2=down,
    3=right, 4=left), and R / C are the grid dimensions.
    """
    # Position update: move each shark `speed` cells, bouncing off walls.
    for shark_num in sharks.keys():
        # Up
        if sharks[shark_num][3] == 1:
            # A round trip (down and back) is 2*(R-1) cells, so reduce the
            # speed modulo that first to avoid looping over huge speeds.
            sharks[shark_num][2] = sharks[shark_num][2] % (2 * R - 2)
            sharks[shark_num][0] -= sharks[shark_num][2]
            # Reflect off the top/bottom walls until the row is in [1, R].
            while sharks[shark_num][0] <= 0 or sharks[shark_num][0] > R:
                if sharks[shark_num][3] == 1:
                    sharks[shark_num][0] = sharks[shark_num][0] * (-1) + 2
                    sharks[shark_num][3] = 2
                elif sharks[shark_num][3] == 2:
                    sharks[shark_num][0] = R - (sharks[shark_num][0] - R)
                    sharks[shark_num][3] = 1
        # Down
        elif sharks[shark_num][3] == 2:
            sharks[shark_num][2] = sharks[shark_num][2] % (2 * R - 2)
            sharks[shark_num][0] += sharks[shark_num][2]
            while sharks[shark_num][0] <= 0 or sharks[shark_num][0] > R:
                if sharks[shark_num][3] == 1:
                    sharks[shark_num][0] = sharks[shark_num][0] * (-1) + 2
                    sharks[shark_num][3] = 2
                elif sharks[shark_num][3] == 2:
                    sharks[shark_num][0] = R - (sharks[shark_num][0] - R)
                    sharks[shark_num][3] = 1
        # Right
        elif sharks[shark_num][3] == 3:
            # Horizontal round trip is 2*(C-1) cells.
            sharks[shark_num][2] = sharks[shark_num][2] % (2 * C - 2)
            sharks[shark_num][1] += sharks[shark_num][2]
            # Reflect off the left/right walls until the column is in [1, C].
            while sharks[shark_num][1] > C or sharks[shark_num][1] <= 0:
                if sharks[shark_num][3] == 3:
                    sharks[shark_num][1] = C - (sharks[shark_num][1] - C)
                    sharks[shark_num][3] = 4
                elif sharks[shark_num][3] == 4:
                    sharks[shark_num][1] = sharks[shark_num][1] * (-1) + 2
                    sharks[shark_num][3] = 3
        # Left
        elif sharks[shark_num][3] == 4:
            sharks[shark_num][2] = sharks[shark_num][2] % (2 * C - 2)
            sharks[shark_num][1] -= sharks[shark_num][2]
            while sharks[shark_num][1] > C or sharks[shark_num][1] <= 0:
                if sharks[shark_num][3] == 3:
                    sharks[shark_num][1] = C - (sharks[shark_num][1] - C)
                    sharks[shark_num][3] = 4
                elif sharks[shark_num][3] == 4:
                    sharks[shark_num][1] = sharks[shark_num][1] * (-1) + 2
                    sharks[shark_num][3] = 3
    # Occupancy grid: cell -> id of the biggest shark seen there so far.
    N_matrix = [[0] * (C + 1) for i in range(R + 1)]
    # Same-cell fights: only the largest shark in a cell survives.
    # Time-limit fix: instead of comparing all shark pairs (10000 * 10000
    # reads when there are 10000 sharks), read each shark exactly once and
    # use the matrix to resolve cells occupied by more than one shark.
    dead_shark = []
    for shark_num in sharks.keys():
        if N_matrix[sharks[shark_num][0]][sharks[shark_num][1]] == 0:
            N_matrix[sharks[shark_num][0]][sharks[shark_num][1]] = shark_num
        elif N_matrix[sharks[shark_num][0]][sharks[shark_num][1]] != shark_num:
            if sharks[N_matrix[sharks[shark_num][0]][sharks[shark_num][1]]][4] < sharks[shark_num][4]:
                dead_shark.append(N_matrix[sharks[shark_num][0]][sharks[shark_num][1]])
                N_matrix[sharks[shark_num][0]][sharks[shark_num][1]] = shark_num
            else:
                dead_shark.append(shark_num)
    for shark_num in dead_shark:
        del sharks[shark_num]
R, C, M = map(int, input().split())  # grid rows, grid cols, number of sharks
# sharks: id -> [row, col, speed, direction, size]
sharks = {}
for i in range(M):
    sharks['s' + str(i)] = list(map(int, input().split()))
shark_sum = 0
for person_pos in range(1, C + 1):
    # Fishing: catch the shark closest to the top edge in this column.
    get_shark_row = 99999  # sentinel meaning "no shark in this column"
    for shark_num in sharks.keys():
        if sharks[shark_num][1] == person_pos and sharks[shark_num][0] < get_shark_row:
            get_shark = shark_num
            get_shark_row = sharks[get_shark][0]
    if get_shark_row != 99999:
        shark_sum += sharks[get_shark][4]
        del sharks[get_shark]
    # Move every shark and resolve same-cell fights.
    shark_change()
print(shark_sum)
print(time.time() - start)  # elapsed wall-clock time (debugging aid)
# 시간초과
# def shark_change():
#
# # 위치 변경
# for shark_num in sharks.keys():
# # 위
# if sharks[shark_num][3] == 1:
# sharks[shark_num][2] = sharks[shark_num][2] % (2 * R - 2)
# sharks[shark_num][0] -= sharks[shark_num][2]
# while sharks[shark_num][0] <= 0 or sharks[shark_num][0] > R:
# if sharks[shark_num][3] == 1:
# sharks[shark_num][0] = sharks[shark_num][0] * (-1) + 2
# sharks[shark_num][3] = 2
# elif sharks[shark_num][3] == 2:
# sharks[shark_num][0] = R - (sharks[shark_num][0] - R)
# sharks[shark_num][3] = 1
# # 아래
# elif sharks[shark_num][3] == 2:
# sharks[shark_num][2] = sharks[shark_num][2] % (2 * R - 2)
# sharks[shark_num][0] += sharks[shark_num][2]
# while sharks[shark_num][0] <= 0 or sharks[shark_num][0] > R:
# if sharks[shark_num][3] == 1:
# sharks[shark_num][0] = sharks[shark_num][0] * (-1) + 2
# sharks[shark_num][3] = 2
# elif sharks[shark_num][3] == 2:
# sharks[shark_num][0] = R - (sharks[shark_num][0] - R)
# sharks[shark_num][3] = 1
# # 오른쪽
# elif sharks[shark_num][3] == 3:
# sharks[shark_num][2] = sharks[shark_num][2] % (2 * C - 2)
# sharks[shark_num][1] += sharks[shark_num][2]
# while sharks[shark_num][1] > C or sharks[shark_num][1] <= 0:
# if sharks[shark_num][3] == 3:
# sharks[shark_num][1] = C - (sharks[shark_num][1] - C)
# sharks[shark_num][3] = 4
# elif sharks[shark_num][3] == 4:
# sharks[shark_num][1] = sharks[shark_num][1] * (-1) + 2
# sharks[shark_num][3] = 3
# # 왼쪽
# elif sharks[shark_num][3] == 4:
# sharks[shark_num][2] = sharks[shark_num][2] % (2 * C - 2)
# sharks[shark_num][1] -= sharks[shark_num][2]
# while sharks[shark_num][1] > C or sharks[shark_num][1] <= 0:
# if sharks[shark_num][3] == 3:
# sharks[shark_num][1] = C - (sharks[shark_num][1] - C)
# sharks[shark_num][3] = 4
# elif sharks[shark_num][3] == 4:
# sharks[shark_num][1] = sharks[shark_num][1] * (-1) + 2
# sharks[shark_num][3] = 3
#
# #동족 상잔
# dead_shark = []
# for shark_num in sharks.keys():
# if shark_num not in dead_shark:
# for shark_num_next in sharks.keys():
# if shark_num_next not in dead_shark and shark_num != shark_num_next and sharks[shark_num][0] == sharks[shark_num_next][0] and sharks[shark_num][1] == sharks[shark_num_next][1]:
# if sharks[shark_num][4] > sharks[shark_num_next][4]:
# dead_shark.append(shark_num_next)
# elif sharks[shark_num][4] < sharks[shark_num_next][4]:
# dead_shark.append(shark_num)
# for shark_num in dead_shark:
# del sharks[shark_num]
#
# R, C, M = map(int, input().split())
#
# sharks = {}
# for i in range(M):
# sharks['s' + str(i)] = list(map(int, input().split()))
#
# shark_sum = 0
# for person_pos in range(1, C + 1):
# # 낚시
# get_shark_row = 99999
#
# for shark_num in sharks.keys():
# if sharks[shark_num][1] == person_pos and sharks[shark_num][0] < get_shark_row:
# get_shark = shark_num
# get_shark_row = sharks[get_shark][0]
#
# if get_shark_row != 99999:
# shark_sum += sharks[get_shark][4]
# del sharks[get_shark]
#
# # 상어 위치 변경 및 동족상잔
# shark_change()
#
#
# print(shark_sum) | [
"gieun625@gmail.com"
] | gieun625@gmail.com |
d23839856394dc02f015232730ab6a9f83793f3c | eb0711915d6bba2f765f052736e33ac9a9a397a6 | /HE1104/model/glee_samp/samp34/glee_chain.py | e949310be1517c45744911df615febd2d25190a9 | [] | no_license | dartoon/GL_HostGalaxy | cd2166f273ae7e0397a7d2d39f760ab59e86f014 | 7469f1c1e640d176a75cc6e9497920e494ad656a | refs/heads/master | 2016-08-11T13:27:17.545360 | 2016-04-07T19:04:57 | 2016-04-07T19:04:57 | 46,524,027 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | import numpy,subprocess
# Read the last 30 samples' chain indices (column 0) from the MCMC output.
# NOTE: update the filename when switching to a different chain.
with open('HE34.001.mcmc', 'r') as mcmc_file:
    para = numpy.loadtxt(mcmc_file)[-30:, 0]
i = int(para[0])
#subprocess.call(["echo int(para[0])"],shell=True)
#subprocess.call(["glee -S ${i} HE34.001.001"],shell=True)
#print para.shape
#print para.astype(int)
# Write the selected chain numbers, one integer per line.
# Bug fix: the original ended with `g.close` (no parentheses), which is a
# no-op attribute access, so the output file was never explicitly closed or
# flushed; a context manager guarantees both.
with open('chain_NO', 'w') as out_file:
    numpy.savetxt(out_file, para.astype(int), fmt='%i')
| [
"dingxuheng@mail.bnu.edu.cn"
] | dingxuheng@mail.bnu.edu.cn |
1baa5445cd69ffb9bf4d2b4b632526716f1c3056 | 7a24bd87eb80edefe0db75c84ace364fc093e04a | /examples/hybrid_inverter_sigmoid/hybrid_inverter_sigmoid.py | 21d6b806f4acac4a2f6b41921f70e1272bc22318 | [] | no_license | qibolun/DryVR_0.2 | 5ab1171b0d5a3d4bdaae30713cd450d5797b002e | 4ee2bbc736d382043be585906704bcc4dc115d3d | refs/heads/master | 2021-10-09T06:00:41.738030 | 2021-09-27T19:59:26 | 2021-09-27T19:59:26 | 102,651,161 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,436 | py | from examples import c2e2wrapper
def TC_Simulate(Mode, initialCondition, time_bound):
    """Run one C2E2 simulation of this hybrid model for DryVR.

    Args:
        Mode: symbolic mode name, one of "Rampup_A".."Rampup_G" or
            "Rampdown_A".."Rampdown_G".
        initialCondition: two-element initial state [x1, x2]; the simulator's
            time variable t is prepended internally.
        time_bound: simulation horizon passed through to the simulator.

    Returns:
        A list of rows, each the first three columns of a simulator row.
        NOTE(review): the original comment said "discard the t" yet the first
        column is kept — confirm which columns the wrapper actually returns.
    """
    # Map the symbolic mode name to the integer mode id the simulator expects
    # (replaces a 14-branch if/elif chain; unknown modes raise KeyError).
    mode_ids = {
        "Rampup_A": 1,
        "Rampup_B": 2,
        "Rampup_C": 3,
        "Rampup_D": 4,
        "Rampup_E": 5,
        "Rampup_F": 6,
        "Rampup_G": 7,
        "Rampdown_A": 8,
        "Rampdown_B": 9,
        "Rampdown_C": 10,
        "Rampdown_D": 11,
        "Rampdown_E": 12,
        "Rampdown_F": 13,
        "Rampdown_G": 14,
    }
    modenum = mode_ids[Mode]
    # NOTE(review): the binary path names uniform_NOR_sigmoid — confirm this
    # is the intended simulator for this model.
    simfile = './examples/uniform_NOR_sigmoid/simu'
    timeStep = 0.00005
    # This model needs special handling: the simulator tracks t itself, so
    # prepend t = 0.0 to the initial condition before invoking it.
    initialCondition = [0.0, initialCondition[0], initialCondition[1]]
    result = c2e2wrapper.invokeSimulator(
        modenum,
        simfile,
        initialCondition,
        timeStep,
        time_bound
    )
    # Bug fix: the original append line was missing a closing bracket
    # (`ret.append([line[0], line[1], line[2])`), a syntax error.
    return [[line[0], line[1], line[2]] for line in result]
| [
"pricejmh0911@gmail.com"
] | pricejmh0911@gmail.com |
e7dc5ed0f00f3e833ce7517bce5d9cc929080645 | 0600f0979fe17624d33aa74c739775f0f27a3bb5 | /docs/support/test_my_module.py | 5f767d88b77b3f65e6e07decef75159d16f98d1c | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | pmacosta/putil | 2c8177fb6b9be667b8d52b48bfd3272de8b0160d | 416cea52df8221981727e25d133e9b4e3f464798 | refs/heads/master | 2021-01-21T13:33:41.232773 | 2016-05-17T12:57:30 | 2016-05-17T12:57:30 | 44,289,408 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | # test_my_module.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111,C0410,C0411,R0903,W0104,W0105
import pytest, docs.support.my_module, putil.test
def test_func():
    """ Test func() function """
    # A non-string `name` must raise TypeError with the documented message.
    putil.test.assert_exception(
        docs.support.my_module.func,
        TypeError,
        'Argument `name` is not valid',
        {'name':5}
    )
    # Happy path: the greeting is built from the given name.
    assert docs.support.my_module.func('John') == 'My name is John'
def test_my_class():
    """ Test MyClass() class """
    obj = docs.support.my_module.MyClass()
    # Reading `value` before it has been set raises RuntimeError.
    with pytest.raises(RuntimeError) as excinfo:
        obj.value
    assert putil.test.get_exmsg(excinfo) == 'Attribute `value` not set'
    # Assigning an invalid value is rejected with RuntimeError.
    with pytest.raises(RuntimeError) as excinfo:
        obj.value = 'a'
    assert putil.test.get_exmsg(excinfo) == 'Argument `value` is not valid'
| [
"pmasdev@gmail.com"
] | pmasdev@gmail.com |
12e394688e18a0a8cd50c4672cb605e0ec4083fc | a635b8d51016220a6d84808def431c27dde41b90 | /libcms/apps/journal/urls.py | b06afb6d9b18c032c58083f127b4facac70b13b9 | [] | no_license | isergey/chel | aab3ac98ae2a10258f7a5afce88c74f9e13a2d7f | d1a38bfe7ebba80d9c39ae3b0d54ebfd2965046c | refs/heads/master | 2023-07-07T02:13:41.363452 | 2023-06-26T10:25:14 | 2023-06-26T10:25:14 | 3,816,204 | 1 | 0 | null | 2023-03-31T14:52:31 | 2012-03-24T09:33:53 | JavaScript | UTF-8 | Python | false | false | 322 | py | # -*- coding: utf-8 -*-
from django.urls import re_path, include
from . import views
from .administration import urls as aurls
urlpatterns = (
    # Index page of the application.
    re_path(r'^$', views.index , name='index'),
    # Redirect endpoint; exact semantics live in views.redirect_to_url.
    re_path(r'^redirect$', views.redirect_to_url , name='redirect_to_url'),
    # Administration sub-application mounted under admin/.
    re_path(r'^admin/', include((aurls, 'admin'))),
)
| [
"dostovalov@gmail.com"
] | dostovalov@gmail.com |
bab55d69218c204a901ec720b1cc08572d8a0b35 | eb35535691c4153ba2a52774f0e40468dfc6383d | /hash_table/find_diff_str.py | 2c56e7768ccda2172fbde10be2df7b65ac7c8071 | [] | no_license | BJV-git/leetcode | 1772cca2e75695b3407bed21af888a006de2e4f3 | dac001f7065c3c5b210024d1d975b01fb6d78805 | refs/heads/master | 2020-04-30T19:04:12.837450 | 2019-03-21T21:56:24 | 2019-03-21T21:56:24 | 177,027,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # logic: said it got added, by random shuffle
# means no set, as not sure if single or duped
# using xor
def diff(s, t):
    """Return the single character present in `t` but not in `s`.

    `t` is `s` shuffled with exactly one extra character added (possibly a
    duplicate of an existing one).  XOR-ing the code points of every
    character in s + t cancels all matched pairs, leaving only the extra
    character's code point — O(len(s) + len(t)) time, O(1) space.

    Bug fix: the original function had two further approaches below an
    unconditional `return`, i.e. unreachable dead code (and the dict-based
    fallback was itself broken: `for i, j in d` tries to unpack single-char
    keys).  The dead code has been removed.
    """
    ans = 0
    for c in s + t:
        ans ^= ord(c)
    return chr(ans)
"noreply@github.com"
] | BJV-git.noreply@github.com |
efafed03e4c3629dbeaf409f4d9be4f0dedbd82e | 21e10ee87c314cdceaec04217e42332237c7fb58 | /SVM_Classifier.py | 86d4ae1b6e9f883d3ce122762d7655d3c9d34f1d | [] | no_license | QuantuMobileSoftware/SindhiRecognizer | 199f09d5b304dc2354972a38b7471d83dae0dfdc | 6c2af0ea9c796e7a4bc3b2b028b269a4a8be829a | refs/heads/master | 2021-01-22T23:06:14.023760 | 2017-03-20T14:43:27 | 2017-03-20T14:43:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | import sys
import cv2
import argparse
import warnings
from time import time
from docx import Document
from sklearn.externals import joblib
reload(sys)  # Python 2 only: required before calling setdefaultencoding
sys.setdefaultencoding('utf-8')
# Silence library version warnings by replacing warnings.warn with a no-op.
def warn(*args, **kwargs):
    # Intentionally do nothing.
    pass
warnings.warn = warn
# Command-line interface: path to the input image.
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--image', help='path to .jpg file')
args = vars(ap.parse_args())
# Start the recognition timer.
start_time = time()
print ('Start recognition')
# Load the input image and the pickled classifier.
image = cv2.imread(args['image'])
model = joblib.load("model.pkl")
# Preprocess: resize to 80x50, binarize via grayscale threshold at 200,
# convert back to 3 channels, and flatten into a 1-D feature vector.
image = cv2.resize(image, (80, 50), interpolation=cv2.INTER_AREA)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
threshold, image = cv2.threshold(image, 200, 255, cv2.THRESH_BINARY)
image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
image = image.flatten()
# Predict the class label.
# NOTE(review): newer scikit-learn versions require a 2-D array for
# predict() — confirm the model/sklearn version this was trained with.
result = model.predict(image)
# Emit the recognized document: print its paragraphs and save a copy.
document = Document('txt/{}.docx'.format(result[0]))
for para in document.paragraphs:
    print (para.text)
document.save('output.docx')
# Report elapsed time.
end_time = time() - start_time
print ('Recognition ended in {} seconds'.format(round(end_time, 2)))
"you@example.com"
] | you@example.com |
941719ccebe58c5ac65e905376ab4b8eb872dce4 | 786de89be635eb21295070a6a3452f3a7fe6712c | /PSHist/tags/V00-03-00/SConscript | ea4b597c8c16aee6b43d5b58687145b681ac6fc9 | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,181 | #--------------------------------------------------------------------------
# File and Version Information:
# $Id$
#
# Description:
# SConscript file for package PSHist
#------------------------------------------------------------------------
# Do not delete following line, it must be present in
# SConscript file for any SIT project
Import('*')
#
# For the standard SIT packages which build libraries, applications,
# and Python modules it is usually sufficient to call
# standardSConscript() function which defines rules for all
# above targets. Many standard packages do not need any special options,
# but those which need can modify standardSConscript() behavior using
# a number of arguments, here is a complete list:
#
# LIBS - list of additional libraries needed by this package
# BINS - dictionary of executables and their corresponding source files
# TESTS - dictionary of test applications and their corresponding source files
# SCRIPTS - list of scripts in app/ directory
# UTESTS - names of the unit tests to run, if not given then all tests are unit tests
# PYEXTMOD - name of the Python extension module, package name used by default
#
#
# Build this package with the default SIT rules; no special options needed.
standardSConscript()
| [
"salnikov@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] | salnikov@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7 | |
3b0fc70408fbc7d9c1869f21fc189ff9745646ab | 3c97ecb4ca2104ef454c9768cbe6a0c759d4b174 | /scratch/debug_overwrite/scratch_20.py | 86217bfe8856e29b7eb5241812f3a558d5c8602b | [
"BSD-3-Clause"
] | permissive | takuma-yoneda/ml_logger | 44a1add97e00e32e8b66bbac5d4df2711fabede8 | dd619ead4c4ae6927e6093982b40a27ff51b47ec | refs/heads/master | 2023-07-13T07:02:22.430393 | 2021-08-01T20:46:34 | 2021-08-01T20:46:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | import pickle
# Scratch script: reproduce and document the ml_logger "overwrite" bug by
# logging a few metrics to a remote logger and reading them back via cmx.
from cmx import doc
from ml_logger import logger
doc @ """
# Debug Logger Overwrite Bug
Reading from metrics file:
"""
# Point the logger at the remote server and start from a clean prefix.
logger.configure("http://54.71.92.65:8080", prefix='geyang/project/debug_logs')
logger.remove('debug_logs')
doc.print(logger.root)
logger.log_text("""
charts:
- i
""", dedent=True, filename=".charts.yml")
# Log three key/value entries, then flush so they reach the server.
for i in range(3):
    logger.log_key_value(i=i)
logger.flush()
import time
time.sleep(1)  # give the server a moment before reading back
doc @ "```ansi"
doc @ logger.load_text("outputs.log")
doc @ "```"
with doc:
    data = logger.read_metrics()
    doc.print(data)
doc.flush()
exit()
# --- Everything below exit() is dead code kept for reference only. ---
doc @ """
# debug logger overwrite bug
Reading from metrics file:
"""
with open('outputs.log') as f:
    for l in f.readlines():
        print(l.rstrip())
        # NOTE(review): pickle.load expects a file object, not a string
        # line — this would fail if this dead branch were re-enabled.
        print(pickle.load(l))
with open('metrics.pkl', 'rb') as f:
    a = pickle.load(f)
print(a)
if __name__ == '__main__':
    logger.configure(root="http://improbable-ai.dash.ml:8080", register_experiment=False)
    df = logger.read_metrics(
        path="/geyang/dreamer_v2/2021/01-22/01_atari/train/02.13.42/atari_solaris/s-200/6/metrics.pkl")
    df # dataframe
    print(df)
| [
"yangge1987@gmail.com"
] | yangge1987@gmail.com |
6ef8af05d4c89526fa1b2729dcc670018295f892 | 544cfadc742536618168fc80a5bd81a35a5f2c99 | /test/app_compat/csuite/integration_tests/csuite_crash_detection_test.py | 9dd8a00f587c2639b5b5152ae31b4d25681ad48e | [] | no_license | ZYHGOD-1/Aosp11 | 0400619993b559bf4380db2da0addfa9cccd698d | 78a61ca023cbf1a0cecfef8b97df2b274ac3a988 | refs/heads/main | 2023-04-21T20:13:54.629813 | 2021-05-22T05:28:21 | 2021-05-22T05:28:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,560 | py | # Lint as: python3
#
# Copyright 2020, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests C-Suite's crash detection behavior."""
import csuite_test_utils
class CrashDetectionTest(csuite_test_utils.TestCase):
  """Verifies C-Suite's pass/fail reporting for healthy and crashing apps."""
  def setUp(self):
    """Creates the adb wrapper, package repository and harness per test."""
    super(CrashDetectionTest, self).setUp()
    self.adb = csuite_test_utils.Adb()
    self.repo = csuite_test_utils.PackageRepository()
    self.harness = csuite_test_utils.CSuiteHarness()
  def tearDown(self):
    """Cleans up harness and repository state created in setUp."""
    super(CrashDetectionTest, self).tearDown()
    self.harness.cleanup()
    self.repo.cleanup()
  def test_no_crash_test_passes(self):
    """A well-behaved app is reported PASSED and uninstalled afterwards."""
    test_app_package = 'android.csuite.nocrashtestapp'
    # Clear logcat so the launch check below only sees this run's output.
    self.adb.run(['logcat', '-c'])
    completed_process = self.run_test(
        test_app_package=test_app_package,
        test_app_module='csuite_no_crash_test_app')
    self.expect_regex(completed_process.stdout, r"""PASSED\s*:\s*1""")
    self.expect_app_launched(test_app_package)
    self.expect_package_not_installed(test_app_package)
  def test_crash_on_launch_test_fails(self):
    """An app that crashes on launch is reported FAILED and uninstalled."""
    test_app_package = 'android.csuite.crashonlaunchtestapp'
    # Clear logcat so the launch check below only sees this run's output.
    self.adb.run(['logcat', '-c'])
    completed_process = self.run_test(
        test_app_package=test_app_package,
        test_app_module='csuite_crash_on_launch_test_app')
    self.expect_regex(completed_process.stdout, r"""FAILED\s*:\s*1""")
    self.expect_app_launched(test_app_package)
    self.expect_package_not_installed(test_app_package)
  def run_test(self, test_app_package, test_app_module):
    """Set up and run the launcher for a given test app."""
    # We don't check the return code since adb returns non-zero exit code if
    # the package does not exist.
    self.adb.uninstall(test_app_package, check=False)
    self.assert_package_not_installed(test_app_package)
    module_name = self.harness.add_module(test_app_package)
    self.repo.add_package_apks(
        test_app_package, csuite_test_utils.get_test_app_apks(test_app_module))
    # Launch via the app remote-file resolver so the APK is fetched from the
    # local package repository set up above.
    file_resolver_class = 'com.android.csuite.config.AppRemoteFileResolver'
    return self.harness.run_and_wait([
        '--serial',
        csuite_test_utils.get_device_serial(),
        'run',
        'commandAndExit',
        'launch',
        '-m',
        module_name,
        '--enable-module-dynamic-download',
        '--dynamic-download-args',
        '%s:uri-template=file://%s/{package}' %
        (file_resolver_class, self.repo.get_path())
    ])
  def expect_regex(self, s, regex):
    """Soft-asserts (subTest) that `s` matches `regex`."""
    with self.subTest():
      self.assertRegex(s, regex)
  def assert_package_not_installed(self, package_name):
    """Hard-asserts the package is absent from the device."""
    self.assertNotIn(package_name, self.adb.list_packages())
  def expect_package_not_installed(self, package_name):
    """Soft-asserts (subTest) the package is absent from the device."""
    with self.subTest():
      self.assert_package_not_installed(package_name)
  def expect_app_launched(self, tag):
    """Soft-asserts the app logged its launch message under `tag`."""
    logcat_process = self.adb.run(['logcat', '-d', '-v', 'brief', '-s', tag])
    with self.subTest():
      self.assertIn('App launched', logcat_process.stdout)
if __name__ == '__main__':
csuite_test_utils.main()
| [
"rick_tan@qq.com"
] | rick_tan@qq.com |
b61bfcf65971bd7ff9bf1bd4987d1db5eade588c | 84c1e780a349c4bae2d6cf4c1da72889d5222797 | /Python/Numpy/Linear Algebra/linear_algebra.py | 1604f1d3feede1f8abdaa810eb1cabeedf232a41 | [
"MIT"
] | permissive | brianchiang-tw/HackerRank | 18e31583b10cf2189adac97e7cb2997d46790bcd | 02a30a0033b881206fa15b8d6b4ef99b2dc420c8 | refs/heads/master | 2020-09-23T23:18:08.253868 | 2020-02-13T14:16:22 | 2020-02-13T14:16:22 | 225,612,833 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | import numpy as np
if __name__ == '__main__':
    # Read an N x N matrix (first line: N, then N rows of floats) from stdin
    # and print its determinant using numpy's legacy 1.13 print format.
    size = int(input())
    flat = []
    for _ in range(size):
        flat += list(map(float, input().split()))
    matrix = np.reshape(np.array(flat), (size, size))
    determinant = np.linalg.det(matrix)
    np.set_printoptions(legacy='1.13')
    print(determinant)
| [
"brianchiang1988@icloud.com"
] | brianchiang1988@icloud.com |
9e51c017d58a209ca1d4d21a997e29a3ebe2d9d4 | bc2327d2bce695bb4881be63b1912f550857fd14 | /data_structures/examples/example2_8.py | 05f54255aae14831835018af84afadf975b148b2 | [] | no_license | mentalclear/fluent-in-python | 1a1d9ad30e949e72d8633156091b84b6d52b85bc | 243cff274861abc853b4ba5d03090191df5cd7db | refs/heads/master | 2023-08-05T19:26:48.787996 | 2021-10-06T13:04:14 | 2021-10-06T13:04:14 | 402,944,060 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | metro_areas = [
('Tokyo', 'JP', 36.933, (35.689722, 139.691667)),
('Delhi NCR', 'IN', 21.935, (28.613889, 77.208889)),
('Mexico City', 'MX', 20.142, (19.433333, -99.133333)),
('New York-Newark', 'US', 20.104, (40.808611, -74.020386)),
('São Paulo', 'BR', 19.649, (-23.547778, -46.635833)),
]
def main():
    """Print a table of metro areas in the western hemisphere (lon <= 0)."""
    print(f'{"":15} | {"latitude":>9} | {"longitude":>9}')
    western = (record for record in metro_areas if record[3][1] <= 0)
    for name, _, _, (lat, lon) in western:
        print(f'{name:15} | {lat:9.4f} | {lon:9.4f}')
if __name__ == '__main__':
main() | [
"mentalclear@gmail.com"
] | mentalclear@gmail.com |
322bac495f779103d108af47f10b388fc9a45d48 | ab8508696b4938331c752aaed0cc5e4062bc8625 | /week2/FirstDay/keyvaluestore2/store/views.py | 5b8fd90e5a2592923b4652fbbb755e0d193a859b | [] | no_license | Nimor111/Django-Course | 5765097cb37ce8b9c1efe93c9a3dad23cd7bd07d | e3b36a345227848928f07af0efb7c81ff082bc22 | refs/heads/master | 2021-01-21T10:34:45.112421 | 2017-05-11T15:00:03 | 2017-05-11T15:00:03 | 83,458,838 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,978 | py | from django.shortcuts import render
from django.http import JsonResponse, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from .logic import create_user, write_key, get_key, delete_key
import json
# Create your views here.
@csrf_exempt
def create_user_view(request):
    """Allocate a fresh user identifier and return it as JSON."""
    return JsonResponse({'identifier': create_user()})
@csrf_exempt
def write_key_view(request, identifier):
    """Store a key/value pair for `identifier` from a JSON POST body.

    Responses: 201 on success, 404 if the identifier is unknown
    (ValueError from write_key), 403 for any non-POST method.
    """
    if request.method != 'POST':
        return HttpResponse(status=403)
    payload = json.loads(request.body.decode('utf-8'))
    try:
        write_key(identifier, payload['key'], payload['value'])
    except ValueError:
        return HttpResponse(status=404)
    return HttpResponse(status=201)
@csrf_exempt
def get_or_delete_view(request, identifier, key):
    """Dispatch GET to the read view and DELETE to the delete view.

    Any other HTTP method falls through and returns None, matching the
    original behavior.
    """
    handlers = {
        'GET': get_key_view,
        'DELETE': delete_key_view,
    }
    handler = handlers.get(request.method)
    if handler is not None:
        return handler(request, identifier, key)
@csrf_exempt
def get_key_view(_, identifier, key):
    """Return the stored value for (identifier, key) as indented JSON.

    Responses: 200 with the data on success, 404 with an error payload if
    the key or identifier is unknown (KeyError / ValueError from get_key).
    """
    try:
        data = get_key(identifier, key)
        return JsonResponse(data, json_dumps_params={'indent': 4}, status=200)
    # Both lookup failures produced byte-identical 404 responses in the
    # original; merge the duplicated except blocks into one.
    except (KeyError, ValueError):
        return JsonResponse({"error": "Key not found."},
                            json_dumps_params={'indent': 4},
                            status=404)
@csrf_exempt
def delete_key_view(_, identifier, key):
    """Delete the stored value for (identifier, key).

    Responses: 202 on success, 404 if the key/identifier is unknown.
    """
    try:
        # Return value of delete_key is unused; only success/failure matters.
        data = delete_key(identifier, key)
        return JsonResponse({'status': 'deleted'}, status=202)
    # NOTE(review): the two 404 payloads below are inconsistent with each
    # other (and with get_key_view) — confirm which shape clients expect.
    except KeyError:
        return JsonResponse({'error': 'key'}, status=404)
    except ValueError:
        return JsonResponse({"error": "Key not found."},
                            json_dumps_params={'indent': 4},
                            status=404)
| [
"georgi.bojinov@hotmail.com"
] | georgi.bojinov@hotmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.