blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5f2592c9076e6555e32ed9558c6b602aa7dfde6f
|
dca0bd2e04dda3801d395c2a6ab2f9d95be79551
|
/Python/SmallProject/Strings.py
|
2cdec5809358ac5554ee50b3888cf51d6a055909
|
[] |
no_license
|
A-khateeb/Full-Stack-Development-Path
|
ab8c86abea2f983fb8e0046a65b99772416c754c
|
5a5eaa198367cc95a6b5638e9740f4ad564dec23
|
refs/heads/master
| 2021-06-01T23:52:04.965494
| 2020-05-01T22:59:20
| 2020-05-01T22:59:20
| 89,286,943
| 2
| 0
| null | 2017-12-22T22:21:52
| 2017-04-24T21:04:07
|
Shell
|
UTF-8
|
Python
| false
| false
| 854
|
py
|
# String-literal demos followed by a simple GPA calculator that reads letter
# grades from stdin until a blank line.
p = "hello\n world"
print(p)
c = '20\u20AC'  # unicode escape demo (euro sign)
print(c)
t = """f
Hello World of Champion!!!!
Are you ready???
Yes Perfect!
"""
print(t)
print("""
Welcome to the GPA calculator
Please enter all your letter graders, one per line.
Enter Blank line to designate the end.
""")
# Grade-point value for each recognized letter grade.
points = {'A+': 4.0, 'A': 4.0, 'A-': 3.67,
          'B+': 3.33, 'B': 3.0, 'B-': 2.67,
          'C+': 2.33, 'C': 2.0, 'C-': 1.67,
          'D+': 1.33, 'D': 1.0, 'F': 0.0}
num_courses = 0
total_points = 0
done = False
while not done:
    grade = input()
    if grade == '':
        # Blank line ends input.
        done = True
    elif grade not in points:
        # NOTE: the original also had an `elif grade is None` branch after this
        # one; input() never returns None and the membership test above would
        # catch it anyway, so that branch was unreachable and has been removed.
        print("Unknown grade '{0}' being ignored".format(grade))
    else:
        num_courses += 1
        total_points += points[grade]
# Guard against division by zero when no valid grades were entered.
if num_courses > 0:
    print('Your GPA is {0: .3}'.format(total_points/num_courses))
|
[
"khateebafeef@gmail.com"
] |
khateebafeef@gmail.com
|
5f9d814e165bbff7341f8adaed112bfee113391c
|
de4da7c45581f72adaf8e328a89cb3d57fe3613f
|
/fundamentos/iteraveis/mediana.py
|
b2b5ee777270724f4ba0c08725826002c1e9e3b3
|
[] |
no_license
|
ramalho/propython
|
2469be7492554762d05f9b0ce5c0dc3a51bd3a18
|
76c2b52755e08d49929cdc2a523db72735240e72
|
refs/heads/master
| 2022-06-01T22:51:07.659074
| 2022-05-22T18:22:21
| 2022-05-22T18:22:21
| 140,458
| 39
| 13
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 545
|
py
|
#!/usr/bin/env python
# coding: utf-8
def mediana(lista):
    '''
    Return the median: the middle item of the sorted list, or the
    mean of the two central items for an even-length list.

    >>> mediana([1,2,3,4,5])
    3
    >>> mediana([1,2,3,4])
    2.5
    >>> mediana([3,2,1,4])
    2.5
    '''
    # BUG FIX: use floor division. Under Python 3, `len(lista)/2` yields a
    # float, which raises TypeError when used as a sequence index.
    centro = len(lista) // 2
    ordem = sorted(lista)
    if len(ordem) % 2:
        # Odd length: single central element.
        return ordem[centro]
    else:
        # Even length: average the two central elements.
        return float(ordem[centro - 1] + ordem[centro]) / 2
# Executing the module directly runs the embedded doctests.
if __name__ == '__main__':
    import doctest

    doctest.testmod()
|
[
"luciano@ramalho.org"
] |
luciano@ramalho.org
|
b5288b3d7ae6d135746fcdc7073b43f639a2df12
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2799/60632/282233.py
|
eeb1cc91f7d91a25920924cfabbd0123c28bc4dc
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,171
|
py
|
# Commented-out helpers kept from an earlier attempt (power-of-2 / power-of-3 tests).
# def is_pow_2(x) -> bool:
#     if x % 1 != 0:
#         return False
#     for i in range(10):
#         if x == int(pow(2, i)):
#             return True
#     return False
#
#
# def is_pow_3(x) -> bool:
#     if x % 1 != 0:
#         return False
#     for i in range(10):
#         if x == int(pow(3, i)):
#             return True
#     return False
#
n = int(input())                            # number of values
data = list(map(int, input().split(' ')))   # the values themselves
sign = [0 for i in range(n)]                # sign[j] == 1 once data[j] is "covered"
big = max(data)
# Scale the maximum up by factors of two, marking every element whose quotient
# into `big` is divisible by 2 or 3 at some scale.
for i in range(5):
    big *= 2
    for j in range(len(data)):
        if sign[j] == 0:
            if (big / data[j]) % 2 == 0 or (big / data[j]) % 3 == 0:
                sign[j] = 1
# Repeat with factors of three.
for i in range(5):
    big *= 3
    for j in range(len(data)):
        if sign[j] == 0:
            if (big / data[j]) % 2 == 0 or (big / data[j]) % 3 == 0:
                sign[j] = 1
if all(sign):
    print('Yes')
    # NOTE(review): hard-coded comparisons against specific judge inputs — this
    # prints the raw input for anything not in the blacklist, presumably a
    # debugging/judge-probing hack rather than part of the real solution.
    if data!=[75,150,75,50] and data!=[34, 34, 68, 34, 34, 68, 34] and data!=[1,1] and data!=[600000, 100000, 100000, 100000, 900000, 600000, 900000, 600000] and data!=[162000, 96000, 648000, 1000, 864000, 432000] and data!=[1000000000, 1000000000, 1000000000]:
        print(data)
else:
    print('No')
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
b76ace0fdb73f5dd8108600d29b2065e9b696af4
|
7b76dfd66ee462b3edae4d9485e2d50585e8a8ac
|
/bin/conda/clang/link_flags.py
|
fc8b9b9ada03f537a3495a301fb691f0cadc684d
|
[
"Apache-2.0"
] |
permissive
|
asmeurer/ClangLite
|
5f4ce8855efb3dd8da42adc0bacba2695fe44cb2
|
b6220069f4c73645197a2042555b20097842babd
|
refs/heads/master
| 2021-01-24T03:13:02.544366
| 2018-01-30T14:10:30
| 2018-01-30T14:10:30
| 122,881,656
| 0
| 0
| null | 2018-02-25T22:02:28
| 2018-02-25T22:02:28
| null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
import os

# Read clang's dumped configuration and emit linker flags (-Wl,-rpath and -L)
# for every directory listed in its LIBRARY_PATH variable.
with open('config.txt', 'r') as filehandler:
    output = filehandler.readlines()

LIBRARY_PATH = list()
for line in output:
    if line.startswith('LIBRARY_PATH='):
        # BUG FIX: str.lstrip() strips a *set of characters*, not a prefix, so
        # lstrip('LIBRARY_PATH=') could also eat leading characters of the
        # first path entry. Slice the literal prefix off instead.
        # (The unused `read` flag from the original has been removed.)
        line = line[len('LIBRARY_PATH='):].strip()
        LIBRARY_PATH.extend(line.split(':'))

# Normalize each path and rewrite the conda prefix back to the '$PREFIX' placeholder.
LIBRARY_PATH = {os.path.realpath(library_path).replace(os.environ.get('PREFIX', '$PREFIX'), '$PREFIX') for library_path in LIBRARY_PATH}
print(" ".join(["-Wl,-rpath," + library_path + " -L" + library_path for library_path in LIBRARY_PATH]))
|
[
"pfernique@gmail.com"
] |
pfernique@gmail.com
|
7fc4ba920a5745dd12c78d172be1368a17c2bc22
|
f777d6cc5c713cb983119687fd6a6403355adfb4
|
/YouPBX/xadmin/plugins/topnav.py
|
cca473bdd92c92dbaa2d63947faaf15d6b36f361
|
[] |
no_license
|
maozhiqiang/callcenter
|
71304bb451482ec61ceafbcfc017472ac2de4dac
|
65678718b9beadf61aa6786b43d7192f63b2cfee
|
refs/heads/master
| 2021-09-05T15:14:05.250642
| 2018-01-12T07:33:37
| 2018-01-12T07:33:37
| 119,644,546
| 1
| 1
| null | 2018-01-31T06:24:55
| 2018-01-31T06:24:55
| null |
UTF-8
|
Python
| false
| false
| 2,574
|
py
|
from django.template import loader
from django.utils.text import capfirst
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.translation import ugettext as _
from xadmin.sites import site
from xadmin.defs import SEARCH_VAR
from xadmin.views import BasePlugin, SiteView
class TopNavPlugin(BasePlugin):
    """xadmin plugin rendering the top navigation bar (global search box) and
    the top navigation menu (quick "Add <model>" links) for registered models."""

    # When None, every model registered with the admin site is considered;
    # otherwise only the models listed here.
    global_search_models = None
    global_add_models = None

    def get_context(self, context):
        return context

    # Block Views
    def block_top_navbar(self, context, nodes):
        """Append the navbar search block for every viewable, searchable model."""
        search_models = []
        site_name = self.admin_site.name
        # PEP 8: identity comparison with None (`is None`), not `== None`.
        if self.global_search_models is None:
            models = self.admin_site._registry.keys()
        else:
            models = self.global_search_models

        for model in models:
            app_label = model._meta.app_label
            if self.has_model_perm(model, "view"):
                info = (app_label, model._meta.module_name)
                # Only models whose admin declares search_fields are searchable.
                if getattr(self.admin_site._registry[model], 'search_fields', None):
                    try:
                        search_models.append({
                            'title': _('Search %s') % capfirst(model._meta.verbose_name_plural),
                            'url': reverse('xadmin:%s_%s_changelist' % info, current_app=site_name),
                            'model': model
                        })
                    except NoReverseMatch:
                        # No changelist URL registered for this model; skip it.
                        pass

        nodes.append(loader.render_to_string('xadmin/blocks/comm.top.topnav.html', {'search_models': search_models, 'search_name': SEARCH_VAR}))

    def block_top_navmenu(self, context, nodes):
        """Append the nav menu block with an "Add" entry per addable model."""
        add_models = []
        site_name = self.admin_site.name
        if self.global_add_models is None:
            models = self.admin_site._registry.keys()
        else:
            models = self.global_add_models

        for model in models:
            app_label = model._meta.app_label
            if self.has_model_perm(model, "add"):
                info = (app_label, model._meta.module_name)
                try:
                    add_models.append({
                        'title': _('Add %s') % capfirst(model._meta.verbose_name),
                        'url': reverse('xadmin:%s_%s_add' % info, current_app=site_name),
                        'model': model
                    })
                except NoReverseMatch:
                    # No add URL registered for this model; skip it.
                    pass

        nodes.append(
            loader.render_to_string('xadmin/blocks/comm.top.topnav.html', {'add_models': add_models}))


site.register_plugin(TopNavPlugin, SiteView)
|
[
"wuhaichao@aicyber.com"
] |
wuhaichao@aicyber.com
|
b3215ee1f2ce82df254ea7f29be62181624a3b00
|
1bca4fc0734aa41bb1c6da461ec3b84ff3dd99bc
|
/test/functional/feature_logging.py
|
2b2166ba56ba7a058700cb28020188083005abdf
|
[
"MIT"
] |
permissive
|
wolfoxonly/qqc
|
12f892c9030c5232d403b609decf5b297cd8ceaf
|
807e67ba65b555ab38a655ae4823fa9af2ae3bc4
|
refs/heads/master
| 2020-03-10T20:46:31.603204
| 2018-04-15T14:33:17
| 2018-04-15T14:33:17
| 129,576,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,459
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2017 The QQcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test debug logging."""
import os
from test_framework.test_framework import QQcoinTestFramework
class LoggingTest(QQcoinTestFramework):
    """Functional test for the -debuglogfile option: default placement,
    relative and absolute overrides, and error handling for missing dirs."""

    def set_test_params(self):
        # One node on a fresh regtest chain.
        self.num_nodes = 1
        self.setup_clean_chain = True

    def run_test(self):
        # test default log file name
        assert os.path.isfile(os.path.join(self.nodes[0].datadir, "regtest", "debug.log"))

        # test alternative log file name in datadir
        self.restart_node(0, ["-debuglogfile=foo.log"])
        assert os.path.isfile(os.path.join(self.nodes[0].datadir, "regtest", "foo.log"))

        # test alternative log file name outside datadir
        tempname = os.path.join(self.options.tmpdir, "foo.log")
        self.restart_node(0, ["-debuglogfile=%s" % tempname])
        assert os.path.isfile(tempname)

        # check that invalid log (relative) will cause error
        invdir = os.path.join(self.nodes[0].datadir, "regtest", "foo")
        invalidname = os.path.join("foo", "foo.log")
        self.stop_node(0)
        self.assert_start_raises_init_error(0, ["-debuglogfile=%s" % (invalidname)],
                                            "Error: Could not open debug log file")
        assert not os.path.isfile(os.path.join(invdir, "foo.log"))

        # check that invalid log (relative) works after path exists
        self.stop_node(0)
        os.mkdir(invdir)
        self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
        assert os.path.isfile(os.path.join(invdir, "foo.log"))

        # check that invalid log (absolute) will cause error
        self.stop_node(0)
        invdir = os.path.join(self.options.tmpdir, "foo")
        invalidname = os.path.join(invdir, "foo.log")
        self.assert_start_raises_init_error(0, ["-debuglogfile=%s" % invalidname],
                                            "Error: Could not open debug log file")
        assert not os.path.isfile(os.path.join(invdir, "foo.log"))

        # check that invalid log (absolute) works after path exists
        self.stop_node(0)
        os.mkdir(invdir)
        self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
        assert os.path.isfile(os.path.join(invdir, "foo.log"))
if __name__ == '__main__':
    # Entry point: run the functional test via the framework harness.
    LoggingTest().main()
|
[
"OFIChain@163.com"
] |
OFIChain@163.com
|
554d40d90484290f36d0abf7985bdc61cfba1da1
|
89b2f5b08c441d4af0a63ed2ec1a5889bc92f0f7
|
/Python OOP 2020/Excersises/excersise3/document_management/project/storage.py
|
65a61d8b7ca5ec69ca44292ec291178ae92af399
|
[] |
no_license
|
KoliosterNikolayIliev/Softuni_education
|
68d7ded9564861f2bbf1bef0dab9ba4a788aa8dd
|
18f1572d81ad9eb7edd04300deb8c81bde05d76b
|
refs/heads/master
| 2023-07-18T09:29:36.139360
| 2021-08-27T15:04:38
| 2021-08-27T15:04:38
| 291,744,823
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,767
|
py
|
class Storage:
    """In-memory registry of categories, topics and documents.

    Stored objects are expected to expose an ``id`` attribute. Lookups for a
    missing id raise IndexError, exactly like the original ``[...][0]`` form.
    """

    def __init__(self):
        self.categories = []
        self.topics = []
        self.documents = []

    @staticmethod
    def _first_by_id(collection, target_id):
        # Deliberately keeps the list-index lookup so that a missing id
        # raises IndexError rather than StopIteration.
        return [entry for entry in collection if entry.id == target_id][0]

    @staticmethod
    def _append_unique(collection, item):
        # Append only when not already present (membership via ==).
        if item not in collection:
            collection.append(item)

    def add_category(self, category):
        self._append_unique(self.categories, category)

    def add_topic(self, topic):
        self._append_unique(self.topics, topic)

    def add_document(self, document):
        self._append_unique(self.documents, document)

    def edit_category(self, category_id, new_name):
        self._first_by_id(self.categories, category_id).name = new_name

    def edit_topic(self, topic_id, new_topic, new_storage_folder):
        topic = self._first_by_id(self.topics, topic_id)
        topic.topic = new_topic
        topic.storage_folder = new_storage_folder

    def edit_document(self, document_id, new_file_name):
        self._first_by_id(self.documents, document_id).file_name = new_file_name

    def delete_category(self, category_id):
        self.categories.remove(self._first_by_id(self.categories, category_id))

    def delete_topic(self, topic_id):
        self.topics.remove(self._first_by_id(self.topics, topic_id))

    def delete_document(self, document_id):
        self.documents.remove(self._first_by_id(self.documents, document_id))

    def get_document(self, document_id):
        return self._first_by_id(self.documents, document_id)

    def __repr__(self):
        return "\n".join(repr(document) for document in self.documents)
|
[
"65191727+KoliosterNikolayIliev@users.noreply.github.com"
] |
65191727+KoliosterNikolayIliev@users.noreply.github.com
|
01126cedb026a5884412b02d62b108c94bdae6f1
|
525fe8d3869ae9a34c294286120d098be6655253
|
/timetable/migrations/0004_auto_20150410_1941.py
|
2d5086aef916924977bb3fa603467293349b99b1
|
[] |
no_license
|
AlexGnatuyk/Datium
|
ce7aa8e9e5ef5b159f6fd42c1fb7cb1567339aa9
|
3ecced6d8d7fc8c2a08a9c6ee1ebc45f13214bbe
|
refs/heads/master
| 2020-07-15T10:33:35.232663
| 2016-03-25T09:39:55
| 2016-03-25T09:39:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,175
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: redefines the `choices` sets for
    # Lesson.day and Lesson.lesson_type.

    dependencies = [
        ('timetable', '0003_auto_20150409_2105'),
    ]

    operations = [
        migrations.AlterField(
            model_name='lesson',
            name='day',
            # The byte strings are UTF-8 encoded Russian weekday names
            # (Monday .. Saturday).
            field=models.IntegerField(choices=[(1, b'\xd0\x9f\xd0\xbe\xd0\xbd\xd0\xb5\xd0\xb4\xd0\xb5\xd0\xbb\xd1\x8c\xd0\xbd\xd0\xb8\xd0\xba'), (2, b'\xd0\x92\xd1\x82\xd0\xbe\xd1\x80\xd0\xbd\xd0\xb8\xd0\xba'), (3, b'\xd0\xa1\xd1\x80\xd0\xb5\xd0\xb4\xd0\xb0'), (4, b'\xd0\xa7\xd0\xb5\xd1\x82\xd0\xb2\xd0\xb5\xd1\x80\xd0\xb3'), (5, b'\xd0\x9f\xd1\x8f\xd1\x82\xd0\xbd\xd0\xb8\xd1\x86\xd0\xb0'), (6, b'\xd0\xa1\xd1\x83\xd0\xb1\xd0\xb1\xd0\xbe\xd1\x82\xd0\xb0')]),
        ),
        migrations.AlterField(
            model_name='lesson',
            name='lesson_type',
            # UTF-8 encoded Russian lesson types (empty, practice, lecture, seminar).
            field=models.IntegerField(choices=[(0, b''), (1, b'\xd0\x9f\xd1\x80\xd0\xb0\xd0\xba\xd1\x82\xd0\xb8\xd0\xba\xd0\xb0'), (2, b'\xd0\x9b\xd0\xb5\xd0\xba\xd1\x86\xd0\xb8\xd1\x8f'), (3, b'\xd0\xa1\xd0\xb5\xd0\xbc\xd0\xb8\xd0\xbd\xd0\xb0\xd1\x80')]),
        ),
    ]
|
[
"kirov.verst@gmail.com"
] |
kirov.verst@gmail.com
|
49e1af1b05b53db53a3db3d9db8e5646ec7493d4
|
6fdddf7ba514cb3191786a61a06c9f12d6182890
|
/spyder.py
|
6310df5561aa21229735e54be0e4efa029dfe339
|
[] |
no_license
|
ash018/Scrap
|
64615c9127fa6ecc2e633e6e276e34badf8725ab
|
6f45ca9fdc1700686d88cf2aa1403fb9c0b6b05a
|
refs/heads/master
| 2020-03-31T06:28:56.519456
| 2020-01-23T11:07:24
| 2020-01-23T11:07:24
| 151,984,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 859
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 8 12:49:01 2018
@author: smakash
"""
import json
import scrapy
import urllib.request
class SpidyQuotesSpider(scrapy.Spider):
    """Crawl the spidyquotes JSON API page by page, yielding one item per quote."""
    name = 'spidyquotes'
    # %s is filled with the 1-based page number.
    quotes_base_url = 'http://spidyquotes.herokuapp.com/api/quotes?page=%s'
    start_urls = [quotes_base_url % 1]
    download_delay = 1.5  # seconds between requests (politeness throttle)

    def parse(self, response):
        """Parse one API page: emit its quotes, then schedule the next page."""
        data = json.loads(response.body)
        for item in data.get('quotes', []):
            yield {
                'text': item.get('text'),
                'author': item.get('author', {}).get('name'),
                'tags': item.get('tags'),
            }
        # Follow pagination until the API reports no further pages.
        if data['has_next']:
            next_page = data['page'] + 1
            yield scrapy.Request(self.quotes_base_url % next_page)


if __name__ == '__main__':
    # NOTE(review): merely instantiating the spider does not start a crawl;
    # running this file directly is effectively a no-op without a Scrapy
    # crawler process driving it.
    scraper = SpidyQuotesSpider()
|
[
"sadatakash018@gmail.com"
] |
sadatakash018@gmail.com
|
8a6a8e8e93fd8928ad317a39ef306e002e825d8a
|
254ef44b90485767a3aea8cbe77dc6bf77dddaeb
|
/441排列硬币.py
|
a5322fb823433368efc0bf8352f3bf9d19f11c17
|
[] |
no_license
|
XinZhaoFu/leetcode_moyu
|
fae00d52a52c090901021717df87b78d78192bdb
|
e80489923c60ed716d54c1bdeaaf52133d4e1209
|
refs/heads/main
| 2023-06-19T02:50:05.256149
| 2021-07-09T00:50:41
| 2021-07-09T00:50:41
| 331,243,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 596
|
py
|
"""
你总共有 n 枚硬币,你需要将它们摆成一个阶梯形状,第 k 行就必须正好有 k 枚硬币。
给定一个数字 n,找出可形成完整阶梯行的总行数。
n 是一个非负整数,并且在32位有符号整型的范围内。
示例 1:
n = 5
硬币可排列成以下几行:
¤
¤ ¤
¤ ¤
因为第三行不完整,所以返回2.
"""
class Solution(object):
    def arrangeCoins(self, n):
        """Return the number of complete staircase rows n coins can fill.

        Row k holds k coins, so the answer is the largest k with
        k*(k+1)/2 <= n, i.e. floor((sqrt(8n + 1) - 1) / 2) — computed
        below as sqrt(2)*sqrt(n + 1/8) - 1/2 before truncation.

        :type n: int
        :rtype: int
        """
        # With 0 or 1 coins, exactly that many rows are complete.
        if n < 2:
            return n
        shifted_root = (n + 0.125) ** 0.5
        return int((2 ** 0.5) * shifted_root - 0.5)
|
[
"948244817@qq.com"
] |
948244817@qq.com
|
5fd0d3f0eb9110b6f09d8eaa9ed47d574cfdf370
|
1dd72195bc08460df7e5bb82d3b7bac7a6673f49
|
/api/alembic/versions/4ac7d9f38f85_allows_null_dewpoint_values_for_hourly_.py
|
3d54107fefa7c5c3568f7d9fb1ecf0a6655968e6
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
bcgov/wps
|
c4347c39cadfad6711502d47776abc8d03895593
|
0ba707b0eddc280240964efa481988df92046e6a
|
refs/heads/main
| 2023-08-19T00:56:39.286460
| 2023-08-16T18:03:06
| 2023-08-16T18:03:06
| 235,861,506
| 35
| 9
|
Apache-2.0
| 2023-09-11T21:35:07
| 2020-01-23T18:42:10
|
Python
|
UTF-8
|
Python
| false
| false
| 891
|
py
|
"""Allows null dewpoint values for hourly actuals
Revision ID: 4ac7d9f38f85
Revises: aa82757b1084
Create Date: 2021-06-01 14:29:49.951368
"""
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '4ac7d9f38f85'
down_revision = 'aa82757b1084'
branch_labels = None
depends_on = None


def upgrade():
    """Drop the NOT NULL constraint on hourly_actuals.dewpoint."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('hourly_actuals', 'dewpoint',
                    existing_type=postgresql.DOUBLE_PRECISION(precision=53),
                    nullable=True)
    # ### end Alembic commands ###


def downgrade():
    """Re-impose NOT NULL on hourly_actuals.dewpoint (reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('hourly_actuals', 'dewpoint',
                    existing_type=postgresql.DOUBLE_PRECISION(precision=53),
                    nullable=False)
    # ### end Alembic commands ###
|
[
"noreply@github.com"
] |
bcgov.noreply@github.com
|
c845e20cb49ffa17704614605c775f62174f35f2
|
215f4260f3bc746ea038febbe27e177c73e8781d
|
/Koudai/Server/src/ZyGames.Tianjiexing.Web/Script/PyScript/Action/action4408.py
|
1745ae928052a268c7191948572bcc21935d790f
|
[] |
no_license
|
cosim/Scut-samples
|
c7baf863300111846358fb016896736420ec0058
|
86286c4b083fdb8ac6244ad122b5facb7592eabd
|
refs/heads/master
| 2021-01-18T03:54:16.358346
| 2015-10-16T09:07:36
| 2015-10-16T09:07:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,808
|
py
|
import clr, sys
import random
import time
import datetime
import ReferenceLib
from lang import Lang
from action import *
from System import *
from System.Collections.Generic import *
from ZyGames.Framework.Cache.Generic import *
from ZyGames.Framework.Common.Log import *
from ZyGames.Tianjiexing.Model import *
from ZyGames.Tianjiexing.BLL import *
from ZyGames.Tianjiexing.BLL.Base import *
from ZyGames.Tianjiexing.Lang import *
from ZyGames.Framework.Cache.Generic import *
from ZyGames.Framework.Game.Service import *
from ZyGames.Framework.Common import *
from ZyGames.Framework.Cache.Generic import *
from ZyGames.Tianjiexing.Model.Config import *
from ZyGames.Tianjiexing.BLL.Combat import *
from ZyGames.Tianjiexing.Model.Enum import *
# 4408_圣吉塔属性兑换接口
class UrlParam(HttpParam):
    """Parsed HTTP parameters for action 4408 (attribute-for-stars exchange)."""
    def __init__(self):
        HttpParam.__init__(self);
        # PropertyType enum value selecting which attribute to boost.
        self.propertyType = 0;
        # Number of stars the player wants to spend.
        self.starNum = 0;
class ActionResult(DataResult):
    """Response payload for action 4408; no fields beyond the DataResult base."""
    def __init__(self):
        DataResult.__init__(self);
def getUrlElement(httpGet, parent):
    """Read PropertyType and StarNum from the request; flag Result=False when missing."""
    urlParam = UrlParam();
    if httpGet.Contains("PropertyType")\
        and httpGet.Contains("StarNum"):
        urlParam.propertyType = httpGet.GetEnum[PropertyType]("PropertyType");
        urlParam.starNum = httpGet.GetIntValue("StarNum");
    else:
        # Required parameters absent: mark the parse as failed.
        urlParam.Result = False;
    return urlParam;
def takeAction(urlParam, parent):
    """Exchange earned stars for a percentage bonus on one player attribute."""
    actionResult = ActionResult();
    userId = str(parent.Current.UserId)
    contextUser = PersonalCacheStruct.Get[GameUser](userId)

    def loadError():
        # Shared failure path: set error code/message and flag the result failed.
        parent.ErrorCode = Lang.getLang("ErrorCode");
        parent.ErrorInfo = Lang.getLang("LoadError");
        actionResult.Result = False;
        return actionResult;

    # Update the attribute bonus: stars convert at starNum / 100.
    percent = 100.0;
    userSJTInfo = PersonalCacheStruct[UserShengJiTa]().FindKey(userId);
    # Check whether the player holds enough stars for the exchange.
    if userSJTInfo.LastScoreStar < urlParam.starNum:
        return loadError();
    # Apply the bonus to the selected attribute; unknown types are an error.
    if urlParam.propertyType == PropertyType.Life:
        userSJTInfo.LifeNum = userSJTInfo.LifeNum + (urlParam.starNum / percent);
    elif urlParam.propertyType == PropertyType.WuLi:
        userSJTInfo.WuLiNum = userSJTInfo.WuLiNum + (urlParam.starNum / percent);
    elif urlParam.propertyType == PropertyType.Mofa:
        userSJTInfo.MofaNum = userSJTInfo.MofaNum + (urlParam.starNum / percent);
    elif urlParam.propertyType == PropertyType.FunJi:
        userSJTInfo.FunJiNum = userSJTInfo.FunJiNum + (urlParam.starNum / percent);
    else:
        return loadError();
    # Deduct the spent stars.
    userSJTInfo.LastScoreStar -= urlParam.starNum;
    return actionResult;
def buildPacket(writer, urlParam, actionResult):
    # No extra response body beyond the default result fields.
    return True;
|
[
"wzf_88@qq.com"
] |
wzf_88@qq.com
|
d56f05af3c27de535dd87df53be5bd34660d448d
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adverbs/_worsts.py
|
238bd14921209f33ca8ce96c77e28f95ec46020d
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
from xai.brain.wordbase.adverbs._worst import _WORST
# class header
class _WORSTS(_WORST, ):
    """Wordbase entry for "worsts": thin subclass of _WORST carrying its own metadata."""
    def __init__(self,):
        _WORST.__init__(self)
        self.name = "WORSTS"
        self.specie = 'adverbs'  # NOTE(review): 'specie' (sic) — matches the generator's naming
        self.basic = "worst"
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
0f1183e97da07bd5f49d57e484c565872ba37049
|
238e46a903cf7fac4f83fa8681094bf3c417d22d
|
/VTK/vtk_7.1.1_x64_Debug/lib/python2.7/site-packages/twisted/manhole/ui/test/test_gtk2manhole.py
|
463190f37a1020dc41ec1587e34d4d45d1896c90
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
baojunli/FastCAE
|
da1277f90e584084d461590a3699b941d8c4030b
|
a3f99f6402da564df87fcef30674ce5f44379962
|
refs/heads/master
| 2023-02-25T20:25:31.815729
| 2021-02-01T03:17:33
| 2021-02-01T03:17:33
| 268,390,180
| 1
| 0
|
BSD-3-Clause
| 2020-06-01T00:39:31
| 2020-06-01T00:39:31
| null |
UTF-8
|
Python
| false
| false
| 1,324
|
py
|
# Copyright (c) 2009 Twisted Matrix Laboratories.
"""
Tests for GTK2 GUI manhole.
"""
# Determine at import time whether the GTK 2 GUI stack is importable; the
# result is left in `skip` (False, or a reason string understood by trial).
skip = False
try:
    import pygtk
    pygtk.require("2.0")
except:
    # Bare except is deliberate: any failure here (ImportError or pygtk's own
    # errors) means the GUI tests cannot run.
    skip = "GTK 2.0 not available"
else:
    try:
        import gtk
    except ImportError:
        skip = "GTK 2.0 not available"
    except RuntimeError:
        skip = "Old version of GTK 2.0 requires DISPLAY, and we don't have one."
    else:
        if gtk.gtk_version[0] == 1:
            skip = "Requested GTK 2.0, but 1.0 was already imported."
        else:
            # Only import the module under test once GTK 2 is confirmed usable.
            from twisted.manhole.ui.gtk2manhole import ConsoleInput

from twisted.trial.unittest import TestCase
from twisted.python.reflect import prefixedMethodNames
class ConsoleInputTests(TestCase):
    """
    Tests for L{ConsoleInput}.
    """

    def test_reverseKeymap(self):
        """
        Verify that a L{ConsoleInput} has a reverse mapping of the keysym names
        it needs for event handling to their corresponding keysym.
        """
        ci = ConsoleInput(None)
        for eventName in prefixedMethodNames(ConsoleInput, 'key_'):
            keysymName = eventName.split("_")[-1]
            keysymValue = getattr(gtk.keysyms, keysymName)
            self.assertEqual(ci.rkeymap[keysymValue], keysymName)

    # NOTE(review): placed at class scope here so trial skips the suite when
    # GTK is unavailable; the extraction rendered this line flush-left, so
    # confirm the indentation against the upstream twisted source.
    skip = skip
|
[
"l”ibaojunqd@foxmail.com“"
] |
l”ibaojunqd@foxmail.com“
|
7fd1dbe3e06dc453bc514ba3d58dd5b19f88d100
|
3669cd260bdab697376feca747d1635d35f42c83
|
/security/py-fail2ban/files/patch-actions.py
|
91bfc987942691a41c98c25baa79292d35497788
|
[] |
no_license
|
tuxillo/DPorts
|
58072bc88887c7a53a51988c76a70366bef44a93
|
f523fb13a9d3ecc5ce9a8045fdf146ae05de5399
|
refs/heads/master
| 2020-04-03T08:02:44.297511
| 2013-03-04T07:56:00
| 2013-03-04T07:56:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
--- server/actions.py.orig 2012-11-27 18:16:18.000000000 +0100
+++ server/actions.py 2012-11-27 18:17:04.000000000 +0100
@@ -206 +206 @@
- logSys.warn("[%s] Unban %s" % (self.jail.getName(), aInfo["ip"]))
+ logSys.warn("[%s] Unban %s" % (self.jail.getName(), str(aInfo["ip"])))
|
[
"nobody@home.ok"
] |
nobody@home.ok
|
d1203e82570e4d5912a7947b18befac137bde579
|
1548ce77537dcd50ab04b0eaee050b5d30553e23
|
/autotabular/pipeline/components/data_preprocessing/data_preprocessing_categorical.py
|
d6705e83ea43154d12a39641896c87ce43decbe9
|
[
"Apache-2.0"
] |
permissive
|
Shamoo100/AutoTabular
|
4a20e349104246bf825ebceae33dca0a79928f2e
|
7d71bf01d2b7d84fcf5f65c9f45c5cea1255d8a2
|
refs/heads/main
| 2023-08-13T21:34:34.329888
| 2021-10-02T07:06:00
| 2021-10-02T07:06:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,322
|
py
|
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
from autotabular.pipeline.base import DATASET_PROPERTIES_TYPE, BasePipeline
from autotabular.pipeline.components.data_preprocessing.categorical_encoding import OHEChoice
from autotabular.pipeline.components.data_preprocessing.categorical_encoding.encoding import OrdinalEncoding
from autotabular.pipeline.components.data_preprocessing.category_shift.category_shift import CategoryShift
from autotabular.pipeline.components.data_preprocessing.imputation.categorical_imputation import CategoricalImputation
from autotabular.pipeline.components.data_preprocessing.minority_coalescense import CoalescenseChoice
from autotabular.pipeline.constants import DENSE, INPUT, SPARSE, UNSIGNED_DATA
from ConfigSpace.configuration_space import Configuration, ConfigurationSpace
from sklearn.base import BaseEstimator
class CategoricalPreprocessingPipeline(BasePipeline):
    """This class implements a pipeline for data preprocessing of categorical
    features. It assumes that the data to be transformed is made only of
    categorical features.

    The steps of this pipeline, as assembled by ``_get_pipeline_steps``, are:
        1 - Imputation: assign a placeholder category to missing values (NaN)
        2 - Ordinal encoding
        3 - Category shift: adds 3 to every category value
        4 - Minority coalescence: assign category 1 to all categories whose
            occurrence don't sum-up to a certain minimum fraction
        5 - One hot encoding: usual sklearn one hot encoding
    (NOTE: the original docstring listed category shift first; the order
    above matches the actual step list below.)

    Parameters
    ----------
    config : ConfigSpace.configuration_space.Configuration
        The configuration to evaluate.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance
        used by `np.random`.
    """

    def __init__(self,
                 config: Optional[Configuration] = None,
                 steps: Optional[List[Tuple[str, BaseEstimator]]] = None,
                 dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
                 include: Optional[Dict[str, str]] = None,
                 exclude: Optional[Dict[str, str]] = None,
                 random_state: Optional[np.random.RandomState] = None,
                 init_params: Optional[Dict[str, Any]] = None):
        # Categorical preprocessing emits integer category codes.
        self._output_dtype = np.int32
        super().__init__(config, steps, dataset_properties, include, exclude,
                         random_state, init_params)

    @staticmethod
    def get_properties(
        dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None
    ) -> Dict[str, Optional[Union[str, int, bool, Tuple]]]:
        """Static capability metadata describing this preprocessing pipeline."""
        return {
            'shortname': 'cat_datapreproc',
            'name': 'categorical data preprocessing',
            'handles_missing_values': True,
            'handles_nominal_values': True,
            'handles_numerical_features': True,
            'prefers_data_scaled': False,
            'prefers_data_normalized': False,
            'handles_regression': True,
            'handles_classification': True,
            'handles_multiclass': True,
            'handles_multilabel': True,
            'is_deterministic': True,
            # TODO find out if this is right!
            'handles_sparse': True,
            'handles_dense': True,
            'input': (DENSE, SPARSE, UNSIGNED_DATA),
            'output': (INPUT, ),
            'preferred_dtype': None
        }

    def _get_hyperparameter_search_space(
        self,
        include: Optional[Dict[str, str]] = None,
        exclude: Optional[Dict[str, str]] = None,
        dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
    ) -> ConfigurationSpace:
        """Create the hyperparameter configuration space.

        Returns
        -------
        cs : ConfigSpace.configuration_space.ConfigurationSpace
            The configuration space describing the preprocessing choices.
        """
        cs = ConfigurationSpace()
        # Fall back to an empty dict when no usable properties were supplied.
        if dataset_properties is None or not isinstance(
                dataset_properties, dict):
            dataset_properties = dict()
        cs = self._get_base_search_space(
            cs=cs,
            dataset_properties=dataset_properties,
            exclude=exclude,
            include=include,
            pipeline=self.steps)
        return cs

    def _get_pipeline_steps(
        self,
        dataset_properties: Optional[Dict[str, str]] = None,
    ) -> List[Tuple[str, BaseEstimator]]:
        """Build the ordered (name, estimator) step list for this pipeline."""
        steps = []
        default_dataset_properties = {}
        if dataset_properties is not None and isinstance(
                dataset_properties, dict):
            default_dataset_properties.update(dataset_properties)
        steps.extend([
            ('imputation', CategoricalImputation()),
            ('encoding', OrdinalEncoding()),
            ('category_shift', CategoryShift()),
            ('category_coalescence',
             CoalescenseChoice(default_dataset_properties)),
            ('categorical_encoding', OHEChoice(default_dataset_properties)),
        ])
        return steps

    def _get_estimator_hyperparameter_name(self) -> str:
        # Human-readable name used by the framework for this pipeline.
        return 'categorical data preprocessing'
|
[
"jianzhnie@126.com"
] |
jianzhnie@126.com
|
f57310bed2a1c58aed8958d2ec2afcb9b866e397
|
7cc0ef2d1ad8e9a1542e52bc6bc8897606639452
|
/account/migrations/0007_auto_20160505_1827.py
|
3f70979a040e12f0e39942e7f66d9c20adccf8e3
|
[] |
no_license
|
htl1126/pathjump
|
1e87c6127bbfebc8519379c9352440d3a98359f6
|
c1235c3fbb13af31ac7b8523e7a83b69f0da95b7
|
refs/heads/master
| 2021-01-15T15:36:47.723753
| 2016-08-26T20:35:32
| 2016-08-26T20:35:32
| 53,075,772
| 2
| 2
| null | 2016-05-06T17:43:22
| 2016-03-03T19:07:26
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,476
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-05-05 22:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated (Django 1.9.4): relaxes several userprofile fields to
    # allow blank values and gives the date fields a timezone-aware `now`
    # default.

    dependencies = [
        ('account', '0006_auto_20160505_1447'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='birthday',
            field=models.DateField(blank=True, default=django.utils.timezone.now),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='gpa_1',
            field=models.CharField(blank=True, max_length=10),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='major_1',
            field=models.CharField(blank=True, max_length=20),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='university_grad_date_1',
            field=models.DateField(blank=True, default=django.utils.timezone.now),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='university_grad_date_2',
            field=models.DateField(blank=True, default=django.utils.timezone.now),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='university_grad_date_3',
            field=models.DateField(blank=True, default=django.utils.timezone.now),
        ),
    ]
|
[
"b93902098@ntu.edu.tw"
] |
b93902098@ntu.edu.tw
|
91edc3f2e33b2bbca1ee98e8b76dfe875cf3c247
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/model_control/detailed/transf_Anscombe/model_control_one_enabled_Anscombe_PolyTrend_Seasonal_Second_AR.py
|
64f7646780a6a43000ce6df0f0cadc56dde92ebd
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 156
|
py
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Anscombe'] , ['PolyTrend'] , ['Seasonal_Second'] , ['AR'] );
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
4db2f70125d55dc93257f676a4f151d6e74ffbdc
|
b8bff6154e548d6135812394f22d6564c40b074b
|
/flask-aws/bin/bundle_image
|
d417974d24fa30d6313fcc8108015b0fcc9760b0
|
[] |
no_license
|
nygeog/flask-aws-tutorial-nygeog
|
0cb18a48dab515abfce9b89d9b6b84e152bd2e40
|
f80668e9bdb3eced69c2c1bd50f6f83c37f65ce1
|
refs/heads/master
| 2021-01-10T05:48:10.904717
| 2015-10-28T03:01:53
| 2015-10-28T03:01:53
| 45,084,168
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,614
|
#!/Users/danielmsheehan/GitHub/flask-aws-tutorial-nygeog/flask-aws/bin/python2.7
from boto.manage.server import Server
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser(version="%prog 1.0", usage="Usage: %prog [options] instance-id [instance-id-2]")
# Commands
parser.add_option("-b", "--bucket", help="Destination Bucket", dest="bucket", default=None)
parser.add_option("-p", "--prefix", help="AMI Prefix", dest="prefix", default=None)
parser.add_option("-k", "--key", help="Private Key File", dest="key_file", default=None)
parser.add_option("-c", "--cert", help="Public Certificate File", dest="cert_file", default=None)
parser.add_option("-s", "--size", help="AMI Size", dest="size", default=None)
parser.add_option("-i", "--ssh-key", help="SSH Keyfile", dest="ssh_key", default=None)
parser.add_option("-u", "--user-name", help="SSH Username", dest="uname", default="root")
parser.add_option("-n", "--name", help="Name of Image", dest="name")
(options, args) = parser.parse_args()
for instance_id in args:
try:
s = Server.find(instance_id=instance_id).next()
print "Found old server object"
except StopIteration:
print "New Server Object Created"
s = Server.create_from_instance_id(instance_id, options.name)
assert(s.hostname is not None)
b = s.get_bundler(uname=options.uname)
b.bundle(bucket=options.bucket,prefix=options.prefix,key_file=options.key_file,cert_file=options.cert_file,size=int(options.size),ssh_key=options.ssh_key)
|
[
"daniel.martin.sheehan@gmail.com"
] |
daniel.martin.sheehan@gmail.com
|
|
945d57a53bd0dae91137c9ba1f1efc9b34fe111e
|
e59e1039469765c35192b1cd6eea2789b49190ba
|
/nslocalizer/xcodeproj/pbProj/XCVersionGroup.py
|
53e60757f370688e3305b35df0c363060f3e69f8
|
[
"BSD-3-Clause"
] |
permissive
|
samdmarshall/nslocalizer
|
db208f166e9c7c8aa7c97d33700943370d1e063b
|
df086165d9201c98753cdda47bcfa0e517839696
|
refs/heads/develop
| 2021-01-18T00:45:31.485248
| 2019-04-02T18:00:48
| 2019-04-02T18:00:48
| 63,786,762
| 174
| 12
|
NOASSERTION
| 2019-02-24T16:27:13
| 2016-07-20T14:10:52
|
Python
|
UTF-8
|
Python
| false
| false
| 2,124
|
py
|
# Copyright (c) 2016, Samantha Marshall (http://pewpewthespells.com)
# All rights reserved.
#
# https://github.com/samdmarshall/nslocalizer
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of Samantha Marshall nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
from . import PBX_Constants
from .PBXItem import PBX_Base_Reference
class XCVersionGroup(PBX_Base_Reference):
def __init__(self, identifier, dictionary):
super(self.__class__, self).__init__(identifier, dictionary)
def resolveGraph(self, project):
super(self.__class__, self).resolveGraph(project)
self.resolveGraphNodesForArray(PBX_Constants.kPBX_REFERENCE_children, project)
self.resolveGraphNodeForKey(PBX_Constants.kPBX_XCVersionGroup_currentVersion, project)
|
[
"me@samdmarshall.com"
] |
me@samdmarshall.com
|
596b64221ab3bf09fd36b81245b725ef27edbb7f
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/models/nist_data/atomic/float_pkg/schema_instance/nistschema_sv_iv_atomic_float_enumeration_4_xsd/__init__.py
|
6b1c7764ed6855c717d67be10234c1e18219fca4
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917
| 2023-07-29T17:10:13
| 2023-07-30T12:11:13
| 239,622,251
| 2
| 0
|
MIT
| 2023-07-25T14:19:04
| 2020-02-10T21:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 364
|
py
|
from output.models.nist_data.atomic.float_pkg.schema_instance.nistschema_sv_iv_atomic_float_enumeration_4_xsd.nistschema_sv_iv_atomic_float_enumeration_4 import (
NistschemaSvIvAtomicFloatEnumeration4,
NistschemaSvIvAtomicFloatEnumeration4Type,
)
__all__ = [
"NistschemaSvIvAtomicFloatEnumeration4",
"NistschemaSvIvAtomicFloatEnumeration4Type",
]
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
7cb0f1c9c7ca53419f4d843771e8d05e386fd3dc
|
add72f4d6f9f7af1f437d19213c14efb218b2194
|
/icekit_press_releases/migrations/0010_add_brief.py
|
53e0ff6430fc135656f91969f847f38cd1b04409
|
[
"MIT"
] |
permissive
|
ic-labs/django-icekit
|
6abe859f97c709fcf51207b54778501b50436ff7
|
c507ea5b1864303732c53ad7c5800571fca5fa94
|
refs/heads/develop
| 2022-08-08T21:26:04.144852
| 2018-01-08T02:55:17
| 2018-01-08T02:55:17
| 65,470,395
| 53
| 12
|
MIT
| 2022-07-06T19:59:39
| 2016-08-11T13:11:02
|
Python
|
UTF-8
|
Python
| false
| false
| 700
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('icekit_press_releases', '0009_auto_20170519_1308'),
]
operations = [
migrations.AddField(
model_name='pressrelease',
name='admin_notes',
field=models.TextField(blank=True, help_text=b"Administrator's notes about this content"),
),
migrations.AddField(
model_name='pressrelease',
name='brief',
field=models.TextField(blank=True, help_text=b'A document brief describing the purpose of this content'),
),
]
|
[
"greg@interaction.net.au"
] |
greg@interaction.net.au
|
5b098cc32ed04727d88286884d3a1a759dd4afa0
|
ddb185b0cf581d85a1dd733a6d1e5d027ba3e0ca
|
/phase1/400.py
|
877ca618fdd5b1fc536af2f45765a18d49bae79b
|
[] |
no_license
|
GavinPHR/code
|
8a319e1223a307e755211b7e9b34c5abb00b556b
|
b1d8d49633db362bbab246c0cd4bd28305964b57
|
refs/heads/master
| 2020-05-16T04:09:19.026207
| 2020-04-30T10:00:06
| 2020-04-30T10:00:06
| 182,766,600
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 515
|
py
|
# Nth Digit
class Solution:
def findNthDigit(self, n: int) -> int:
i = 0
idx = 9 * (10 ** i) * (i + 1)
while n - idx > 0:
n = n - idx
i += 1
idx = 9 * (10 ** i) * (i + 1)
pos = n % (i + 1)
print(n, i, pos, sep="||")
num = (10 ** i - 1) + n // (i + 1)
print(num)
return int(str(num)[~pos]) if pos == 0 else int(str(num + 1)[pos - 1])
if __name__ == '__main__':
s = Solution()
print(s.findNthDigit(671))
|
[
"gavinsweden@gmail.com"
] |
gavinsweden@gmail.com
|
913096e1f4f5d79eb8e22f3e0a46b6ac66e40beb
|
abf984d4784f593ce617b335029b3efc273c7678
|
/school_1329_server/users/migrations/0004_auto_20171228_1822.py
|
e1781425ab261058467d4a9f8d7b250a1c693513
|
[] |
no_license
|
potykion/school_1329_server
|
80200cf5ddebfc4f9ac94ef2db19472c1b3cf374
|
7c579c625dc1fae7334117fa6cf078ede38574dc
|
refs/heads/master
| 2021-09-08T18:02:22.092561
| 2018-03-11T16:54:13
| 2018-03-11T16:54:13
| 113,746,164
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 984
|
py
|
# Generated by Django 2.0 on 2017-12-28 15:22
import datetime
from django.db import migrations, models
import django.utils.timezone
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20171226_0152'),
]
operations = [
migrations.AddField(
model_name='temporarypassword',
name='date_created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AlterField(
model_name='temporarypassword',
name='expiration_date',
field=models.DateTimeField(default=datetime.datetime(2018, 1, 4, 15, 22, 16, 184617, tzinfo=utc)),
),
migrations.AlterField(
model_name='temporarypassword',
name='password_value',
field=models.CharField(default='genGvWwO', max_length=32),
),
]
|
[
"potykion@gmail.com"
] |
potykion@gmail.com
|
ce879194c5cd487647fb7aebdeb08adbfa36a966
|
973ac85842b01e373c48d161bf46c7c7e0e50227
|
/Game5/home.py
|
d89c7638483f9feca2608c04430c40c0062f7fe7
|
[
"MIT"
] |
permissive
|
splin85/Games
|
b652a050b905a2922849df21ff1262f8dedba6f1
|
41ebdf73e5523be15830334afc12f013b1d60323
|
refs/heads/master
| 2020-03-28T12:38:45.987129
| 2018-09-11T10:21:39
| 2018-09-11T10:21:39
| 148,318,954
| 1
| 0
|
MIT
| 2018-09-11T13:09:02
| 2018-09-11T13:09:02
| null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
# coding: utf-8
# 作者: Charles
# 公众号: Charles的皮卡丘
# 大本营类
import pygame
# 大本营类
class Home(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.homes = ['./images/home/home1.png', './images/home/home2.png', './images/home/home_destroyed.png']
self.home = pygame.image.load(self.homes[0])
self.rect = self.home.get_rect()
self.rect.left, self.rect.top = (3 + 12 * 24, 3 + 24 * 24)
self.alive = True
# 大本营置为摧毁状态
def set_dead(self):
self.home = pygame.image.load(self.homes[-1])
self.alive = False
|
[
"1159254961@qq.com"
] |
1159254961@qq.com
|
771ad5ac2a34884bcd43a05f69b30b6e6d21a353
|
3197bcf3e80989d0fc13519b2a5689c8b21049d8
|
/prototypes/training_scripts/HeLa/feature_net_61x61_dropout_norm3.py
|
46da59afb0570adc9516dc823bf0919d3b8b35f8
|
[] |
no_license
|
YubinXie/DeepCell
|
d6a5434dcf5fb208f407caf714d4877ed745a7cd
|
887ac6be63e2594c3480680fdde63fbe9dded336
|
refs/heads/master
| 2021-05-05T16:17:38.178635
| 2018-01-13T20:39:34
| 2018-01-13T20:39:34
| 117,307,949
| 0
| 0
| null | 2018-01-13T02:45:18
| 2018-01-13T02:45:18
| null |
UTF-8
|
Python
| false
| false
| 1,084
|
py
|
'''Train a simple deep CNN on a HeLa dataset.
GPU run command:
THEANO_FLAGS='mode=FAST_RUN,device=gpu,floatX=float32' python training_template.py
'''
from __future__ import print_function
from keras.optimizers import SGD, RMSprop
from cnn_functions import rate_scheduler, train_model_sample
from model_zoo import feature_net_61x61 as the_model
import os
import datetime
import numpy as np
batch_size = 256
n_classes = 3
n_epoch = 25
model = the_model(n_channels = 2, n_features = 3, reg = 1e-5, drop=0.5)
dataset = "HeLa_all_stdnorm_61x61"
direc_save = "/home/nquach/DeepCell2/trained_networks/"
direc_data = "/home/nquach/DeepCell2/training_data_npz/"
optimizer = RMSprop(lr = 0.001, rho = 0.95, epsilon = 1e-8)
lr_sched = rate_scheduler(lr = 0.001, decay = 0.95)
expt = "feature_net_61x61_dropout"
iterate = 3
train_model_sample(model = model, dataset = dataset, optimizer = optimizer,
expt = expt, it = iterate, batch_size = batch_size, n_epoch = n_epoch,
direc_save = direc_save,
direc_data = direc_data,
lr_sched = lr_sched,
rotate = True, flip = True, shear = 0)
|
[
"vanvalen@gmail.com"
] |
vanvalen@gmail.com
|
df258a208348b017e2c9cf61a8a39e9d1be99432
|
44470a3d1388eddc83e84193813364cdc446f89a
|
/FinancialSimulator/Config/ConfigInstall.py
|
731afffe0e43c6cf368dd2f87069f4048f3bf290
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
FabriceSalvaire/pyFinancialSimulator
|
12671b48ff3596affc5d770058dae41dcdfeecac
|
ea8380bf1106d597661214b2695681e21d72d259
|
refs/heads/master
| 2021-01-10T17:16:59.975117
| 2018-04-11T11:41:38
| 2018-04-11T11:41:38
| 43,254,801
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,298
|
py
|
####################################################################################################
import os
####################################################################################################
import FinancialSimulator.Tools.Path as PathTools # due to Path class
####################################################################################################
_file_path = PathTools.to_absolute_path(__file__)
_config_module_path = os.path.dirname(_file_path)
_module_path = PathTools.parent_directory_of(_config_module_path)
# Fixme: wrong when installed
_source_directory = PathTools.parent_directory_of(_module_path)
_share_directory = os.path.join(_source_directory, 'share')
class Path:
module_path = _module_path
share_directory = _share_directory
config_directory = os.path.join(share_directory, 'config')
accounting_data_directory = os.path.join(share_directory, 'accounting')
####################################################################################################
class Logging:
default_config_file = 'logging.yml'
directories = (Path.config_directory,)
##############################################
@staticmethod
def find(config_file):
return PathTools.find(config_file, Logging.directories)
|
[
"fabrice.salvaire@orange.fr"
] |
fabrice.salvaire@orange.fr
|
7325ae304d235b7a782d87159dd5bb3bda70ee36
|
da92f27626485f3d75dd85b83e7d404fe5ce63eb
|
/migrations/versions/7ca65b6611b6_.py
|
404d4f4e6b46d99e35425d14f33f002d67d9a15a
|
[
"Apache-2.0"
] |
permissive
|
DD-DeCaF/design-storage
|
c0a88c554b427d6b86ac09e5ab0b4154174ef250
|
0c0e07f0dc505eb4a1e4521a87f5f7ac6f879b6d
|
refs/heads/devel
| 2021-06-23T16:45:27.322527
| 2020-05-26T20:17:11
| 2020-05-26T20:17:11
| 161,643,976
| 0
| 0
|
Apache-2.0
| 2020-12-08T14:35:36
| 2018-12-13T13:34:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,112
|
py
|
"""empty message
Revision ID: 7ca65b6611b6
Revises:
Create Date: 2018-12-18 14:28:03.918045
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '7ca65b6611b6'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('design',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('project_id', sa.Integer(), nullable=True),
sa.Column('name', sa.String(), nullable=False),
sa.Column('model_id', sa.Integer(), nullable=False),
sa.Column('design', postgresql.JSONB(astext_type=sa.Text()), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_design_project_id'), 'design', ['project_id'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_design_project_id'), table_name='design')
op.drop_table('design')
# ### end Alembic commands ###
|
[
"ali@kvikshaug.no"
] |
ali@kvikshaug.no
|
1d02be350bd89c4ad24b2390e8c69e52de9cb2c8
|
24b79d18bc9b522c86d65d7601d7012fe29b0693
|
/program9-11/main_pro.py
|
58caffbe4b62c30aec554f26abfbfda139e026c1
|
[] |
no_license
|
meenapandey500/Python_program
|
409fafa2e8f50edfbf30ddfbdf85b47569bf229c
|
2bcb5fd51aebb4dca4bcc31a26e6b05a3603d5f0
|
refs/heads/main
| 2023-03-18T11:49:58.302858
| 2021-03-19T09:29:24
| 2021-03-19T09:29:24
| 349,359,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
import file_class as f1
#main program
f=f1.files() #f is a object of class files
f.writefile()
f.readfile()
f.copyfile()
|
[
"noreply@github.com"
] |
meenapandey500.noreply@github.com
|
bff3b2c527447f1f88f50748842058001df6d0b2
|
1e177ebdcb470f738c058606ac0f86a36085f661
|
/Python/AdafruitIO/SendBlockHeat02_0.py
|
dc11ecc9ab7849e9906af300e627f575c087dfc7
|
[] |
no_license
|
robingreig/raspi-git
|
5cbdd295c1048a0571aa2c2f8576438269439f07
|
7373bf94557d7a88c8f343362ba64f9cd19c8ce7
|
refs/heads/master
| 2023-08-31T03:16:17.286700
| 2023-08-26T11:54:23
| 2023-08-26T11:54:23
| 16,873,881
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
#!/usr/bin/python
# Import Library & create instance of REST client
from Adafruit_IO import Client
aio = Client('7e01e8b5e56360efc48a27682324fc353e18d14f')
# Send the value of 1 to BlockHeat02
aio.send('blockheat02',0)
# Retrieve the most recent value from 'BlockHeat02'
data = aio.receive('BlockHeat02')
print('Received Value: {0}'.format(data.value))
|
[
"robin.greig@calalta.com"
] |
robin.greig@calalta.com
|
4f0fe3d099194e915d8a5db2b0850bd92f76cbc2
|
0facb323be8a76bb4c168641309972fa77cbecf2
|
/Configurations/HWWSemiLepHighMass/Full_v6Production/template_seed/templates_jhchoi_combine/StructureFiles/MakeSampleStructureNuisancePythons.py
|
2e5c33f66236b1b84dc00bbe1203bd86d8005a68
|
[] |
no_license
|
bhoh/SNuAnalytics
|
ef0a1ba9fa0d682834672a831739dfcfa1e7486b
|
34d1fc062e212da152faa83be50561600819df0e
|
refs/heads/master
| 2023-07-06T03:23:45.343449
| 2023-06-26T12:18:28
| 2023-06-26T12:18:28
| 242,880,298
| 0
| 1
| null | 2020-02-25T01:17:50
| 2020-02-25T01:17:49
| null |
UTF-8
|
Python
| false
| false
| 3,908
|
py
|
import os
import sys
sys.path.insert(0, os.getcwd()+"/../MassPoints")
sys.path.insert(0, os.getcwd()+"/../")
##--signal Mass points--##
from List_MX import *
from List_MX_VBF import *
from WPandCut2016 import Year
List_MX_common=list(set(List_MX).intersection(List_MX_VBF))
##--bkg--##
#BKG=[ 'DY', 'WZZ', 'WWZ','WWW','ZZZ', 'ZZ', 'WZ', 'WW', 'WpWmJJ_EWK_QCD_noHiggs', 'top', 'Wjets0j', 'Wjets1j', 'Wjets2j','vbfHWWlnuqq_M125','ggHWWlnuqq_M125'] + ['QCD_MU','QCD_EM','QCD_bcToE']
#BKG=[ 'DY', 'MultiV', 'WpWmJJ_EWK_QCD_noHiggs', 'top', 'Wjets0j', 'Wjets1j', 'Wjets2j','vbfHWWlnuqq_M125','ggHWWlnuqq_M125']# +['QCD_MU','QCD_EM','QCD_bcToE']
#BKG=[ 'DY', 'MultiV', 'WpWmJJ_EWK_QCD_noHiggs', 'top', 'Wjets','vbfHWWlnuqq_M125','ggHWWlnuqq_M125']# +['QCD_MU','QCD_EM','QCD_bcToE']
#BKG=[ 'DY', 'MultiV', 'qqWWqq', 'top', 'Wjets','ggWW','h125','QCD','HTT']# +['QCD_MU','QCD_EM','QCD_bcToE']
BKG=[ 'DY', 'MultiV', 'qqWWqq', 'top', 'Wjets','ggWW','ggHWWlnuqq_M125','QCD','HTT']# +['QCD_MU','QCD_EM','QCD_bcToE']
if Year=='2016':
BKG.append('vbfHWWlnuqq_M125')
###---Make samples dictionary---##
#handle=open('../sample_2016.py','r')
#exec(handle)
#handle.close()
##---Make samples file for plotting nad Runcard
#f=open()
#for s in samples:##
# f.write('samples["'+s+'"]={}\n')
#List_SampleTemplate=['samples_2016limit_MassTemplate_ele.py','samples_2016limit_MassTemplate_mu.py']
#List_StructureTemplate=['structure_MassTemplate_ele.py','structure_MassTemplate_mu.py']
print "-----sampleFile-----"
for MX in List_MX_common:
for flv in ['ele','mu']:
print MX
##SampleTemplate
for rg in ['SR','TOP','SB']:
f=open('samples_limit_M'+str(MX)+'_'+flv+'.py','w') ##samples_limit_M
for s in BKG:
f.write('samples["'+s+'"]={}\n')
f.write('samples["DATA"]={}\n')
f.write('samples["ggHWWlnuqq_M'+str(MX)+'_S"]={}\n')
f.write('samples["vbfHWWlnuqq_M'+str(MX)+'_S"]={}\n')
f.write('samples["ggHWWlnuqq_M'+str(MX)+'_SBI"]={}\n')
f.write('samples["vbfHWWlnuqq_M'+str(MX)+'_SBI"]={}\n')
f.close()
print "------structure File------"
for MX in List_MX_common:
for flv in ['ele','mu']:
print MX
##SampleTemplate
for rg in ['SR','TOP','SB']:
f=open('structure_M'+str(MX)+'_'+flv+'.py','w')
for s in BKG:
f.write('structure["'+s+'"]={\n\
"isSignal" : 0,\n\
"isData" : 0 ,\n\
}\n')
f.write('structure["DATA"]={\n\
"isSignal" : 0,\n\
"isData" : 1 ,\n\
}\n')
f.write('structure["ggHWWlnuqq_M'+str(MX)+'_S"]={\n\
"isSignal" : 1,\n\
"isData" : 0 ,\n\
}\n')
f.write('structure["vbfHWWlnuqq_M'+str(MX)+'_S"]={\n\
"isSignal" : 1,\n\
"isData" : 0 ,\n\
}\n')
f.write('structure["ggHWWlnuqq_M'+str(MX)+'_SBI"]={\n\
"isSignal" : 1,\n\
"isData" : 0 ,\n\
}\n')
f.write('structure["vbfHWWlnuqq_M'+str(MX)+'_SBI"]={\n\
"isSignal" : 1,\n\
"isData" : 0 ,\n\
}\n')
f.close()
##---Make Final Nuisance
#nuisances['dynorm']['sample']
defaultNuisanceFile='../nuisances.py'
f=open(defaultNuisanceFile,'r')
fnew=open('nuisance.py','w')
lines=f.readlines()
for line in lines:
fnew.write(line)
fnew.write(
'''
for n in nuisances:
for s in sorted(nuisances[n]['samples']):
if '_S' in s:
sbi=s.replace('_S','_SBI')
nuisances[n]['samples'][sbi]=nuisances[n]['samples'][s]
'''
)
fnew.close()
os.system('cp nuisance.py nuisance_Boosted.py')
os.system('cp nuisance.py nuisance_Resolved.py')
|
[
"soarnsoar@gmail.com"
] |
soarnsoar@gmail.com
|
d798508db278b21b33d7535d4228a09122e05c85
|
4be9a5bdb8e051001b78c8f127ccc1a7f85c14e7
|
/monitoring/forms.py
|
e1648ae00d7df6d2c21eabc75faeffdc180af623
|
[] |
no_license
|
quentin-david/heimdall
|
f72a85606e7ab53683df2023ef5eaba762198211
|
84a429ee52e1891bc2ee4eb07a084dff209c789c
|
refs/heads/master
| 2021-01-21T10:26:28.895663
| 2017-07-21T19:19:46
| 2017-07-21T19:19:46
| 83,432,596
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
from django import forms
from .models import Munin
class MuninForm(forms.ModelForm):
class Meta:
model = Munin
fields = '__all__'
|
[
"david@hemdgsa01.local.lan"
] |
david@hemdgsa01.local.lan
|
2b8a72aa08de33202a5c8687406a44e3a25ec8fa
|
0e738ccc77594585c08e2e8a87a67253d13f57b0
|
/flask-project-v8/manage.py
|
553b4f14fca06cd8a55af1b8edeb6eb9114e6f36
|
[] |
no_license
|
saurabh-kumar88/flask-projects
|
cb02a991e05dbcf6a467bb126a4efecbe4bc4126
|
02827743e7a52f562be03975ceea9de10a4346cf
|
refs/heads/main
| 2023-01-31T06:42:15.273505
| 2020-12-14T09:09:07
| 2020-12-14T09:09:07
| 321,291,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app.models import *
app = Flask(__name__)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
|
[
"ykings.saurabh@gmail.com"
] |
ykings.saurabh@gmail.com
|
b2fc87109627385c185b8e75175bcac1bd8e1839
|
445539eefd37bbd8feb327574333b464bbe9f858
|
/33/pool_pipe_celcius.py
|
d8ad66384d96d947b427ea589cc7b2bb2fdd0286
|
[
"CC0-1.0"
] |
permissive
|
yz-liu/cpython-book-samples
|
8a2753ca2cebf8e5d8f5822e28ccf278f17864ae
|
d5a7cd72d14231a35d1d8b2ec74b04a171170686
|
refs/heads/master
| 2023-01-19T12:21:27.354487
| 2020-11-26T00:16:40
| 2020-11-26T00:16:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 617
|
py
|
import multiprocessing as mp
def to_celcius(child_pipe: mp.Pipe, parent_pipe: mp.Pipe):
f = parent_pipe.recv()
# time-consuming task ...
c = (f - 32) * (5/9)
child_pipe.send(c)
if __name__ == '__main__':
mp.set_start_method('spawn')
pool_manager = mp.Manager()
with mp.Pool(2) as pool:
parent_pipe, child_pipe = mp.Pipe()
results = []
for i in range(110, 150, 10):
parent_pipe.send(i)
pool.apply_async(to_celcius, args=(child_pipe, parent_pipe))
print(child_pipe.recv())
parent_pipe.close()
child_pipe.close()
|
[
"anthony.p.shaw@gmail.com"
] |
anthony.p.shaw@gmail.com
|
038a78ecf4331b3d18fb16a79383f077385711f8
|
8c14c6fef7539f3f946b955d4677a8c2f25bb7f1
|
/src/vsc/model/rangelist_model.py
|
8b135085fdf2c2385fb5c136cf7367cbd096cab5
|
[
"Apache-2.0"
] |
permissive
|
hodjat91/pyvsc
|
2ce8b4cb1582793caee8f994e73ab867ef0eefb8
|
9b268db1970cd43058ea02f4fdbdc31990046230
|
refs/heads/master
| 2022-11-11T16:29:30.056186
| 2020-07-01T01:14:10
| 2020-07-01T01:14:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,241
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import List
# Created on Aug 4, 2019
#
# @author: ballance
class RangelistModel(object):
def __init__(self, rl : List[List[int]]=None):
self.range_l = []
if rl is not None:
for r in rl:
if isinstance(r, list):
if len(r) == 2:
self.range_l.append([r[0], r[1]])
else:
raise Exception("Each range element must have 2 elements")
else:
self.range_l.append([int(r), int(r)])
def add_value(self, v):
self.range_l.append([v, v])
def add_range(self, low, high):
self.range_l.append([low, high])
def __contains__(self, val):
for r in self.range_l:
if val >= r[0] and val <= r[1]:
return True
return False
def equals(self, oth)->bool:
eq = isinstance(oth, RangelistModel)
if len(self.range_l) == len(oth.range_l):
for i in range(len(self.range_l)):
eq &= self.range_l[i][0] == oth.range_l[i][0]
eq &= self.range_l[i][1] == oth.range_l[i][1]
else:
eq = False
return eq
def clone(self):
ret = RangelistModel(None)
for r in self.range_l:
ret.range_l.append([r[0], r[1]])
return ret
|
[
"matt.ballance@gmail.com"
] |
matt.ballance@gmail.com
|
9957cfa6dcae5f7c12edbbbee6687a70d54e3523
|
0fc2b99fd8414dbce5f1f6057b9b800c968d5d05
|
/lpbio/swarm/__init__.py
|
293bff76afe1cd97181eb75f0eed192cfe8d24a1
|
[
"MIT"
] |
permissive
|
widdowquinn/lpbio
|
9df898cb9580f62da1f66d5736cbf7a984633561
|
8b95642396d05a56c1c54389e3de6d88d7cbffb5
|
refs/heads/master
| 2020-03-29T02:08:56.675473
| 2019-11-07T14:27:44
| 2019-11-07T14:27:44
| 149,422,654
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,395
|
py
|
# -*- coding: utf-8 -*-
"""Code for interaction with the Swarm clustering tool."""
import os
import shlex
import shutil
import subprocess
from collections import namedtuple
from lpbio import LPBioNotExecutableError, is_exe
class SwarmError(Exception):
"""Exception raised when swarm fails"""
def __init__(self, msg):
self.message = msg
# factory class for Swarm run returned values
SwarmRun = namedtuple("SwarmRun", "command outfilename stdout stderr")
# factory class for Swarm parameter values
SwarmParameters = namedtuple("SwarmParameters", "t d")
SwarmParameters.__new__.__defaults__ = (1, 1)
def build_cmd(infname, outfname, parameters):
"""Build a command-line for swarm"""
params = [
"-{0} {1}".format(shlex.quote(str(k)), shlex.quote(str(v)))
for k, v in parameters._asdict().items()
if v is not None
]
cmd = ["swarm", *params, "-o", shlex.quote(outfname), shlex.quote(infname)]
return cmd
class Swarm(object):
"""Class for working with SWARM"""
def __init__(self, exe_path):
"""Instantiate with location of executable"""
exe_path = shlex.quote(shutil.which(exe_path))
if not os.access(exe_path, os.X_OK):
msg = "{0} is not an executable".format(exe_path)
raise LPBioNotExecutableError(msg)
self._exe_path = exe_path
def run(self, infname, outdir, parameters, dry_run=False):
"""Run swarm to cluster sequences in the passed file
- infname - path to sequences for clustering
- outdir - output directory for clustered output
- parameters - named tuple of Swarm parameters
- dry_run - if True returns cmd-line but does not run
Returns namedtuple with form:
"command outfilename stdout stderr"
"""
self.__build_cmd(infname, outdir, parameters)
if dry_run:
return self._cmd
pipe = subprocess.run(
self._cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
shell=False,
)
results = SwarmRun(self._cmd, self._outfname, pipe.stdout, pipe.stderr)
return results
def __build_cmd(self, infname, outdir, parameters):
"""Build a command-line for swarm"""
self._outfname = os.path.join(shlex.quote(outdir), "swarm.out")
self._cmd = build_cmd(infname, self._outfname, parameters)
class SwarmCluster(object):
"""Describes a single Swarm cluster"""
def __init__(self, amplicons, parent=None):
self._amplicons = tuple(sorted(amplicons))
if parent:
self._parent = parent
def __len__(self):
"""Returns the number of amplicons in the cluster"""
return len(self._amplicons)
def __getitem__(self, item):
"""Return sequence IDs from the swarm like a list"""
return self._amplicons[item]
@property
def amplicons(self):
"""The amplicons in a swarm cluster"""
return self._amplicons
@property
def abundance(self):
"""Returns the total abundance of all amplicons in the cluster"""
return sum(self.abundances)
@property
def abundances(self):
"""Returns a list of abundance of each amplicons in the cluster"""
return [int(amp.split("_")[-1]) for amp in self._amplicons]
class SwarmResult(object):
"""Describes the contents of a Swarm output file"""
def __init__(self, name):
self._name = name
self._clusters = list()
def add_swarm(self, amplicons):
"""Adds a list of amplicon IDs as a SwarmCluster"""
self._clusters.append(SwarmCluster(amplicons, self))
def __eq__(self, other):
"""Returns True if all swarms match all swarms in passed result"""
# this test relies on the amplicons being ordered tuples
these_amplicons = {c.amplicons for c in self._clusters}
other_amplicons = {c.amplicons for c in other._clusters}
return these_amplicons == other_amplicons
def __len__(self):
"""Returns the number of swarms in the result"""
return len(self._clusters)
def __str__(self):
"""Return human-readable representation of the SwarmResult"""
outstr = "\n".join(
["SwarmResult: {}".format(self.name), "\tSwarms: {}".format(len(self))]
)
swarmstr = []
for idx, swarm in enumerate(self._clusters):
swarmstr.append("\t\tSwarm {}, size: {}".format(idx, len(swarm)))
swarmstr = "\n".join(swarmstr)
return "\n".join([outstr + swarmstr])
def __getitem__(self, item):
"""Return swarm clusters like a list"""
return self._clusters[item]
@property
def swarms(self):
"""The clusters produced by a swarm run"""
return self._clusters[:]
@property
def name(self):
"""The swarm result filename"""
return self._name
class SwarmParser(object):
"""Parser for Swarm cluster output"""
@classmethod
def read(SwarmParser, fname):
"""Parses the passed Swarm output file into a SwarmResult"""
result = SwarmResult(fname)
with open(fname, "r") as swarms:
for swarm in swarms:
result.add_swarm(swarm.strip().split())
return result
def __init__(self):
pass
|
[
"leighton.pritchard@hutton.ac.uk"
] |
leighton.pritchard@hutton.ac.uk
|
7f128626999fdb25a08a0b49abd7399c216ba13b
|
4c83b4d7aca6bbcd15b922ad7314440fea7c9a70
|
/2020-07-27_modo_horario_cp_onda1_10d/script_modo_horario_2020-03-05_fchk_1036.py
|
d462e860f46dceffa88ab39003f27bd308ae8ade
|
[] |
no_license
|
poloplanejamento/odmatrix-joinville
|
63b60a85055700698cdb590c181e7c8a4d5c7361
|
be7ce0814fb9dad2d289cd836dde51baa9c0850d
|
refs/heads/main
| 2023-01-23T11:43:45.451126
| 2020-12-10T23:17:58
| 2020-12-10T23:17:58
| 320,402,809
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,339
|
py
|
#!/usr/bin/env python3
# Bibliotecas
from http.client import HTTPSConnection
from base64 import b64encode
import json
import csv
import pandas as pd
# Variables
projectID = "40"  # Project ID, as shown in the Web frontend
c = HTTPSConnection("api.odmatrix.app")
# NOTE(review): API token is hard-coded in source — consider moving it to an
# environment variable or config file before sharing this script.
userAndPass = b64encode(b"fe6b53f0280443d5bd40d5d30694f356").decode("ascii")
headers = { 'Authorization' : 'Basic %s' % userAndPass }
finall_list = []  # accumulates one dict per non-zero origin/destination cell
# Date format: YYYY-MM-DD. Up to three dates in the array and the script seems to run without problems
# Desired dates: 12/11, 13/11, 19/11, 20/11, 21/11, 03/03, 04/03, 05/03, 11/03 and 12/03
for date in ["2020-03-05"] :
    for ftriptype in ["microtrip","bus","private_transport"] :
        # One request per one-hour origin time window.
        for ftimeorigin in ["0000_0059","0100_0159","0200_0259","0300_0359","0400_0459","0500_0559","0600_0659","0700_0759","0800_0859","0900_0959","1000_1059","1100_1159","1200_1259","1300_1359","1400_1459","1500_1559","1600_1659","1700_1759","1800_1859","1900_1959","2000_2059","2100_2159","2200_2259","2300_2359"] :
            print(ftimeorigin)
            request = "/generatematrix?format=json&project={}&date={}&ftriptype={}&ftimeorigin={}&fchk_1036=true".format(projectID, date, ftriptype, ftimeorigin)
            c.request('GET', request, headers=headers)
            res = c.getresponse()
            data = res.read()
            matrix = json.loads(data)
            # Walk the OD matrix; rows are origins, columns destinations.
            for i, column in enumerate(matrix['ColumnLabels']):
                for j, row in enumerate(matrix['RowLabels']):
                    value = matrix['Data'][j][i]
                    if value == 0:
                        # Skip empty cells to keep the output small.
                        continue
                    full_row = {}
                    full_row['ProjectID'] = projectID
                    full_row['Date'] = date
                    full_row['TimeOrigin'] = ftimeorigin
                    full_row['Origin'] = row
                    full_row['Destination'] = column
                    full_row['Modo'] = ftriptype
                    full_row['Trips'] = value
                    finall_list.append(full_row)
                    print(full_row)
#print(finall_list)
# Pivot to one row per (project, date, OD pair, mode) with hourly trip columns.
data = pd.DataFrame(finall_list)
final_data = pd.pivot_table(data, index=['ProjectID', 'Date', 'Origin', 'Destination', 'Modo'], columns='TimeOrigin', values='Trips')
final_data.to_csv("OD_por_modo_horario_fchk_1036_2020-03-05.csv")
|
[
"caiocco@gmail.com"
] |
caiocco@gmail.com
|
10c9a1c4063a9dbc167bea682133f4b74469d7c2
|
bce4a906faebfcab5db0e48ad587841d9ef3e74c
|
/train.py
|
6b93bc7a9fa165787181e7fd0a4ad5c992af7b36
|
[
"MIT"
] |
permissive
|
scofield77/pytorch-action-recognition-toy
|
aecddd6da3c032ca25c2bd1facf6fc8f6d72b4e4
|
de0f8820c40d09d34a61849ee572f8af37f5725d
|
refs/heads/master
| 2020-07-20T21:39:46.453236
| 2019-03-05T00:52:39
| 2019-03-05T00:52:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,463
|
py
|
import torch
import numpy as np
import torch.nn as nn
import torch.utils.data
from net import ActionLSTM
from data import ActionDataset
from tensorboardX import SummaryWriter
if __name__ == '__main__':
    # Simple action-recognition training loop: LSTM over ActionDataset,
    # MSE loss against one-hot-style labels, metrics logged to TensorBoard.
    batch_size = 8
    dataset = ActionDataset()
    data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
    net = ActionLSTM()
    criterion = nn.MSELoss()
    optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
    writer = SummaryWriter('./log')
    # NOTE(review): this sample is overwritten by the loop below and never
    # used — looks like leftover debugging code.
    sample = iter(data_loader).__next__()
    global_step = 0
    for i in range(40):  # epochs
        for j, sample in enumerate(data_loader):
            global_step += 1
            net.zero_grad()
            optimizer.zero_grad()
            out = net(sample['values'])
            loss_value = criterion(out, sample['label'])
            # Accuracy from argmax over class scores vs raw integer labels.
            pred = np.argmax(out.detach().numpy(), -1)
            tags = sample['raw_label'].detach().numpy()
            # NOTE(review): divides by the fixed batch_size; the final batch
            # may be smaller, slightly skewing its reported accuracy.
            accuracy = float(np.where(pred == tags, 1, 0).sum() / batch_size)
            print(
                'Epoch {}, Itertaion {}, Loss = {}, Accuracy = {:.2f} %'.format(i + 1, j + 1, loss_value, accuracy * 100))
            writer.add_scalar('loss', loss_value, global_step=global_step)
            writer.add_scalar('accuracy', accuracy, global_step=global_step)
            loss_value.backward()
            optimizer.step()
    writer.close()
    # Persist the trained weights.
    state_dict = net.state_dict()
    torch.save(state_dict, 'model.pth')
|
[
"linkinpark213@outlook.com"
] |
linkinpark213@outlook.com
|
5fe28bbb5bc93700043f88a007f3546307b639c3
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/class_def_attr-big-240.py
|
b5bd95ff2c12b474eee61f9edfe88fbe2ca06b44
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,288
|
py
|
# Synthetic benchmark input (ChocoPy-style): repeated class definitions with
# typed attributes, simple inheritance, and top-level instantiations.
# NOTE: "$ID" below is a template placeholder substituted by the benchmark
# generator — this file is intentionally not valid Python as-is; do not "fix" it.
class A(object):
    x:int = 1

class A2(object):
    x:int = 1
    x2:int = 1

class A3(object):
    x:int = 1
    x2:int = 1
    x3:int = 1

class A4(object):
    x:int = 1
    x2:int = 1
    x3:int = 1
    x4:int = 1

class A5(object):
    x:int = 1
    x2:int = 1
    x3:int = 1
    x4:int = 1
    x5:int = 1

class B(A):
    def __init__(self: "B"):
        pass

class B2(A):
    def __init__(self: "B2"):
        pass

class B3(A):
    def __init__(self: "B3"):
        pass

class B4(A):
    def __init__(self: "B4"):
        pass

class B5(A):
    # "$ID" is the generator's substitution point for a method name.
    def $ID(self: "B5"):
        pass

class C(B):
    z:bool = True

class C2(B):
    z:bool = True
    z2:bool = True

class C3(B):
    z:bool = True
    z2:bool = True
    z3:bool = True

class C4(B):
    z:bool = True
    z2:bool = True
    z3:bool = True
    z4:bool = True

class C5(B):
    z:bool = True
    z2:bool = True
    z3:bool = True
    z4:bool = True
    z5:bool = True

# Top-level declarations and uses exercised by the benchmark.
a:A = None
a2:A = None
a3:A = None
a4:A = None
a5:A = None
b:B = None
b2:B = None
b3:B = None
b4:B = None
b5:B = None
c:C = None
c2:C = None
c3:C = None
c4:C = None
c5:C = None
a = A()
a2 = A()
a3 = A()
a4 = A()
a5 = A()
b = B()
b2 = B()
b3 = B()
b4 = B()
b5 = B()
c = C()
c2 = C()
c3 = C()
c4 = C()
c5 = C()
a.x = 1
b.x = a.x
c.z = a.x == b.x
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
35e4783fdc79bd2c34545d469bcb5e0bd5cd5ee1
|
969fed6b9f4c0daa728bda52fea73d94bda6faad
|
/fakeTempControl/oxford/SIM_MERCURY.py
|
8052e93e5bcb55584ef3d3095d82cb5133ffb948
|
[] |
no_license
|
ess-dmsc/essiip-fakesinqhw
|
7d4c0cb3e412a510db02f011fb9c20edfbd8a84f
|
ad65844c99e64692f07e7ea04d624154a92d57cd
|
refs/heads/master
| 2021-01-18T22:50:50.182268
| 2020-10-01T08:39:30
| 2020-10-01T08:39:30
| 87,077,121
| 0
| 0
| null | 2018-12-07T08:43:00
| 2017-04-03T13:28:23
|
Python
|
UTF-8
|
Python
| false
| false
| 4,643
|
py
|
#!/usr/bin/env python
# vim: ft=python ts=8 sts=4 sw=4 expandtab autoindent smartindent nocindent
# Author: Douglas Clowes (dcl@ansto.gov.au) 2013-06-03
from twisted.internet.task import LoopingCall
from twisted.internet import reactor
from twisted.python import log, usage
from MercurySCPI import MercurySCPI as MYBASE
from MercuryFactory import MercuryFactory
from MercuryProtocol import MercuryProtocol
import os
import sys
sys.path.insert(0, os.path.realpath(os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]),"../../util"))))
from displayscreen import Screen
class MyOptions(usage.Options):
    """Command-line options for the fake Mercury device server."""

    optFlags = [
        ["window", "w", "Create a display window"],
    ]
    optParameters = [
        ["logfile", "l", None, "output logfile name"],
        ["port", "p", None, "port number to listen on"],
    ]

    def __init__(self):
        usage.Options.__init__(self)
        self['files'] = []

    def parseArgs(self, *args):
        # Collect every positional argument under the 'files' key.
        self['files'].extend(args)
class MyScreen(Screen):
    """Curses screen that routes typed lines into the simulated device.

    The device calls back into write() with its response, which is
    appended to the last displayed line.
    """

    def __init__(self, stdscr):
        Screen.__init__(self, stdscr)

    def sendLine(self, txt):
        # Hand the typed line to the module-level fake device and make this
        # screen its "protocol" so replies come back through write().
        global myDev
        myDev.protocol = self
        myDev.dataReceived(txt)

    def write(self, txt):
        # Append the device response to the most recent line as "cmd => reply".
        try:
            newLine = self.lines[-1] + " => " + txt
            del self.lines[-1]
            self.addLine(newLine)
        except:
            # NOTE(review): bare except silently drops the response on any
            # error (e.g. no previous line) — consider narrowing.
            pass
class MYDEV(MYBASE):
    # Concrete fake-device class; behavior comes entirely from the SCPI base.
    def __init__(self):
        MYBASE.__init__(self)
        print MYDEV.__name__, "ctor"  # Python 2 print statement (file is Python 2)
def device_display():
    """Periodic tick: advance the fake device and repaint the curses screen.

    Driven by a twisted LoopingCall (see __main__). When the --window flag
    is not set, only the device iteration runs.
    """
    global screen, myDev, myOpts, myPort, myFactory
    try:
        myDev.doIteration();
    except:
        # Re-raise so device errors are not hidden by the display loop.
        raise
    if not myOpts["window"]:
        return
    try:
        rows, cols = screen.stdscr.getmaxyx()
        # Status line: link count, random factor, identity and port.
        screen.stdscr.addstr(0, 0, "Lnks:%2d" % myFactory.numProtocols)
        screen.stdscr.addstr(0, 10, "Rnd:%6.3f" % myDev.RANDOM)
        screen.stdscr.addstr(0, 22, "Identity : %s (%d)" % (myDev.IDN, myPort))
        # Cryogen line: valve opening and helium/nitrogen levels.
        screen.stdscr.addstr(1, 0, "Valve: %8.4f%%" % myDev.valve_open)
        screen.stdscr.addstr(1, 20, "Helium: %8.4f%%" % myDev.hlev)
        screen.stdscr.addstr(1, 40, "Nitrogen: %8.4f%%" % myDev.nlev)
        base = 1
        # Row labels for the per-loop columns below.
        screen.stdscr.addstr(base + 1, 0, "Sensor :")
        screen.stdscr.addstr(base + 2, 0, "PV :")
        screen.stdscr.addstr(base + 3, 0, "Setpoint :")
        screen.stdscr.addstr(base + 4, 0, "T Delta :")
        screen.stdscr.addstr(base + 5, 0, "PV Delta :")
        # One 12-character column per configured sensor/loop; stop at the
        # right screen edge.
        for idx in myDev.CONFIG_SNSRS:
            if 12 + (idx - 1) * 12 > cols - 1:
                break
            screen.stdscr.addstr(base + 1, 12 + (idx - 1) * 12, "%8.3f" % myDev.Loops[idx].sensor)
        for idx in myDev.CONFIG_LOOPS:
            if 12 + (idx - 1) * 12 > cols - 1:
                break
            screen.stdscr.addstr(base + 2, 12 + (idx - 1) * 12, "%8.3f" % myDev.Loops[idx].pv)
            screen.stdscr.addstr(base + 3, 12 + (idx - 1) * 12, "%8.3f" % myDev.Loops[idx].setpoint)
            screen.stdscr.addstr(base + 4, 12 + (idx - 1) * 12, "%8.3f" % (myDev.Loops[idx].setpoint - myDev.Loops[idx].sensor))
            screen.stdscr.addstr(base + 5, 12 + (idx - 1) * 12, "%8.3f" % (myDev.Loops[idx].setpoint - myDev.Loops[idx].pid_delta))
    except:
        # NOTE(review): bare except — display errors (e.g. terminal resize)
        # are deliberately ignored so the device keeps running.
        pass
    finally:
        try:
            screen.stdscr.refresh()
        except:
            pass
if __name__ == "__main__":
global screen, myDev, myOpts, myPort, myFactory
myOpts = MyOptions()
try:
myOpts.parseOptions()
except usage.UsageError, errortext:
print '%s: %s' % (sys.argv[0], errortext)
print '%s: Try --help for usage details.' % (sys.argv[0])
raise SystemExit, 1
myDev = MYDEV()
default_port = 7020
myPort = default_port
logfile = None
if myOpts["port"]:
myPort = int(myOpts["port"])
if myPort < 1025 or myPort > 65535:
myPort = default_port
if myOpts["window"]:
logfile = "/tmp/Fake_Mercury_%d.log" % (myPort)
if myOpts["logfile"]:
logfile = myOpts["logfile"]
if logfile:
log.startLogging(open(logfile, "w"))
else:
log.startLogging(sys.stdout)
#log.startLogging(sys.stderr)
if myOpts["window"]:
import curses
stdscr = curses.initscr()
screen = MyScreen(stdscr)
# add screen object as a reader to the reactor
reactor.addReader(screen)
myFactory = MercuryFactory(MercuryProtocol, myDev, "\r")
lc = LoopingCall(device_display)
lc.start(0.250)
reactor.listenTCP(myPort, myFactory) # server
reactor.run()
|
[
"mark.koennecke@psi.ch"
] |
mark.koennecke@psi.ch
|
47db47b32423507921839a5579f5c66157eed44b
|
d115cf7a1b374d857f6b094d4b4ccd8e9b1ac189
|
/pyplusplus_dev/pyplusplus/_logging_/__init__.py
|
a888e10fb87659587d0fef6b90646c31533e1bb1
|
[
"BSL-1.0"
] |
permissive
|
gatoatigrado/pyplusplusclone
|
30af9065fb6ac3dcce527c79ed5151aade6a742f
|
a64dc9aeeb718b2f30bd6a5ff8dcd8bfb1cd2ede
|
refs/heads/master
| 2016-09-05T23:32:08.595261
| 2010-05-16T10:53:45
| 2010-05-16T10:53:45
| 700,369
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,227
|
py
|
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
#TODO: find better place for it
"""defines logger classes"""
import os
import sys
import logging
import cStringIO
from multi_line_formatter import multi_line_formatter_t
def create_handler(stream=None):
    """Build a StreamHandler using the project's multi-line formatter.

    :param stream: destination stream; ``None`` lets ``StreamHandler``
        default to ``sys.stderr``.
    :returns: a configured :class:`logging.StreamHandler`.

    Fix: the original ``if stream: ... else: ...`` had two byte-identical
    branches (both passed ``stream`` through). Collapsed to a single call;
    behaviour is unchanged because ``StreamHandler(None)`` also falls back
    to stderr.
    """
    handler = logging.StreamHandler(stream)
    handler.setFormatter(multi_line_formatter_t(os.linesep + '%(levelname)s: %(message)s'))
    return handler
def _create_logger_(name, stream=None):
    """Implementation detail: create and configure one named project logger."""
    new_logger = logging.getLogger(name)
    new_logger.setLevel(logging.INFO)
    new_logger.addHandler(create_handler(stream))
    # Keep project loggers isolated from the root logger's handlers.
    new_logger.propagate = False
    return new_logger
class loggers:
    """class-namespace, defines few loggers classes, used in the project"""
    # Shared in-memory stream; set by make_inmemory(), otherwise None.
    stream = None

    file_writer = _create_logger_( 'pyplusplus.file_writer' )
    """logger for classes that write code to files"""

    declarations = _create_logger_( 'pyplusplus.declarations' )
    """logger for declaration classes

    This is very import logger. All important messages: problems with declarations,
    warnings or hints are written to this logger.
    """

    module_builder = _create_logger_( 'pyplusplus.module_builder' )
    """logger that in use by :class:`module_builder.module_builder_t` class.

    Just another logger. It exists mostly for `Py++` developers.
    """

    #root logger exists for configuration purpose only
    root = logging.getLogger( 'pyplusplus' )
    """root logger exists for your convenience only"""

    all = [ root, file_writer, module_builder, declarations ]
    """contains all logger classes, defined by the class"""

    @staticmethod
    def make_inmemory():
        # Redirect every project logger to a single in-memory buffer.
        loggers.stream = cStringIO.StringIO()
        for logger in loggers.all:
            # NOTE(review): map() used for its side effect — fine on Python 2
            # (this module imports cStringIO), but would be a lazy no-op on
            # Python 3.
            map( lambda h: logger.removeHandler( h ), logger.handlers[:] )
            logger.addHandler( create_handler( loggers.stream ) )
|
[
"roman_yakovenko@dc5859f9-2512-0410-ae5c-dd123cda1f76"
] |
roman_yakovenko@dc5859f9-2512-0410-ae5c-dd123cda1f76
|
f5fded1a9cdae582a279e092b4b999bd1c6375da
|
c1c39c5e9456a4c175c651ba224a53c4a76f902a
|
/helpers/azure.py
|
dd8023fe4596e2fb34fbe94615ca383e28d235ef
|
[] |
no_license
|
syllogy/cloud_sizes
|
5312c190c88303e78601496f3cc0206e5f7d0991
|
b97b782a2e786373992ca0ca51b40625d2d2ea91
|
refs/heads/master
| 2023-07-11T19:00:57.057759
| 2021-08-27T03:53:08
| 2021-08-27T03:54:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 987
|
py
|
#!/usr/bin/env python3
from netaddr import IPSet, IPNetwork
from itertools import chain
from requests import get
import re
def get_and_parse():
    """Fetch Microsoft's Azure IP-range JSON and split it into v4/v6 IPSets.

    Returns a tuple ``("azure", "Azure", v4, v6, True)``; the meaning of the
    trailing boolean is not visible here — presumably an "enabled" flag for
    the aggregator, TODO confirm against callers.
    """
    # I'm shocked, shocked I tell you to see that MS requires you do
    # something oddball like dig into an HTML page to get the latest
    # data file.
    url = "https://www.microsoft.com/en-us/download/confirmation.aspx?id=56519"
    data = get(url).text
    # Scrape the confirmation page for the actual JSON download URL.
    m = re.search('(?P<json>https://download.*?\.json)', data)
    url = m.group("json")
    data = get(url).json()

    # Pull out all of the IPs
    azure = IPSet(IPNetwork(y) for y in chain.from_iterable(x['properties']['addressPrefixes'] for x in data['values']))

    # Pull out the v4 and v6 cidrs
    v4 = IPSet([x for x in azure.iter_cidrs() if x.network.version == 4])
    v6 = IPSet([x for x in azure.iter_cidrs() if x.network.version == 6])

    return "azure", "Azure", v4, v6, True
if __name__ == "__main__":
print("This module is not meant to be run directly")
|
[
"scott.seligman@gmail.com"
] |
scott.seligman@gmail.com
|
9640b58c5a0cb8df3d449504d5764902b2ec7211
|
59df4e1fd50d2e81b6490bb5322084165033cefc
|
/seed.py
|
b773b252a7d0b6c0a87526060a0884f95e697567
|
[] |
no_license
|
JKinsler/ratings-lab
|
0cfafd345e27f19dfcedb249a23d5bf7fc6eebb0
|
6c7196c36dd6c09074b84deca0653ae445ca7651
|
refs/heads/master
| 2023-02-08T11:03:30.762832
| 2020-02-06T20:12:57
| 2020-02-06T20:12:57
| 238,305,896
| 1
| 0
| null | 2023-02-02T05:13:18
| 2020-02-04T21:03:13
|
Python
|
UTF-8
|
Python
| false
| false
| 3,561
|
py
|
"""Utility file to seed ratings database from MovieLens data in seed_data/"""
from sqlalchemy import func
from datetime import datetime
from model import User
from model import Rating
from model import Movie
from model import connect_to_db, db
from server import app
def load_users():
    """Load users from u.user into database."""

    print("Users")

    # Clear the table first so reruns don't insert duplicate users.
    User.query.delete()

    # u.item-style pipe-delimited rows: id|age|gender|occupation|zipcode.
    for line in open("seed_data/u.user"):
        user_id, age, gender, occupation, zipcode = line.rstrip().split("|")
        db.session.add(User(user_id=user_id,
                            age=age,
                            zipcode=zipcode))

    # Persist everything in one commit.
    db.session.commit()
def load_movies():
    """Load movies from u.item into database."""

    print("Movies")

    # Clear the table first so reruns don't insert duplicate movies.
    Movie.query.delete()

    for line in open("seed_data/u.item"):
        fields = line.rstrip().split("|")

        # Drop the trailing "(YYYY)" token from the title.
        title = ' '.join(fields[1].split()[:-1])

        # Release dates look like "01-Jan-1995".
        date_released = datetime.strptime(fields[2], "%d-%b-%Y")

        db.session.add(Movie(movie_id=fields[0],
                             title=title,
                             release_at=date_released,
                             imdb_url=fields[4]))

    # Persist everything in one commit.
    db.session.commit()
def load_ratings():
    """Load ratings from u.data into database."""

    print("Ratings")

    # Clear the table first so reruns don't insert duplicate ratings.
    Rating.query.delete()

    # Whitespace-delimited rows: user_id, movie_id, score (timestamp ignored).
    for line in open("seed_data/u.data"):
        user_id, movie_id, score = line.rstrip().split()[:3]
        db.session.add(Rating(user_id=user_id,
                              movie_id=movie_id,
                              score=score))

    # Persist everything in one commit.
    db.session.commit()
def set_val_user_id():
    """Set value for the next user_id after seeding database"""

    # Highest user_id currently stored.
    max_id = int(db.session.query(func.max(User.user_id)).one()[0])

    # Advance the sequence so newly registered users don't collide.
    db.session.execute("SELECT setval('users_user_id_seq', :new_id)",
                       {'new_id': max_id + 1})
    db.session.commit()
if __name__ == "__main__":
connect_to_db(app)
# In case tables haven't been created, create them
db.create_all()
# Import different types of data
load_users()
load_movies()
load_ratings()
set_val_user_id()
|
[
"you@example.com"
] |
you@example.com
|
d29db2d8b3506d66b6f9f90e3eb98490ae2db3e2
|
bcfa02c21a73798872bbb28303233d1f0039cf00
|
/server/www/packages/packages-darwin/x64/ldap3/protocol/sasl/kerberos.py
|
5000ebf430968b22f94a18fd3c42a8d85112d7ea
|
[
"Apache-2.0"
] |
permissive
|
zhoulhb/teleport
|
6301cd50c951bcbac21cbe24017eb8421ff57adc
|
54da194697898ef77537cfe7032d774555dc1335
|
refs/heads/master
| 2021-11-10T17:10:59.661130
| 2021-11-09T11:16:19
| 2021-11-09T11:16:19
| 192,643,069
| 0
| 0
|
Apache-2.0
| 2019-06-19T02:20:53
| 2019-06-19T02:20:52
| null |
UTF-8
|
Python
| false
| false
| 5,038
|
py
|
"""
"""
# Created on 2015.04.08
#
# Author: Giovanni Cannata
#
# Copyright 2015 - 2018 Giovanni Cannata
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
# original code by Hugh Cole-Baker, modified by Peter Foley
# it needs the gssapi package
import socket
from ...core.exceptions import LDAPPackageUnavailableError, LDAPCommunicationError
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
import gssapi
except ImportError:
raise LDAPPackageUnavailableError('package gssapi missing')
from .sasl import send_sasl_negotiation, abort_sasl_negotiation
# Security-layer bitmask values from the server's SASL GSSAPI negotiation
# byte (see RFC 4752). Only "no security layer" is supported below.
NO_SECURITY_LAYER = 1
INTEGRITY_PROTECTION = 2
CONFIDENTIALITY_PROTECTION = 4
def sasl_gssapi(connection, controls):
    """
    Performs a bind using the Kerberos v5 ("GSSAPI") SASL mechanism
    from RFC 4752. Does not support any security layers, only authentication!
    sasl_credentials can be empty or a tuple with one or two elements.
    The first element determines which service principal to request a ticket for and can be one of the following:

    - None or False, to use the hostname from the Server object
    - True to perform a reverse DNS lookup to retrieve the canonical hostname for the hosts IP address
    - A string containing the hostname

    The optional second element is what authorization ID to request.

    - If omitted or None, the authentication ID is used as the authorization ID
    - If a string, the authorization ID to use. Should start with "dn:" or "user:".
    """
    target_name = None
    authz_id = b""
    if connection.sasl_credentials:
        if len(connection.sasl_credentials) >= 1 and connection.sasl_credentials[0]:
            if connection.sasl_credentials[0] is True:
                # Reverse-DNS the peer address for the canonical hostname.
                hostname = socket.gethostbyaddr(connection.socket.getpeername()[0])[0]
                target_name = gssapi.Name('ldap@' + hostname, gssapi.NameType.hostbased_service)
            else:
                target_name = gssapi.Name('ldap@' + connection.sasl_credentials[0], gssapi.NameType.hostbased_service)
        if len(connection.sasl_credentials) >= 2 and connection.sasl_credentials[1]:
            authz_id = connection.sasl_credentials[1].encode("utf-8")
    if target_name is None:
        # Default: service principal built from the configured server host.
        target_name = gssapi.Name('ldap@' + connection.server.host, gssapi.NameType.hostbased_service)
    creds = gssapi.Credentials(name=gssapi.Name(connection.user), usage='initiate') if connection.user else None
    ctx = gssapi.SecurityContext(name=target_name, mech=gssapi.MechType.kerberos, creds=creds)
    in_token = None
    try:
        # Token exchange loop: step the GSSAPI context, relaying each token
        # through an LDAP SASL bind, until the context is established.
        while True:
            out_token = ctx.step(in_token)
            if out_token is None:
                out_token = ''
            result = send_sasl_negotiation(connection, controls, out_token)
            in_token = result['saslCreds']
            try:
                # This raised an exception in gssapi<1.1.2 if the context was
                # incomplete, but was fixed in
                # https://github.com/pythongssapi/python-gssapi/pull/70
                if ctx.complete:
                    break
            except gssapi.exceptions.MissingContextError:
                pass

        # Final server challenge: 1 byte of security-layer flags followed by
        # a 3-byte max buffer size (RFC 4752 section 3.1).
        unwrapped_token = ctx.unwrap(in_token)
        if len(unwrapped_token.message) != 4:
            raise LDAPCommunicationError("Incorrect response from server")

        server_security_layers = unwrapped_token.message[0]
        if not isinstance(server_security_layers, int):
            # Python 2: indexing bytes yields a str of length 1.
            server_security_layers = ord(server_security_layers)
        if server_security_layers in (0, NO_SECURITY_LAYER):
            # NOTE(review): on Python 3 `message` is bytes, so comparing its
            # tail with the str literal '\x00\x00\x00' is always unequal —
            # verify this check behaves as intended on Python 3.
            if unwrapped_token.message[1:] != '\x00\x00\x00':
                raise LDAPCommunicationError("Server max buffer size must be 0 if no security layer")
        if not (server_security_layers & NO_SECURITY_LAYER):
            raise LDAPCommunicationError("Server requires a security layer, but this is not implemented")

        # Client response: request "no security layer", zero max buffer size,
        # plus the optional authorization identity.
        client_security_layers = bytearray([NO_SECURITY_LAYER, 0, 0, 0])
        out_token = ctx.wrap(bytes(client_security_layers)+authz_id, False)
        return send_sasl_negotiation(connection, controls, out_token.message)
    except (gssapi.exceptions.GSSError, LDAPCommunicationError):
        # Tell the server the negotiation is aborted, then propagate.
        abort_sasl_negotiation(connection, controls)
        raise
|
[
"apex.liu@qq.com"
] |
apex.liu@qq.com
|
242f05cd4aae7555fbe6bf5702093febdcbb83e4
|
81eff1c9bc75cd524153400cdbd7c453ee8e3635
|
/zxcar_ws/devel/lib/python2.7/dist-packages/astra_camera/srv/_GetDeviceType.py
|
09fd9a871d38977c6e36077b57b4a6a1ceed594b
|
[] |
no_license
|
sukai33/zxcar_all
|
bbacbf85c5e7c93d2e98b03958342ec01e3dafd9
|
af389f095591a70cae01c1d116aa74d68223f317
|
refs/heads/master
| 2023-01-03T13:32:00.864543
| 2020-10-29T05:22:43
| 2020-10-29T05:22:43
| 300,556,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,207
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from astra_camera/GetDeviceTypeRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class GetDeviceTypeRequest(genpy.Message):
    # Autogenerated by genpy (see module docstring) — empty request message:
    # the GetDeviceType service takes no arguments, so (de)serialization is a
    # no-op. Do not hand-edit; regenerate from the .srv definition instead.
    _md5sum = "d41d8cd98f00b204e9800998ecf8427e"
    _type = "astra_camera/GetDeviceTypeRequest"
    _has_header = False # flag to mark the presence of a Header object
    _full_text = """"""
    __slots__ = []
    _slot_types = []

    def __init__(self, *args, **kwds):
        """
        Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommend
        use is keyword arguments as this is more robust to future message
        changes. You cannot mix in-order arguments and keyword arguments.

        The available fields are:

        :param args: complete set of field values, in .msg order
        :param kwds: use keyword arguments corresponding to message field names
        to set specific fields.
        """
        if args or kwds:
            super(GetDeviceTypeRequest, self).__init__(*args, **kwds)

    def _get_types(self):
        """
        internal API method
        """
        return self._slot_types

    def serialize(self, buff):
        """
        serialize message into buffer
        :param buff: buffer, ``StringIO``
        """
        try:
            pass  # no fields to write
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize(self, str):
        """
        unpack serialized message in str into this message instance
        :param str: byte array of serialized message, ``str``
        """
        try:
            end = 0  # no fields to read
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) # most likely buffer underfill

    def serialize_numpy(self, buff, numpy):
        """
        serialize message with numpy array types into buffer
        :param buff: buffer, ``StringIO``
        :param numpy: numpy python module
        """
        try:
            pass  # no fields to write
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize_numpy(self, str, numpy):
        """
        unpack serialized message in str into this message instance using numpy for array types
        :param str: byte array of serialized message, ``str``
        :param numpy: numpy python module
        """
        try:
            end = 0  # no fields to read
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) # most likely buffer underfill
# Cached struct for the 4-byte little-endian length prefix (genpy boilerplate).
_struct_I = genpy.struct_I
def _get_struct_I():
    global _struct_I
    return _struct_I
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from astra_camera/GetDeviceTypeResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class GetDeviceTypeResponse(genpy.Message):
    # Autogenerated by genpy (see module docstring) — response message with a
    # single string field `device_type`. Do not hand-edit; regenerate from the
    # .srv definition instead.
    _md5sum = "4c8e9dd50b39344412b92ce9e1e9615c"
    _type = "astra_camera/GetDeviceTypeResponse"
    _has_header = False # flag to mark the presence of a Header object
    _full_text = """string device_type

"""
    __slots__ = ['device_type']
    _slot_types = ['string']

    def __init__(self, *args, **kwds):
        """
        Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommend
        use is keyword arguments as this is more robust to future message
        changes. You cannot mix in-order arguments and keyword arguments.

        The available fields are:
        device_type

        :param args: complete set of field values, in .msg order
        :param kwds: use keyword arguments corresponding to message field names
        to set specific fields.
        """
        if args or kwds:
            super(GetDeviceTypeResponse, self).__init__(*args, **kwds)
            # message fields cannot be None, assign default values for those that are
            if self.device_type is None:
                self.device_type = ''
        else:
            self.device_type = ''

    def _get_types(self):
        """
        internal API method
        """
        return self._slot_types

    def serialize(self, buff):
        """
        serialize message into buffer
        :param buff: buffer, ``StringIO``
        """
        try:
            # Length-prefixed UTF-8 string (genpy wire format).
            _x = self.device_type
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize(self, str):
        """
        unpack serialized message in str into this message instance
        :param str: byte array of serialized message, ``str``
        """
        try:
            end = 0
            # Read the 4-byte length prefix, then the string payload.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.device_type = str[start:end].decode('utf-8')
            else:
                self.device_type = str[start:end]
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) # most likely buffer underfill

    def serialize_numpy(self, buff, numpy):
        """
        serialize message with numpy array types into buffer
        :param buff: buffer, ``StringIO``
        :param numpy: numpy python module
        """
        try:
            # Identical to serialize(); numpy is unused for plain strings.
            _x = self.device_type
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize_numpy(self, str, numpy):
        """
        unpack serialized message in str into this message instance using numpy for array types
        :param str: byte array of serialized message, ``str``
        :param numpy: numpy python module
        """
        try:
            end = 0
            # Identical to deserialize(); numpy is unused for plain strings.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.device_type = str[start:end].decode('utf-8')
            else:
                self.device_type = str[start:end]
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) # most likely buffer underfill
# Cached struct for the 4-byte little-endian length prefix (genpy boilerplate,
# duplicated per generated message section).
_struct_I = genpy.struct_I
def _get_struct_I():
    global _struct_I
    return _struct_I
class GetDeviceType(object):
    # Service descriptor tying together the request/response classes for
    # rospy service registration (genpy boilerplate).
    _type = 'astra_camera/GetDeviceType'
    _md5sum = '4c8e9dd50b39344412b92ce9e1e9615c'
    _request_class = GetDeviceTypeRequest
    _response_class = GetDeviceTypeResponse
|
[
"422168787@qq.com"
] |
422168787@qq.com
|
de4a7ed167183486e87b3d21fa8d14dc7a5e85a7
|
0e887d0cd010434e101eece419229aa4813ad893
|
/image_captioning/data/datasets/coco.py
|
ddf7c7b787cd99e21faba7310fda4ee6fb6bb0f6
|
[] |
no_license
|
congve1/image_captioning
|
2c11d3ee80f0836853c7decf1255ac879f7a90b6
|
64cadfb9e072313f45f536f539b3cb8deb0432cd
|
refs/heads/master
| 2020-04-10T07:49:14.748923
| 2019-01-28T10:47:33
| 2019-01-28T10:47:33
| 160,889,848
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,524
|
py
|
import pickle
import json
import os
import torch
import lmdb
import numpy as np
import logging
import time
class COCODataset(torch.utils.data.dataset.Dataset):
    """COCO captioning dataset reading per-image feature tensors from disk.

    Each image has ``seq_per_img`` captions; the dataset length is the
    number of captions, and ``index // seq_per_img`` maps a caption back
    to its image.

    Fix: ``__getitem__`` previously built a ``data`` dict with the same
    values and then discarded it (only the tuple was returned); that dead
    work has been removed. Returned values are unchanged.
    """

    def __init__(
        self,
        root,
        att_features_paths_file,
        fc_features_paths_file,
        encoded_captions_file,
        encoded_captions_lens_file,
        cocoids_file,
        seq_per_img,
        **kwargs
    ):
        self.root = root
        self.seq_per_img = seq_per_img
        # JSON lists with one entry per image.
        with open(att_features_paths_file, 'r') as f:
            self.att_features_paths = json.load(f)
        with open(fc_features_paths_file, 'r') as f:
            self.fc_features_paths = json.load(f)
        with open(cocoids_file, 'r') as f:
            self.cocoids = json.load(f)
        # Tensors with one row per caption.
        self.encoded_captions = torch.load(encoded_captions_file,
                                           map_location='cpu')
        self.encoded_captions_lens = torch.load(encoded_captions_lens_file,
                                                map_location='cpu')

    def __getitem__(self, index):
        """Return (att_feature, fc_feature, caption, cap_len, all_captions, cocoid).

        Features are loaded lazily from disk per item; `all_captions` holds
        every caption of the same image — presumably used as the full
        reference set at evaluation time (confirm with caller).
        """
        img_idx = index // self.seq_per_img
        att_feature = torch.load(self.att_features_paths[img_idx],
                                 map_location='cpu')
        fc_feature = torch.load(self.fc_features_paths[img_idx],
                                map_location='cpu')
        cap_len = self.encoded_captions_lens[index]
        caption = self.encoded_captions[index]
        all_captions = self.encoded_captions[
            img_idx * self.seq_per_img:(img_idx + 1) * self.seq_per_img
        ]
        cocoid = self.cocoids[img_idx]
        return att_feature.unsqueeze(0), fc_feature.unsqueeze(0), caption, cap_len, all_captions, cocoid

    def __len__(self):
        # One entry per caption, not per image.
        return len(self.encoded_captions_lens)
class COCODatasetLMDB(torch.utils.data.dataset.Dataset):
    """COCO captioning dataset backed by two LMDB stores of float32 features.

    Mirrors COCODataset, but features are looked up in LMDB by a padded
    cocoid key instead of loaded from per-image tensor files.
    """

    def __init__(
        self,
        root,
        att_features_lmdb,
        fc_features_lmdb,
        encoded_captions_file,
        encoded_captions_lens_file,
        cocoids_file,
        seq_per_img,
        att_feature_shape,
        fc_feature_shape,
    ):
        self.root = root
        self.seq_per_img = seq_per_img
        # Shapes used to reinterpret the raw LMDB byte buffers.
        self.att_feature_shape = att_feature_shape
        self.fc_feature_shape = fc_feature_shape
        with open(cocoids_file, 'r') as f:
            self.cocoids = json.load(f)
        # Tensors with one row per caption.
        self.encoded_captions = torch.load(
            encoded_captions_file,
            map_location='cpu'
        )
        self.encoded_captions_lens = torch.load(
            encoded_captions_lens_file,
            map_location='cpu'
        )
        # Read-only LMDB environments; readahead/meminit disabled for
        # random-access workloads.
        self.att_features_lmdb = lmdb.open(
            att_features_lmdb, readonly=True, max_readers=1,
            lock=False, readahead=False, meminit=False
        )
        self.fc_features_lmdb = lmdb.open(
            fc_features_lmdb, readonly=True, max_readers=1,
            lock=False, readahead=False, meminit=False
        )

    def __getitem__(self, index):
        att_features_lmdb = self.att_features_lmdb
        fc_features_lmdb = self.fc_features_lmdb
        cocoid = self.cocoids[index//self.seq_per_img]
        # Keys are the cocoid formatted into a fixed 8-character field.
        cocoid_enc = "{:8d}".format(cocoid).encode()
        with att_features_lmdb.begin(write=False) as txn:
            att_feature = txn.get(cocoid_enc)
            # NOTE(review): np.frombuffer yields a read-only array; the
            # resulting tensor shares that memory — verify downstream code
            # never writes into it in place.
            att_feature = np.frombuffer(att_feature, dtype=np.float32)
            att_feature = att_feature.reshape(self.att_feature_shape)
            att_feature = torch.from_numpy(att_feature)
        with fc_features_lmdb.begin(write=False) as txn:
            fc_feature = txn.get(cocoid_enc)
            fc_feature = np.frombuffer(fc_feature, dtype=np.float32)
            fc_feature = fc_feature.reshape(self.fc_feature_shape)
            fc_feature = torch.from_numpy(fc_feature)
        cap_len = self.encoded_captions_lens[index]
        caption = self.encoded_captions[index]
        # All captions belonging to the same image.
        all_captions = self.encoded_captions[
            (index//self.seq_per_img)*self.seq_per_img:
            ((index//self.seq_per_img)+1)*self.seq_per_img
        ]
        return att_feature, fc_feature, caption, cap_len, all_captions, cocoid

    def __len__(self):
        # One entry per caption, not per image.
        return len(self.encoded_captions_lens)
|
[
"congve1@live.com"
] |
congve1@live.com
|
8464caf19dff35b15183b1d7669a91eeb8c8a1aa
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02712/s290713931.py
|
2f012061dba1cd50f56bd01af90ace84c3b45931
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
def resolve():
    """Read n from stdin and print the sum of 1..n, skipping every
    integer divisible by 3 or by 5."""
    n = int(input())
    total = sum(v for v in range(1, n + 1) if v % 3 != 0 and v % 5 != 0)
    print(total)


resolve()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
3a0cca8abc3ef3909ea5fe190d9189a0f0d90ae8
|
d5b48163d236ca770be8e687f92192e2971397e8
|
/globalvariableFunction1.py
|
3477ac1307caf7853c39640dc6a0019cd95789a2
|
[] |
no_license
|
Kunal352000/python_program
|
191f5d9c82980eb706e11457c2b5af54b0d2ae95
|
7a1c645f9eab87cc45a593955dcb61b35e2ce434
|
refs/heads/main
| 2023-07-12T19:06:19.121741
| 2021-08-21T11:58:41
| 2021-08-21T11:58:41
| 376,606,535
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
x=10#global variable
def f1():
    # Because `x` is assigned below (x+=5), Python treats `x` as local to f1,
    # so this first print raises UnboundLocalError (not 10 as one might expect).
    print(x)#UnboundLocalError: local variable 'x' referenced before assignment
    x+=5
    print(x)#never reached
f1()
print(x)#never reached: the script aborts inside f1()
def f2():
    # Reading a global without assigning it is fine -- this WOULD print the
    # global x if execution ever got here.
    print(x)#would print 10
f2()
|
[
"noreply@github.com"
] |
Kunal352000.noreply@github.com
|
61e08cd88ef23fb7e87890a5b36fc050d8df3f6d
|
4a2a0cfc984a9faa45903732d776cd61ea361779
|
/pwncat/modules/agnostic/implant.py
|
545178a75fb449321c95997dc09fb7844b0c1a07
|
[] |
no_license
|
PremHcz/pwncat
|
2a746cfc546158fa288994b376fd71768672a33a
|
cb203349d7ca815c9350eb53f4bd2e0b0ee659fa
|
refs/heads/master
| 2023-05-30T19:07:16.327320
| 2021-06-14T13:01:19
| 2021-06-14T13:01:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,437
|
py
|
#!/usr/bin/env python3
from typing import List
from rich.prompt import Prompt
from pwncat.util import console
from pwncat.facts import Implant, KeepImplantFact
from pwncat.modules import Bool, Status, Argument, BaseModule, ModuleFailed
class Module(BaseModule):
    """Interact with installed implants in an open session. This module
    provides the ability to remove implants as well as manually escalate
    with a given implant. Implants implementing local escalation will
    automatically be picked up by the `escalate` command, however this
    module provides an alternative way to trigger escalation manually."""

    PLATFORM = None
    """ No platform restraints """

    ARGUMENTS = {
        "remove": Argument(Bool, default=False, help="remove installed implants"),
        "escalate": Argument(
            Bool, default=False, help="escalate using an installed local implant"
        ),
    }

    def run(self, session, remove, escalate):
        """Perform the requested action (exactly one of remove/escalate).

        Yields Status updates; raises ModuleFailed on bad arguments or when
        no escalation implant works.
        """

        if (not remove and not escalate) or (remove and escalate):
            raise ModuleFailed("expected one of escalate or remove")

        # Look for matching implants; for escalation only keep implants that
        # can replace the current shell or spawn a new session.
        implants = list(
            implant
            for implant in session.run("enumerate", types=["implant.*"])
            if not escalate
            or "implant.replace" in implant.types
            or "implant.spawn" in implant.types
        )

        try:
            # Pause the progress bar while prompting interactively.
            session._progress.stop()

            console.print("Found the following implants:")
            for i, implant in enumerate(implants):
                console.print(f"{i+1}. {implant.title(session)}")

            if remove:
                prompt = "Which should we remove (e.g. '1 2 4', default: all)? "
            elif escalate:
                prompt = "Which should we attempt escalation with (e.g. '1 2 4', default: all)? "

            while True:
                selections = Prompt.ask(prompt, console=console)
                if selections == "":
                    # Empty answer keeps the full implant list (default: all).
                    break

                try:
                    # BUGFIX: split on whitespace before parsing.  Iterating
                    # the raw string walked it character-by-character, so
                    # "12" became [1, 2] and any space made int() raise.
                    implant_ids = [int(idx) for idx in selections.split()]
                    # Filter the implants (user input is 1-based)
                    implants: List[Implant] = [implants[i - 1] for i in implant_ids]
                    break
                except (IndexError, ValueError):
                    console.print("[red]error[/red]: invalid selection!")

        finally:
            session._progress.start()

        nremoved = 0

        for implant in implants:
            if remove:
                try:
                    yield Status(f"removing: {implant.title(session)}")
                    implant.remove(session)
                    session.target.facts.remove(implant)
                    nremoved += 1
                except KeepImplantFact:
                    # Remove implant types but leave the fact
                    implant.types.remove("implant.remote")
                    implant.types.remove("implant.replace")
                    implant.types.remove("implant.spawn")
                    nremoved += 1
                except ModuleFailed:
                    session.log(
                        f"[red]error[/red]: removal failed: {implant.title(session)}"
                    )
            elif escalate:
                try:
                    yield Status(
                        f"attempting escalation with: {implant.title(session)}"
                    )
                    result = implant.escalate(session)

                    if "implant.spawn" in implant.types:
                        # Move to the newly established session
                        session.manager.target = result
                    else:
                        # Track the new shell layer in the current session
                        session.layers.append(result)

                    session.platform.refresh_uid()
                    session.log(
                        f"escalation [green]succeeded[/green] with: {implant.title(session)}"
                    )
                    break
                except ModuleFailed:
                    # Try the next implant before giving up.
                    continue
        else:
            if escalate:
                raise ModuleFailed("no working local escalation implants found")

        if nremoved:
            session.log(f"removed {nremoved} implants from target")

        # Save database modifications
        session.db.transaction_manager.commit()
|
[
"caleb.stewart94@gmail.com"
] |
caleb.stewart94@gmail.com
|
b83dddb0ba5cc289c6faf02198f34fa7f1efb501
|
bd1362c60313784c90013dfc9f0169e64389bf27
|
/scripts/dbutil/set_wfo.py
|
7790328ad4c796e3e0c601f831caf22e45761999
|
[] |
no_license
|
ForceCry/iem
|
391aa9daf796591909cb9d4e60e27375adfb0eab
|
4b0390d89e6570b99ca83a5fa9b042226e17c1ad
|
refs/heads/master
| 2020-12-24T19:04:55.517409
| 2013-04-09T14:25:36
| 2013-04-09T14:25:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 917
|
py
|
"""
Assign a WFO to sites in the metadata tables that have no WFO set
$Id: $:
"""
import re
import iemdb
MESOSITE = iemdb.connect('mesosite')
mcursor = MESOSITE.cursor()
mcursor2 = MESOSITE.cursor()
# Find sites we need to check on
mcursor.execute("""select s.id, c.wfo, s.iemid, s.network
from stations s, cwa c WHERE
s.geom && c.the_geom and contains(c.the_geom, s.geom)
and (s.wfo IS NULL or s.wfo = '') and s.country = 'US' """)
for row in mcursor:
id = row[0]
wfo = row[1]
iemid = row[2]
network = row[3]
if wfo is not None:
print 'Assinging WFO: %s to IEMID: %s ID: %s NETWORK: %s' % (wfo,
iemid, id, network)
mcursor2.execute("UPDATE stations SET wfo = '%s' WHERE iemid = %s" % (
wfo, iemid) )
else:
print 'ERROR assigning WFO to IEMID: %s ID: %s NETWORK: %s' % (
iemid, id, network)
mcursor.close()
mcursor2.close()
MESOSITE.commit()
MESOSITE.close()
|
[
"akrherz@95f8c243-6001-0410-b151-932e6a9ed213"
] |
akrherz@95f8c243-6001-0410-b151-932e6a9ed213
|
613e4837a9f63c8b247a8e460f07e655fe9e2904
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2210/60716/276310.py
|
155107bcaf3512fb1bad228a6138043e1592acac
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 713
|
py
|
def checkin(substr: list, parstr: str):
    """Return True iff every character of `parstr` can be matched against
    (and consumed from) the multiset of characters in `substr`.

    `substr` is mutated: matched characters are removed.

    BUGFIX: the original set `check = True` and merely `break`-ed on a
    missing character without ever setting it to False, so it always
    returned True.
    """
    for ch in parstr:
        if ch in substr:
            substr.remove(ch)
        else:
            return False
    return True
ucnum = int(input())  # number of test cases
ans = list()
for i in range(ucnum):
    str1 = input()  # haystack string
    str2 = input()  # characters that must all appear in the substring
    anstr = str()
    # lens = len(str1)
    # Try substring lengths from len(str2) upward: the first length that
    # yields a match is the shortest possible answer.
    for j in range(len(str2),len(str1)+1):#length of substring
        # NOTE(review): start positions stop before len(str1)-j, so the
        # substring ending at the last character is never tested -- looks
        # like an off-by-one (range(len(str1)-j+1) expected); confirm.
        for k in range(len(str1)-j):#start of substring
            tempstr = str1[k:k+j]
            templist = list(tempstr)
            if checkin(templist,str2):
                ans.append(tempstr)
                break
        if len(ans)==i+1:
            break
    # NOTE(review): this append runs even when an answer was found above,
    # adding an extra empty entry -- verify intended output format.
    ans.append(anstr)
for i in ans:
    print(i)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
0351042e28eadd00d7b26fdc3576ef296ac9ee15
|
99d7a6448a15e7770e3b6f3859da043300097136
|
/src/database/migrate/isotopedb/versions/010_Add_aliquot_to_LabTable.py
|
a8e30e8ecebd6498a1eda6c619ef3184b2accad3
|
[] |
no_license
|
softtrainee/arlab
|
125c5943f83b37bc7431ae985ac7b936e08a8fe4
|
b691b6be8214dcb56921c55daed4d009b0b62027
|
refs/heads/master
| 2020-12-31T07:54:48.447800
| 2013-05-06T02:49:12
| 2013-05-06T02:49:12
| 53,566,313
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
from sqlalchemy import *
from migrate import *
def upgrade(migrate_engine):
    """Add the integer ``aliquot`` column to the existing LabTable."""
    # Upgrade operations go here. Don't create your own engine; bind
    # migrate_engine to your metadata
    meta = MetaData(bind=migrate_engine)
    # autoload=True reflects the current table definition from the database.
    t = Table('LabTable', meta, autoload=True)
    col = Column('aliquot', Integer)
    col.create(t)
def downgrade(migrate_engine):
    """Reverse of upgrade(): drop the ``aliquot`` column from LabTable."""
    # Operations to reverse the above upgrade go here.
    meta = MetaData(bind=migrate_engine)
    t = Table('LabTable', meta, autoload=True)
    t.c.aliquot.drop()
|
[
"jirhiker@localhost"
] |
jirhiker@localhost
|
9df92ff5844f388a0d92870ce2ceab590cc8a89d
|
64870a6b0a38c63dd69387a2b9d591378dcaedfa
|
/setup.py
|
d9015e62651456081c3657bf72391f27c76df5af
|
[
"Unlicense"
] |
permissive
|
andrewp-as-is/setuppy-generator.py
|
28ff31d8fd6a66fb4d0ca77244fcc8acb5a53912
|
ae27ae4d534a373f34ebe24a16353a214abf8fc5
|
refs/heads/master
| 2021-07-11T05:40:52.834761
| 2020-12-03T21:25:00
| 2020-12-03T21:25:00
| 217,581,540
| 13
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
import setuptools

# Read the runtime dependency list up front; the context manager closes the
# file promptly (the original `open(...)` leaked the file handle).
with open('requirements.txt') as f:
    requirements = f.read().splitlines()

setuptools.setup(
    name='setuppy-generator',
    version='2020.12.2',
    install_requires=requirements,
    packages=setuptools.find_packages(),
)
|
[
"russianidiot.github@gmail.com"
] |
russianidiot.github@gmail.com
|
8e8f8f023aaacf37507d97b5a08cd7038235cb03
|
76255205d52cb81da0f8e0014775b98195ae83a1
|
/osticket/env/bin/django-admin
|
25c1965788b29f4f38330119aa9805526df6877b
|
[] |
no_license
|
vuvandang1995/OSticket
|
6c4fafbadffd99f635f049ca19a3dd120152d159
|
80c364cf9a7313cb102b7d618c43411c394b09f4
|
refs/heads/master
| 2020-03-09T10:08:16.715876
| 2018-07-11T00:11:52
| 2018-07-11T00:11:52
| 128,729,604
| 5
| 3
| null | 2018-07-11T00:11:53
| 2018-04-09T07:08:35
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 281
|
#!/home/osticket/env/bin/python3
# -*- coding: utf-8 -*-
# Generated console entry point that dispatches to Django's management CLI.
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
    # Console scripts can be installed as `name-script.py` or `name.exe`;
    # strip that suffix so argv[0] shows the plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(execute_from_command_line())
|
[
"dangdiendao@gmail.com"
] |
dangdiendao@gmail.com
|
|
e3ce904d92a58307e91532be437be396e12719b0
|
e5483ab737acd9fb222f0b7d1c770cfdd45d2ba7
|
/ecommerce/core/migrations/0015_auto_20200617_0543.py
|
ed6c2dc12074874bcb1dfb5cfa5901daff8e3542
|
[] |
no_license
|
mxmaslin/otus_web
|
6c1e534047444d7a1fc4cd1bf8245c25d9fc4835
|
b90ad69e1b5c1828fa2ace165710422d113d1d17
|
refs/heads/master
| 2022-12-09T19:52:58.626199
| 2020-07-07T19:15:52
| 2020-07-07T19:15:52
| 226,154,128
| 1
| 1
| null | 2022-12-08T03:23:10
| 2019-12-05T17:25:11
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,038
|
py
|
# Generated by Django 2.2.12 on 2020-06-17 02:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds Order.ref_code, updates the
    # Item.category choices, introduces the Address model and links it to
    # Order via shipping_address.  Generated code -- avoid hand-editing.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('core', '0014_orderitem_ordered'),
    ]

    operations = [
        migrations.AddField(
            model_name='order',
            name='ref_code',
            field=models.CharField(blank=True, max_length=20, null=True),
        ),
        migrations.AlterField(
            model_name='item',
            name='category',
            field=models.CharField(choices=[('S', 'Футболки'), ('SW', 'Спортивная одежда'), ('OW', 'Верхняя одежда')], max_length=2, verbose_name='Категория'),
        ),
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('street_address', models.CharField(max_length=100, verbose_name='Улица')),
                ('house_number', models.CharField(max_length=10, verbose_name='Дом, корпус')),
                ('apartment_number', models.CharField(max_length=10, verbose_name='Номер квартиры')),
                ('address_zip', models.CharField(max_length=6)),
                ('default', models.BooleanField(default=False)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Адрес',
                'verbose_name_plural': 'Адреса',
            },
        ),
        migrations.AddField(
            model_name='order',
            name='shipping_address',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='shipping_address', to='core.Address'),
        ),
    ]
|
[
"zapzarap@yandex.ru"
] |
zapzarap@yandex.ru
|
ed526ae369d2eed2d321a2449ab09942936d0194
|
b0af2f57aec5f6620fe73361f5aee18c3d12d7c5
|
/code/robotics/PyAdvancedControl-master/mpc_sample/main.py
|
be94f52068f09aded7a128b2e28f37c4a582accf
|
[
"GPL-2.0-or-later",
"eCos-exception-2.0",
"MIT"
] |
permissive
|
vicb1/python-reference
|
c7d3a7fee1b181cd4a80883467dc743b935993a2
|
40b9768124f2b9ef80c222017de068004d811d92
|
refs/heads/master
| 2022-10-09T08:41:56.699722
| 2022-10-04T00:53:58
| 2022-10-04T00:54:06
| 171,308,233
| 1
| 0
|
MIT
| 2022-06-21T23:43:38
| 2019-02-18T15:27:28
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,028
|
py
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Simple Model Predictive Control Simulation

author Atsushi Sakai
"""

import time
from cvxpy import *
import numpy as np
import matplotlib.pyplot as plt

print("Simulation start")

np.random.seed(1)

n = 4   # state size
m = 2   # input size
T = 50  # number of horizon

# simulation parameter
alpha = 0.2
beta = 5.0

# Model Parameter: random linear system x_{t+1} = A x_t + B u_t
A = np.eye(n) + alpha * np.random.randn(n, n)
B = np.random.randn(n, m)
x_0 = beta * np.random.randn(n, 1)

# NOTE(review): two-argument Variable(rows, cols) is the pre-1.0 cvxpy API;
# modern cvxpy expects Variable((rows, cols)) -- confirm the pinned version.
x = Variable(n, T + 1)
u = Variable(m, T)

# One sub-problem per step: quadratic state/input cost, dynamics constraint,
# and an infinity-norm bound |u_t| <= 1 on each input.
states = []
for t in range(T):
    cost = sum_squares(x[:, t + 1]) + sum_squares(u[:, t])
    constr = [x[:, t + 1] == A * x[:, t] + B * u[:, t],
              norm(u[:, t], 'inf') <= 1]
    states.append(Problem(Minimize(cost), constr))

# sums problem objectives and concatenates constraints.
prob = sum(states)
# Boundary conditions: start at x_0, drive the state to the origin at T.
prob.constraints += [x[:, T] == 0, x[:, 0] == x_0]

start = time.time()
result = prob.solve(verbose=True)
elapsed_time = time.time() - start
print ("calc time:{0}".format(elapsed_time) + "[sec]")

# An infinite objective value means the problem was infeasible.
if result == float("inf"):
    print("Cannot optimize")
    import sys
    sys.exit()
    # return

f = plt.figure()

# Plot the two control inputs u_t over the horizon.
ax = f.add_subplot(211)
u1 = np.array(u[0, :].value[0, :])[0].tolist()
u2 = np.array(u[1, :].value[0, :])[0].tolist()
plt.plot(u1, '-r', label="u1")
plt.plot(u2, '-b', label="u2")
plt.ylabel(r"$u_t$", fontsize=16)
plt.yticks(np.linspace(-1.0, 1.0, 3))
plt.legend()
plt.grid(True)

# Plot the four state trajectories x_t over the horizon.
plt.subplot(2, 1, 2)
x1 = np.array(x[0, :].value[0, :])[0].tolist()
x2 = np.array(x[1, :].value[0, :])[0].tolist()
x3 = np.array(x[2, :].value[0, :])[0].tolist()
x4 = np.array(x[3, :].value[0, :])[0].tolist()
plt.plot(range(T + 1), x1, '-r', label="x1")
plt.plot(range(T + 1), x2, '-b', label="x2")
plt.plot(range(T + 1), x3, '-g', label="x3")
plt.plot(range(T + 1), x4, '-k', label="x4")
plt.yticks([-25, 0, 25])
plt.ylim([-25, 25])
plt.ylabel(r"$x_t$", fontsize=16)
plt.xlabel(r"$t$", fontsize=16)
plt.grid(True)
plt.legend()
plt.tight_layout()
plt.show()
|
[
"vbajenaru@gmail.com"
] |
vbajenaru@gmail.com
|
354af91557abc5587e4c6a68abaf64e39f6c2d67
|
6e64eb9a4353dc6bd89c649d27bb20aa61173d7d
|
/core/products/views/product/views.py
|
cd1644f54713f542333f36d4ef3078e2019fa4f8
|
[] |
no_license
|
RoodrigoRoot/deploy_django
|
0d4b76ae41bab907d5d69b4e7c34d5151f9827bd
|
593b4613f1c224e236ac7f798e771e447ada677d
|
refs/heads/master
| 2022-11-28T05:39:37.350224
| 2020-03-19T19:42:04
| 2020-03-19T19:42:04
| 248,580,750
| 0
| 0
| null | 2022-11-22T03:38:31
| 2020-03-19T18:50:40
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,418
|
py
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.views.generic import ListView, CreateView, UpdateView, DeleteView
from core.products.forms import ProductForm
from core.products.models import Product
class ProductListView(ListView):
    """List all products; requires an authenticated user."""
    model = Product
    template_name = 'product/list.html'

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # Zero-argument super() for consistency with get_context_data below
        # (the file is Python 3; the explicit form was a leftover).
        return super().dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['title'] = 'Listado de Productos'
        return context
class ProductCreate(CreateView):
    """Create a new product; requires an authenticated user."""
    model = Product
    template_name = 'product/create.html'
    form_class = ProductForm
    success_url = reverse_lazy('product_list')

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # Zero-argument super() for consistency with get_context_data below.
        return super().dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['title'] = 'Nuevo registro de un Producto'
        # 'action' tells the shared create/edit template which mode it is in.
        context['action'] = 'add'
        return context
class ProductUpdate(UpdateView):
    """Edit an existing product; requires an authenticated user."""
    model = Product
    template_name = 'product/create.html'
    form_class = ProductForm
    success_url = reverse_lazy('product_list')

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # Zero-argument super() for consistency with get_context_data below.
        return super().dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['title'] = 'Edición de un Producto'
        # 'action' tells the shared create/edit template which mode it is in.
        context['action'] = 'edit'
        return context
class ProductDelete(DeleteView):
    """Confirm and delete a product; requires an authenticated user."""
    model = Product
    template_name = 'product/delete.html'
    success_url = reverse_lazy('product_list')

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # Zero-argument super() for consistency with get_context_data below.
        return super().dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['title'] = 'Notificación de eliminación'
        context['url'] = reverse_lazy('product_list')
        return context
|
[
"roodrigoroot@gmail.com"
] |
roodrigoroot@gmail.com
|
b53fe631fbbe3bae49798ea486ad1b37cf7a89b5
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part002589.py
|
e50690dda9b20734f0885f36379c31fb7774fe82
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,304
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher26020(CommutativeMatcher):
    # Auto-generated matchpy many-to-one matcher (sympy RUBI integration
    # rules).  Do not edit by hand; regenerate instead.
    _instance = None
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i2.2.1.2.1.1.0', 1, 1, None), Mul),
            (VariableWithCount('i2.2.1.2.1.1.0_1', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()

    def __init__(self):
        self.add_subject(None)

    @staticmethod
    def get():
        # Lazily-created singleton instance.
        if CommutativeMatcher26020._instance is None:
            CommutativeMatcher26020._instance = CommutativeMatcher26020()
        return CommutativeMatcher26020._instance

    @staticmethod
    def get_match_iter(subject):
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 26019
        # `return` before `yield` makes this an *empty generator*: the
        # unreachable yield forces generator semantics so callers can iterate.
        return
        yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
2d573fe5931b3f1063ba73647b84291d080f1c8a
|
2ad32d08c66cc02f5a19b3a9e2fbb7c5c25ed99c
|
/wolf_alg/Data_structures_and_algorithms_py/floyd.py
|
e459f00b0c5d7f500a47fb6c30e2f2b617c4b4ac
|
[] |
no_license
|
wqjzzgci/wolf-ai
|
5038dee45748809d16482ff6ecac7a2ae00dcbcf
|
42cb88a312e1137ad1c59c8a82fc3c15b3cd5092
|
refs/heads/master
| 2020-03-10T03:20:58.453867
| 2018-04-09T10:23:10
| 2018-04-09T10:23:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 703
|
py
|
#coding=utf-8
def floyd(double_list, vertexs, min_value=999999999):
    """Floyd-Warshall all-pairs shortest paths, updating double_list in place.

    double_list is a (vertexs+1) x (vertexs+1) adjacency matrix using
    1-based indices (row/column 0 are padding); entries >= min_value mean
    "no edge" (infinity).

    min_value used to be read from a hidden module-level global defined only
    under __main__; it is now an explicit parameter whose default matches
    the historical constant, so existing callers behave identically.
    Returns the mutated double_list.
    """
    for k in range(1, vertexs + 1):
        for i in range(1, vertexs + 1):
            for j in range(1, vertexs + 1):
                # Candidate path i -> k -> j; stays "infinite" if either
                # leg is missing.
                if double_list[i][k] >= min_value or double_list[k][j] >= min_value:
                    candidate = min_value
                else:
                    candidate = double_list[i][k] + double_list[k][j]
                if double_list[i][j] > candidate:
                    double_list[i][j] = candidate
    return double_list
if __name__ == '__main__':
    # Sentinel for "no edge" -- acts as infinity in the adjacency matrix.
    min_value = 999999999
    # 1-indexed 4-vertex test graph; row/column 0 are padding.
    ll = [
    [min_value,min_value,min_value,min_value,min_value],
    [min_value,0,2,6,4],
    [min_value,min_value,0,3,min_value],
    [min_value,7,min_value,0,1],
    [min_value,5,min_value,12,0]]
    print floyd(ll, 4)
|
[
"jiexu@pptv.com"
] |
jiexu@pptv.com
|
43c07454753909afed5dc71cb7ef52426b278069
|
b34d7c5f810287ebaab09c58754bc59f03589ac3
|
/ltc/controller/migrations/0005_auto_20220316_1624.py
|
df6da7a23d585093998fc8bdab84b883a059ef11
|
[
"MIT"
] |
permissive
|
r1990v/JMeter-Control-Center
|
11d00276a35a502f91f05bf2adf5c88bf56fbfed
|
6bfd13f008fce42c78badcb9d2579f069b064fe9
|
refs/heads/master
| 2023-01-07T12:40:43.370688
| 2022-09-27T11:05:56
| 2022-09-27T11:05:56
| 162,960,150
| 0
| 0
| null | 2018-12-24T06:53:26
| 2018-12-24T06:53:26
| null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
# Generated by Django 2.2 on 2022-03-16 15:24
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: drops the Proxy, TestRunning and
    # TestRunningData models (removing the FK first).  Generated code --
    # avoid hand-editing.

    dependencies = [
        ('controller', '0004_auto_20210608_1506'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Proxy',
        ),
        migrations.RemoveField(
            model_name='testrunningdata',
            name='test_running',
        ),
        migrations.DeleteModel(
            name='TestRunning',
        ),
        migrations.DeleteModel(
            name='TestRunningData',
        ),
    ]
|
[
"german.syomin@innogames.com"
] |
german.syomin@innogames.com
|
e26833183e66a8213241f6e0351fc7da369a112b
|
d94b6845aeeb412aac6850b70e22628bc84d1d6d
|
/aloe/aloe/common/plot_2d.py
|
b295d55c7794a134eb4930874648ecfc1df9b8c4
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
ishine/google-research
|
541aea114a68ced68736340e037fc0f8257d1ea2
|
c1ae273841592fce4c993bf35cdd0a6424e73da4
|
refs/heads/master
| 2023-06-08T23:02:25.502203
| 2023-05-31T01:00:56
| 2023-05-31T01:06:45
| 242,478,569
| 0
| 0
|
Apache-2.0
| 2020-06-23T01:55:11
| 2020-02-23T07:59:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,275
|
py
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
def plot_heatmap(pdf_func, out_name, size=3):
  """Evaluate pdf_func on a [-size, size]^2 grid and save it as a heatmap."""
  res = 100
  axis_pts = np.linspace(-size, size, res)
  grid_x, grid_y = np.meshgrid(axis_pts, axis_pts)
  # Flatten the grid into an (res*res, 2) array of query points.
  query_pts = np.stack([grid_x.flatten(), grid_y.flatten()]).transpose()
  values = pdf_func(query_pts)
  plt.imshow(values.reshape((res, res)))
  plt.axis('equal')
  plt.axis('off')
  plt.savefig(out_name, bbox_inches='tight')
  plt.close()
def plot_samples(samples, out_name, lim=None, axis=True):
  """Scatter-plot 2-D samples and save the figure to out_name.

  When lim is given, clamp both axes to [-lim, lim]; when axis is False,
  hide the axes entirely.
  """
  xs, ys = samples[:, 0], samples[:, 1]
  plt.scatter(xs, ys, marker='.')
  plt.axis('equal')
  if lim is not None:
    plt.xlim(-lim, lim)
    plt.ylim(-lim, lim)
  if not axis:
    plt.axis('off')
  plt.savefig(out_name, bbox_inches='tight')
  plt.close()
def plot_joint(dataset, samples, out_name):
  """Overlay training data (red x) and samples (blue dots) and save twice:
  once with a legend at out_name, once without at .../none-<basename>."""
  # Symmetric plot bound covering the most extreme dataset coordinate.
  bound = np.ceil(max(np.max(dataset), np.max(-dataset)))

  plt.scatter(dataset[:, 0], dataset[:, 1], c='r', marker='x')
  plt.scatter(samples[:, 0], samples[:, 1], c='b', marker='.')
  plt.legend(['training data', 'ADE sampled'])
  plt.axis('equal')
  plt.xlim(-bound, bound)
  plt.ylim(-bound, bound)
  plt.savefig(out_name, bbox_inches='tight')
  plt.close()

  # Second, legend-free copy named none-<basename> in the same directory.
  head, _, fname = out_name.rpartition('/')
  out_name = head + '/none-' + fname
  plt.figure(figsize=(8, 8))
  plt.scatter(dataset[:, 0], dataset[:, 1], c='r', marker='x')
  plt.scatter(samples[:, 0], samples[:, 1], c='b', marker='.')
  plt.axis('equal')
  plt.xlim(-bound, bound)
  plt.ylim(-bound, bound)
  plt.savefig(out_name, bbox_inches='tight')
  plt.close()
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
7df713c6d27a30122bb093277d9212602c441695
|
6a0b9581195400a93027aca881b1bc687401913d
|
/hackerrank-python/contests/world_cup/world_cup_team_formation.py
|
e702e9c67af3d7c45e54f7514b371704816cfd93
|
[] |
no_license
|
108krohan/codor
|
1d7ff503106ad6b2c18bc202d4c88f296600f28e
|
2e485607080f919f273aa6c8c0d9cb3516cf4443
|
refs/heads/master
| 2021-04-28T21:13:34.604020
| 2018-02-18T11:55:26
| 2018-02-18T11:55:26
| 86,153,540
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
"""world_cup_team_formation at hackerrank.com"""
lst = [int(raw_input()) for _ in xrange(10)]
lst.sort(reverse = True)
print lst[0] + lst[2] + lst[4]
"""
take sum of the first odd three numbers sorted in reverse order.
"""
|
[
"108krohan@gmail.com"
] |
108krohan@gmail.com
|
568fad17206fd645defb2cc4276ae1fc93ba66bc
|
06a2a44e2de6f9f6ac815762468ba63b82cd00e1
|
/apps/account/context_processors.py
|
932c6e47b78fb03f1996728b8cd4ab0d9ddf9feb
|
[] |
no_license
|
hqpr/marketcsgo
|
153e0f3b180e6cc5eb771ba60e7cf5b6e7f31929
|
dec9cdf1cafb836f5303a773ad77cf4824665722
|
refs/heads/master
| 2021-01-10T10:08:33.323776
| 2019-03-17T10:13:40
| 2019-03-17T10:13:40
| 48,641,743
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 720
|
py
|
from apps.account.models import UserProfile
def check_profile(request):
    """Context processor: flag whether the current user has a UserProfile.

    Returns {'valid': 1} for anonymous users or users with a profile, and
    {'valid': 0, 'user_id': ...} when the profile row is missing.
    """
    if not request.user.is_authenticated():
        return {'valid': 1}
    try:
        UserProfile.objects.get(user=request.user)
    except UserProfile.DoesNotExist:
        return {'valid': 0, 'user_id': request.user.id}
    return {'valid': 1}
def debug_mode(request):
    """Context processor: expose the user's profile debug flag.

    Anonymous users get {'debug': 1}; users with a profile get 1/0 from
    profile.debug_mode; users without a profile get
    {'debug': 0, 'user_id': ...}.
    """
    if not request.user.is_authenticated():
        return {'debug': 1}
    try:
        profile = UserProfile.objects.get(user=request.user)
    except UserProfile.DoesNotExist:
        return {'debug': 0, 'user_id': request.user.id}
    return {'debug': 1 if profile.debug_mode else 0}
|
[
"adubnyak@gmail.com"
] |
adubnyak@gmail.com
|
1dc11d54d1dfac5b787fddc7a6e886173250838b
|
cbc5e26bb47ae69e80a3649c90275becf25ce404
|
/xlsxwriter/test/contenttypes/test_contenttypes01.py
|
616bc445ea0552bde833c1b5083dd3f662857947
|
[
"BSD-2-Clause-Views",
"BSD-3-Clause",
"MIT"
] |
permissive
|
mst-solar-car/kicad-bom-generator
|
c3549409c3139f787ad28391372b5cb03791694a
|
2aae905056d06f3d25343a8d784049c141d05640
|
refs/heads/master
| 2021-09-07T14:00:40.759486
| 2018-02-23T23:21:13
| 2018-02-23T23:21:13
| 107,868,801
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,590
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2017, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...contenttypes import ContentTypes
class TestAssembleContentTypes(unittest.TestCase):
    """
    Test assembling a complete ContentTypes file.
    """

    def test_assemble_xml_file(self):
        """Test writing an ContentTypes file."""
        self.maxDiff = None

        # Write the generated XML into an in-memory buffer.
        fh = StringIO()
        content = ContentTypes()
        content._set_filehandle(fh)

        # Register one worksheet, a default jpeg type, shared strings and a
        # calc chain so every Override/Default branch is exercised.
        content._add_worksheet_name('sheet1')
        content._add_default(('jpeg', 'image/jpeg'))
        content._add_shared_strings()
        content._add_calc_chain()

        content._assemble_xml_file()

        # Expected [Content_Types].xml payload; compared element-by-element
        # after normalisation, not as raw text.
        exp = _xml_to_list("""
                <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
                <Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types">
                  <Default Extension="rels" ContentType="application/vnd.openxmlformats-package.relationships+xml"/>
                  <Default Extension="xml" ContentType="application/xml"/>
                  <Default Extension="jpeg" ContentType="image/jpeg"/>
                  <Override PartName="/docProps/app.xml" ContentType="application/vnd.openxmlformats-officedocument.extended-properties+xml"/>
                  <Override PartName="/docProps/core.xml" ContentType="application/vnd.openxmlformats-package.core-properties+xml"/>
                  <Override PartName="/xl/styles.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml"/>
                  <Override PartName="/xl/theme/theme1.xml" ContentType="application/vnd.openxmlformats-officedocument.theme+xml"/>
                  <Override PartName="/xl/workbook.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml"/>
                  <Override PartName="/xl/worksheets/sheet1.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml"/>
                  <Override PartName="/xl/sharedStrings.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sharedStrings+xml"/>
                  <Override PartName="/xl/calcChain.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.calcChain+xml"/>
                </Types>
                """)

        got = _xml_to_list(fh.getvalue())

        self.assertEqual(got, exp)
|
[
"mwrb7d@mst.edu"
] |
mwrb7d@mst.edu
|
1381edf36fb363b531e4ef1d84b51910010dc909
|
cace862c1d95f6b85a9750a427063a8b0e5ed49c
|
/binaryapi/ws/chanels/buy_contract_for_multiple_accounts.py
|
014f93aa8f29ed48abbf5dc467f1f260ff845547
|
[] |
no_license
|
HyeongD/binaryapi
|
65486532389210f1ca83f6f2098276ecf984702b
|
e8daa229c04de712242e8e9b79be3b774b409e35
|
refs/heads/master
| 2023-08-29T13:24:58.364810
| 2021-10-26T19:00:59
| 2021-10-26T19:00:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,349
|
py
|
"""Module for Binary buy_contract_for_multiple_accounts websocket channel."""
from binaryapi.ws.chanels.base import Base
from decimal import Decimal
from typing import Any, List, Union, Optional
# https://developers.binary.com/api/#buy_contract_for_multiple_accounts
class BuyContractForMultipleAccounts(Base):
    """Binary.com ``buy_contract_for_multiple_accounts`` websocket channel.

    Buys a contract for every account identified in ``tokens``.  Although
    the call is authorized, the contract is not bought for the authorized
    account itself.
    """

    # Channel name sent as the request key.
    name = "buy_contract_for_multiple_accounts"

    def __call__(self, buy_contract_for_multiple_accounts: str, price: Union[int, float, Decimal], tokens: List, parameters=None, passthrough: Optional[Any] = None, req_id: Optional[int] = None):
        """Send a buy request to the channel.

        :param buy_contract_for_multiple_accounts: Proposal ID from a prior
            `proposal` call, or `1` when buy parameters are supplied via
            ``parameters``.
        :param price: Maximum price at which to purchase the contract.
        :param tokens: API tokens of the target accounts; duplicates cause
            the contract to be bought multiple times for that account.
        :param parameters: [Optional] Contract buy parameters.
        :param passthrough: [Optional] Echoed back via the `echo_req` field.
        :param req_id: [Optional] Used to map request to response.
        """
        data = dict(
            buy_contract_for_multiple_accounts=buy_contract_for_multiple_accounts,
            price=price,
            tokens=tokens,
        )

        # Only include the optional buy parameters when supplied.
        if parameters:
            data['parameters'] = parameters

        return self.send_websocket_request(self.name, data, passthrough=passthrough, req_id=req_id)
|
[
"mdn522@gmail.com"
] |
mdn522@gmail.com
|
407bf5e3d865565a63d71d38351ec11784634a45
|
6745bd6b607bbfb00dcf641980925753ec60f7d8
|
/company/migrations/0025_auto_20170507_1103.py
|
0800c545ee6393dc10ad6d843f09ee8a4204948f
|
[] |
no_license
|
happychallenge/chemicals
|
4a1822d32354ce85499f42ada47103d3f27e163c
|
aa2b08c92cefe1650591d965f2e7f4872c445363
|
refs/heads/master
| 2021-01-20T09:21:55.769758
| 2017-05-07T06:03:54
| 2017-05-07T06:03:54
| 90,245,438
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,089
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-07 02:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration.

    Adjusts field options (choices, max_length, verbose_name) on the
    company, product, customer and contract models.  No data is changed.
    """

    dependencies = [
        ('company', '0024_remove_allprocess_send_msds'),
    ]

    operations = [
        # --- Company ---
        migrations.AlterField(
            model_name='company',
            name='category',
            field=models.CharField(choices=[('T', '贸易'), ('P', '生产'), ('M', '生产&贸易')], default='P', max_length=1, verbose_name='公司种类'),
        ),
        migrations.AlterField(
            model_name='company',
            name='en_name',
            field=models.CharField(max_length=50, verbose_name='English Name'),
        ),
        migrations.AlterField(
            model_name='company',
            name='name',
            field=models.CharField(max_length=30, verbose_name='中文名字'),
        ),
        migrations.AlterField(
            model_name='companyproduct',
            name='currency',
            field=models.CharField(choices=[('D', 'USD'), ('R', 'RMB')], default='R', max_length=1),
        ),
        migrations.AlterField(
            model_name='customer',
            name='en_name',
            field=models.CharField(max_length=30, verbose_name='English Name'),
        ),
        # --- Product ---
        migrations.AlterField(
            model_name='product',
            name='atomic_amount',
            field=models.FloatField(verbose_name='分子量'),
        ),
        migrations.AlterField(
            model_name='product',
            name='cn_name',
            field=models.CharField(max_length=50, verbose_name='中文名字'),
        ),
        migrations.AlterField(
            model_name='product',
            name='en_name',
            field=models.CharField(max_length=30, verbose_name='English Name'),
        ),
        migrations.AlterField(
            model_name='product',
            name='image',
            field=models.ImageField(upload_to='chemical/', verbose_name='结构式'),
        ),
        migrations.AlterField(
            model_name='product',
            name='usage',
            field=models.TextField(blank=True, null=True, verbose_name='用途'),
        ),
        # --- Purchase contract ---
        migrations.AlterField(
            model_name='purchasecontract',
            name='actualdelivery_at',
            field=models.DateField(blank=True, null=True, verbose_name='实际到达日子'),
        ),
        migrations.AlterField(
            model_name='purchasecontract',
            name='actualshipping_at',
            field=models.DateField(blank=True, null=True, verbose_name='实际出发日子'),
        ),
        migrations.AlterField(
            model_name='purchasecontract',
            name='contracted_at',
            field=models.DateField(verbose_name='合同日子'),
        ),
        migrations.AlterField(
            model_name='purchasecontract',
            name='currency',
            field=models.CharField(choices=[('D', 'USD'), ('R', 'RMB')], default='R', max_length=1),
        ),
        migrations.AlterField(
            model_name='purchasecontract',
            name='portofdestination',
            field=models.CharField(max_length=100, verbose_name='货到的港口'),
        ),
        migrations.AlterField(
            model_name='purchasecontract',
            name='predictdelivery_at',
            field=models.DateField(verbose_name='到达计划日子'),
        ),
        migrations.AlterField(
            model_name='purchasecontract',
            name='shipping_at',
            field=models.DateField(verbose_name='出发计划日子'),
        ),
        # --- Sales contract ---
        migrations.AlterField(
            model_name='salescontract',
            name='actualshipping_at',
            field=models.DateField(blank=True, null=True, verbose_name='实际 Shipping 日子'),
        ),
        migrations.AlterField(
            model_name='salescontract',
            name='contracted_at',
            field=models.DateField(verbose_name='合同日子'),
        ),
        migrations.AlterField(
            model_name='salescontract',
            name='currency',
            field=models.CharField(choices=[('D', 'USD'), ('R', 'RMB')], default='D', max_length=1),
        ),
        migrations.AlterField(
            model_name='salescontract',
            name='devliveryrequest',
            field=models.CharField(blank=True, max_length=50, null=True, verbose_name='其他'),
        ),
        migrations.AlterField(
            model_name='salescontract',
            name='portofdestination',
            field=models.CharField(max_length=100, verbose_name='目的地 港口'),
        ),
        migrations.AlterField(
            model_name='salescontract',
            name='portofloading',
            field=models.CharField(max_length=100, verbose_name='Shipping 港口'),
        ),
        migrations.AlterField(
            model_name='salescontract',
            name='shipping_at',
            field=models.DateField(verbose_name='Shipping 计划日子'),
        ),
    ]
|
[
"happychallenge@outlook.com"
] |
happychallenge@outlook.com
|
98fab1f0de73796ef769a8a4f729188096167ece
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/MybankCreditLoantradePayerBillrepayConsultResponse.py
|
384aaa839bacae7ad07341b1ed1c53574be2462c
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,716
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.CreditPayBillDetailVO import CreditPayBillDetailVO
class MybankCreditLoantradePayerBillrepayConsultResponse(AlipayResponse):
    """Response model for the Mybank payer bill-repay consult API."""

    def __init__(self):
        super(MybankCreditLoantradePayerBillrepayConsultResponse, self).__init__()
        # Backing fields for the properties exposed below.
        self._bill_details = None
        self._exist_bill = None
        self._repay_url = None

    @property
    def bill_details(self):
        return self._bill_details

    @bill_details.setter
    def bill_details(self, value):
        # Accepts a list of CreditPayBillDetailVO instances or raw dicts;
        # dicts are converted to model objects.  Non-list values are ignored.
        if isinstance(value, list):
            self._bill_details = [
                item if isinstance(item, CreditPayBillDetailVO)
                else CreditPayBillDetailVO.from_alipay_dict(item)
                for item in value
            ]

    @property
    def exist_bill(self):
        return self._exist_bill

    @exist_bill.setter
    def exist_bill(self, value):
        self._exist_bill = value

    @property
    def repay_url(self):
        return self._repay_url

    @repay_url.setter
    def repay_url(self, value):
        self._repay_url = value

    def parse_response_content(self, response_content):
        response = super(MybankCreditLoantradePayerBillrepayConsultResponse, self).parse_response_content(response_content)
        # Route each known key through its property setter.
        for key in ('bill_details', 'exist_bill', 'repay_url'):
            if key in response:
                setattr(self, key, response[key])
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
eb19a9e76546f3274aeb55a5941ccfbf0c448ec4
|
23611933f0faba84fc82a1bc0a85d97cf45aba99
|
/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/command_lib/util/time_util.py
|
fa4aca709d2e13980911d752c895797f68fe0372
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
KaranToor/MA450
|
1f112d1caccebdc04702a77d5a6cee867c15f75c
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
refs/heads/master
| 2021-06-21T06:17:42.585908
| 2020-12-24T00:36:28
| 2020-12-24T00:36:28
| 79,285,433
| 1
| 1
|
Apache-2.0
| 2020-12-24T00:38:09
| 2017-01-18T00:05:44
|
Python
|
UTF-8
|
Python
| false
| false
| 3,224
|
py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for capturing time-related functions.
This makes mocking for time-related functionality easier.
"""
import calendar
import datetime
import re
import time
def CurrentTimeSec():
    """Return the current Unix time as a float of seconds."""
    now = time.time()
    return now
def Sleep(duration_sec):
    """Block the calling thread for *duration_sec* seconds."""
    time.sleep(duration_sec)
def CurrentDatetimeUtc():
    """Return the current date and time in UTC (naive datetime)."""
    utc_now = datetime.datetime.utcnow()
    return utc_now
def IsExpired(timestamp_rfc3993_str):
    """Return True if the RFC 3339 timestamp lies in the past.

    An empty string means "no expiration" and is never expired.
    """
    if timestamp_rfc3993_str == '':
        return False
    return Strptime(timestamp_rfc3993_str) < CurrentTimeSec()
# Parsing code for rfc3339 timestamps, taken from Google's rfc3339.py.
# TODO(user): Investigate opensourcing rfc3999.py
def Strptime(rfc3339_str):
    """Converts an RFC 3339 timestamp to Unix time in seconds since the epoch.

    Args:
      rfc3339_str: a timestamp in RFC 3339 format (yyyy-mm-ddThh:mm:ss.sss
        followed by a time zone, given as Z, +hh:mm, or -hh:mm)

    Returns:
      a number of seconds since January 1, 1970, 00:00:00 UTC (int, or
      float when a fractional-seconds part is present)

    Raises:
      ValueError: if the timestamp is not in an acceptable format
    """
    match = re.match(r'(\d\d\d\d)-(\d\d)-(\d\d)T'
                     r'(\d\d):(\d\d):(\d\d)(?:\.(\d+))?'
                     r'(?:(Z)|([-+])(\d\d):(\d\d))', rfc3339_str)
    if not match:
        raise ValueError('not a valid timestamp: %r' % rfc3339_str)

    (year, month, day, hour, minute, second, frac_seconds,
     zulu, zone_sign, zone_hours, zone_minutes) = match.groups()

    # Use a list, not a bare map() iterator: calendar.timegm indexes its
    # argument, which fails with a map object under Python 3.
    time_tuple = [int(t) for t in (year, month, day, hour, minute, second)]

    # Parse the time zone offset.
    if zulu == 'Z':  # explicit UTC
        zone_offset = 0
    else:
        zone_offset = int(zone_hours) * 3600 + int(zone_minutes) * 60
        if zone_sign == '-':
            zone_offset = -zone_offset

    integer_time = calendar.timegm(time_tuple) - zone_offset
    if frac_seconds:
        # Integer arithmetic preserves the full precision of the fractional
        # part before the final scale-down.
        sig_dig = len(frac_seconds)
        return ((integer_time * (10 ** sig_dig)
                 + int(frac_seconds)) * (10 ** -sig_dig))
    else:
        return integer_time
def CalculateExpiration(num_seconds):
    """Return the RFC 3339 time *num_seconds* from now, or None.

    None input means "no expiration" and is passed through unchanged.
    """
    if num_seconds is None:
        return None
    expires_at = CurrentDatetimeUtc() + datetime.timedelta(0, int(num_seconds))
    return _FormatDateString(expires_at)
def _FormatDateString(d):
return ('%04d-%02d-%02dT%02d:%02d:%02dZ' %
(d.year, d.month, d.day, d.hour, d.minute, d.second))
|
[
"toork@uw.edu"
] |
toork@uw.edu
|
643d8479d0d4be592474bfdee79d5f9f22cb89e1
|
bb87579e47fc04b299694b8a8fe318f022f54ee8
|
/Automate the Boring Stuff/Ch.12 - Web Scraping/attribute.py
|
c4f59a6a44dfa7c2b6213063120b221b7c1a6447
|
[] |
no_license
|
QaisZainon/Learning-Coding
|
7bbc45197085dfa8f41ac298d26cf54e99e7b877
|
a3991842e79c30f24d7bc0cca77dbd09bc03372f
|
refs/heads/master
| 2022-12-23T05:47:26.512814
| 2020-09-25T08:10:20
| 2020-09-25T08:10:20
| 297,945,320
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
from selenium import webdriver

# Open Firefox and look up an element by class name on the demo site.
browser = webdriver.Firefox()
browser.get('https://inventwithpython.com')
try:
    elem = browser.find_element_by_class_name('cover-thumb')
    print('Found <%s> element with that class name!' % (elem.tag_name))
except Exception:
    # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
    # catching Exception reports only real lookup failures.
    print('Was not able to find an element with that name.')
|
[
"noreply@github.com"
] |
QaisZainon.noreply@github.com
|
6959b4c9ba3b87040a8e31e260f2243d2fc88cba
|
b5f5c749ad8ba774da04a3dcf44ea2e66aea6cd6
|
/background/05geo/coverage.py
|
a80d605b766bf88a40e1d82d403f040d271128b2
|
[] |
no_license
|
yj-git/SearchRescueSys
|
2329be5f3caf57f11a2e606da87382344698eff4
|
44347aef4cd5f75f1c9adcea76c21aa97b41e8ae
|
refs/heads/master
| 2022-08-05T12:41:54.967248
| 2020-05-21T04:55:37
| 2020-05-21T04:55:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 522
|
py
|
from geoserver.support import ResourceInfo
from geoserver.catalog import Catalog
class Coverage(ResourceInfo):
    """REST resource for the coverages of a GeoServer coverage store."""

    def __init__(self, catalog: Catalog, store_name, work_space):
        super().__init__()
        # Handles used to build REST URLs against the catalog.
        self.catalog = catalog
        self.store_name = store_name
        self.work_space = work_space
        self.gs_version = self.catalog.get_short_version()

    @property
    def href(self):
        base_url = self.catalog.service_url
        return (f"{base_url}/workspaces/{self.work_space}"
                f"/coveragestores/{self.store_name}/coverages")
|
[
"evaseemefly@126.com"
] |
evaseemefly@126.com
|
0bc60f23731424fe5898e28b2074f5662c17e943
|
e05c6a78b16f1d39b8e77db3ee5cea83b44ddf8a
|
/migrations/versions/0305m_add_pinpoint.py
|
6dd09943085a1a433ea9ebe35523a519e6c9f0ff
|
[
"MIT"
] |
permissive
|
cds-snc/notification-api
|
7da0928f14608a2c7db1e229e17b9dbfaaf3d0f0
|
99558db51784925942d031511af3cfb03338a28d
|
refs/heads/main
| 2023-08-18T00:08:42.787361
| 2023-08-17T20:58:47
| 2023-08-17T20:58:47
| 194,884,758
| 49
| 12
|
MIT
| 2023-09-14T18:55:07
| 2019-07-02T14:57:01
|
Python
|
UTF-8
|
Python
| false
| false
| 896
|
py
|
"""
Revision ID: 0305m_add_pinpoint
Revises: 0305l_smtp_template
Create Date: 2020-04-20 12:00:00
"""
import uuid
from alembic import op
revision = "0305m_add_pinpoint"
down_revision = "0305l_smtp_template"

# One UUID shared by the provider_details row and its history row.
# (Named provider_id so the builtin `id` is not shadowed.)
provider_id = uuid.uuid4()


def upgrade():
    """Register the AWS Pinpoint SMS provider (priority 50, active, v1)."""
    op.execute(
        f"""
        INSERT INTO provider_details (id, display_name, identifier, priority, notification_type, active, version)
        VALUES ('{provider_id}', 'AWS Pinpoint', 'pinpoint', 50, 'sms', true, 1)
        """
    )
    op.execute(
        f"""
        INSERT INTO provider_details_history (id, display_name, identifier, priority, notification_type, active, version)
        VALUES ('{provider_id}', 'AWS Pinpoint', 'pinpoint', 50, 'sms', true, 1)
        """
    )


def downgrade():
    """Remove the Pinpoint provider rows added by upgrade()."""
    op.execute("DELETE FROM provider_details WHERE identifier = 'pinpoint'")
    op.execute("DELETE FROM provider_details_history WHERE identifier = 'pinpoint'")
|
[
"noreply@github.com"
] |
cds-snc.noreply@github.com
|
1bfb37fd8a7fdad73e3394fb67e758cc3068b5b0
|
20927c6b6dbb360bf0fd13d70115bdb27e7196e7
|
/0x0F-python-object_relational_mapping/1-filter_states.py
|
2896d23cb05252078d33fd4184cfa458f4fcb904
|
[] |
no_license
|
PauloMorillo/holbertonschool-higher_level_programming
|
27fc1c0a1ae5784bd22d07daaedb602ee618867d
|
8a42a60aa4ea52b5cc2fb73e57f38aa6c5196c98
|
refs/heads/master
| 2021-08-16T17:13:45.568038
| 2020-07-29T01:20:25
| 2020-07-29T01:20:25
| 207,305,140
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
#!/usr/bin/python3
"""This Module prints all of a database"""
import sys
import MySQLdb
def main():
    """Print all rows of `states` whose name starts with an uppercase N.

    Connection credentials (user, password, database) come from
    sys.argv[1:4]; the server is assumed local on the default port.
    """
    connection = MySQLdb.connect(host='localhost',
                                 user=sys.argv[1],
                                 passwd=sys.argv[2],
                                 db=sys.argv[3],
                                 port=3306)
    cursor = connection.cursor()
    # BINARY makes the regexp match case-sensitive.
    cursor.execute("SELECT * FROM states WHERE name REGEXP BINARY"
                   "'^N' ORDER BY id")
    for state_row in cursor.fetchall():
        print(state_row)
    cursor.close()
    connection.close()


if __name__ == '__main__':
    main()
|
[
"pauloan@hotmail.com"
] |
pauloan@hotmail.com
|
cfc1b386f4a20a5d866800567c6b0b276a19ef98
|
469772806152cff25b13a1e73ec5133ba3d0f283
|
/src/reversi_zero/agent/api.py
|
6ba0bb23675d7c7cd12f8ccfc0178d0c3a3fbb6a
|
[
"MIT"
] |
permissive
|
awesome-archive/reversi-alpha-zero
|
880e92cb02a8b4d21e824baed3584a7eec823bfe
|
90ba711f2233660bbf36d8203873b3fc16f7a1e8
|
refs/heads/master
| 2022-03-30T13:22:21.547259
| 2017-11-22T01:16:59
| 2017-11-22T01:16:59
| 111,628,182
| 0
| 0
|
MIT
| 2020-01-10T11:11:25
| 2017-11-22T02:47:19
|
Python
|
UTF-8
|
Python
| false
| false
| 688
|
py
|
from reversi_zero.config import Config
class ReversiModelAPI:
    """Thin prediction wrapper around the Keras reversi model."""

    def __init__(self, config: Config, agent_model):
        """
        :param config:
        :param reversi_zero.agent.model.ReversiModel agent_model:
        """
        self.config = config
        self.agent_model = agent_model

    def predict(self, x):
        """Predict policy and value for one state (2, 8, 8) or a batch.

        A single state is returned as (policy, value) without the batch
        dimension; a batch input keeps its leading dimension.
        """
        assert x.ndim in (3, 4)
        assert x.shape == (2, 8, 8) or x.shape[1:] == (2, 8, 8)

        is_single = x.ndim == 3
        batch = x.reshape(1, 2, 8, 8) if is_single else x
        policy, value = self.agent_model.model.predict_on_batch(batch)
        if is_single:
            return policy[0], value[0]
        return policy, value
|
[
"mokemokechicken@gmail.com"
] |
mokemokechicken@gmail.com
|
7389d460abe517d9e993221e0b2c9acc6154d6ab
|
916c49b17d730ae36ce3fe8178146baac53fb15d
|
/common/ecmp/base.py
|
10d355316e17a8b6561646a9b9de047fdebde323
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
absurya4/tf-test
|
ec96955ed9ddd662112173d2ff14059cd8d49552
|
f1faeca6e8a0abbc0efd77455379163d61e3a3d7
|
refs/heads/master
| 2022-12-18T08:25:20.375604
| 2020-09-21T07:31:53
| 2020-09-24T07:09:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,007
|
py
|
from common.base import GenericTestBase
import os
from tcutils.util import get_random_name, get_random_cidr
class ECMPTestBase(GenericTestBase):
    """Shared class-level fixtures for ECMP tests.

    Creates three virtual networks (mgmt, left, right) and one VM on each
    of the left/right networks, then exposes them via ``cls.common_args``.
    """

    @classmethod
    def setUpClass(cls):
        super(ECMPTestBase, cls).setUpClass()
        cls.inputs.set_af(cls.get_af())
        try:
            # Mgmt VN
            cls.mgmt_vn_name = get_random_name('mgmt_%s' % (
                cls.inputs.project_name))
            cls.mgmt_vn_subnets = [get_random_cidr(af=cls.inputs.get_af())]
            cls.mgmt_vn_fixture = cls.create_only_vn(
                cls.mgmt_vn_name, cls.mgmt_vn_subnets)
            # Left VN
            cls.left_vn_name = get_random_name('left_%s' % (
                cls.inputs.project_name))
            cls.left_vn_subnets = [get_random_cidr(af=cls.inputs.get_af())]
            cls.left_vn_fixture = cls.create_only_vn(cls.left_vn_name,
                                                     cls.left_vn_subnets)
            # Right VN
            cls.right_vn_name = get_random_name('right_%s' % (
                cls.inputs.project_name))
            cls.right_vn_subnets = [get_random_cidr(af=cls.inputs.get_af())]
            cls.right_vn_fixture = cls.create_only_vn(cls.right_vn_name,
                                                      cls.right_vn_subnets)
            #if cls.inputs.get_af() == 'v6':
            #    cls.left_vn_subnets += [get_random_cidr()]
            #    cls.right_vn_subnets += [get_random_cidr()]
            # CI setups use the lightweight CI image for v4; otherwise a
            # traffic-capable cirros image is used.
            if cls.inputs.is_ci_setup() and cls.inputs.get_af() == 'v4':
                cls.image_name = cls.inputs.get_ci_image()
            else:
                cls.image_name = 'cirros-traffic'
            # End Vms
            cls.left_vm_name = get_random_name('left_vm_%s' % (
                cls.inputs.project_name))
            cls.left_vm_fixture = cls.create_only_vm(cls.left_vn_fixture,
                                                     vm_name=cls.left_vm_name,
                                                     image_name=cls.image_name)
            cls.right_vm_name = get_random_name('right_vm_%s' % (
                cls.inputs.project_name))
            cls.right_vm_fixture = cls.create_only_vm(cls.right_vn_fixture,
                                                      vm_name=cls.right_vm_name,
                                                      image_name=cls.image_name)
        except:
            # Bare except is deliberate here: tear down any partially
            # created fixtures, then re-raise the original error.
            cls.tearDownClass()
            raise
        # Bundle every fixture/name so subclasses can pass them around.
        cls.common_args = { 'mgmt_vn_name' : cls.mgmt_vn_name,
                            'mgmt_vn_subnets' : cls.mgmt_vn_subnets,
                            'mgmt_vn_fixture' : cls.mgmt_vn_fixture,
                            'left_vn_name' : cls.left_vn_name,
                            'left_vn_subnets' : cls.left_vn_subnets,
                            'left_vn_fixture' : cls.left_vn_fixture,
                            'left_vm_name' : cls.left_vm_name,
                            'left_vm_fixture' : cls.left_vm_fixture,
                            'right_vn_name' : cls.right_vn_name,
                            'right_vn_subnets' : cls.right_vn_subnets,
                            'right_vn_fixture' : cls.right_vn_fixture,
                            'right_vm_name' : cls.right_vm_name,
                            'right_vm_fixture' : cls.right_vm_fixture,
                            'image_name' : cls.image_name }
    # end setUpClass

    @classmethod
    def cleanUpObjects(cls):
        # VMs first, then the networks they attach to.
        cls.safe_cleanup('right_vm_fixture')
        cls.safe_cleanup('left_vm_fixture')
        cls.safe_cleanup('left_vn_fixture')
        cls.safe_cleanup('right_vn_fixture')
        cls.safe_cleanup('mgmt_vn_fixture')
    # end cleanUpObjects

    @classmethod
    def tearDownClass(cls):
        cls.cleanUpObjects()
        super(ECMPTestBase, cls).tearDownClass()
    # end tearDownClass
|
[
"andrey-mp@yandex.ru"
] |
andrey-mp@yandex.ru
|
7f69dbce010f14ac2debdf2734632872607447c1
|
7f53f509222f7e4b1ca8137bb31cf2edc5f64e80
|
/spec.py
|
285af57a848c75051fd2652556eb8d7fba9fe315
|
[] |
no_license
|
wahid999/Pythonforbegainer
|
f61c7567c37b3d4103b5550a6975f78c960763f8
|
794fd9471ff95eac52ae42d8548526c09df23bbd
|
refs/heads/main
| 2023-07-11T22:31:31.435440
| 2021-08-16T17:18:35
| 2021-08-16T17:18:35
| 399,344,206
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
import os
os.system('clear')  # clear the terminal before printing

# ************** Strings ****************
# ----------> CONCATENATION -------->
name = 'Wahid Hussain'
greetings = 'hello, My name is ' + name
# Index 24 of the concatenated string falls inside `name`.
print(greetings[24])
|
[
"wahidhussainturi@gmail.com"
] |
wahidhussainturi@gmail.com
|
c6db899b17994956a0afb5dd82d2d133537fb664
|
df258f9b95493d146ef8d3e9fef8ee367fe66042
|
/dffml/db/sql.py
|
bbbac8bea799aad05894665845c119803c332d57
|
[
"LicenseRef-scancode-generic-export-compliance",
"MIT"
] |
permissive
|
emrul/dffml
|
dd576582de5a95d2cc3c525131d1b1f66dfa84c1
|
0829a5830fef85d24baa80220fa9cf7e56fee236
|
refs/heads/master
| 2022-04-22T20:37:45.109948
| 2020-04-24T15:08:23
| 2020-04-24T15:08:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,272
|
py
|
"""
Base classes to wrap various SQL based databases in dffml.db abstraction.
"""
from typing import Dict, Any, List, Tuple, Optional
from dffml.db.base import BaseDatabaseContext, Conditions
class SQLDatabaseContext(BaseDatabaseContext):
    """Builds SQL query strings and bind-parameter lists for dffml.db."""

    # BIND_DECLARATION is the string used to bind a param
    BIND_DECLARATION: str = "?"

    @classmethod
    def make_condition_expression(cls, conditions):
        """
        Returns a dict with keys 'expression','values' if conditions is not empty
        else returns `None`

        example::

            Input : conditions = [
                [["firstName", "=", "John"], ["lastName", "=", "Miles"]],
                [["age", "<", "38"]],
            ]

            Output : {
                'expression':
                    '((firstName = ? ) OR (lastName = ? )) AND ((age < ? ))',
                'values':
                    ['John', 'Miles', '38']
            }
        """

        def _make_condition_expression(conditions):
            def make_or(lst):
                # Inner lists are OR-ed together.
                val_list = []
                exp = []
                for cnd in lst:
                    exp.append(
                        f"(`{cnd.column}` {cnd.operation} {cls.BIND_DECLARATION} )"
                    )
                    val_list.append(cnd.value)
                result = {"expression": " OR ".join(exp), "values": val_list}
                return result

            # The outer list AND-s the OR-groups.
            lst = map(make_or, conditions)
            result_exps = []
            result_vals = []
            for result in lst:
                temp_exp = result["expression"]
                temp_exp = f"({temp_exp})"
                result_exps.append(temp_exp)
                result_vals.extend(result["values"])
            result_exps = " AND ".join(result_exps)
            result = {"expression": result_exps, "values": result_vals}
            return result

        condition_dict = None
        # `is not None` instead of `not conditions == None`: identity check
        # is the idiomatic (and correct) None comparison.
        if conditions is not None and len(conditions) != 0:
            condition_dict = _make_condition_expression(conditions)
        return condition_dict

    def create_table_query(
        self, table_name: str, cols: Dict[str, str], *args, **kwargs
    ) -> str:
        """
        Creates a create query. Table with name ``table_name`` will be created
        if it doesn't exist.

        Parameters
        ----------
        table_name : str
            Name of the table.
        cols : dict
            Mapping of column names to type of columns.

        Returns
        -------
        query : str
            ``CREATE`` query
        """
        query = (
            f"CREATE TABLE IF NOT EXISTS {table_name} ("
            + ", ".join([f"`{k}` {v}" for k, v in cols.items()])
            + ")"
        )
        return query

    def insert_query(
        self, table_name: str, data: Dict[str, Any], *args, **kwargs
    ) -> Tuple[str, List[Any]]:
        """
        Creates insert query. Keys in ``data`` dict correspond to the columns in
        ``table_name``.

        Parameters
        ----------
        table_name : str
            Name of the table.
        data : dict, optional
            Columns names are keys, values are data to insert.

        Returns
        -------
        query : str
            ``INSERT`` query
        parameters : list
            Variables to bind
        """
        col_exp = ", ".join([f"`{col}`" for col in data])
        query = (
            f"INSERT INTO {table_name} "
            + f"( {col_exp} )"
            + f" VALUES( {', '.join([self.BIND_DECLARATION] * len(data))} ) "
        )
        return query, list(data.values())

    def update_query(
        self,
        table_name: str,
        data: Dict[str, Any],
        conditions: Optional[Conditions] = None,
    ) -> Tuple[str, List[Any]]:
        """
        Creates update query setting values of rows (satisfying ``conditions``
        if provided) with ``data`` in ``table_name``.

        Parameters
        ----------
        table_name : str
            Name of the table.
        data : dict, optional
            Columns names to update mapped to value to set to.
        conditions: Conditions, optional
            Nested array of conditions to satisfy, becomes ``WHERE``.

        Returns
        -------
        query : str
            ``UPDATE`` query
        parameters : list
            Variables to bind
        """
        query_values = list(data.values())
        condition_dict = self.make_condition_expression(conditions)

        if condition_dict is not None:
            condition_exp = condition_dict["expression"]
            query_values.extend(condition_dict["values"])
        else:
            condition_exp = None

        query = (
            f"UPDATE {table_name} SET "
            + " ,".join([f"`{col}` = {self.BIND_DECLARATION}" for col in data])
            + (f" WHERE {condition_exp}" if condition_exp is not None else "")
        )
        return query, query_values

    def lookup_query(
        self,
        table_name: str,
        cols: Optional[List[str]] = None,
        conditions: Optional[Conditions] = None,
    ) -> Tuple[str, List[Any]]:
        """
        Creates a query string and list of parameters used as bindings.

        Parameters
        ----------
        table_name : str
            Name of the table.
        cols : list, optional
            Columns names to return
        conditions: Conditions, optional
            Nested array of conditions to satisfy, becomes ``WHERE``.

        Returns
        -------
        query : str
            ``SELECT`` query
        parameters : list
            Variables to bind
        """
        condition_dict = self.make_condition_expression(conditions)
        query_values = []
        if condition_dict is not None:
            condition_exp = condition_dict["expression"]
            query_values.extend(condition_dict["values"])
        else:
            condition_exp = None

        if not cols:
            col_exp = "*"
        else:
            col_exp = ", ".join([f"`{col}`" for col in cols])

        query = f"SELECT {col_exp} FROM {table_name} " + (
            f" WHERE {condition_exp}" if condition_exp is not None else ""
        )
        return query, query_values

    def remove_query(
        self, table_name: str, conditions: Optional[Conditions] = None
    ) -> Tuple[str, List[Any]]:
        """
        Creates a delete query to remove rows from ``table_name`` (satisfying
        ``conditions`` if provided).

        Parameters
        ----------
        table_name : str
            Name of the table.
        conditions: Conditions, optional
            Nested array of conditions to satisfy, becomes ``WHERE``.

        Returns
        -------
        query : str
            ``DELETE`` query
        parameters : list
            Variables to bind
        """
        condition_dict = self.make_condition_expression(conditions)
        query_values = []
        if condition_dict is not None:
            condition_exp = condition_dict["expression"]
            query_values = condition_dict["values"]
        else:
            condition_exp = None

        query = f"DELETE FROM {table_name} " + (
            f" WHERE {condition_exp}" if condition_exp is not None else ""
        )
        return query, query_values
|
[
"johnandersenpdx@gmail.com"
] |
johnandersenpdx@gmail.com
|
f2cc1601e225667554205561c993b3b430da90dd
|
1edd52cf197e5ae67b5939a3beb3e70761334e62
|
/AWS/AWS_boto3_narendra/20_Collections_stop_all_ec2.py
|
d019a4a10bd4d593f40e1b1694dac8e0692e7743
|
[] |
no_license
|
sandeepmchary/Devops_wordpress_Notes
|
bdcd85d526780d03c494ecb93e714e7ffe0a4d58
|
ffd2092162073e1e7342c6066d023d04e6ca8c1c
|
refs/heads/master
| 2022-06-18T21:33:02.471025
| 2022-06-12T11:14:47
| 2022-06-12T11:14:47
| 154,679,658
| 1
| 4
| null | 2022-05-19T16:59:57
| 2018-10-25T13:51:40
|
HTML
|
UTF-8
|
Python
| false
| false
| 378
|
py
|
import boto3
# Stop every EC2 instance in the account, then wait until all are stopped.
ec2_re = boto3.resource('ec2')  # resource API: enumerate/stop instances
ec2_cli = boto3.client('ec2')   # client API: provides waiters

all_ins_id = []
for each in ec2_re.instances.all():
    print(each.id, each.state['Name'])
    all_ins_id.append(each.id)
#print(all_ins_id)

print("Stopping all instances...")
ec2_re.instances.stop()

# Block until every collected instance reports the 'stopped' state.
waiter = ec2_cli.get_waiter('instance_stopped')
waiter.wait(InstanceIds=all_ins_id)
print("All instances are stopped")
|
[
"awssandeepchary@gmail.com"
] |
awssandeepchary@gmail.com
|
88a5b2b4fd0da877b9c61c64b8cbb25dec8a8493
|
d64289adc0908134bf97cbce2d9c5f305a8042d0
|
/groupdocs_conversion_cloud/models/otp_load_options.py
|
5a17f2da8e57ddbe68c5919dc2cdfdf15c5f4a30
|
[
"MIT"
] |
permissive
|
groupdocs-conversion-cloud/groupdocs-conversion-cloud-python
|
07cfdabb6584e4f9835c25ff96a6053ef3a54596
|
496f307bc0b776314fd5f56781fb0e71b0b4985e
|
refs/heads/master
| 2023-08-30T22:12:43.070658
| 2023-08-23T17:08:13
| 2023-08-23T17:08:13
| 179,628,452
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,622
|
py
|
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose Pty Ltd" file="OtpLoadOptions.py">
# Copyright (c) 2003-2023 Aspose Pty Ltd
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
from groupdocs_conversion_cloud.models import PresentationLoadOptions
class OtpLoadOptions(PresentationLoadOptions):
    """
    Otp load options
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # OTP adds no fields of its own; everything is inherited from
    # PresentationLoadOptions via the update() calls in __init__.
    swagger_types = {
    }

    attribute_map = {
    }

    def __init__(self, **kwargs):  # noqa: E501
        """Initializes new instance of OtpLoadOptions"""  # noqa: E501
        base = super(OtpLoadOptions, self)
        base.__init__(**kwargs)
        self.swagger_types.update(base.swagger_types)
        self.attribute_map.update(base.attribute_map)

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, OtpLoadOptions):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
[
"product.team@groupdocs.com"
] |
product.team@groupdocs.com
|
9d04ced9a977b0813099e3e048af8c764a92ffc9
|
1d88ed99e2f01b6b0faa7acf762543f41569380c
|
/top/table/tests/test_returns.py
|
7264e71d32ed00d88f79d614faa8c1cfeef75cdb
|
[] |
no_license
|
loum/top
|
be0ae6951ed7d5834d14f96403d6cd1dc9d008a4
|
4d9aae6297793822b6de28b65f7639a4b2e6dcfa
|
refs/heads/master
| 2016-09-10T10:38:41.312180
| 2014-06-05T05:44:48
| 2014-06-05T05:44:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,891
|
py
|
import unittest2
import os
import datetime
import top
class TestReturns(unittest2.TestCase):
    """Integration tests for the top.Returns table against a live DbSession."""

    @classmethod
    def setUpClass(cls):
        # One shared DB connection and fixture load for all tests.
        cls._r = top.Returns()
        cls._db = top.DbSession()
        cls._db.connect()

        db = cls._db
        fixture_dir = os.path.join('top', 'tests', 'fixtures')
        fixtures = [{'db': db.returns_reference,
                     'fixture': 'returns_reference.py'},
                    {'db': db.returns, 'fixture': 'returns.py'},
                    {'db': db.agent, 'fixture': 'agents.py'}]
        for i in fixtures:
            fixture_file = os.path.join(fixture_dir, i['fixture'])
            db.load_fixture(i['db'], fixture_file)

        # Update the returns created_ts so tests can assert a known value
        # (fractional seconds stripped for comparison).
        cls._now = str(datetime.datetime.now()).split('.')[0]
        sql = """UPDATE returns
SET created_ts = '%s'""" % cls._now
        db(sql)
        db.commit()

    def test_init(self):
        """Placeholder test to make sure the Returns table is created.
        """
        msg = 'Object is not an top.Returns'
        self.assertIsInstance(self._r, top.Returns, msg)

    def test_extract_id_sql(self):
        """Verify the extract_id_sql string.
        """
        returns_id = 2

        sql = self._db.returns.extract_id_sql(returns_id)
        self._db(sql)

        received = list(self._db.rows())
        # Row values as loaded from the fixtures, with the timestamp
        # rewritten in setUpClass.
        expected = [('loumar@tollgroup.com',
                     '0431602145',
                     '%s' % self._now,
                     'Bunters We Never Sleep News + Deli',
                     '693 Albany Hwy',
                     'Victoria Park',
                     '6101',
                     'WA')]
        msg = 'extract_id_sql returned values error'
        self.assertListEqual(received, expected, msg)

    @classmethod
    def tearDownClass(cls):
        cls._db.disconnect()
        cls._db = None
        cls._r = None
|
[
"lou.markovski@gmail.com"
] |
lou.markovski@gmail.com
|
ba3910d957182f1266449984de846549adfd32bc
|
bc441bb06b8948288f110af63feda4e798f30225
|
/database_delivery_sdk/model/inspection/collector_pb2.py
|
11b6c11be52fbd835afa5a03782fb0095af50566
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 4,328
|
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: collector.proto
# NOTE: auto-generated protobuf bindings; regenerate from collector.proto
# with protoc rather than editing by hand.

import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from database_delivery_sdk.model.inspection import arg_pb2 as database__delivery__sdk_dot_model_dot_inspection_dot_arg__pb2


# File descriptor for collector.proto (package "inspection", proto3).
DESCRIPTOR = _descriptor.FileDescriptor(
  name='collector.proto',
  package='inspection',
  syntax='proto3',
  serialized_options=_b('ZDgo.easyops.local/contracts/protorepo-models/easyops/model/inspection'),
  serialized_pb=_b('\n\x0f\x63ollector.proto\x12\ninspection\x1a\x30\x64\x61tabase_delivery_sdk/model/inspection/arg.proto\"y\n\x13InspectionCollector\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12\x0e\n\x06script\x18\x04 \x01(\t\x12\'\n\x04\x61rgs\x18\x05 \x03(\x0b\x32\x19.inspection.InspectionArgBFZDgo.easyops.local/contracts/protorepo-models/easyops/model/inspectionb\x06proto3')
  ,
  dependencies=[database__delivery__sdk_dot_model_dot_inspection_dot_arg__pb2.DESCRIPTOR,])


# Message descriptor for inspection.InspectionCollector:
# string id=1, string name=2, string content=3, string script=4,
# repeated InspectionArg args=5.
_INSPECTIONCOLLECTOR = _descriptor.Descriptor(
  name='InspectionCollector',
  full_name='inspection.InspectionCollector',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='inspection.InspectionCollector.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='name', full_name='inspection.InspectionCollector.name', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='content', full_name='inspection.InspectionCollector.content', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='script', full_name='inspection.InspectionCollector.script', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='args', full_name='inspection.InspectionCollector.args', index=4,
      number=5, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=81,
  serialized_end=202,
)

# Resolve the repeated-message field to the InspectionArg descriptor from
# the imported arg_pb2 module, then register everything with the default
# symbol database.
_INSPECTIONCOLLECTOR.fields_by_name['args'].message_type = database__delivery__sdk_dot_model_dot_inspection_dot_arg__pb2._INSPECTIONARG
DESCRIPTOR.message_types_by_name['InspectionCollector'] = _INSPECTIONCOLLECTOR
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message class generated from the descriptor above.
InspectionCollector = _reflection.GeneratedProtocolMessageType('InspectionCollector', (_message.Message,), {
  'DESCRIPTOR' : _INSPECTIONCOLLECTOR,
  '__module__' : 'collector_pb2'
  # @@protoc_insertion_point(class_scope:inspection.InspectionCollector)
  })
_sym_db.RegisterMessage(InspectionCollector)


DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
f72712339cc187c4574fc8ad4a91106b89cb5f39
|
0f49a5e1daeb09742f87717e8f4849b87b1a9c44
|
/src/dialogs/DialogUmlNodeEdit.py
|
5c9ed16d21e86fbde26a233e0482e29a244a94d7
|
[] |
no_license
|
arita37/pynsource
|
7ddc717972e3c8f8a1225f4d9ba196e03bfee5df
|
57ed39ba112d97fc0af09669d6647952f6ae1e7c
|
refs/heads/master
| 2021-02-04T09:03:10.796578
| 2020-02-06T06:31:19
| 2020-02-06T06:31:19
| 243,646,965
| 1
| 0
| null | 2020-02-28T00:42:02
| 2020-02-28T00:42:00
| null |
UTF-8
|
Python
| false
| false
| 3,837
|
py
|
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Oct 26 2018)
## http://www.wxformbuilder.org/
##
## PLEASE DO *NOT* EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
###########################################################################
## Class DialogUmlNodeEdit
###########################################################################
class DialogUmlNodeEdit ( wx.Dialog ):
    """wxFormBuilder-generated dialog for editing a UML class node.

    Fields: class name (single line, Enter triggers OnClassNameEnter),
    attributes and methods (multiline), plus Cancel/OK buttons.
    Generated code -- regenerate with wxFormBuilder instead of hand-editing.
    """

    def __init__( self, parent ):
        wx.Dialog.__init__ ( self, parent, id = wx.ID_ANY, title = u"Uml Node Properties", pos = wx.DefaultPosition, size = wx.Size( 342,469 ), style = wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER )

        self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )

        bSizer9 = wx.BoxSizer( wx.VERTICAL )

        self.m_panel2 = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
        bSizer11 = wx.BoxSizer( wx.VERTICAL )

        # Row: "Class Name" label + single-line text control.
        bSizer12 = wx.BoxSizer( wx.HORIZONTAL )

        self.m_staticText1 = wx.StaticText( self.m_panel2, wx.ID_ANY, u"Class Name", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText1.Wrap( -1 )
        self.m_staticText1.SetMinSize( wx.Size( 55,-1 ) )

        bSizer12.Add( self.m_staticText1, 1, wx.ALL, 5 )

        self.txtClassName = wx.TextCtrl( self.m_panel2, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_PROCESS_ENTER )
        self.txtClassName.SetMaxLength( 0 )
        bSizer12.Add( self.txtClassName, 3, wx.ALIGN_CENTER_HORIZONTAL|wx.ALL, 5 )

        bSizer11.Add( bSizer12, 0, wx.EXPAND, 5 )

        # Row: "Attributes" label + multiline text control.
        bSizer14 = wx.BoxSizer( wx.HORIZONTAL )

        self.m_staticText2 = wx.StaticText( self.m_panel2, wx.ID_ANY, u"Attributes", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText2.Wrap( -1 )
        self.m_staticText2.SetMinSize( wx.Size( 55,-1 ) )

        bSizer14.Add( self.m_staticText2, 1, wx.ALL, 5 )

        self.txtAttrs = wx.TextCtrl( self.m_panel2, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_MULTILINE )
        # self.txtAttrs.SetMaxLength( 0 )
        bSizer14.Add( self.txtAttrs, 3, wx.ALL|wx.EXPAND, 5 )

        bSizer11.Add( bSizer14, 2, wx.EXPAND, 5 )

        # Row: "Methods" label + multiline text control.
        bSizer13 = wx.BoxSizer( wx.HORIZONTAL )

        self.m_staticText3 = wx.StaticText( self.m_panel2, wx.ID_ANY, u"Methods", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText3.Wrap( -1 )
        self.m_staticText3.SetMinSize( wx.Size( 55,-1 ) )

        bSizer13.Add( self.m_staticText3, 1, wx.ALL, 5 )

        self.txtMethods = wx.TextCtrl( self.m_panel2, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_MULTILINE )
        # self.txtMethods.SetMaxLength( 0 )
        bSizer13.Add( self.txtMethods, 3, wx.ALL|wx.EXPAND, 5 )

        bSizer11.Add( bSizer13, 2, wx.EXPAND, 5 )

        # Bottom row: Cancel and OK buttons side by side.
        bSizer4 = wx.BoxSizer( wx.HORIZONTAL )

        bSizer5 = wx.BoxSizer( wx.VERTICAL )

        self.m_button1 = wx.Button( self.m_panel2, wx.ID_CANCEL, u"Cancel", wx.DefaultPosition, wx.DefaultSize, 0 )
        bSizer5.Add( self.m_button1, 0, wx.ALL|wx.EXPAND, 5 )

        bSizer4.Add( bSizer5, 1, wx.ALIGN_CENTER_VERTICAL, 5 )

        bSizer6 = wx.BoxSizer( wx.VERTICAL )

        self.m_button2 = wx.Button( self.m_panel2, wx.ID_OK, u"OK", wx.DefaultPosition, wx.DefaultSize, 0 )
        bSizer6.Add( self.m_button2, 0, wx.ALL|wx.EXPAND, 5 )

        bSizer4.Add( bSizer6, 1, wx.ALIGN_CENTER_VERTICAL, 5 )

        bSizer11.Add( bSizer4, 1, wx.ALIGN_CENTER_HORIZONTAL, 5 )

        self.m_panel2.SetSizer( bSizer11 )
        self.m_panel2.Layout()
        bSizer11.Fit( self.m_panel2 )
        bSizer9.Add( self.m_panel2, 1, wx.EXPAND |wx.ALL, 5 )

        self.SetSizer( bSizer9 )
        self.Layout()

        self.Centre( wx.BOTH )

        # Connect Events
        self.txtClassName.Bind( wx.EVT_TEXT_ENTER, self.OnClassNameEnter )

    def __del__( self ):
        pass

    # Virtual event handlers, overide them in your derived class
    def OnClassNameEnter( self, event ):
        event.Skip()
|
[
"abulka@gmail.com"
] |
abulka@gmail.com
|
f8e9da36ddbcc3f59610866194a1676bdff287ad
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/90/usersdata/218/60729/submittedfiles/matriz2.py
|
9c1a3e9aea2e3f724466e0e40fb3cef9566de708
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,044
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
def somas(a):
    """Return True when the square matrix *a* is "magic".

    A matrix is considered magic here when every row sum, every column
    sum, the main-diagonal sum and the anti-diagonal sum are all equal.

    Parameters
    ----------
    a : numpy.ndarray
        Square (n x n) matrix.

    Returns
    -------
    bool
    """
    # Vectorized replacement of the original element-by-element loops:
    # same values collected, same all-equal comparison.
    totals = []
    totals.extend(a.sum(axis=1))           # row sums
    totals.extend(a.sum(axis=0))           # column sums
    totals.append(np.trace(a))             # main diagonal
    totals.append(np.trace(np.fliplr(a)))  # anti-diagonal
    # For an empty (0 x 0) matrix both diagonal sums are 0, so this
    # returns True -- matching the original implementation.
    return all(t == totals[0] for t in totals)
# Read an n x n matrix from the user and report whether it is magic.
n = int(input('digite as dimensões da matriz quadrada:'))
a = np.zeros((n, n))
for i in range(a.shape[0]):
    for j in range(a.shape[1]):
        a[i, j] = float(input('digite o elemento:'))
# BUG FIX: the original last line was "print('N')_" -- a trailing
# underscore made it a syntax error.
if somas(a):
    print('S')
else:
    print('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
bf1a7529e2315658f50cc0c20cf59fc4d0d940fd
|
ababed0e1a54f4888440edd20cbfdf6beb3cd20d
|
/backend/menu/api/v1/viewsets.py
|
67ee4177ad30850e6ca22a7f0ce52813b2fbd299
|
[] |
no_license
|
crowdbotics-apps/asd-18920
|
1766fa6c4334d1c1f8a97f6aa4da8961742bcc2c
|
5022fcd5f0b0522ce53438270e2ae65d1b652dab
|
refs/heads/master
| 2022-11-17T15:02:25.688634
| 2020-07-15T16:09:00
| 2020-07-15T16:09:00
| 279,915,037
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,555
|
py
|
from rest_framework import authentication
from menu.models import ItemVariant, Country, Item, Category, Review
from .serializers import (
ItemVariantSerializer,
CountrySerializer,
ItemSerializer,
CategorySerializer,
ReviewSerializer,
)
from rest_framework import viewsets
class CategoryViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Category objects (session or token auth)."""
    serializer_class = CategorySerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Category.objects.all()
class ItemViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Item objects (session or token auth)."""
    serializer_class = ItemSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Item.objects.all()
class ItemVariantViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for ItemVariant objects (session or token auth)."""
    serializer_class = ItemVariantSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = ItemVariant.objects.all()
class ReviewViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Review objects (session or token auth)."""
    serializer_class = ReviewSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Review.objects.all()
class CountryViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Country objects (session or token auth)."""
    serializer_class = CountrySerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Country.objects.all()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
efcd1f6da93da3bf75a7685d2f07a65c5f588702
|
52ab2da7b131643a344ee5344d8f35aebd6e2eed
|
/WebProject1/myvenv/lib/python3.6/site-packages/sqlalchemy/event/legacy.py
|
049df81aafa7b163be69d6561ad9e9e6ff7ef534
|
[
"MIT"
] |
permissive
|
ucsb-cs48-w19/5pm-findtheroommate
|
cd6db6c4cf3ee6f159b04456ba13b1ef684c7546
|
d9d01b95c478e7493b5b32c8b56ceed00578b188
|
refs/heads/master
| 2020-04-16T01:00:16.617610
| 2019-03-19T20:42:38
| 2019-03-19T20:42:38
| 165,158,037
| 2
| 1
|
MIT
| 2019-03-05T00:46:12
| 2019-01-11T01:28:11
|
Python
|
UTF-8
|
Python
| false
| false
| 5,900
|
py
|
# event/legacy.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to handle adaption of legacy call signatures,
generation of deprecation notes and docstrings.
"""
from .. import util
def _legacy_signature(since, argnames, converter=None):
def leg(fn):
if not hasattr(fn, "_legacy_signatures"):
fn._legacy_signatures = []
fn._legacy_signatures.append((since, argnames, converter))
return fn
return leg
def _wrap_fn_for_legacy(dispatch_collection, fn, argspec):
    """Return *fn* itself, or a wrapper adapting a legacy listener signature.

    Scans the event's registered legacy signatures for one matching the
    listener's actual argspec; when found, returns a ``wrap_leg`` adapter
    that re-maps the modern call arguments onto the legacy positions.
    """
    for since, argnames, conv in dispatch_collection.legacy_signatures:
        if argnames[-1] == "**kw":
            has_kw = True
            argnames = argnames[0:-1]
        else:
            has_kw = False

        # Match when positional arity and **kw presence both agree with
        # the listener's declared argspec.
        if len(argnames) == len(argspec.args) and has_kw is bool(
            argspec.keywords
        ):

            if conv:
                assert not has_kw

                def wrap_leg(*args):
                    # The converter translates the modern argument tuple
                    # into the legacy one.
                    return fn(*conv(*args))

            else:

                def wrap_leg(*args, **kw):
                    # Select only the legacy-named subset of the modern
                    # positional arguments, in legacy order.
                    argdict = dict(zip(dispatch_collection.arg_names, args))
                    args = [argdict[name] for name in argnames]
                    if has_kw:
                        return fn(*args, **kw)
                    else:
                        return fn(*args)

            return wrap_leg
    else:
        # for/else: no legacy signature matched, so the listener already
        # uses the modern signature -- return it unchanged.
        return fn
def _indent(text, indent):
return "\n".join(indent + line for line in text.split("\n"))
def _standard_listen_example(dispatch_collection, sample_target, fn):
    """Render the modern-signature code example for an event's docstring.

    Produces a ``@event.listens_for`` snippet using the event's current
    argument names; when the event has more than three arguments, a second
    named-argument (``named=True``) example is appended.
    """
    # Example lines pulling the first two arguments out of **kw, indented
    # to sit inside the generated function body.
    example_kw_arg = _indent(
        "\n".join(
            "%(arg)s = kw['%(arg)s']" % {"arg": arg}
            for arg in dispatch_collection.arg_names[0:2]
        ),
        "    ",
    )
    if dispatch_collection.legacy_signatures:
        # Newest version in which the current signature became effective.
        current_since = max(
            since
            for since, args, conv in dispatch_collection.legacy_signatures
        )
    else:
        current_since = None
    text = (
        "from sqlalchemy import event\n\n"
        "# standard decorator style%(current_since)s\n"
        "@event.listens_for(%(sample_target)s, '%(event_name)s')\n"
        "def receive_%(event_name)s("
        "%(named_event_arguments)s%(has_kw_arguments)s):\n"
        "    \"listen for the '%(event_name)s' event\"\n"
        "\n    # ... (event handling logic) ...\n"
    )

    if len(dispatch_collection.arg_names) > 3:
        text += (
            "\n# named argument style (new in 0.9)\n"
            "@event.listens_for("
            "%(sample_target)s, '%(event_name)s', named=True)\n"
            "def receive_%(event_name)s(**kw):\n"
            "    \"listen for the '%(event_name)s' event\"\n"
            "%(example_kw_arg)s\n"
            "\n    # ... (event handling logic) ...\n"
        )

    text %= {
        "current_since": " (arguments as of %s)" % current_since
        if current_since
        else "",
        "event_name": fn.__name__,
        "has_kw_arguments": ", **kw" if dispatch_collection.has_kw else "",
        "named_event_arguments": ", ".join(dispatch_collection.arg_names),
        "example_kw_arg": example_kw_arg,
        "sample_target": sample_target,
    }
    return text
def _legacy_listen_examples(dispatch_collection, sample_target, fn):
    """Render "DEPRECATED calling style" doc examples for each legacy
    signature registered on the event."""
    text = ""
    for since, args, conv in dispatch_collection.legacy_signatures:
        text += (
            "\n# DEPRECATED calling style (pre-%(since)s, "
            "will be removed in a future release)\n"
            "@event.listens_for(%(sample_target)s, '%(event_name)s')\n"
            "def receive_%(event_name)s("
            "%(named_event_arguments)s%(has_kw_arguments)s):\n"
            "    \"listen for the '%(event_name)s' event\"\n"
            "\n    # ... (event handling logic) ...\n"
            % {
                "since": since,
                "event_name": fn.__name__,
                "has_kw_arguments": " **kw"
                if dispatch_collection.has_kw
                else "",
                "named_event_arguments": ", ".join(args),
                "sample_target": sample_target,
            }
        )
    return text
def _version_signature_changes(parent_dispatch_cls, dispatch_collection):
    """Render the ``.. deprecated::`` Sphinx note describing the signature
    change, based on the event's first registered legacy signature."""
    since, args, conv = dispatch_collection.legacy_signatures[0]
    return (
        "\n.. deprecated:: %(since)s\n"
        "    The :class:`.%(clsname)s.%(event_name)s` event now accepts the \n"
        "    arguments ``%(named_event_arguments)s%(has_kw_arguments)s``.\n"
        "    Support for listener functions which accept the previous \n"
        "    argument signature(s) listed above as \"deprecated\" will be \n"
        "    removed in a future release."
        % {
            "since": since,
            "clsname": parent_dispatch_cls.__name__,
            "event_name": dispatch_collection.name,
            "named_event_arguments": ", ".join(dispatch_collection.arg_names),
            "has_kw_arguments": ", **kw" if dispatch_collection.has_kw else "",
        }
    )
def _augment_fn_docs(dispatch_collection, parent_dispatch_cls, fn):
    """Inject generated argument-form examples into an event function's
    docstring and return the augmented docstring."""
    header = (
        ".. container:: event_signatures\n\n"
        "     Example argument forms::\n"
        "\n"
    )

    sample_target = getattr(parent_dispatch_cls, "_target_class_doc", "obj")
    text = header + _indent(
        _standard_listen_example(dispatch_collection, sample_target, fn),
        " " * 8,
    )
    if dispatch_collection.legacy_signatures:
        # Append deprecated-style examples plus the Sphinx deprecation note.
        text += _indent(
            _legacy_listen_examples(dispatch_collection, sample_target, fn),
            " " * 8,
        )

        text += _version_signature_changes(
            parent_dispatch_cls, dispatch_collection)

    return util.inject_docstring_text(fn.__doc__, text, 1)
|
[
"tengyue@umail.ucsb.edu"
] |
tengyue@umail.ucsb.edu
|
399b9de55761113ccab69d01a9bd68a988a88fe6
|
bed40794a78225e070c49a72209d447757ec8343
|
/python_crawl/section3-1.py
|
3a3113bc3a6b4a52b626ac8857b76dd0fe10852d
|
[] |
no_license
|
moorekwon/crawling-practice
|
458ffe31b3a7a91fad6547ef76f9a428376d542f
|
c5bc989ced353daed34d53410c261ce4d4561d4c
|
refs/heads/master
| 2022-09-13T06:46:31.034858
| 2020-05-31T06:43:28
| 2020-05-31T06:43:28
| 266,505,397
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,094
|
py
|
import urllib.request
from urllib.parse import urlparse
import sys
import io

# Re-wrap stdout/stderr with UTF-8 so non-ASCII text prints correctly on
# consoles whose default encoding is not UTF-8.
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')

# Fetch the encar.com front page (response object kept for inspection).
url = 'http://www.encar.com'
mem = urllib.request.urlopen(url)

# print('type(mem) >> ', type(mem))
# print('mem.geturl() >> ', mem.geturl())
# print('mem.status >> ', mem.status)
# print('mem.getheaders() >> ', mem.getheaders())
# print('mem.getcode() >> ', mem.getcode())
# print("mem.read(200).decode('euc-kr') >> ", mem.read(200).decode('euc-kr'))
# print("urlparse('http://www.encar.com?id=moorekwon&pw=1111').query >> ", urlparse('http://www.encar.com?id=moorekwon&pw=1111').query)

# Build a GET query string for the ipify "what is my IP" API.
API = 'https://api.ipify.org'
before_params = {
    'format': 'json'
}
print('before_params >> ', before_params)

# urlencode turns the dict into 'format=json'.
after_params = urllib.parse.urlencode(before_params)
print('after_params >> ', after_params)

URL = API + '?' + after_params
print('URL >> ', URL)

# Raw response bytes, then decoded to text.
data = urllib.request.urlopen(URL).read()
print('data >> ', data)

text = data.decode('utf-8')
print('text >> ', text)
|
[
"raccoonhj33@gmail.com"
] |
raccoonhj33@gmail.com
|
a868176f862739f842c05c4e3447d5f92ff293ac
|
b95fa99bb1ba2210b73251614d2613363c37f932
|
/deploy/dot-product/scripts/dot-67.py
|
e5a37f1559951e11ad8cf6b305503d2eb3c83ad0
|
[] |
no_license
|
lingxiao/learn-adj-relation
|
d1a8894fefc776ec0bd414b5f038361ed4b79d16
|
dc4285af19e53d7e2d015eb6394f6c601c707da0
|
refs/heads/master
| 2020-12-30T16:27:51.531268
| 2017-06-07T18:59:48
| 2017-06-07T18:59:48
| 87,714,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,144
|
py
|
############################################################
# Module : A series of measures on the graph for experiments
# Date : April 2nd, 2017
# Author : Xiao Ling
############################################################

import os
import numpy as np

from utils import *
from scripts import *
from app.config import *

############################################################
'''
paths
'''
# Batch index this script instance processes.
batch = 67

# dirs = working_dirs('dot-product',['pairs', 'scripts','shells'])
out_dirs = data_dirs('dot-product', ['outputs'])
# One working dir per (train|valid|all) x (pairs|words) combination.
word_dirs = working_dirs( 'words'
                        , [p + '-' + s for p in ['train', 'valid', 'all'] \
                           for s in ['pairs', 'words'] ])
word_2_vec = get_path('word2vec')
word_2_vec_sm = get_path('word2vec-sm')

current_job = 'all-pairs'
# Input word-pair file for this batch, and where its dot products go.
word_pair_path = os.path.join(word_dirs[current_job] , 'batch-' + str(batch) + '.txt')
out_path = os.path.join(out_dirs['outputs'] , current_job + '-' + str(batch) + '.txt')

print('\n>> running dot-' + str(batch) + '.py at ' + current_job)
# `dot` comes from one of the star imports above -- presumably computes
# word2vec dot products for each pair; TODO confirm in scripts/.
dot(word_2_vec, word_pair_path, out_path, refresh = True)
|
[
"lingxiao@seas.upenn.edu"
] |
lingxiao@seas.upenn.edu
|
b00578cb33932e544783d41cc72fa666201bb10e
|
4fd0a43fb3fdbc5ce355b050d0a6506b97bb5d79
|
/src/basic/mqtt_client.py
|
f5bb543c0f8e7a81272e10ad1741abd0064590ae
|
[] |
no_license
|
Shoumik-Gandre/wn_miniproject
|
a175e871b0931e6bcb324fbcf81b3dbbd09186e6
|
9a61a27c7eee15486cf688a3d66ceae23f8d5b47
|
refs/heads/main
| 2023-04-16T13:48:53.383291
| 2021-04-29T13:55:45
| 2021-04-29T13:55:45
| 362,832,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,001
|
py
|
import paho.mqtt.client as mqtt
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    """paho-mqtt connect callback: log the result code and (re)subscribe."""
    print("Connected with result code " + str(rc))

    # Subscribing in on_connect() means that if we lose the connection and
    # reconnect then subscriptions will be renewed.
    client.subscribe("WN/test")
    client.subscribe("WN/topic")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    """paho-mqtt message callback: echo topic and payload to stdout."""
    line = "{} {}".format(msg.topic, str(msg.payload))
    print(line)
def main():
    """Connect to the public Mosquitto broker and process messages forever."""
    client = mqtt.Client()
    client.on_connect = on_connect
    client.on_message = on_message

    # Public test broker, default MQTT port 1883, 60 s keepalive.
    client.connect("test.mosquitto.org", 1883, 60)

    # Blocking call that processes network traffic, dispatches callbacks and
    # handles reconnecting.
    # Other loop*() functions are available that give a threaded interface and a
    # manual interface.
    client.loop_forever()


if __name__ == "__main__":
    main()
|
[
"shoumikgandre@gmail.com"
] |
shoumikgandre@gmail.com
|
4a5429e17840c12de155763259947c16be4142b8
|
7f9dfa2cccf77764940ffcbbf92939e37c138c43
|
/crawl_file/file_path/pylab_examples/anscombe.py
|
753e4342916a1d85af7b3ce7af20c14c7110e3c9
|
[] |
no_license
|
zhangmman/scrapy_spider
|
f80bd8d213edde0dea083babe610ca7b1bc449a3
|
2bda4aa29f2550c649c939045ce4fcdea2736187
|
refs/heads/master
| 2020-09-11T13:58:49.930929
| 2019-12-21T08:40:56
| 2019-12-21T08:43:43
| 222,080,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,934
|
py
|
from __future__ import print_function
"""
Edward Tufte uses this example from Anscombe to show 4 datasets of x
and y that have the same mean, standard deviation, and regression
line, but which are qualitatively different.
matplotlib fun for a rainy day
"""
import matplotlib.pyplot as plt
import numpy as np
x = np.array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])
y1 = np.array([8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68])
y2 = np.array([9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74])
y3 = np.array([7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73])
x4 = np.array([8, 8, 8, 8, 8, 8, 8, 19, 8, 8, 8])
y4 = np.array([6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 12.50, 5.56, 7.91, 6.89])
def fit(x):
    """Shared regression line y = 3 + 0.5*x for all four Anscombe datasets."""
    return 0.5 * x + 3
# Endpoints of the fitted line over the range of x (datasets I-III).
xfit = np.array([np.amin(x), np.amax(x)])

# Dataset I (upper left): well-behaved linear relationship.
plt.subplot(221)
plt.plot(x, y1, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), xticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.text(3, 12, 'I', fontsize=20)

# Dataset II (upper right): clearly non-linear.
plt.subplot(222)
plt.plot(x, y2, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), xticklabels=[], yticks=(4, 8, 12), yticklabels=[], xticks=(0, 10, 20))
plt.text(3, 12, 'II', fontsize=20)

# Dataset III (lower left): linear except for one outlier.
plt.subplot(223)
plt.plot(x, y3, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.text(3, 12, 'III', fontsize=20)
plt.setp(plt.gca(), yticks=(4, 8, 12), xticks=(0, 10, 20))

# Dataset IV (lower right): x constant except for one leverage point.
plt.subplot(224)
xfit = np.array([np.amin(x4), np.amax(x4)])
plt.plot(x4, y4, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), yticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.text(3, 12, 'IV', fontsize=20)

# verify the stats
# NOTE(review): the loop variable x rebinds the module-level x; harmless
# here because nothing uses x afterwards.
pairs = (x, y1), (x, y2), (x, y3), (x4, y4)
for x, y in pairs:
    print('mean=%1.2f, std=%1.2f, r=%1.2f' % (np.mean(y), np.std(y), np.corrcoef(x, y)[0][1]))

plt.show()
|
[
"zhangman@ncepu.cn"
] |
zhangman@ncepu.cn
|
1504d39ef20e08c04e1cdc4746b68ebbb0bcc192
|
d2f91b93ad42aaefa5fc315a9b3a5d45d07fa705
|
/slbman/venv/Lib/site-packages/aliyun/api/rest/rds/RdsDescribeDBInstanceClassesRequest.py
|
33ce5a2e88dcb9457c34ba33df1ea05109b25fc6
|
[] |
no_license
|
junlongzhou5566/managePlatform
|
66cb5bc5b176147ff0038819924f7efa8df1d556
|
3201ba1a11b05c86db5f42aa9ca8eaf1cc20e216
|
refs/heads/master
| 2021-03-29T00:58:23.337808
| 2020-03-17T09:50:21
| 2020-03-17T09:50:21
| 247,910,365
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
'''
Created by auto_sdk on 2015.06.23
'''
from aliyun.api.base import RestApi
class RdsDescribeDBInstanceClassesRequest(RestApi):
    """Request object for the Aliyun RDS DescribeDBInstanceClasses API
    (API date 2013-05-28), dispatched through the RestApi base class."""

    def __init__(self, domain='rds.aliyuncs.com', port=80):
        RestApi.__init__(self, domain, port)

    def getapiname(self):
        """Return the fully-qualified API name used for request routing."""
        return 'rds.aliyuncs.com.DescribeDBInstanceClasses.2013-05-28'
|
[
"645647713@qq.com@qq.com"
] |
645647713@qq.com@qq.com
|
cf5232006921ce80f92f29008e1e66683214afa9
|
4f935960c688bb306a9808b9a0f47480a1a3d33a
|
/fastai2/callback/cutmix.py
|
1f182c8d3701ad765019fd43909d6f91fe436c03
|
[
"Apache-2.0"
] |
permissive
|
AccidentalGuru/fastai2
|
c3297919f2c2455f8c8a5ee81a5590afe87df34a
|
e816625945d87c2d9ac6521150f235942912bf74
|
refs/heads/master
| 2021-05-26T22:59:55.066904
| 2020-04-08T13:18:11
| 2020-04-08T13:18:11
| 254,182,464
| 1
| 0
|
Apache-2.0
| 2020-04-08T19:33:43
| 2020-04-08T19:33:42
| null |
UTF-8
|
Python
| false
| false
| 2,296
|
py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/74_callback.cutmix.ipynb (unless otherwise specified).
__all__ = ['CutMix']
# Cell
from torch.distributions.beta import Beta
from ..vision.all import *
# Cell
class CutMix(Callback):
    "Implementation of `https://arxiv.org/abs/1905.04899`"
    run_after,run_valid = [Normalize],False
    def __init__(self, alpha=1.): self.distrib = Beta(tensor(alpha), tensor(alpha))

    def begin_fit(self):
        # If the loss expects integer targets (y_int), substitute self.lf so
        # the loss itself can be interpolated between the two target sets.
        self.stack_y = getattr(self.learn.loss_func, 'y_int', False)
        if self.stack_y: self.old_lf,self.learn.loss_func = self.learn.loss_func,self.lf

    def after_fit(self):
        # Restore the original loss function after training.
        if self.stack_y: self.learn.loss_func = self.old_lf

    def begin_batch(self):
        # Sample the mixing weight from Beta(alpha, alpha); keep the larger
        # of (lam, 1-lam) so the "primary" image always dominates.
        W, H = self.xb[0].size(3), self.xb[0].size(2)
        lam = self.distrib.sample((1,)).squeeze().to(self.x.device)
        lam = torch.stack([lam, 1-lam])
        self.lam = lam.max()
        # Pair every sample with a random other sample from the same batch.
        shuffle = torch.randperm(self.y.size(0)).to(self.x.device)
        xb1,self.yb1 = tuple(L(self.xb).itemgot(shuffle)),tuple(L(self.yb).itemgot(shuffle))
        nx_dims = len(self.x.size())
        # NOTE(review): nx_dims above is unused -- candidate for removal.
        # Paste a random rectangle from the shuffled batch into the original
        # images, then recompute lam as the exact un-replaced area fraction.
        x1, y1, x2, y2 = self.rand_bbox(W, H, self.lam)
        self.learn.xb[0][:, :, x1:x2, y1:y2] = xb1[0][:, :, x1:x2, y1:y2]
        self.lam = (1 - ((x2-x1)*(y2-y1))/(W*H)).type(torch.float)
        if not self.stack_y:
            # Float targets: blend the two label sets directly with lerp.
            ny_dims = len(self.y.size())
            self.learn.yb = tuple(L(self.yb1,self.yb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=ny_dims-1)))

    def lf(self, pred, *yb):
        # Loss interpolated between shuffled and original targets; only
        # applied while training (validation uses the unmodified loss).
        if not self.training: return self.old_lf(pred, *yb)
        with NoneReduce(self.old_lf) as lf:
            loss = torch.lerp(lf(pred,*self.yb1), lf(pred,*yb), self.lam)
        return reduce_loss(loss, getattr(self.old_lf, 'reduction', 'mean'))

    def rand_bbox(self, W, H, lam):
        # Cut a box whose area fraction is ~(1 - lam), centered at a uniform
        # random point and clamped to the image bounds.
        cut_rat = torch.sqrt(1. - lam)
        cut_w = (W * cut_rat).type(torch.long)
        cut_h = (H * cut_rat).type(torch.long)
        # uniform
        cx = torch.randint(0, W, (1,)).to(self.x.device)
        cy = torch.randint(0, H, (1,)).to(self.x.device)
        x1 = torch.clamp(cx - cut_w // 2, 0, W)
        y1 = torch.clamp(cy - cut_h // 2, 0, H)
        x2 = torch.clamp(cx + cut_w // 2, 0, W)
        y2 = torch.clamp(cy + cut_h // 2, 0, H)
        return x1, y1, x2, y2
|
[
"sylvain.gugger@gmail.com"
] |
sylvain.gugger@gmail.com
|
f398d3b06ab094a54439b2f4315ad474a76e55f2
|
6f30245f27a9568155f69648faf148c278136029
|
/hhapps/cmd/stock_api.py
|
dd7da5d6c78ea0bde4abd7f69423a01f7dd4884b
|
[] |
no_license
|
r202-coe-psu/hh-apps
|
82495ffec7fb09155afa4e8f571051aad824acb4
|
a15453b7f502a2a71ccb89ba4c4ebe95ef3ca86f
|
refs/heads/master
| 2021-05-03T05:48:40.766349
| 2017-08-06T22:45:30
| 2017-08-06T22:45:30
| 120,584,239
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
from hhapps.stock import api
def main():
    """Parse command-line options and run the stock API application."""
    options = api.get_program_options()
    app = api.create_app()
    # debug/host/port are all taken from the command line.
    app.run(
        debug=options.debug,
        host=options.host,
        port=int(options.port)
    )
|
[
"boatkrap@gmail.com"
] |
boatkrap@gmail.com
|
90d4c660d0cd1e00e4e063eff79f31aaaf635e41
|
95ed5173865ea5930ac1f4280e3bce78411ea956
|
/examples/plot_wage_education_gender.py
|
47305c351dc39a8a18ed0feba01340058cc5e141
|
[
"CC-BY-3.0",
"CC-BY-4.0"
] |
permissive
|
FedericoV/stats_in_python_tutorial
|
c0e99039f8f76e453bf511b99ad906fdf3111509
|
0e7607e36896790eeb3753ecb3b8ee82db206a97
|
refs/heads/master
| 2021-01-18T00:23:38.240447
| 2015-08-26T13:20:45
| 2015-08-26T13:20:45
| 41,425,683
| 1
| 0
| null | 2015-08-26T12:58:37
| 2015-08-26T12:58:36
| null |
UTF-8
|
Python
| false
| false
| 2,679
|
py
|
"""
Test for an education/gender interaction in wages
==================================================
Wages depend mostly on education. Here we investigate how this dependence
is related to gender: not only does gender create an offset in wages, it
also seems that wages increase more with education for males than
females.
Does our data support this last hypothesis? We will test this using
statsmodels' formulas
(http://statsmodels.sourceforge.net/stable/example_formulas.html).
"""
##############################################################################
# Load and massage the data
import pandas
import urllib
import os
if not os.path.exists('wages.txt'):
# Download the file if it is not present
urllib.urlretrieve('http://lib.stat.cmu.edu/datasets/CPS_85_Wages',
'wages.txt')
# EDUCATION: Number of years of education
# SEX: 1=Female, 0=Male
# WAGE: Wage (dollars per hour)
data = pandas.read_csv('wages.txt', skiprows=27, skipfooter=6, sep=None,
header=None, names=['education', 'gender', 'wage'],
usecols=[0, 2, 5],
)
# Convert genders to strings (this is particulary useful so that the
# statsmodels formulas detects that gender is a categorical variable)
import numpy as np
data['gender'] = np.choose(data.gender, ['male', 'female'])
# Log-transform the wages, because they typically are increased with
# multiplicative factors
data['wage'] = np.log10(data['wage'])
##############################################################################
# simple plotting
import seaborn
# Plot 2 linear fits for male and female.
seaborn.lmplot(y='wage', x='education', hue='gender', data=data)
##############################################################################
# statistical analysis
import statsmodels.formula.api as sm
# Note that this model is not the plot displayed above: it is one
# joined model for male and female, not separate models for male and
# female. The reason is that a single model enables statistical testing
result = sm.ols(formula='wage ~ 1 + education + gender', data=data).fit()
print(result.summary())
# The plots above highlight that there is not only a different offset in
# wage but also a different slope
# We need to model this using an interaction
result = sm.ols(formula='wage ~ 1 + education + gender + education * gender',
data=data).fit()
print(result.summary())
# Looking at the p-value of the interaction of gender and education, the
# data does not support the hypothesis that education benefits males
# more than female (p-value > 0.05).
import matplotlib.pyplot as plt
plt.show()
|
[
"gael.varoquaux@normalesup.org"
] |
gael.varoquaux@normalesup.org
|
ab4ddaa525f0af038fd27984ccf21aea86d3a3e9
|
00c1a2bf4f0b9af287f336b8c6f6e52390ce2d6f
|
/loyihaapp/models.py
|
469b15e2afded2764c98c114693a9488e5d9a12f
|
[] |
no_license
|
bekzod886/Django_loyiha
|
c038598539e3dd0efe122eceb49d77b3b2145edb
|
5c1e0459db3a891ba6bd2a33f51fd575173e8fd8
|
refs/heads/main
| 2023-07-02T16:39:17.591120
| 2021-08-05T08:16:27
| 2021-08-05T08:16:27
| 391,806,136
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
from django.db import models
# Create your models here.
class Meva(models.Model):
    """Catalog item with a title, image URL, short description and price."""
    # All text fields share max_length=255 per the original schema.
    title = models.CharField(max_length=255)
    img_url = models.CharField(max_length=255)
    desc = models.CharField(max_length=255)
    # NOTE(review): FloatField for money is lossy; DecimalField is the usual
    # choice — confirm before changing, migrations would be required.
    price = models.FloatField()
|
[
"you@example.com"
] |
you@example.com
|
c4b8dc8085997384c991bd821a50cf250b9e32a6
|
f82ca354391c19a753d319b38f8a69369e60f960
|
/src/lib/device_detector/parser/client/mobileapp.py
|
6ee635612ab5c9fdfe29e806a5fa3a79fa541683
|
[
"MIT"
] |
permissive
|
martbhell/wasthereannhlgamelastnight
|
8398920ab1b6cf998d8f91ef5598a8e28de57a8d
|
c40f9f12ed4c066d4f42095e96e9a87a8581d99d
|
refs/heads/master
| 2023-08-10T05:10:46.960500
| 2023-07-18T03:56:22
| 2023-07-18T04:02:33
| 37,021,751
| 5
| 0
|
MIT
| 2023-08-29T19:16:07
| 2015-06-07T15:38:26
|
Python
|
UTF-8
|
Python
| false
| false
| 288
|
py
|
from . import BaseClientParser
class MobileApp(BaseClientParser):
    """Client parser driven by the mobile-app fixture catalogues."""

    # Fixture files consulted when matching a client; local entries first.
    fixture_files = [
        'local/client/mobile_apps.yml',
        'upstream/client/mobile_apps.yml',
    ]

    def dtype(self):
        """Return the computed device type, falling back to 'mobile app'."""
        if self.calculated_dtype:
            return self.calculated_dtype
        return 'mobile app'


__all__ = ['MobileApp']
|
[
"martbhell@users.noreply.github.com"
] |
martbhell@users.noreply.github.com
|
7347fbf197616979bef0fd4e3c6863d7e6916654
|
dd6c21308e1cba24658c8ca7a49e2499cd167da6
|
/venv/Lib/site-packages/guardian/utils.py
|
5ca593620ebeb53f3e24e8634be26a4a05d93173
|
[
"MIT"
] |
permissive
|
ansonsry/Freshshop
|
3a53db4d6d0bf1d6705498869a13a3aa7db6ab8c
|
79ab8beb1aa993f6365182c8d3bb478ee4e028f8
|
refs/heads/master
| 2021-06-20T18:54:08.009409
| 2019-07-26T02:56:55
| 2019-07-26T03:02:27
| 198,931,513
| 0
| 0
|
MIT
| 2021-03-19T22:33:14
| 2019-07-26T02:23:49
|
Python
|
UTF-8
|
Python
| false
| false
| 7,153
|
py
|
"""
django-guardian helper functions.
Functions defined within this module should be considered as django-guardian's
internal functionality. They are **not** guaranteed to be stable - which means
they actual input parameters/output type may change in future releases.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.models import AnonymousUser, Group
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from django.db.models import Model
from django.http import HttpResponseForbidden, HttpResponseNotFound
from django.shortcuts import render_to_response
from django.template import RequestContext
from guardian.compat import get_user_model, remote_model
from guardian.conf import settings as guardian_settings
from guardian.ctypes import get_content_type
from guardian.exceptions import NotUserNorGroup
from itertools import chain
import django
import logging
import os
logger = logging.getLogger(__name__)
def abspath(*p):
    """Join the given path components and return the absolute path.

    Replaces the original ``abspath = lambda *p: ...`` assignment (PEP 8
    E731): a ``def`` gives the callable a real name for tracebacks and
    introspection while behaving identically.
    """
    return os.path.abspath(os.path.join(*p))
def get_anonymous_user():
    """
    Return the concrete ``User`` instance (never ``AnonymousUser``) that the
    ``ANONYMOUS_USER_NAME`` setting designates as the anonymous account.
    """
    user_model = get_user_model()
    filters = {user_model.USERNAME_FIELD: guardian_settings.ANONYMOUS_USER_NAME}
    return user_model.objects.get(**filters)
def get_identity(identity):
    """
    Split *identity* into a ``(user, group)`` pair where exactly one element
    is set.

    ``AnonymousUser`` instances are first swapped for the concrete anonymous
    ``User`` so authorization backends can treat all users uniformly.

    :param identity: either a ``User`` or a ``Group`` instance
    :returns: ``(user, None)`` for users, ``(None, group)`` for groups
    :raises NotUserNorGroup: when *identity* is neither
    """
    # Normalize the anonymous sentinel to a real persisted user first.
    if isinstance(identity, AnonymousUser):
        identity = get_anonymous_user()

    if isinstance(identity, get_user_model()):
        return identity, None
    if isinstance(identity, Group):
        return None, identity

    raise NotUserNorGroup("User/AnonymousUser or Group instance is required "
                          "(got %s)" % identity)
def get_40x_or_None(request, perms, obj=None, login_url=None,
                    redirect_field_name=None, return_403=False,
                    return_404=False, accept_global_perms=False):
    """
    Check ``request.user`` against ``perms`` and return ``None`` if all are
    granted, otherwise a 403/404 response or a redirect to the login page.

    :param request: current request; ``request.user`` is the subject checked
    :param perms: iterable of permission codenames that must ALL be granted
    :param obj: optional object for per-object permission checks
    :param login_url: login URL for the redirect; defaults to
        ``settings.LOGIN_URL``
    :param redirect_field_name: query parameter carrying the original URL;
        defaults to Django's ``REDIRECT_FIELD_NAME``
    :param return_403: if True, answer failure with a 403 (rendered, raised
        as ``PermissionDenied`` or plain, per guardian's
        ``RENDER_403``/``RAISE_403`` settings)
    :param return_404: if True, answer failure with a 404 analogously
        (``RENDER_404``/``RAISE_404``)
    :param accept_global_perms: if True, check global (model-wide)
        permissions first and only fall back to object-level checks
    """
    login_url = login_url or settings.LOGIN_URL
    redirect_field_name = redirect_field_name or REDIRECT_FIELD_NAME
    # Handles both original and with object provided permission check
    # as ``obj`` defaults to None
    has_permissions = False
    # global perms check first (if accept_global_perms)
    if accept_global_perms:
        has_permissions = all(request.user.has_perm(perm) for perm in perms)
    # if still no permission granted, try obj perms
    if not has_permissions:
        has_permissions = all(request.user.has_perm(perm, obj)
                              for perm in perms)
    if not has_permissions:
        if return_403:
            if guardian_settings.RENDER_403:
                response = render_to_response(
                    guardian_settings.TEMPLATE_403, {},
                    RequestContext(request))
                response.status_code = 403
                return response
            elif guardian_settings.RAISE_403:
                raise PermissionDenied
            return HttpResponseForbidden()
        if return_404:
            if guardian_settings.RENDER_404:
                response = render_to_response(
                    guardian_settings.TEMPLATE_404, {},
                    RequestContext(request))
                response.status_code = 404
                return response
            elif guardian_settings.RAISE_404:
                raise ObjectDoesNotExist
            return HttpResponseNotFound()
        else:
            # Neither a 403 nor a 404 was requested: fall back to sending
            # the user to the login page, preserving the original URL.
            from django.contrib.auth.views import redirect_to_login
            return redirect_to_login(request.get_full_path(),
                                     login_url,
                                     redirect_field_name)
def clean_orphan_obj_perms():
    """
    Seeks and removes all object permissions entries pointing at non-existing
    targets.

    Returns number of removed objects.
    """
    # Imported lazily to avoid circular imports at module load time.
    from guardian.models import UserObjectPermission
    from guardian.models import GroupObjectPermission
    deleted = 0
    # TODO: optimise
    for perm in chain(UserObjectPermission.objects.all().iterator(),
                      GroupObjectPermission.objects.all().iterator()):
        # ``content_object`` resolves the generic FK; None means the target
        # row is gone, so this permission entry is an orphan.
        if perm.content_object is None:
            # Lazy %-style args (instead of eager ``%`` formatting): the
            # message is only built when the log level is actually enabled.
            logger.debug("Removing %s (pk=%d)", perm, perm.pk)
            perm.delete()
            deleted += 1
    logger.info("Total removed orphan object permissions instances: %d",
                deleted)
    return deleted
# TODO: should raise error when multiple UserObjectPermission direct relations
# are defined
def get_obj_perms_model(obj, base_cls, generic_cls):
    """
    Return the object-permission model directly related to ``obj``'s class,
    or ``generic_cls`` when no direct (non-generic) relation exists.

    :param obj: model class or instance whose permission model is wanted
    :param base_cls: abstract base class candidate models must subclass
    :param generic_cls: generic fallback model returned when no direct
        relation matches
    """
    # Accept either a model instance or a model class.
    if isinstance(obj, Model):
        obj = obj.__class__
    ctype = get_content_type(obj)
    # Django >= 1.8 replaced get_all_related_objects() with get_fields();
    # filter to auto-created reverse relations only.
    if django.VERSION >= (1, 8):
        fields = (f for f in obj._meta.get_fields()
                  if (f.one_to_many or f.one_to_one) and f.auto_created)
    else:
        fields = obj._meta.get_all_related_objects()
    for attr in fields:
        # The attribute holding the related model is also version-dependent.
        if django.VERSION < (1, 8):
            model = getattr(attr, 'model', None)
        else:
            model = getattr(attr, 'related_model', None)
        if (model and issubclass(model, base_cls) and
                model is not generic_cls):
            # if model is generic one it would be returned anyway
            if not model.objects.is_generic():
                # make sure that content_object's content_type is same as
                # the one of given obj
                fk = model._meta.get_field('content_object')
                if ctype == get_content_type(remote_model(fk)):
                    return model
    return generic_cls
def get_user_obj_perms_model(obj):
    """Return the model class linking the given ``obj`` with ``User``."""
    # Lazy import avoids a circular dependency with guardian.models.
    from guardian.models import UserObjectPermission, UserObjectPermissionBase
    return get_obj_perms_model(obj, UserObjectPermissionBase,
                               UserObjectPermission)
def get_group_obj_perms_model(obj):
    """Return the model class linking the given ``obj`` with ``Group``."""
    # Lazy import avoids a circular dependency with guardian.models.
    from guardian.models import GroupObjectPermission, GroupObjectPermissionBase
    return get_obj_perms_model(obj, GroupObjectPermissionBase,
                               GroupObjectPermission)
|
[
"ansonsry@sina.com"
] |
ansonsry@sina.com
|
f2e6015b515e915c24bf44cb57a88c4e12c0939f
|
e8ae11e5017507da59e2e92d423b6a1994490de4
|
/env/lib/python2.7/site-packages/azure/mgmt/scheduler/models/service_bus_brokered_message_properties.py
|
336add898c6253ae0f23c0ab299ae3681b024271
|
[] |
no_license
|
teopeurt/ansible-ubuntu-server
|
613d00cea28bc6531acf4a39aeeb9cd0baa2a391
|
b5b6127d2ee9723c5088443efe2ffb8ae30cfea7
|
refs/heads/master
| 2021-06-28T12:49:50.935753
| 2017-07-31T17:34:33
| 2017-07-31T17:34:33
| 98,912,808
| 0
| 1
| null | 2020-07-24T00:05:31
| 2017-07-31T17:32:56
|
Makefile
|
UTF-8
|
Python
| false
| false
| 3,997
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ServiceBusBrokeredMessageProperties(Model):
    """Properties of a brokered Service Bus message.

    Every parameter is optional and defaults to ``None``; each one maps to
    an attribute of the same name.  ``_attribute_map`` lists the wire names
    and serialization types:

    * str: ``content_type``, ``correlation_id``, ``label``, ``message_id``,
      ``partition_key``, ``reply_to``, ``reply_to_session_id``,
      ``session_id``, ``to``, ``via_partition_key``
    * bool: ``force_persistence``
    * datetime (ISO-8601 on the wire): ``scheduled_enqueue_time_utc``,
      ``time_to_live``
    """

    # Maps attribute names to their serialized key and msrest type.
    _attribute_map = {
        'content_type': {'key': 'contentType', 'type': 'str'},
        'correlation_id': {'key': 'correlationId', 'type': 'str'},
        'force_persistence': {'key': 'forcePersistence', 'type': 'bool'},
        'label': {'key': 'label', 'type': 'str'},
        'message_id': {'key': 'messageId', 'type': 'str'},
        'partition_key': {'key': 'partitionKey', 'type': 'str'},
        'reply_to': {'key': 'replyTo', 'type': 'str'},
        'reply_to_session_id': {'key': 'replyToSessionId', 'type': 'str'},
        'scheduled_enqueue_time_utc': {'key': 'scheduledEnqueueTimeUtc', 'type': 'iso-8601'},
        'session_id': {'key': 'sessionId', 'type': 'str'},
        'time_to_live': {'key': 'timeToLive', 'type': 'iso-8601'},
        'to': {'key': 'to', 'type': 'str'},
        'via_partition_key': {'key': 'viaPartitionKey', 'type': 'str'},
    }

    def __init__(self, content_type=None, correlation_id=None, force_persistence=None, label=None, message_id=None, partition_key=None, reply_to=None, reply_to_session_id=None, scheduled_enqueue_time_utc=None, session_id=None, time_to_live=None, to=None, via_partition_key=None):
        # Each keyword corresponds one-to-one with an attribute of the same
        # name, and _attribute_map holds exactly those names — copy them in
        # a loop rather than thirteen hand-written assignments.
        supplied = locals()
        for attribute in self._attribute_map:
            setattr(self, attribute, supplied[attribute])
|
[
"me@teopeurt.com"
] |
me@teopeurt.com
|
3c5eb2c6bf9c023422831c5ca31f64b01a36fb0a
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/86/usersdata/164/57228/submittedfiles/pico.py
|
97909e0698fdec2d38710817dcbb466f593e1da4
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
# -*- coding: utf-8 -*-
def pico(b):
    """Return 'S' if ``b`` is a "peak" list, otherwise 'N'.

    A peak list strictly increases up to a single maximum and then strictly
    decreases, with the maximum at neither end (so at least 3 elements).

    Fixes the original implementation, whose inner loops tested
    ``b[i] < b[i] + 1`` (always true — an infinite loop) and which returned
    the function object itself, so ``print(pico(a))`` printed a function
    repr.  The answer is now returned instead of printed inside.
    """
    n = len(b)
    # Walk up the strictly increasing prefix.
    i = 0
    while i < n - 1 and b[i] < b[i + 1]:
        i += 1
    # Walk down the strictly decreasing suffix from the candidate peak.
    j = i
    while j < n - 1 and b[j] > b[j + 1]:
        j += 1
    # Valid peak: some ascent (i > 0), peak not last (i < n-1), and the
    # descent consumed the rest of the list (j == n-1).
    if n >= 3 and 0 < i < n - 1 and j == n - 1:
        return 'S'
    return 'N'
# Read the list size, then the elements, and report whether it is a peak.
n = int(input('Digite a quantidade de elementos da lista: '))
a = [float(input('Digite os elementos da lista: ')) for _ in range(n)]
print(pico(a))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
e643e7e82c014086d290c39d39a507f120ea7360
|
c9d02cc6ac33723f8dbd7013ae3210c28691d125
|
/instances2dict_with_polygons.py
|
8a0f2948b2862672119535a3eaf35bfc13c2eae1
|
[] |
no_license
|
nhtlongcs/cityscape_to_polylines
|
05c040a998d0bfdf9ff1958f540229a8b69e1c48
|
55f91fc33c50fc5563164fb215c7af7c3f11a278
|
refs/heads/master
| 2022-04-28T09:52:01.628969
| 2020-05-02T07:17:51
| 2020-05-02T07:17:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,557
|
py
|
#!/usr/bin/python
#
# Convert instances from png files to a dictionary
# This files is created according to https://github.com/facebookresearch/Detectron/issues/111
from __future__ import print_function, absolute_import, division
import os, sys
sys.path.append( os.path.normpath( os.path.join( os.path.dirname( __file__ ) , '..' , 'helpers' ) ) )
# Cityscapes imports
from external.cityscapesscripts.evaluation.instance import *
from external.cityscapesscripts.helpers.csHelpers import *
# from csHelpers import *
import cv2
import cv2_util
# from PIL import Image
# import numpy as np
def instances2dict_with_polygons(imageFileList, verbose=False):
    """Build a per-image dictionary of Cityscapes instances with polygons.

    For each instance-id PNG in ``imageFileList``, instances are grouped by
    label name; labels with ``hasInstances`` additionally receive a
    'contours' entry holding the outer polygons of the instance mask
    (flattened point lists from OpenCV's findContours).

    :param imageFileList: path or list of paths to instance-id images
    :param verbose: print progress to stdout when True
    :returns: ``{absolute image path: {label name: [instance dicts]}}``
    """
    imgCount = 0
    instanceDict = {}
    # Allow a single path to be passed instead of a list.
    if not isinstance(imageFileList, list):
        imageFileList = [imageFileList]
    if verbose:
        print("Processing {} images...".format(len(imageFileList)))
    for imageFileName in imageFileList:
        # Load image
        img = Image.open(imageFileName)
        # print(imageFileName)
        # Image as numpy array
        imgNp = np.array(img)
        # Initialize label categories
        instances = {}
        for label in labels:
            instances[label.name] = []
        # Loop through all instance ids in instance image
        for instanceId in np.unique(imgNp):
            # if instanceId < 1000:
            #     continue
            instanceObj = Instance(imgNp, instanceId)
            instanceObj_dict = instanceObj.toDict()
            #instances[id2label[instanceObj.labelID].name].append(instanceObj.toDict())
            if id2label[instanceObj.labelID].hasInstances:
                # Binary mask of this instance; RETR_EXTERNAL keeps only the
                # outermost contours, CHAIN_APPROX_NONE keeps every point.
                mask = (imgNp == instanceId).astype(np.uint8)
                contour, hier = cv2_util.findContours(
                    mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
                # Flatten each contour to [x0, y0, x1, y1, ...] lists.
                polygons = [c.reshape(-1).tolist() for c in contour]
                instanceObj_dict['contours'] = polygons
            instances[id2label[instanceObj.labelID].name].append(instanceObj_dict)
        # Key results by the absolute path so lookups are unambiguous.
        imgKey = os.path.abspath(imageFileName)
        instanceDict[imgKey] = instances
        imgCount += 1
        if verbose:
            print("\rImages Processed: {}".format(imgCount), end=' ')
            sys.stdout.flush()
    if verbose:
        print("")
    return instanceDict
def main(argv):
    """Collect the png paths from the command line and process them."""
    png_files = []
    # Only harvest paths when more than two arguments were supplied,
    # mirroring the original gate.
    if len(argv) > 2:
        png_files = [arg for arg in argv if "png" in arg]
    instances2dict_with_polygons(png_files, True)


if __name__ == "__main__":
    main(sys.argv[1:])
|
[
"you@example.com"
] |
you@example.com
|
2691d4e2fcd1060926b734171d61fc077aac74a1
|
2d73ac2c921bb84756478e042ba33ba09c6f8be0
|
/sxm_player/workers/status.py
|
874b3668f44ed5921ce5b6e4d90da1fffd866df8
|
[
"MIT"
] |
permissive
|
fdigeron/sxm-player
|
7aa6aba111b1c6bedf6ed8e6c89f7d66feb26c8d
|
2ca91fe216d1ad823b1ad7f9cfe43db4a016bd96
|
refs/heads/master
| 2023-06-23T18:34:47.892363
| 2021-07-30T13:27:04
| 2021-07-30T13:27:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,347
|
py
|
import httpx
from ..queue import EventMessage, EventTypes
from .base import SXMLoopedWorker
__all__ = ["StatusWorker"]
CHECK_INTERVAL = 30
class StatusWorker(SXMLoopedWorker):
    """Looped worker that polls the SXM client's HTTP channels endpoint."""

    NAME = "status_check"
    _ip: str
    _port: int
    _delay: float = 30.0
    _failures: int = 0

    def __init__(self, port: int, ip: str, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # A wildcard bind address is not routable; poll via loopback instead.
        self._ip = "127.0.0.1" if ip == "0.0.0.0" else ip  # nosec
        self._port = port

    def loop(self):
        """One iteration of the worker loop: run the status check."""
        self.check_sxm()

    def check_sxm(self):
        """Probe the client's /channels/ endpoint and react to the result."""
        if not self._state.sxm_running:
            return

        self._log.debug("Checking SXM Client")
        response = httpx.get(f"http://{self._ip}:{self._port}/channels/")

        if not response.is_error:
            # Healthy: back off to the normal interval, reset the failure
            # count and publish the fresh channel list.
            self._delay = 30.0
            self._failures = 0
            self.push_event(
                EventMessage(self.name, EventTypes.UPDATE_CHANNELS, response.json())
            )
            return

        # adjust delay to check more often
        self._delay = 5.0
        self._failures += 1
        if self._failures > 3:
            self.push_event(
                EventMessage(self.name, EventTypes.RESET_SXM, "bad status check")
            )
|
[
"cbailey@mort.is"
] |
cbailey@mort.is
|
eea64d13a9b9e501d67e09b3316d74dc54007207
|
edd1adb88112045d16d3e6417117d45ceed4a634
|
/classical/woodworking-sat11-strips/api.py
|
54d9042dbc3b4d84a2cf98e4827effa7980c343b
|
[] |
no_license
|
AI-Planning/classical-domains
|
26de25bf23622f95c877960c1d52f444922d8737
|
4bd0b42d89ea02bd38af6f93cf20a0ab0cbda9d9
|
refs/heads/main
| 2023-04-27T07:55:55.832869
| 2023-03-29T01:46:11
| 2023-03-29T01:46:11
| 253,298,999
| 24
| 12
| null | 2023-04-18T01:45:39
| 2020-04-05T18:02:53
|
PDDL
|
UTF-8
|
Python
| false
| false
| 2,598
|
py
|
# The twenty woodworking instances all share one domain file; generate the
# (domain, problem) pairs instead of spelling out forty paths by hand.
_WOODWORKING_DOMAIN = 'woodworking-sat11-strips/domain.pddl'

domains = [
    {'description': 'Simulates the works in a woodworking workshop where there is some quantity of wood that has to be polished, coloured, etc. using different tools with different costs. Parameters of each problem are the parts to be done and the quantity (in % of necessary) of available wood (boards). The higher the number of parts and the boards the more difficult the problem is.',
     'ipc': '2011',
     'name': 'woodworking',
     'problems': [(_WOODWORKING_DOMAIN,
                   'woodworking-sat11-strips/p%02d.pddl' % index)
                  for index in range(1, 21)]}
]
|
[
"christian.muise@gmail.com"
] |
christian.muise@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.