| column | dtype | range |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–288 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64, nullable | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string, nullable | 22 classes |
| gha_event_created_at | timestamp[us], nullable | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string, nullable | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 – 1 |
| author_id | string | length 1 – 132 |
cb781d9d988c44c11a77c73ac37e5299553ac34a
|
c74b29b68211a51d7283d57b24d7cf83422a8ceb
|
/classertest.py
|
ffa1e7ece13d8dbad8941e217e8ee1bf19fb8681
|
[] |
no_license
|
proycon/nlpsandbox
|
63359e7cdd709dd81d66aed9bf1437f8ecf706a0
|
22e5f85852b7b2a658c6b94c3dedd425a5d6396f
|
refs/heads/master
| 2020-12-09T19:37:10.040962
| 2019-04-23T17:17:15
| 2019-04-23T17:17:15
| 2,347,265
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 857
|
py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from pynlpl.statistics import FrequencyList
from pynlpl.textprocessors import crude_tokenizer, Classer
import sys
import codecs
import asizeof
freqlist = FrequencyList()
f = codecs.open(sys.argv[1], 'r','utf-8')
for line in f:
line = crude_tokenizer(line.strip())
freqlist.append(line)
f.close()
print "FREQLIST: " ,asizeof.asizeof(freqlist)
classer = Classer(freqlist)
print "CLASSER: " ,asizeof.asizeof(classer)
classer2 = Classer(freqlist, False,True)
print "CLASSER (ONLY DECODER): " ,asizeof.asizeof(classer2)
freqlist2 = FrequencyList()
f = codecs.open(sys.argv[1], 'r','utf-8')
for line in f:
line = crude_tokenizer(line.strip())
freqlist2.append(classer.encodeseq(line))
f.close()
print "FREQLIST-AFTER-CLASSER: " ,asizeof.asizeof(freqlist2)
|
[
"proycon@anaproy.nl"
] |
proycon@anaproy.nl
|
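The classertest.py row above is Python 2 (bare print statements) and sizes its structures with asizeof. Under Python 3 the same measurement idiom is available from the pympler package; a minimal sketch, assuming `pip install pympler`:

```python
# The asizeof measurement idiom under Python 3; assumes the pympler
# package is installed (pip install pympler).
from pympler import asizeof

freqs = {"the": 102, "cat": 7, "sat": 3}  # stand-in for the FrequencyList
print("deep size in bytes:", asizeof.asizeof(freqs))
```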
b152b59d05c3e3d5cdabcc403e4d35db597bceab
|
235fb362b5af1f7dbd90dc3819fe63f18e074e9d
|
/learn_pyqt/pyqt5-cv2-multithreaded-master/SharedImageBuffer.py
|
faf4fedcd3df098ddfccf082c84eb6906e8874fe
|
[] |
no_license
|
cener-1999/learn_about_python
|
74c9b8c6a546224261d5577183a946a78ca7e84f
|
86cfc0a5621f86fc8a1885a39847d40b33137c49
|
refs/heads/master
| 2023-04-30T06:38:34.459506
| 2021-05-18T14:20:29
| 2021-05-18T14:20:29
| 368,473,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,162
|
py
|
from PyQt5.QtCore import QMutexLocker, QMutex, QWaitCondition
class SharedImageBuffer(object):
def __init__(self):
# Initialize variables(s)
self.nArrived = 0
self.doSync = False
self.syncSet = set()
self.wc = QWaitCondition()
self.imageBufferDict = dict()
self.mutex = QMutex()
def add(self, deviceUrl, imageBuffer, sync=False):
# Device stream is to be synchronized
if sync:
with QMutexLocker(self.mutex):
self.syncSet.add(deviceUrl)
# Add image buffer to map
self.imageBufferDict[deviceUrl] = imageBuffer
def getByDeviceUrl(self, deviceUrl):
return self.imageBufferDict[deviceUrl]
def removeByDeviceUrl(self, deviceUrl):
# Remove buffer for device from imageBufferDict
self.imageBufferDict.pop(deviceUrl)
# Also remove from syncSet (if present)
with QMutexLocker(self.mutex):
if self.syncSet.__contains__(deviceUrl):
self.syncSet.remove(deviceUrl)
self.wc.wakeAll()
def sync(self, deviceUrl):
# Only perform sync if enabled for specified device/stream
self.mutex.lock()
if self.syncSet.__contains__(deviceUrl):
# Increment arrived count
self.nArrived += 1
# We are the last to arrive: wake all waiting threads
if self.doSync and self.nArrived == len(self.syncSet):
self.wc.wakeAll()
# Still waiting for other streams to arrive: wait
else:
self.wc.wait(self.mutex)
# Decrement arrived count
self.nArrived -= 1
self.mutex.unlock()
def wakeAll(self):
with QMutexLocker(self.mutex):
self.wc.wakeAll()
def setSyncEnabled(self, enable):
self.doSync = enable
def isSyncEnabledForDeviceUrl(self, deviceUrl):
return self.syncSet.__contains__(deviceUrl)
def getSyncEnabled(self):
return self.doSync
def containsImageBufferForDeviceUrl(self, deviceUrl):
return self.imageBufferDict.__contains__(deviceUrl)
|
[
"1065802928@qq.com"
] |
1065802928@qq.com
|
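SharedImageBuffer.sync() above is a rendezvous: every synchronized stream blocks until all streams have delivered a frame, and the last arrival wakes the rest. The same pattern with stdlib primitives, as a minimal sketch that runs without PyQt5 (the stream names are made up):

```python
# The rendezvous pattern behind SharedImageBuffer.sync(), rebuilt with the
# standard library's Barrier so it can run without PyQt5.
import threading

barrier = threading.Barrier(2)  # two synchronized "camera streams"

def stream(name):
    for frame in range(3):
        # ... grab/process a frame here ...
        barrier.wait()  # block until every stream reaches this frame
        print("%s released frame %d" % (name, frame))

threads = [threading.Thread(target=stream, args=(n,)) for n in ("cam0", "cam1")]
for t in threads:
    t.start()
for t in threads:
    t.join()
```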
18da7de4cc349c8c1d2580cadf1d38fed8ba6dfc
|
1800155dcdb48bf956fa423858a8cc20ed27e6cb
|
/two-sum-iii-data-structure-design.py
|
24185cd468ecda7dacfaaced0516ae3647362d6b
|
[] |
no_license
|
gitprouser/LeetCode-3
|
1cc2d1dbbf439af4b3768da388dafd514cc5432b
|
530ea79f0377e1fc3fbfb5c5cfe7768159144e57
|
refs/heads/master
| 2021-06-06T16:30:14.795093
| 2016-08-22T21:40:01
| 2016-08-22T21:40:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 828
|
py
|
class TwoSum(object):
def __init__(self):
"""
initialize your data structure here
"""
self.table = {}
def add(self, number):
"""
Add the number to an internal data structure.
:rtype: nothing
"""
self.table[number] = self.table.get(number, 0) + 1
def find(self, value):
"""
Find if there exists any pair of numbers which sum is equal to the value.
:type value: int
:rtype: bool
"""
for i in self.table.keys():
j = value - i
if (i == j and self.table[i] > 1) or (i != j and j in self.table):
return True
return False
# Your TwoSum object will be instantiated and called as such:
# twoSum = TwoSum()
# twoSum.add(number)
# twoSum.find(value)
|
[
"tohaowu@gmail.com"
] |
tohaowu@gmail.com
|
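A small driver for the TwoSum class above, mirroring its commented usage; the values are arbitrary:

```python
# Hypothetical usage of the TwoSum class above.
two_sum = TwoSum()
for n in (1, 3, 5):
    two_sum.add(n)
print(two_sum.find(4))    # True: 1 + 3
print(two_sum.find(10))   # False: only one 5 stored, and 5 + 5 needs two
two_sum.add(5)
print(two_sum.find(10))   # True: the count of 5 now exceeds 1
```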
e35651f899532537e810f36a9aa113eb399a5eaa
|
382034646e9d3e32c8e63e8d83d2dd7da5be4ef3
|
/workery/shared_foundation/tests/models/test_opening_hours_specification.py
|
ee5abb18bc973e46a7b73620b7db5b912280e1f9
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
wahello/workery-django
|
80c88ecb7968951719af6857711891ec3787cf46
|
289318b0333d830c089f4492716c38d409c365ed
|
refs/heads/master
| 2020-03-30T04:21:48.642659
| 2018-09-28T01:30:22
| 2018-09-28T01:30:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,728
|
py
|
# -*- coding: utf-8 -*-
from django.core.management import call_command
from starterkit.utils import get_unique_username_from_email
from django_tenants.test.cases import TenantTestCase
from django_tenants.test.client import TenantClient
from django.urls import reverse
from shared_foundation.models import *
TEST_USER_EMAIL = "bart@workery.ca"
TEST_USER_USERNAME = "bart@workery.ca"
TEST_USER_PASSWORD = "123P@$$w0rd"
TEST_USER_TEL_NUM = "123 123-1234"
TEST_USER_TEL_EX_NUM = ""
TEST_USER_CELL_NUM = "123 123-1234"
class TestSharedOpeningHoursSpecification(TenantTestCase):
"""
Console:
python manage.py test shared_foundation.tests.models.test_opening_hours_specification
"""
def setUp(self):
super(TestSharedOpeningHoursSpecification, self).setUp()
self.c = TenantClient(self.tenant)
self.user = SharedUser.objects.create(
first_name="Bart",
last_name="Mika",
email=TEST_USER_EMAIL,
is_active=True,
)
self.obj = SharedOpeningHoursSpecification.objects.create(
owner=self.user,
closes="9:00 PM",
day_of_week="Monday",
opens="8:00 AM"
)
def tearDown(self):
del self.c
self.obj.delete()
super(TestSharedOpeningHoursSpecification, self).tearDown()
def test_str(self):
self.assertIsNotNone(str(self.obj))
self.assertIn("9:00 PM", self.obj.closes)
def test_delete_all(self):
SharedOpeningHoursSpecification.objects.delete_all()
try:
obj = SharedOpeningHoursSpecification.objects.get()
except SharedOpeningHoursSpecification.DoesNotExist:
self.assertTrue(True)
|
[
"bart@mikasoftware.com"
] |
bart@mikasoftware.com
|
27139caa2ff73ba9f0c5c0772b1633c315e9aa52
|
a939e018333a9ecd26ddc618f99835b7eb381686
|
/deploy/vertical_crawler_youtube/le_crawler/common/page_local_writer.py
|
3a63fce7ba2cc572e552cac20ddeef9e74c0b3e1
|
[] |
no_license
|
cash2one/crawl_youtube
|
bff5ba254001c2f31f770e55a4aca39bc54e45ee
|
0dc40186a1d89da2b00f29d4f4edfdc5470eb4fc
|
refs/heads/master
| 2021-01-16T22:30:17.800282
| 2016-02-18T11:50:09
| 2016-02-18T11:50:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,190
|
py
|
#!/usr/bin/python
#
# Copyright 2014 LeTV Inc. All Rights Reserved.
__author__ = 'guoxiaohe@letv.com'
import time
import os
import Queue
import threading
from scrapy import log
from scrapy.utils.project import get_project_settings
from ..core.page_writer import PageWriterBase
"""
for json format data writer
"""
class PageLocalJsonWriter(PageWriterBase):
def __init__(self, spider):
PageWriterBase.__init__(self, spider)
self._init(
get_project_settings().getint('LOCAL_PAGE_WRITER_DATA_TIME_LIMIT', 86400),
get_project_settings().getint('LOCAL_PAGE_WRITER_DATA_FLUSH_LIMIT', 20000),
get_project_settings().get('LOCAL_PAGE_WRITER_DATA_DIR', '/letv/crawler_delta/')
)
self.set_name('PageLocalJsonWriter')
def _init(self, gen_max_time = 86400, file_max_nums = 2000, data_dir = ""):
if not os.path.isdir(data_dir):
raise Exception('%s is not dir' % (data_dir))
self.file_fp_ = None
self.current_file_name_ = ''
self.total_items_ = 0
self.current_nums_ = 0
self.gen_file_max_time_threshold_ = gen_max_time # seconds between forced file rotations
self.max_lines_per_file_ = file_max_nums
self.last_flush_time_ = int(time.time())
self.data_dir_ = data_dir
self.exit_ = False # set to True by finalize(); read by the writer thread
self.data_queue_ = Queue.LifoQueue(maxsize = 10240)
thread = threading.Thread(target = self.file_writer_manger, args = ())
thread.start()
def finalize(self):
self.exit_ = True
while not self.data_queue_.empty():
self.spider_.log('page_local_writer queue[%d]' % (self.data_queue_.qsize()), log.INFO)
time.sleep(1)
self.spider_.log('%s write items[%s]' % (self.name, self.total_items_),
log.INFO)
def process_item(self, item):
self.add_item(item)
def add_item(self, item):
if not item:
return
while True:
try:
self.data_queue_.put(item, block = True, timeout = 5)
return
except Exception, e:
self.spider_.log('try to put item into queue error %s, size %d' % (e, self.data_queue_.qsize()))
continue
def status(self):
return 'total item wrote: %s' % (self.total_items_)
def gen_filestr(self):
return os.path.join(self.data_dir_, '%s_%d'%(time.strftime('%Y%m%d_%H%M%S',
time.localtime()),
os.getpid()))
def gen_json_str(self, item):
if not item:
return None
try:
return item.to_json_str()
except:
self.spider_.log('Failed decoding [%s] with [%s]' %(item['url'],
item['page_encoding']), log.WARNING)
item['page'] = 'error decoding'
return None
def _prepare_writer(self):
if self.file_fp_:
self._dump_file()
self.current_file_name_ = self.gen_filestr()
self.file_fp_ = open(self.current_file_name_ + '.tmp', 'w+')
self.current_nums_ = 0
def _dump_file(self):
try:
if not self.file_fp_:
return False
self.file_fp_.close()
self.last_flush_time_ = int(time.time())
self.file_fp_ = None
if self.current_nums_ == 0:
os.remove(self.current_file_name_ + '.tmp')
else:
os.rename(self.current_file_name_ + '.tmp', self.current_file_name_ + '.json')
return True
except Exception, e:
print e
self.spider_.log('Error while dump file:[%s]' % self.current_file_name_,
log.ERROR)
return False
def file_writer_manger(self):
while not self.exit_ or not self.data_queue_.empty():
item = None
try:
item = self.data_queue_.get(block = True, timeout = 10)
except Exception, e:
self.spider_.log('get item from queue timeout[%s]' %(e), log.DEBUG)
item = None
while not self.file_fp_:
self._prepare_writer()
self.spider_.log('prepare file ptr:[%s]' % self.current_file_name_,
log.INFO)
time.sleep(1)
if item:
line_str = self.gen_json_str(item)
if line_str:
try:
#line_zip = zlib.compress(line_str, zlib.Z_BEST_COMPRESSION)
self.file_fp_.write(line_str)
self.file_fp_.write('\n')
self.current_nums_ += 1
self.total_items_ += 1
if self.current_nums_ > 0 and self.current_nums_ % 1000 == 0:
self.spider_.log('Flush result with [%d]' % (self.current_nums_), log.INFO)
self.file_fp_.flush()
except Exception, e:
print time.localtime()
print e
self.spider_.log('Error while write to file[%s]' % (self.current_file_name_))
nows = int(time.time())
if self.current_nums_ >= self.max_lines_per_file_ or (self.current_nums_ > 0
and (nows - self.last_flush_time_) >= self.gen_file_max_time_threshold_):
# flush file to disk
if not self._dump_file():
self.spider_.log('flush file error:[%s]' % self.current_file_name_,
log.ERROR)
self.spider_.log('flush:[ %s ] with [%d]' %(self.current_file_name_,
self.current_nums_), log.INFO)
self.spider_.log('page_local_writer manager exit normal', log.INFO)
self._dump_file()
|
[
"zjc0516@126.com"
] |
zjc0516@126.com
|
21a34dcd8837c42f9ea42b7bc1e4e5db25cfe7a5
|
f485dff7fcb036868d6e4053a7a6ccd7f95214bf
|
/week09/employee_091.py
|
62e4238ccb5f78116cc7d56777ee671bba48bdc0
|
[] |
no_license
|
jamesfallon99/CA117
|
aa4f851365aafe8a4888c85e1b8b2f571e2c9b2a
|
0055ccbbd710453c9574930b361c26fcde2b9036
|
refs/heads/master
| 2020-06-28T14:26:06.829418
| 2019-08-02T16:41:38
| 2019-08-02T16:41:38
| 200,254,591
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 858
|
py
|
#!/usr/bin/env python3
class Employee(object):
def __init__(self, name, number):
self.name = name
self.number = number
def wages(self):
return 0
def __str__(self):
l = []
l.append("Name: {}".format(self.name))
l.append("Number: {}".format(self.number))
l.append("Wages: {:.2f}".format(self.wages()))
return "\n".join(l)
class Manager(Employee):
def __init__(self, name, number, salary):
super().__init__(name, number)
self.salary = salary
def wages(self):
return self.salary / 52
class AssemblyWorker(Employee):
def __init__(self, name, number, hourly_rate, hours):
super().__init__(name, number)
self.hourly_rate = hourly_rate
self.hours = hours
def wages(self):
return self.hourly_rate * self.hours
|
[
"noreply@github.com"
] |
jamesfallon99.noreply@github.com
|
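A short driver for the Employee hierarchy above; the names and pay figures are invented for illustration:

```python
# Hypothetical driver for the Employee/Manager/AssemblyWorker classes above.
staff = [
    Manager("Alice", 1, 52000),          # 52000 / 52 = 1000.00 per week
    AssemblyWorker("Bob", 2, 12.5, 40),  # 12.5 * 40 = 500.00
]
for employee in staff:
    print(employee)   # __str__ reports name, number, and computed wages
```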
52be7e9c34bece4e6db3c03156d40c378d72b6ca
|
a1684facd42cba1cd8af003ccffb530f56582e9a
|
/backend/pet/admin.py
|
94b98dc921d660340f7a676c08544c25cf98bd86
|
[] |
no_license
|
dunderlabs/siteong7vidas
|
e13749863a8f5d64d469b735765044893cc38536
|
fb3c529025e05adcc9aab17e1eeada909a193e56
|
refs/heads/master
| 2021-04-26T21:47:58.638150
| 2018-10-30T15:37:19
| 2018-10-30T15:37:19
| 124,160,847
| 6
| 1
| null | 2018-11-28T02:05:10
| 2018-03-07T01:28:12
|
Python
|
UTF-8
|
Python
| false
| false
| 464
|
py
|
from django.contrib import admin
from .models import Pet, PetBreed, PetPelage
class PetAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
class PetBreedAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
class PetPelageAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
admin.site.register(Pet, PetAdmin)
admin.site.register(PetBreed, PetBreedAdmin)
admin.site.register(PetPelage, PetPelageAdmin)
|
[
"luizrodrigo46@hotmail.com"
] |
luizrodrigo46@hotmail.com
|
c30f230f2e18f35e186cf375fa987efcef0c253c
|
3712a929d1124f514ea7af1ac0d4a1de03bb6773
|
/开班笔记/python数据分析机器学习部分/机器学习/day08/kpca.py
|
3ccd0a9d11baf3e0fd08e50a8ae07ade3578adfb
|
[] |
no_license
|
jiyabing/learning
|
abd82aa3fd37310b4a98b11ea802c5b0e37b7ad9
|
6059006b0f86aee9a74cfc116d2284eb44173f41
|
refs/heads/master
| 2020-04-02T20:47:33.025331
| 2018-10-26T05:46:10
| 2018-10-26T05:46:10
| 154,779,387
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 926
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sklearn.datasets as sd
import sklearn.decomposition as dc
import matplotlib.pyplot as mp
x, y = sd.make_circles(n_samples=500, factor=0.2,
noise=0.04)
model = dc.KernelPCA(kernel='rbf',
fit_inverse_transform=True,
gamma=10)
kpca_x = model.fit_transform(x)
mp.figure('Original', facecolor='lightgray')
mp.title('Original', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.scatter(x[:, 0], x[:, 1], s=60, c=y, cmap='brg',
alpha=0.5)
mp.figure('KPCA', facecolor='lightgray')
mp.title('KPCA', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.scatter(kpca_x[:, 0], kpca_x[:, 1], s=60, c=y,
cmap='brg', alpha=0.5)
mp.show()
|
[
"yabing_ji@163.com"
] |
yabing_ji@163.com
|
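The kpca.py script above uses an RBF kernel because the two rings from make_circles are not linearly separable; a linear projection only rotates and rescales the plane. A quick contrast, assuming it is run after the script so that `x` is defined:

```python
# For contrast: linear PCA on the same circles (run after the script above
# so that x exists). The rings remain inseparable under any linear map,
# which is what motivates the RBF kernel used above.
import sklearn.decomposition as dc
pca_x = dc.PCA(n_components=2).fit_transform(x)
print(pca_x[:3])  # just a rotation/rescaling of the inputs
```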
82893714841e99045c7d0a2c1bbfa4dc32f9deac
|
3d1a8ccef4153b6154c0aa0232787b73f45137ba
|
/services/customer/server.py
|
02bd6b68c181a44732e11ad2a8e09a2eb503637c
|
[] |
no_license
|
jan25/hotrod-python
|
a0527930b2afc33ca3589c1cf7ae07814148535a
|
dbce7df1bc2d764351dd2ba1122078fc525caed7
|
refs/heads/master
| 2020-06-03T14:59:35.627093
| 2019-06-22T16:52:19
| 2019-06-22T16:52:19
| 191,616,546
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 845
|
py
|
from flask import Flask, request, jsonify
from uwsgidecorators import postfork
import services.common.middleware as middleware
import services.config.settings as config
import services.common.serializer as serializer
from . import db
app = Flask(__name__)
@postfork
def postfork():
middleware.init_tracer('customer')
@app.before_request
def before_request():
return middleware.before_request(request)
@app.after_request
def after_request(response):
return middleware.after_request(response)
@app.route('/customer')
def get_customer():
customer_id = request.args.get('id')
customer_obj = db.get_customer_by_id(customer_id)
return jsonify(serializer.obj_to_json(customer_obj))
def start_server(debug):
app.run(host='0.0.0.0', port=config.CUSTOMER_PORT, debug=debug)
if __name__ == '__main__': start_server(True)
|
[
"abhilashgnan@gmail.com"
] |
abhilashgnan@gmail.com
|
1552ae664ea965da697ab12f85e1cc327fb30124
|
6be8aa517e679b33b47d35f100e6590902a8a1db
|
/DP/Problem19.py
|
07c8de548b63717ec4070b31545cf9ef5c3e6a61
|
[] |
no_license
|
LeeJuhae/Algorithm-Python
|
7ca4762712e5e84d1e277abecb3bf39c9cbd4e56
|
729947b4428205adfbac194a5527b0eeafe1c525
|
refs/heads/master
| 2023-04-24T01:02:36.430970
| 2021-05-23T07:17:25
| 2021-05-23T07:17:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 504
|
py
|
# https://www.acmicpc.net/problem/11049
import sys
read = sys.stdin.readline
n = int(read().strip())
arr = [tuple(map(int, read().strip().split())) for _ in range(n)]
dp = [[-1 for _ in range(n)] for _ in range(n)]
def mul(a, b):
if dp[a][b] != -1:
return dp[a][b]
if a == b:
return 0
ret = float('inf')
for k in range(a, b):
ret = min(ret, mul(a, k) + mul(k + 1, b) + arr[a][0] * arr[k][1] * arr[b][1])
dp[a][b] = ret
return ret
print(mul(0, n - 1))
|
[
"gusdn0657@gmail.com"
] |
gusdn0657@gmail.com
|
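Problem19.py above is matrix chain multiplication (BOJ 11049): mul(a, b) is the cheapest cost of multiplying matrices a..b, trying every split point k. A self-contained worked example of the same recurrence, with a hypothetical helper name and memoization via lru_cache instead of the dp table:

```python
# Self-contained sketch of the recurrence above; matrix_chain_cost is a
# hypothetical helper, not part of the original script.
from functools import lru_cache

def matrix_chain_cost(dims):
    """dims: tuple of (rows, cols) pairs for consecutive matrices."""
    @lru_cache(None)
    def mul(a, b):
        if a == b:
            return 0
        return min(mul(a, k) + mul(k + 1, b) + dims[a][0] * dims[k][1] * dims[b][1]
                   for k in range(a, b))
    return mul(0, len(dims) - 1)

# (5x3)(3x2)(2x6): (AB)C costs 5*3*2 + 5*2*6 = 90, A(BC) costs 36 + 90 = 126.
print(matrix_chain_cost(((5, 3), (3, 2), (2, 6))))  # 90
```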
f269f66869c2a17070ed2079a79139e33939efe5
|
377b908723f157ab30733fa5ff15db90c0be9693
|
/build/celex4/catkin_generated/pkg.installspace.context.pc.py
|
f861cb62d3e2dc6d77d7eb7eb7bef0ed002c193b
|
[] |
no_license
|
eleboss/celex4_ros
|
af0b28ec0ba79016ae80e90e0cd3d270a8865fa1
|
28500367e8e28e6e5384036b6f9bd8981a7b932e
|
refs/heads/master
| 2020-04-30T14:03:43.823980
| 2019-03-21T05:59:05
| 2019-03-21T05:59:05
| 176,877,974
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "cv_bridge;dvs_msgs;image_transport;roscpp;roslib;sensor_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "celex4"
PROJECT_SPACE_DIR = "/home/ubuntu/Documents/celex4_ros/install"
PROJECT_VERSION = "0.0.0"
|
[
"you@example.com"
] |
you@example.com
|
2eee67dfdbd5f09719f524851ce744a41a9b3f32
|
ce18877752c43eb66f03bdc169e3ef45a1720d15
|
/src/apps_common/mailerlite/__init__.py
|
2ab0448f4bb1aa6cdcd84a17a2a573dbbf178cb1
|
[] |
no_license
|
ajlexgit/robin
|
26e8682ae09795acf0f3fc1297d20044285b83df
|
25ac1c3455838fc26656cfa16d05b2943d0cbba6
|
refs/heads/master
| 2021-07-13T22:49:09.177207
| 2017-10-13T07:44:42
| 2017-10-13T07:44:42
| 103,655,240
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,880
|
py
|
"""
Модуль подписки на рассылку.
Зависит от:
premailer
libs.associative_request
libs.color_field
libs.pipeline
libs.templatetags
Установка:
# settings.py:
INSTALLED_APPS = (
...
'mailerlite',
...
)
SUIT_CONFIG = {
...
{
'app': 'mailerlite',
'icon': 'icon-envelope',
'models': (
'Campaign',
'Subscriber',
'Group',
'MailerConfig',
)
},
}
MAILERLITE_APIKEY = '438b16c79cbd9acea354a1c1ad5eda08'
# urls.py:
...
url(r'^mailerlite/', include('mailerlite.urls', namespace='mailerlite')),
...
# crontab
*/15 * * * * . $HOME/.profile; ~/aor.com/env/bin/python3 ~/aor.com/src/manage.py mailerlite -ig -es -ic -ec
10 * * * * . $HOME/.profile; ~/aor.com/env/bin/python3 ~/aor.com/src/manage.py mailerlite -ig -eg -es -is
Использование:
# views.py:
from mailerlite import SubscribeForm
class IndexView(View):
def get(self, request, *args, **kwargs):
...
return self.render_to_response({
subscribe_form': SubscribeForm(),
...
})
# template.html:
<form action="" method="post" id="subscribe-form">
{% render_form subscribe_form %}
<input type="submit" value="Subscribe" class="btn">
</form>
"""
default_app_config = 'mailerlite.apps.Config'
|
[
"pix666@ya.ru"
] |
pix666@ya.ru
|
ae241182ad08298d4046ba796efca16ca15a0257
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/arc028/A/4640482.py
|
31948a2b01b51da02f93d6230fbd993758c38420
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 333
|
py
|
n,a,b = map(int,input().split())
turn = 1
while True:
if turn == 1:
if n > a:
n -= a
turn *= -1
else:
print("Ant")
exit()
else:
if n > b:
n -= b
turn *= -1
else:
print("Bug")
exit()
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
cf526d0998ec05ba8a704de0e3067c5e2d706869
|
89b45e528f3d495f1dd6f5bcdd1a38ff96870e25
|
/AutomateTheBoringStuffWithPython/chapter12/spreadsheet_to_text_files.py
|
40698e57cc6c206b11f50b49c18978d9f96df97b
|
[] |
no_license
|
imatyukin/python
|
2ec6e712d4d988335fc815c7f8da049968cc1161
|
58e72e43c835fa96fb2e8e800fe1a370c7328a39
|
refs/heads/master
| 2023-07-21T13:00:31.433336
| 2022-08-24T13:34:32
| 2022-08-24T13:34:32
| 98,356,174
| 2
| 0
| null | 2023-07-16T02:31:48
| 2017-07-25T22:45:29
|
Python
|
UTF-8
|
Python
| false
| false
| 602
|
py
|
#!/usr/bin/env python3
import openpyxl
def main():
spreadsheet = "text_to_spreadsheet.xlsx"
wb = openpyxl.load_workbook(spreadsheet)
sheet = wb.active
nrows = sheet.max_row
ncols = sheet.max_column
for col in range(1, ncols+1):
text_file = "spreadsheet_" + str(col) + ".txt"
with open(text_file, 'w') as f:
for row in range(1, nrows+1):
content = sheet.cell(row=row, column=col).value
if content is None:
continue
f.write(str(content))
if __name__ == "__main__":
main()
|
[
"i.matukin@gmail.com"
] |
i.matukin@gmail.com
|
4b17c0924e25f282c9da1e9945670cf8ad43f50d
|
10b205507a3598da489bfcfa10bea769b3700b07
|
/snaffle/snaffle.py
|
cd4fe50b80793721a82cfb75e639e8c4f18464c9
|
[] |
no_license
|
yattom/snaffle
|
fd65b4aebdb2ad9083ff6bc9be96b366399974f5
|
fce2cec664c569a584925dd4d89f1eb7a2acd9a0
|
refs/heads/master
| 2021-01-10T01:22:49.387838
| 2016-02-10T01:01:17
| 2016-02-10T01:01:17
| 51,367,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 809
|
py
|
# coding: utf-8
import time
import logging
import webbrowser
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
import snaffle.ws_server
class Snaffle:
def __init__(self, start=True):
if start:
self.start()
def start(self):
snaffle.ws_server.start_server_tornado()
webbrowser.open('http://localhost:9999/index')
def shutdown(self):
snaffle.ws_server.shutdown()
def write_something(self, msg):
snaffle.ws_server.write_something(msg)
def send_script(self, script):
msg = '''
{{
"type": "script",
"content": "{0}"
}}
'''.format(script)
self.write_something(msg)
|
[
"tsutomu.yasui@gmail.com"
] |
tsutomu.yasui@gmail.com
|
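send_script in snaffle.py above splices the script text into a JSON template by string formatting, which breaks as soon as the script contains a double quote or backslash. A sketch of a quoting-safe alternative using json.dumps (build_script_msg is a hypothetical name):

```python
# Quoting-safe construction of the "script" message; json.dumps escapes
# arbitrary script text, unlike the string template above.
import json

def build_script_msg(script):
    return json.dumps({"type": "script", "content": script})

print(build_script_msg('alert("hi")'))  # {"type": "script", "content": "alert(\"hi\")"}
```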
1ca7985800be5d1b8bc8cec044cb5e460feeb211
|
74ed8d533e86d57c7db9eca879a9fb5b979b8eaf
|
/stanford_corenlp_demo/common.py
|
35504554c6705b83898f4a7c6159a7f78dc7a351
|
[
"MIT"
] |
permissive
|
GyxChen/AmusingPythonCodes
|
97c5a2080d47399080df005a0643eddb56bceb25
|
388e90c78c67b79c23b4a8fc1ebe29f26394a54b
|
refs/heads/master
| 2020-06-23T12:03:56.708448
| 2019-05-15T05:19:57
| 2019-05-15T05:19:57
| 198,617,528
| 0
| 1
| null | 2019-07-24T10:57:29
| 2019-07-24T10:57:29
| null |
UTF-8
|
Python
| false
| false
| 1,857
|
py
|
# encoding: utf-8
import xml.etree.ElementTree as ETree
import requests
import re
def dataset_xml_iterator(filename):
"""An iterator to convert xml-format dataset to more readable text format"""
instances = ETree.parse(filename).getroot()
for instance in instances:
paragraph = instance.find('text').text
questions = instance.findall('questions')[0]
queries = []
for question in questions.findall('question'):
tmp_dict = {'Text': question.get('text')}
for answer in question.findall('answer'):
tmp_dict[answer.get('correct')] = answer.get('text')
queries.append(tmp_dict)
yield paragraph, queries
def read_nth_data(filename, n):
"""Read Nth paragraph and corresponding queries"""
index = 0
for paragraph, queries in dataset_xml_iterator(filename):
index += 1
if n == index:
# para = paragraph
# que = queries
return paragraph, queries
return None
def extract_conceptnet(phrase):
"""Access ConceptNet API and read relational triples as well as their weight and simple example"""
url_head = 'http://api.conceptnet.io/c/en/' # access ConceptNet API
raw_json = requests.get(url_head + phrase).json()
edges = raw_json['edges']
if not edges: # if edges is empty, which means ConceptNet doesn't contain such concept or node
return None
concepts = []
for edge in edges:
triple = re.findall(r'/a/\[/r/(.*?)/.*?,/c/en/(.*?)/.*?,/c/en/(.*?)/.*?\]', edge['@id'])[0] # ERE triple
surface_text = re.sub(r'[\[\]]', '', '' if edge['surfaceText'] is None else edge['surfaceText']) # example
weight = edge['weight'] # weight
concepts.append({'Triple': triple, 'weight': weight, 'example': surface_text})
return concepts
|
[
"isaac.changhau@gmail.com"
] |
isaac.changhau@gmail.com
|
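A hypothetical driver for the ConceptNet helpers above; it issues a live HTTP request to api.conceptnet.io, so it is guarded and meant only as a sketch:

```python
# Hypothetical driver for extract_conceptnet above; requires network access
# to api.conceptnet.io, so treat it as a sketch rather than a test.
if __name__ == '__main__':
    concepts = extract_conceptnet('coffee')
    if concepts:
        for c in concepts[:3]:
            print(c['Triple'], c['weight'], c['example'])
```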
c6e68b46d7b871e2b7567e4b2422530a93f57df6
|
3dc3bbe607ab7b583eb52dbaae86636eb642960a
|
/configs/skeleton/posec3d/rgbpose_conv3d/pose_only.py
|
ad413da6a64196514be0bf0a8fef32008dad7d92
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmaction2
|
659c36c6083fd3d9d072e074a8d4b3a50342b9bd
|
582b78fd6c3240500d5cacd292339d7d1ddbb056
|
refs/heads/main
| 2023-08-28T18:14:50.423980
| 2023-08-10T09:20:06
| 2023-08-10T09:20:06
| 278,810,244
| 3,498
| 1,028
|
Apache-2.0
| 2023-09-07T06:50:44
| 2020-07-11T07:19:10
|
Python
|
UTF-8
|
Python
| false
| false
| 3,987
|
py
|
_base_ = '../../../_base_/default_runtime.py'
model = dict(
type='Recognizer3D',
backbone=dict(
type='ResNet3dSlowOnly',
in_channels=17,
base_channels=32,
num_stages=3,
out_indices=(2, ),
stage_blocks=(4, 6, 3),
conv1_stride_s=1,
pool1_stride_s=1,
inflate=(0, 1, 1),
spatial_strides=(2, 2, 2),
temporal_strides=(1, 1, 1),
dilations=(1, 1, 1)),
cls_head=dict(
type='I3DHead',
in_channels=512,
num_classes=60,
dropout_ratio=0.5,
average_clips='prob'))
dataset_type = 'PoseDataset'
ann_file = 'data/skeleton/ntu60_2d.pkl'
left_kp = [1, 3, 5, 7, 9, 11, 13, 15]
right_kp = [2, 4, 6, 8, 10, 12, 14, 16]
train_pipeline = [
dict(type='UniformSampleFrames', clip_len=32),
dict(type='PoseDecode'),
dict(type='PoseCompact', hw_ratio=1., allow_imgpad=True),
dict(type='Resize', scale=(64, 64), keep_ratio=False),
dict(type='RandomResizedCrop', area_range=(0.56, 1.0)),
dict(type='Resize', scale=(56, 56), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5, left_kp=left_kp, right_kp=right_kp),
dict(type='GeneratePoseTarget', with_kp=True, with_limb=False),
dict(type='FormatShape', input_format='NCTHW_Heatmap'),
dict(type='PackActionInputs')
]
val_pipeline = [
dict(type='UniformSampleFrames', clip_len=32, num_clips=1, test_mode=True),
dict(type='PoseDecode'),
dict(type='PoseCompact', hw_ratio=1., allow_imgpad=True),
dict(type='Resize', scale=(64, 64), keep_ratio=False),
dict(type='GeneratePoseTarget', with_kp=True, with_limb=False),
dict(type='FormatShape', input_format='NCTHW_Heatmap'),
dict(type='PackActionInputs')
]
test_pipeline = [
dict(
type='UniformSampleFrames', clip_len=32, num_clips=10, test_mode=True),
dict(type='PoseDecode'),
dict(type='PoseCompact', hw_ratio=1., allow_imgpad=True),
dict(type='Resize', scale=(64, 64), keep_ratio=False),
dict(
type='GeneratePoseTarget',
with_kp=True,
with_limb=False,
left_kp=left_kp,
right_kp=right_kp),
dict(type='FormatShape', input_format='NCTHW_Heatmap'),
dict(type='PackActionInputs')
]
train_dataloader = dict(
batch_size=16,
num_workers=8,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
type='RepeatDataset',
times=10,
dataset=dict(
type=dataset_type,
ann_file=ann_file,
split='xsub_train',
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=16,
num_workers=8,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
ann_file=ann_file,
split='xsub_val',
pipeline=val_pipeline,
test_mode=True))
test_dataloader = dict(
batch_size=1,
num_workers=8,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
ann_file=ann_file,
split='xsub_val',
pipeline=test_pipeline,
test_mode=True))
val_evaluator = [dict(type='AccMetric')]
test_evaluator = val_evaluator
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=18, val_begin=1, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
param_scheduler = [
dict(
type='CosineAnnealingLR',
eta_min=0,
T_max=18,
by_epoch=True,
convert_to_iter_based=True)
]
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.2, momentum=0.9, weight_decay=0.0003),
clip_grad=dict(max_norm=40, norm_type=2))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (16 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=128)
|
[
"noreply@github.com"
] |
open-mmlab.noreply@github.com
|
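The auto_scale_lr block at the end of the config above refers to the linear LR scaling convention: when enabled, the optimizer's lr is multiplied by the ratio of the actual total batch size to base_batch_size. A sketch of the arithmetic, assuming that convention:

```python
# Sketch of the linear LR scaling rule auto_scale_lr refers to (assumed
# convention: lr scales with total batch size relative to base_batch_size).
base_lr = 0.2
base_batch_size = 128        # 8 GPUs x 16 samples per GPU, per the comment above
actual_batch_size = 4 * 16   # e.g. the same config run on 4 GPUs

scaled_lr = base_lr * actual_batch_size / base_batch_size
print(scaled_lr)  # 0.1
```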
16f19364595328e4296082867545a96c7427556e
|
da29f1f5b4459fbfec968bb694bedb9586f87b14
|
/new_algs/Sequence+algorithms/Binary+search+algorithm/palindromes.py
|
72730563f8e9eb671891b4dc6258d98dc0c977f9
|
[] |
no_license
|
coolsnake/JupyterNotebook
|
547806a45a663f090f313dc3e70f779ad9b213c0
|
20d8df6172906337f81583dabb841d66b8f31857
|
refs/heads/master
| 2023-01-13T18:55:38.615312
| 2020-11-17T22:55:12
| 2020-11-17T22:55:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,685
|
py
|
#!python
"""STARTER CODE FROM NEPTUNIUS"""
import string
import re
# Hint: Use these string constants to ignore capitalization and/or punctuation
# string.ascii_lowercase is 'abcdefghijklmnopqrstuvwxyz'
# string.ascii_uppercase is 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# string.ascii_letters is ascii_lowercase + ascii_uppercase
def is_palindrome(text):
"""A string of characters is a palindrome if it reads the same forwards and
backwards, ignoring punctuation, whitespace, and letter casing."""
# implement is_palindrome_iterative and is_palindrome_recursive below, then
# change this to call your implementation to verify it passes all tests
assert isinstance(text, str), 'input is not a string: {}'.format(text)
return is_palindrome_recursive(text)
# return is_palindrome_recursive(text)
def is_palindrome_iterative(text):
#implements the is_palindrome function iteratively here
regex = re.compile('[^a-zA-Z]')
text = regex.sub('', text)
text = text.upper()
thing_1 = 0
thing_2 = len(text)-1
while thing_1 < thing_2:
if text[thing_1] != text[thing_2]:
return False
thing_1+=1
thing_2-=1
return True
# once implemented, change is_palindrome to call is_palindrome_iterative
# to verify that your iterative implementation passes all tests
def is_palindrome_recursive(text, left=None, right=None):
#implements the is_palindrome function recursively here
if left is None:
regex = re.compile('[^a-zA-Z]')
text = regex.sub('', text)
text = text.upper()
left = 0
right = len(text)-1
if text == '':
return True
if text[left] != text[right]:
return False
if left < right:
return is_palindrome_recursive(text, left=left+1, right=right-1)
else:
return True
# once implemented, change is_palindrome to call is_palindrome_recursive
# to verify that your iterative implementation passes all tests
def main():
import sys
args = sys.argv[1:] # Ignore script file name
if len(args) > 0:
for arg in args:
is_pal = is_palindrome(arg)
result = 'PASS' if is_pal else 'FAIL'
is_str = 'is' if is_pal else 'is not'
print('{}: {} {} a palindrome'.format(result, repr(arg), is_str))
else:
print('Usage: {} string1 string2 ... stringN'.format(sys.argv[0]))
print(' checks if each argument given is a palindrome')
if __name__ == '__main__':
#print(is_palindrome_iterative("talcat"))
print(is_palindrome_recursive("TAC!!!Oc at", left=None, right=None))
print(is_palindrome_iterative("no, on!"))
|
[
"chenqh@uci.edu"
] |
chenqh@uci.edu
|
c9de463f2a5670eae402bcb6f8934038fef09461
|
b5550fc728b23cb5890fd58ccc5e1668548dc4e3
|
/virt/imagecache.py
|
182e8745ec9ade92d2338ff13e8e74b23e599763
|
[] |
no_license
|
bopopescu/nova-24
|
0de13f078cf7a2b845cf01e613aaca2d3ae6104c
|
3247a7199932abf9718fb3260db23e9e40013731
|
refs/heads/master
| 2022-11-20T00:48:53.224075
| 2016-12-22T09:09:57
| 2016-12-22T09:09:57
| 282,140,423
| 0
| 0
| null | 2020-07-24T06:24:14
| 2020-07-24T06:24:13
| null |
UTF-8
|
Python
| false
| false
| 5,230
|
py
|
#coding:utf-8
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova.compute import task_states
from nova.compute import vm_states
imagecache_opts = [
cfg.IntOpt('image_cache_manager_interval',
default=2400,
help='Number of seconds to wait between runs of the image '
'cache manager. Set to -1 to disable. '
'Setting this to 0 will disable, but this will change in '
'the K release to mean "run at the default rate".'),
# TODO(gilliard): Clean the above message after the K release
cfg.StrOpt('image_cache_subdirectory_name',
default='_base',
help='Where cached images are stored under $instances_path. '
'This is NOT the full path - just a folder name. '
'For per-compute-host cached images, set to _base_$my_ip'),
cfg.BoolOpt('remove_unused_base_images',
default=True,
help='Should unused base images be removed?'),
cfg.IntOpt('remove_unused_original_minimum_age_seconds',
default=(24 * 3600),
help='Unused unresized base images younger than this will not '
'be removed'),
]
CONF = cfg.CONF
CONF.register_opts(imagecache_opts)
CONF.import_opt('host', 'nova.netconf')
class ImageCacheManager(object):
"""Base class for the image cache manager.
This class will provide a generic interface to the image cache manager.
"""
def __init__(self):
self.remove_unused_base_images = CONF.remove_unused_base_images
self.resize_states = [task_states.RESIZE_PREP,
task_states.RESIZE_MIGRATING,
task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH]
def _get_base(self):
"""Returns the base directory of the cached images."""
raise NotImplementedError()
def _list_running_instances(self, context, all_instances):
"""List running instances (on all compute nodes).
This method returns a dictionary with the following keys:
- used_images
- image_popularity
- instance_names
"""
used_images = {}
image_popularity = {}
instance_names = set()
for instance in all_instances:
# NOTE(mikal): "instance name" here means "the name of a directory
# which might contain an instance" and therefore needs to include
# historical permutations as well as the current one.
instance_names.add(instance.name)
instance_names.add(instance.uuid)
if (instance.task_state in self.resize_states or
instance.vm_state == vm_states.RESIZED):
instance_names.add(instance.name + '_resize')
instance_names.add(instance.uuid + '_resize')
for image_key in ['image_ref', 'kernel_id', 'ramdisk_id']:
image_ref_str = getattr(instance, image_key)
if image_ref_str is None:
continue
local, remote, insts = used_images.get(image_ref_str,
(0, 0, []))
if instance.host == CONF.host:
local += 1
else:
remote += 1
insts.append(instance.name)
used_images[image_ref_str] = (local, remote, insts)
image_popularity.setdefault(image_ref_str, 0)
image_popularity[image_ref_str] += 1
return {'used_images': used_images,
'image_popularity': image_popularity,
'instance_names': instance_names}
def _list_base_images(self, base_dir):
"""Return a list of the images present in _base.
This method returns a dictionary with the following keys:
- unexplained_images
- originals
"""
return {'unexplained_images': [],
'originals': []}
def _age_and_verify_cached_images(self, context, all_instances, base_dir):
"""Ages and verifies cached images."""
raise NotImplementedError()
def update(self, context, all_instances):
"""The cache manager.
This will invoke the cache manager. This will update the cache
according to the defined cache management scheme. The information
populated in the cached stats will be used for the cache management.
"""
raise NotImplementedError()
|
[
"719184289@qq.com"
] |
719184289@qq.com
|
42b9566db4361ab8b254d2a2264e24c1714fe831
|
1285703d35b5a37734e40121cd660e9c1a73b076
|
/leetcode/trees/979_distribute_coins_in_binary_tree.py
|
bcc55f5a0967dd5c383882f927dcf475948b6d2e
|
[] |
no_license
|
takin6/algorithm-practice
|
21826c711f57131108168775f08e4e13d07a3b38
|
f4098bea2085a77d11c29e1593b3cc3f579c24aa
|
refs/heads/master
| 2022-11-30T09:40:58.083766
| 2020-08-07T22:07:46
| 2020-08-07T22:07:46
| 283,609,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,101
|
py
|
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def distributeCoins(self, root: TreeNode) -> int:
self.res = 0
def dfs(root):
if root is None: return 0
excess_from_left = dfs(root.left)
excess_from_right = dfs(root.right)
print(root.val, excess_from_left, excess_from_right)
self.res += abs(excess_from_left) + abs(excess_from_right)
return root.val + excess_from_left + excess_from_right - 1
dfs(root)
return self.res
# t = TreeNode(3)
# t.left = TreeNode(0)
# t.right = TreeNode(0)
# print(Solution().distributeCoins(t))
# print("---------------")
# t = TreeNode(0)
# t.left = TreeNode(3)
# t.right = TreeNode(0)
# print(Solution().distributeCoins(t))
print("---------------")
# [1,0,0,null,3]
t = TreeNode(1)
t.left = TreeNode(0)
t.right = TreeNode(0)
t.left.right = TreeNode(3)
print(Solution().distributeCoins(t))
|
[
"takayukiinoue116@gmail.com"
] |
takayukiinoue116@gmail.com
|
b66c5203b859c841cc828a61e62cc99f9eb553fa
|
f6f632bee57875e76e1a2aa713fdbe9f25e18d66
|
/python/_0001_0500/0494_target-sum.py
|
7deeae6aad4be02d4582bbd2b5c096fd901b4cfb
|
[] |
no_license
|
Wang-Yann/LeetCodeMe
|
b50ee60beeeb3661869bb948bef4fbe21fc6d904
|
44765a7d89423b7ec2c159f70b1a6f6e446523c2
|
refs/heads/master
| 2023-08-07T05:31:23.428240
| 2021-09-30T15:33:53
| 2021-09-30T15:33:53
| 253,497,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,213
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Rock Wayne
# @Created : 2020-05-07 08:00:00
# @Last Modified : 2020-05-07 08:00:00
# @Mail : lostlorder@gmail.com
# @Version : alpha-1.0
"""
# 给定一个非负整数数组,a1, a2, ..., an, 和一个目标数,S。现在你有两个符号 + 和 -。对于数组中的任意一个整数,你都可以从 + 或 -中选
# 择一个符号添加在前面。
#
# 返回可以使最终数组和为目标数 S 的所有添加符号的方法数。
#
# 示例 1:
#
# 输入: nums: [1, 1, 1, 1, 1], S: 3
# 输出: 5
# 解释:
#
# -1+1+1+1+1 = 3
# +1-1+1+1+1 = 3
# +1+1-1+1+1 = 3
# +1+1+1-1+1 = 3
# +1+1+1+1-1 = 3
#
# 一共有5种方法让最终目标和为3。
#
#
# 注意:
#
#
# 数组非空,且长度不会超过20。
# 初始的数组的和不会超过1000。
# 保证返回的最终结果能被32位整数存下。
#
# Related Topics 深度优先搜索 动态规划
"""
import functools
from typing import List
import pytest
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
def findTargetSumWays(self, nums: List[int], S: int) -> int:
"""
https://leetcode-cn.com/problems/target-sum/solution/0-1bei-bao-you-hua-jie-fa-by-sunrise-z/
sum(a)+sum(b)=sum(nums)
sum(a)-sum(b)=S
a代表所有非负数组,b代表所有非正数组,
一正一负,正的和负的绝对值为总和,正的-负的为我们的目标S
那么可以求得sum(a) = (sum(nums)+S) /2
即在数组nums中取子集,满足子集的和为(sum(nums)+S) /2,看看这样的条件有多少种
转化为 0-1 背包
二维的话
dp[i][j] = x 表示,若只在前 i 个物品中选择,若当前背包的容量为 j,则最多有 x 种方法可以恰好装满背包
dp[i][j] = dp[i-1][j] + dp[i-1][j-nums[i-1]];
"""
total = sum(nums)
if total < S:
return 0
tmp = total + S
if tmp & 0b1:
return 0
target = tmp // 2
dp = [0] * (target + 1)
dp[0] = 1
for num in nums:
for v in range(target, num - 1, -1):
dp[v] += dp[v - num]
return dp[target]
# leetcode submit region end(Prohibit modification and deletion)
class Solution1:
def findTargetSumWays(self, nums: List[int], S: int) -> int:
"""
初始版本 超时
优化后可以过
"""
N = len(nums)
@functools.lru_cache(None)
def dfs(i, cur_sum):
ans = 0
if i == N:
if cur_sum == S:
return 1
else:
ans += dfs(i + 1, cur_sum + nums[i]) + dfs(i + 1, cur_sum - nums[i])
return ans
return dfs(0, 0)
@pytest.mark.parametrize("kw,expected", [
[dict(nums=[1, 1, 1, 1, 1], S=3), 5],
])
def test_solutions(kw, expected):
assert Solution().findTargetSumWays(**kw) == expected
assert Solution1().findTargetSumWays(**kw) == expected
if __name__ == '__main__':
pytest.main(["-q", "--color=yes", "--capture=no", __file__])
|
[
"rock@get.com.mm"
] |
rock@get.com.mm
|
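A brute-force cross-check of the subset-sum reduction derived in the docstring above, enumerating every sign assignment; it assumes the Solution class from the listing is in scope and only suits small inputs:

```python
# Brute-force cross-check of the DP above (exponential; small inputs only).
from itertools import product

def brute_force(nums, S):
    return sum(1 for signs in product((1, -1), repeat=len(nums))
               if sum(s * n for s, n in zip(signs, nums)) == S)

print(brute_force([1, 1, 1, 1, 1], 3))                   # 5
print(Solution().findTargetSumWays([1, 1, 1, 1, 1], 3))  # 5, matches
```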
43f91e28b0ab99020b437977fe568b429178b0b3
|
068d271e241d8cdb46dbf4243166e4b8ee7025b2
|
/day10/homework/FTP/server/core/server_common.py
|
18fad1a82f4d7f6fccba1a4c2bf4fd51ff3efa68
|
[] |
no_license
|
caiqinxiong/python
|
f6e226e76cb62aac970bcfbcb6c8adfc64858b60
|
9029f6c528d2cb742b600af224e803baa74cbe6a
|
refs/heads/master
| 2023-05-26T19:41:34.911885
| 2020-05-15T09:02:08
| 2020-05-15T09:02:08
| 195,261,757
| 1
| 0
| null | 2021-06-10T23:33:33
| 2019-07-04T15:01:42
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 5,055
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'caiqinxiong_cai'
# 2019/9/3 14:35
import struct
import json
import os
import sys
import hashlib
from core.server_auth import ServerAuth as sa
from core.log import Log as log
from conf import settings as ss
class Common:
'''Helper class shared across the FTP server.'''
@staticmethod
def mySend(conn, msgb, dic=False):
'''Send data with a length prefix to avoid TCP sticky packets.'''
if dic: msgb = json.dumps(msgb).encode('utf-8')
len_msg = len(msgb)
pack_len = struct.pack('i', len_msg)
conn.send(pack_len)
conn.send(msgb)
@staticmethod
def myRecv(conn, dic=False):
'''Receive data, using the length prefix to delimit messages.'''
pack_len = conn.recv(4) # struct framing: a fixed 4-byte length header precedes every payload
len_msg = struct.unpack('i', pack_len)[0] # unpack the header; unpack returns a tuple
msg_b = conn.recv(len_msg)
if dic: msg_b = json.loads(msg_b.decode('utf-8'))
return msg_b
@staticmethod
def processBar(num, total):
'''Print a progress bar.'''
rate = num / total
rate_num = int(rate * 100)
bar = ('>' * rate_num, rate_num,) # symbols that make up the displayed bar
r = '\r%s>%d%%\n' % bar if rate_num == 100 else '\r%s>%d%%' % bar
sys.stdout.write(r) # \r overwrites the current line
return sys.stdout.flush() # flush immediately so the bar updates in real time
@staticmethod
def updateQuota(file, name, quota_new):
'''更新磁盘配额'''
with open(file, mode='r', encoding='utf-8') as f1, open(file + '.bak', mode='w', encoding='utf-8') as f2:
for line in f1:
if line.strip():
if name in line:
usr, pwd, quota_old = line.split('|')
line = usr + '|' + pwd + '|' + quota_new + '\n'
f2.write(line)
os.remove(file)
os.rename(file + '.bak', file)
@staticmethod
def checkQuota(file, dic):
'''检查磁盘配额'''
for n, p, q in sa.readInfo(file):
if dic['name'] == n:
dic['msg'] = '用户%s当前磁盘配额剩余:%s字节\n上传文件大小为:%s字节' % (dic['name'], q, dic['filesize'])
num = int(q) - int(dic['filesize'])
dic['flag'] = False if num < 0 else True
if not dic['flag']: dic['msg'] = '%s用户磁盘配额不足!\n' % dic['name'] + dic['msg']
dic['total'] = q
dic['quota'] = str(num)
return dic
@classmethod
def startTransfer(cls, conn, dic, kind, file, mode, b_size=1024000):
'''Run a transfer; common code shared by upload and download.'''
md5 = hashlib.md5() # hash while streaming, so the file need not be reopened just for verification
if dic['exist_size']: log.debug('文件上次已经%s了%s字节,开始断点续传!' % (kind, dic['exist_size']))
with open(file, mode) as f:
if kind == '下载': f.seek(dic['exist_size']) # move the file pointer to the resume offset before reading
while dic['filesize'] > 0:
if kind == '下载':
line = f.read(b_size)
conn.send(line) # packet coalescing is harmless here; the byte counters drive the loop to completion
elif kind == '上传':
line = conn.recv(b_size) # packet coalescing is harmless here; the byte counters drive the loop to completion
f.write(line)
dic['exist_size'] += len(line) # cumulative bytes transferred, used by the progress bar
dic['filesize'] -= len(line) # remaining bytes; drives the loop exit
cls.processBar(dic['exist_size'], dic['total_size'])
md5.update(line)
dic['server_md5'] = md5.hexdigest() # MD5 of the data this side processed
dic['client_md5'] = cls.myRecv(conn).decode('utf-8') # MD5 reported by the peer
dic['msg'] = 'MD5校验OK,文件传输成功!' if dic['client_md5'] == dic['server_md5'] else 'MD5不一致,文件传输失败!'
if '成功' in dic['msg'] and kind == '上传':
cls.updateQuota(ss.USER_FILE, dic['name'], dic['quota']) # update the quota after a successful upload
dic['msg'] = dic['msg'] + '\n文件上传位置:' + dic['upload_file'] + '\nMD5值为:' + dic['server_md5'] + '\n磁盘配额剩余:%s字节' % dic['quota']
elif '成功' in dic['msg'] and kind == '下载':
dic['msg'] = dic['msg'] + '\n文件下载位置:' + dic['download_file'] + '\nMD5值为:' + dic['server_md5']
log.readAndWrite(dic['msg'])
cls.mySend(conn, dic, True)
return dic
@classmethod
def startGetFile(cls, conn, dic):
'''Client downloads a file from the server.'''
return cls.startTransfer(conn, dic, kind='下载', file=dic['file_path'], mode='rb')
@classmethod
def startPutFile(cls, conn, dic):
'''Client uploads a file to the server.'''
return cls.startTransfer(conn, dic, kind='上传', file=dic['upload_file'], mode='ab')
|
[
"13269469526@163.com"
] |
13269469526@163.com
|
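mySend/myRecv above implement length-prefixed framing: every payload is preceded by a 4-byte struct-packed length, so the receiver knows exactly how much to read. A self-contained sketch of the same framing over a local socketpair:

```python
# Minimal sketch of the struct-based framing used by mySend/myRecv, shown
# over a local socketpair; short reads are ignored for brevity.
import json
import socket
import struct

a, b = socket.socketpair()
payload = json.dumps({'cmd': 'get', 'file': 'demo.txt'}).encode('utf-8')
a.send(struct.pack('i', len(payload)))  # fixed 4-byte length header
a.send(payload)

length = struct.unpack('i', b.recv(4))[0]
print(json.loads(b.recv(length).decode('utf-8')))  # {'cmd': 'get', 'file': 'demo.txt'}
a.close(); b.close()
```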
bd232b3367effd7cfbda818926e79471f68ef3ce
|
5b4fe473179b5fadaf59ec96d55b2ec4cb326f65
|
/test/runtime/frontend_test/chainer_test/pow_var_var_test.py
|
608f866af8cbbd74d77544442bc55e0e6a2adf6b
|
[
"Zlib",
"MIT"
] |
permissive
|
TarrySingh/webdnn
|
13d3f1ec4936916abacfb67e270f48571e2fcff2
|
b31b19de0798d8ca198b78d19cb06e4fce1bc260
|
refs/heads/master
| 2021-05-07T02:24:47.500746
| 2017-11-13T13:00:24
| 2017-11-13T13:00:24
| 110,582,816
| 0
| 1
| null | 2017-11-13T18:03:46
| 2017-11-13T18:03:46
| null |
UTF-8
|
Python
| false
| false
| 1,044
|
py
|
import chainer
import numpy as np
from test.util import generate_kernel_test_case
from webdnn.frontend.chainer.converter import ChainerConverter
def test():
vx1 = chainer.Variable(np.random.rand(2, 4, 6, 8).astype(np.float32))
vx2 = chainer.Variable(np.random.rand(2, 4, 6, 8).astype(np.float32))
vy = vx1 ** vx2
graph = ChainerConverter().convert([vx1, vx2], [vy])
x1 = graph.inputs[0]
x2 = graph.inputs[1]
y = graph.outputs[0]
generate_kernel_test_case(
description=f"[chainer] F.PowVarVar",
graph=graph,
inputs={
x1: vx1.data,
x2: vx2.data
},
expected={y: vy.data},
)
def test_itself():
vx = chainer.Variable(np.random.rand(2, 4, 6, 8))
vy = vx ** vx
graph = ChainerConverter().convert([vx], [vy])
x = graph.inputs[0]
y = graph.outputs[0]
generate_kernel_test_case(
description=f"[chainer] F.PowVarVar itself",
graph=graph,
inputs={x: vx.data},
expected={y: vy.data},
)
|
[
"y.kikura@gmail.com"
] |
y.kikura@gmail.com
|
0f36fc3d21dc7041777d4a12b840417a029e470f
|
9929ba720faf432a5bf3f5cc51dc9f429c24cb84
|
/QUANTTOOLS/QAStockETL/QASU/save_usstock_alpha.py
|
a924c44ce2d05151a07caabb07f54d287cf3f1c4
|
[] |
no_license
|
chaopaoo12/QuantTools
|
45fb344fc085bd7a40d94f646d0982d6b93db1a8
|
2bb1c5ad6aab3d454cfe32b6e6c86107992bed0c
|
refs/heads/master
| 2023-08-18T04:03:11.944128
| 2023-08-13T10:58:49
| 2023-08-13T10:58:49
| 174,860,433
| 9
| 11
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,002
|
py
|
from QUANTAXIS.QAUtil import (DATABASE, QA_util_log_info,QA_util_to_json_from_pandas,QA_util_today_str)
from QUANTTOOLS.QAStockETL.QAUtil import (QA_util_get_trade_range, QA_util_if_trade)
from QUANTTOOLS.QAStockETL.QAFetch import QA_fetch_usstock_list
from QUANTTOOLS.QAStockETL.QAFetch import (QA_fetch_get_usstock_alpha,
QA_fetch_get_usstock_alpha101)
import pymongo
import gc
def QA_SU_save_usstock_alpha_day(code = None, start_date = None, end_date = None, client=DATABASE, ui_log = None, ui_progress = None):
'''
save stock_day
Save the earnings-report calendar.
Full history.
:return:
'''
if end_date is None:
end_date = QA_util_today_str()
if start_date is None:
start_date = '2009-01-01'
deal_date_list = QA_util_get_trade_range(start_date, end_date, 'us')
if code is None:
code = list(QA_fetch_usstock_list()['code'])
stock_alpha = client.usstock_alpha
stock_alpha.create_index([("code", pymongo.ASCENDING), ("date_stamp", pymongo.ASCENDING)], unique=True)
err = []
def __saving_work(date, code):
try:
QA_util_log_info(
'##JOB01 Now Saving USStock Alpha191==== {}'.format(str(date)), ui_log)
data = QA_fetch_get_usstock_alpha(code, date)
if data is not None:
stock_alpha.insert_many(QA_util_to_json_from_pandas(data), ordered=False)
gc.collect()
except Exception as error0:
print(error0)
err.append(str(date))
for item in deal_date_list:
QA_util_log_info('The {} of Total {}'.format
((deal_date_list.index(item) +1), len(deal_date_list)))
strProgressToLog = 'DOWNLOAD PROGRESS {}'.format(str(float((deal_date_list.index(item) +1) / len(deal_date_list) * 100))[0:4] + '%')
intProgressToLog = int(float((deal_date_list.index(item) +1) / len(deal_date_list) * 100))
QA_util_log_info(strProgressToLog, ui_log= ui_log, ui_progress= ui_progress, ui_progress_int_value= intProgressToLog)
if QA_util_if_trade(item) == True:
__saving_work( item, code)
if len(err) < 1:
QA_util_log_info('SUCCESS save USStock Alpha191 ^_^', ui_log)
else:
QA_util_log_info(' ERROR CODE \n ', ui_log)
QA_util_log_info(err, ui_log)
def QA_SU_save_usstock_alpha_his(code = None, start_date = None, end_date = None, client=DATABASE, ui_log = None, ui_progress = None):
'''
save stock_day
Save the earnings-report calendar.
Look back over the four quarterly reports.
:return:
'''
if code is None:
code = list(QA_fetch_usstock_list()['code'])
if end_date is None:
end_date = QA_util_today_str()
if start_date is None:
start_date = '2009-01-01'
deal_date_list = QA_util_get_trade_range(start_date, end_date, 'us')
stock_alpha = client.usstock_alpha
stock_alpha.create_index([("code", pymongo.ASCENDING), ("date_stamp", pymongo.ASCENDING)], unique=True)
err = []
def __saving_work(code, date):
try:
QA_util_log_info(
'##JOB01 Now Saving USStock Alpha191==== {}'.format(str(date)), ui_log)
data = QA_fetch_get_usstock_alpha(code, date)
if data is not None:
stock_alpha.insert_many(QA_util_to_json_from_pandas(data), ordered=False)
except Exception as error0:
print(error0)
err.append(str(date))
for item in deal_date_list:
QA_util_log_info('The {} of Total {}'.format
((deal_date_list.index(item) +1), len(deal_date_list)))
strProgressToLog = 'DOWNLOAD PROGRESS {}'.format(str(float((deal_date_list.index(item) +1) / len(deal_date_list) * 100))[0:4] + '%')
intProgressToLog = int(float((deal_date_list.index(item) + 1)/ len(deal_date_list) * 100))
QA_util_log_info(strProgressToLog, ui_log= ui_log, ui_progress= ui_progress, ui_progress_int_value= intProgressToLog)
if QA_util_if_trade(item) == True:
__saving_work(code, item)
if len(err) < 1:
QA_util_log_info('SUCCESS save USStock Alpha191 ^_^', ui_log)
else:
QA_util_log_info(' ERROR CODE \n ', ui_log)
QA_util_log_info(err, ui_log)
def QA_SU_save_usstock_alpha101_day(code = None, start_date = None, end_date = None, client=DATABASE, ui_log = None, ui_progress = None):
'''
save stock_day
Save the earnings-report calendar.
Full history.
:return:
'''
if end_date is None:
end_date = QA_util_today_str()
if start_date is None:
start_date = '2009-01-01'
codes = code
if codes is None:
codes = list(QA_fetch_usstock_list()['code'])
stock_alpha = client.usstock_alpha101
stock_alpha.create_index([("code", pymongo.ASCENDING), ("date_stamp", pymongo.ASCENDING)], unique=True)
err = []
def __saving_work(code,start,end):
try:
QA_util_log_info(
'##JOB01 Now Saving USStock Alpha101==== {}'.format(str(code)), ui_log)
data = QA_fetch_get_usstock_alpha101(code,start,end)
if data is not None:
stock_alpha.insert_many(QA_util_to_json_from_pandas(data), ordered=False)
gc.collect()
except Exception as error0:
print(error0)
err.append(str(code))
for code in codes:
QA_util_log_info('The {} of Total {}'.format
((codes.index(code) +1), len(codes)))
strProgressToLog = 'DOWNLOAD PROGRESS {}'.format(str(float((codes.index(code) +1) / len(codes) * 100))[0:4] + '%')
intProgressToLog = int(float((codes.index(code) +1) / len(codes) * 100))
QA_util_log_info(strProgressToLog, ui_log= ui_log, ui_progress= ui_progress, ui_progress_int_value= intProgressToLog)
__saving_work(code,start_date,end_date)
if len(err) < 1:
QA_util_log_info('SUCCESS save USStock Alpha101 ^_^', ui_log)
else:
QA_util_log_info(' ERROR CODE \n ', ui_log)
QA_util_log_info(err, ui_log)
def QA_SU_save_usstock_alpha101_his(code = None, start_date = None, end_date = None, client=DATABASE, ui_log = None, ui_progress = None):
'''
save stock_day
Save the earnings-report calendar.
Full history.
:return:
'''
if end_date is None:
end_date = QA_util_today_str()
if start_date is None:
start_date = '2009-01-01'
codes = code
if codes is None:
codes = list(QA_fetch_usstock_list()['code'])
stock_alpha = client.usstock_alpha101
stock_alpha.create_index([("code", pymongo.ASCENDING), ("date_stamp", pymongo.ASCENDING)], unique=True)
err = []
def __saving_work(code,start,end):
try:
QA_util_log_info(
'##JOB01 Now Saving USStock Alpha101==== {}'.format(str(code)), ui_log)
data = QA_fetch_get_usstock_alpha101(code,start,end)
if data is not None:
stock_alpha.insert_many(QA_util_to_json_from_pandas(data), ordered=False)
gc.collect()
except Exception as error0:
print(error0)
err.append(str(code))
for code in codes:
QA_util_log_info('The {} of Total {}'.format
((codes.index(code) +1), len(codes)))
strProgressToLog = 'DOWNLOAD PROGRESS {}'.format(str(float((codes.index(code) +1) / len(codes) * 100))[0:4] + '%')
intProgressToLog = int(float((codes.index(code) +1) / len(codes) * 100))
QA_util_log_info(strProgressToLog, ui_log= ui_log, ui_progress= ui_progress, ui_progress_int_value= intProgressToLog)
__saving_work(code,start_date,end_date)
if len(err) < 1:
QA_util_log_info('SUCCESS save USStock Alpha101 ^_^', ui_log)
else:
QA_util_log_info(' ERROR CODE \n ', ui_log)
QA_util_log_info(err, ui_log)
if __name__ == '__main__':
pass
|
[
"chaopaoo12@hotmail.com"
] |
chaopaoo12@hotmail.com
|
312a8e05f383be3790e403f2863f1c553e88a5c0
|
6f56cf11d2d7750edb193831f368c8c7d156b974
|
/test/mitmproxy/test_flow_export/locust_get.py
|
72d5932aa5bef794e7caeda7833a2d335e7cbb34
|
[
"MIT"
] |
permissive
|
lifeNrun/mitmproxy
|
000ad22e7262948ee6d4835c96d49b4a96ae1597
|
a7b9e3033db29a27344c9f5d968c2af25d4a9ac0
|
refs/heads/master
| 2021-01-12T21:36:59.985896
| 2016-05-19T05:51:27
| 2016-05-19T05:51:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
from locust import HttpLocust, TaskSet, task
class UserBehavior(TaskSet):
def on_start(self):
''' on_start is called when a Locust start before any task is scheduled '''
self.path()
@task()
def path(self):
url = self.locust.host + '/path'
headers = {
'header': 'qvalue',
'content-length': '7',
}
self.response = self.client.request(
method='GET',
url=url,
headers=headers,
)
### Additional tasks can go here ###
class WebsiteUser(HttpLocust):
task_set = UserBehavior
min_wait = 1000
max_wait = 3000
|
[
"aldo@nullcube.com"
] |
aldo@nullcube.com
|
d52d3f946f64395395f21a74aa35b9864124e73b
|
0df0bd96bea3e3f8ed8d339f0180c1a9fe529471
|
/shipments/migrations/0012_auto_20141029_1158.py
|
26fb7a1bdffad5cebdf7f0e74d184a1b6a505803
|
[
"BSD-3-Clause"
] |
permissive
|
theirc/CTS
|
d04141c4a7db1c32e915d65369e286c9c04ab9b9
|
43eb3e3b78c19f9e1dc02158ca12fc0c5d6bb270
|
refs/heads/develop
| 2020-12-03T05:26:07.564049
| 2018-03-21T14:47:53
| 2018-03-21T14:47:53
| 35,951,007
| 25
| 9
|
BSD-3-Clause
| 2018-03-21T14:47:54
| 2015-05-20T13:52:48
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 702
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('shipments', '0011_auto_20141023_1542'),
]
operations = [
migrations.AlterField(
model_name='location',
name='latitude',
field=models.DecimalField(null=True, max_digits=13, decimal_places=10),
preserve_default=True,
),
migrations.AlterField(
model_name='location',
name='longitude',
field=models.DecimalField(null=True, max_digits=13, decimal_places=10),
preserve_default=True,
),
]
|
[
"dpoirier@caktusgroup.com"
] |
dpoirier@caktusgroup.com
|
fc7808e28aefa7f89ef9a9bd7a27c3525fb6ef52
|
71e838612daddbfc9bda01d9ba0ca76fab48c3bd
|
/full-stack-web-app/api/DjangoAPI/EmployeeApp/serializers.py
|
712aa9a1fbbb1256c71c9576bc81feb251dcd9c1
|
[] |
no_license
|
Nourreddine1920/full-stack-web-app
|
d77e23955f3f11f853af51fbf02504e6237458a1
|
11a793c371d29ee592ee38c1ccfc83fb9b0401ce
|
refs/heads/main
| 2023-06-29T21:38:57.416787
| 2021-08-06T12:50:15
| 2021-08-06T12:50:15
| 393,375,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
from rest_framework import serializers
from EmployeeApp.models import Departments,Employees
class DepartmentSerializer(serializers.ModelSerializer):
class Meta:
model=Departments
fields=('DepartmentId','DepartmentName')
class EmployeeSerializer(serializers.ModelSerializer):
class Meta:
model=Employees
fields=('EmployeeId','EmployeeName','Department','DateOfJoining','PhotoFileName')
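# A hedged usage sketch (illustrative, not part of the original app):
# DepartmentSerializer(Departments.objects.all(), many=True).data
# yields a list of {'DepartmentId': ..., 'DepartmentName': ...} dicts.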
|
[
"Vous@exemple.com"
] |
Vous@exemple.com
|
34a752ae52ae267613e3396371a6eb4159ae908f
|
4520f56d4952c788e198ee7eee39911c9a76c60f
|
/03_Bigdata/02_Standardization_Analysis/2. Excel/7pandas_column_by_index.py
|
3ca0166f2800315d790994cafc78d6cdd7fced1e
|
[] |
no_license
|
SuHyeonJung/iot_python2019
|
bef8877a1cd41981ad2125291f5af44f4fd1701c
|
7860630ae28c53677a3c2761c9e997b28ea55f26
|
refs/heads/master
| 2020-06-14T22:18:27.503781
| 2019-11-08T05:50:41
| 2019-11-08T05:50:41
| 195,142,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
import sys
import pandas as pd
input_file = sys.argv[1]
output_file = sys.argv[2]
data_frame = pd.read_excel(input_file, 'january_2013', index_col=None)
data_frame_column_by_index = data_frame.iloc[:, [1, 4]]
writer = pd.ExcelWriter(output_file)
data_frame_column_by_index.to_excel(writer, sheet_name='jan_13_output', index=False)
writer.save()
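# A hedged usage note (file names are illustrative): the script is run as
#   python 7pandas_column_by_index.py input.xlsx output.xlsx
# where input.xlsx must contain a 'january_2013' sheet; iloc[:, [1, 4]]
# keeps the 2nd and 5th columns by position.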
|
[
"galma94815@naver.com"
] |
galma94815@naver.com
|
0e4c3531a57387a683f6035ab1f6f0d2d72d85ed
|
e90a772733e73e45b4cdbb5f240ef3b4a9e71de1
|
/18. 4Sum.py
|
1bf91d01a1717a31da740d4bb0ec68e689638928
|
[] |
no_license
|
jiewu-stanford/leetcode
|
102829fcbcace17909e4de49c01c3d705b6e6e3a
|
cbd47f713d3307f900daf55c8f27301c70542fc4
|
refs/heads/master
| 2022-05-28T18:25:00.885047
| 2022-05-18T05:16:22
| 2022-05-18T05:16:22
| 214,486,622
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,893
|
py
|
'''
Title : 18. 4Sum
Problem : https://leetcode.com/problems/4sum/
'''
''' pair up to convert to the 2-sum problem '''
class Solution:
def fourSum(self, nums: List[int], target: int) -> List[List[int]]:
pair_nums = {}
for i in range(len(nums)-1):
for j in range(i+1, len(nums)):
pair_num = nums[i] + nums[j]
if pair_num in pair_nums:
pair_nums[pair_num].append((i, j))
else:
pair_nums[pair_num] = [(i, j)]
quadruplets = set()
for key, val in pair_nums.items():
dif = target - key
if dif in pair_nums:
pair1, pair2 = val, pair_nums[dif]
for i, j in pair1:
for k, l in pair2:
quad = [i, j, k, l]
if len(set(quad)) != len(quad): continue
quadruplet = [nums[i], nums[j], nums[k], nums[l]]
quadruplet.sort()
quadruplets.add(tuple(quadruplet))
return list(quadruplets)
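# An illustrative check of the pair-based solution above (assumes
# `from typing import List` so the annotations resolve outside LeetCode):
# Solution().fourSum([1, 0, -1, 0, -2, 2], 0) should yield the value
# quadruplets (-2, -1, 1, 2), (-2, 0, 0, 2) and (-1, 0, 0, 1) in some order.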
''' iterative solution, more comprehensible but much slower '''
class Solution:
def fourSum(self, nums: List[int], target: int) -> List[List[int]]:
nums.sort()
quadruplets = []
L = len(nums)
for i in range(L-3):
if i > 0 and nums[i-1] == nums[i]: continue
for j in range(i+1, L-2):
if j > i + 1 and nums[j-1] == nums[j]: continue
dif = target - nums[i] - nums[j]
l, r = j+1, L-1
while l < r:
if nums[l] + nums[r] == dif:
quadruplets.append((nums[i], nums[j], nums[l], nums[r]))
r -= 1
l += 1
while l < r and nums[l-1] == nums[l]:
l += 1
while l < r and nums[r] == nums[r+1]:
r -= 1
elif nums[l] + nums[r] > dif:
r -= 1
else:
l += 1
return quadruplets
'''
combine the solution of 1. Two Sum and 15. 3Sum
Reference: https://programmer.help/blogs/leetcode-2sum-3sum-4sum-python.html
'''
class Solution:
def fourSum(self, nums: List[int], target: int) -> List[List[int]]:
if len(nums) < 4: return []
nums.sort()
def threeSum(nums, target): # the 15. solution
def twoSum(nums, target, num, triplets): # the 1. solution
l, r, tgt = 0, len(nums)-1, target-num
while l != r:
if nums[l] + nums[r] < tgt:
l += 1
elif nums[l] + nums[r] > tgt:
r -= 1
else:
triplet = [num, nums[l], nums[r]]
l += 1
while l != r and nums[l-1] == nums[l]:
l += 1
triplets.append(triplet)
return triplets
result = []
for i in range(len(nums)-2):
if i > 0 and nums[i-1] == nums[i]:
continue
else:
remnant = nums[i+1:]
result = twoSum(remnant, target, nums[i], result)
return result
res = []
for i in range(len(nums)-3):
if i > 0 and nums[i-1] == nums[i]:
continue
num = nums[i]
trisum = target - num
rem = nums[i+1:]
triples = threeSum(rem, trisum)
if len(triples) > 0:
for triple in triples:
triple.append(num)
res.append(triple)
return res
|
[
"bayernscience@hotmail.com"
] |
bayernscience@hotmail.com
|
6d2b146447d9c996712e4fcf47c58fe6cf589442
|
8e127527301ef9439960725784d5522af824bc9b
|
/account_flujo_caja_it/__manifest__.py
|
cee213aea2a18577408ba87507f6b8fbdac513ff
|
[] |
no_license
|
Makelator/heleo
|
b8fcf2beef7359ec91c20ef8930753bd0f6f4670
|
a5a04203f1f19f813f495a484e590cd22886edf7
|
refs/heads/master
| 2020-07-30T15:09:27.224489
| 2019-09-23T05:43:10
| 2019-09-23T05:43:10
| 210,272,436
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 504
|
py
|
# -*- encoding: utf-8 -*-
{
'name': 'Saldos Comprobantes Analisis IT Extended',
'category': 'account',
'author': 'ITGRUPO-COMPATIBLE-BO',
'depends': ['analisis_saldos_comprobantes_periodo_it','account_sheet_work','account_multipayment_invoices_it','account_multipayment_invoices_it_advance'],
'version': '1.0',
'description':"""
Analysis of balances by vouchers (Comprobantes), 2017 version
""",
'auto_install': False,
'demo': [],
'data': ['wizard/account_contable_period_view.xml'],
'installable': True
}
|
[
"angel_afla@hotmail.com"
] |
angel_afla@hotmail.com
|
ff7c384224d512d1d88787ac98df6f64a52eb2ab
|
ca6e4edfc31439aeaed4b8e9e75ea9c8b679c44f
|
/autofill_users.py
|
ace539b5d555e832bdd4a7310c5b2ed3644d450a
|
[
"MIT"
] |
permissive
|
prateekchandan/pickup-server
|
b74f949c5d3b0471d7318fd72417b3bd0b1ccfc2
|
9132c3ef9c0e95ba34a69d9ed2a9fb980356372a
|
refs/heads/master
| 2021-01-22T15:58:43.838560
| 2015-10-07T15:41:23
| 2015-10-07T15:41:23
| 33,261,955
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 716
|
py
|
import requests
import os
import json
loginURL = "http://pickup.prateekchandan.me/add_user"
HTTPSession = requests.session()
start_locations=['Larsen+Tourbo+Powai','IIT+Bombay+Hostel+9','Hiranandani+Hospital','Hiranandani','Kanjur+Marg+Station',
'Chandivali','Raheja+Vihar','Supreme+Powai','Galleria+Hiranandani','Powai+Plaza']
for i in range(100):
postData = {'key':'9f83c32cf3c9d529e' ,'fbid':i ,'name':'person'+str(i) , 'email':'person'+str(i)+'@gmail.com' ,
'device_id':i , 'gcm_id':i, 'mac_addr':i , 'gender':'male'}
afterLoginPage = HTTPSession.post(loginURL, data = postData )
print afterLoginPage.content
#afterLoginPage = HTTPSession.post(loginURL, data = postData )
#print afterLoginPage.content
|
[
"kalpeshk2011@gmail.com"
] |
kalpeshk2011@gmail.com
|
92cdb2d2c042b5c2f5c428f0f9f04b40d75ffe1f
|
772a82205af92d2f2d2b490ac6bc23fdb7456124
|
/hadoop-python/TopPopularLinksMapper.py
|
e761e3029501002bdccab3db35d35e3963e3d614
|
[] |
no_license
|
atashi/LLL
|
4f777b3a06c6ed38eab4323d2072dbbec22eee92
|
857b8c7fccfe8216da59228c1cf3675444855673
|
refs/heads/master
| 2021-05-17T10:11:28.946779
| 2019-11-25T15:56:14
| 2019-11-25T15:56:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
#!/usr/bin/env python
import sys
from collections import Counter
counter = Counter()
for line in sys.stdin:
line = line.strip()
k, v = line.split('\t')
try:
v = int(v)
except ValueError:
continue
counter.update({k: v})
counter_list = counter.items()
sort_counter = sorted(counter_list, key=lambda x: x[1], reverse=True)
for k, v in sort_counter[:10]:
print("%s\t%d" % (k, v))
|
[
"rebornwwp@gmail.com"
] |
rebornwwp@gmail.com
|
540660b234ff1db182a1e336b27f5d1fc440103d
|
fac96b4c97150e02f1405f7430c89b115e4c27f7
|
/ch03/ex3-9.motorcycles.py
|
f1492ae35d421f4f18f5f5c3779654508b24f7b5
|
[] |
no_license
|
gustavonvp/PYTHON-CRASH-COURSE
|
37478990ff3c3c368da505eb9e5a35dee5d1960b
|
8033e2eb84cf6d85fd4ff42ae0550f38dcd23f62
|
refs/heads/master
| 2023-04-03T00:42:20.333183
| 2017-10-24T05:47:01
| 2017-10-24T05:47:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 165
|
py
|
# Appending Elements to the End of a List
motorcycles = []
motorcycles.append('honda')
motorcycles.append('yamaha')
motorcycles.append('suzuki')
print(motorcycles)
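# Expected output: ['honda', 'yamaha', 'suzuki']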
|
[
"freebz@hananet.net"
] |
freebz@hananet.net
|
50f391a8148e8c120e820f8f5d3e3228e7e44471
|
a88afb87020530b8736841f3570fc125b5ded163
|
/Python_Scripts/model_numbers.py
|
a5a6cc938421db752e5e93130f08208165847e52
|
[
"MIT"
] |
permissive
|
ICB-DCM/solverstudy
|
efb915189b63fb2ca005097b9d27054fbfbfb2c1
|
0aea105c115bbf92d13cc19d88ab554438abdd38
|
refs/heads/master
| 2023-01-19T17:36:08.394305
| 2020-11-23T22:53:03
| 2020-11-23T22:53:03
| 307,409,328
| 0
| 0
|
MIT
| 2020-11-23T22:53:05
| 2020-10-26T14:59:53
|
Python
|
UTF-8
|
Python
| false
| false
| 824
|
py
|
"""Extract some basic information on model numbers."""
import os
import pandas as pd
from C import DIR_MODELS
df = pd.read_csv(os.path.join(DIR_MODELS, 'model_summary.tsv'), sep='\t')
print("Column names", df.columns)
print("Number of models:", df.shape[0])
print("Number of model groups:", len(df.short_id.unique()))
print("Number of AMICI importable models:", sum(df.amici_import == 'OK'))
print("Number of COPASI importable models:", sum(~pd.isnull(df.copasi_path)))
df_imp = df[(df.amici_import == 'OK') & (~pd.isnull(df.copasi_path))]
print("Number of importable models:", df_imp.shape[0])
print("Number of model groups:", len(df_imp.short_id.unique()))
df_acc = df_imp[df_imp.accepted]
print("Number of accepted models:", df_acc.shape[0])
print("Number of accepted model groups:", len(df_acc.short_id.unique()))
|
[
"noreply@github.com"
] |
ICB-DCM.noreply@github.com
|
9a6db226d7bcbf0cc22bb1d38a97777b59a02fb7
|
d3afd01b844f314a25e231d49eb18419b38de40b
|
/NotMnist/make_datasets.py
|
f3bd5286b969e7137ed08ebede391fdd13bdef7d
|
[] |
no_license
|
nanigasi-san/Chainer_DL
|
35cb6f58ef05ce45f371990af5f7ebc675ee2472
|
ee8fb1faec9e5aad67a5d366681acc3979946c4b
|
refs/heads/master
| 2020-04-29T14:40:00.004462
| 2019-06-20T08:16:51
| 2019-06-20T08:16:51
| 176,203,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,891
|
py
|
from skimage import io
import glob
import numpy as np
from chainer.datasets import TupleDataset
from random import randint
# build a TupleDataset
def make_tupledata_set_train(size=100):
alphabet_list = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
image_list = []
answer_list = []
def make_image_set():
image_path_list = glob.glob("F://notMnist_large/{0}/*".format(alphabet))
count = 0
_dataset = []
for image_path in image_path_list[:size+100]:
try:
_dataset.append(io.imread(image_path)/255)
count += 1
except:
continue
if count == size:
break
return _dataset
def make_answer_set():
return np.array( [alphabet_list.index(alphabet)] * size)
for alphabet in alphabet_list[:10]:
image_list.extend(make_image_set())
answer_list.extend(make_answer_set())
return TupleDataset(np.array(image_list,dtype=np.float32),np.array(answer_list))
def make_tupledata_set_test(size=10):
alphabet_list = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
image_list = []
answer_list = []
def make_image_set():
image_path_list = glob.glob("F://notMnist_large/{0}/*".format(alphabet))
count = 0
_dataset = []
for i in range(size+50):
try:
_dataset.append(io.imread(image_path_list[randint(0,30000)])/255)
count += 1
except:
continue
if count == size:
break
return _dataset
def make_answer_set():
return np.array( [alphabet_list.index(alphabet)] * size)
for alphabet in alphabet_list[:10]:
image_list.extend(make_image_set())
answer_list.extend(make_answer_set())
return TupleDataset(np.array(image_list,dtype=np.float32),np.array(answer_list))
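# A hedged usage sketch (the image paths above are hard-coded to
# F://notMnist_large, so this only runs where that directory exists):
# train = make_tupledata_set_train(size=100) # 10 classes x 100 images each
# image, label = train[0] # a float image array in [0, 1] and an int label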
|
[
"nanigasi.py@gmail.com"
] |
nanigasi.py@gmail.com
|
c7256651b31945c3782d5e628fbc0571bb324f0e
|
a1da48c4376c8676cda8872443461e84fff6dc13
|
/torchblocks/processor/sequence_labeling_processor.py
|
d57f31eb8b14683380853117be282ddb9580c728
|
[
"MIT"
] |
permissive
|
topDreamer/TorchBlocks
|
6f9b2dc3be1dae143f0aeaa07057a53071ac841a
|
a5baecb9a2470ff175087475630f2b7db3f7ef51
|
refs/heads/master
| 2022-12-29T15:13:42.779220
| 2020-10-18T02:18:11
| 2020-10-18T02:18:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,137
|
py
|
import logging
from .base import DataProcessor
from .utils import InputFeatures
logger = logging.getLogger(__name__)
class SequenceLabelingProcessor(DataProcessor):
'''
special_token_label: label assigned to [CLS] and [SEP], default: 'O'
pad_label_id: label id used for padding positions (the 'X' label), default: 0
'''
def __init__(self, tokenizer, data_dir,
prefix='',
encode_mode='one',
truncate_label=True,
special_token_label='O',
add_special_tokens=True,
pad_to_max_length=True,
pad_label_id=0):
super().__init__(data_dir=data_dir,
prefix=prefix,
tokenizer=tokenizer,
encode_mode=encode_mode,
pad_to_max_length=pad_to_max_length,
add_special_tokens=add_special_tokens,
truncate_label=truncate_label)
self.pad_label_id = pad_label_id
self.special_token_label = special_token_label
def convert_to_features(self, examples, label_list, max_seq_length, **kwargs):
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d/%d" % (ex_index, len(examples)))
texts = example.texts
inputs = self.encode(texts=texts, max_seq_length=max_seq_length)
label_ids = example.label_ids
if label_ids is None or not isinstance(label_ids, list):
raise ValueError("label_ids is not correct")
special_tokens_num = 2 if self.add_special_tokens else 0
if len(label_ids) > max_seq_length - special_tokens_num: # room for [CLS] and [SEP]
label_ids = label_ids[:(max_seq_length - special_tokens_num)]
label_ids = [label_map[x] for x in label_ids]
label_ids = [label_map[self.special_token_label]] + label_ids + [label_map[self.special_token_label]]
label_ids += [self.pad_label_id] * (max_seq_length - len(label_ids)) # padding
inputs['guid'] = example.guid
inputs['label_ids'] = label_ids
if ex_index < 5:
self.print_examples(**inputs)
features.append(InputFeatures(**inputs))
return features
class SequenceLabelingSpanProcessor(DataProcessor):
'''
span sequence labeling
'''
def __init__(self, tokenizer, data_dir,
prefix='',
encode_mode='one',
truncate_label=True,
add_special_tokens=True,
pad_to_max_length=True,
pad_label_id=0):
super().__init__(data_dir=data_dir,
prefix=prefix,
encode_mode=encode_mode,
tokenizer=tokenizer,
pad_to_max_length=pad_to_max_length,
add_special_tokens=add_special_tokens,
truncate_label=truncate_label)
self.pad_label_id = pad_label_id
def get_batch_keys(self):
return ['input_ids', 'attention_mask', 'token_type_ids', 'start_positions', 'end_positions']
def convert_to_features(self, examples, label_list, max_seq_length):
label2id = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d/%d" % (ex_index, len(examples)))
texts = example.texts
inputs = self.encode(texts=texts, max_seq_length=max_seq_length)
start_positions = [self.pad_label_id] * max_seq_length
end_positions = [self.pad_label_id] * max_seq_length
for span in example.label_ids:
label = span[0]
if self.add_special_tokens:
start = span[1] + 1 # cls
end = span[2] + 1 # cls
special_num = 2
else:
start = span[1]
end = span[2]
special_num = 0
if start > max_seq_length - special_num:
continue
start_positions[start] = label2id[label]
if end > max_seq_length - special_num:
continue
end_positions[end] = label2id[label]
assert len(start_positions) == max_seq_length
assert len(end_positions) == max_seq_length
inputs['guid'] = example.guid
inputs['start_positions'] = start_positions
inputs['end_positions'] = end_positions
if ex_index < 5:
self.print_examples(**inputs)
features.append(InputFeatures(**inputs))
return features
|
[
"1436496575@qq.com"
] |
1436496575@qq.com
|
d3f68f0ea82a4d0dbaf2ae04832775a6e8124729
|
92db89aaa332d2a0ea0318932c635c27e2ac5ff7
|
/chap04_Classification/lecture_1x/step05_softmax_classifier.py
|
b2a0c2b0c34af50a17fb6d5bf97458f396355a39
|
[] |
no_license
|
DominKim/Tensorflow_DNN_CNN_RNN_Basic
|
daf40100c777a9d154996e4a02c8e19c35daa5fb
|
1de11219800169b3bc0c95872d5952e76cbc3227
|
refs/heads/master
| 2022-11-09T11:16:06.576660
| 2020-06-30T05:50:04
| 2020-06-30T05:50:04
| 275,949,256
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,404
|
py
|
# -*- coding: utf-8 -*-
"""
step05_softmax_classifier
- 활성함수 : Softmax(model)
- 손실함수 : Cross Entropy
"""
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from sklearn.metrics import accuracy_score
# 1. x, y input data
# features: [fur, wings]
x_data = np.array([[0, 0], [1, 0], [1, 1], [0, 0], [0, 1], [1, 1]]) # [6, 2]
# classes: [other, mammal, bird] : [6, 3] -> one hot encoding
y_data = np.array([
[1, 0, 0], # other[0]
[0, 1, 0], # mammal[1]
[0, 0, 1], # bird[2]
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]
])
# 2. placeholder variables X, Y
X = tf.placeholder(dtype = tf.float32, shape = [None, 2]) # [n_samples, n_inputs]
Y = tf.placeholder(dtype = tf.float32, shape = [None, 3]) # [n_samples, n_outputs]
# 3. w, b
w = tf.Variable(tf.random_normal([2,3])) # [n_inputs, n_outputs]
b = tf.Variable(tf.random_normal([3])) # [n_outputs]
# 4. softmax classifier
# 1) regression equation : predictions (logits)
model = tf.matmul(X, w) + b # regression model
# softmax(predictions)
softmax = tf.nn.softmax(model) # activation mapping to (0 ~ 1), e.g. y1 : 0.8, y2 : 0.1, y3 : 0.1
# 2) loss function : cross entropy : -sum(Y * log(model))
# built-in form; note it is overwritten by the manual form on the next line
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels = Y, logits = model))
loss = -tf.reduce_mean(Y * tf.log(softmax) + (1 - Y) * tf.log(1 - softmax))
# 3) optimizer : minimize the loss (updates w, b)
train = tf.train.AdamOptimizer(0.1).minimize(loss) # minimize the error
# 4) argmax() : decode one-hot vectors back to class indices
y_pred = tf.argmax(softmax, axis = 1)
y_true = tf.argmax(Y, axis = 1)
# 5. session : train and evaluate
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# iterative training : 500 steps
for step in range(500):
_, loss_val = sess.run([train, loss], feed_dict = {X:x_data, Y: y_data})
if (step + 1) % 50 == 0:
print(f"step = {step + 1}, loss = {loss_val}")
# model output (softmax probabilities)
print(sess.run(softmax, feed_dict = {X:x_data}))
y_pred_re = sess.run(y_pred, feed_dict = {X:x_data}) # predictions
y_true_re = sess.run(y_true, feed_dict = {Y:y_data}) # ground truth
acc = accuracy_score(y_true_re, y_pred_re)
print("y_pred =", y_pred_re)
print("y_true =", y_true_re)
print("accuracy =", acc)
'''
y_pred = [0 1 1 0 0 1]
y_true = [0 1 2 0 0 2]
accuracy = 0.6666666666666666
'''
|
[
"hyungm3@gmail.com"
] |
hyungm3@gmail.com
|
5dda0c6d02518f56ac74730f1e83e27e23506133
|
4d718292ec9f90444eeda13d18febb10757da894
|
/Exercices/6/Q Sauvegarde.py
|
a8020c51bbc6b578e78ff326e200ba24eb36fbe6
|
[] |
no_license
|
rverschuren/Info
|
b40fb04a6260dacfc95d12e63c99abd82b140e06
|
c9aa0bdc1b026c8ba8134b878b5fae7d49d75e19
|
refs/heads/master
| 2020-04-16T07:29:49.847812
| 2019-01-14T14:50:18
| 2019-01-14T14:50:18
| 165,389,281
| 1
| 2
| null | 2019-01-12T18:56:01
| 2019-01-12T13:12:46
|
Python
|
UTF-8
|
Python
| false
| false
| 389
|
py
|
#Wiaux Bastien
def save_data(filename, life, mana, position_x, position_y):
with open(filename,"w") as fichier:
fichier.write("{}\n{}\n{}\n{}".format(life, mana, position_x, position_y))
def load_data(filename):
with open(filename,'r') as fichier:
data = [int(i) for i in fichier.read().strip().split("\n")]
return data[0],data[1],data[2],data[3]
|
[
"bastien.wiaux@gmail.com"
] |
bastien.wiaux@gmail.com
|
e9fe621b0279a36d6e766bad4eb8aebbfc560b6d
|
3f01eb21ce140e6e8d6e9f6c037a0ed3acfd0e1b
|
/home/context_processors.py
|
fa9f1a4cf3ebef508c48420baf109b26c74ba6b1
|
[
"MIT"
] |
permissive
|
manushah17/Capstone_2019
|
01d45e3d8f925dac88c1911d853ec1b8762d5b1f
|
381094fc778906810e13d7611bfdb2c74cac326e
|
refs/heads/master
| 2022-12-16T21:08:29.385969
| 2019-09-07T15:08:42
| 2019-09-07T15:08:42
| 206,984,224
| 0
| 0
|
MIT
| 2022-12-08T01:22:56
| 2019-09-07T15:04:22
|
HTML
|
UTF-8
|
Python
| false
| false
| 142
|
py
|
from django.conf import settings
def global_settings(request):
return {
'GOOGLE_MAPS_API_KEY': settings.GOOGLE_MAPS_API_KEY
}
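# A hedged activation sketch: list this function under the template
# context_processors in settings.py, e.g. 'home.context_processors.global_settings',
# after which GOOGLE_MAPS_API_KEY is available in every template.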
|
[
"manushah@unomaha.edu"
] |
manushah@unomaha.edu
|
f413f0f6bb08ff5c76498ec2113b6004d38abe5c
|
28a462a28f443c285ca5efec181ebe36b147c167
|
/tests/compile/basic/recent/FromPropertyDescriptor.spec
|
900befebba85c916d2b96653e1366f4b02b2d2c9
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
kaist-plrg/jstar
|
63e71f9156860dc21cccc33a9f6c638dfee448ea
|
1282919127ea18a7e40c7a55e63a1ddaaf7d9db4
|
refs/heads/main
| 2022-07-22T08:12:34.947712
| 2022-02-27T04:19:33
| 2022-02-27T11:06:14
| 384,045,526
| 6
| 4
|
NOASSERTION
| 2022-02-27T11:05:26
| 2021-07-08T07:53:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,107
|
spec
|
1. If _Desc_ is *undefined*, return *undefined*.
1. Let _obj_ be ! OrdinaryObjectCreate(%Object.prototype%).
1. Assert: _obj_ is an extensible ordinary object with no own properties.
1. If _Desc_ has a [[Value]] field, then
  1. Perform ! CreateDataPropertyOrThrow(_obj_, *"value"*, _Desc_.[[Value]]).
1. If _Desc_ has a [[Writable]] field, then
  1. Perform ! CreateDataPropertyOrThrow(_obj_, *"writable"*, _Desc_.[[Writable]]).
1. If _Desc_ has a [[Get]] field, then
  1. Perform ! CreateDataPropertyOrThrow(_obj_, *"get"*, _Desc_.[[Get]]).
1. If _Desc_ has a [[Set]] field, then
  1. Perform ! CreateDataPropertyOrThrow(_obj_, *"set"*, _Desc_.[[Set]]).
1. If _Desc_ has an [[Enumerable]] field, then
  1. Perform ! CreateDataPropertyOrThrow(_obj_, *"enumerable"*, _Desc_.[[Enumerable]]).
1. If _Desc_ has a [[Configurable]] field, then
  1. Perform ! CreateDataPropertyOrThrow(_obj_, *"configurable"*, _Desc_.[[Configurable]]).
1. Return _obj_.
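A rough Python sketch of the algorithm above (illustrative only; plain dicts stand in for spec Records and ordinary objects, so this is not how an engine would implement it):

  def from_property_descriptor(desc):
      if desc is None:              # step 1: undefined -> undefined
          return None
      obj = {}                      # steps 2-3: fresh object, no own properties
      # steps 4-9: copy over whichever descriptor fields are present
      for field in ('value', 'writable', 'get', 'set', 'enumerable', 'configurable'):
          if field in desc:
              obj[field] = desc[field]
      return obj                    # step 10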
|
[
"h2oche22@gmail.com"
] |
h2oche22@gmail.com
|
cefbf59aba6168247c1a7dd09e5d28cd50a6b679
|
941cbcc815da9927c16291fd0cf341fdf26d4b4b
|
/Web網頁框架/框架(Django)/200502_cookie&session/mysite/app01/views.py
|
bea45499ae8ecefde206c362d44a68fecc8ee90c
|
[] |
no_license
|
narru888/PythonWork-py37-
|
27de004157efdf42972f66b20872e17de8bc676c
|
f9cb1670fb84b9eb8aaaf7cd5cf9139ab4ef4053
|
refs/heads/master
| 2022-12-09T04:58:06.068302
| 2020-09-23T09:47:40
| 2020-09-23T09:47:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,612
|
py
|
from django.shortcuts import render, redirect
def login(request):
print('COOKIES', request.COOKIES)
print('SESSION', request.session)
if request.method == 'POST':
name = request.POST.get('user')
pwd = request.POST.get('pwd')
if name == 'sb' and pwd == '123':
# cookie-only version (insecure)
# ret = redirect('/index/')
# # set_cookie(key, values, lifetime in seconds)
# ret.set_cookie('data', {'user': name, 'pwd': pwd}, max_age=5) # store the info in the cookie
# return ret
# COOKIE & SESSION
request.session['is_login'] = True
request.session['user'] = name
# request.session.set_expiry(value)
# If value is an integer, the session expires after that many seconds.
# If value is a datetime or timedelta, the session expires at that point in time.
# If value is 0, the session expires when the user closes the browser.
# If value is None, the session falls back to the global session expiry policy.
request.session.set_expiry(10)
return redirect('/index/')
return render(request, 'login.html')
def index(request):
# cookie-only version (insecure)
# if request.COOKIES.get('data', None): # returns None if the cookie is missing
# user = request.COOKIES.get('data')
# return render(request, 'index.html', locals())
# COOKIE & SESSION
if request.session.get('is_login', None):
user = request.session.get('user')
return render(request, 'index.html', locals())
else:
return redirect('/login/')
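# A hedged companion sketch (not in the original app): a logout view that
# clears the session via Django's documented API.
# def logout(request):
#     request.session.flush() # deletes the session data and its cookie
#     return redirect('/login/')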
|
[
"as124122323@gmail.com"
] |
as124122323@gmail.com
|
53efc7d0e74f2edd93a30082008734d0a0524e74
|
72c6e91223602b29ae34499f2813d5197dcf5f00
|
/p15_three_sum.py
|
59dc6ae80fd912eef3f7c89fe42a18ff5dacc7e2
|
[] |
no_license
|
koyo922/leetcode
|
d730d6aab6ee368b75ca59bce8492c548e7e2d6d
|
e8e561adea5e92cd00e374b613ea52a64be4e766
|
refs/heads/master
| 2020-08-02T17:32:59.512106
| 2019-12-02T11:04:53
| 2019-12-02T11:04:53
| 211,447,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,694
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 expandtab number
"""
Given an array nums of n integers, are there elements a, b, c in nums such that a + b + c = 0? Find all unique triplets that sum to zero.
Note: the answer must not contain duplicate triplets.
For example, given the array nums = [-1, 0, 1, 2, -1, -4],
the set of triplets satisfying the requirement is:
[
[-1, 0, 1],
[-1, -1, 2]
]
Source: LeetCode (LeetCode-CN)
Link: https://leetcode-cn.com/problems/3sum
Copyright belongs to LingKou Network. Commercial reuse requires official authorization; non-commercial reuse must credit the source.
Authors: qianweishuo<qzy922@gmail.com>
Date: 2019/7/4 8:46 AM
"""
class Solution(object):
def bruteforce(self, nums):
res = set()
nums.sort() # sorting groups duplicates together and eases deduplication
for i, x in enumerate(nums):
for j, y in enumerate(nums[i + 1:], start=i + 1): # note the start offset
for z in nums[j + 1:]:
if x + y + z == 0:
res.add((x, y, z))
return list(res)
def using_set(self, nums):
if len(nums) < 3: # edge case
return []
res = set()
nums.sort() # sort first to ease deduplication
for i, v in enumerate(nums[:-2]):
if i - 1 >= 0 and nums[i - 1] == v: # skip values equal to the left neighbour to avoid duplicate solutions
continue
trap = set() # like two-sum, but values (not indices) are returned, so a set replaces the dict
for x in nums[i + 1:]:
if x in trap:
res.add((v, -v - x, x))
else:
trap.add(-v - x)
return res
def using_pinch(self, nums):
res = set()
nums.sort()
for i, v in enumerate(nums[:-2]):
if i - 1 >= 0 and nums[i - 1] == v: # skip duplicate values
continue
l, r = i + 1, len(nums) - 1 # pinch from both ends in place, avoiding extra space
while l < r:
s = v + nums[l] + nums[r]
if s < 0:
l += 1
elif s > 0:
r -= 1
else:
res.add((v, nums[l], nums[r]))
# if res were a list, deduplicate manually here: while l<r and nums[l+1]==nums[l]: l+=1
l += 1
r -= 1
return res
def threeSum(self, nums):
# return self.bruteforce(nums)
# return self.using_set(nums)
return self.using_pinch(nums)
if __name__ == '__main__':
print(Solution().threeSum([-1, 0, 1, 2, -1, -4]))
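# Expected output for the sample input, as a set of value triples:
# {(-1, -1, 2), (-1, 0, 1)} (inner ordering may vary)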
|
[
"koyo922@qq.com"
] |
koyo922@qq.com
|
f26e81267ca6aa0a3f6dce527dfefd1185dacee0
|
3da6b8a0c049a403374e787149d9523012a1f0fc
|
/Coder_Old/pycharm_daima/爬虫大师班/10-关系型数据库/数据库操作.py
|
8d6f74f0337cf0e51be57211571c88b065331126
|
[] |
no_license
|
AndersonHJB/PyCharm_Coder
|
d65250d943e84b523f022f65ef74b13e7c5bc348
|
32f2866f68cc3a391795247d6aba69a7156e6196
|
refs/heads/master
| 2022-07-25T11:43:58.057376
| 2021-08-03T02:50:01
| 2021-08-03T02:50:01
| 348,922,058
| 3
| 3
| null | 2021-09-05T02:20:10
| 2021-03-18T02:57:16
|
Python
|
UTF-8
|
Python
| false
| false
| 7,139
|
py
|
# !/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author: AI悦创 @DateTime: 2020/2/16 16:02 @Function: database operations Development_tool: PyCharm
# code is far away from bugs with the god animal protecting
# I love animals. They taste delicious.
from sqlalchemy import create_engine
from sqlalchemy import Table,Column,String,Integer,MetaData
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base() # create the declarative base class
engine = create_engine(
"mysql+pymysql://root:123456@127.0.0.1:3306/test",
max_overflow = 5, # max extra connections allowed beyond the pool size
pool_size = 10, # connection pool size
echo = True, # echo SQL statements for debugging
)
metadata = MetaData()
class Host(Base):
# table name: host
__tablename__ = 'host'
# table schema
# primary_key marks the primary key
# unique enforces uniqueness
# nullable allows NULL values
id = Column(Integer, primary_key=True, autoincrement=True)
hostname = Column(String(64), unique=True, nullable=False)
ip_addr = Column(String(128),unique=True, nullable=False)
port = Column(Integer, default=22)
# @classmethod
# def filter(cls, param):
# pass
# Base.metadata.create_all(engine) # create the tables
# res = sess.query(Host).filter_by(id=1).all()
if __name__ == '__main__':
Session = sessionmaker(bind = engine)
sess = Session()
res = sess.query(Host).filter(Host.id==1)
for r in res:
print(r)
print(r.hostname)
print(r.ip_addr)
sess.commit()
h = Host(hostname='test1', ip_addr='127.0.0.1')
h2 = Host(hostname='test2', ip_addr='192.168.0.1', port=8080)
h3 = Host(hostname='test3', ip_addr='192.170.1.0', port=3030)
# sess.query(Host).filter(Host.id==1).update({'port':9999})
# sess.commit()
# insert multiple rows in a loop
# if __name__ == '__main__':
# Session = sessionmaker(bind = engine)
# sess = Session()
# data_list = ['AI悦创', 'aiyc', 12, 1314.520, '黄']
# for index, data in enumerate(data_list):
# h = Host(hostname=data,ip_addr=index)
# # h = Host(hostname='{}'.format(data), ip_addr='{}'.format(index))
# sess.add(h)
# sess.commit()
# user = Table('mybank', metadata,
# Column('id', Integer, primary_key=True, autoincrement=True),
# Column('name', String(10))
# )
# connect = engine.connect()
# connect.execute(user.delete().where(user.c.id==1))
# connect.close()
# # res = connect.execute(select([user.c.name,]))
# # # res = connect.execute(select([user.c.id==1,user.c.id==2]))
# # print(res.fetchall())
# # connect.close()
# # metadata.create_all(engine)
# # conn = engine.connect()
# # conn.execute(user.update().where(user.c.id==1).values(name='Python'))
# # conn.close()
# # res = engine.execute('select * from user2020')
# # print(res)
# # for i in res:
# # print(i)
# # print(i[0])
# # print(i[1])
# # print(type(i))
# # engine.execute("insert into user2020 (City, name) values ('AIYC', 'huang')")
# # insert several rows at once; the column order need not match the table definition
#
# # engine.execute("update user2020 set id=5, name='Python' where id=1")
# # engine.execute("update user2020 set name='Python',id=10 where id=5")
#
#
#
# # engine.execute('update <target table> set <assignments> where <rows to modify>')
#
# # engine.execute("insert into user2020 (name) values ('AIYC')")
#
# # metadata = MetaData() # fetch the metadata describing the database
# # data = Table('user', metadata,
# # Column('id', Integer, primary_key = True, autoincrement=True),
# # Column('name', String(10)),
# # Column('City', String(255))
# # )
# # # metadata.create_all(engine)
# # connect = engine.connect()
# # # connect.execute(data.update(data.c.id==1).values(City="Beijing",name="AI悦创"))
# # # connect.execute(select([]))
# # connect.close()
# from sqlalchemy import create_engine,MetaData,Table,engine
# from sqlalchemy import Column,String,Integer
#
#
# engine = create_engine(
# "mysql+pymysql://root:123456@127.0.0.1:3306/test",# (里面的 root 要填写你的密码),注意:mysql+pymysql 之间不要加空格
# # "mysql + pymysql://root:root@localhost/test",
# max_overflow = 5, # 超过连接池大小之后,外最多可以创建的链接
# pool_size = 10, # 连接池大小
# echo = True, # 调试信息展示
# )
#
# metadata = MetaData() # 获得元数据,介绍数据库
#
# # 定义表
# user_table = Table('user_table', metadata,
# Column("id", Integer, primary_key=True,autoincrement=True),
# Column("教学表",String(10)))
# metadata.create_all(engine) # 创建表
#
# # 修改表中数据
# conn = engine.connect() # 获取一个连接
# # 增加数据
# conn.execute(user_table.insert(),{"教学表":"hjb_two"})
# # # 更新数据-更新全部数据
# conn.execute(user_table.update(),{"教学表":"AI悦创"})
# # # 更新指定数据
# conn.execute(user_table.update().where(user_table.c.id==1).values(id=1000))
# conn.execute(user_table.update().where(user_table.c.id==2).values(教学表='AIYC'))
# # # where(user.c.id==2) 查找的位置,或者说要修改的位置
# conn.close()
#
# from sqlalchemy import create_engine,MetaData,Table,engine
# from sqlalchemy import Column,String,Integer
#
#
# engine = create_engine(
# "mysql+pymysql://root:123456@127.0.0.1:3306/test",
# max_overflow = 5, # max extra connections allowed beyond the pool size
# pool_size = 10, # connection pool size
# echo = True, # echo SQL statements for debugging
# )
#
# metadata = MetaData() # fetch the metadata describing the database
#
# # define the table
# user = Table('mybank', metadata,
# Column("id", Integer, primary_key=True,autoincrement=True),
# Column("教学表",String(10)))
# metadata.create_all(engine) # create the tables
# # ---------------------------- insert data ----------------------------------
# # modify data in the table
# conn = engine.connect() # obtain a connection
# # insert rows
# conn.execute(user.insert(),{"教学表":"hjb_two"})
# conn.close()
# # ---------------------------- update data ----------------------------------
# # # update data - update every row
# conn.execute(user.update(),{"教学表":"AI悦创"})
# # # update specific rows
# conn.execute(user.update().where(user.c.id==1).values(id=1000))
# conn.execute(user.update().where(user.c.id==2).values(教学表='AIYC'))
# # update several columns at once
# conn.execute(user.update().where(user.c.id==1).values(City="Beijing",name="AI悦创"))
# # # where(user.c.id==2) selects which rows to look up / modify
# conn.close()
# # ---------------------------- query data ----------------------------------
# # requires importing select
# from sqlalchemy import select
# conn = engine.execute()
# res = conn.execute(select([user.c.name,]))
# # res = conn.execute(select([user.c.id==1, user.c.id==2]))
# print(res.fetchall())
# conn.close()
# # ---------------------------- delete data ----------------------------------
# conn = engine.execute()
# conn.execute(user.delete().where(user.c.id==1))
# conn.close()
|
[
"1432803776@qq.com"
] |
1432803776@qq.com
|
5fa70f6b2467740079445fbd4bd24d17e263db56
|
e7bb432a333c74f90f7bd95f6cd6c3647552d8ff
|
/uniq.py
|
1b6a247631f40f107df89ecf6b7f86667c2e3097
|
[] |
no_license
|
ponyatov/world
|
6e44dcd7f0aa3821232eaf5c00f543f2430d80eb
|
7705422a444419d04a9a6c13826f4fda09dcfc37
|
refs/heads/master
| 2021-01-13T06:55:54.261074
| 2017-02-08T15:13:23
| 2017-02-08T15:13:23
| 81,317,561
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
registry = {}
class Data(object):
    def __repr__(self): return str(self.val)
    def __new__(cls, V):
        # lookup for existing object: reuse the interned instance when present
        if V in registry: return registry[V]
        obj = object.__new__(cls)
        obj.val = V
        registry[V] = obj # register created object
        return obj
print registry,Data(0)
print registry,Data(0)
print registry,Data(1)
print registry,Data(1)
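# Expected output with the interning above (the registry is printed before
# each Data(...) argument is evaluated):
# {} 0
# {0: 0} 0
# {0: 0} 1
# {0: 0, 1: 1} 1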
|
[
"dponyatov@gmail.com"
] |
dponyatov@gmail.com
|
7dd104b89946246aa48bfd8fc5c1cdb54c5b4ff1
|
ee441564d68e45fa8df6828d6fc724dce4216028
|
/test_R.py
|
da5c16d5ead939b36d0bbd69e14ae17ca4beae1f
|
[] |
no_license
|
abandonsea/Revisiting-Feature-Fusion-for-RGB-T-Salient-Object-Detection
|
3bfe78cbb181d17e177404c30d65f15b0d675098
|
79f6c2234c87b8a6a0237a8d98aeb4f2be1fc0fe
|
refs/heads/master
| 2023-08-27T08:33:06.882790
| 2021-10-21T15:41:34
| 2021-10-21T15:41:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,341
|
py
|
import cv2
import numpy as np
import T_train
import os
import sys
import tensorflow as tf
import time
import vgg16
import math
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
#img_t_mean=[101.515, 78.315, 140.606]
#img_t_mean=[85.971, 56.608, 151.944]
#img_t_mean=[127.493, 126.314, 127.453] #small
def load_img_list(dataset):
if dataset == 'MSRA-B':
path = 'dataset/MSRA-B/image'
elif dataset == 'DUT-OMRON':
path = 'dataset/DUT-OMRON/DUT-OMRON-image'
imgs = os.listdir(path)
return path, imgs
def image_entropy(input):
tmp = []
for i in range(256):
tmp.append(0)
val = 0
k = 0
res = 0
#image = input.convert('L')
img = np.array(input)
for i in range(len(img)):
for j in range(len(img[i])):
val = img[i][j]
tmp[val] = float(tmp[val] + 1)
k = float(k + 1)
for i in range(len(tmp)):
tmp[i] = float(tmp[i] / k)
for i in range(len(tmp)):
if(tmp[i] == 0):
res = res
else:
res = float(res - tmp[i] * (math.log(tmp[i]) / math.log(2.0)))
res_ = res / 8.0
return res_
if __name__ == "__main__":
model = T_train.Model()
model.build_model()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
img_size = T_train.img_size
label_size = T_train.label_size
ckpt = tf.train.get_checkpoint_state('Model-thermal/')
saver = tf.train.Saver()
saver.restore(sess, 'Model-thermal/model.ckpt-14')
datasets = ['MSRA-B', 'DUT-OMRON']
if not os.path.exists('Result'):
os.mkdir('Result')
#for dataset in datasets:
#path, imgs = load_img_list(dataset)
#save_dir = 'Result/' + dataset
#if not os.path.exists(save_dir):
#os.mkdir(save_dir)
#save_dir = 'Result/' + dataset + '/NLDF_'
#if not os.path.exists(save_dir):
#os.mkdir(save_dir)
imgs_r = os.listdir('DATA/thermal-test')
for f_img_r in imgs_r:
img_r = cv2.imread(os.path.join('DATA/thermal-test', f_img_r))
img_name, ext = os.path.splitext(f_img_r)
if img_r is not None:
#ori_img = img.copy()
img_shape = img_r.shape
img_r = cv2.resize(img_r, (img_size, img_size)) #- R_train.img_r_mean
img_r = img_r.astype(np.float32) / 255.
img_r = img_r.reshape((1, img_size, img_size, 3))
start_time = time.time()
result = sess.run(model.Prob,
feed_dict={model.input_holder_t: img_r})
print("--- %s seconds ---" % (time.time() - start_time))
result = np.reshape(result, (label_size, label_size, 2))
result = result[:, :, 0]
result = cv2.resize(np.squeeze(result), (img_shape[1], img_shape[0]))
save_name = os.path.join('Result', img_name+'.png')
cv2.imwrite(save_name, (result*255).astype(np.uint8))
sess.close()
|
[
"noreply@github.com"
] |
abandonsea.noreply@github.com
|
9eb9671c3137c3bce0ad495c24dce9f83e498907
|
270f1b82d494c474df0bf02f60d106f30e6d3fa3
|
/src/drivers/driver.py
|
08d668314e1d6ade5820de9c60810ecb3994b599
|
[
"MIT"
] |
permissive
|
KDahlgren/nyctea
|
cbb0f7c1023d13f600e90c864c070592c3240d74
|
725940d46a63ca4189283bcc716ad0c96aab48ec
|
refs/heads/master
| 2021-05-11T02:28:24.086022
| 2018-02-03T07:42:03
| 2018-02-03T07:42:03
| 118,362,870
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
#!/usr/bin/env python
'''
driver.py
'''
# **************************************** #
#############
# IMPORTS #
#############
# standard python packages
import inspect, itertools, logging, os, sqlite3, string, sys, time
# ------------------------------------------------------ #
# import sibling packages HERE!!!
if not os.path.abspath( __file__ + "/../../../lib/iapyx/src" ) in sys.path :
sys.path.append( os.path.abspath( __file__ + "/../../../lib/iapyx/src" ) )
from dedt import dedt, dedalusParser
from utils import parseCommandLineInput, tools
from evaluators import c4_evaluator
# **************************************** #
####################
# CLASS #
####################
|
[
"kdahlgren15@gmail.com"
] |
kdahlgren15@gmail.com
|
3fcc00faa020fb3330f1af7af5b154afb0be26ce
|
51d0377511a5da902033fb9d80184db0e096fe2c
|
/31-customer-analytics-and-ab-testing-in-python/4-analyzing-ab-testing-results/05-understanding-confidence-intervals.py
|
7cdaf7e13d6506a7916e804755c8cd0d7ad97aa8
|
[] |
no_license
|
sashakrasnov/datacamp
|
c28c6bda178163337baed646220b2f7dcc36047d
|
759f4cec297883907e21118f24a3449d84c80761
|
refs/heads/master
| 2021-12-07T02:54:51.190672
| 2021-09-17T21:05:29
| 2021-09-17T21:05:29
| 157,093,632
| 6
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,767
|
py
|
'''
Understanding confidence intervals
In this exercise, you'll develop your intuition for how various parameter values impact confidence intervals. Specifically, you will explore through the get_ci() function how changes widen or tighten the confidence interval. This is the function signature, where cl is the confidence level and sd is the standard deviation.
'''
import pandas as pd
import scipy.stats as sci
def get_ci(value, cl, sd):
loc = sci.norm.ppf(1 - cl/2)
rng_val = sci.norm.cdf(loc - value/sd)
lwr_bnd = value - rng_val
upr_bnd = value + rng_val
return_val = (lwr_bnd, upr_bnd)
return(return_val)
'''
INSTRUCTIONS 1/3
* Find the confidence interval with a value of 1, a confidence level of 0.975 and a standard deviation of 0.5.
'''
# Compute and print the confidence interval
confidence_interval = get_ci(1, 0.975, 0.5)
print(confidence_interval)
'''
INSTRUCTIONS 2/3
* Repeat the calculation, updating the confidence level to 0.95 and the standard deviation to 2. Leave the value as 1.
'''
# Compute and print the confidence interval
confidence_interval = get_ci(1, 0.95, 2)
print(confidence_interval)
'''
INSTRUCTIONS 3/3
* Finally, update your code such that the standard deviation is 0.001 while leaving the confidence level and value the same as the previous exercise part. Compare the three confidence intervals outputted. How do they seem to relate to the parameters used?
'''
# Compute and print the confidence interval
confidence_interval = get_ci(1, 0.95, 0.001)
print(confidence_interval)
'''
(0.9755040421682947, 1.0244959578317054)
(0.6690506448818785, 1.3309493551181215)
(1.0, 1.0)
As our standard deviation decreases so too does the width of our confidence interval. Great work!
'''
|
[
"a@skrasnov.com"
] |
a@skrasnov.com
|
b29bffcc2e7d2e3616ff332e06f3397623fdd0ed
|
132787c692753ce56cc87abce863af61367e4c41
|
/tests/test_flexmaxpool.py
|
8c82d49dc0c9325e94e1e659e7ba73da72d85bae
|
[
"Apache-2.0"
] |
permissive
|
LMescheder/torch_flexconvs
|
4cff0b2195e9c0db4bdfbfe4b59d6bf6fdddebae
|
72a6aa4eb7dd029b6c446def6031ce56b9fb8bfd
|
refs/heads/master
| 2022-11-16T04:32:53.550675
| 2019-02-21T16:08:21
| 2019-02-21T16:08:21
| 173,277,013
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,344
|
py
|
import torch
from torch.autograd import gradcheck
from scipy.spatial import cKDTree
from torch_flexconv import FlexMaxPool, flex_maxpool
def test_flexmaxpool():
B = 16
p = torch.rand(B, 3, 1000)
p_np = p.squeeze().numpy()
idx_nn = []
for i in range(B):
idx = cKDTree(p_np[i].T).query(p_np[i].T, k=12)[1]
idx = torch.IntTensor(idx.T)
idx_nn.append(idx)
idx_nn = torch.stack(idx_nn, dim=0)
net = torch.rand(B, 32, 1000)
model = FlexMaxPool()
out1 = model(net, idx_nn)
net = net.cuda()
idx_nn = idx_nn.cuda()
model = model.cuda()
out2 = model(net, idx_nn).cpu()
print(((out1 - out2).abs()/out1.abs()).mean())
def test_flexmaxpool_grads():
B = 16
in_channels = 8
n_points = 20
p = torch.rand(B, 3, n_points)
p_np = p.squeeze().numpy()
idx_nn = []
for i in range(B):
idx = cKDTree(p_np[i].T).query(p_np[i].T, k=3)[1]
idx = torch.IntTensor(idx.T)
idx_nn.append(idx)
idx_nn = torch.stack(idx_nn, dim=0)
feat = torch.rand(B, in_channels, n_points)
# idx_nn = idx_nn.cuda()
# feat = feat.cuda()
feat = feat.to(torch.float64)
p = p.to(torch.float64)
feat.requires_grad_()
gradcheck(
flex_maxpool,
[feat, idx_nn])
test_flexmaxpool()
test_flexmaxpool_grads()
|
[
"lars.mescheder@tuebingen.mpg.de"
] |
lars.mescheder@tuebingen.mpg.de
|
1fc85c384cbc924fac454a4c31e5b8c74a901880
|
600a398c5bfebd8bb7aa8dd8349c710bee719d3a
|
/PRL/genK.py
|
e89375f819ca451210803b2a60591e3f7a5f8941
|
[
"MIT"
] |
permissive
|
balasbk/game-theory
|
00f57f206736953a44a7f5a23edc2a82a29474c0
|
958e093e64799e2dd445d18bd9966251270f81e7
|
refs/heads/master
| 2022-04-26T01:01:26.363820
| 2020-04-28T16:11:48
| 2020-04-28T16:11:48
| 259,680,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,193
|
py
|
import numpy as np
import random
import math
#TODO documentation
def apply2prefs(k_fun, p1, p2):
(x1p, y1p), (x1n, y1n) = p1
(x2p, y2p), (x2n, y2n) = p2
res = 0.
if y1p == y2p:
res += k_fun(x1p, x2p)
if y1n == y2n:
res += k_fun(x1n, x2n)
if y1p == y2n:
res -= k_fun(x1p, x2n)
if y1n == y2p:
res -= k_fun(x1n, x2p)
return res
class GenK:
def __init__(self):
pass
def get_random_kernel(self):
pass
def get_pref_kernel_function(self):
pass
def get_kernel_function(self):
pass
class GenKList(GenK):
def __init__(self, k_list):
self.kernel_list = k_list
def __repr__(self):
return "GenKList(n_kernel=%d)" %len(self.kernel_list)
def get_random_kernel(self):
return random.randint(0, len(self.kernel_list)-1)
def get_pref_kernel_function(self, d):
return lambda p1, p2: apply2prefs(self.get_kernel_function(d), p1, p2)
def get_kernel_function(self, d):
return self.kernel_list[d]
class GenHPK(GenK):
def __init__(self, min_deg=2, max_deg=2):
self.min_deg = min_deg
self.max_deg = max(max_deg, min_deg)
def __repr__(self):
return "GenHPK(dmin=%d, dmax=%d)" %(self.min_deg, self.max_deg)
def get_random_kernel(self):
return random.randint(self.min_deg, self.max_deg)
def get_pref_kernel_function(self, degree):
return lambda p1, p2: apply2prefs(self.get_kernel_function(degree), p1, p2)
def get_kernel_function(self, degree):
return lambda x,z: np.dot(x,z)**degree
class GenRBFK(GenKList):
def __init__(self, gamma_range):
self.gamma_range = gamma_range
def __repr__(self):
return "GenRBFK(gamma_range=%s)" %(self.gamma_range)
def get_random_kernel(self):
return random.choice(self.gamma_range)
def get_pref_kernel_function(self, gamma):
return lambda p1, p2: apply2prefs(self.get_kernel_function(gamma), p1, p2)
def get_kernel_function(self, gamma):
return lambda x,z: math.exp(-gamma * np.linalg.norm(x-z)**2)
#return lambda x,z: math.exp(-gamma * np.sum((x-z)**2))
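# A hedged usage sketch (the gamma range and vectors are illustrative):
# gen = GenRBFK(gamma_range=[0.1, 1.0])
# gamma = gen.get_random_kernel()
# k_pref = gen.get_pref_kernel_function(gamma)
# p1 = ((np.array([0.0]), 1), (np.array([1.0]), -1)) # (positive, negative) pair
# p2 = ((np.array([0.5]), 1), (np.array([2.0]), -1))
# print(k_pref(p1, p2))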
|
[
"you@example.com"
] |
you@example.com
|
59df0dab074bd06ac5b02c4d0e64f12409454b00
|
5ac30246e65c6640ef71c737cdb5514b90ce5818
|
/audit_shell.py
|
878a782b17de9d90a895f913e99c7feb4ab59c99
|
[] |
no_license
|
yanlingsishao/lijump
|
bb03357797a5784849b35a7ab85f84a645d9bae7
|
c31e10d6b113e04fa5da77aac7a68a76cd2e34a1
|
refs/heads/master
| 2020-03-27T04:17:09.894332
| 2018-08-24T02:28:11
| 2018-08-24T02:28:11
| 145,927,644
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018-6-14 17:02
# @Author : Jerry Wang
# @Site :
# @File : audit_shell.py
# @Software: PyCharm
import sys,os
if __name__ == '__main__':
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "LuffyAudit.settings")
import django
django.setup() # manually register all of Django's apps
from audit.backend import user_interactive
obj = user_interactive.UserShell(sys.argv)
obj.start()
|
[
"1209394579@qq.com"
] |
1209394579@qq.com
|
e530aaa767d12ea93c377b257ea84936bbd7066a
|
7cbc0963d88a5fb4eb241e2a55fd791098dd5f16
|
/capp/admin.py
|
5a5a374ca6b9cac630382226b588b284e51c8475
|
[] |
no_license
|
Kipngetich33/test-repo
|
a7a4ebeb4eff117db5cff90f40a2b34ed3a3fa66
|
e08e1a8a12196dcf806e2270f8efe930405a5379
|
refs/heads/master
| 2022-12-12T15:54:56.401732
| 2018-05-27T08:54:13
| 2018-05-27T08:54:13
| 134,891,667
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 351
|
py
|
from django.contrib import admin
from .models import Profile, Question, Comment, Session, Record, Doctor, Inpatient
# Register your models here.
admin.site.register(Inpatient)
admin.site.register(Profile)
admin.site.register(Comment)
admin.site.register(Question)
admin.site.register(Session)
admin.site.register(Record)
admin.site.register(Doctor)
|
[
"kephaokari@gmail.com"
] |
kephaokari@gmail.com
|
7cf617b5c56a3f2ea11220ca71f5a197cb36b863
|
050fc5ca698dfd7612dee42aa980fc7b5eee40a2
|
/skywalking/agent/protocol/interceptors_aio.py
|
1ade5fb7ffa878a86eac085cdaa32514df038639
|
[
"Apache-2.0"
] |
permissive
|
apache/skywalking-python
|
8ac6ce06630c519f9984a45e74c1fcc88cf5b9d6
|
1a360228c63cd246dd4c5dd8e1f09bdd5556ad7d
|
refs/heads/master
| 2023-09-05T02:45:56.225937
| 2023-08-28T22:19:24
| 2023-08-28T22:19:24
| 261,456,329
| 178
| 122
|
Apache-2.0
| 2023-08-28T22:19:26
| 2020-05-05T12:13:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,420
|
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
import grpc
class _ClientInterceptorAsync(
grpc.aio.UnaryUnaryClientInterceptor,
grpc.aio.UnaryStreamClientInterceptor,
grpc.aio.StreamUnaryClientInterceptor,
grpc.aio.StreamStreamClientInterceptor
):
def __init__(self, interceptor_async_function):
self._fn = interceptor_async_function
async def intercept_unary_unary(self, continuation, client_call_details, request):
new_details, new_request_iterator, postprocess = await \
self._fn(client_call_details, iter((request,)), False, False)
response = await continuation(new_details, next(new_request_iterator))
return (await postprocess(response)) if postprocess else response
async def intercept_unary_stream(self, continuation, client_call_details, request):
new_details, new_request_iterator, postprocess = await \
self._fn(client_call_details, iter((request,)), False, True)
response_it = await continuation(new_details, next(new_request_iterator))
return (await postprocess(response_it)) if postprocess else response_it
async def intercept_stream_unary(self, continuation, client_call_details, request_iterator):
new_details, new_request_iterator, postprocess = await \
self._fn(client_call_details, request_iterator, True, False)
response = await continuation(new_details, new_request_iterator)
return (await postprocess(response)) if postprocess else response
async def intercept_stream_stream(self, continuation, client_call_details, request_iterator):
new_details, new_request_iterator, postprocess = await \
self._fn(client_call_details, request_iterator, True, True)
response_it = await continuation(new_details, new_request_iterator)
return (await postprocess(response_it)) if postprocess else response_it
def create(intercept_async_call):
return _ClientInterceptorAsync(intercept_async_call)
ClientCallDetails = namedtuple('ClientCallDetails', ('method', 'timeout', 'metadata', 'credentials'))
def header_adder_interceptor_async(header, value):
async def intercept_async_call(client_call_details, request_iterator, request_streaming, response_streaming):
metadata = list(client_call_details.metadata or ())
metadata.append((header, value))
client_call_details = ClientCallDetails(
client_call_details.method, client_call_details.timeout, metadata, client_call_details.credentials,
)
return client_call_details, request_iterator, None
return create(intercept_async_call)
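# A hedged usage sketch (the target address and header values are
# illustrative, not from this module):
# interceptor = header_adder_interceptor_async('authentication', 'token')
# channel = grpc.aio.insecure_channel('127.0.0.1:11800', interceptors=[interceptor])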
|
[
"noreply@github.com"
] |
apache.noreply@github.com
|
ae7d7e80d8d08994ebaba9368bfbeb41259cdf19
|
0a2cc497665f2a14460577f129405f6e4f793791
|
/sdk/reservations/azure-mgmt-reservations/azure/mgmt/reservations/aio/operations/_exchange_operations.py
|
00a323e5f5ee98c8fe3be36a18fe084e24a20aee
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
hivyas/azure-sdk-for-python
|
112158aa9e1dd6e30cf6b3dde19f5db6ea2a577b
|
8b3258fa45f5dc25236c22ad950e48aa4e1c181c
|
refs/heads/master
| 2023-06-17T12:01:26.392186
| 2021-05-18T19:56:01
| 2021-05-18T19:56:01
| 313,761,277
| 1
| 1
|
MIT
| 2020-12-02T17:48:22
| 2020-11-17T22:42:00
|
Python
|
UTF-8
|
Python
| false
| false
| 7,503
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExchangeOperations:
"""ExchangeOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.reservations.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _post_initial(
self,
body: "_models.ExchangeRequest",
**kwargs
) -> Optional["_models.ExchangeOperationResultResponse"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ExchangeOperationResultResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._post_initial.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'ExchangeRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExchangeOperationResultResponse', pipeline_response)
if response.status_code == 202:
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_post_initial.metadata = {'url': '/providers/Microsoft.Capacity/exchange'} # type: ignore
async def begin_post(
self,
body: "_models.ExchangeRequest",
**kwargs
) -> AsyncLROPoller["_models.ExchangeOperationResultResponse"]:
"""Exchange Reservation(s).
Returns one or more ``Reservations`` in exchange for one or more ``Reservation`` purchases.
:param body: Request containing the refunds and purchases that need to be executed.
:type body: ~azure.mgmt.reservations.models.ExchangeRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExchangeOperationResultResponse or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.reservations.models.ExchangeOperationResultResponse]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExchangeOperationResultResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._post_initial(
body=body,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExchangeOperationResultResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_post.metadata = {'url': '/providers/Microsoft.Capacity/exchange'} # type: ignore
|
[
"noreply@github.com"
] |
hivyas.noreply@github.com
|
3b9cdeea125b0d4b04ff8151a9af1f622e9f31b8
|
0388e6159a676944a26ffd10e413980120e2c338
|
/extra_foam/gui/ctrl_widgets/scan_button_set.py
|
70da587b8f8e5c72b49bde8e857ce1c7d6f2127a
|
[
"BSD-3-Clause"
] |
permissive
|
scottwedge/EXtra-foam
|
0bb3689ec11df7253ce407b9c5c53f68a405200f
|
578c6035af023575a5c026b0391d15884ca1df60
|
refs/heads/master
| 2021-02-07T20:06:18.266595
| 2020-02-24T14:08:50
| 2020-02-24T14:08:50
| 244,071,447
| 0
| 0
|
BSD-3-Clause
| 2020-03-01T02:05:39
| 2020-03-01T02:05:38
| null |
UTF-8
|
Python
| false
| false
| 1,489
|
py
|
"""
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
Author: Jun Zhu <jun.zhu@xfel.eu>
Copyright (C) European X-Ray Free-Electron Laser Facility GmbH.
All rights reserved.
"""
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QFrame, QHBoxLayout, QPushButton
class ScanButtonSet(QFrame):
scan_toggled_sgn = pyqtSignal(bool)
reset_sgn = pyqtSignal()
def __init__(self, parent=None):
super().__init__(parent=parent)
self._scan_btn = QPushButton("Scan")
self._pause_btn = QPushButton("Pause")
self._pause_btn.setEnabled(False)
self._reset_btn = QPushButton("Reset")
self.initUI()
self.initConnections()
def initUI(self):
layout = QHBoxLayout()
layout.addWidget(self._scan_btn)
layout.addWidget(self._pause_btn)
layout.addWidget(self._reset_btn)
self.setLayout(layout)
def initConnections(self):
self._scan_btn.clicked.connect(self._onStartScan)
self._pause_btn.clicked.connect(self._onStopScan)
self._reset_btn.clicked.connect(self.reset_sgn)
def _onStartScan(self):
self._scan_btn.setEnabled(False)
self._pause_btn.setEnabled(True)
self.scan_toggled_sgn.emit(True)
def _onStopScan(self):
self._pause_btn.setEnabled(False)
self._scan_btn.setEnabled(True)
self.scan_toggled_sgn.emit(False)
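# Hypothetical usage sketch (host code assumed, not part of the original file):
#     button_set = ScanButtonSet()
#     button_set.scan_toggled_sgn.connect(on_scan_toggled)  # slot receives a bool
#     button_set.reset_sgn.connect(on_reset)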
|
[
"zhujun981661@gmail.com"
] |
zhujun981661@gmail.com
|
a40d7e98cb98dcaf66ed024dcd8b9b94752de86f
|
e8f99a162207cba82d4e0f969d7bcdb2b9d8b522
|
/imooc/celery_learning/celery_app/celeryconfig.py
|
bfd26a86904e2b5b0b64ae6339b240819533c0c4
|
[] |
no_license
|
TesterCC/Python3Scripts
|
edb5446278ebf13edb64336001081941ca27d67d
|
58be67e1ffc74ef50289a885aa4ad05f58e2c383
|
refs/heads/master
| 2023-08-30T21:16:38.328045
| 2023-08-17T11:23:08
| 2023-08-17T11:23:08
| 93,401,996
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,536
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'MFC'
__time__ = '18/12/4 09:14'
from datetime import timedelta
from celery.schedules import crontab
# BROKER_URL: the message broker (middleware)
BROKER_URL = 'amqp://guest:guestpwd@localhost:port/vhost_name' # with passwd, e.g. guestpwd is your password
# BROKER_URL = 'redis://127.0.0.1:6379/1' # without passwd
# BROKER_URL = 'redis://:xxx@127.0.0.1:6379/1' # with redis auth, e.g. xxx is your password
# CELERY_RESULT_BACKEND: mainly used to store task execution results
CELERY_RESULT_BACKEND = 'redis://:xxx@127.0.0.1:6379/4'
# localhost redis-server /usr/local/redis-4.0.1/etc/redis.conf
CELERY_TIMEZONE = 'Asia/Shanghai'
# Whether to ignore task results (False: results are kept)
CELERY_IGNORE_RESULT = False
# UTC
# 导入指定的任务模块
CELERY_IMPORTS = (
'celery_app.task1',
'celery_app.task2',
'celery_app.task_send_email',
)
CELERYD_MAX_TASKS_PER_CHILD = 40
# Periodic task schedule: per the config below, task1 runs every 7s, task2 every 3s and task3 every 5s (daily crontab schedules are left commented out)
CELERYBEAT_SCHEDULE = {
'task1': {
'task': 'celery_app.task1.add',
'schedule': timedelta(seconds=7),
'args': (2, 8)
},
'task2': {
'task': 'celery_app.task2.multiply',
# 'schedule': crontab(hour=17, minute=21),
'schedule': timedelta(seconds=3),
'args': (4, 5)
},
'task3': {
'task': 'celery_app.task_send_email.send_email',
# 'schedule': crontab(hour=17, minute=21),
'schedule': timedelta(seconds=5),
},
}
## Run in terminal: celery beat -A celery_app -l INFO
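## A worker is also needed to actually execute the scheduled tasks, e.g.:
## celery worker -A celery_app -l INFO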
|
[
"testerlyx@foxmail.com"
] |
testerlyx@foxmail.com
|
93e02e28357482f45b11b6504bb548bf8ffb0bd3
|
7759122052337252217fff9d51ec6d125ef370e0
|
/iq/engine/gtk/gtkbox_manager.py
|
1428ab5c17caecb85a7e8a492c21f26fc90f3528
|
[] |
no_license
|
XHermitOne/iq_framework
|
3325670c74233d99e599921fad4bd41e5d8104f3
|
7550e242746cb2fb1219474463f8db21f8e3e114
|
refs/heads/master
| 2023-09-03T21:07:58.107750
| 2023-09-01T07:30:13
| 2023-09-01T07:30:13
| 195,210,479
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 875
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
GtkBox manager.
"""
import gi
gi.require_version('Gtk', '3.0')
import gi.repository.Gtk
from ...util import log_func
# from ...util import spc_func
# from ...util import id_func
from . import base_manager
__version__ = (0, 0, 0, 1)
class iqGtkBoxManager(base_manager.iqBaseManager):
"""
GtkBox manager.
"""
def clearGtkBox(self, box=None):
"""
Clear GtkBox.
:param box: GtkBox object.
:return: True/False.
"""
assert issubclass(box.__class__, gi.repository.Gtk.Box), u'GtkBox manager type error'
try:
for child in box.get_children():
box.remove(child)
child.destroy()
return True
except:
log_func.fatal(u'Error clear box <%s>' % box.get_name())
return False
|
[
"xhermitone@gmail.com"
] |
xhermitone@gmail.com
|
41f45ca9f01ea667729fbcb4f7f1ad0903e8186f
|
9f9c0861a392d26c1ec0c317b2cba85515ddc627
|
/torch/nn/quantized/modules/rnn.py
|
7e523ba830d22bd85be26b8bb793722a0e45d7f2
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
zhuhaozhe/pytorch
|
be09e6aed6b2b229f4d1126fc170542e2aa02016
|
ba556961a7de900c0ad6f10ceba094b9f5a2a61e
|
refs/heads/master
| 2023-08-10T10:35:28.573545
| 2023-08-01T07:05:50
| 2023-08-01T07:05:50
| 205,321,942
| 0
| 1
|
NOASSERTION
| 2022-09-08T08:00:41
| 2019-08-30T06:46:46
|
C++
|
UTF-8
|
Python
| false
| false
| 1,719
|
py
|
import torch
class LSTM(torch.nn.quantizable.LSTM):
r"""A quantized long short-term memory (LSTM).
For the description and the argument types, please, refer to :class:`~torch.nn.LSTM`
Attributes:
layers : instances of the `_LSTMLayer`
.. note::
To access the weights and biases, you need to access them per layer.
See examples in :class:`~torch.nn.quantizable.LSTM`
Examples::
>>> custom_module_config = {
... 'float_to_observed_custom_module_class': {
... nn.LSTM: nn.quantizable.LSTM,
... },
... 'observed_to_quantized_custom_module_class': {
... nn.quantizable.LSTM: nn.quantized.LSTM,
... }
... }
>>> tq.prepare(model, prepare_custom_module_class=custom_module_config)
>>> tq.convert(model, convert_custom_module_class=custom_module_config)
"""
_FLOAT_MODULE = torch.nn.quantizable.LSTM
def _get_name(self):
return 'QuantizedLSTM'
@classmethod
def from_float(cls, *args, **kwargs):
# The whole flow is float -> observed -> quantized
# This class does observed -> quantized only
raise NotImplementedError("It looks like you are trying to convert a "
"non-observed LSTM module. Please, see "
"the examples on quantizable LSTMs.")
@classmethod
def from_observed(cls, other):
assert type(other) == cls._FLOAT_MODULE
converted = torch.ao.quantization.convert(other, inplace=False,
remove_qconfig=True)
converted.__class__ = cls
return converted
|
[
"pytorchmergebot@users.noreply.github.com"
] |
pytorchmergebot@users.noreply.github.com
|
911c9e93d8d43ad832b767d37f5c312e13acad79
|
55628a9a08a6b6646b4a8aa74bedbf2e3fd7d850
|
/.history/master_20200119185018.py
|
7381a67978262b1e6f18702e2d351e7181371d37
|
[] |
no_license
|
StRobertCHSCS/final-project-team
|
c115dc11b318f7ac782c94860a8801bb558bd107
|
48907e72813c4dd3b48ff36f794f6fce04533219
|
refs/heads/master
| 2020-12-03T22:35:37.833893
| 2020-01-31T04:05:38
| 2020-01-31T04:05:38
| 231,506,873
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,440
|
py
|
'''
- make snake longer when eaten
- FIGURE OUT HOW TO KNOW WHERE TO ADD THE NEXT BLOCK (MOVE LAST LOCATION TO BACK)
- fix player_location lists, so that the list only holds the current snake location, not an ever-growing list (done)
- fix apple so it disappears when you go over it (done)
'''
import arcade
import random
# Set how many rows and columns we will have
ROW_COUNT = 29
COLUMN_COUNT = 51
# This sets the WIDTH and HEIGHT of each grid location
WIDTH = 20
HEIGHT = 20
# This sets the margin between each cell
# and on the edges of the screen.
MARGIN = 5
# Do the math to figure out our screen dimensions
SCREEN_WIDTH = (WIDTH + MARGIN) * COLUMN_COUNT + MARGIN
SCREEN_HEIGHT = (HEIGHT + MARGIN) * ROW_COUNT + MARGIN
up = False
down = False
left = False
right = False
player_x_column = 5
player_y_row = 5
snake_body = []
body = 1
apple_x = random.randint(0, COLUMN_COUNT - 1)
apple_y = random.randint(0, ROW_COUNT - 1)
apple_display = True
grid_texture = arcade.load_texture("29x51_grid.jpg")
def on_update(delta_time):
snake_move()
def on_draw():
arcade.start_render()
grid_background()
snake()
apple()
def grid_background():
arcade.draw_texture_rectangle(SCREEN_WIDTH//2, SCREEN_HEIGHT//2, grid_texture.width, grid_texture.height, grid_texture, 0)
def snake_move():
global player_x, player_y, player_x_column, player_y_row
if (0 <= player_x_column < COLUMN_COUNT) and (0 <= player_y_row < ROW_COUNT):
if up:
player_y_row += 1
elif down:
player_y_row -= 1
elif right:
player_x_column += 1
elif left:
player_x_column -= 1
# for i in range (1):
# player_loaction_x = player_loaction_x(player_x_column)
# player_loaction_y.append(player_y_row)
else:
restart()
# Player coordinates
player_x = (MARGIN + WIDTH) * player_x_column + MARGIN + WIDTH // 2
player_y = (MARGIN + HEIGHT) * player_y_row + MARGIN + HEIGHT // 2
def restart():
global player_x_column, player_y_row
global up, down, left, right
global body
player_x_column = 5
player_y_row = 5
body = 1
up = False
down = False
left = False
right = False
print ("You died")
def snake():
global player_x_column, player_y_row, apple_x, apple_y, snake_len, snake_body, snake_head
arcade.draw_rectangle_filled(player_x , player_y, WIDTH, HEIGHT, arcade.color.BLUE)
snake_head = [player_x_column, player_y_row]
snake_len = [snake_head]
# if (len(snake_body) > 1):
# for num in range (1, len(snake_body):
# snake_len[i]= snake_len[i-1]
print("body", body)
# for index in range (body - 1, 0, -1):
# player_x_column = snake_len[index - 1][0]
# player_y_row = snake_len[index - 1][1]
# snake_len[index]
for i in range (body):
arcade.draw_rectangle_filled(
(MARGIN + WIDTH) * snake_len[i][0] + MARGIN + WIDTH // 2,
(MARGIN + HEIGHT) * snake_len[i][1] + MARGIN + HEIGHT // 2 ,
WIDTH, HEIGHT, arcade.color.BLUE)
def apple():
global apple_x, apple_y, apple_x_coordinate, apple_y_coordinate, snake_body, snake_len, body
global SPEED
apple_x_coordinate = (MARGIN + WIDTH) * apple_x + MARGIN + WIDTH // 2
apple_y_coordinate = (MARGIN + HEIGHT) * apple_y + MARGIN + HEIGHT // 2
if (player_x_column == apple_x) and (player_y_row == apple_y):
apple_display = False
body += 1  # grow the snake by one segment (was: append with an undefined `num`)
snake_body.append([player_x_column, player_y_row])
print ("hit")
else:
apple_display = True
print (snake_len)
if apple_display is True:
arcade.draw_rectangle_filled(apple_x_coordinate, apple_y_coordinate, WIDTH, HEIGHT, arcade.color.RED)
elif apple_display is False:
apple_x = random.randint(0, COLUMN_COUNT - 1)
apple_y = random.randint(0, ROW_COUNT - 1)
apple_x_coordinate = (MARGIN + WIDTH) * apple_x + MARGIN + WIDTH // 2
apple_y_coordinate = (MARGIN + HEIGHT) * apple_y + MARGIN + HEIGHT // 2
apple_display = True
def on_key_press(key, modifiers):
global up, down, left, right
if key == arcade.key.W:
up = True
down = False
right = False
left = False
elif key == arcade.key.S:
down = True
up = False
right = False
left = False
elif key == arcade.key.A:
left = True
up = False
down = False
right = False
elif key == arcade.key.D:
right = True
up = False
down = False
left = False
def on_key_release(key, modifiers):
pass
def on_mouse_press(x, y, button, modifiers):
pass
def setup():
global grid
# global player_x_column, apple_x, player_y_row, apple_y, SPEED
# SPEED = 10
# if (player_x_column == apple_x) and (player_y_row == apple_y):
# SPEED += 5
arcade.open_window(SCREEN_WIDTH, SCREEN_HEIGHT, "snake")
arcade.set_background_color(arcade.color.BLACK)
arcade.schedule(on_update, 1/10)
# Override arcade window methods
window = arcade.get_window()
window.on_draw = on_draw
window.on_key_press = on_key_press
window.on_key_release = on_key_release
window.on_mouse_press = on_mouse_press
arcade.run()
if __name__ == '__main__':
setup()
|
[
"clementina1023@gmail.com"
] |
clementina1023@gmail.com
|
7abb383871da68acca8803c033a6cda606057eae
|
4ecb332ba2edd08d4a0a0021db675b41c3790dbd
|
/bc19-scaffold/bots/34.TacticsImprovedBot/pilgrims.py
|
f180cd4981648d302407dcd2442b86e3dd0796e0
|
[] |
no_license
|
Nischay-Pro/BattleCode2019
|
fdffdd235e8db60189e90e48c3f47f23bb32b30f
|
92193daf631687acca00176c1fa6a9255d7d4381
|
refs/heads/master
| 2020-04-16T10:15:39.875870
| 2019-01-27T21:08:52
| 2019-01-27T21:08:52
| 165,497,691
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,853
|
py
|
import constants
import utility
import pilgrims_utility
import movement
import check
def pilgrim(robot):
if robot.pilgrim_mine_ownership == None:
robot.steps_to_mine += 1
# communications.self_communicate_loop(robot)
# robot.log("Pilgrims current move destination is " + robot.current_move_destination)
carry_karb = robot.me.karbonite
carry_fuel = robot.me.fuel
# The pilgrim is on a mine and wants to deposit resources
if carry_fuel > 80 or carry_karb > 18:
# robot.log("Nearing capacity")
return pilgrim_full(robot)
# The pilgrim checks if it has a mine on it's current position
pilgrim_is_mining = pilgrim_mine(robot)
if pilgrim_is_mining !=0 and robot.fuel > 1 and robot.actual_round_number != None:
if robot.actual_round_number >= 6:
if robot.piligrim_did_i_shout_my_x_cord == False:
robot.castle_talk(robot.me.x + 64)
robot.piligrim_did_i_shout_my_x_cord = True
else:
if robot.piligrim_did_i_shout_my_y_cord == False:
robot.castle_talk(robot.me.y + 64)
robot.piligrim_did_i_shout_my_y_cord = True
return pilgrim_is_mining
# Receive signal from castle on which mine to go to
if robot.step == 0:
pilgrims_utility.receive_initial_signal(robot)
# Move Section
pilgrim_is_moving = pilgrim_move(robot)
if pilgrim_is_moving !=0 and robot.fuel > 30:
# robot.log(pilgrim_is_moving)
return pilgrim_is_moving
def pilgrim_move(robot):
# Emergency case, allows pilgrims to mine
if robot.fuel <= 2:
return 0
pos_x = robot.me.x
pos_y = robot.me.y
passable_map, occupied_map, karb_map, fuel_map = utility.get_all_maps(robot)
random_directions = utility.random_cells_around()
# May change for impossible resources
# pilgrims_utility.did_pilgrim_burn_out(robot)
# Capture and start mining any resource if more than 50 turns since creation and no mine
# TODO - Improve this code snippet to mine, if in visible region and empty
# if robot.me.turn > constants.pilgrim_will_scavenge_closeby_mines_after_turns: #and robot.me.turn < constants.pilgrim_will_scavenge_closeby_mines_before_turns:
# for direction in random_directions:
# if (not utility.is_cell_occupied(occupied_map, pos_x + direction[1], pos_y + direction[0])) and utility.is_cell_resourceful(karb_map, fuel_map, pos_x + direction[1], pos_y + direction[0]):
# robot.current_move_destination = None
# utility.default_movement_variables(robot)
# return robot.move(direction[1], direction[0])
# TODO - Make into scout if too old, which will scout enemy bases
# If the mine is already occupied
# pilgrims_utility.is_pilgrim_scavenging(robot)
# Just move
if not movement.is_completely_surrounded(robot):
if robot.current_move_destination == None and robot.pilgrim_mine_ownership != None:
robot.current_move_destination = robot.pilgrim_mine_ownership
move_command = movement.move_to_destination(robot)
if move_command != None:
return move_command
# Random Movement when not enough time
# for direction in random_directions:
# if not utility.is_cell_occupied(occupied_map, pos_x + direction[1], pos_y + direction[0]) and passable_map[pos_y + direction[0]][pos_x + direction[1]] == 1:
# robot.mov_path_between_location_and_destination = None
# return robot.move(direction[1], direction[0])
return 0
def pilgrim_mine(robot):
pos_x = robot.me.x
pos_y = robot.me.y
karb_map = robot.get_karbonite_map()
fuel_map = robot.get_fuel_map()
if utility.is_cell_resourceful(karb_map, fuel_map, pos_x, pos_y):
robot.signal(0, 0)
if utility.is_cell_fuel(fuel_map, pos_x, pos_y):
robot.karb_miner = False
robot.fuel_miner = True
robot.castle_talk(6)
elif utility.is_cell_karbonite(karb_map, pos_x, pos_y):
robot.karb_miner = True
robot.fuel_miner = False
robot.castle_talk(7)
# TRAVIS CHECK MINE 1
robot.pilgrim_mine_ownership = (pos_x, pos_y)
return check.mine_check(robot, 1)
else:
return 0
def pilgrim_full(robot):
# If we have adjacent castle/church or haven't reached the convoy age end
pilgrim_give_or_convoy = pilgrims_utility.give_or_mine(robot)
if pilgrim_give_or_convoy != 0 and robot.fuel > 4:
return pilgrim_give_or_convoy
# FIXME - Make churches not be built if castle/other church is in reasonable travel range
if robot.karbonite > 50 and robot.fuel > 200:
return pilgrims_utility.make_church(robot)
return None
|
[
"f2015845@hyderabad.bits-pilani.ac.in"
] |
f2015845@hyderabad.bits-pilani.ac.in
|
0c722d822e98d521f2af77ed2e6f29002c65e0a8
|
93ba28a7808ed5a406753748fedbdbaea5f3c8b2
|
/KSTest.py
|
92e5fb7c859a739fb20a3a6166aaad182051e025
|
[] |
no_license
|
zaixingmao/samples-plots
|
7a55005abab1e7644296d1eb2e76f603d160a37b
|
bb2371c7f664a84c454189ec648bb55630cb7565
|
refs/heads/master
| 2020-05-21T23:27:34.390427
| 2017-07-14T14:59:52
| 2017-07-14T14:59:52
| 24,139,867
| 0
| 0
| null | 2015-10-23T16:00:22
| 2014-09-17T10:17:08
|
Python
|
UTF-8
|
Python
| false
| false
| 4,000
|
py
|
#!/usr/bin/env python
import ROOT as r
import optparse
import tool
r.gROOT.SetBatch(True) # to suppress canvas pop-outs
def KSTest(ifile, ofile, name):
f = r.TFile(ifile)
testTree = f.Get("TestTree")
trainTree = f.Get("TrainTree")
nBins = 20
nBins2 = 100000
xMin = -1.0
xMax = 1.0
BDT_Sig_Train = r.TH1F('BDT_Sig_Train', 'BDT_Sig_Train', nBins, xMin, xMax)
BDT_Sig_Test = r.TH1F('BDT_Sig_Test', 'Overtraining Check (%s)' %name[:name.find('_')], nBins, xMin, xMax)
BDT_Bkg_Train = r.TH1F('BDT_Bkg_Train', 'BDT_Bkg_Train', nBins, xMin, xMax)
BDT_Bkg_Test = r.TH1F('BDT_Bkg_Test', 'BDT_Bkg_Test', nBins, xMin, xMax)
BDT_Sig_Train_4KS = r.TH1F('BDT_Sig_Train_4KS', 'BDT_Sig_Train_4KS', nBins2, xMin, xMax)
BDT_Sig_Test_4KS = r.TH1F('BDT_Sig_Test_4KS', 'BDT_Sig_Test_4KS', nBins2, xMin, xMax)
BDT_Bkg_Train_4KS = r.TH1F('BDT_Bkg_Train_4KS', 'BDT_Bkg_Train_4KS', nBins2, xMin, xMax)
BDT_Bkg_Test_4KS = r.TH1F('BDT_Bkg_Test_4KS', 'BDT_Bkg_Test_4KS', nBins2, xMin, xMax)
totalTest = testTree.GetEntries()
for i in range(totalTest):
testTree.GetEntry(i)
if testTree.className == "Signal":
BDT_Sig_Test.Fill(testTree.BDT, testTree.weight)
BDT_Sig_Test_4KS.Fill(testTree.BDT, testTree.weight)
else:
BDT_Bkg_Test.Fill(testTree.BDT, testTree.weight)
BDT_Bkg_Test_4KS.Fill(testTree.BDT, testTree.weight)
totalTrain = trainTree.GetEntries()
for i in range(totalTrain):
trainTree.GetEntry(i)
if trainTree.className == "Signal":
BDT_Sig_Train.Fill(trainTree.BDT, trainTree.weight)
BDT_Sig_Train_4KS.Fill(trainTree.BDT, trainTree.weight)
else:
BDT_Bkg_Train.Fill(trainTree.BDT, trainTree.weight)
BDT_Bkg_Train_4KS.Fill(trainTree.BDT, trainTree.weight)
BDT_Bkg_Train.Sumw2()
BDT_Sig_Train.Sumw2()
sigKS = BDT_Sig_Test_4KS.KolmogorovTest(BDT_Sig_Train_4KS)
bkgKS = BDT_Bkg_Test_4KS.KolmogorovTest(BDT_Bkg_Train_4KS)
print 'signal: %.4f' %sigKS
print 'background: %.4f' %bkgKS
BDT_Bkg_Train.Scale(1/BDT_Bkg_Train.Integral())
BDT_Bkg_Train.SetMarkerColor(r.kRed)
BDT_Bkg_Train.SetMarkerStyle(21)
BDT_Bkg_Test.SetLineColor(r.kRed)
BDT_Bkg_Test.SetFillColor(r.kRed)
BDT_Bkg_Test.SetFillStyle(3354)
BDT_Bkg_Test.Scale(1/BDT_Bkg_Test.Integral())
BDT_Sig_Train.Scale(1/BDT_Sig_Train.Integral())
BDT_Sig_Train.SetMarkerColor(r.kBlue)
BDT_Sig_Train.SetMarkerStyle(21)
BDT_Sig_Test.SetLineColor(r.kBlue)
BDT_Sig_Test.SetFillColor(r.kBlue)
BDT_Sig_Test.SetFillStyle(3001)
BDT_Sig_Test.Scale(1/BDT_Sig_Test.Integral())
legendHistos1 = []
legendHistos1.append((BDT_Bkg_Test, 'bkg test'))
legendHistos1.append((BDT_Bkg_Train, 'bkg train'))
# legendHistos1.append((BDT_Bkg_Train, 'KS: %0.3f' %bkgKS))
legendHistos2 = []
legendHistos2.append((BDT_Sig_Test, 'sig test'))
legendHistos2.append((BDT_Sig_Train, 'sig train'))
# legendHistos2.append((BDT_Sig_Train, 'KS: %0.3f' %sigKS))
l1 = tool.setMyLegend(lPosition=(0.2, 0.67, 0.5, 0.82), lHistList=legendHistos1)
l2 = tool.setMyLegend(lPosition=(0.6, 0.67, 0.9, 0.82), lHistList=legendHistos2)
r.gStyle.SetOptStat(0)
c = r.TCanvas("c","Test", 800, 600)
BDT_Sig_Test.Draw()
BDT_Sig_Test.GetXaxis().SetTitle("BDT")
BDT_Sig_Test.SetMaximum(0.5)
BDT_Sig_Train.Draw('sameE1P')
BDT_Bkg_Test.Draw('same')
BDT_Bkg_Train.Draw('sameE1P')
l1.Draw('same')
l2.Draw('same')
c.Print('%s.pdf' %ofile)
massPoints = ['260','270','280','290','300','310','320','330','340','350']
# massPoints = ['260','300','350']
nTreesList = ['150']
for nTrees in nTreesList:
for iMass in massPoints:
postFix = '_7_n%s_mJJ' %nTrees
KSTest('/nfs_scratch/zmao/TMVA/TMVA%s%s.root' %(iMass,postFix), '/nfs_scratch/zmao/TMVA/pdf/TMVA%s%s' %(iMass,postFix), 'H2hh%s_n%s' %(iMass, nTrees))
|
[
"zaixing.mao@cern.ch"
] |
zaixing.mao@cern.ch
|
b5559016125f4ba359d3ccc395429165da594707
|
4ba32be96850894f8c94597899a401b3b19f216e
|
/uotp/packet/time.py
|
b4e31801d2f99580fc5de05750169d19a2106202
|
[
"Unlicense"
] |
permissive
|
dlunch/uotp
|
2e204cf036f5a735d1f8fe3149d0dd08d96cbedf
|
bfa52a5aae4c7b40c10aebaaa4667c26d40b5ff7
|
refs/heads/master
| 2021-08-31T23:15:52.285195
| 2017-12-23T11:21:42
| 2017-12-23T11:21:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
from struct import unpack
from .base import Packet, Opcode
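# The request itself carries no payload; the decoded payload is a single
# big-endian uint32 ("!I") interpreted as the current time.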
class TimeRequest(Packet):
OPCODE = Opcode.Time
SIMPLE = True
@classmethod
def _encode_payload(cls, data: dict) -> bytes:
return b''
@classmethod
def _decode_payload(cls, payload: bytes) -> dict:
time, = unpack("!I", payload)
return {
'time': time
}
|
[
"devunt@gmail.com"
] |
devunt@gmail.com
|
d2c98cb9d7f276f7a2cc3774ca7207c9c874da3a
|
dbce70b3685e04fe7b52687bfc4bc9d1c3325486
|
/src/filingcabinet/migrations/0019_auto_20210323_1404.py
|
30f74a4c0f93ad21c2ea0d031a18c8e6eefb9001
|
[] |
no_license
|
okfde/django-filingcabinet
|
d0fd8ea1deb7e990dcfe510df548bd497e96fe5e
|
5d5ff8f9f6573614d61def654b3e22805bf84934
|
refs/heads/main
| 2023-09-02T10:53:28.789501
| 2023-08-02T15:34:57
| 2023-08-02T15:34:57
| 144,304,373
| 7
| 4
| null | 2023-09-14T16:21:49
| 2018-08-10T15:37:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,210
|
py
|
# Generated by Django 3.1.6 on 2021-03-23 13:04
import uuid
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.FILINGCABINET_DOCUMENT_MODEL),
("filingcabinet", "0018_auto_20200622_1302"),
]
operations = [
migrations.AddField(
model_name="document",
name="listed",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="documentcollection",
name="listed",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="documentcollection",
name="uid",
field=models.UUIDField(default=uuid.uuid4, editable=False),
),
migrations.AlterField(
model_name="page",
name="document",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="pages",
to=settings.FILINGCABINET_DOCUMENT_MODEL,
),
),
]
|
[
"mail@stefanwehrmeyer.com"
] |
mail@stefanwehrmeyer.com
|
49aeb6dcb89830cd7a6f3a7ffabb101c58b2a116
|
77ada1a21fd1086b00fe5e0f2a7e568bca8562c5
|
/visualization_data.py
|
9f0c9c5000ea270576f852d41355864e40b59c74
|
[] |
no_license
|
Tulin2010/LSTM_GoogleClusterTraceData
|
584f8d38395ffd159f30496487ad5c8161b4c331
|
a999b3a609bb1907b6fbe85c5783b0365078f53e
|
refs/heads/master
| 2023-03-17T13:06:17.392797
| 2017-11-21T15:18:02
| 2017-11-21T15:18:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
from pandas import read_csv
dataset = read_csv('/home/nguyen/learnRNNs/international-airline-passengers.csv', usecols=[1], engine='python', skipfooter=3)
plt.plot(dataset)
plt.show()
# print a
|
[
"thangbk2209@gmail.com"
] |
thangbk2209@gmail.com
|
ed419545953e68ef8416ac79e6c96d621fb18c94
|
4dc5aa4f1a99b5a8ca20413640094149e025b49e
|
/project-addons/l10n_es_facturae_ph/models/__init__.py
|
ad2302ae21a5303ac4d03e0bea984332125d7098
|
[] |
no_license
|
digitalsatori/PXGO_00064_2014_PHA
|
469dd86e595a125a5ca1f24c51756182638a0847
|
fe27d2f456deb750f9fba528feaa075dcf4a1b02
|
refs/heads/master
| 2023-07-19T18:32:17.178115
| 2023-07-15T13:20:05
| 2023-07-15T13:20:05
| 62,711,911
| 0
| 0
| null | 2023-08-28T21:36:39
| 2016-07-06T10:14:56
|
Python
|
UTF-8
|
Python
| false
| false
| 168
|
py
|
# -*- coding: utf-8 -*-
# © 2022 Pharmadus Botanicals
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import account_invoice, facturae_invoice
|
[
"oscar.salvador@pharmadus.com"
] |
oscar.salvador@pharmadus.com
|
91b7938f4809073bba962f3a9dfd05892a57cfa0
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/application_gateway_url_path_map_py3.py
|
cc013f10ac410d7745b713e261396fb447cd6378
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,457
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource_py3 import SubResource
class ApplicationGatewayUrlPathMap(SubResource):
"""UrlPathMaps give a url path to the backend mapping information for
PathBasedRouting.
:param id: Resource ID.
:type id: str
:param default_backend_address_pool: Default backend address pool resource
of URL path map.
:type default_backend_address_pool:
~azure.mgmt.network.v2017_06_01.models.SubResource
:param default_backend_http_settings: Default backend http settings
resource of URL path map.
:type default_backend_http_settings:
~azure.mgmt.network.v2017_06_01.models.SubResource
:param default_redirect_configuration: Default redirect configuration
resource of URL path map.
:type default_redirect_configuration:
~azure.mgmt.network.v2017_06_01.models.SubResource
:param path_rules: Path rule of URL path map resource.
:type path_rules:
list[~azure.mgmt.network.v2017_06_01.models.ApplicationGatewayPathRule]
:param provisioning_state: Provisioning state of the backend http settings
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Name of the resource that is unique within a resource group.
This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:param type: Type of the resource.
:type type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'default_backend_address_pool': {'key': 'properties.defaultBackendAddressPool', 'type': 'SubResource'},
'default_backend_http_settings': {'key': 'properties.defaultBackendHttpSettings', 'type': 'SubResource'},
'default_redirect_configuration': {'key': 'properties.defaultRedirectConfiguration', 'type': 'SubResource'},
'path_rules': {'key': 'properties.pathRules', 'type': '[ApplicationGatewayPathRule]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, *, id: str=None, default_backend_address_pool=None, default_backend_http_settings=None, default_redirect_configuration=None, path_rules=None, provisioning_state: str=None, name: str=None, etag: str=None, type: str=None, **kwargs) -> None:
super(ApplicationGatewayUrlPathMap, self).__init__(id=id, **kwargs)
self.default_backend_address_pool = default_backend_address_pool
self.default_backend_http_settings = default_backend_http_settings
self.default_redirect_configuration = default_redirect_configuration
self.path_rules = path_rules
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
self.type = type
|
[
"noreply@github.com"
] |
xiafu-msft.noreply@github.com
|
fdc9507613f0c24c6bf4372dcdc2f935907300f4
|
f3693916a8b118bf139364604dac3f51235ed613
|
/functional/Components/Clients/Clients_POST/test_TC_43120_Clients_POST_Height_Gt.py
|
c91858daec38cff468e6ba12818cf956996a8beb
|
[] |
no_license
|
muktabehera/QE
|
e7d62284889d8241d22506f6ee20547f1cfe6db1
|
3fedde591568e35f7b80c5bf6cd6732f8eeab4f8
|
refs/heads/master
| 2021-03-31T02:19:15.369562
| 2018-03-13T02:45:10
| 2018-03-13T02:45:10
| 124,984,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,324
|
py
|
# -*- coding: UTF-8 -*-
"""PFE Component Tests - Clients.
* TC-43120 - Clients POST:
Verify that user is able to add source constraint rule with specific rule for parameter 'Height>GT(Greater Than) using request POST '/clients/'.
Equivalent test CURL command:
curl -H "Host: <client_host>" -H "Authorization: Bearer <valid_token>"
-X POST -d @<JSON_data_file> -H "Content-Type: application/json"
"<PF_host>://<client_host>/clients"
Same, with test data:
curl -H "Host: <client_host>" -H "Authorization: Bearer <valid_token>"
-X POST -d @<JSON_data_file> -H "Content-Type: application/json"
"<PF_host>://<client_host>/clients"
JSON data sent to PathFinder in this test:
{'id': 'sourceRuleHeightGT',
'matchingRule': {'groups': [], 'operator': 'ALL', 'rules': []},
'name': 'POST: Client with Source Rule Height GT',
'sourceSelectionRule': [{'groups': [],
'operator': 'ALL',
'rules': [{'contextField': 'heightPx',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': 1000,
'operator': 'GT'}]}]}
"""
import pytest
from qe_common import *
logger = init_logger()
@pytest.mark.components
@pytest.allure.story('Clients')
@pytest.allure.feature('POST')
class Test_PFE_Components(object):
"""PFE Clients test cases."""
@pytest.allure.link('https://jira.qumu.com/browse/TC-43120')
@pytest.mark.Clients
@pytest.mark.POST
def test_TC_43120_POST_Clients_Height_Gt(self, context):
"""TC-43120 - Clients-POST
Verify that user is able to add source constraint rule with specific rule for parameter 'Height>GT(Greater Than) using request POST '/clients/'."""
# Define a test step
with pytest.allure.step("""Verify that user is able to add source constraint rule with specific rule for parameter 'Height>GT(Greater Than) using request POST '/clients/'."""):
# Test case configuration
clientDetails = context.sc.ClientDetails(
id='sourceRuleHeightGT',
matchingRule={'operator': 'ALL',
'rules': [],
'groups': []},
name='POST: Client with Source Rule Height GT',
sourceSelectionRule=[{
'operator':
'ALL',
'rules': [{
'expressionType': 'Single',
'contextField': 'heightPx',
'operator': 'GT',
'contextFieldType': 'String',
'matchValue': 1000,
'contextFieldKey': None
}],
'groups': []
}])
# createEntity the Clients.
# The `check` call validates return code
# and some of the swagger schema.
# Most schema checks are disabled.
response = check(
context.cl.Clients.createEntity(
body=clientDetails
)
)
|
[
"mbehera@qumu.com"
] |
mbehera@qumu.com
|
1ab116b1625ef9856f2198a6637bb253a87c266b
|
25f4e894beced05eb15708ac2314d72e679fa069
|
/google/google/spiders/GoogleSpider.py
|
7358c7cbfd2aefd2703b5e751cd026ebcac532c8
|
[] |
no_license
|
pyscrape/web-scraping-projects
|
4e6059daaa2f1092e2c8f1ee4a4ad1e422b21096
|
f1e77b151de256e1a5e83099859635edc2f5826a
|
refs/heads/master
| 2021-06-01T15:02:42.548264
| 2016-08-24T07:27:11
| 2016-08-24T07:27:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,883
|
py
|
import scrapy
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.lxmlhtml import LxmlLinkExtractor
from google.items import GoogleItem
from scrapy.conf import settings
def parsing_rating(line):
if len(line)>0:
return line[0][line[0].find("alt"):]
else:
return ""
class GoogleSpider(CrawlSpider):
name = 'google'
allowed_domains = ['google.com']
# rules = (Rule(LxmlLinkExtractor(allow=(r'\/([A-Z])([A-Z0-9]{9})'),deny=('')),callback='parse_item'),Rule(LxmlLinkExtractor(allow=(''))),),)
# rules = (Rule(LxmlLinkExtractor(allow=(r'https://www.tripadvisor.com/Attraction_Review.*')),callback='parse_trip', process_links='process_links'),)
rules = (Rule(LxmlLinkExtractor(allow=(r'https://www.google.com/.*')), callback='parse_trip', follow=False), Rule(LxmlLinkExtractor(allow=(''))),)
def __init__(self,*args, **kwargs):
super(GoogleSpider, self).__init__(*args, **kwargs)
start_url='https://www.tripadvisor.com/Attractions-g187337-Activities-Frankfurt_Hesse.html'
# start_url='https://www.tripadvisor.com/'
self.start_urls = [start_url]
def parse_trip(self,response):
item = GoogleItem()
print "\n\n---------------------START-----------------------"
print response.url
# print response.xpath('//a/@href').extract()
# try:
item['name'] = response.xpath('//*[@id="HEADING"]/text()').extract()[0].encode('ascii','ignore')
# item['rating'] = parsing_rating(response.xpath('//*[@id="HEADING_GROUP"]/div/div[2]/div[1]/div/span/img').extract())
# item['neighborhood'] = response.xpath('//*[@id="MAP_AND_LISTING"]/div[2]/div/div[2]/div/div[1]/div/address/span/span').extract()
# item['classification'] = response.xpath('//*[@id="HEADING_GROUP"]/div/div[3]/div[2]/div').extract()
item['url'] = response.url
# item['price'] = response.xpath('//*[@id="ABOVE_THE_FOLD"]/div[2]/div[1]/div/div[2]/div/div[1]/div/div[2]/div[1]/text()').extract()
# item['hours'] = response.xpath('//*[@id="MAP_AND_LISTING"]/div[2]/div/div[2]/div/div[4]/div/div[2]/div').extract()
# item['desc'] = response.xpath('//*[@id="OVERLAY_CONTENTS"]/div/p/text()').extract()
# item['desc'] = [desc.encode('ascii','ignore') for desc in response.xpath('//*[@id="feature-bullets"]/ul/li/span/text()').extract() ]
# usernames = response.xpath('//*[@class="username mo"]').extract()
# reviews = response.xpath('//*[@class="partial_entry"]/text()').extract()
# item['reviews'] = zip(usernames,reviews)
print "\n\n---------------------------------------------------"
print(item)
# except:
# print('Not a product!')
# item = None
yield item
def process_links(self,links):
print "\n LINKS"
links_list = []
for i in links:
if "https://www.tripadvisor.com/Attraction_Review" in i.url:
links_list.append(i)
print i.url
return links_list
def dummy(self,response):
print(str(response.url))
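# Run from the Scrapy project root with, e.g.: scrapy crawl google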
|
[
"mymamyma@gmail.com"
] |
mymamyma@gmail.com
|
9e15da608c57204097c2be6bfac95cf84b72f297
|
8b71fdd80be5f22659cfb135019f5fd968a3cb77
|
/supervised_learning/0x01-classification/19-deep_neural_network.py
|
5e9cf4bd693450e09c970d9f96ab70baf7c24890
|
[] |
no_license
|
KamalTaleb/holbertonschool-machine_learning
|
357e1c6bfcffa6672e12a3d518846b2a96747148
|
242b449b3a7a4051270ca32a22866a884754d141
|
refs/heads/master
| 2023-03-29T07:24:44.113412
| 2021-04-08T15:45:16
| 2021-04-08T15:45:16
| 320,596,666
| 0
| 0
| null | 2020-12-11T14:41:48
| 2020-12-11T14:29:03
|
Python
|
UTF-8
|
Python
| false
| false
| 3,177
|
py
|
#!/usr/bin/env python3
"""Contains the DeepNeuralNetwork class"""
import numpy as np
class DeepNeuralNetwork:
"""
DeepNeuralNetwork class
defines a deep neural network
performing binary classification:
"""
def __init__(self, nx, layers):
"""
Class constructor
:param nx: the number of input features
:param layers: list representing the number of nodes
in each layer of the network
"""
if not isinstance(nx, int):
raise TypeError("nx must be an integer")
if nx < 1:
raise ValueError("nx must be a positive integer")
if not isinstance(layers, list):
raise TypeError("layers must be a list of positive integers")
if len(layers) == 0:
raise TypeError('layers must be a list of positive integers')
self.nx = nx
self.layers = layers
self.__L = len(layers)
self.__cache = {}
self.__weights = {}
for i in range(self.L):
if not isinstance(layers[i], int) or layers[i] < 1:
raise TypeError("layers must be a list of positive integers")
W_key = "W{}".format(i + 1)
b_key = "b{}".format(i + 1)
self.weights[b_key] = np.zeros((layers[i], 1))
if i == 0:
f = np.sqrt(2 / nx)
self.__weights['W1'] = np.random.randn(layers[i], nx) * f
else:
f = np.sqrt(2 / layers[i - 1])
h = np.random.randn(layers[i], layers[i - 1]) * f
self.__weights[W_key] = h
@property
def L(self):
"""property to retrieve L"""
return self.__L
@property
def cache(self):
"""property to retrieve b1"""
return self.__cache
@property
def weights(self):
"""property to retrieve A1"""
return self.__weights
def forward_prop(self, X):
"""
Calculates the forward propagation of the neural network
:param X: a numpy.ndarray with shape (nx, m)
that contains the input data
:return: the output of the neural network and the cache,
respectively
"""
self.__cache['A0'] = X
for l in range(self.__L):
W_key = "W{}".format(l + 1)
b_key = "b{}".format(l + 1)
A_key_prev = "A{}".format(l)
A_key_forw = "A{}".format(l + 1)
Z = np.matmul(self.__weights[W_key], self.__cache[A_key_prev]) \
+ self.__weights[b_key]
self.__cache[A_key_forw] = 1 / (1 + np.exp(-Z))
return self.__cache[A_key_forw], self.__cache
def cost(self, Y, A):
"""
Calculates the cost of the model using logistic regression
:param Y: numpy.ndarray with shape (1, m)
that contains the correct labels for the input data
:param A: numpy.ndarray with shape (1, m)
containing the activated output of the neuron for each example
:return: the cost
"""
cost = -np.sum((Y * np.log(A)) +
((1 - Y) * np.log(1.0000001 - A))) / Y.shape[1]
return cost
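# Minimal usage sketch (X is a (nx, m) input array, Y the matching (1, m) labels):
#     dnn = DeepNeuralNetwork(nx=784, layers=[5, 3, 1])
#     A, cache = dnn.forward_prop(X)
#     print(dnn.cost(Y, A))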
|
[
"kamal.talebb@gmail.com"
] |
kamal.talebb@gmail.com
|
b94058827cb1372d534468bbbd322a04a99f959b
|
ebc7607785e8bcd6825df9e8daccd38adc26ba7b
|
/python/leetcode/dfs/dfs.py
|
73df417ba9abc9d12885a4e0fbd4b12e54d6ba30
|
[] |
no_license
|
galid1/Algorithm
|
18d1b72b0d5225f99b193e8892d8b513a853d53a
|
5bd69e73332f4dd61656ccdecd59c40a2fedb4b2
|
refs/heads/master
| 2022-02-12T07:38:14.032073
| 2022-02-05T08:34:46
| 2022-02-05T08:34:46
| 179,923,655
| 3
| 0
| null | 2019-06-14T07:18:14
| 2019-04-07T05:49:06
|
Python
|
UTF-8
|
Python
| false
| false
| 920
|
py
|
import sys
# Implementation using an explicit stack
# def dfs(g, start):
# for key in g.keys():
# g[key] = sorted(g[key], reverse=True)
# stack = []
# stack.append(start)
#
# visited = set()
# while stack:
# cur = stack.pop()
# if cur in visited:
# continue
# # mark the node as visited and print it
# print(cur)
# visited.add(cur)
#
# for link in g[cur]:
# stack.append(link)
def dfs(g, vertex, visited):
print(vertex, end=' ')
visited.add(vertex)
for next_vertex in g[vertex]:
if next_vertex not in visited:
dfs(g, next_vertex, visited)
n, m, s = map(int, sys.stdin.readline().strip().split(" "))
g = {i: [] for i in range(1, n+1)}
for _ in range(m):
k, v = map(int, sys.stdin.readline().strip().split(" "))
g[k].append(v)
g[v].append(k)
for key in g.keys():
g[key].sort()
dfs(g, s, set())
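# Note: the recursive version can hit Python's default recursion limit on
# large graphs; raise it with sys.setrecursionlimit() or use the stack-based
# variant commented out above.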
|
[
"galid1@naver.com"
] |
galid1@naver.com
|
d508c9134e55dfefde90ff6147e63c22f1ef4da6
|
817c58b0d73d20638ea410512aa61b6b8837cf62
|
/backend/backend/api_urls.py
|
9a86a93b978ca7dfe66526f69f4c88b62bccf19d
|
[
"MIT"
] |
permissive
|
ProjetoALES/ales-website
|
d64eaef437ba6da1df7d810b8f495ad141d41464
|
9dc5b460f5e780a1221d0ed5071043f088082395
|
refs/heads/master
| 2022-01-25T04:15:59.302899
| 2020-02-25T05:15:10
| 2020-02-25T05:15:10
| 242,913,529
| 0
| 0
|
MIT
| 2022-01-06T22:42:46
| 2020-02-25T04:57:07
|
Vue
|
UTF-8
|
Python
| false
| false
| 403
|
py
|
from django.urls import include, path
from .views import CurrentUserViewset
from student import api_urls as student_urls
from .router import base_router
base_router.register("me", CurrentUserViewset, basename="me")
app_name = "api"
urlpatterns = [
path(
"",
include((student_urls.urlpatterns, student_urls.app_name), namespace="student"),
)
]
urlpatterns += base_router.urls
|
[
"gustavomaronato@gmail.com"
] |
gustavomaronato@gmail.com
|
95293ca379d2cc6ee06bbdf758e30368f3d7f4e6
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/zDei9LFWkX9d7wXyb_17.py
|
4d91f8876a0ba580b6c2e2347c216c95b10964bd
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
def malthusian(food_growth, pop_mult):
year=0
pop=100
food_prod = 100
while True:
food_prod+=food_growth
pop*=pop_mult
year+=1
if pop>food_prod:
break
return year
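# Example: malthusian(100, 1.1) returns 39: food production grows linearly by
# 100 per year while the population multiplies by 1.1 per year, so the
# population first exceeds the food supply in year 39.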
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
15dc7407da6affbb37c0dd12b9349f1ca91bba81
|
b2edef9270dfe69986c1f268d4bad7c4b1a54315
|
/329.longest-increasing-path-in-a-matrix.py
|
cf326f9a2ae3e6f0ab5d2ed9fd1dbb47b1428f78
|
[] |
no_license
|
mrgrant/LeetCode
|
9167f29462a072df4932201834073043cba99366
|
82132065ae1b4964a1e0ef913912f382471f4eb5
|
refs/heads/master
| 2021-12-03T00:16:42.070167
| 2021-11-30T15:02:29
| 2021-11-30T15:02:29
| 143,361,502
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,240
|
py
|
#
# @lc app=leetcode id=329 lang=python
#
# [329] Longest Increasing Path in a Matrix
#
# @lc code=start
import collections
class Solution(object):
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
# dfs solution
# m = len(matrix)
# n = len(matrix[0])
# dp = [[0 for _ in range(n)] for _ in range(m)]
# def dfs(i, j):
# if not dp[i][j]:
# val = matrix[i][j]
# dp[i][j] = 1 + max(
# dfs(i-1,j) if i > 0 and matrix[i-1][j] < val else 0,
# dfs(i+1,j) if i < m - 1 and matrix[i+1][j] < val else 0,
# dfs(i,j-1) if j > 0 and matrix[i][j-1] < val else 0,
# dfs(i,j+1) if j < n - 1 and matrix[i][j+1] < val else 0,
# )
# return dp[i][j]
# for i in range(m):
# for j in range(n):
# dfs(i, j)
# return max(dp[i][j] for i in range(m) for j in range(n))
# bfs with topological sort solution
m = len(matrix)
n = len(matrix[0])
dir = [(1, 0), (-1, 0), (0, 1), (0, -1)]
indeg = {}
q = collections.deque()
for i in range(m):
for j in range(n):
cnt = 0
for dx, dy in dir:
mx = i + dx
my = j + dy
if 0 <= mx < m and 0 <= my < n and matrix[mx][my] < matrix[i][j]:
cnt += 1
indeg[(i, j)] = cnt
if cnt == 0:
q.append((i, j))
step = 0
while q:
size = len(q)
for _ in range(size):
x, y = q.popleft()
for dx, dy in dir:
mx = x + dx
my = y + dy
if 0 <= mx < m and 0 <= my < n and matrix[mx][my] > matrix[x][y] and (mx, my) in indeg:
indeg[(mx, my)] -= 1
if indeg[(mx, my)] == 0:
q.append((mx, my))
step += 1
return step
# @lc code=end
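# Example: matrix = [[9, 9, 4], [6, 6, 8], [2, 1, 1]] -> 4
# (the longest increasing path is 1 -> 2 -> 6 -> 9).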
|
[
"mrgrantcy@gmail.com"
] |
mrgrantcy@gmail.com
|
fc65a1a0c364aa804a4aa0ff883d6b6e9a1b0133
|
2279568acd5c7182ea4d287d20cd208b10c945a2
|
/django/django_intro/form_submission/POST_form_submission/views.py
|
6e7bdc0b8c7af63776f3aafbe8e7e54a0eb7dcd8
|
[] |
no_license
|
quangnguyen17/Python
|
fbc5cec0eb51e48c964022e1bd45fb585d2b60ec
|
1920f757c5381480fc42f90946651aa0363fcaff
|
refs/heads/master
| 2020-12-27T08:58:51.058504
| 2020-02-10T21:42:45
| 2020-02-10T21:43:12
| 237,815,684
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
from django.shortcuts import render, redirect
# Create your views here.
def index(request):
return render(request, "index.html")
def create_user(request):
request.session['name'] = request.POST['name']
request.session['email'] = request.POST['email']
return redirect("/success")
def success(request):
return render(request, "success.html")
|
[
"wan15112001@gmail.com"
] |
wan15112001@gmail.com
|
3fa906d66b83757fe642d3e423f6b479a7ee5ff3
|
796344a0ecccb0c979348baef8b80a5146ba5ddd
|
/mysite/settings.py
|
bf17080ab7a8e8652cc8a877fd0148fa785c140d
|
[] |
no_license
|
emantovanelli/my-first-blog
|
d5f2d2af6373196172acbefbbf090f80296b5d99
|
6c87da8db54576111976dc57109ca096fd694363
|
refs/heads/master
| 2021-01-10T12:36:48.237270
| 2016-01-04T13:52:34
| 2016-01-04T13:52:34
| 48,194,425
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,711
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u_&c4%-u=vm)^hsyx-30-hf9z#oa_@db=s89od@1_t^5c=x)1e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"you@example.com"
] |
you@example.com
|
e21df57f20a42deffed822307c1bf7b5614cf75f
|
0274f2c465f110598456624581f569331221068b
|
/impl/gps/gps_operations.py
|
64a7a924c36f2bd4394f6ff191375c071b57d962
|
[] |
no_license
|
bluecube/thesis
|
63e745076c86a3122e9c3d7ff42ff22e32921860
|
588db206e64de9b681372fea9a70d3fa2aa598df
|
refs/heads/master
| 2016-09-06T00:01:03.840006
| 2013-05-27T09:36:51
| 2013-05-27T09:36:51
| 1,376,241
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,060
|
py
|
from __future__ import unicode_literals
import collections
import logging
from . import sirf
from . import sirf_messages
if bytes == str:
# This branch is here for python 2.x and to avoid
# the cost of calls to sirf.bytes_to_message_id
# This whole business is a little ugly :-)
_message_id_filter = chr
else:
_message_id_filter = lambda x: x
assert _message_id_filter(97) == b'a'[0]
class GpsOperations(collections.Iterator):
"""
Operations common to both real gps and recording.
"""
def __init__(self):
self._logger = logging.getLogger('localization.gps')
def _read_binary_sirf_msg(self):
"""
Return bytes with a single valid message read from the port
(the message payload).
"""
raise NotImplementedError()
def set_message_rate(self, msg_type, rate):
"""
Set how often a message gets sent by the SIRF chip.
Rate is integer, meaning number of seconds, 0 means disabled.
This is a no-op unless we are on a real gps.
"""
pass
def try_read_message(self):
"""
Try to read one SIRF message from the gps.
Raises UnrecognizedMessageException.
"""
return sirf.from_bytes(self._read_binary_sirf_msg())
def read_message(self):
"""
Read one recognized SIRF message from the gps.
"""
while True:
try:
return sirf.from_bytes(self._read_binary_sirf_msg())
except sirf.UnrecognizedMessageException:
pass
def read_specific_message(self, msg_type):
"""
Discards messages until one of given type is received.
May block for a long time, careful with this.
"""
if not issubclass(msg_type, sirf_messages._SirfReceivedMessageBase):
raise TypeError("msg_type must be a message type.")
msg = None
while not isinstance(msg, msg_type):
msg = self.read_message()
#print(msg)
return msg
def filtered_messages(self, msg_type_set):
"""
Returns iterator of messages of types in msg_type_set.
Faster than filtering using isinstance.
"""
ids = {msg_type.get_message_id() for msg_type in msg_type_set}
while True:
data = self._read_binary_sirf_msg()
if sirf.bytes_to_message_id(data) in ids:
yield sirf.from_bytes(data)
def split_to_cycles(self, msg_type_filter = None, separation = 0.5):
"""
Returns iterator of messages grouped by the measurement cycles
and optionally filtered only to message types contained in msg_type_filter.
"""
ids = {msg_type.get_message_id() for msg_type in msg_type_filter}
if not len(ids):
class _Everything:
def __contains__(self, x):
return True
ids = _Everything()
out = []
last_msg_time = float("nan")
while True:
data = self._read_binary_sirf_msg()
if sirf.bytes_to_message_id(data) in ids:
out.append(sirf.from_bytes(data))
if self.last_msg_time - last_msg_time > separation:
yield out
out = []
last_msg_time = self.last_msg_time
def loop(self, observers, cycle_end_callback = None, cycle_end_threshold = 0.3, log_status = 600):
"""
Read messages in infinite loop and notify observers.
observers:
Iterable of observers that will be notified as messages
are received
cycle_end_callback:
Callable that will be called after the measurement cycle ends, or None.
Block end callback will only be called when a message arrives with time distance larger
than block end threshold, not immediately after the time runs out!
cycle_end_threshold:
How long a pause between two messages must be to be taken as a start of new measurement cycle.
log_status:
After how many cycles should the status be logged.
If this is false, then no logging is performed.
"""
observers = list(observers)
message_ids = {}
for observer in observers:
for message_type in observer.observed_message_types():
filtered_id = _message_id_filter(message_type.get_message_id())
message_ids.setdefault(filtered_id, []).append(observer)
if log_status:
status_id = _message_id_filter(sirf_messages.GeodeticNavigationData.get_message_id())
else:
status_id = None
status_remaining = 0
last_msg_time = float("nan")
while True:
try:
binary = self._read_binary_sirf_msg()
except StopIteration:
return
if cycle_end_callback is not None and self.last_msg_time - last_msg_time > cycle_end_threshold:
cycle_end_callback()
last_msg_time = self.last_msg_time
message_id = binary[0]
if status_remaining <= 0 and message_id == status_id:
message = sirf.from_bytes(binary)
self._logger.info(message.status_line())
status_remaining = log_status
if message_id not in message_ids:
continue
else:
if message_id == status_id:
status_remaining -= 1
if message_id not in message_ids:
continue
else:
message = sirf.from_bytes(binary)
for observer in message_ids[message_id]:
observer(message)
def __next__(self):
"""
We want to support iterator protocol.
"""
return self.read_message()
def next(self):
"""
Iterator protocol for python 2.x
"""
return self.read_message()
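# Note: GpsOperations is effectively an abstract base; subclasses (a live GPS
# or a recording) only provide _read_binary_sirf_msg(), and the reading,
# filtering and loop helpers above are built on top of it.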
|
[
"blue.cube@seznam.cz"
] |
blue.cube@seznam.cz
|
7c38e135fbe87c2b8a76e963211c110dcae4f12e
|
9bac4cd580ecd3152b828d3bb421e648f2156361
|
/_admin_panel/apuzzles/forms.py
|
c6fc0066f8a3ab00f8123e3fdbbcb435b61c62ae
|
[] |
no_license
|
sharingsimplethoughts/mygame2
|
2d3b5febfc950faeec535347fbdaff39191a4805
|
d0432bdbf74b03fb7244ff8911f04b485aff016f
|
refs/heads/master
| 2023-05-29T04:01:04.104641
| 2020-07-07T14:38:07
| 2020-07-07T14:38:07
| 276,071,526
| 0
| 0
| null | 2021-06-11T18:12:15
| 2020-06-30T10:45:33
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,626
|
py
|
from django import forms
from puzzles.models import *
class PuzzlesAddEditForm(forms.Form):
def clean(self):
les_name = self.data['les_name']
les_cat = self.data['les_cat']
les_desc = self.data['les_desc']
les_hint = self.data['les_hint']
les_exp = self.data['les_exp']
les_learn = self.data['les_learn']
if not les_name or les_name=="":
raise forms.ValidationError('Please provide puzzle name')
if not les_cat or les_cat=="":
raise forms.ValidationError('Please choose puzzle category')
if les_cat:
obj = PuzzleCategory.objects.filter(id=les_cat).first()
if not obj:
raise forms.ValidationError('Please choose valid puzzle category')
if not les_desc or les_desc=="":
raise forms.ValidationError('Please provide puzzle description')
if not les_hint or les_hint=="":
raise forms.ValidationError('Please provide puzzle hint')
if not les_exp or les_exp=="":
raise forms.ValidationError('Please provide puzzle explanation')
if not les_learn or les_learn=="":
raise forms.ValidationError('Please provide puzzle learning text')
class PuzzleCategoriesAddForm(forms.Form):
def clean(self):
cr_name=self.data['cr_name']
cr_list=self.data['cr_list']
if not cr_name or cr_name=="":
raise forms.ValidationError('Please provide category name')
# if not cr_list or cr_list=="," or cr_list=="":
# raise forms.ValidationError('Please select puzzles')
lcr = PuzzleCategory.objects.filter(name=cr_name).first()
if lcr:
raise forms.ValidationError('This category name already exists')
class PuzzleCategoriesEditForm(forms.Form):
# def __init__(self,*args, **kwargs):
# self.cr_id=kwargs.pop('cr_id',None)
# super(PuzzleCategoriesEditForm,self).__init__(*args,**kwargs)
def clean(self):
cr_id=self.data['cr_id']
cr_name=self.data['cr_name']
cr_list=self.data['cr_list']
if not cr_name or cr_name=="":
raise forms.ValidationError('Please provide category name')
# if not cr_list or cr_list=="," or cr_list=="":
# raise forms.ValidationError('Please select puzzles')
lcobj = PuzzleCategory.objects.filter(id=cr_id).first()
if lcobj.name!=cr_name:
lcr = PuzzleCategory.objects.filter(name=cr_name).first()
if lcr:
raise forms.ValidationError('This category name already taken')
|
[
"sukamal.sinha@fluper.in"
] |
sukamal.sinha@fluper.in
|
d2139d040c78dcd1ac92ed08ba7de06fe9427ce8
|
60962534e8f0fbbe87732ff38f613a3f5fc5342f
|
/largestRectangleHistogram/main.py
|
8977284fc74c5d2d742e622f35fdafa1e1048086
|
[] |
no_license
|
publicbull/leetcode
|
4ebde395814e8ed9ce8bc8576d3c15d224ee3722
|
73fb5c1d77002cc24a2ea2db58e679cf2bd1c767
|
refs/heads/master
| 2020-12-24T15:22:12.457530
| 2013-07-03T22:03:15
| 2013-07-03T22:03:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
'''
Created on May 15, 2013
@author: Administrator
'''
def largestRectangle(data):
def _largestRectangle(left, right, maxRect):
if left > right:
return
currMin = data[left]
currMinPos = left
for i in range(left, right + 1):
if currMin > data[i]:
currMin = data[i]
currMinPos = i
maxRect[0] = currMin * (right - left + 1)
maxRect[1], maxRect[2] = left, right
max1 = [-1, 0, 0]
_largestRectangle(left, currMinPos - 1, max1)
if maxRect[0] < max1[0]:
maxRect[0], maxRect[1], maxRect[2] = max1[0], max1[1], max1[2]
max2 = [-1, 0, 0]
_largestRectangle(currMinPos + 1, right, max2)
if maxRect[0] < max2[0]:
maxRect[0], maxRect[1], maxRect[2] = max2[0], max2[1], max2[2]
size = len(data)
maxRect = [-1, 0, 0]
_largestRectangle(0, size - 1, maxRect)
return maxRect
if __name__ == '__main__':
data = [2, 1, 5, 6, 2, 3]
print(largestRectangle(data))
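# Expected output: [10, 2, 3], i.e. the largest rectangle has area 10,
# spanning the bars of height 5 and 6 at indices 2..3.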
|
[
"baiyubin@gmail.com"
] |
baiyubin@gmail.com
|
c884e21d21ea76207943ee8ca33e776e8a9ee7e0
|
2d58c1351ab970eb55f4832b09582592e96468d5
|
/p31.py
|
4a9857e0b2d677a5a819dd859be186c88da7b955
|
[] |
no_license
|
0x0400/LeetCode
|
832bc971c2cae9eecb55f5b14e8c34eaec0d9e26
|
94bb9fedc908490cc52d87def317c057fadaeceb
|
refs/heads/master
| 2023-02-24T20:13:11.345873
| 2023-02-10T16:46:31
| 2023-02-10T16:46:31
| 84,653,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 851
|
py
|
# https://leetcode.com/problems/next-permutation/
class Solution(object):
def nextPermutation(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
idx = len(nums) - 2
while idx >= 0:
if nums[idx] >= nums[idx+1]:
idx -= 1
continue
minIdx = len(nums) -1
while minIdx > idx:
if nums[minIdx] <= nums[idx]:
minIdx -= 1
continue
break
nums[idx], nums[minIdx] = nums[minIdx], nums[idx]
break
        # Reverse the suffix after the pivot (the whole list if none was found).
        idx += 1
        lastIdx = len(nums) - 1
        i = 0
while idx + i < lastIdx - i:
nums[idx+i], nums[lastIdx-i] = nums[lastIdx-i], nums[idx+i]
i += 1
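# Usage sketch (hypothetical input); the list is permuted in place:
if __name__ == "__main__":
    nums = [1, 2, 3]
    Solution().nextPermutation(nums)
    print(nums)  # -> [1, 3, 2]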
|
[
"0x0400@users.noreply.github.com"
] |
0x0400@users.noreply.github.com
|
25510828bc0661930551ad3acd71ac4f3ed9447b
|
5eca88bd5e2d9f5bb92d0a5cdeb39032015c4b92
|
/python/batch_uninstall.py
|
448a61a5d18cfc0162b9ddc2f57929bf58a6b571
|
[] |
no_license
|
cet4meiguo/AndroidTestPyScripts
|
09e105cc40389ec530af99aa3ce9be43378ea756
|
ae6864a3cca2f8c1486e67faf069c9c137deedb4
|
refs/heads/master
| 2021-06-18T03:24:24.750998
| 2017-03-04T14:16:27
| 2017-03-04T14:16:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 545
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
# Batch-uninstall all third-party applications from the connected device
def uninstall():
os.popen("adb wait-for-device")
print "start uninstall..."
for packages in os.popen("adb shell pm list packages -3").readlines():
packageName = packages.split(":")[-1].splitlines()[0]
os.popen("adb uninstall %s" %packageName)
print "remove %s successes." %packageName
if __name__ == "__main__":
uninstall()
print " "
print "All the third-party applications uninstall successes."
|
[
"jayzhen_testing@163.com"
] |
jayzhen_testing@163.com
|
eb86ed1da548c5ada25cc1aa23969fe16b9b6d66
|
cc7ad1a2aa5d691c15ff7838d1e5126ab2c2bee0
|
/basic_ranking/urls.py
|
7691f1ea08646018399e25f6c2c1ded7e23ba8f9
|
[] |
no_license
|
demirantay/lingooapp
|
9632be8a7d3dd00e7a4ac13618f32975da389729
|
c842bb032668ef1bd5e7f4282acd4990843c8640
|
refs/heads/master
| 2023-03-14T08:00:37.681334
| 2021-01-09T09:36:48
| 2021-01-09T09:36:48
| 285,181,982
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
from django.urls import path
from . import views
urlpatterns = [
# Ranking Overview
path(
"ranking/overview/<int:page>/",
views.ranking_overview,
name="ranking_overview"
),
# Category Ranking Page
path(
"ranking/<str:language>/<int:page>/",
views.category_ranking,
name="category_ranking"
),
]
|
[
"demir99antay@gmail.com"
] |
demir99antay@gmail.com
|
43edad709ef0ed6de4ca2dcc842527c0a49c651b
|
db4c1703bee4e79e0a275434b7491c6dfe7a8602
|
/backend/rush_19600/wsgi.py
|
3761f5a1382397ef0ff7f2d1d384d21c0f1a9ca3
|
[] |
no_license
|
crowdbotics-apps/rush-19600
|
6baf41e83f15b7d48d3a04d2b83a29c8a4cfa781
|
8ecff341418f153c1a82b6492eaef662253b3254
|
refs/heads/master
| 2022-12-01T09:00:15.372702
| 2020-08-18T05:48:32
| 2020-08-18T05:48:32
| 288,006,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
WSGI config for rush_19600 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rush_19600.settings")
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
ade89183eb307e372486d8f1a1c6c63400af1e0f
|
c3e75ab16954f7dffdf68983237df98fae832b43
|
/conf_matrix.py
|
adf29a6d56121b97b080b9403749eec2825a1afa
|
[] |
no_license
|
satojkovic/ml-algorithms-simple
|
713c6f57c23030a79db7500dff32f30858ebeee6
|
a2bd09544b82a96dcf41b5a650fee4fe21b99934
|
refs/heads/master
| 2021-06-04T15:22:06.421941
| 2021-05-11T12:50:51
| 2021-05-11T12:50:51
| 3,835,836
| 13
| 12
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sklearn.datasets import load_digits
from sklearn.cross_validation import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
def main():
    # Train a one-vs-rest RBF-kernel SVM on the digits dataset and report
    # the confusion matrix and accuracy on a held-out split.
    digits = load_digits()
X_train, X_test, y_train, y_test = train_test_split(digits.data,
digits.target)
estimator = SVC(C=1.0, kernel='rbf', gamma=0.01)
clf = OneVsRestClassifier(estimator)
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
cm = confusion_matrix(y_test, pred)
print cm
print accuracy_score(y_test, pred)
if __name__ == '__main__':
main()
|
[
"satojkovic@gmail.com"
] |
satojkovic@gmail.com
|
9a79938a1d99004ccdaf6accc35ec65df0ca6e7c
|
abbc11abfabb0d3976789a9ec073b28892c78778
|
/machine_program/data_ready.py
|
c1616d0e234e88c9fe2e2d22875178636457ef3f
|
[] |
no_license
|
sunxhap/machine_learning
|
b06b28b3aba5b39704d8a3ae282f366dad6af406
|
ef1d80a16fd35f03e428ac27b9b0f771f6f1edbb
|
refs/heads/master
| 2022-05-01T15:22:07.314221
| 2017-11-12T09:12:30
| 2017-11-12T09:12:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,721
|
py
|
# -*- coding: utf-8 -*-
"""
@Time: 2017/11/8 11:24
@Author: sunxiang
"""
import numpy as np
filename = "data.csv"
def data_ready():
"""
    Read in the data; class labels are already present.
"""
data = []
labels = []
with open("data.txt") as ifile:
for line in ifile:
try:
tokens = line.strip().split(' ')
data.append([float(tk) for tk in tokens[:-1]])
labels.append(tokens[-1])
except:
print line
x = np.array(data)
labels = np.array(labels)
y = np.zeros(labels.shape)
    # Convert labels to 0/1
y[labels == 'A'] = 1
return x, y
def data_ready_notype():
"""
    Read in the data (class labels included); return a list.
"""
data = []
labels = []
with open(filename) as ifile:
for line in ifile:
try:
tokens = line.strip().split(' ')
data.append([tk for tk in tokens])
labels.append(tokens[-1])
except:
print line
# x = np.array(data)
# labels = np.array(labels)
# y = np.zeros(labels.shape)
#
    # # Convert labels to 0/1
# y[labels == 'A'] = 1
return data
# def createDataSet():
# """
# Create the dataset
# :return:
# """
# return data_ready_notype()
# # dataSet = [[1, 1, 'yes'],
# # [1, 1, 'yes'],
# # [1, 0, 'no'],
# # [0, 1, 'no'],
# # [0, 1, 'no']]
# # labels = ['no surfacing', 'flippers']
# # return dataSet, labels
def createDataSet():
"""
    Create the dataset
:return:
"""
return np.loadtxt(filename, dtype=str, delimiter=",").tolist()
|
[
"1925453680@qq.com"
] |
1925453680@qq.com
|
bf5774cfced892e0f81372fb0b659c3deb8a2cc0
|
97c5fe6a54636de9b056719ea62ac1de4e76ebdc
|
/src/matches/utils.py
|
75c3ec2bd03ff40320434f900d8ef59465c9cf50
|
[
"MIT"
] |
permissive
|
EdwardBetts/matchmaker
|
937ece7acbfd1fcb57ab59cd13b16c3cd67d54f3
|
ec56d18c6af8ca904325deca3be56484d3415c70
|
refs/heads/master
| 2020-12-11T01:50:10.773983
| 2016-01-26T16:53:29
| 2016-01-26T16:53:29
| 56,478,725
| 0
| 0
| null | 2016-04-18T05:11:12
| 2016-04-18T05:11:12
| null |
UTF-8
|
Python
| false
| false
| 1,382
|
py
|
from decimal import Decimal
def get_match(user_a, user_b):
a = user_a.useranswer_set.all().values_list("question")
b = user_b.useranswer_set.all().values_list("question")
    matches_b = user_b.useranswer_set.filter(question__in=a).order_by("question")
    matches_a = user_a.useranswer_set.filter(question__in=b).order_by("question")
questions_match_num = matches_b.count()
if questions_match_num:
a_points = 0
b_points = 0
a_total_points = 0
b_total_points = 0
for question_a, question_b in zip(matches_a, matches_b):
if question_b.their_answer == question_a.my_answer:
a_points += question_b.their_points
a_total_points += question_b.their_points
if question_a.their_answer == question_b.my_answer:
b_points += question_a.their_points
b_total_points += question_a.their_points
if a_total_points == 0:
a_decimal = 0.000001
else:
a_decimal = a_points / Decimal(a_total_points)
if b_total_points == 0:
b_decimal = 0.000001
else:
b_decimal = b_points / Decimal(b_total_points)
match_percentage = (Decimal(a_decimal) * Decimal(b_decimal)) ** (1/Decimal(questions_match_num))
return match_percentage, questions_match_num
else:
return None, False
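# Worked example (hypothetical numbers, not from the app): over 4 shared
# questions, if user A earns 8 of 10 possible points and user B earns 6 of
# 10, the match is (0.8 * 0.6) ** (1/4), roughly 0.83:
if __name__ == "__main__":
    print((Decimal("0.8") * Decimal("0.6")) ** (1 / Decimal(4)))  # ~0.832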
|
[
"eddie.valv@gmail.com"
] |
eddie.valv@gmail.com
|
6f759c661b592a52caaf1452caae33339900454b
|
5f364b328d0e7df6f292dbbec266995f495b2ed4
|
/src/python/txtai/pipeline/translation.py
|
b93b6d7c6fbacb0d01bcd3ab7561c52c463746d7
|
[
"Apache-2.0"
] |
permissive
|
binglinchengxiash/txtai
|
a17553f57ddd857ff39a7d0b38e24930f5c71596
|
1513eb8390f01848742e67690b6e4bc6452101ee
|
refs/heads/master
| 2023-04-03T18:59:35.845281
| 2021-04-05T22:05:15
| 2021-04-05T22:05:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,895
|
py
|
"""
Translation module
"""
import fasttext
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer, MarianMTModel, MarianTokenizer
from transformers.file_utils import cached_path
from transformers.hf_api import HfApi
from .hfmodel import HFModel
class Translation(HFModel):
"""
Translates text from source language into target language.
"""
# Default language detection model
DEFAULT_LANG_DETECT = "https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.ftz"
def __init__(self, path="facebook/m2m100_418M", quantize=False, gpu=True, batch=64, langdetect=DEFAULT_LANG_DETECT):
"""
Constructs a new language translation pipeline.
Args:
path: optional path to model, accepts Hugging Face model hub id or local path,
uses default model for task if not provided
quantize: if model should be quantized, defaults to False
gpu: True/False if GPU should be enabled, also supports a GPU device id
batch: batch size used to incrementally process content
langdetect: path to language detection model, uses a default path if not provided
"""
# Call parent constructor
super().__init__(path, quantize, gpu, batch)
# Language detection
self.detector = None
self.langdetect = langdetect
# Language models
self.models = {}
self.ids = self.available()
def __call__(self, texts, target="en", source=None):
"""
Translates text from source language into target language.
This method supports texts as a string or a list. If the input is a string,
the return type is string. If text is a list, the return type is a list.
Args:
texts: text|list
target: target language code, defaults to "en"
source: source language code, detects language if not provided
Returns:
list of translated text
"""
values = [texts] if not isinstance(texts, list) else texts
# Detect source languages
languages = self.detect(values) if not source else [source] * len(values)
unique = set(languages)
# Build list of (index, language, text)
values = [(x, lang, values[x]) for x, lang in enumerate(languages)]
results = {}
for language in unique:
# Get all text values for language
inputs = [(x, text) for x, lang, text in values if lang == language]
# Translate text in batches
outputs = []
for chunk in self.batch([text for _, text in inputs], self.batchsize):
outputs.extend(self.translate(chunk, language, target))
# Store output value
for y, (x, _) in enumerate(inputs):
results[x] = outputs[y]
# Return results in same order as input
results = [results[x] for x in sorted(results)]
return results[0] if isinstance(texts, str) else results
def available(self):
"""
Runs a query to get a list of available language models from the Hugging Face API.
Returns:
list of available language name ids
"""
return set(x.modelId for x in HfApi().model_list() if x.modelId.startswith("Helsinki-NLP"))
def detect(self, texts):
"""
Detects the language for each element in texts.
Args:
texts: list of text
Returns:
list of languages
"""
if not self.detector:
# Suppress unnecessary warning
fasttext.FastText.eprint = lambda x: None
# Load language detection model
path = cached_path(self.langdetect)
self.detector = fasttext.load_model(path)
# Transform texts to format expected by language detection model
texts = [x.lower() for x in texts]
return [x[0].split("__")[-1] for x in self.detector.predict(texts)[0]]
def translate(self, texts, source, target):
"""
Translates text from source to target language.
Args:
texts: list of text
source: source language code
target: target language code
Returns:
list of translated text
"""
# Return original if already in target language
if source == target:
return texts
# Load model and tokenizer
model, tokenizer = self.lookup(source, target)
model.to(self.device)
indices = None
with self.context():
if isinstance(model, M2M100ForConditionalGeneration):
source = self.langid(tokenizer.lang_code_to_id, source)
target = self.langid(tokenizer.lang_code_to_id, target)
tokenizer.src_lang = source
tokens, indices = self.tokenize(tokenizer, texts)
translated = model.generate(**tokens, forced_bos_token_id=tokenizer.lang_code_to_id[target])
else:
tokens, indices = self.tokenize(tokenizer, texts)
translated = model.generate(**tokens)
# Decode translations
translated = tokenizer.batch_decode(translated, skip_special_tokens=True)
# Combine translations - handle splits on large text from tokenizer
results, last = [], -1
for x, i in enumerate(indices):
if i == last:
results[-1] += translated[x]
else:
results.append(translated[x])
last = i
return results
def lookup(self, source, target):
"""
Retrieves a translation model for source->target language. This method caches each model loaded.
Args:
source: source language code
target: target language code
Returns:
(model, tokenizer)
"""
# Determine best translation model to use, load if necessary and return
path = self.modelpath(source, target)
if path not in self.models:
self.models[path] = self.load(path)
return self.models[path]
def modelpath(self, source, target):
"""
Derives a translation model path given source and target languages.
Args:
source: source language code
target: target language code
Returns:
model path
"""
# First try direct model
template = "Helsinki-NLP/opus-mt-%s-%s"
path = template % (source, target)
if path in self.ids:
return path
# Use multi-language - english model
if target == "en":
return template % ("mul", target)
# Default model if no suitable model found
return self.path
def load(self, path):
"""
Loads a model specified by path.
Args:
path: model path
Returns:
(model, tokenizer)
"""
if path.startswith("Helsinki-NLP"):
model = MarianMTModel.from_pretrained(path)
tokenizer = MarianTokenizer.from_pretrained(path)
else:
model = M2M100ForConditionalGeneration.from_pretrained(path)
tokenizer = M2M100Tokenizer.from_pretrained(path)
# Apply model initialization routines
model = self.prepare(model)
return (model, tokenizer)
def langid(self, languages, target):
"""
Searches a list of languages for a prefix match on target.
Args:
languages: list of languages
target: target language code
Returns:
best match or None if no match found
"""
for lang in languages:
if lang.startswith(target):
return lang
return None
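# Usage sketch (assumes the txtai package is installed and models can be
# downloaded; the input strings are illustrative):
#
#   from txtai.pipeline import Translation
#
#   translate = Translation()
#   print(translate("This is a test", "es"))  # translated into Spanish
#
# When `source` is omitted, the fasttext detector above picks the language.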
|
[
"561939+davidmezzetti@users.noreply.github.com"
] |
561939+davidmezzetti@users.noreply.github.com
|
1aeafea4c287d40f71a3a955dbca59e856894d98
|
ed0e1f62c637cee6c120f77ffc0d8db4a0b218c2
|
/test8.py
|
c9834f9ec424f9870f678bc7c5c69763054d726a
|
[] |
no_license
|
bcrafton/weight-mirror
|
1d386fe68f88eea3b67ddcef70450a37331871d7
|
d2e35e7378fc261de397c54a6db76b3f8a1e0281
|
refs/heads/master
| 2020-05-15T18:45:57.488229
| 2019-04-23T20:44:17
| 2019-04-23T20:44:17
| 182,438,741
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,428
|
py
|
import numpy
import numpy as np  # the script mixes the `numpy` and `np` names below
import argparse
import keras
import matplotlib.pyplot as plt
from whiten import whiten
#######################################
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--lr', type=float, default=1e-2)
parser.add_argument('--l2', type=float, default=1e-3)
args = parser.parse_args()
LAYER1 = 1024 * 3
LAYER2 = 1000
LAYER3 = 1000
LAYER4 = 10
TRAIN_EXAMPLES = 50000
TEST_EXAMPLES = 10000
#######################################
def softmax(x):
e_x = np.exp(x - np.max(x, axis=1, keepdims=True))
return e_x / np.sum(e_x, axis=1, keepdims=True)
def sigmoid(x):
return 1. / (1. + np.exp(-x))
def dsigmoid(x):
return x * (1. - x)
def relu(x):
return x * (x > 0)
def drelu(x):
# USE A NOT Z
return 1.0 * (x > 0)
def tanh(x):
return np.tanh(x)
def dtanh(x):
# USE A NOT Z
return (1. - (x ** 2))
#######################################
def unit_vector(vector):
return vector / np.linalg.norm(vector)
def angle_between(v1, v2):
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def zca_approx(data, ksize, ssize):
N, H, W, C = np.shape(data)
KX, KY, KZ = ksize
SX, SY, SZ = ssize
for sx in range(0, KX, SX):
for sy in range(0, KY, SY):
for sz in range(0, KZ, SZ):
for x in range(sx, H+sx, KX):
for y in range(sy, W+sy, KY):
for z in range(sz, C+sz, KZ):
x1 = x
x2 = x + KX
y1 = y
y2 = y + KY
z1 = z
z2 = z + KZ
if (x2 > H or y2 > W or z2 > C):
continue
print (x, y, z)
white = whiten(X=data[:, x1:x2, y1:y2, z1:z2], method='zca')
white = np.reshape(white, (N, x2-x1, y2-y1, z2-z1))
data[:, x1:x2, y1:y2, z1:z2] = white
return data
#######################################
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
assert(np.shape(x_train) == (50000, 32, 32, 3))
assert(np.shape(x_test) == (10000, 32, 32, 3))
y_train = keras.utils.to_categorical(y_train, 10)
x_train = x_train.astype('float32')
y_test = keras.utils.to_categorical(y_test, 10)
x_test = x_test.astype('float32')
mean = np.mean(x_train, axis=0, keepdims=True)
std = np.std(x_train, axis=0, ddof=1, keepdims=True)
scale = std
x_train = x_train - mean
x_train = x_train / scale
x_train = x_train.reshape(TRAIN_EXAMPLES, 32, 32, 3)
# x_train = whiten(x_train)
x_train = zca_approx(x_train, (8, 8, 3), (8, 8, 3))
x_train = x_train.reshape(TRAIN_EXAMPLES, 1024 * 3)
#######################################
high = 1. / np.sqrt(LAYER1)
weights1 = np.random.uniform(low=-high, high=high, size=(LAYER1, LAYER2))
bias1 = np.zeros(shape=LAYER2)
high = 1. / np.sqrt(LAYER2)
weights2 = np.random.uniform(low=-high, high=high, size=(LAYER2, LAYER3))
bias2 = np.zeros(shape=LAYER3)
high = 1. / np.sqrt(LAYER3)
weights3 = np.random.uniform(low=-high, high=high, size=(LAYER3, LAYER4))
bias3 = np.zeros(shape=LAYER4)
high = 1. / np.sqrt(LAYER2)
# b2 = np.random.uniform(low=-high, high=high, size=(LAYER2, LAYER3))
b2 = np.zeros(shape=(LAYER2, LAYER3))
########
xx1 = 0.
xx2 = 0.
batch_size = 100
for idx in range(0, 10000, batch_size):
print (idx)
start = idx
end = idx + batch_size
x1 = np.reshape(x_train[start:end], (batch_size, LAYER1)) @ weights1
x2 = np.random.uniform(low=-1., high=1., size=(batch_size, LAYER1)) @ weights1
y1 = x1 @ weights2
y2 = x2 @ weights2
xx1 += x1.T @ y1
xx2 += x2.T @ y2
xx1 = xx1 / np.max(xx1)
xx2 = xx2 / np.max(xx2)
weights2 = weights2 / np.max(weights2)
print (np.shape(xx1), np.shape(xx2), np.shape(weights2))
loss1 = np.sum((weights2 - xx1) ** 2)
loss2 = np.sum((weights2 - xx2) ** 2)
angle1 = angle_between(np.reshape(xx1, -1), np.reshape(weights2, -1)) * (180.0 / 3.14)
angle2 = angle_between(np.reshape(xx2, -1), np.reshape(weights2, -1)) * (180.0 / 3.14)
print (loss1, loss2, loss1 / loss2)
print (angle1, angle2)
|
[
"crafton.b@husky.neu.edu"
] |
crafton.b@husky.neu.edu
|
8f633e9e029f949e14bea3ec4d3701110965256d
|
21ae28849f391b58cbc5a6d3d586af68e20e3954
|
/bin/update_node_allowed_ips.py
|
add0aec3c64275b2c578baca75cfc829f189d714
|
[
"MIT"
] |
permissive
|
OriHoch/knesset-data-k8s
|
f6fa12abdde9d7f929769938a3e82ea8fa364e3d
|
0a9de5ecd1fc50f3607936500833e15de2ae8f80
|
refs/heads/master
| 2023-08-17T12:28:05.208947
| 2023-08-14T18:04:57
| 2023-08-14T18:04:57
| 116,827,286
| 0
| 0
|
MIT
| 2018-03-28T10:58:49
| 2018-01-09T14:35:09
|
Shell
|
UTF-8
|
Python
| false
| false
| 838
|
py
|
#!/usr/bin/env python3
import json
import subprocess
ALLOWED_IPS="""
212.80.204.81
5.100.254.253
194.36.91.251
212.199.115.150
212.80.204.206
194.36.91.165
5.100.248.220
195.28.181.207
212.115.111.44
212.115.111.199
83.229.74.79
83.229.74.80
194.36.90.155
"""
def main():
allowed_ips = [ip.strip() for ip in ALLOWED_IPS.split() if ip.strip()]
node_ip_names = {}
for node in json.loads(subprocess.check_output(['kubectl', 'get', 'node', '-o', 'json']))['items']:
node_ip_names[node['metadata']['annotations']['rke.cattle.io/external-ip']] = node['metadata']['name']
for ip in allowed_ips:
node_name = node_ip_names.get(ip)
if node_name:
subprocess.check_call(['kubectl', 'label', 'node', node_name, 'oknesset-allowed-ip=true', '--overwrite'])
if __name__ == '__main__':
main()
|
[
"ori@uumpa.com"
] |
ori@uumpa.com
|
b02560b0defdd18eb23f539beec3bddce578c929
|
4382c60f18aba351a2e7cdab7ce2793c2d27717c
|
/Algorithm 190902/N-Queen.py
|
8cb26ea9eb6c4917e91ef390623b09c6de2f3091
|
[] |
no_license
|
vxda7/pycharm
|
e550b1db4cabe1a0fa03e140f33b028ef08bd4cb
|
ce29f682a923875b62a8c7c0102790eef11ab156
|
refs/heads/master
| 2020-07-03T11:27:27.807096
| 2019-11-15T08:50:32
| 2019-11-15T08:50:32
| 201,891,357
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 767
|
py
|
def find(N):
    # Backtracking: place one queen per row, tracking used columns and
    # both diagonal directions so no two queens attack each other.
    def place(row, cols, diag1, diag2):
        if row == N:
            return 1
        total = 0
        for col in range(N):
            # Skip squares attacked by a queen in an earlier row.
            if col in cols or (row + col) in diag1 or (row - col) in diag2:
                continue
            total += place(row + 1, cols | {col},
                           diag1 | {row + col}, diag2 | {row - col})
        return total
    return place(0, set(), set(), set())
t = int(input())
for tc in range(1, t+1):
N = int(input())
print("#{} {}".format(tc, find(N)))
|
[
"vxda77@gmail.com"
] |
vxda77@gmail.com
|
45febe6de65dd3e8d1ad838f075d14c9a9588b72
|
baa6ba7246fb214c32451126d521919d5f9f40c5
|
/pbrx/cmd/main.py
|
22a43cea35cd05e4957e80f794fb70c5a554052d
|
[
"Apache-2.0"
] |
permissive
|
emonty/shiny-octo-computing-machine
|
52838f025fb60c69df78e8f6165d76780ef3c676
|
7fa0dab928196e4f9ef0a5110459e350059e2493
|
refs/heads/master
| 2020-03-08T12:21:51.197849
| 2018-04-04T19:52:12
| 2018-04-27T21:17:33
| 128,124,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,385
|
py
|
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import json
import logging
import logging.config
import os
import sys
try:
import yaml
except ImportError:
yaml = None
from pbrx import containers
from pbrx import siblings
import pbr.version
log = logging.getLogger("pbrx")
def _read_logging_config_file(filename):
if not os.path.exists(filename):
raise ValueError("Unable to read logging config file at %s", filename)
ext = os.path.splitext(filename)[1]
if ext in (".yml", ".yaml"):
if not yaml:
raise ValueError(
"PyYAML not installed but a yaml logging config was provided."
" Install PyYAML, or convert the config to JSON."
)
return yaml.safe_load(open(filename, "r"))
elif ext == ".json":
return json.load(open(filename, "r"))
return filename
def setup_logging(log_config, debug):
if log_config:
config = _read_logging_config_file(log_config)
if isinstance(config, dict):
logging.config.dictConfig(config)
else:
logging.config.fileConfig(config)
else:
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG if debug else logging.INFO)
def main():
parser = argparse.ArgumentParser(
description="pbrx: Utilities for projects using pbr"
)
parser.add_argument(
"--version",
action="version",
version=str(pbr.version.VersionInfo("pbrx")),
)
parser.add_argument(
"--debug", help="Emit debug output", action="store_true"
)
parser.add_argument(
"--log-config",
help="Path to a logging config file. Takes precedence over --debug",
)
subparsers = parser.add_subparsers(
title="commands", description="valid commands", help="additional help"
)
cmd_siblings = subparsers.add_parser(
"install-siblings", help="install sibling packages"
)
cmd_siblings.set_defaults(func=siblings.main)
cmd_siblings.add_argument(
"-c,--constraints",
dest="constraints",
help="Path to constraints file",
required=False,
)
cmd_siblings.add_argument(
"projects", nargs="*", help="List of project src dirs to process"
)
cmd_containers = subparsers.add_parser(
"build-containers", help="build per-process container images"
)
cmd_containers.set_defaults(func=containers.build)
cmd_containers.add_argument(
"--prefix", help="Organization prefix containers will be published to"
)
args = parser.parse_args()
setup_logging(args.log_config, args.debug)
try:
return args.func(args)
except Exception as e:
log.exception(str(e))
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
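# Usage sketch (assumed invocations, matching the subcommands defined above):
#
#   pbrx install-siblings -c constraints.txt src/project-a
#   pbrx build-containers --prefix myorg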
|
[
"mordred@inaugust.com"
] |
mordred@inaugust.com
|
74bdee5b7b4f80809a44c39f4990a698827c4318
|
85d380bc1fa9b5d091caab98951fec2bf7ae0407
|
/hog_von_mises.py
|
ead3541b58e1645770d071fb461c792f1a2fddc4
|
[] |
no_license
|
bbbales2/faehrmann_hogs
|
6b0469351d6a1e749fe00399474ee0466ab94fcb
|
622ec582501041ff5e6dd50526d0e3d91de29f53
|
refs/heads/master
| 2020-02-26T13:39:05.916838
| 2016-06-23T23:20:30
| 2016-06-23T23:20:30
| 61,756,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,279
|
py
|
#%%
import pystan
import matplotlib.pyplot as plt
import numpy
import os
import itertools
import math
import scipy.integrate
import mahotas
import collections
import skimage.measure, skimage.io, skimage.feature, skimage.util, skimage.filters
import seaborn
import random
#
#,,
# '/home/bbales2/microhog/rafting_rotated_2d/2na/9/signalx.png',
# '/home/bbales2/microhog/rafting_rotated_2d/ah/9/signalx.png''/home/bbales2/web/hog/static/images/renen5strain02.png',
# '/home/bbales2/web/hog/static/images/renen5strain22.png'
#
ims2 = []
for path in ['/home/bbales2/web/hog/static/images/molybdenum0.png',
'/home/bbales2/web/hog/static/images/molybdenum1.png']:
im = skimage.io.imread(path, as_grey = True).astype('float')
im = skimage.transform.rescale(im, 0.25)
im -= im.mean()
im /= im.std()
stats = []
for i in range(im.shape[0]):
for j in range(im.shape[1]):
if i == 0:
dy = im[i + 1, j] - im[i, j]
elif i == im.shape[0] - 1:
dy = im[i, j] - im[i - 1, j]
else:
dy = (im[i + 1, j] - im[i - 1, j]) / 2.0
if j == 0:
dx = im[i, j + 1] - im[i, j]
elif j == im.shape[1] - 1:
dx = im[i, j] - im[i, j - 1]
else:
dx = (im[i, j + 1] - im[i, j - 1]) / 2.0
angle = (numpy.arctan2(dy, dx) + numpy.pi)# / (2.0 * numpy.pi)
mag = numpy.sqrt(dy**2 + dx**2)
stats.append((angle, mag))
stats = numpy.array(stats)
plt.imshow(im)
plt.show()
1/0  # deliberate halt: the exploratory code below is not meant to run
hog = microstructure.features.hog2(im, bins = 20, stride = 1, sigma = 1.0)
#%%
stats[:, 0] = stats[:, 0] - stats[:, 0].min()
#%%
idxs = range(len(stats))
random.shuffle(idxs)
seaborn.distplot(stats[idxs[:10000], 0])
plt.show()
seaborn.distplot(stats[idxs[:10000], 1])
plt.show()
idxs = numpy.argsort(stats[:, 1])[-2000:]
seaborn.distplot(stats[idxs, 0])
plt.show()
#%%
model_code = """
data {
int<lower=1> K; //Number of Von Mises distributions to fit
int<lower=1> N;
real<lower=0.0, upper=2.0 * pi()> y[N];
}
parameters {
real<lower=0.0, upper=2.0 * pi()> mu[K];
simplex[K + 1] theta;
real<lower=0.0> kappa[K];
}
model {
real ps[K + 1];
for (k in 1:K) {
kappa[k] ~ normal(5.0, 10.0);
}
for (n in 1:N) {
for (k in 1:K) {
ps[k] <- log(theta[k]) + von_mises_log(y[n], mu[k], kappa[k]);
}
ps[K + 1] <- log(theta[K + 1]) + uniform_log(y[n], 0.0, 2.0 * pi());
increment_log_prob(log_sum_exp(ps));
}
}
generated quantities {
real out;
{
int k;
k <- categorical_rng(theta);
if (k <= K)
{
out <- von_mises_rng(mu[k], kappa[k]);
}
else
{
out <- uniform_rng(0.0, 2.0 * pi());
}
}
}
"""
sm = pystan.StanModel(model_code = model_code)
#%%
N = 100
idxs2 = range(len(idxs))
random.shuffle(idxs2)
seaborn.distplot(stats[idxs[idxs2[:N]], 0], bins = 20)
plt.show()
samples = stats[idxs[idxs2[:N]], 0]
#%%
fit = sm.sampling(data = {
'K' : 4,
'N' : N,
'y' : samples
})
#%%
print fit
plt.hist(fit.extract()['out'][2000:] % (2.0 * numpy.pi), normed = True, alpha = 0.5)
plt.hist(samples, normed = True, alpha = 0.5)
plt.show()
|
[
"bbbales2@gmail.com"
] |
bbbales2@gmail.com
|
c22f182daa3f1e38aa9cc338dd18a375ab6e398c
|
13d0ad57a2f5deb83593e73843be7cbeeaad8d3d
|
/medium/longest_palindromic_substring.py
|
50d56789db9b059cf53561ba450b02bb4226062c
|
[] |
no_license
|
mwong33/leet-code-practice
|
b21f277d73b30df9e681499733baad07979480a1
|
9c0e6294bf3b3614b185f0760906abad60f8d9b6
|
refs/heads/main
| 2023-03-29T20:35:43.841662
| 2021-03-31T22:05:44
| 2021-03-31T22:05:44
| 317,382,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 758
|
py
|
class Solution:
# O(n^2) time O(1) space
def longestPalindrome(self, s: str) -> str:
        # Return early for empty or single-character strings.
        if len(s) <= 1:
            return s
start = 0
end = 0
for i in range(len(s)):
length_1 = self.middleOut(s, i, i)
length_2 = self.middleOut(s, i, i+1)
max_length = max(length_1, length_2)
if max_length > (end - start + 1):
start = i - ((max_length-1)//2)
end = i + (max_length//2)
return s[start:end+1]
def middleOut(self, s, start, end):
while start >= 0 and end < len(s) and s[start] == s[end]:
start -= 1
end += 1
return end - start - 1
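# Usage sketch (hypothetical input):
if __name__ == "__main__":
    print(Solution().longestPalindrome("babad"))  # -> "bab" (or "aba")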
|
[
"noreply@github.com"
] |
mwong33.noreply@github.com
|
1ad003672fb2b8b55a75c3dcdb098c88cb9ebd98
|
4ace4d5a94ab0db79562f1b23edd6011a89148c6
|
/src/airflow-stubs/contrib/task_runner/cgroup_task_runner.pyi
|
4414ee2cae524c1466ecef790cd589d60586a9be
|
[
"MIT"
] |
permissive
|
viewthespace/mypy-stubs
|
9abebc2eab2b46b2230842f06114673e1a4de052
|
182fa275c4a7011eb5345694b88229adbddcc999
|
refs/heads/master
| 2023-06-07T18:52:46.739560
| 2023-06-01T22:05:27
| 2023-06-01T22:05:45
| 236,780,299
| 0
| 0
|
MIT
| 2022-01-11T20:53:55
| 2020-01-28T16:23:07
|
Python
|
UTF-8
|
Python
| false
| false
| 551
|
pyi
|
from airflow.task.task_runner.base_task_runner import BaseTaskRunner as BaseTaskRunner
from airflow.utils.helpers import reap_process_group as reap_process_group
from airflow.utils.operator_resources import Resources as Resources
from typing import Any
class CgroupTaskRunner(BaseTaskRunner):
process: Any
def __init__(self, local_task_job) -> None: ...
mem_cgroup_name: Any
cpu_cgroup_name: Any
def start(self) -> None: ...
def return_code(self): ...
def terminate(self) -> None: ...
def on_finish(self) -> None: ...
|
[
"andrew.marshall@vts.com"
] |
andrew.marshall@vts.com
|
507ee908ce5b75fda95f6fe95550269860a2ecbb
|
1ce26dbce1da7dabb87e275ff9f49a6988a34b0b
|
/shops/models.py
|
8789cd51b1e8525aad8d6eec6c3013595214bf11
|
[] |
no_license
|
eyobofficial/clothing-shop-app
|
38d48ae12fb24265aac7ecbad650c41c785daf1c
|
5bfc35f765b4ebf6916c306341597d217be60a1d
|
refs/heads/master
| 2020-03-18T01:15:46.482548
| 2018-05-20T09:02:14
| 2018-05-20T09:02:14
| 134,135,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,624
|
py
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.contrib.auth import get_user_model
from django.conf import settings
from django.urls import reverse
def get_deleted_user():
return get_user_model().objects.get_or_create(username='Deleted')[0]
class CustomUser(AbstractUser):
pass
class Base(models.Model):
"""
Base abstract model for all other models to inherit from
"""
created_at = models.DateTimeField(
'Created date',
auto_now_add=True,
help_text='Record created date and time.'
)
updated_at = models.DateTimeField(
'Modified date',
auto_now=True,
help_text='Record last modified date and time'
)
class Meta:
abstract = True
class Shop(Base):
"""
Models a virtual shop
"""
name = models.CharField(max_length=100)
slug = models.SlugField()
logo = models.ImageField(
upload_to='shops/logo/',
null=True, blank=True
)
description = models.TextField('Short description', blank=True)
class Meta:
get_latest_by = ['updated_at', ]
def __str__(self):
return self.name
def get_absolute_url(self, *args, **kwargs):
return reverse('shops:shop-detail', args=[str(self.pk)])
class Catagory(Base):
"""
    Models a product category.
Example: Dress, Shoes, Leather etc...
"""
name = models.CharField(max_length=100)
slug = models.SlugField()
description = models.TextField('Short description', blank=True)
class Meta:
get_latest_by = ['-updated_at', ]
    def __str__(self):
        return self.name
def get_absolute_url(self, *args, **kwargs):
return reverse('shops:catagory-detail', args=[str(self.pk)])
class Tag(Base):
"""
Models a product Tag
Example: leather, oldies, modern, jano etc...
"""
name = models.CharField(max_length=30)
def __str__(self):
return self.name
class Product(Base):
"""
Models a Product
"""
GENDER_OPTIONS = (
('M', 'Male'),
('F', 'Female'),
)
AGE_OPTIONS = (
('A', 'Adults'),
('K', 'Kids'),
)
shop = models.ForeignKey(
Shop,
related_name='products',
on_delete=models.CASCADE,
)
catagory = models.ForeignKey(
Catagory,
related_name='products',
on_delete=models.CASCADE,
)
name = models.CharField(max_length=100)
slug = models.SlugField()
description = models.TextField(blank=True)
tags = models.ManyToManyField(
Tag,
blank=True, related_name='products',
)
gender = models.CharField(max_length=1, choices=GENDER_OPTIONS)
age = models.CharField(max_length=1, choices=AGE_OPTIONS)
price = models.DecimalField(max_digits=10, decimal_places=2)
is_on_sale = models.BooleanField('On sale', default=False)
is_featured = models.BooleanField('Featured', default=False)
thumbnail = models.ImageField(upload_to='shops/products/')
publish = models.BooleanField(
default=True,
help_text='Publish product to the public'
)
class Meta:
order_with_respect_to = 'shop'
get_latest_by = ['-updated_at', ]
def __str__(self):
return self.name
def get_absolute_url(self, *args, **kwargs):
return reverse('shops:product-detail', args=[str(self.pk)])
class ProductPicture(Base):
product = models.ForeignKey(
Product,
related_name='pictures',
on_delete=models.CASCADE
)
picture = models.ImageField(
upload_to='shops/products/pictures',
blank=True, null=True,
)
class Meta:
get_latest_by = ['-updated_at', ]
order_with_respect_to = 'product'
def __str__(self):
return '{} product pic #{}'.format(self.product, self.pk)
class Inventory(Base):
"""
Models the product inventory data
"""
product = models.ForeignKey(
Product,
related_name='inventory',
on_delete=models.CASCADE
)
color = models.CharField(max_length=100, blank=True)
size = models.CharField(max_length=100, blank=True)
stock = models.PositiveIntegerField('Available in stock')
class Meta:
order_with_respect_to = 'product'
def __str__(self):
return 'Inventory for {}'.format(self.product)
def get_absolute_url(self, *args, **kwargs):
return reverse('shops:inventory-detail', args=[str(self.pk)])
class DeliveryMethod(Base):
"""
Models a product delivery method type
"""
name = models.CharField(max_length=100)
icon = models.CharField(max_length=100, blank=True)
description = models.TextField(blank=True)
price = models.DecimalField(max_digits=6, decimal_places=2)
class Meta:
get_latest_by = ['updated_at', ]
def __str__(self):
return self.name
class PaymentMethod(Base):
"""
Models a payment method type
"""
name = models.CharField(max_length=100)
icon = models.CharField(max_length=100, blank=True)
description = models.TextField(blank=True)
class Meta:
get_latest_by = ['updated_at', ]
def __str__(self):
return self.name
class Order(Base):
cutomer = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name='orders',
on_delete=models.SET(get_deleted_user)
)
delivery_method = models.ForeignKey(
DeliveryMethod,
related_name='orders',
null=True, on_delete=models.SET_NULL,
)
order_date = models.DateTimeField()
is_delivered = models.BooleanField('Delivery status', default=False)
class Meta:
get_latest_by = ['-order_date', ]
ordering = ['-order_date', 'is_delivered', ]
def __str__(self):
return 'Order No. {}'.format(self.pk)
def get_absolute_url(self, *args, **kwargs):
return reverse('shops:order-detail', args=[str(self.pk)])
class OrderList(Base):
"""
    Models an ordered product line with respect to an Order object
"""
order = models.ForeignKey(
Order,
related_name='ordered_lists',
on_delete=models.CASCADE
)
product = models.ForeignKey(
Product,
related_name='ordered_lists',
null=True, on_delete=models.SET_NULL
)
qty = models.PositiveIntegerField('Quantity', default=1)
amount = models.DecimalField(max_digits=8, decimal_places=2)
class Meta:
order_with_respect_to = 'order'
get_latest_by = ['-updated_at', ]
def __str__(self):
return '{} of {}'.format(self.product, self.order)
class Payment(Base):
"""
Models a payment made for an order
"""
order = models.ForeignKey(
Order,
related_name='payments',
null=True, on_delete=models.SET_NULL,
)
payment_method = models.ForeignKey(
PaymentMethod,
related_name='payments',
null=True, on_delete=models.SET_NULL
)
subtotal = models.DecimalField(max_digits=8, decimal_places=2)
delivery_amount = models.DecimalField(max_digits=6, decimal_places=2)
tax = models.DecimalField(max_digits=6, decimal_places=2)
payment_date = models.DateTimeField()
is_payment_completed = models.BooleanField(default=False)
class Meta:
get_latest_by = ['-updated_at', ]
ordering = ['is_payment_completed', '-updated_at']
def __str__(self):
return 'Payment for {}'.format(self.order)
def get_absolute_url(self, *args, **kwargs):
return reverse('shops:payment-detail', args=[str(self.pk)])
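# Usage sketch (hypothetical data; assumes migrations have been applied):
#
#   shop = Shop.objects.create(name="Jano Wear", slug="jano-wear")
#   cat = Catagory.objects.create(name="Dress", slug="dress")
#   Product.objects.create(
#       shop=shop, catagory=cat, name="Leather Jacket", slug="leather-jacket",
#       gender="M", age="A", price="59.99", thumbnail="shops/products/a.png",
#   )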
|
[
"eyobtariku@gmail.com"
] |
eyobtariku@gmail.com
|
91231887766eacf8e685b57bd6b7f460b361dead
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_155/798.py
|
443a65047771e9eece0a19b643b46c7730fc197c
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
import sys
def read_case(l):
au = l.strip().split()[1]
return [int(x) for x in au]
def cant(c):
    # Minimum friends to invite so that, before each shyness level i,
    # at least i people are already standing (Standing Ovation).
    inv = [i - sum(c[:i]) for i in range(len(c))]
    return max(0, max(inv))
def rint():
return int(rline())
def rline():
global linenr
linenr += 1
return stdin[linenr - 1]
global stdin
global linenr
stdin = sys.stdin.readlines()
linenr = 0
cases = rint()
case = 1
while linenr < len(stdin):
c = read_case(rline())
print 'Case #{0}: {1}'.format(case, cant(c))
case += 1
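# Worked example (hypothetical case): Smax 4 with audience "11011" ->
# before shyness level 3 only 2 people have stood, so one friend is needed:
# cant([1, 1, 0, 1, 1]) == 1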
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
e745c490084e729003b83b40f597a6e997348317
|
bc2df7e370e70aa3ccdab80bcecd5379d8ca222c
|
/bin/base/stuydy_recover.py
|
257f038f1c7b201773df4497d9b1f00ca1704412
|
[] |
no_license
|
windyStreet/el-OAMP
|
092fe39e938ff2bf499ea5790e3914e359ec2069
|
9a986629daab6b24722a7e18ea0e6593a77d451d
|
refs/heads/master
| 2023-05-05T06:27:49.595082
| 2019-10-15T03:46:53
| 2019-10-15T03:46:53
| 371,572,659
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,468
|
py
|
#!/usr/bin/env python
# !-*- coding:utf-8 -*-
from bin.base.tool import RabbitMQ
from pymongo import MongoClient
from bin import init
if __name__ == '__main__':
conn = MongoClient('211.154.149.99', 27017)
db = conn.BBT_TrainRecord
record_set = db.processRecord
ds = {
"toolUser": "longrise",
# "toolPassword": "longrise",
# "toolHost": "192.168.7.219",
"toolPort": 5672,
"toolName": "",
'toolHost': '211.154.149.99',
'toolPassword': 'uCSXBwi7KJfKIx4x',
}
init.CONF_INFO = {
'study_recover': ds
}
RM = RabbitMQ.getInstance(ds='study_recover')
start = 0
end = 5000000
limit = 1000
n = 0
    while True:
offset = start + limit * n
if offset >= end:
break
for re_dict in record_set.find({"studyend": {"$gt": "2019-06-13 10:00:00.000", '$lt': "2019-06-14 10:00:00"}, "isvideopass": "1"}).sort('createtime').skip(offset).limit(limit):
re = {
'id': re_dict.get("recordid"),
'cwid': re_dict.get('cwid'),
'cardno': re_dict.get('cardno'),
'studentno': re_dict.get('studentno'),
'effecttime': '1',
'stuclientip': re_dict.get('ip'),
'stuclientmacinfo': "BBAPP_MQ_" + re_dict.get('comfrom2'),
}
RM.sendMsg(queue='APP_onlineUpdate', msg=re)
print(offset)
n = n + 1
|
[
"yq904276384@foxmail.com"
] |
yq904276384@foxmail.com
|
1c4dc3f363cf67370ac7a4c684739dccada90fdc
|
f6db8d85a3b41eed543959314d65927353a8229c
|
/W5/geolocation/migrations/0005_auto_20201207_1442.py
|
687464ec4b4eab5b7af85f1f1b57254ccd0985cc
|
[] |
no_license
|
NFEL/DjangoPaeez99
|
d573cc8e36500f08bc104d76f7a2628062d86c2f
|
621636bfb47d71f2a4f45037b7264dd5ebc7cdd7
|
refs/heads/main
| 2023-01-27T22:05:57.788049
| 2020-12-08T10:08:28
| 2020-12-08T10:08:28
| 304,553,353
| 1
| 2
| null | 2020-10-16T07:33:04
| 2020-10-16T07:33:03
| null |
UTF-8
|
Python
| false
| false
| 599
|
py
|
# Generated by Django 3.1.2 on 2020-12-07 14:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('geolocation', '0004_address_lo'),
]
operations = [
migrations.RenameField(
model_name='address',
old_name='lo',
new_name='service_area',
),
migrations.AddField(
model_name='address',
name='service_radius',
field=models.DecimalField(decimal_places=3, max_digits=5, null=True, verbose_name='شعاع فعالیت'),
),
]
|
[
"nfilsaraee@gmail.com"
] |
nfilsaraee@gmail.com
|
f86ee34060570d42f42a0bd9f70543ce71859a55
|
5fd658211a0951e287742973618012b7d9f89e43
|
/tennisblock/webapp/management/commands/local_host_entries.py
|
e9a7e7ac422add274661f5b80c3b0b1c2b7e0986
|
[] |
no_license
|
sharpertool/tennisblock
|
6d14000d3d709ec339124e893ffc8a7cdfe73d8d
|
82bec4179a5c487a588ff10d910c6c7a9c1014d6
|
refs/heads/master
| 2022-03-05T13:03:43.728596
| 2021-02-15T06:37:53
| 2021-02-15T06:37:53
| 148,848,726
| 1
| 0
| null | 2022-02-10T14:00:37
| 2018-09-14T22:43:16
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,340
|
py
|
from os.path import exists
import re
from shutil import copy
from django.core.management.base import BaseCommand
from wagtail.core.models import Site
class Command(BaseCommand):
help = 'Generate output to be added to /etc/hosts file'
def add_arguments(self, parser):
parser.add_argument('--env', help='''
Point this to your .env.local file and it will update the allowed host entries
''')
def handle(self, *args, **options):
allowed_hosts = list(
Site.objects.values_list('hostname', flat=True).all())
etc_hosts = [f"127.0.0.1 {x}" for x in allowed_hosts]
hosts_lines = "\n".join(etc_hosts)
etc_data = "# tennisblock.local begin\n" + hosts_lines + "\n# tennisblock.local end\n"
print("Insert the following lines into your /etc/hosts file:")
print(etc_data)
envfile = options.get('env', None)
if envfile and exists(envfile):
copy(envfile, envfile + '.bak')
with open(envfile, 'r') as fp:
original = fp.readlines()
with open(envfile, 'w') as fp:
for line in original:
if re.search(r'^DJANGO_ALLOWED_HOSTS', line):
line = f"DJANGO_ALLOWED_HOSTS={','.join(allowed_hosts)}"
fp.write(line)
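# Usage sketch (assumed invocation from the Django project root):
#
#   python manage.py local_host_entries --env .env.local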
|
[
"ed@sharpertool.com"
] |
ed@sharpertool.com
|
b5478f5ad21fcd76d7703d0c8a466721f17e07d0
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/stringReduction_20200722190200.py
|
ef338e3e8ae2885556338eaa1ee2321358b4f04a
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 707
|
py
|
def string(str):
st = list(str)
    count = 0
    # Repeatedly merge the first adjacent pair of differing letters into the
    # third letter; the fixed 20-pass cap is a crude stopping condition.
    while count < 20:
for i in range(0,len(st)-1,2):
pair = st[i] + st[i+1]
if pair == 'ab' or pair == 'ba':
st.pop(i)
st.pop(i)
st.insert(i,'c')
break
if pair == 'bc' or pair == 'cb':
st.pop(i)
st.pop(i)
st.insert(i,'a')
break
if pair == 'ac' or pair == 'ca':
st.pop(i)
st.pop(i)
st.insert(i,'b')
break
print(st)
count +=1
print(len(st) // 2 )
string("abcabc")
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
f55287dd1de797ed3a0460fd54cdcb062360a5f4
|
99a4817f852115f2f14d50cc6a99abbdb62c4218
|
/Modulos/Modulo 02 - Deep Learning Frameworks/03 - Passoa a Passo/tensorcode08.py
|
7071ad1ac1236324ed48246ed099bb9c60c6f37b
|
[] |
no_license
|
eduardodimperio/formacao-inteligencia-artificial
|
cac290e1385a9770a7b492ef3e695124b0ac5499
|
c5b50bad4908e8423fe384d90929a772f01787c3
|
refs/heads/master
| 2023-07-03T21:02:17.107564
| 2021-08-07T13:33:34
| 2021-08-07T13:33:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
# Creating tensors
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
print("\n")
# Create a tensor filled with zeros.
tensor = tf.zeros(shape = [3, 4], dtype = tf.int32)
print(('Tensor filled with zeros as int32, 3 rows and 4 columns:\n{0}').format(tensor.numpy()))
print("\n")
# Create a tensor filled with the value 1, dtype float32.
tensor = tf.ones(shape = [5, 3], dtype = tf.float32)
print(('\nTensor filled with the value 1 as float32, 5 rows and 3 columns:\n{0}').format(tensor.numpy()))
print("\n")
# Create a tensor filled with the value 100, dtype float64.
tensor = tf.constant(100, shape = [4, 4], dtype = tf.float64)
print(('\nTensor filled with the value 100 as float64, 4 rows and 4 columns:\n{0}').format(tensor.numpy()))
print("\n")
# Create a rank-2 variable tensor filled with zeros
tensor = tf.Variable(tf.zeros([1, 2]))
print(tensor)
print("\n")
# Assign (add) values to the tensor created above
tensor.assign_add([[100, 200]])
print(tensor)
print("\n")
|
[
"angelicogfa@gmail.com"
] |
angelicogfa@gmail.com
|
97aba7f61ee1a790a8f6bc2b427fe23430b50d04
|
f40cc44ebfc337326577c91cd88d0c1dd845b098
|
/LuminarPythonPrograms/PythonToDatabase/SelectFromDBS.py
|
abfd684fd180b6d5bd3d4b9d24f0cf1e4182d452
|
[] |
no_license
|
Aswin2289/LuminarPython
|
6e07d6f9bf6c8727b59f38f97f5779a33b2fab0d
|
ba633a276dd79bbf214cfceac2413c894eaa1875
|
refs/heads/master
| 2023-01-01T07:52:41.598110
| 2020-10-13T04:34:49
| 2020-10-13T04:34:49
| 290,109,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
import mysql.connector
db=mysql.connector.connect(
host="localhost",
port=3307,
user="root",
password="Password@123",
database="luminarpython" ,
auth_plugin='mysql_native_password'
)
cursor=db.cursor()
try:
sql="SELECT * FROM EMPLOYEE"
cursor.execute(sql)
myresult=cursor.fetchall()
for x in myresult:
print(x)
except Exception as e:
db.rollback()
print(e.args)
finally:
db.close()
|
[
"aswinabraham4@gmail.com"
] |
aswinabraham4@gmail.com
|