Dataset schema (ranges are the observed min/max; ⌀ marks a nullable column):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 3 to 288
- content_id: string, length 40
- detected_licenses: list, length 0 to 112
- license_type: string, 2 classes
- repo_name: string, length 5 to 115
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, 684 classes
- visit_date: timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38
- revision_date: timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00
- committer_date: timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06
- github_id: int64, 4.92k to 681M, ⌀
- star_events_count: int64, 0 to 209k
- fork_events_count: int64, 0 to 110k
- gha_license_id: string, 22 classes
- gha_event_created_at: timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, ⌀
- gha_created_at: timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, ⌀
- gha_language: string, 147 classes
- src_encoding: string, 25 classes
- language: string, 1 value
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 128 to 12.7k
- extension: string, 142 classes
- content: string, length 128 to 8.19k
- authors: list, length 1
- author_id: string, length 1 to 132
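Rows with this schema are easier to explore programmatically than by eye. Below is a minimal sketch using the Hugging Face `datasets` library; the identifier `user/dataset-name` is a hypothetical placeholder, since the actual dataset name is not given in this excerpt.

```python
# A minimal sketch, assuming a Hugging Face dataset with the schema above.
# "user/dataset-name" is a hypothetical placeholder, not the real identifier.
from datasets import load_dataset

# Stream to avoid downloading the full dataset up front.
ds = load_dataset("user/dataset-name", split="train", streaming=True)

for row in ds.take(3):
    # Each row pairs repository metadata with the file contents themselves.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:200])
```

The example rows that follow use this schema, with fields separated by `|`.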
005852383cf1e3ae176206e5dd95e2754cd001ce
|
006341ca12525aa0979d6101600e78c4bd9532ab
|
/CMS/Zope-3.2.1/Dependencies/zope.app-Zope-3.2.1/zope.app/container/browser/find.py
|
ee744f8239d12401177ed371c83a4a3a56c523fe
|
[
"ZPL-2.1",
"Python-2.0",
"ICU",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0"
] |
permissive
|
germanfriday/code-examples-sandbox
|
d0f29e20a3eed1f8430d06441ac2d33bac5e4253
|
4c538584703754c956ca66392fdcecf0a0ca2314
|
refs/heads/main
| 2023-05-30T22:21:57.918503
| 2021-06-15T15:06:47
| 2021-06-15T15:06:47
| 377,200,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,567
|
py
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Find View Class
$Id: find.py 29143 2005-02-14 22:43:16Z srichter $
"""
__docformat__ = 'restructuredtext'
from zope.app import zapi
from zope.app.container.find import SimpleIdFindFilter
from zope.app.container.interfaces import IFind
from zope.app.traversing.api import getName
from zope.app.publisher.browser import BrowserView
# Very simple implementation right now
class Find(BrowserView):
def findByIds(self, ids):
"""Do a find for the `ids` listed in `ids`, which is a string."""
finder = IFind(self.context)
ids = ids.split()
# if we don't have any ids listed, don't search at all
if not ids:
return []
request = self.request
result = []
for object in finder.find([SimpleIdFindFilter(ids)]):
url = zapi.absoluteURL(object, request)
result.append({ 'id': getName(object), 'url': url})
return result
|
[
"chris@thegermanfriday.com"
] |
chris@thegermanfriday.com
|
064b469872ad95e7487c3cf649ca3cfa62170bdd
|
6f05f7d5a67b6bb87956a22b988067ec772ba966
|
/data/test/python/068d64a694460d83bc9a67db9e2e5f1e4e03d3c3urls.py
|
068d64a694460d83bc9a67db9e2e5f1e4e03d3c3
|
[
"MIT"
] |
permissive
|
harshp8l/deep-learning-lang-detection
|
93b6d24a38081597c610ecf9b1f3b92c7d669be5
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
refs/heads/master
| 2020-04-07T18:07:00.697994
| 2018-11-29T23:21:23
| 2018-11-29T23:21:23
| 158,597,498
| 0
| 0
|
MIT
| 2018-11-21T19:36:42
| 2018-11-21T19:36:41
| null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
from django.conf.urls import url
from . import views
# raw strings avoid invalid-escape warnings for \w
SITE_SLUG = r"(?P<site_slug>[-_\w]+)"
IMAGE_SLUG = r"(?P<image_slug>[-_\w]+)"
urlpatterns = [
# Manage
url(r'^$', views.manage_redirect, name='manage_redirect'),
url(r'^manage/$', views.manage, name='manage'),
url(r'^manage/archives$', views.archives, name='archives'),
url(r'^manage/create/$', views.create, name='create'),
url(r'^manage/create_js/$', views.create_js, name='create_js'),
url(r'^manage/' + IMAGE_SLUG + '/trash$', views.trash, name='trash'),
# View
url(r'^' + IMAGE_SLUG + '$', views.view),
url(r'^' + IMAGE_SLUG + '.thumbnail', views.thumbnail),
url(r'^' + IMAGE_SLUG + '.original', views.original),
]
|
[
"aliostad+github@gmail.com"
] |
aliostad+github@gmail.com
|
94e1bcfdf5adabec1171a6844867b600be9ef5e8
|
c93b0f008d0977e0b9327ad8b930489f5cccae97
|
/platfrom/testdata/RawQosBuffering.py
|
3dbfb80b4f23f72c376766ece3d0dc34e83de492
|
[] |
no_license
|
ParkPan/ATCasePackage
|
15caa664bd94c014ccbd1780353bfc5fcc0caa87
|
edad6c1d5a343c740e251821fee0c29336f3d435
|
refs/heads/master
| 2020-06-16T02:44:06.323352
| 2016-12-01T03:46:44
| 2016-12-01T03:46:44
| 75,251,843
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,336
|
py
|
import random
import sys
import os
# extend sys.path before importing the sibling modules that may live there
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '..'))
import datavars
import dataprovider
from commonfunc import get_timestamp_by_time
class RawQosBuffering(dataprovider.Dataprovider):
tablename = 'raw_input_qos_buffering'
@classmethod
def gettablename(cls):
return cls.tablename
def makedata(self):
data_format = '%s,%d,%s,%s,itsavvidstring,%s,1111,222,%d,%d\n'
with open(os.path.abspath(os.path.dirname(__file__)) + '/RawQosBuffering.txt', 'w') as filedemanddata:
for i in range(24):
for j in [2, 6, 15, 26]:
id = datavars.id_range[random.randint(0,14)]
timestamp = get_timestamp_by_time(datavars.time_format% (i, j))
peerid = datavars.peeid_range[random.randint(0,9)]
url = datavars.url_range[random.randint(0,4)]
type = datavars.type_range[random.randint(0, 3)]
line = data_format % (
id, int(timestamp), peerid, url, type, int(timestamp)+random.randint(1,100),
int(timestamp) + random.randint(100,10000))
filedemanddata.write(line)
return os.path.abspath(os.path.dirname(__file__)) + '/RawQosBuffering.txt'
|
[
"panpan@cloutropy.com"
] |
panpan@cloutropy.com
|
ceab03c4764ad7cac99e7e1fcadaca2cdc5da95a
|
159d4ae61f4ca91d94e29e769697ff46d11ae4a4
|
/venv/lib/python3.9/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_custom_frames.py
|
94cabd744e1d3785ac2a728ff2ac0c584fccdf39
|
[
"MIT"
] |
permissive
|
davidycliao/bisCrawler
|
729db002afe10ae405306b9eed45b782e68eace8
|
f42281f35b866b52e5860b6a062790ae8147a4a4
|
refs/heads/main
| 2023-05-24T00:41:50.224279
| 2023-01-22T23:17:51
| 2023-01-22T23:17:51
| 411,470,732
| 8
| 0
|
MIT
| 2023-02-09T16:28:24
| 2021-09-28T23:48:13
|
Python
|
UTF-8
|
Python
| false
| false
| 4,397
|
py
|
from _pydevd_bundle.pydevd_constants import get_current_thread_id, Null, ForkSafeLock
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame
from _pydev_imps._pydev_saved_modules import thread, threading
import sys
from _pydev_bundle import pydev_log
DEBUG = False
class CustomFramesContainer:
# Actual Values initialized later on.
custom_frames_lock = None # : :type custom_frames_lock: threading.Lock
custom_frames = None
_next_frame_id = None
_py_db_command_thread_event = None
def custom_frames_container_init(): # Note: no staticmethod on jython 2.1 (so, use free-function)
CustomFramesContainer.custom_frames_lock = ForkSafeLock()
# custom_frames can only be accessed if properly locked with custom_frames_lock!
# Key is a string identifying the frame (as well as the thread it belongs to).
# Value is a CustomFrame.
#
CustomFramesContainer.custom_frames = {}
# Only to be used in this module
CustomFramesContainer._next_frame_id = 0
    # This is the event we must set to release the internal thread that processes events. It's later set by the
    # actual debugger when the debugger is created.
CustomFramesContainer._py_db_command_thread_event = Null()
# Initialize it the first time (it may be reinitialized later on when dealing with a fork).
custom_frames_container_init()
class CustomFrame:
def __init__(self, name, frame, thread_id):
# 0 = string with the representation of that frame
self.name = name
# 1 = the frame to show
self.frame = frame
# 2 = an integer identifying the last time the frame was changed.
self.mod_time = 0
# 3 = the thread id of the given frame
self.thread_id = thread_id
def add_custom_frame(frame, name, thread_id):
'''
It's possible to show paused frames by adding a custom frame through this API (it's
intended to be used for coroutines, but could potentially be used for generators too).
:param frame:
The topmost frame to be shown paused when a thread with thread.ident == thread_id is paused.
:param name:
The name to be shown for the custom thread in the UI.
:param thread_id:
The thread id to which this frame is related (must match thread.ident).
:return: str
Returns the custom thread id which will be used to show the given frame paused.
'''
with CustomFramesContainer.custom_frames_lock:
curr_thread_id = get_current_thread_id(threading.current_thread())
next_id = CustomFramesContainer._next_frame_id = CustomFramesContainer._next_frame_id + 1
# Note: the frame id kept contains an id and thread information on the thread where the frame was added
# so that later on we can check if the frame is from the current thread by doing frame_id.endswith('|'+thread_id).
frame_custom_thread_id = '__frame__:%s|%s' % (next_id, curr_thread_id)
if DEBUG:
sys.stderr.write('add_custom_frame: %s (%s) %s %s\n' % (
frame_custom_thread_id, get_abs_path_real_path_and_base_from_frame(frame)[-1], frame.f_lineno, frame.f_code.co_name))
CustomFramesContainer.custom_frames[frame_custom_thread_id] = CustomFrame(name, frame, thread_id)
CustomFramesContainer._py_db_command_thread_event.set()
return frame_custom_thread_id
def update_custom_frame(frame_custom_thread_id, frame, thread_id, name=None):
with CustomFramesContainer.custom_frames_lock:
if DEBUG:
sys.stderr.write('update_custom_frame: %s\n' % frame_custom_thread_id)
try:
old = CustomFramesContainer.custom_frames[frame_custom_thread_id]
if name is not None:
old.name = name
old.mod_time += 1
old.thread_id = thread_id
except:
sys.stderr.write('Unable to get frame to replace: %s\n' % (frame_custom_thread_id,))
pydev_log.exception()
CustomFramesContainer._py_db_command_thread_event.set()
def remove_custom_frame(frame_custom_thread_id):
with CustomFramesContainer.custom_frames_lock:
if DEBUG:
sys.stderr.write('remove_custom_frame: %s\n' % frame_custom_thread_id)
CustomFramesContainer.custom_frames.pop(frame_custom_thread_id, None)
CustomFramesContainer._py_db_command_thread_event.set()
|
[
"davidycliao@gmail.com"
] |
davidycliao@gmail.com
|
ea30277fdda4769bc035c83cf910f8660e83b049
|
421f6ce9490876be113e5ed1ac173b1f6d70cb66
|
/newYork/new_york_analysis/recursive_top_level/u_craigslist4237915975/craigslist4237915975scraper/craigslist4237915975scraper/items.py
|
2ed8d4fb8cf4de54768e328577d307baa7ea0dfc
|
[] |
no_license
|
EricSchles/humanTraffickingTalk
|
a1f4770c4380ea0424663baac79686be5b74733a
|
f399e6e6188601f34eab3fd8e7fc4a3ca30d9b14
|
refs/heads/master
| 2021-01-01T06:11:24.424134
| 2014-08-14T18:51:23
| 2014-08-14T18:51:23
| 14,879,906
| 17
| 5
| null | 2019-10-15T11:10:13
| 2013-12-03T01:15:11
|
Python
|
UTF-8
|
Python
| false
| false
| 134
|
py
|
from scrapy.item import Item, Field
class craigslist4237915975Item(Item):
title = Field()
link = Field()
desc = Field()
|
[
"ericschles@gmail.com"
] |
ericschles@gmail.com
|
a4e44762a7511ec359dd8e19c070b721d03e6d4c
|
ce6fc44470dcb5fca78cdd3349a7be70d75f2e3a
|
/AtCoder/Panasonic 2020/C.py
|
df4a723d90f0c2af78b234c8e09df7cc7078f4ca
|
[] |
no_license
|
cormackikkert/competitive-programming
|
f3fa287fcb74248ba218ecd763f8f6df31d57424
|
3a1200b8ff9b6941c422371961a127d7be8f2e00
|
refs/heads/master
| 2022-12-17T02:02:40.892608
| 2020-09-20T11:47:15
| 2020-09-20T11:47:15
| 266,775,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 137
|
py
|
a, b, c = map(int, input().split())
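# Checks sqrt(a) + sqrt(b) < sqrt(c) using only integers: squaring twice gives
# c - a - b >= 0 and 4ab < (c - a - b)^2, which avoids floating-point error.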
if (c - a - b) >= 0 and 4 * a * b < (c - a - b) * (c - a - b):
print("Yes")
else:
print("No")
|
[
"u6427001@anu.edu.au"
] |
u6427001@anu.edu.au
|
566bdadc52d20472b63a9220e98e6d64c70af204
|
12fb02e7d946002beee4e095ea23f4d98c968afa
|
/tscripts/yunwei/operate/compress.py
|
2322013f32616938001a146dfb17314ba7e2ad9c
|
[] |
no_license
|
cash2one/yunwei-1
|
0ab4ec0783c061739dc9a6c3db2f9379605746fd
|
b929fe23fd95ea1f18bd809b82523101eb414309
|
refs/heads/master
| 2020-07-02T14:31:00.776030
| 2016-09-09T05:31:52
| 2016-09-09T05:31:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,511
|
py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
'''
date: 2016/08/20
role: compression and decompression helpers
usage: cmb = compressBase(log_path)  # instantiate
cmb.zipp(source_dir,zipfile_path)
cmb.tar(source_dir,tarfile_path)
cmb.unzip(zipfile_path,target_dir)
cmb.untar(tarfile_path,target_dir)
'''
from __future__ import absolute_import
from yunwei.operate.prefix import log
logIns = log('117')
import os,zipfile,tarfile
### Class wrapping compression/decompression operations
class compressBase:
def __init__(self,log_path):
        ### log_path is the file that log messages are written to
        global logIns  # rebind the module-level logger so later calls honor log_path
        logIns = log('117',log_path)
self.zf = ''
    ### destructor: make sure any open archive handle is closed
def __del__(self):
try:
self.zf.close()
except:
pass
    ### create a zip archive
def zipp(self,source_dir,zipfile_path):
        ### make sure the source file or directory exists
if not os.path.exists(source_dir):
logIns.writeLog('error','%s not exists' %source_dir)
raise ValueError('117,%s not exists' %source_dir)
        ### walk the tree and collect all files into a list
file_list = []
if os.path.isfile(source_dir):
file_list.append(source_dir)
else:
for root, dirs, files in os.walk(source_dir):
for name in files:
file_list.append(os.path.join(root, name))
        ### build the archive with the zipfile module
        self.zf = zipfile.ZipFile(zipfile_path, "w", zipfile.ZIP_DEFLATED)
for file_one in file_list:
arc_name = file_one[len(source_dir):]
self.zf.write(file_one,arc_name)
    ### extract a zip archive
def unzip(self,zipfile_path, unzip_dir):
if not os.path.exists(unzip_dir):
os.makedirs(unzip_dir, 0777)
self.zf = zipfile.ZipFile(zipfile_path)
for name in self.zf.namelist():
name = name.replace('\\','/')
if name.endswith('/'):
os.makedirs(os.path.join(unzip_dir, name))
else:
ext_file = os.path.join(unzip_dir, name)
ext_dir = os.path.dirname(ext_file)
if not os.path.exists(ext_dir) :
os.makedirs(ext_dir,0777)
with open(ext_file, 'wb') as ef:
ef.write(self.zf.read(name))
    ### create a gzipped tar archive
def tar(self,source_dir,tarfile_path):
        ### make sure the source file or directory exists
if not os.path.exists(source_dir):
logIns.writeLog('error','%s not exists' %source_dir)
raise ValueError('117,%s not exists' %source_dir)
        ### build the archive with the tarfile module
self.zf = tarfile.open(tarfile_path, "w:gz")
        ### prefix length used to strip source_dir from archive names
len_source = len(source_dir)
        ### walk the tree and add each file
for root, dirs, files in os.walk(source_dir):
for name in files:
full_path = os.path.join(root,name)
self.zf.add(full_path,arcname=os.path.join(root[len_source:],name))
    ### extract a gzipped tar archive
def untar(self,tarfile_path, untar_dir):
if not os.path.exists(untar_dir):
os.makedirs(untar_dir, 0777)
try:
self.zf = tarfile.open(tarfile_path, "r:gz")
file_names = self.zf.getnames()
for file_name in file_names:
self.zf.extract(file_name, untar_dir)
except Exception, e:
logIns.writeLog('error','%s untar error' %tarfile_path)
raise ValueError('error','%s untar error' %tarfile_path)
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
308f47876d956e476994e9c9fe6924bde8b25f3c
|
22e9d7c194cf22513d68b61b97c49405a47e8708
|
/Number_Theory/sieves_primality_test.py
|
ef64fdf8d48dbf9a21543d0f6f5e2a11e959499b
|
[] |
no_license
|
SandeepPadhi/Algorithmic_Database
|
44c26f9300a99539781c5beb5587997b3ecadfe1
|
ab8040a7dad94c84ec88f40e44b8520edcbe2443
|
refs/heads/main
| 2023-06-22T02:04:29.362315
| 2021-07-19T17:48:40
| 2021-07-19T17:48:40
| 338,329,340
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
import math
maxn=1000000
spf=[i for i in range(maxn+1)]
def sieve(spf):
    for i in range(2,int(math.sqrt(maxn))+1,1):
        if spf[i]==i:
            # step by i, and keep only the first (smallest) prime factor recorded
            for j in range(i*i,maxn+1,i):
                if spf[j]==j:
                    spf[j]=i
def isPrime(x):
return True if spf[x]==x else False
sieve(spf)
print(isPrime(31))
|
[
"padhisandeep96@gmail.com"
] |
padhisandeep96@gmail.com
|
b74c7a408b72582b81de14ddae925d60aa364fdf
|
86cf79436659ff8d69d6d7a8d9cb358f0d1b4f1c
|
/AOJ/0383/0383.py
|
366208a7d42f41637177a43b9108f38835ec689a
|
[] |
no_license
|
pombredanne/problem-solving
|
d96a367851a34fb4f947b3b7a95ad364cf94ea8f
|
fefdbfb89ba04dbcd7df93c02968759ea970db06
|
refs/heads/master
| 2020-05-20T12:34:23.654253
| 2019-03-31T09:57:55
| 2019-03-31T09:57:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 237
|
py
|
A,B,X = map(int, input().split())
ans = a = b = 0
if X % 500 != 0: X += 500 - X%500
if A < B:
a = X//1000 + (1 if (X%1000>0) else 0)
elif A > 2*B:
b = X//500
else:
a = X//1000; X %= 1000
b = X//500
print(A*a + B*b)
|
[
"y.watanobe@gmail.com"
] |
y.watanobe@gmail.com
|
54a92741481e50fdde73c533ad52c1b313d363a4
|
cb3bce599e657188c30366adb0af3007ff9b8f96
|
/src/note/test_proxy.py
|
bd9bcba2da944244a78ca5f41ac1a3c0cc431346
|
[] |
no_license
|
skk4/python_study
|
534339e6c378d686c29af6d81429c472fca19d6d
|
4bdd2a50f4bdfd28fdb89a881cb2ebb9eac26987
|
refs/heads/master
| 2021-01-01T04:36:52.037184
| 2017-12-08T01:04:27
| 2017-12-08T01:04:27
| 97,207,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 574
|
py
|
# -*- coding:utf-8 -*-
#import socket
import random
import urllib2
iplist = ['111.13.7.42:81']
url = 'http://www.whatismyip.com.tw/'
proxy = {'http': random.choice(iplist)}
proxy_support = urllib2.ProxyHandler(proxy)
opener = urllib2.build_opener(proxy_support)
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36')]
urllib2.install_opener(opener)
rq = urllib2.Request(url)
print rq.get_full_url()
fd = urllib2.urlopen(rq)
print fd.read()
fd.close()
|
[
"skk_4@163.com"
] |
skk_4@163.com
|
6d594e11da8a7b220ea7286f7fb5b4a2a98c0b15
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/16/usersdata/78/6015/submittedfiles/triangulo.py
|
8f53086208c10d248c43bc38a441462edf00389a
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
a=input('enter the value of a:')
b=input('enter the value of b:')
c=input('enter the value of c:')
if a>=b>=c>0:
print('s')
if a>b+c:
print('n')
if a**2==(b**2)+(c**2):
print('Re')
if a**2>(b**2)+(c**2):
print('Ob')
if a**2<(b**2)+(c**2):
print('Ac')
if a==b==c:
print('Eq')
if b==c!=a:
print('Is')
if a!=b!=c:
print('Es')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
e8f6627e5ca6c6c236f176ab86c0fa1405ddd68d
|
691d3f3e04d354e11772335064f33245e1ed8c28
|
/lib/galaxy/tools/test.py
|
ec7c7c7d1a8913c9ba7ecbcc555ce0d7d27eba56
|
[
"CC-BY-2.5",
"MIT"
] |
permissive
|
dbcls/dbcls-galaxy
|
934a27cc13663549d5208158fc0b2821609399a8
|
6142165ef27f6a02aee42f26e0b94fed67ecc896
|
refs/heads/master
| 2016-09-05T22:53:27.553419
| 2009-09-09T06:35:28
| 2009-09-09T06:35:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,997
|
py
|
import new, sys
import galaxy.util
import parameters
from parameters import basic
from parameters import grouping
from elementtree.ElementTree import XML
class ToolTestBuilder( object ):
"""
Encapsulates information about a tool test, and allows creation of a
dynamic TestCase class (the unittest framework is very class oriented,
    doing dynamic tests in this way allows better integration)
"""
def __init__( self, tool, name ):
self.tool = tool
self.name = name
self.required_files = []
self.inputs = []
self.outputs = []
self.error = False
self.exception = None
def add_param( self, name, value, extra ):
try:
if name not in self.tool.inputs:
for input_name, input_value in self.tool.inputs.items():
if isinstance( input_value, grouping.Conditional ) or isinstance( input_value, grouping.Repeat ):
self.__expand_grouping_for_data_input(name, value, extra, input_name, input_value)
elif isinstance( self.tool.inputs[name], parameters.DataToolParameter ):
self.required_files.append( ( value, extra ) )
except: pass
self.inputs.append( ( name, value, extra ) )
def add_output( self, name, file ):
self.outputs.append( ( name, file ) )
def __expand_grouping_for_data_input( self, name, value, extra, grouping_name, grouping_value ):
# Currently handles grouping.Conditional and grouping.Repeat
if isinstance( grouping_value, grouping.Conditional ):
if name != grouping_value.test_param.name:
for case in grouping_value.cases:
for case_input_name, case_input_value in case.inputs.items():
if case_input_name == name and isinstance( case_input_value, basic.DataToolParameter ):
self.required_files.append( ( value, extra ) )
return True
elif isinstance( case_input_value, grouping.Conditional ):
self.__expand_grouping_for_data_input(name, value, extra, case_input_name, case_input_value)
elif isinstance( grouping_value, grouping.Repeat ):
# FIXME: grouping.Repeat can only handle 1 repeat param element since the param name
# is something like "input2" and the expanded page display is something like "queries_0|input2".
# The problem is that the only param name on the page is "input2", and adding more test input params
        # with the same name ( "input2" ) is not yet supported in our test code ( the last one added is the only
# one used ).
for input_name, input_value in grouping_value.inputs.items():
if input_name == name and isinstance( input_value, basic.DataToolParameter ):
self.required_files.append( ( value, extra ) )
return True
|
[
"h-morita@esm.co.jp"
] |
h-morita@esm.co.jp
|
835ae6671986312e9febcc5c4269d9c60e34366d
|
32cba9d6b0cb420e13a2a26c9e8c3d07e2c127b6
|
/附录A 梯度下降法/最速下降法(原生Python+scipy导数计算实现).py
|
cee687d26b7245cfa1b086e591ae59819bbce477
|
[] |
no_license
|
wanglg007/Lihang-Statistical-learning-methods-Code
|
bed22551a2883b40e93340d3f96cf2fcf9e19ef2
|
190d16310be154282550e1f55eaadd8c4dd83263
|
refs/heads/main
| 2023-07-03T17:00:35.809206
| 2021-08-02T08:37:33
| 2021-08-02T08:37:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,826
|
py
|
from scipy.misc import derivative
def partial_derivative(func, arr, dx=1e-6):
"""计算n元函数在某点各个自变量的梯度向量(偏导数列表)
:param func: [function] n元函数
:param arr: [list/tuple] 目标点的自变量坐标
:param dx: [int/float] 计算时x的增量
:return: [list] 偏导数
"""
n_features = len(arr)
ans = []
for i in range(n_features):
def f(x):
arr2 = list(arr)
arr2[i] = x
return func(arr2)
ans.append(derivative(f, arr[i], dx=dx))
return ans
def golden_section_for_line_search(func, a0, b0, epsilon):
"""一维搜索极小值点(黄金分割法)
:param func: [function] 一元函数
:param a0: [int/float] 目标区域左侧边界
:param b0: [int/float] 目标区域右侧边界
:param epsilon: [int/float] 精度
"""
a1, b1 = a0 + 0.382 * (b0 - a0), b0 - 0.382 * (b0 - a0)
fa, fb = func(a1), func(b1)
while b1 - a1 > epsilon:
if fa <= fb:
b0, b1, fb = b1, a1, fa
a1 = a0 + 0.382 * (b0 - a0)
fa = func(a1)
else:
a0, a1, fa = a1, b1, fb
b1 = b0 - 0.382 * (b0 - a0)
fb = func(b1)
return (a1 + b1) / 2
def steepest_descent(func, n_features, epsilon, distance=3, maximum=1000):
"""梯度下降法
:param func: [function] n元目标函数
:param n_features: [int] 目标函数元数
:param epsilon: [int/float] 学习精度
:param distance: [int/float] 每次一维搜索的长度范围(distance倍梯度的模)
:param maximum: [int] 最大学习次数
:return: [list] 结果点坐标
"""
    x0 = [0] * n_features  # initial value of the variables
    y0 = func(x0)  # objective value at x0
    for _ in range(maximum):
        nabla = partial_derivative(func, x0)  # compute the gradient
        # stop iterating once the gradient norm is below the required precision
        if pow(sum([nabla[i] ** 2 for i in range(n_features)]), 0.5) < epsilon:
return x0
        def f(x):
            """One-dimensional function along the negative gradient direction"""
            x2 = [x0[i] - x * nabla[i] for i in range(n_features)]
            return func(x2)
        lk = golden_section_for_line_search(f, 0, distance, epsilon=1e-6)  # one-dimensional search for the step size
        x1 = [x0[i] - lk * nabla[i] for i in range(n_features)]  # update the variables
        y1 = func(x1)  # objective value at x1
        if abs(y1 - y0) < epsilon:  # stop once the change falls below the precision
            return x1
        x0, y0 = x1, y1
if __name__ == "__main__":
# [0]
print(steepest_descent(lambda x: x[0] ** 2, 1, epsilon=1e-6))
# [-2.9999999999635865, -3.999999999951452]
print(steepest_descent(lambda x: ((x[0] + 3) ** 2 + (x[1] + 4) ** 2) / 2, 2, epsilon=1e-6))
|
[
"1278729001@qq.com"
] |
1278729001@qq.com
|
e894a478cf49f5d808333ba19573bf3ba9434e8e
|
13f5984be7be77852e4de29ab98d5494a7fc6767
|
/Exam/商汤/环形赛道小游戏.py
|
cfeda51c8d56cb04a31ed5a4d36ff1e03e2acc17
|
[] |
no_license
|
YuanXianguo/Python-Interview-Master
|
4252514763fc3f563d9b94e751aa873de1719f91
|
2f73786e8c51dbd248341559de171e18f67f9bf2
|
refs/heads/master
| 2020-11-26T18:14:50.190812
| 2019-12-20T02:18:03
| 2019-12-20T02:18:03
| 229,169,825
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
def get_sum(nums):
    # Kadane's algorithm over the doubled array to handle the circular track
    n = len(nums)
    nums.extend(nums)
    dp = [nums[0]] * len(nums)
    for i in range(1, 2 * n):
        dp[i] = max(dp[i-1] + nums[i], nums[i])
    return max(dp)
n = int(input())
nums = list(map(int, input().split()))
print(get_sum(nums))
|
[
"736913978@qq.com"
] |
736913978@qq.com
|
be490d67d8abd9e56665d7b6ef9536c0352d1325
|
fd62d8096dc95923341cfac29f0209bfbea887b4
|
/models_evaluation/xgboost/grid_search/jobs_test/5.0_0.03_0.0_200.0_10.0.job.py
|
9abbc493eabf624713f7efad4e08eff3f17a4fed
|
[] |
no_license
|
Eulerianial/premise-selection-deepmath-style
|
06c8f2f540bc7e3840c6db0a66c5b30b5f4257f9
|
8684a59b5d8beab1d02a3a7c568a16c790ea4b45
|
refs/heads/master
| 2021-07-17T17:04:13.472687
| 2017-10-25T13:54:44
| 2017-10-25T13:54:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,637
|
py
|
import xgboost as xgb
import argparse
import sys
import os
from saving_loading import *
#####################################
p = {
"max_depth":int(5.0),
"eta":0.03,
"gamma":0.0,
"num_boost_round":int(200.0),
"early_stopping_rounds":int(10.0)
}
#####################################
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run CV for xgboost with particular combination of parameters')
parser.add_argument("X",
help = "path to CSR matrix with features of pairs (theorem, premise)")
parser.add_argument("y",
help = "path to CSV file with labels reflecting relevances of pairs (theorem, premise)")
parser.add_argument("output_directory",
help = "path to directory where performance of tested model should be saved")
args = parser.parse_args()
y = read_csv(os.path.abspath(args.y), type_of_records = "int")
X = load_obj(os.path.abspath(args.X))
output_directory = os.path.abspath(args.output_directory)
dtrain = xgb.DMatrix(X, label = y)
params = {
"max_depth":p["max_depth"],
"eta":p["eta"],
"gamma":p["gamma"],
"objective":"binary:logistic"
}
x = xgb.cv(
params = params,
dtrain = dtrain,
num_boost_round = p["num_boost_round"],
early_stopping_rounds = p["early_stopping_rounds"],
nfold = 4,
metrics = {"error","auc","logloss"}
)
output_name = os.path.join(output_directory, "_".join(map(str, list(p.values())))+".pkl")
save_obj({"params":p, "stats":x}, output_name)
|
[
"bartoszpiotrowski@post.pl"
] |
bartoszpiotrowski@post.pl
|
5a37f7eb85b6bd929fabe005a19a2a43d41f15d5
|
da5bc6efaebc9ff015938d207b25c7804bc03b33
|
/11_class/quiz03/quiz03.py
|
58c5273addfe4332aba0c15c597067916327331e
|
[] |
no_license
|
codud0954/megait_python_20201116
|
b0f68f50a1e0d41c3c35535e718d5a236a7b1a98
|
a71f57d4332027406953599612cd014de2d26713
|
refs/heads/master
| 2023-01-31T11:14:27.611468
| 2020-12-18T09:03:11
| 2020-12-18T09:03:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,127
|
py
|
# Product management blueprint
class Product:
    # constructor
def __init__(self, name, price, expired_date):
self.name = name
self.price = price
self.expired_date = expired_date
    # product information
    def product_info(self):
        print("Name:", self.name)
        print("Price:", self.price)
        print("Expiration date:", self.expired_date)
    # price of n items
def price_of_product(self, count):
return count * self.price
    # whether the product can still be sold
    def sale_status(self):
        # today <= expiration date : sellable
        # today > expiration date : cannot be sold
        today = "2020-12-14"
        if today <= self.expired_date:
            return "sellable product"
        else:
            return "unsellable product"
# create an object
shrimp = Product("새우깡", 1300, "2021-03-01")
shrimp.product_info()
print()
print("제품 5개의 가격 : %d" % shrimp.price_of_product(5))
print("제품 13개의 가격 : %d" % shrimp.price_of_product(13))
print(shrimp.sale_status())
|
[
"noreply@github.com"
] |
codud0954.noreply@github.com
|
ef9173cfa8a6c3ee550b53d9ab4739412550077e
|
567b880347a4ace3a64060753bf9bfadb42fb242
|
/demo/app.py
|
e158c660ac3904f01488022ac78189149d5840be
|
[] |
no_license
|
land-pack/intuition
|
7b8335a8c0a07975c862d8e0daaa1f814bd9f63b
|
bc0a4e847ebe2b4c80c18d6a7e6e16a828c2a712
|
refs/heads/master
| 2020-03-23T07:03:36.530012
| 2018-07-18T06:26:09
| 2018-07-18T06:26:09
| 141,245,462
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
import requests
from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def index():
r = requests.get('http://127.0.0.1:5001/api/preview')
data = r.json()
images = data.get('images')
return render_template('index.html', images=images)
@app.route("/upload")
def upload():
return render_template('upload.html')
if __name__ == '__main__':
app.run(debug=True)
|
[
"landpack@sina.com"
] |
landpack@sina.com
|
fc07856387a10a3a8dbed500fe7a51d73eaeb050
|
e59273ecf45ddc40af8f51607e3ca1fb46632bb1
|
/Payload_Types/apfell/mythic/agent_functions/download.py
|
fcfff9bf3b21b6ee4794053ec13673c5fa3ac9f6
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
thiagomayllart/Mythic
|
62ae01a42027ac1a71564775c8cc7ac8d0e88aa4
|
bb1a90fb3c3e37c284fc812548b8f7ae5ffc1fb1
|
refs/heads/master
| 2023-06-02T08:12:09.099400
| 2021-06-19T23:30:26
| 2021-06-19T23:30:26
| 326,127,766
| 0
| 1
|
NOASSERTION
| 2021-06-20T03:20:21
| 2021-01-02T06:59:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,829
|
py
|
from CommandBase import *
import json
from MythicResponseRPC import *
class DownloadArguments(TaskArguments):
def __init__(self, command_line):
super().__init__(command_line)
self.args = {}
async def parse_arguments(self):
if len(self.command_line) > 0:
if self.command_line[0] == "{":
temp_json = json.loads(self.command_line)
if "host" in temp_json:
# this means we have tasking from the file browser rather than the popup UI
# the apfell agent doesn't currently have the ability to do _remote_ listings, so we ignore it
self.command_line = temp_json["path"] + "/" + temp_json["file"]
else:
raise Exception("Unsupported JSON")
class DownloadCommand(CommandBase):
cmd = "download"
needs_admin = False
help_cmd = "download {path to remote file}"
description = "Download a file from the victim machine to the Mythic server in chunks (no need for quotes in the path)."
version = 1
is_exit = False
is_file_browse = False
is_process_list = False
is_download_file = True
is_remove_file = False
is_upload_file = False
author = "@its_a_feature_"
parameters = []
attackmapping = ["T1020", "T1030", "T1041"]
argument_class = DownloadArguments
browser_script = BrowserScript(script_name="download", author="@its_a_feature_")
async def create_tasking(self, task: MythicTask) -> MythicTask:
resp = await MythicResponseRPC(task).register_artifact(
artifact_instance="$.NSFileHandle.fileHandleForReadingAtPath, readDataOfLength",
artifact_type="API Called",
)
return task
async def process_response(self, response: AgentResponse):
pass
|
[
"codybthomas@gmail.com"
] |
codybthomas@gmail.com
|
ec6fcf9d5ab20c814125e6ac6e0b78fc36051033
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p4VQE/R1/benchmark/startPyquil196.py
|
6360a951c9bd056e2dd8006aa958ef69a2c7c95e
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,184
|
py
|
# qubit number=4
# total number=12
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += CNOT(2,0) # number=5
prog += H(0) # number=9
prog += CZ(2,0) # number=10
prog += H(0) # number=11
prog += X(3) # number=7
prog += X(3) # number=8
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil196.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
a2a54db18153e09c2bdd4306052b808031bbdae2
|
eba5e5ff22bcba73001fba729218c02cd257759f
|
/assets/utils/webssh.py
|
2cb35bc81b8a867099b45e74389da231bd5cb930
|
[] |
no_license
|
duoyichen/Ops-1
|
d04ea66aa37c0732ddeff08889819d8ca830985e
|
56d3838a40dc0644a0fe8e58f40be421eaddc693
|
refs/heads/master
| 2020-04-29T11:48:34.329401
| 2019-03-14T10:29:38
| 2019-03-14T10:29:38
| 166,679,958
| 0
| 1
| null | 2019-01-20T15:59:18
| 2019-01-20T15:59:18
| null |
UTF-8
|
Python
| false
| false
| 5,155
|
py
|
# -*- coding: utf-8 -*-
import paramiko
import threading
import time
import os
import logging
from socket import timeout
from assets.tasks import admin_file
from channels.generic.websocket import WebsocketConsumer
from assets.models import ServerAssets, AdminRecord
from django.conf import settings
from utils.crypt_pwd import CryptPwd
class MyThread(threading.Thread):
def __init__(self, chan):
super(MyThread, self).__init__()
self.chan = chan
self._stop_event = threading.Event()
self.start_time = time.time()
self.current_time = time.strftime(settings.TIME_FORMAT)
self.stdout = []
self.read_lock = threading.RLock()
def stop(self):
self._stop_event.set()
def run(self):
with self.read_lock:
while not self._stop_event.is_set():
time.sleep(0.1)
try:
data = self.chan.chan.recv(1024)
if data:
str_data = bytes.decode(data)
self.chan.send(str_data)
self.stdout.append([time.time() - self.start_time, 'o', str_data])
except timeout:
break
            self.chan.send('\nConnection closed due to inactivity!')
            self.stdout.append([time.time() - self.start_time, 'o', '\nConnection closed due to inactivity!'])
self.chan.close()
def record(self):
record_path = os.path.join(settings.MEDIA_ROOT, 'admin_ssh_records', self.chan.scope['user'].username,
time.strftime('%Y-%m-%d'))
if not os.path.exists(record_path):
os.makedirs(record_path, exist_ok=True)
record_file_name = '{}.{}.cast'.format(self.chan.host_ip, time.strftime('%Y%m%d%H%M%S'))
record_file_path = os.path.join(record_path, record_file_name)
header = {
"version": 2,
"width": self.chan.width,
"height": self.chan.height,
"timestamp": round(self.start_time),
"title": "Demo",
"env": {
"TERM": os.environ.get('TERM'),
"SHELL": os.environ.get('SHELL', '/bin/bash')
},
}
admin_file.delay(record_file_path, self.stdout, header)
login_status_time = time.time() - self.start_time
        if login_status_time >= 3600:
            login_status_time = '{} h'.format(round(login_status_time / 3600, 2))
        elif login_status_time >= 60:
            login_status_time = '{} m'.format(round(login_status_time / 60, 2))
        else:
            login_status_time = '{} s'.format(round(login_status_time))
try:
AdminRecord.objects.create(
admin_login_user=self.chan.scope['user'],
admin_server=self.chan.host_ip,
admin_remote_ip=self.chan.remote_ip,
admin_start_time=self.current_time,
admin_login_status_time=login_status_time,
admin_record_file=record_file_path.split('media/')[1]
)
except Exception as e:
            logging.getLogger().error('Failed to save the admin session record to the database: {}'.format(e))
class SSHConsumer(WebsocketConsumer):
def __init__(self, *args, **kwargs):
super(SSHConsumer, self).__init__(*args, **kwargs)
self.ssh = paramiko.SSHClient()
self.group_name = self.scope['url_route']['kwargs']['group_name']
self.server = ServerAssets.objects.select_related('assets').get(id=self.scope['path'].split('/')[3])
self.host_ip = self.server.assets.asset_management_ip
self.width = 150
self.height = 30
self.t1 = MyThread(self)
self.remote_ip = self.scope['query_string'].decode('utf8')
self.chan = None
def connect(self):
self.accept()
username = self.server.username
try:
self.ssh.load_system_host_keys()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh.connect(self.host_ip, int(self.server.port), username,
CryptPwd().decrypt_pwd(self.server.password), timeout=5)
except Exception as e:
            logging.getLogger().error('User {} failed to connect to {} over webssh: {}'.format(username, self.host_ip, e))
            self.send('User {} failed to connect to {} over webssh: {}'.format(username, self.host_ip, e))
self.close()
self.chan = self.ssh.invoke_shell(term='xterm', width=self.width, height=self.height)
        # Drop the connection if there is no input for 3 minutes
self.chan.settimeout(60 * 3)
self.t1.setDaemon(True)
self.t1.start()
def receive(self, text_data=None, bytes_data=None):
self.chan.send(text_data)
def disconnect(self, close_code):
try:
self.t1.record()
finally:
self.ssh.close()
self.t1.stop()
|
[
"zm_world@163.com"
] |
zm_world@163.com
|
55a9e15caa3390bc0770bedd2dfc2dc21ce45dea
|
43204546c687d7ec6bba04dc925eb07fc3f938e7
|
/angrdbg/server.py
|
478c2e72b3c3c6f84679b0c78a5ca6077afea852
|
[
"BSD-2-Clause"
] |
permissive
|
jhscheer/angrdbg
|
5ac4a278b02e4009442e1033a1cbd9bb5d024806
|
50f257fcfea1dde8e4e76625fe64e3ac4e5eca51
|
refs/heads/master
| 2020-03-29T05:38:19.115641
| 2018-09-17T10:15:26
| 2018-09-17T10:15:26
| 149,591,381
| 0
| 0
|
BSD-2-Clause
| 2018-09-20T10:20:11
| 2018-09-20T10:20:11
| null |
UTF-8
|
Python
| false
| false
| 7,990
|
py
|
#!/usr/bin/env python
"""
classic rpyc server running a SlaveService + angrdbg + IPython shell
usage:
angrdbg-srv.py # default settings
angrdbg-srv.py --host HOST --port PORT # custom settings
# ssl-authenticated server (keyfile and certfile are required)
angrdbg-srv.py --ssl-keyfile keyfile.pem --ssl-certfile certfile.pem --ssl-cafile cafile.pem
"""
import sys
import os
import rpyc
import threading
import signal
import Queue
from plumbum import cli
from rpyc.utils.server import Server
from rpyc.utils.classic import DEFAULT_SERVER_PORT, DEFAULT_SERVER_SSL_PORT
from rpyc.utils.registry import REGISTRY_PORT
from rpyc.utils.registry import UDPRegistryClient, TCPRegistryClient
from rpyc.utils.authenticators import SSLAuthenticator
from rpyc.lib import setup_logger
from rpyc.core import SlaveService
BANNER = "[angrdbg server v1.0]"
#######################
import angr
import claripy
import pyvex
import angrdbg
import IPython
#from angrdbg import *
#######################
class WeirdServer(Server):  # serves exactly 2 connections: the 1st in a thread, the 2nd in a forked child
def __init__(self, service, done_event, **kwargs):
self.num_conns = 2
self.thread = None
self.proc = None
self.done_event = done_event
Server.__init__(self, service, **kwargs)
@classmethod
def _handle_sigchld(cls, signum, unused):
try:
while True:
pid, dummy = os.waitpid(-1, os.WNOHANG)
if pid <= 0:
break
except OSError:
pass
# re-register signal handler (see man signal(2), under Portability)
signal.signal(signal.SIGCHLD, cls._handle_sigchld)
def _accept_method(self, sock):
self.num_conns -= 1
if self.num_conns == 1:
t = threading.Thread(
target=self._authenticate_and_serve_client,
args=[sock])
t.start()
self.thread = t
else:
pid = os.fork()
if pid == 0:
# child
try:
self.logger.debug("child process created")
# 76: call signal.siginterrupt(False) in forked child
signal.siginterrupt(signal.SIGCHLD, False)
self.listener.close()
self.clients.clear()
self._authenticate_and_serve_client(sock)
except BaseException:
self.logger.exception(
"child process terminated abnormally")
else:
self.logger.debug("child process terminated")
finally:
self.logger.debug("child terminated")
os._exit(0)
else:
# parent
self.proc = pid
sock.close()
if self.num_conns == 0:
self.done_event.set()
self.listener.close()
self.join()
def join(self):
self.thread.join()
try:
pid, dummy = os.waitpid(self.proc, 0) # os.WNOHANG)
except OSError as ee:
print ee
class AngrDbgServer(cli.Application):
port = cli.SwitchAttr(["-p", "--port"], cli.Range(0, 65535), default=None,
help="The TCP listener port (default = %s, default for SSL = %s)" %
(DEFAULT_SERVER_PORT, DEFAULT_SERVER_SSL_PORT), group="Socket Options")
host = cli.SwitchAttr(
["--host"],
str,
default="127.0.0.1",
help="The host to bind to. "
"The default is INADDR_ANY",
group="Socket Options")
ipv6 = cli.Flag(["--ipv6"], help="Enable IPv6", group="Socket Options")
logfile = cli.SwitchAttr(
"--logfile",
str,
default=None,
help="Specify the log file to use; "
"the default is stderr",
group="Logging")
quiet = cli.Flag(["-q",
"--quiet"],
help="Quiet mode (only errors will be logged)",
group="Logging")
ssl_keyfile = cli.SwitchAttr(
"--ssl-keyfile",
cli.ExistingFile,
help="The keyfile to use for SSL. Required for SSL",
group="SSL",
requires=["--ssl-certfile"])
ssl_certfile = cli.SwitchAttr(
"--ssl-certfile",
cli.ExistingFile,
help="The certificate file to use for SSL. Required for SSL",
group="SSL",
requires=["--ssl-keyfile"])
ssl_cafile = cli.SwitchAttr(
"--ssl-cafile",
cli.ExistingFile,
help="The certificate authority chain file to use for SSL. Optional; enables client-side "
"authentication",
group="SSL",
requires=["--ssl-keyfile"])
auto_register = cli.Flag(
"--register",
help="Asks the server to attempt registering with "
"a registry server. By default, the server will not attempt to register",
group="Registry")
registry_type = cli.SwitchAttr(
"--registry-type",
cli.Set(
"UDP",
"TCP"),
default="UDP",
help="Specify a UDP or TCP registry",
group="Registry")
registry_port = cli.SwitchAttr(
"--registry-port",
cli.Range(
0,
65535),
default=REGISTRY_PORT,
help="The registry's UDP/TCP port",
group="Registry")
registry_host = cli.SwitchAttr(
"--registry-host",
str,
default=None,
help="The registry host machine. For UDP, the default is 255.255.255.255; "
"for TCP, a value is required",
group="Registry")
def main(self):
if self.registry_type == "UDP":
if self.registry_host is None:
self.registry_host = "255.255.255.255"
self.registrar = UDPRegistryClient(
ip=self.registry_host, port=self.registry_port)
else:
if self.registry_host is None:
raise ValueError(
"With TCP registry, you must specify --registry-host")
self.registrar = TCPRegistryClient(
ip=self.registry_host, port=self.registry_port)
if self.ssl_keyfile:
self.authenticator = SSLAuthenticator(
self.ssl_keyfile, self.ssl_certfile, self.ssl_cafile)
default_port = DEFAULT_SERVER_SSL_PORT
else:
self.authenticator = None
default_port = DEFAULT_SERVER_PORT
if self.port is None:
self.port = default_port
setup_logger(self.quiet, self.logfile)
sys.stdout.write(
BANNER + " starting at %s %s\n" %
(self.host, self.port))
sys.stdout.flush()
done_event = threading.Event()
srv = WeirdServer(
SlaveService,
done_event,
hostname=self.host,
port=self.port,
reuse_addr=True,
ipv6=self.ipv6,
authenticator=self.authenticator,
registrar=self.registrar,
auto_register=self.auto_register)
t = threading.Thread(target=self._serve, args=[srv])
t.start()
# wait for 2 connections
done_event.wait()
IPython.embed(
banner1=BANNER + " client connected\n",
banner2="", # "tip: call serve_all() on the client to have a full working shell here.",
exit_msg=BANNER + " shell closed.\nexiting...\n"
)
os.kill(srv.proc, signal.SIGKILL)
os._exit(0)
def _serve(self, srv):
srv.start()
sys.stdout.write("\n" + BANNER + " client disconnected.\nexiting...\n")
os._exit(0)
def main():
AngrDbgServer.run()
'''simple client
import rpyc
import thread
conn1 = rpyc.classic.connect("localhost")
conn2 = rpyc.classic.connect("localhost")
thread.start_new_thread(conn2.serve_all, tuple())
'''
|
[
"andreafioraldi@gmail.com"
] |
andreafioraldi@gmail.com
|
9ae8d7ce445ae3cc95832b024c28c453579539ec
|
2b7c7e9b00ed9b2dbbac943ee4b79865a96d10de
|
/Figure_script/Figure_Sobol_env_heatmap.py
|
c9d4513a3b6f3d83f0f93bf2429b86ad119e7dbf
|
[] |
no_license
|
YaojieLu/Plant_traits_inversion
|
ad973e60bb32717d9d718f774c2ec77433c38ced
|
ec83642ae2a2e6ef96502e58f8074bffdadfefe8
|
refs/heads/master
| 2021-06-21T15:22:00.225498
| 2020-12-13T22:12:21
| 2020-12-13T22:12:21
| 140,017,309
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,163
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# import data
df = pd.read_csv('../Results/Sobol_env.txt', sep = ',', index_col = 0)
df = df[df['T'] == 30]
df['D'] = round(df['D'], 4)
# labels
paras = ['c', 'L', 'p50', 'ps']
latex = ['$\\mathit{c}$', '$\\mathit{L}$',
'$\\psi_{x50}$', '$\\psi_{s}$']
labels = dict(zip(paras, latex))
# figure
sns.set(font_scale = 1.3)
fig = plt.figure(figsize = (16, 16))
for i in range(len(paras)):
    ax = fig.add_subplot(2, len(paras)//2, i+1)  # integer division keeps the subplot grid integral
df_para = df.pivot(index = 'I', columns = 'D', values = paras[i])
sns.heatmap(df_para, cmap = 'viridis', xticklabels = 3, yticklabels = 3)
#plt.xlim
#plt.ylim([0, 1])
if i > 1:
plt.xlabel('$\\mathit{D}$', fontsize = 20)
else:
ax.axes.get_xaxis().set_visible(False)
if i == 0 or i == 2:
plt.ylabel('$\\mathit{I}$', fontsize = 20)
else:
ax.axes.get_yaxis().set_visible(False)
plt.title(labels[paras[i]], fontsize = 20)
plt.tight_layout()
plt.subplots_adjust(wspace = 0, hspace = 0.15)
plt.savefig('../Figures/Figure Sobol_env_heatmap.png', bbox_inches = 'tight')
|
[
"="
] |
=
|
479c2117988d2ed2dca6b2805202adc6d5027b9d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02397/s357415256.py
|
f7d7f986e5b38a58810ae61c71f351e5d8d9603c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 165
|
py
|
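# Read pairs of integers until "0 0"; print each pair in ascending order.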
while True:
c = input().split()
x, y = int(c[0]), int(c[1])
if x == y == 0:
break
if y < x:
x, y = y, x
print("%d %d" % (x, y))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
081a8a4aa09d2eafd182ca6436c7c72218f6dcc5
|
3efee0cf2bd9e0c34bfdd94ab24a15cb88c04509
|
/TMM_examples/TMM_fabry_perot.py
|
13a671883453a7e29f38c3f94209049946a45615
|
[
"MIT"
] |
permissive
|
luwl85/Rigorous-Coupled-Wave-Analysis
|
bf5016ec70525f5e7bf59dfa93a03902afdfac12
|
a28fdf90b5b5fc0fedacc8bb44a0a0c2f2a02143
|
refs/heads/master
| 2023-04-25T20:46:45.397976
| 2021-05-20T22:17:54
| 2021-05-20T22:17:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,761
|
py
|
'''
TMM applied to a single uniform layer
should recover the analytic fabry perot solution
'''
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
import numpy as np
import matplotlib.pyplot as plt;
import cmath;
from TMM_functions import run_TMM_simulation as rTMM
## GOAL: recover the analytic Fabry-Perot fringes of a single uniform slab
#%% DEFINE SIMULATION PARAMETERS
#% General Units
degrees = np.pi/180;
L0 = 1e-6; #units of microns;
eps0 = 8.854e-12;
mu0 = 4*np.pi*10**-7;
c0 = 1/(np.sqrt(mu0*eps0))
## normalized units
#z' = k0*z;
#k = k/k0;
## REFLECTION AND TRANSMSSION SPACE epsilon and mu PARAMETERS
m_r = 1; e_r = 1; incident_medium = [e_r, m_r];
m_t = 1; e_t = 1; transmission_medium = [e_t, m_t];
## set wavelength scanning range
wavelengths = np.linspace(0.5,1.6,500); #0.5 to 1.6 microns
kmagnitude_scan = 2 * np.pi / wavelengths; #free-space wavenumber k0
omega = c0 * kmagnitude_scan; #using the dispersion wavelengths
#source parameters
theta = 0 * degrees; #%elevation angle; #off -normal incidence does not excite guided resonances...
phi = 0 * degrees; #%azimuthal angle
## incident wave properties, at this point, everything is in units of k_0
n_i = np.sqrt(e_r*m_r);
#k0 = np.sqrt(kx**2+ky**2+kz**2); we know k0, theta, and phi
#actually, in the definitions here, kx = k0*sin(theta)*cos(phi), so kx, ky here are normalized
kx = n_i*np.sin(theta)*np.cos(phi); #constant in ALL LAYERS; kx = 0 for normal incidence
ky = n_i*np.sin(theta)*np.sin(phi); #constant in ALL LAYERS; ky = 0 for normal incidence
print((n_i**2, kx**2+ky**2))
kz_inc = cmath.sqrt(e_r * m_r - kx ** 2 - ky ** 2);
normal_vector = np.array([0, 0, -1]) #positive z points down;
ate_vector = np.matrix([0, 1, 0]); #vector for the out of plane E-field
#ampltidue of the te vs tm modes (which are decoupled)
pte = 1; #1/np.sqrt(2);
ptm = 0; #cmath.sqrt(-1)/np.sqrt(2);
polarization_amplitudes = [pte, ptm]
k_inc = [kx, ky];
print('--------incident wave parameters----------------')
print('incident n_i: '+str(n_i))
print('kx_inc: '+str(kx)+' ky_inc: '+str(ky))
print('kz_inc: ' + str(kz_inc));
print('-----------------------------------------------')
#thickness 0 means L = 0, which only pops up in the exponential part of the expression
ER = [12]
UR = [1]
layer_thicknesses = [0.6]
## run simulation
Ref, Tran = rTMM.run_TMM_simulation(wavelengths, polarization_amplitudes, theta, phi, ER, UR, layer_thicknesses,\
transmission_medium, incident_medium)
plt.figure();
plt.plot(wavelengths, Ref);
plt.plot(wavelengths, Tran);
plt.title('Fabry-Perot spectrum of a single uniform slab')
plt.xlabel('wavelength ($\mu m$)')
plt.ylabel('R/T')
plt.legend(('Ref','Tran'))
plt.savefig('bragg_TMM.png');
plt.show();
|
[
"nzz2102@stanford.edu"
] |
nzz2102@stanford.edu
|
6a6762c469e81d373c201c6a168dd6ee3e4c665c
|
ed75b99e824b5724746d72f2d529781eccf8ef0d
|
/biostar/celeryconfig.py
|
8b2c7b42b8db811498eb7a13032c3e9671c2e8aa
|
[
"MIT"
] |
permissive
|
satra/biostar-central
|
6799c4df4d12de1278f60fb2b29623acf8cc7640
|
794c67d2972a4fe700c79841f5f3c0c562352738
|
refs/heads/master
| 2021-01-12T20:32:14.356389
| 2014-03-20T15:37:27
| 2014-03-20T15:37:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,202
|
py
|
from __future__ import absolute_import
from datetime import timedelta
from celery.schedules import crontab
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
BROKER_URL = 'django://'
CELERY_TASK_SERIALIZER = 'pickle'
CELERY_ACCEPT_CONTENT = ['pickle']
CELERYBEAT_SCHEDULE = {
'prune_data': {
'task': 'biostar.celery.call_command',
'schedule': timedelta(days=1),
'kwargs': dict(name="prune_data")
},
'sitemap': {
'task': 'biostar.celery.call_command',
'schedule': timedelta(hours=6),
'kwargs': dict(name="sitemap")
},
'update_index': {
'task': 'biostar.celery.call_command',
'schedule': timedelta(minutes=15),
'args': ["update_index"],
'kwargs': {"age": 1}
},
'hourly_dump': {
'task': 'biostar.celery.call_command',
'schedule': crontab(minute=10),
'args': ["biostar_pg_dump"],
'kwargs': {"hourly": True}
},
'daily_dump': {
'task': 'biostar.celery.call_command',
'schedule': crontab(hour=22),
'args': ["biostar_pg_dump"],
},
}
CELERY_TIMEZONE = 'UTC'
|
[
"istvan.albert@gmail.com"
] |
istvan.albert@gmail.com
|
325a93e9027f90d97fe0431288393f2f293520c7
|
90b8d12660adc7dcf63bffce20ba1b7ede64386a
|
/official/vision/beta/serving/export_saved_model.py
|
95027be136a8209c9e2a438072cf195c7d18771c
|
[
"Apache-2.0"
] |
permissive
|
thalitadru/models
|
7109797ed536ccb10e17bba6add0f571a1c1c96d
|
7faaa572db44621f8e2998abd8dc6a22e86001f2
|
refs/heads/master
| 2022-05-05T15:04:01.683629
| 2022-03-23T16:20:46
| 2022-03-23T16:20:46
| 82,706,460
| 3
| 0
| null | 2017-02-21T17:14:13
| 2017-02-21T17:14:12
| null |
UTF-8
|
Python
| false
| false
| 3,850
|
py
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Vision models export binary for serving/inference.
To export a trained checkpoint in saved_model format (shell script):
EXPERIMENT_TYPE = XX
CHECKPOINT_PATH = XX
EXPORT_DIR_PATH = XX
export_saved_model --experiment=${EXPERIMENT_TYPE} \
--export_dir=${EXPORT_DIR_PATH}/ \
--checkpoint_path=${CHECKPOINT_PATH} \
--batch_size=2 \
--input_image_size=224,224
To serve (python):
export_dir_path = XX
input_type = XX
input_images = XX
imported = tf.saved_model.load(export_dir_path)
model_fn = imported.signatures['serving_default']
output = model_fn(input_images)
"""
from absl import app
from absl import flags
from official.common import registry_imports # pylint: disable=unused-import
from official.core import exp_factory
from official.modeling import hyperparams
from official.vision.beta.serving import export_saved_model_lib
FLAGS = flags.FLAGS
flags.DEFINE_string(
'experiment', None, 'experiment type, e.g. retinanet_resnetfpn_coco')
flags.DEFINE_string('export_dir', None, 'The export directory.')
flags.DEFINE_string('checkpoint_path', None, 'Checkpoint path.')
flags.DEFINE_multi_string(
'config_file',
default=None,
help='YAML/JSON files which specifies overrides. The override order '
'follows the order of args. Note that each file '
'can be used as an override template to override the default parameters '
'specified in Python. If the same parameter is specified in both '
'`--config_file` and `--params_override`, `config_file` will be used '
'first, followed by params_override.')
flags.DEFINE_string(
'params_override', '',
    'The JSON/YAML file or string which specifies the parameter to be overridden'
' on top of `config_file` template.')
flags.DEFINE_integer(
'batch_size', None, 'The batch size.')
flags.DEFINE_string(
'input_type', 'image_tensor',
'One of `image_tensor`, `image_bytes`, `tf_example` and `tflite`.')
flags.DEFINE_string(
'input_image_size', '224,224',
'The comma-separated string of two integers representing the height,width '
'of the input to the model.')
flags.DEFINE_string('export_checkpoint_subdir', 'checkpoint',
'The subdirectory for checkpoints.')
flags.DEFINE_string('export_saved_model_subdir', 'saved_model',
'The subdirectory for saved model.')
def main(_):
params = exp_factory.get_exp_config(FLAGS.experiment)
for config_file in FLAGS.config_file or []:
params = hyperparams.override_params_dict(
params, config_file, is_strict=True)
if FLAGS.params_override:
params = hyperparams.override_params_dict(
params, FLAGS.params_override, is_strict=True)
params.validate()
params.lock()
export_saved_model_lib.export_inference_graph(
input_type=FLAGS.input_type,
batch_size=FLAGS.batch_size,
input_image_size=[int(x) for x in FLAGS.input_image_size.split(',')],
params=params,
checkpoint_path=FLAGS.checkpoint_path,
export_dir=FLAGS.export_dir,
export_checkpoint_subdir=FLAGS.export_checkpoint_subdir,
export_saved_model_subdir=FLAGS.export_saved_model_subdir)
if __name__ == '__main__':
app.run(main)
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
a3bfa9e158ba5fe5b5a7697cfc74d1a729aefa2a
|
65c616c59ae005debf91d82f4efc7f7cdcc2a7a4
|
/news_recommendation/home/forms.py
|
a4274fbc39b814ece457b077eceefcf942431907
|
[] |
no_license
|
nghiatd16/most_cb
|
28db8b0c52cc391f6890f2a56c8dee308a6dfc85
|
46d91016b20d57f3f43b63813f7fbccd5626a848
|
refs/heads/master
| 2022-12-25T17:33:04.896024
| 2020-09-19T08:34:15
| 2020-09-19T08:34:15
| 296,822,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 137
|
py
|
from django.forms import ModelForm
import django.forms as forms
from django.conf import settings
import os
import glob
import shutil
|
[
"nghiatd.proptit@gmail.com"
] |
nghiatd.proptit@gmail.com
|
c30e22ee2d9981b49022661bd8c8de23908ce27e
|
78d7d7aeb78a8cea6d0e10b89fc4aa6c46c95227
|
/448.py
|
d69d285b5d87d18b1e029474dd35d050e1364dcc
|
[] |
no_license
|
GenryEden/kpolyakovName
|
97db13ef93061a8c2afc6cc5acd91337f79063f1
|
c5d7f631ae7ec8770e56170574b82ea2b7d8a4d9
|
refs/heads/master
| 2023-05-23T21:22:51.983756
| 2021-06-21T08:56:49
| 2021-06-21T08:56:49
| 350,466,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
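# Count +1/+2 step paths from 1 to x that pass through 7 (the +2 jump from 6 over 7 is excluded).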
def f(x):
if x < 1:
return 0
elif x == 1:
return 1
else:
ans = f(x-1)
if x - 1 != 7:
ans += f(x-2)
return ans
print(f(12))
|
[
"a926788@gmail.com"
] |
a926788@gmail.com
|
4105691310284155e93357df83d7741f403738fd
|
a6f4e2e2b2e25f7af509598327aaaa5c795433ac
|
/django_gocardless/views.py
|
7208815d058b283889ea24034697e84804b85aa8
|
[] |
no_license
|
adamcharnock/django-gocardless
|
4042e9dc6a179cf2030064855b82411adc960470
|
ac126fcb12baf8a33472f0e22b29ede2b92e27ed
|
refs/heads/master
| 2021-01-18T13:24:22.265030
| 2014-05-08T17:56:35
| 2014-05-08T17:56:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,746
|
py
|
import json
import logging
from django.conf import settings
from django.http.response import HttpResponseBadRequest
from django.views.generic.base import View, logger
from gocardless.utils import generate_signature
class GoCardlessPayloadMixin(object):
def get_payload(self, request):
if not hasattr(self, '_payload'):
if request.method.lower() == 'get':
self._payload = request.GET.dict()
else:
self._payload = json.loads(request.body)['payload']
return self._payload
class GoCardlessSignatureMixin(GoCardlessPayloadMixin):
""" Will verify a GoCardless signature """
manual_signature_check = False
def verify_signature(self, request):
data = self.get_payload(request)
if not data:
logger.warning('No payload or request data found')
return False
pms = data.copy()
pms.pop('signature')
signature = generate_signature(pms, settings.GOCARDLESS_APP_SECRET)
if signature == data['signature']:
return True
return False
def dispatch(self, request, *args, **kwargs):
if not self.manual_signature_check and not self.verify_signature(request):
return self.handle_invalid_signature(request, *args, **kwargs)
response = super(GoCardlessSignatureMixin, self).dispatch(request, *args, **kwargs)
response['Cache-Control'] = 'no-cache'
return response
def handle_invalid_signature(self, request, *args, **kwargs):
response = HttpResponseBadRequest('Signature did not validate')
response['Cache-Control'] = 'no-cache'
return response
class GoCardlessView(GoCardlessSignatureMixin, View):
pass
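# A minimal consumer sketch (hedged: the view name and URL wiring are
# hypothetical; only GoCardlessView and get_payload() come from this module,
# and settings.GOCARDLESS_APP_SECRET must be configured for verification):
#
#   from django.http import HttpResponse
#
#   class WebhookView(GoCardlessView):
#       def post(self, request, *args, **kwargs):
#           payload = self.get_payload(request)  # signature already checked in dispatch()
#           return HttpResponse('OK')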
|
[
"adam@omniwiki.co.uk"
] |
adam@omniwiki.co.uk
|
0e5dc5c575994fb9d51f5fd31c55ef92cd32e3f8
|
ddadba7ebb64c2f341280728fd50e62369d6251e
|
/apps/notes_app/models.py
|
d5bba0cd359c70e4d2e127891182602fdd6e7910
|
[] |
no_license
|
LisCoding/Notes-App
|
0f630b8229553d6cac278650f5649a9737ce1285
|
21bd8d0177ecf69335ec24e52c49df81f555f7a5
|
refs/heads/master
| 2021-06-22T10:44:25.755893
| 2017-08-31T19:33:49
| 2017-08-31T19:33:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Note(models.Model):
title = models.CharField(max_length=255)
description = models.TextField(default="")
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
|
[
"cardozoliseth@gmail.com"
] |
cardozoliseth@gmail.com
|
761eeaa6e8e18f8112e281af167a7ccbc3748013
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03837/s920938245.py
|
3aeb662f91dd2f46bbfdb9f9b7edc7cc0fdcb132
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
def warshall_floyd():
    # All-pairs shortest paths (Floyd-Warshall), updating d in place.
    for k in range(N):
        for i in range(N):
            for j in range(N):
                d[i][j] = min(d[i][j], d[i][k]+d[k][j])
N, M = map(int, input().split())
d = [[10**18]*N for _ in range(N)]
for i in range(N):
d[i][i] = 0
edges = []
for _ in range(M):
a, b, c = map(int, input().split())
d[a-1][b-1] = c
d[b-1][a-1] = c
edges.append((a-1, b-1, c))
warshall_floyd()
# Count the edges that lie on no shortest path between any pair of vertices.
ans = 0
for a, b, c in edges:
    flag = True
    for i in range(N):
        for j in range(N):
            if d[i][a] + c + d[b][j] == d[i][j]:
                flag = False
    if flag:
        ans += 1
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
74fc280f27c08e1336a10b2c6a6e61901d2387e1
|
cd2ea0b9f0f8e01950ea4dd629a325ef26f914ad
|
/topics/Trees/BinaryTreeTraversal.py
|
b180ed30d3f5c5183418e0a9355c08ce008c8282
|
[] |
no_license
|
akhandsingh17/assignments
|
df5f1af44486ffefe1fefcccc643e6818ac1c55d
|
c89f40dcd7a8067fa78ed95d3fecc36cb1ca7b5d
|
refs/heads/master
| 2023-08-24T18:00:32.938254
| 2021-10-06T06:01:32
| 2021-10-06T06:01:32
| 305,913,409
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,913
|
py
|
"""
1
/ \
2 3
/ \ / \
4 5 6 7
\
8
"""
class Node:
def __init__(self, val, left=None, right=None):
self.val = val
self.left = left
self.right = right
class BinaryTreeTraversal:
def __init__(self, root):
self.root = Node(root)
    def preorder(self, start, traversal):
        if start is not None:
            traversal = traversal + (str(start.val) + '-')
            traversal = self.preorder(start.left, traversal)
            traversal = self.preorder(start.right, traversal)
        return traversal
    def inorder(self, start, traversal):
        if start is not None:
            # inorder must recurse via inorder, not preorder
            traversal = self.inorder(start.left, traversal)
            traversal = traversal + (str(start.val) + '-')
            traversal = self.inorder(start.right, traversal)
        return traversal
    def postorder(self, start, traversal):
        if start is not None:
            # postorder must recurse via postorder, not preorder
            traversal = self.postorder(start.left, traversal)
            traversal = self.postorder(start.right, traversal)
            traversal = traversal + (str(start.val) + '-')
        return traversal
def print_traversal(self, type):
if type == 'preorder':
return self.preorder(self.root, '')
if type == 'inorder':
return self.inorder(self.root, '')
if type == 'postorder':
return self.postorder(self.root, '')
def main():
tree = BinaryTreeTraversal(1)
tree.root.left = Node(2)
tree.root.right = Node(3)
tree.root.left.left = Node(4)
tree.root.left.right = Node(5)
tree.root.right.left = Node(6)
tree.root.right.right = Node(7)
tree.root.right.right.right = Node(8)
    print(tree.print_traversal('preorder'))   # 1-2-4-5-3-6-7-8-
    print(tree.print_traversal('inorder'))    # 4-2-5-1-6-3-7-8-
    print(tree.print_traversal('postorder'))  # 4-5-2-6-8-7-3-1-
if __name__=='__main__':
main()
|
[
"akhans@amazon.com"
] |
akhans@amazon.com
|
22dfaba28c59c06bab37c8db0df174e75f3bf706
|
bd9278423bb215dcdbf9f56a948210db044bdba2
|
/tests/test_01_main/test_env_vars_1.py
|
501ad06fecc21417b497a0cafa4e66f9cbcc5426
|
[
"MIT"
] |
permissive
|
dungnv2602/uvicorn-gunicorn-docker
|
77fd5e0d07a94c7acc0876a773e6b1262619fb6d
|
37dbc188e555c22cf9b2dd0f3f6ab3e122e32c24
|
refs/heads/master
| 2020-04-26T16:40:32.749609
| 2019-02-08T10:44:24
| 2019-02-08T10:44:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,250
|
py
|
import time
import pytest
import requests
import docker
from ..utils import CONTAINER_NAME, get_config, get_logs, remove_previous_container
client = docker.from_env()
def verify_container(container, response_text):
config_data = get_config(container)
assert config_data["workers_per_core"] == 1
assert config_data["host"] == "0.0.0.0"
assert config_data["port"] == "8000"
assert config_data["loglevel"] == "warning"
assert config_data["bind"] == "0.0.0.0:8000"
logs = get_logs(container)
assert "Checking for script in /app/prestart.sh" in logs
assert "Running script /app/prestart.sh" in logs
assert (
"Running inside /app/prestart.sh, you could add migrations to this file" in logs
)
response = requests.get("http://127.0.0.1:8000")
assert response.text == response_text
@pytest.mark.parametrize(
"image,response_text",
[
(
"tiangolo/uvicorn-gunicorn:python3.6",
"Hello world! From Uvicorn with Gunicorn. Using Python 3.6",
),
(
"tiangolo/uvicorn-gunicorn:python3.7",
"Hello world! From Uvicorn with Gunicorn. Using Python 3.7",
),
(
"tiangolo/uvicorn-gunicorn:latest",
"Hello world! From Uvicorn with Gunicorn. Using Python 3.7",
),
(
"tiangolo/uvicorn-gunicorn:python3.6-alpine3.8",
"Hello world! From Uvicorn with Gunicorn in Alpine. Using Python 3.6",
),
(
"tiangolo/uvicorn-gunicorn:python3.7-alpine3.8",
"Hello world! From Uvicorn with Gunicorn in Alpine. Using Python 3.7",
),
],
)
def test_env_vars_1(image, response_text):
remove_previous_container(client)
container = client.containers.run(
image,
name=CONTAINER_NAME,
environment={"WORKERS_PER_CORE": 1, "PORT": "8000", "LOG_LEVEL": "warning"},
ports={"8000": "8000"},
detach=True,
)
time.sleep(1)
verify_container(container, response_text)
container.stop()
# Test that everything works after restarting too
container.start()
time.sleep(1)
verify_container(container, response_text)
container.stop()
container.remove()
|
[
"tiangolo@gmail.com"
] |
tiangolo@gmail.com
|
781ee264796e64ff53334b63df8e2b3568dff462
|
7e0393251012e91213dddfd9c93f6b6b73ca2bfe
|
/cloudnetpy/products/drizzle_error.py
|
6ac4ba7a3ba679b8c9adeb0aead54d7fe56fdbce
|
[
"MIT"
] |
permissive
|
josephhardinee/cloudnetpy
|
ff4cc0303d7f2ae40f2d3466298257659ff3ccde
|
c37760db3cdfe62ae769f8090ba621803ec9a92c
|
refs/heads/master
| 2021-03-06T15:37:51.529776
| 2020-02-13T09:05:29
| 2020-02-13T09:05:29
| 246,207,849
| 0
| 0
|
MIT
| 2020-03-10T04:29:48
| 2020-03-10T04:26:16
| null |
UTF-8
|
Python
| false
| false
| 5,294
|
py
|
import numpy as np
import numpy.ma as ma
import cloudnetpy.utils as utils
def _get_drizzle_indices(diameter):
return {'drizzle': diameter > 0,
'small': np.logical_and(diameter <= 1e-4, diameter > 1e-5),
'tiny': np.logical_and(diameter <= 1e-5, diameter > 0)}
def _read_input_uncertainty(categorize, uncertainty_type):
return tuple(db2lin(categorize.getvar(f'{key}_{uncertainty_type}'))
for key in ('Z', 'beta'))
MU_ERROR = 0.07
MU_ERROR_SMALL = 0.25
def get_drizzle_error(categorize, drizzle_parameters):
""" Estimates error and bias for drizzle classification.
Args:
categorize (DrizzleSource): The :class:`DrizzleSource` instance.
drizzle_parameters (DrizzleSolving): The :class:`DrizzleSolving` instance.
Returns:
errors (dict): Dictionary containing information of estimated error and bias for drizzle
"""
parameters = drizzle_parameters.params
drizzle_indices = _get_drizzle_indices(parameters['Do'])
error_input = _read_input_uncertainty(categorize, 'error')
bias_input = _read_input_uncertainty(categorize, 'bias')
errors = _calc_errors(drizzle_indices, error_input, bias_input)
return errors
def _calc_errors(drizzle_indices, error_input, bias_input):
errors = _calc_parameter_errors(drizzle_indices, error_input)
biases = _calc_parameter_biases(bias_input)
results = {**errors, **biases}
_add_supplementary_errors(results, drizzle_indices, error_input)
_add_supplementary_biases(results, bias_input)
return _convert_to_db(results)
def _calc_parameter_errors(drizzle_indices, error_input):
def _calc_dia_error():
error = _calc_error(2 / 7, (1, 1), error_input, add_mu=True)
error_small = _calc_error(1 / 4, (1, 1), error_input, add_mu_small=True)
return _stack_errors(error, drizzle_indices, error_small)
def _calc_lwc_error():
error = _calc_error(1 / 7, (1, 6), error_input)
error_small = _calc_error(1 / 4, (1, 3), error_input)
return _stack_errors(error, drizzle_indices, error_small)
def _calc_lwf_error():
error = _calc_error(1 / 7, (3, 4), error_input, add_mu=True)
error_small = _calc_error(1 / 2, (1, 1), error_input, add_mu_small=True)
error_tiny = _calc_error(1 / 4, (3, 1), error_input, add_mu_small=True)
return _stack_errors(error, drizzle_indices, error_small, error_tiny)
def _calc_s_error():
error = _calc_error(1 / 2, (1, 1), error_input)
return _stack_errors(error, drizzle_indices)
return {'Do_error': _calc_dia_error(),
'drizzle_lwc_error': _calc_lwc_error(),
'drizzle_lwf_error': _calc_lwf_error(),
'S_error': _calc_s_error()}
def _calc_parameter_biases(bias_input):
def _calc_bias(scale, weights):
return utils.l2norm_weighted(bias_input, scale, weights)
dia_bias = _calc_bias(2/7, (1, 1))
lwc_bias = _calc_bias(1/7, (1, 6))
lwf_bias = _calc_bias(1/7, (3, 4))
return {'Do_bias': dia_bias,
'drizzle_lwc_bias': lwc_bias,
'drizzle_lwf_bias': lwf_bias}
def _add_supplementary_errors(results, drizzle_indices, error_input):
def _calc_n_error():
z_error = error_input[0]
dia_error = db2lin(results['Do_error'])
n_error = utils.l2norm(z_error, 6 * dia_error)
return _stack_errors(n_error, drizzle_indices)
def _calc_v_error():
error = results['Do_error']
error[drizzle_indices['tiny']] *= error[drizzle_indices['tiny']]
return error
results['drizzle_N_error'] = _calc_n_error()
results['v_drizzle_error'] = _calc_v_error()
results['mu_error'] = MU_ERROR
return results
def _add_supplementary_biases(results, bias_input):
def _calc_n_bias():
z_bias = bias_input[0]
dia_bias = db2lin(results['Do_bias'])
return utils.l2norm_weighted((z_bias, dia_bias), 1, (1, 6))
results['drizzle_N_bias'] = _calc_n_bias()
results['v_drizzle_bias'] = results['Do_bias']
return results
def _convert_to_db(results):
"""Converts linear error values to dB."""
return {name: lin2db(value) for name, value in results.items()}
def _calc_error(scale, weights, error_input, add_mu=False, add_mu_small=False):
error = utils.l2norm_weighted(error_input, scale, weights)
if add_mu:
error = utils.l2norm(error, MU_ERROR)
if add_mu_small:
error = utils.l2norm(error, MU_ERROR_SMALL)
return error
def _stack_errors(error_in, drizzle_indices, error_small=None, error_tiny=None):
def _add_error_component(source, ind):
error[ind] = source[ind]
error = ma.zeros(error_in.shape)
_add_error_component(error_in, drizzle_indices['drizzle'])
if error_small is not None:
_add_error_component(error_small, drizzle_indices['small'])
if error_tiny is not None:
_add_error_component(error_tiny, drizzle_indices['tiny'])
return error
COR = 10 / np.log(10)
def db2lin(x):
if ma.max(x) > 100:
raise ValueError('Too large values in drizzle.db2lin()')
return ma.exp(x / COR) - 1
def lin2db(x):
if ma.min(x) < -0.9:
raise ValueError('Too small values in drizzle.lin2db()')
return ma.log(x + 1) * COR
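# Round-trip sanity sketch (illustrative only, within the guarded ranges):
#   lin2db(db2lin(x)) == x up to floating-point error, since
#   ma.log(ma.exp(x / COR) - 1 + 1) * COR == x.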
|
[
"simo.tukiainen@fmi.fi"
] |
simo.tukiainen@fmi.fi
|
6475c7715d2ace925da77b437721147f76ea65b2
|
d1d79d0c3889316b298852834b346d4246825e66
|
/blackbot/core/wss/ttp/art/art_T1218.011-1.py
|
ae3f24301b8519e5fd2ff913044f9b62c72f2ce2
|
[] |
no_license
|
ammasajan/Atomic-Red-Team-Intelligence-C2
|
78d1ed2de49af71d4c3c74db484e63c7e093809f
|
5919804f0bdeb15ea724cd32a48f377bce208277
|
refs/heads/master
| 2023-07-17T12:48:15.249921
| 2021-08-21T20:10:30
| 2021-08-21T20:10:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,920
|
py
|
from blackbot.core.utils import get_path_in_package
from blackbot.core.wss.atomic import Atomic
from terminaltables import SingleTable
import os
import json
class Atomic(Atomic):
def __init__(self):
self.name = 'DefenseEvasion/T1218.011-1'
self.controller_type = ''
self.external_id = 'T1218.011'
self.blackbot_id = 'T1218.011-1'
self.version = ''
self.language = 'boo'
self.description = self.get_description()
self.last_updated_by = 'Blackbot, Inc. All Rights reserved'
self.references = ["System.Management.Automation"]
self.options = {}
def payload(self):
with open(get_path_in_package('core/wss/ttp/art/src/cmd_prompt.boo'), 'r') as ttp_src:
src = ttp_src.read()
cmd_script = get_path_in_package('core/wss/ttp/art/cmd_ttp/defenseEvasion/T1218.011-1')
with open(cmd_script) as cmd:
src = src.replace("CMD_SCRIPT", cmd.read())
return src
def get_description(self):
path = get_path_in_package('core/wss/ttp/art/cmd_ttp/defenseEvasion/T1218.011-1')
with open(path) as text:
            head = [next(text) for _ in range(4)]
technique_name = head[0].replace('#TechniqueName: ', '').strip('\n')
atomic_name = head[1].replace('#AtomicTestName: ', '').strip('\n')
description = head[2].replace('#Description: ', '').strip('\n')
language = head[3].replace('#Language: ', '').strip('\n')
aux = ''
count = 1
for char in description:
if char == '&':
continue
aux += char
if count % 126 == 0:
aux += '\n'
count += 1
out = '{}: {}\n{}\n\n{}\n'.format(technique_name, language, atomic_name, aux)
return out
|
[
"root@uw2artic201.blackbot.net"
] |
root@uw2artic201.blackbot.net
|
0021c7e9e93f3bb30c1d2f4511b9a15aee101958
|
a00c487d88c50401ebf8505cd267c70b42e3c362
|
/bangla/soup/MSR.py
|
9c488a0e43064bf12f84eebfbeb665415f8376dd
|
[] |
no_license
|
sharif1302042/A-news-Agrregation-system
|
9aca07ed29f13b5da8e93a2aabe03281d6b66365
|
5e48a726f5fedba686d18601d561784c6ceddd5a
|
refs/heads/master
| 2020-04-01T11:00:39.088387
| 2019-11-09T15:05:24
| 2019-11-09T15:05:24
| 153,142,416
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 936
|
py
|
import requests
from bs4 import BeautifulSoup
news=[]
r=requests.get('https://bangla.bdnews24.com/politics/')
soup = BeautifulSoup(r.text, 'html.parser')
r1=soup.find_all('li',attrs={'class':'article first '})
r2=soup.find_all('li',attrs={'class':'article '})
l=0
for r in r1+r2:
    if l < 10:
        link = r.find('a')['href']
        title = r.find('a').text[1:-1]
        news.append((title, link, 'Bdnews24'))
        l += 1
"""
#--------------jugantor-----------
r=requests.get('https://www.jugantor.com/')
soup = BeautifulSoup(r.text, 'html.parser')
r1=soup.find_all('div',attrs={'id':'popular_list_block'})
url=r1[0].find('a')
r=r1[0].find('a')
txt=r.find('h4').text
news.append((txt,url,"Jugantor"))
r1=soup.find_all('div',attrs={'class':'editor_picks_list'})
l=0
for r in r1:
if l<6:
url=r.find('a')['href']
txt=r.find('a')
txt=txt.find('h4').text
news.append((txt,url,"Jugantor"))
l+=1
print('MSR',len(news))
for r in news:
print(r[0])
"""
|
[
"sharifulcsehstu@gmail.com"
] |
sharifulcsehstu@gmail.com
|
5102a13c1af192205b49132a170a820a1c33ee47
|
a799a105ab2aba39a475bf2ce086405def0351c2
|
/test/model/tpp/common.py
|
4fd78b174d605ac5736f5e09c8c9e575b42dfa7b
|
[
"Apache-2.0"
] |
permissive
|
mbohlkeschneider/gluon-ts
|
d663750d13798624eca5c9d6f12a87e321ce7334
|
df4256b0e67120db555c109a1bf6cfa2b3bd3cd8
|
refs/heads/master
| 2021-11-24T06:09:49.905352
| 2021-10-14T09:30:38
| 2021-10-14T09:30:38
| 192,546,557
| 54
| 10
|
Apache-2.0
| 2022-08-31T18:36:44
| 2019-06-18T13:33:36
|
Python
|
UTF-8
|
Python
| false
| false
| 2,364
|
py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import numpy as np
import pandas as pd
import pytest
from gluonts.dataset.common import ListDataset
def point_process_dataset():
ia_times = np.array([0.2, 0.7, 0.2, 0.5, 0.3, 0.3, 0.2, 0.1])
marks = np.array([0, 1, 2, 0, 1, 2, 2, 2])
lds = ListDataset(
[
{
"target": np.c_[ia_times, marks].T,
"start": pd.Timestamp("2011-01-01 00:00:00", freq="H"),
"end": pd.Timestamp("2011-01-01 03:00:00", freq="H"),
}
],
freq="H",
one_dim_target=False,
)
return lds
def point_process_dataset_2():
lds = ListDataset(
[
{
"target": np.c_[
np.array([0.2, 0.7, 0.2, 0.5, 0.3, 0.3, 0.2, 0.1]),
np.array([0, 1, 2, 0, 1, 2, 2, 2]),
].T,
"start": pd.Timestamp("2011-01-01 00:00:00", freq="H"),
"end": pd.Timestamp("2011-01-01 03:00:00", freq="H"),
},
{
"target": np.c_[
np.array([0.2, 0.1, 0.2, 0.1, 0.3, 0.3, 0.5, 0.4]),
np.array([0, 1, 2, 0, 1, 2, 1, 1]),
].T,
"start": pd.Timestamp("2011-01-01 00:00:00", freq="H"),
"end": pd.Timestamp("2011-01-01 03:00:00", freq="H"),
},
{
"target": np.c_[
np.array([0.2, 0.7, 0.2, 0.5, 0.1, 0.1, 0.2, 0.1]),
np.array([0, 1, 2, 0, 1, 0, 1, 2]),
].T,
"start": pd.Timestamp("2011-01-01 00:00:00", freq="H"),
"end": pd.Timestamp("2011-01-01 03:00:00", freq="H"),
},
],
freq="H",
one_dim_target=False,
)
return lds
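# Quick shape check (an illustrative sketch; assumes gluonts is installed):
#   for entry in point_process_dataset():
#       assert entry["target"].shape == (2, 8)  # rows: inter-arrival times, marks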
|
[
"noreply@github.com"
] |
mbohlkeschneider.noreply@github.com
|
94edf1ad6adc7d8a3551c8b9103bf294c8afc731
|
cd23b0457bc02a60b89f1f52783e56cc36d85b5e
|
/oop/getitem.py
|
1bff5722f8bb3e1db032216968dc42463cf0a724
|
[] |
no_license
|
cluo/learingPython
|
65c7068613e1a2ae0178e23770503043d9278c45
|
54609288e489047d4dd1dead5ac142f490905f0e
|
refs/heads/master
| 2020-04-01T13:04:15.981758
| 2015-02-23T13:21:31
| 2015-02-23T13:21:31
| 28,440,969
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
__author__ = 'admin'
class Indexer:
def __getitem__(self, index):
return index ** 2
X = Indexer()
print X[2]
for i in range(5):
print(X[i])
class stepper:
    # Defining __getitem__ also makes instances iterable: for-loops, `in`
    # tests, comprehensions and list()/tuple()/join() all index from 0 upward.
    def __getitem__(self, i):
        return self.data[i]
X = stepper()
X.data = 'spam'
for item in X:
print(item)
print 'p' in X
print [c for c in X]
print ''.join(X)
print list(X)
print tuple(X)
|
[
"luosheng@meizu.com"
] |
luosheng@meizu.com
|
087e92e25d5452f986b22430ce4fffefb538f075
|
0b49c40162e15b5e0c551e548d865c4105e8df7d
|
/koopmanInvertedPendulum.py
|
23a2317318f8c0da4234581463207c14b2bd54f1
|
[] |
no_license
|
jiemingChen/DeepKoopman
|
654a47922e4d7d6161c032a5e7ac7374d6999917
|
2e6ce8218c0bf5b7bcb072a6983a8f6870ec6186
|
refs/heads/master
| 2023-03-27T22:39:24.992333
| 2020-09-28T23:11:40
| 2020-09-28T23:11:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,539
|
py
|
import numpy as np
import torch
import torch.nn as nn
import gym
from torch.utils.data import Dataset, DataLoader
import control
import os
from ReplayBuffer import ReplayBuffer
import time
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--env_name", default='InvertedPendulum-v2')
parser.add_argument("--max_iter", default=200)
parser.add_argument("--hidden_dim", default=3, type=int)
parser.add_argument("--mode", default="train")
args = parser.parse_args()
class DeepKoopman():
def __init__(self, env_name = "Pendulum-v0", hidden_dim = 2):
self.env_name = env_name
self.env = gym.make(env_name)
self.state_dim = self.env.observation_space.shape[0]+1
self.hidden_dim = hidden_dim
self.action_dim = self.env.action_space.shape[0]
self.encoder = nn.Sequential(nn.Linear(self.state_dim, 16),
nn.PReLU(),
nn.Linear(16, 16),
nn.PReLU(),
nn.Linear(16, hidden_dim))
self.decoder = nn.Sequential(nn.Linear(hidden_dim, 16),
nn.PReLU(),
nn.Linear(16, 16),
nn.PReLU(),
nn.Linear(16, self.state_dim))
self.propagate = nn.Linear(hidden_dim+self.action_dim, hidden_dim, bias = False)
self.lambda1 = 1.0
self.lambda2 = 0.3
self.replay_buffer = ReplayBuffer(100000)
def get_system(self):
weight = self.propagate.weight.data.numpy()
A = weight[:, :self.hidden_dim]
B = weight[:, self.hidden_dim:]
return A, B
def forward(self, xt, ut):
gt = self.encoder(xt)
xt_ = self.decoder(gt)
gtdot = self.propagate(torch.cat((gt, ut), axis = -1))
gt1 = gt + self.env.env.dt*gtdot
xt1_ = self.decoder(gt1)
return gt, gt1, xt_, xt1_
def save(self):
if not os.path.exists("weights/"):
os.mkdir("weights/")
file_name = "weights/" + self.env_name + ".pt"
torch.save({"encoder" : self.encoder.state_dict(),
"decoder" : self.decoder.state_dict(),
"propagate" : self.propagate.state_dict()}, file_name)
print("save model to " + file_name)
def load(self):
try:
if not os.path.exists("weights/"):
os.mkdir("weights/")
file_name = "weights/" + self.env_name + ".pt"
checkpoint = torch.load(file_name)
self.encoder.load_state_dict(checkpoint["encoder"])
self.decoder.load_state_dict(checkpoint["decoder"])
self.propagate.load_state_dict(checkpoint["propagate"])
print("load model from " + file_name)
        except Exception:
print("fail to load model!")
def transform_state(self, x):
return np.array([x[1], np.sin(x[1]), np.cos(x[1]), x[2], x[3]])
def policy_rollout(self):
A, B = self.get_system()
Q = np.eye(self.hidden_dim)
R = np.array([[0.01]])
K, _, _ = control.lqr(A, B, Q, R)
ref = torch.FloatTensor([[0.0, 0.0, 1.0, 0., 0.]])
ref = model.encoder(ref).detach().numpy()
obs_old = self.transform_state(self.env.reset())
#obs_old[2] = obs_old[2] / 8.0
for _ in range(200):
if np.random.random() > 0.05:
state = torch.FloatTensor(obs_old.reshape((1, -1)))
y = model.encoder(state).detach().numpy()
action = -np.dot(K, (y-ref).T)
action = np.clip(np.array([action.item()]), -1., 1.)
else:
action = self.env.action_space.sample()
#self.env.render()
obs, reward, done, info = self.env.step(action)
#obs[2] = obs[2] / 8.0
obs = self.transform_state(obs)
self.replay_buffer.push((obs_old, action, obs))
obs_old = obs
def random_rollout(self):
obs_old = self.transform_state(self.env.reset())
#obs_old[2] = obs_old[2] / 8.
for _ in range(200):
action = self.env.action_space.sample()
obs, reward, done, info = self.env.step(action)
obs = self.transform_state(obs)
#obs[2] = obs[2] / 8.0
self.replay_buffer.push((obs_old, action, obs))
obs_old = obs
def train(self, max_iter, lr =0.001):
mseloss = nn.MSELoss()
l1loss = nn.L1Loss()
encoder_optimizer = torch.optim.Adam(self.encoder.parameters(), lr = lr)
decoder_optimizer = torch.optim.Adam(self.decoder.parameters(), lr = lr)
propagate_optimizer = torch.optim.Adam(self.propagate.parameters(), lr = lr)
for i in range(20):
self.random_rollout()
for it in range(max_iter):
loss_hist = []
for _ in range(100):
xt, ut, xt1 = self.replay_buffer.sample(64)
xt = torch.FloatTensor(xt)
ut = torch.FloatTensor(ut)
xt1 = torch.FloatTensor(xt1)
gt, gt1, xt_, xt1_ = self.forward(xt, ut)
ae_loss = mseloss(xt_, xt)
pred_loss = mseloss(xt1_, xt1)
metric_loss = l1loss(torch.norm(gt1-gt, dim=1), torch.norm(xt1-xt, dim=1))
#reg_loss = torch.norm(self.propagate.weight.data[:, self.hidden_dim:])
total_loss = ae_loss + self.lambda1*pred_loss + self.lambda2*metric_loss
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
propagate_optimizer.zero_grad()
total_loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
propagate_optimizer.step()
loss_hist.append(total_loss.detach().numpy())
print("epoch: %d, loss: %2.5f" % (it, np.mean(loss_hist)))
for i in range(5):
self.policy_rollout()
for i in range(5):
self.random_rollout()
if __name__ == "__main__":
model = DeepKoopman(args.env_name, args.hidden_dim)
if args.mode == "train":
model.train(args.max_iter, 0.001)
model.save()
else:
model.load()
A, B = model.get_system()
Q = np.eye(args.hidden_dim)
R = np.array([[0.08]])
K, _, _ = control.lqr(A, B, Q, R)
print(A)
print(B)
print(K)
env = gym.make(args.env_name)
ref = torch.FloatTensor([[0.0, 0.0, 1.0, 0., 0.]])
ref = model.encoder(ref).detach().numpy()
offset = [0.1, 0.2, 0.3, 0.4, 0.5]
for k in range(5):
state = env.reset()
state[1] = offset[k]
env.env.set_state(state[:2], state[:2])
state = model.transform_state(state)
for i in range(200):
env.render()
state = torch.FloatTensor(state.reshape((1, -1)))
#state[0, 2] = state[0, 2] / 8.0
y = model.encoder(state).detach().numpy()
action = -np.dot(K, (y-ref).T)
state, reward, done, info = env.step(action)
#print(state)
state = model.transform_state(state)
env.close()
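# Typical invocations (illustrative; the flag values are assumptions):
#   python koopmanInvertedPendulum.py --mode train --max_iter 200
#   python koopmanInvertedPendulum.py --mode test   # anything but "train" loads weights and runs LQR rollouts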
|
[
"csj15thu@gmail.com"
] |
csj15thu@gmail.com
|
01e1441294cda302a160e5771d99e199e575a62e
|
90cdfc6ff827c8334c81f6f896b1081cbb4d4f7a
|
/07GUI/08Pyqt5/06QtLearning/main.py
|
67e007139e350075c02c31f2644d82b77e45fcbe
|
[] |
no_license
|
HBU/Jupyter
|
c79883f329efd2426c5c8fde1364266ed8b5059f
|
b3d5d08c89c26c68027409c2b466ac64aeb1af39
|
refs/heads/master
| 2022-07-06T22:00:43.694050
| 2020-12-22T09:53:02
| 2020-12-22T09:53:02
| 123,717,897
| 3
| 3
| null | 2022-07-06T19:20:58
| 2018-03-03T18:04:01
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,190
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(517, 400)
self.label = QtWidgets.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(80, 10, 211, 61))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(36)
self.label.setFont(font)
self.label.setObjectName("label")
self.tableView = QtWidgets.QTableView(Dialog)
self.tableView.setGeometry(QtCore.QRect(60, 100, 256, 261))
self.tableView.setObjectName("tableView")
self.layoutWidget = QtWidgets.QWidget(Dialog)
self.layoutWidget.setGeometry(QtCore.QRect(340, 120, 135, 241))
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.pushButton_2 = QtWidgets.QPushButton(self.layoutWidget)
self.pushButton_2.setObjectName("pushButton_2")
self.verticalLayout.addWidget(self.pushButton_2)
self.pushButton_3 = QtWidgets.QPushButton(self.layoutWidget)
self.pushButton_3.setObjectName("pushButton_3")
self.verticalLayout.addWidget(self.pushButton_3)
self.pushButton_4 = QtWidgets.QPushButton(self.layoutWidget)
self.pushButton_4.setObjectName("pushButton_4")
self.verticalLayout.addWidget(self.pushButton_4)
self.lineEdit = QtWidgets.QLineEdit(self.layoutWidget)
self.lineEdit.setObjectName("lineEdit")
self.verticalLayout.addWidget(self.lineEdit)
self.pushButton_5 = QtWidgets.QPushButton(self.layoutWidget)
self.pushButton_5.setObjectName("pushButton_5")
self.verticalLayout.addWidget(self.pushButton_5)
self.pushButton = QtWidgets.QPushButton(self.layoutWidget)
self.pushButton.setObjectName("pushButton")
self.verticalLayout.addWidget(self.pushButton)
self.retranslateUi(Dialog)
self.pushButton.clicked.connect(Dialog.btnClose)
self.pushButton_2.clicked.connect(Dialog.btnInsert)
self.pushButton_3.clicked.connect(Dialog.btnDelete)
self.pushButton_4.clicked.connect(Dialog.btnUpdate)
self.pushButton_5.clicked.connect(Dialog.btnQuery)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label.setText(_translate("Dialog", "用户管理"))
self.pushButton_2.setText(_translate("Dialog", "增加"))
self.pushButton_3.setText(_translate("Dialog", "删除"))
self.pushButton_4.setText(_translate("Dialog", "修改"))
self.pushButton_5.setText(_translate("Dialog", "查询"))
self.pushButton.setText(_translate("Dialog", "关闭"))
|
[
"8584751@qq.com"
] |
8584751@qq.com
|
902b8d163053965b0fd5ccb0bccc4093f6735a82
|
0adf94fc39a02018165b62e93dd83edddd041230
|
/.history/Jobs/views_20190225164613.py
|
81e0cf17f2f0704d47a0e7fa8441b3d22cbb48ad
|
[] |
no_license
|
SabitDeepto/BrJobs
|
1e3baa143331cf46b9c70911c6644d1efd4fffd6
|
1a458c8c667f8093a2325d963e5542655467c7aa
|
refs/heads/master
| 2020-04-24T08:02:26.350007
| 2019-03-17T05:53:30
| 2019-03-17T05:53:30
| 171,818,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,304
|
py
|
from django.contrib.auth.forms import UserCreationForm
from django.shortcuts import redirect, render
from django.urls import reverse_lazy
from django.views import generic
from .forms import UserForm, ProfileForm
from django.contrib import messages
from django.db.models import Q
from django.shortcuts import get_object_or_404, render, render_to_response
from .forms import JobPostForm
from .models import JobPost
def home(request):
post = JobPost.objects.all()
return render(request, 'basic/index.html', {'post': post})
def single_post(request, post_id):
post = JobPost.objects.get(pk=post_id)
return render(request, 'basic/detail.html', {'post': post})
def jobpost(request):
    # Bind the form only when data was actually submitted, so a plain GET
    # renders an empty form instead of one full of validation errors.
    form = JobPostForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        form.save()
        return redirect('home')
    return render(request, 'basic/client-job.html', {'form': form})
def update_profile(request):
if request.method == 'POST':
user_form = UserForm(request.POST, instance=request.user)
profile_form = ProfileForm(request.POST, instance=request.user.profile)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
messages.success(request, ('Your profile was successfully updated!'))
# return redirect('settings:profile')
else:
messages.error(request, ('Please correct the error below.'))
else:
user_form = UserForm(instance=request.user)
profile_form = ProfileForm(instance=request.user.profile)
return render(request, 'basic/test.html', {
'user_form': user_form,
'profile_form': profile_form
})
def searchposts(request):
if request.method == 'GET':
query = request.GET.get('q')
submitbutton = request.GET.get('submit')
if query is not None:
lookups = Q(title__icontains=query) | Q(detail__icontains=query)
            results = JobPost.objects.filter(lookups).distinct()
context = {'results': results,
'submitbutton': submitbutton}
return render(request, 'blog/blog_view.html', context)
else:
return render(request, 'blog/blog_view.html')
else:
return render(request, 'blog/blog_view.html')
|
[
"deepto69@gmail.com"
] |
deepto69@gmail.com
|
612247c1e53605ffa741a2fd8c545e5aee1047b8
|
1c2a9ce62301d5342113f2fdea8faefe807877c3
|
/weekly/models.py
|
95cda273c45b342928bebd15c878c21b9bdd4218
|
[] |
no_license
|
Jillelanglas/weekly
|
782c03595118bb110c6d4ef3cda182d4b750ce30
|
b4b5bd373b7b9a07198c1354ea2f9a7854ffa75b
|
refs/heads/master
| 2021-01-15T23:07:08.495235
| 2013-10-05T18:01:51
| 2013-10-05T18:01:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,524
|
py
|
from weekly import db
import cryptacular.bcrypt
import datetime
import mongoengine
from flask import url_for
from misaka import Markdown, HtmlRenderer
rndr = HtmlRenderer()
md = Markdown(rndr)
crypt = cryptacular.bcrypt.BCRYPTPasswordManager()
class User(db.Document):
_password = db.StringField(max_length=1023, required=True)
username = db.StringField(max_length=32, min_length=3, unique=True)
name = db.StringField(max_length=32, min_length=3, unique=True)
team = db.ReferenceField('Team')
major = db.ReferenceField('Major')
email = db.StringField(required=True)
admin = db.BooleanField(default=False)
active = db.BooleanField(default=False)
_type = db.IntField(min_value=0, max_value=3)
@property
def type(self):
if self._type == 0:
return 'Volunteer'
elif self._type == 1:
return 'Senior'
elif self._type == 2:
return 'Alumni'
else:
return 'Other'
@property
def password(self):
return self._password
@password.setter
def password(self, val):
self._password = unicode(crypt.encode(val))
def check_password(self, password):
return crypt.check(self._password, password)
def is_authenticated(self):
return True
def is_active(self):
return self.active
def is_anonymous(self):
return False
def get_id(self):
return unicode(self.id)
    def __repr__(self):
        return '<User %r>' % (self.username)
class Comment(db.EmbeddedDocument):
body = db.StringField(min_length=10)
user = db.ReferenceField(User, required=True)
time = db.DateTimeField()
@property
def md_body(self):
return md.render(self.body)
class Post(db.Document):
id = db.ObjectIdField()
body = db.StringField(min_length=10)
    timestamp = db.DateTimeField(default=datetime.datetime.now)  # pass the callable so the default is evaluated per document, not at import
year = db.IntField(required=True)
week = db.IntField(required=True)
user = db.ReferenceField(User, required=True)
comments = db.ListField(db.EmbeddedDocumentField(Comment))
@property
def md_body(self):
return md.render(self.body)
@classmethod
    def next_week(cls, week=None, year=None):
now = datetime.datetime.now().isocalendar()
if not week:
week = now[1] - 1
if not year:
year = now[0]
if week == 52:
year += 1
week = 0
else:
week += 1
return url_for('index', week=week, year=year)
@classmethod
    def prev_week(cls, week=None, year=None):
now = datetime.datetime.now().isocalendar()
if not week:
week = now[1] - 1
if not year:
year = now[0]
if week == 0:
year -= 1
week = 52
else:
week -= 1
return url_for('index', week=week, year=year)
def add_comment(self, user, body):
comment = Comment(user=user,
body=body,
time=datetime.datetime.now())
self.comments.append(comment)
self.save()
class Team(db.Document):
id = db.ObjectIdField()
text = db.StringField()
def __str__(self):
return self.text
def users(self):
return User.objects(team=self, _type=1)
class Major(db.Document):
key = db.StringField(max_length=5, primary_key=True)
text = db.StringField()
def __str__(self):
return self.text
|
[
"isaac@simpload.com"
] |
isaac@simpload.com
|
91844c1ed6cc7e36ae4119c9586f5fb82f28822b
|
e204cdd8a38a247aeac3d07f6cce6822472bdcc5
|
/.history/app_test_django/models_20201116133107.py
|
2523c8d06c874130fa411ddfea0a2aa8bcbbfe7e
|
[] |
no_license
|
steven-halla/python-test
|
388ad8386662ad5ce5c1a0976d9f054499dc741b
|
0b760a47d154078002c0272ed1204a94721c802a
|
refs/heads/master
| 2023-04-08T03:40:00.453977
| 2021-04-09T19:12:29
| 2021-04-09T19:12:29
| 354,122,365
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,940
|
py
|
from django.db import models
import re
class UserManager(models.Manager):
def user_registration_validator(self, post_data):
errors = {}
EMAIL_REGEX = re.compile(
r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
        if len(post_data['first_name']) < 3:
            errors['first_name'] = "First name must be at least 3 characters"
        if not post_data['first_name'].isalpha():
            errors['first_name'] = "letters only"
        if len(post_data['last_name']) < 3:
            errors['last_name'] = "Last name must be at least 3 characters"
        if not post_data['last_name'].isalpha():
            errors['last_name'] = "letters only"
        if len(post_data['email']) < 8:
            errors['email'] = "Email must be at least 8 characters"
#if post_data['email'].Books.objects.filter(title=post_data) == True:
# errors['email'] ="this email already exist in database"
if post_data['email'].find("@") == -1:
errors['email'] = "email must contain @ and .com"
if post_data['email'].find(".com") == -1:
errors['email'] = "email must contain @ and .com"
# test whether a field matches the pattern
if not EMAIL_REGEX.match(post_data['email']):
errors['email'] = "Invalid email address!"
if post_data['password'] != post_data['confirm_password']:
errors['pass_match'] = "password must match confirm password"
if len(post_data['password']) < 8:
errors['pass_length'] = "password must be longer than 8 characters"
return errors
class User(models.Model):
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
email = models.CharField(max_length=20)
password = models.CharField(max_length=20)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = UserManager()
class TripManager(models.Manager):
def add_trip_validator(self, post_data):
errors = {}
        if len(post_data['destination']) < 2:
            errors['destination'] = "destination name must be at least 2 characters"
        if len(post_data['startdate']) < 1:
            errors['startdate'] = "start date needs input"
        if len(post_data['enddate']) < 1:
            errors['enddate'] = "end date needs input"
        if len(post_data['plan']) < 5:
            errors['plan'] = "plan must be at least 5 characters"
return errors
class Trip(models.Model):
destination = models.CharField(max_length=20)
startdate = models.DateTimeField()
enddate = models.DateTimeField()
plan = models.CharField(max_length=30)
uploaded_by = models.ForeignKey(User, related_name="trip_uploaded", on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects=TripManager()
|
[
"69405488+steven-halla@users.noreply.github.com"
] |
69405488+steven-halla@users.noreply.github.com
|
c462bafef5399e8f9cd37b8a37573720063ab2c2
|
306d2a92fb331aec6ddf0794b538d6e3385a0df9
|
/app/api/account/urls.py
|
21f884031d1962d2ca3574afe6cc2097735a669d
|
[] |
no_license
|
Zarinabonu/ForceApp
|
f343d3a52aee08890230c5425c9e238df99c5a7f
|
13f8e8613999c4850fc6f0bfcec66f897eecbe4a
|
refs/heads/master
| 2020-12-10T08:00:25.072289
| 2020-01-20T13:14:07
| 2020-01-20T13:14:07
| 233,540,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
from rest_framework.serializers import ModelSerializer
from app.model import Account
class AccountSerializer(ModelSerializer):
class Meta:
model = Account
fields = ('id',
'f_name',
'l_name',
'm_name',)
|
[
"zarinabonu199924@gmail.com"
] |
zarinabonu199924@gmail.com
|
19e9eb6c0f0128d8724b3f15dc2aeca49e1f211b
|
2d921bb03eade0763ddb3a9cc5cb637730ecbde1
|
/bdt/misassign_masses.py
|
21339aff913311d7f6730d9ba3d5c46fd49fded9
|
[] |
no_license
|
rmanzoni/WTau3Mu
|
10c57971b80f9769578284abd69009008901eea7
|
5ad336df976d5a1b39e4b516641661921b06ba20
|
refs/heads/92X
| 2021-01-18T15:10:41.887147
| 2019-05-09T12:48:00
| 2019-05-09T12:48:00
| 84,342,825
| 0
| 7
| null | 2018-07-19T09:08:19
| 2017-03-08T16:35:42
|
Python
|
UTF-8
|
Python
| false
| false
| 4,883
|
py
|
import ROOT
import root_pandas
import numpy as np
import pandas
import root_numpy
# charged kaon and pion masses [GeV]
m_k = 0.493677
m_pi = 0.13957061
# tree = ROOT.TChain('tree')
# tree.Add('/Users/manzoni/Documents/tau3mu2018/16april/ntuples/data_enriched_16apr2018v16.root')
print 'loading dataset...'
dataset = pandas.DataFrame(root_numpy.root2array(
'/Users/manzoni/Documents/tau3mu2018/16april/ntuples/data_enriched_16apr2018v16.root',
'tree',
# start=0,
# stop=100000,
)
)
print '\t...done'
mpp12_array = []
mpp13_array = []
mpp23_array = []
mkk12_array = []
mkk13_array = []
mkk23_array = []
mkp12_array = []
mkp13_array = []
mkp23_array = []
mpk12_array = []
mpk13_array = []
mpk23_array = []
mppp_array = []
mppk_array = []
mpkp_array = []
mkpp_array = []
mpkk_array = []
mkpk_array = []
mkkp_array = []
mkkk_array = []
# for i, ev in enumerate(tree):
for i in range(len(dataset)):
if i%10000 == 0:
print '========> processed %d/%d \tevents\t%.1f' %(i, len(dataset), float(i)/len(dataset))
# for i in range(10):
# k1p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(ev.mu1_pt, ev.mu1_eta, ev.mu1_phi, m_k )
# k2p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(ev.mu2_pt, ev.mu2_eta, ev.mu2_phi, m_k )
# k3p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(ev.mu3_pt, ev.mu3_eta, ev.mu3_phi, m_k )
#
# pi1p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(ev.mu1_pt, ev.mu1_eta, ev.mu1_phi, m_pi)
# pi2p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(ev.mu2_pt, ev.mu2_eta, ev.mu2_phi, m_pi)
# pi3p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(ev.mu3_pt, ev.mu3_eta, ev.mu3_phi, m_pi)
k1p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(dataset.mu1_refit_pt[i], dataset.mu1_refit_eta[i], dataset.mu1_refit_phi[i], m_k )
k2p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(dataset.mu2_refit_pt[i], dataset.mu2_refit_eta[i], dataset.mu2_refit_phi[i], m_k )
k3p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(dataset.mu3_refit_pt[i], dataset.mu3_refit_eta[i], dataset.mu3_refit_phi[i], m_k )
pi1p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(dataset.mu1_refit_pt[i], dataset.mu1_refit_eta[i], dataset.mu1_refit_phi[i], m_pi)
pi2p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(dataset.mu2_refit_pt[i], dataset.mu2_refit_eta[i], dataset.mu2_refit_phi[i], m_pi)
pi3p4 = ROOT.Math.LorentzVector('ROOT::Math::PtEtaPhiM4D<double>')(dataset.mu3_refit_pt[i], dataset.mu3_refit_eta[i], dataset.mu3_refit_phi[i], m_pi)
mpp12 = (pi1p4 + pi2p4).mass()
mpp13 = (pi1p4 + pi3p4).mass()
mpp23 = (pi2p4 + pi3p4).mass()
mkk12 = (k1p4 + k2p4).mass()
mkk13 = (k1p4 + k3p4).mass()
mkk23 = (k2p4 + k3p4).mass()
mkp12 = (k1p4 + pi2p4).mass()
mkp13 = (k1p4 + pi3p4).mass()
mkp23 = (k2p4 + pi3p4).mass()
mpk12 = (pi1p4 + k2p4).mass()
mpk13 = (pi1p4 + k3p4).mass()
mpk23 = (pi2p4 + k3p4).mass()
mppp = (pi1p4 + pi2p4 + pi3p4).mass()
mppk = (pi1p4 + pi2p4 + k3p4 ).mass()
mpkp = (pi1p4 + k2p4 + pi3p4).mass()
mkpp = (k1p4 + pi2p4 + pi3p4).mass()
mpkk = (pi1p4 + k2p4 + k3p4 ).mass()
mkpk = (k1p4 + pi2p4 + k3p4 ).mass()
mkkp = (k1p4 + k2p4 + pi3p4).mass()
mkkk = (k1p4 + k2p4 + k3p4 ).mass()
mpp12_array.append(mpp12)
mpp13_array.append(mpp13)
mpp23_array.append(mpp23)
mkk12_array.append(mkk12)
mkk13_array.append(mkk13)
mkk23_array.append(mkk23)
mkp12_array.append(mkp12)
mkp13_array.append(mkp13)
mkp23_array.append(mkp23)
mpk12_array.append(mpk12)
mpk13_array.append(mpk13)
mpk23_array.append(mpk23)
mppp_array .append(mppp )
mppk_array .append(mppk )
mpkp_array .append(mpkp )
mkpp_array .append(mkpp )
mpkk_array .append(mpkk )
mkpk_array .append(mkpk )
mkkp_array .append(mkkp )
mkkk_array .append(mkkk )
dataset['mpp12'] = mpp12_array
dataset['mpp13'] = mpp13_array
dataset['mpp23'] = mpp23_array
dataset['mkk12'] = mkk12_array
dataset['mkk13'] = mkk13_array
dataset['mkk23'] = mkk23_array
dataset['mkp12'] = mkp12_array
dataset['mkp13'] = mkp13_array
dataset['mkp23'] = mkp23_array
dataset['mpk12'] = mpk12_array
dataset['mpk13'] = mpk13_array
dataset['mpk23'] = mpk23_array
dataset['mppp'] = mppp_array
dataset['mppk'] = mppk_array
dataset['mpkp'] = mpkp_array
dataset['mkpp'] = mkpp_array
dataset['mpkk'] = mpkk_array
dataset['mkpk'] = mkpk_array
dataset['mkkp'] = mkkp_array
dataset['mkkk'] = mkkk_array
print 'staging dataset...'
dataset.to_root(
'/Users/manzoni/Documents/tau3mu2018/16april/ntuples/data_enriched_16apr2018v16_extra_masses.root',
key='tree',
store_index=False
)
print '\t...done'
|
[
"riccardo.manzoni@cern.ch"
] |
riccardo.manzoni@cern.ch
|
727fc97005633da5105c31d875de048d679cb327
|
17268419060d62dabb6e9b9ca70742f0a5ba1494
|
/pp/samples/191_mirror_h.py
|
5d5f8caa93016a9121b917401e02a52f9b2ade76
|
[
"MIT"
] |
permissive
|
TrendingTechnology/gdsfactory
|
a19124423b12cbbb4f35b61f33303e9a012f82e5
|
c968558dba1bae7a0421bdf49dc192068147b776
|
refs/heads/master
| 2023-02-22T03:05:16.412440
| 2021-01-24T03:38:00
| 2021-01-24T03:38:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 188
|
py
|
if __name__ == "__main__":
import pp
c = pp.Component()
m1 = c << pp.c.mmi1x2()
m2 = c << pp.c.mmi1x2()
m2.reflect_h(port_name="E1")
m2.movex(10)
pp.show(c)
|
[
"noreply@github.com"
] |
TrendingTechnology.noreply@github.com
|
8813f6544d2ccea3832683f456c77c7e969252cd
|
11d697345808e3630985d70600fd6f2bed1ac7e5
|
/slacktheme/models.py
|
519400c4f6b64e846e1d9bf5d6e8f82435b917a8
|
[] |
no_license
|
openhealthcare/opal-slacktheme
|
ce97ddac3c490ed19a3ab96dd85a17eec010cff5
|
c819e02f9e4a45a554ae5b49d28b95a812a86bca
|
refs/heads/master
| 2021-01-19T22:52:41.572813
| 2017-06-08T13:21:42
| 2017-06-08T13:21:42
| 88,879,256
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
"""
Models for slacktheme
"""
#
# Warning - even if you don't have any models, please don't delete this file.
# Some parts of Django require you to have something it can import called
# slacktheme.models in order for us to let you be a Django app.
#
|
[
"david@deadpansincerity.com"
] |
david@deadpansincerity.com
|
9667e86b4ca07c2e6716741e6cf0e9de4b7bdee6
|
4ad04de638ccfed398adb5496826c0d19e755d9e
|
/models/hr_contract_wage_type_period.py
|
50c5042a81dece83a28e3107e503007f66523598
|
[
"BSD-2-Clause"
] |
permissive
|
aroodooteam/aro_hr_payroll
|
2f399a0f2e45652d2791df48a95e5ad66a051d71
|
dd95d500827566f1444e32760dda5b5b69a8906e
|
refs/heads/master
| 2021-01-22T13:47:46.272642
| 2018-01-29T11:25:35
| 2018-01-29T11:25:35
| 100,686,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
# -*- coding: utf-8 -*-
from openerp.osv import fields, osv
import time
import logging
logger = logging.getLogger(__name__)
# Contract wage type period name
class hr_contract_wage_type_period(osv.osv):
_name = 'hr.contract.wage.type.period'
_description = 'Wage Period'
_columns = {
'name': fields.char('Period Name', size=50,
required=True, select=True),
'factor_days': fields.float('Hours in the period',
digits=(12, 4), required=True,)
}
_defaults = {
'factor_days': 173.33
}
hr_contract_wage_type_period()
|
[
"aroodoo@asus.aro"
] |
aroodoo@asus.aro
|
27d25a48451ddf4fd37788f53f17ab7d7bbbb843
|
b71f656374293c5f1238fcb449aa4dde78632861
|
/eudplib/eudlib/memiof/byterw.py
|
c6a45de2f0bcb03d62c384d553512caacbd340cb
|
[
"MIT"
] |
permissive
|
tobeinged/eudplib
|
ce1cdc15f7ec6af857b4b64b5c826b3dd95d3e48
|
066c0faa200dc19e70cdb6979daf8f008b8ae957
|
refs/heads/master
| 2023-05-04T08:49:01.180147
| 2019-03-18T14:30:29
| 2019-03-18T14:30:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,708
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Copyright (c) 2014 trgk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from ... import core as c
from ... import ctrlstru as cs
from . import dwepdio as dwm
_epd, _suboffset = c.EUDCreateVariables(2)
class EUDByteReader:
"""Read byte by byte."""
def __init__(self):
self._dw = c.EUDVariable()
self._b = c.EUDCreateVariables(4)
self._suboffset = c.EUDVariable()
self._offset = c.EUDVariable()
# -------
@c.EUDMethod
def seekepd(self, epdoffset):
"""Seek EUDByteReader to specific epd player address"""
c.SeqCompute([
(self._offset, c.SetTo, epdoffset),
(self._suboffset, c.SetTo, 0)
])
c.SetVariables(self._dw, dwm.f_dwread_epd(epdoffset))
c.SetVariables([
self._b[0],
self._b[1],
self._b[2],
self._b[3],
], dwm.f_dwbreak(self._dw)[2:6])
@c.EUDMethod
def seekoffset(self, offset):
"""Seek EUDByteReader to specific address"""
global _epd, _suboffset
# convert offset to epd offset & suboffset
c.SetVariables([_epd, _suboffset], c.f_div(offset, 4))
c.SeqCompute([(_epd, c.Add, -0x58A364 // 4)])
# seek to epd & set suboffset
self.seekepd(_epd)
c.SeqCompute([
(self._suboffset, c.SetTo, _suboffset)
])
# -------
@c.EUDMethod
def readbyte(self):
"""Read byte from current address. Reader will advance by 1 bytes.
:returns: Read byte
"""
case0, case1, case2, case3, swend = [c.Forward() for _ in range(5)]
ret = c.EUDVariable()
# suboffset == 0
case0 << c.NextTrigger()
cs.EUDJumpIfNot(self._suboffset.Exactly(0), case1)
c.SeqCompute([
(ret, c.SetTo, self._b[0]),
(self._suboffset, c.Add, 1)
])
cs.EUDJump(swend)
# suboffset == 1
case1 << c.NextTrigger()
cs.EUDJumpIfNot(self._suboffset.Exactly(1), case2)
c.SeqCompute([
(ret, c.SetTo, self._b[1]),
(self._suboffset, c.Add, 1)
])
cs.EUDJump(swend)
# suboffset == 2
case2 << c.NextTrigger()
cs.EUDJumpIfNot(self._suboffset.Exactly(2), case3)
c.SeqCompute([
(ret, c.SetTo, self._b[2]),
(self._suboffset, c.Add, 1)
])
cs.EUDJump(swend)
# suboffset == 3
# read more dword
case3 << c.NextTrigger()
c.SeqCompute([
(ret, c.SetTo, self._b[3]),
(self._offset, c.Add, 1),
(self._suboffset, c.SetTo, 0)
])
c.SetVariables(self._dw, dwm.f_dwread_epd(self._offset))
c.SetVariables([
self._b[0],
self._b[1],
self._b[2],
self._b[3],
], dwm.f_dwbreak(self._dw)[2:6])
swend << c.NextTrigger()
return ret
class EUDByteWriter:
"""Write byte by byte"""
def __init__(self):
self._dw = c.EUDVariable()
self._suboffset = c.EUDVariable()
self._offset = c.EUDVariable()
self._b = [c.EUDLightVariable() for _ in range(4)]
@c.EUDMethod
def seekepd(self, epdoffset):
"""Seek EUDByteWriter to specific epd player addresss"""
c.SeqCompute([
(self._offset, c.SetTo, epdoffset),
(self._suboffset, c.SetTo, 0)
])
c.SetVariables(self._dw, dwm.f_dwread_epd(epdoffset))
c.SetVariables(self._b, dwm.f_dwbreak(self._dw)[2:6])
@c.EUDMethod
def seekoffset(self, offset):
"""Seek EUDByteWriter to specific address"""
global _epd, _suboffset
# convert offset to epd offset & suboffset
c.SetVariables([_epd, _suboffset], c.f_div(offset, 4))
c.SeqCompute([(_epd, c.Add, (0x100000000 - 0x58A364) // 4)])
self.seekepd(_epd)
c.SeqCompute([
(self._suboffset, c.SetTo, _suboffset)
])
@c.EUDMethod
def writebyte(self, byte):
"""Write byte to current position.
Write a byte to current position of EUDByteWriter. Writer will advance
by 1 byte.
.. note::
        Bytes may be buffered before being written to memory. After you have
        finished using `writebyte`, you must call `flushdword` to flush the
        buffer.
"""
cs.EUDSwitch(self._suboffset)
for i in range(3):
if cs.EUDSwitchCase()(i):
cs.DoActions([
self._b[i].SetNumber(byte),
self._suboffset.AddNumber(1)
])
cs.EUDBreak()
if cs.EUDSwitchCase()(3):
cs.DoActions(self._b[3].SetNumber(byte))
self.flushdword()
cs.DoActions([
self._offset.AddNumber(1),
self._suboffset.SetNumber(0),
])
c.SetVariables(self._dw, dwm.f_dwread_epd(self._offset))
c.SetVariables(self._b, dwm.f_dwbreak(self._dw)[2:6])
cs.EUDEndSwitch()
@c.EUDMethod
def flushdword(self):
"""Flush buffer."""
# mux bytes
c.RawTrigger(actions=self._dw.SetNumber(0))
for i in range(7, -1, -1):
for j in range(4):
c.RawTrigger(
conditions=[
self._b[j].AtLeast(2 ** i)
],
actions=[
self._b[j].SubtractNumber(2 ** i),
self._dw.AddNumber(2 ** (i + j * 8))
]
)
dwm.f_dwwrite_epd(self._offset, self._dw)
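# A minimal usage sketch (comments only; assumes the usual eudplib map-script
# context in which these triggers compile, and the address is a placeholder):
#
#   bw = EUDByteWriter()
#   bw.seekoffset(0x58A364)   # hypothetical destination address
#   for byte in b'abc':
#       bw.writebyte(byte)
#   bw.flushdword()           # required: bytes are buffered until flushed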
|
[
"phu54321@naver.com"
] |
phu54321@naver.com
|
dcf9d83ba4bfa75b310253049edaadb0ac26101c
|
5c056604ecbfdd6e3d20c6d3b891855767c431b8
|
/CIFAR-10/DRE-F-SP+RS/models/ResNet_extract.py
|
c0f759d4aeba34d997dc7326df08db4232fb134d
|
[] |
no_license
|
pkulwj1994/cDR-RS
|
135d1fc9504304ba0303fe5acc3594ea27531557
|
661d694d6a8dfb44885271bdfd92d6dc150a40f8
|
refs/heads/main
| 2023-08-30T05:37:07.449304
| 2021-11-16T03:17:42
| 2021-11-16T03:17:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,346
|
py
|
'''
ResNet-based model that maps an image from pixel space to a feature space.
It needs to be pretrained on the dataset.
The code is based on
@article{
zhang2018mixup,
title={mixup: Beyond Empirical Risk Minimization},
author={Hongyi Zhang, Moustapha Cisse, Yann N. Dauphin, David Lopez-Paz},
journal={International Conference on Learning Representations},
year={2018},
url={https://openreview.net/forum?id=r1Ddp1-Rb},
}
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
IMG_SIZE=32
NC=3
resize=(32,32)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet_extract(nn.Module):
def __init__(self, block, num_blocks, num_classes=100, nc=NC, img_height=IMG_SIZE, img_width=IMG_SIZE):
super(ResNet_extract, self).__init__()
self.in_planes = 64
self.main = nn.Sequential(
nn.Conv2d(nc, 64, kernel_size=3, stride=1, padding=1, bias=False), # h=h
nn.BatchNorm2d(64),
nn.ReLU(),
self._make_layer(block, 64, num_blocks[0], stride=1), # h=h
self._make_layer(block, 128, num_blocks[1], stride=2),
self._make_layer(block, 256, num_blocks[2], stride=2),
self._make_layer(block, 512, num_blocks[3], stride=2),
nn.AvgPool2d(kernel_size=4)
)
self.classifier_1 = nn.Sequential(
nn.Linear(512*block.expansion, img_height*img_width*nc),
# nn.BatchNorm1d(img_height*img_width*nc),
# nn.ReLU(),
)
self.classifier_2 = nn.Sequential(
nn.Linear(img_height*img_width*nc, num_classes)
)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
# x = nn.functional.interpolate(x,size=resize,mode='bilinear',align_corners=True)
features = self.main(x)
features = features.view(features.size(0), -1)
features = self.classifier_1(features)
out = self.classifier_2(features)
return out, features
def ResNet18_extract(num_classes=10):
return ResNet_extract(BasicBlock, [2,2,2,2], num_classes=num_classes)
def ResNet34_extract(num_classes=10):
return ResNet_extract(BasicBlock, [3,4,6,3], num_classes=num_classes)
def ResNet50_extract(num_classes=10):
return ResNet_extract(Bottleneck, [3,4,6,3], num_classes=num_classes)
def ResNet101_extract(num_classes=10):
return ResNet_extract(Bottleneck, [3,4,23,3], num_classes=num_classes)
def ResNet152_extract(num_classes=10):
return ResNet_extract(Bottleneck, [3,8,36,3], num_classes=num_classes)
if __name__ == "__main__":
net = ResNet34_extract(num_classes=10).cuda()
x = torch.randn(16,3,32,32).cuda()
out, features = net(x)
print(out.size())
print(features.size())
def get_parameter_number(net):
total_num = sum(p.numel() for p in net.parameters())
trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)
return {'Total': total_num, 'Trainable': trainable_num}
print(get_parameter_number(net))
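# ----------------------------------------------------------------------
# Hedged example (not part of the original file): a minimal sketch of the
# mixup batch construction from the paper cited in the header docstring.
# The helper name `mixup_data` and the `alpha` default are illustrative
# assumptions, not the author's training code.
def mixup_data(x, y, alpha=1.0):
    """Return mixed inputs, the two target batches, and the mixing weight."""
    import numpy as np
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.0
    index = torch.randperm(x.size(0), device=x.device)
    mixed_x = lam * x + (1 - lam) * x[index, :]
    return mixed_x, y, y[index], lam
# Typical use: loss = lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)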
|
[
"dingx92@gmail.com"
] |
dingx92@gmail.com
|
d2d53550d8562b31f2ef00de641a54b3c591e3fd
|
5bb8b4c7faeebd16da16ecbcd4a98aabaf688e8f
|
/data_tools/walker/src-cikm/build_graph/preprocess_venue_word.py
|
3d7fc4f53a23214e5cb8bba6ec763cd94551ca7c
|
[] |
no_license
|
xiaoqinzhe/vrdetection
|
014fc2b61c9b30dd2699fdba41089b18b7f060be
|
604a812a21a98d72ba8e23a716eb72153bdaa7c4
|
refs/heads/master
| 2023-07-04T07:44:12.141404
| 2021-08-01T06:21:17
| 2021-08-01T06:21:17
| 150,063,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 942
|
py
|
#coding:utf-8
file_name = '../dataset/paper_title_venue.txt'
venues = set()
word_df = {}
with open(file_name) as file:
for line in file:
paper_id, title, venue = line.strip().split()
words = title.split('-')
for word in words:
if word not in word_df:
word_df[word] = set()
word_df[word].add(venue)
venues.add(venue)
venues.discard('none')
for word, venue in word_df.items():
if 'none' in venue:
venue.remove('none')
venues = list(venues)
venues.sort()
with open('../dataset/venues.txt', 'w') as file:
for venue in venues:
file.write('{}\n'.format(venue))
words = list(word_df.keys())
words.sort()
with open('../dataset/word_df.txt', 'w') as file:
for word in words:
if len(word)==1 or len(word_df[word])<3:
continue
df = len(word_df[word])/len(venues)
file.write('{} {:.4f}\n'.format(word, df))
|
[
"xiaoqinzhe@qq.com"
] |
xiaoqinzhe@qq.com
|
cc997c66aa7c0603bbc734ce62e689cd06b97a65
|
1b5d39f9dd5126b6f21e83efe58b7e86ef8d94f2
|
/CodeChef/LTIME80B/CARR.py
|
22f1cdd88dba474004c0ee9865be462ca2cd7494
|
[] |
no_license
|
jai-dewani/Competitive-Programming
|
dfad61106a648b80cc97c85cc5c8bc5d1cd335d9
|
a2006e53b671ba56d4b0a20dd81fd0e21d0b0806
|
refs/heads/master
| 2021-07-03T16:08:02.466423
| 2020-09-24T16:22:28
| 2020-09-24T16:22:28
| 178,812,685
| 1
| 2
| null | 2019-10-18T14:43:19
| 2019-04-01T07:51:47
|
Python
|
UTF-8
|
Python
| false
| false
| 489
|
py
|
from random import randint
mod = 10**9+7
for _ in range(int(input())):
n,m = map(int,input().strip().split())
# n = randint(1,10**10)
# m = randint(1,10**10)
answer = 0
fact = m*pow(m-1,n-1,mod)
# for i in range(n-1):
# fact *= (m-1)
answer += fact
if(n>2):
fact = m*pow(m-1,n-2,mod)
elif n==2:
fact = m
# for i in range(n-2):
# fact *= (m-1)
fact*= (n-1)
fact %= mod
answer += fact
print(answer%mod)
|
[
"jai.dewani.99@gmail.com"
] |
jai.dewani.99@gmail.com
|
5a0f58aac33d8bad2c16cd0bc92a93704417daad
|
4cdc9ba739f90f6ac4bcd6f916ba194ada77d68c
|
/剑指offer/第五遍/32-2.分行从上到下打印二叉树.py
|
cac8702eac2082f33f6071a4d95e0ccd60552e50
|
[] |
no_license
|
leilalu/algorithm
|
bee68690daf836cc5807c3112c2c9e6f63bc0a76
|
746d77e9bfbcb3877fefae9a915004b3bfbcc612
|
refs/heads/master
| 2020-09-30T15:56:28.224945
| 2020-05-30T03:28:39
| 2020-05-30T03:28:39
| 227,313,730
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,022
|
py
|
"""
从上到下按层打印二叉树,同一层的节点按从左到右的顺序打印,每一层打印到一行。
例如:
给定二叉树: [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
返回其层次遍历结果:
[
[3],
[9,20],
[15,7]
]
"""
class Solution:
def levelOrder(self, root):
        # First handle the case where the input is empty
if not root:
return []
res = []
queue = [root]
thisLevel = 1
nextLevel = 0
level = []
while queue:
node = queue.pop(0)
level.append(node.val)
thisLevel -= 1
if node.left:
queue.append(node.left)
nextLevel += 1
if node.right:
queue.append(node.right)
nextLevel += 1
if thisLevel == 0:
res.append(level)
level = []
thisLevel = nextLevel
nextLevel = 0
return res
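# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original file). `TreeNode` below is a
# minimal stand-in for the LeetCode-style node class the solution assumes.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

if __name__ == '__main__':
    # Build the example tree [3,9,20,null,null,15,7] from the docstring.
    root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
    print(Solution().levelOrder(root))  # expected: [[3], [9, 20], [15, 7]]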
|
[
"244492644@qq.com"
] |
244492644@qq.com
|
311729967843c5ec8099011965d0fc07f899187d
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-iotedge/huaweicloudsdkiotedge/v2/model/container_configs_dto.py
|
5f77f61d48e99914c232542c25d08d3c747de972
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 6,284
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ContainerConfigsDTO:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'privileged': 'bool',
'host_network': 'bool',
'restart_policy': 'str',
'container_port_list': 'list[ContainerPortDTO]'
}
attribute_map = {
'privileged': 'privileged',
'host_network': 'host_network',
'restart_policy': 'restart_policy',
'container_port_list': 'container_port_list'
}
def __init__(self, privileged=None, host_network=None, restart_policy=None, container_port_list=None):
"""ContainerConfigsDTO
The model defined in huaweicloud sdk
        :param privileged: Whether to run the container in privileged mode
        :type privileged: bool
        :param host_network: Whether to use the host network mode
        :type host_network: bool
        :param restart_policy: Restart policy, i.e. the policy applied after the container fails its health check
        :type restart_policy: str
        :param container_port_list: Container port mappings
        :type container_port_list: list[:class:`huaweicloudsdkiotedge.v2.ContainerPortDTO`]
"""
self._privileged = None
self._host_network = None
self._restart_policy = None
self._container_port_list = None
self.discriminator = None
if privileged is not None:
self.privileged = privileged
if host_network is not None:
self.host_network = host_network
self.restart_policy = restart_policy
if container_port_list is not None:
self.container_port_list = container_port_list
@property
def privileged(self):
"""Gets the privileged of this ContainerConfigsDTO.
        Whether to run the container in privileged mode
:return: The privileged of this ContainerConfigsDTO.
:rtype: bool
"""
return self._privileged
@privileged.setter
def privileged(self, privileged):
"""Sets the privileged of this ContainerConfigsDTO.
        Whether to run the container in privileged mode
:param privileged: The privileged of this ContainerConfigsDTO.
:type privileged: bool
"""
self._privileged = privileged
@property
def host_network(self):
"""Gets the host_network of this ContainerConfigsDTO.
        Whether to use the host network mode
:return: The host_network of this ContainerConfigsDTO.
:rtype: bool
"""
return self._host_network
@host_network.setter
def host_network(self, host_network):
"""Sets the host_network of this ContainerConfigsDTO.
        Whether to use the host network mode
:param host_network: The host_network of this ContainerConfigsDTO.
:type host_network: bool
"""
self._host_network = host_network
@property
def restart_policy(self):
"""Gets the restart_policy of this ContainerConfigsDTO.
        Restart policy, i.e. the policy applied after the container fails its health check
:return: The restart_policy of this ContainerConfigsDTO.
:rtype: str
"""
return self._restart_policy
@restart_policy.setter
def restart_policy(self, restart_policy):
"""Sets the restart_policy of this ContainerConfigsDTO.
        Restart policy, i.e. the policy applied after the container fails its health check
:param restart_policy: The restart_policy of this ContainerConfigsDTO.
:type restart_policy: str
"""
self._restart_policy = restart_policy
@property
def container_port_list(self):
"""Gets the container_port_list of this ContainerConfigsDTO.
        Container port mappings
:return: The container_port_list of this ContainerConfigsDTO.
:rtype: list[:class:`huaweicloudsdkiotedge.v2.ContainerPortDTO`]
"""
return self._container_port_list
@container_port_list.setter
def container_port_list(self, container_port_list):
"""Sets the container_port_list of this ContainerConfigsDTO.
        Container port mappings
:param container_port_list: The container_port_list of this ContainerConfigsDTO.
:type container_port_list: list[:class:`huaweicloudsdkiotedge.v2.ContainerPortDTO`]
"""
self._container_port_list = container_port_list
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ContainerConfigsDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
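# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): constructing the DTO
# and serializing it. The restart_policy value below is an illustrative
# assumption, not a value documented by the SDK.
if __name__ == '__main__':
    config = ContainerConfigsDTO(privileged=False,
                                 host_network=True,
                                 restart_policy='ALWAYS')
    print(config.to_dict())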
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
e5f657f8585b64e8ca97392387cbc8e5ea4a0f7d
|
4c9c2940ef3a07e2756fcceddf01acd384ebde01
|
/Python/[7 kyu] Ordered count of characters.py
|
da36015440ad03be1c025a725b9cca4d2ae3af47
|
[
"MIT"
] |
permissive
|
KonstantinosAng/CodeWars
|
7d3501a605f7ffecb7f0b761b5ffe414e2f1983a
|
157818ece648454e882c171a71b4c81245ab0214
|
refs/heads/master
| 2023-04-11T09:44:27.480064
| 2023-03-26T21:37:07
| 2023-03-26T21:37:07
| 245,296,762
| 6
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 578
|
py
|
# see https://www.codewars.com/kata/57a6633153ba33189e000074/solutions/python
def ordered_count(inp):
counts = {}
for letter in inp:
if letter not in counts:
counts[letter] = 1
else:
counts[letter] += 1
return [(key, value) for key, value in counts.items()]
tests = (
('abracadabra', [('a', 5), ('b', 2), ('r', 2), ('c', 1), ('d', 1)]),
('Code Wars', [('C', 1), ('o', 1), ('d', 1), ('e', 1), (' ', 1), ('W', 1), ('a', 1), ('r', 1), ('s', 1)])
)
for t in tests:
inp, exp = t
print(ordered_count(inp) == exp)
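# ----------------------------------------------------------------------
# Hedged alternative (not part of the original file): on Python 3.7+, dicts
# and collections.Counter preserve first-insertion order, so the kata reduces
# to a one-liner that passes the same tests.
from collections import Counter

def ordered_count_counter(inp):
    return list(Counter(inp).items())

for t in tests:
    inp, exp = t
    print(ordered_count_counter(inp) == exp)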
|
[
"kwstantinos.agelopoulos@outlook.com"
] |
kwstantinos.agelopoulos@outlook.com
|
a80cf6d1ddfc46a4bc219908bc8145a82db73edb
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_naturalism.py
|
8c5e9015f19160ff616ae4d4cd686e3352b59c9f
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
# class header
class _NATURALISM():
def __init__(self,):
self.name = "NATURALISM"
self.definitions = [u'showing people and experiences as they really are, instead of suggesting that they are better than they really are or representing them in a fixed style: ']
self.parents = []
        self.children = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
bf5dc29023067b377e9be2c8a51b47247ca9a81a
|
5da5473ff3026165a47f98744bac82903cf008e0
|
/packages/google-cloud-dialogflow-cx/samples/generated_samples/dialogflow_v3_generated_environments_deploy_flow_async.py
|
ade1585fbfce343c1ef8b1d490219dca511c0ddb
|
[
"Apache-2.0"
] |
permissive
|
googleapis/google-cloud-python
|
ed61a5f03a476ab6053870f4da7bc5534e25558b
|
93c4e63408c65129422f65217325f4e7d41f7edf
|
refs/heads/main
| 2023-09-04T09:09:07.852632
| 2023-08-31T22:49:26
| 2023-08-31T22:49:26
| 16,316,451
| 2,792
| 917
|
Apache-2.0
| 2023-09-14T21:45:18
| 2014-01-28T15:51:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,997
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeployFlow
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow-cx
# [START dialogflow_v3_generated_Environments_DeployFlow_async]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dialogflowcx_v3
async def sample_deploy_flow():
# Create a client
client = dialogflowcx_v3.EnvironmentsAsyncClient()
# Initialize request argument(s)
request = dialogflowcx_v3.DeployFlowRequest(
environment="environment_value",
flow_version="flow_version_value",
)
# Make the request
operation = client.deploy_flow(request=request)
print("Waiting for operation to complete...")
response = (await operation).result()
# Handle the response
print(response)
# [END dialogflow_v3_generated_Environments_DeployFlow_async]
|
[
"noreply@github.com"
] |
googleapis.noreply@github.com
|
79afdf13c61a200d338ede0d864a956c63fabe3f
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/little_person_or_time/year/be_long_woman/part_and_thing/same_fact.py
|
4a49f93faba8dbcc80ef785b249febee2ff44e24
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
#! /usr/bin/env python
def public_company(str_arg):
life_or_long_week(str_arg)
print('tell_part')
def life_or_long_week(str_arg):
print(str_arg)
if __name__ == '__main__':
public_company('want_next_thing')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
dce33266677a3e01c6ff99c2c720c7dfc65d296c
|
d7d7873d0bea9185a252916e3599b33e301d394c
|
/setup.py
|
8f0a378e044d453b35d69a16563e88fab08a6dcc
|
[] |
no_license
|
KennethJHan/pip_test
|
b16a3248a50025075cc3db916d07ee9761cc9b9f
|
89e957d7059e303e5b640a1f2e514c437b616c10
|
refs/heads/main
| 2023-01-12T02:00:35.976500
| 2020-11-18T07:36:52
| 2020-11-18T07:36:52
| 313,856,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="gitandpip",
version="0.0.1",
author="kenneth joohyun han",
author_email="kenneth.jh.han@snu.ac.kr",
description="It's pip... with git.",
long_description=long_description,
url="https://github.com/KennethJHan/pip_test",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
[
"kenneth.jh.han@gmail.com"
] |
kenneth.jh.han@gmail.com
|
1d477bdc2d24efe805ae12ada9589a200b99ac7d
|
f2658c4bd7f833ace25ac2b63e88317b05f4602d
|
/2017 July/2017-July-11/st_rdf_test/model2/RelationsConstruction.py
|
80db921a3a777c4028c6f12a17dbc2aa3c535f55
|
[] |
no_license
|
xiaochao00/telanav_diary
|
e4c34ac0a14b65e4930e32012cc2202ff4ed91e2
|
3c583695e2880322483f526c98217c04286af9b2
|
refs/heads/master
| 2022-01-06T19:42:55.504845
| 2019-05-17T03:11:46
| 2019-05-17T03:11:46
| 108,958,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,410
|
py
|
#-------------------------------------------------------------------------------
# Name: RelationsConstruction model
# Purpose:     this model is used to map the
# columns: [ ]
#
# Author: rex
#
# Created: 2016/01/20
# Copyright: (c) rex 2016
# Licence: <your licence>
#-------------------------------------------------------------------------------
from record import Record
from constants import *
import os
import sys
import datetime
import json
ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),"..")
GLOBAL_KEY_PREFIX = "relations_construction_"
CSV_SEP = '`'
LF = '\n'
#(key, category, function)
STATISTIC_KEYS = (
("type",False,"type"),
)
class RelationsConstruction(Record):
def __init__(self, region):
Record.__init__(self)
self.dump_file = os.path.join(ROOT_DIR, "temporary", self.__class__.__name__)
self.stat = {}
self.region = region
def dump2file(self):
cmd = "SELECT \
DISTINCT(rc.condition_id), \
rc.condition_type \
FROM \
public.rdf_condition AS rc LEFT JOIN public.rdf_nav_strand AS rns ON rns.nav_strand_id=rc.nav_strand_id \
LEFT JOIN public.rdf_nav_link AS rnl ON rns.link_id = rnl.link_id \
WHERE rc.condition_type='3' AND rnl.iso_country_code IN (%s)"%(REGION_COUNTRY_CODES(self.region, GLOBAL_KEY_PREFIX))
        print(cmd)
self.cursor.copy_expert("COPY (%s) TO STDOUT DELIMITER '`'"%(cmd),open(self.dump_file,"w"))
    def get_statistic(self):
        try:
            self.dump2file()
        except Exception:
            print("Oops! Some table or schema doesn't exist! Please check the SQL above")
            return {}
processcount = 0
with open(self.dump_file, "r",1024*1024*1024) as csv_f:
for line in csv_f:
line = line.rstrip()
line_p = line.split(CSV_SEP)
if len(line_p) < 1:
continue
self.__statistic(line_p)
processcount += 1
                if processcount % 5000 == 0:
                    print("\rProcess index [ " + str(processcount) + " ]", end="")
        print("\rProcess index [ " + str(processcount) + " ]", end="")
# write to file
with open(os.path.join(ROOT_DIR, "output", "stat", self.__class__.__name__), 'w') as stf:
stf.write(json.dumps(self.stat))
return self.stat
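    # __statistic dispatches through Python name mangling: getattr resolves
    # the private helper _RelationsConstruction__get_<name> for each key in
    # STATISTIC_KEYS (here, __get_type).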
    def __statistic(self, line):
        for keys in STATISTIC_KEYS:
            try:
                getattr(self, '_RelationsConstruction__get_' + keys[2])(keys, line)
            except Exception:
                print("The statistic [ %s ] didn't exist" % (keys[2]))
                print("Unexpected error:[ RelationsConstruction.py->__statistic] " + str(sys.exc_info()))
    def __count(self, key):
        if key in self.stat:
            self.stat[key] += 1
        else:
            self.stat[key] = 1
    # all statistic methods
    def __get_type(self, keys, line):
        if r'\N' != line[0]:  # PostgreSQL COPY writes NULL as the two characters \N
            self.__count("%s%s" % (GLOBAL_KEY_PREFIX, keys[0]))
if __name__ == "__main__":
# use to test this model
bg = datetime.datetime.now()
stat = RelationsConstruction('na').get_statistic()
keys = stat.keys()
print "==>"
print "{%s}"%(",".join(map(lambda px: "\"%s\":%s"%(px,stat[px]) ,keys)))
print "<=="
ed = datetime.datetime.now()
print "Cost time:"+str(ed - bg)
|
[
"1363180272@qq.com"
] |
1363180272@qq.com
|
9f3a4c72756e26bb17b1fe4a87c755b5e04cd441
|
ab174d6a1c5effdaab4a49015987c44909680792
|
/p4/solve.py
|
cf24db71cb7964f30b8b21b561e3433d28b73124
|
[] |
no_license
|
carrdelling/AdventOfCode2018
|
2b26ed6cae8e48f473243e156d528b17fcb71584
|
c42f29d684ca7fb1954c3c1d45031e837d8c818a
|
refs/heads/master
| 2022-01-14T10:01:14.460444
| 2022-01-02T16:35:59
| 2022-01-02T16:35:59
| 160,434,068
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,569
|
py
|
import datetime as dt
from collections import defaultdict, Counter
log = []
with open('input_data') as in_f:
for row in in_f:
timestamp, action = row.strip().split(']')
_time = dt.datetime.strptime(timestamp[1:], "%Y-%m-%d %H:%M")
log.append((_time, action.strip()))
log.sort()
guard_id = None
start = None
sleep_time = None
sum_sleep = defaultdict(int)
sleep_periods = defaultdict(list)
for _time, action in log:
if 'Guard' in action:
guard_id = action.split()[1]
start = None
if 'falls' in action:
start = _time
if 'wakes' in action:
sleep_time = int((_time - start).total_seconds() / 60.0)
start_minute = start.minute
sum_sleep[guard_id] += sleep_time
sleep_periods[guard_id].append([start_minute + i for i in range(sleep_time)])
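# Part 1: the guard with the most total minutes asleep, scored by the minute
# they are most often asleep.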
lazy_guard = sorted(sum_sleep.items(), key=lambda x: -x[1])[0]
sleep_pattern = Counter(minute for night in sleep_periods[lazy_guard[0]] for minute in night)
quiet_minute = sleep_pattern.most_common(1)[0][0]
plan = int(lazy_guard[0][1:]) * quiet_minute
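# Part 2: across all guards, find the (guard, minute) pair that is asleep on
# the most nights.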
all_quiet_minutes = []
for guard, sleep_patterns in sleep_periods.items():
sleep_pattern = Counter(minute for night in sleep_patterns for minute in night)
quiet_minute, times = sleep_pattern.most_common(1)[0]
all_quiet_minutes.append((guard, quiet_minute, times))
laziest_guard, quiet_minute, zzz_times = sorted(all_quiet_minutes, key=lambda x: -x[2])[0]
second_plan = int(laziest_guard[1:]) * quiet_minute
print(f'P4-1: {plan}')
print(f'P4-2: {second_plan}')
|
[
"carrdelling@gmail.com"
] |
carrdelling@gmail.com
|
a3f2a5a005d26ab9af467662fd50ff955da9a329
|
381612e57ef807e573b40b2dfaf062c8fe7a43f7
|
/nesi/softbox/api/models/route_models.py
|
7aea30390ad3f30d7155b1f369e6370d70560810
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
zcf900/NESi
|
1635a405660bb9390843468f34105dd2ef45bd75
|
0db169dd6378fbd097380280cc41440e652de19e
|
refs/heads/master
| 2023-01-31T23:21:02.799923
| 2020-12-18T13:37:43
| 2020-12-18T13:37:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 762
|
py
|
# This file is part of the NESi software.
#
# Copyright (c) 2020
# Original Software Design by Ilya Etingof <https://github.com/etingof>.
#
# Software adapted by inexio <https://github.com/inexio>.
# - Janis Groß <https://github.com/unkn0wn-user>
# - Philip Konrath <https://github.com/Connyko65>
# - Alexander Dincher <https://github.com/Dinker1996>
#
# License: https://github.com/inexio/NESi/LICENSE.rst
import uuid
from nesi.softbox.api import db
class Route(db.Model):
id = db.Column(db.Integer(), primary_key=True)
dst = db.Column(db.String(23))
gw = db.Column(db.String(23))
metric = db.Column(db.Integer(), default=1)
box_id = db.Column(db.Integer, db.ForeignKey('box.id'))
sub_mask = db.Column(db.Integer(), default=None)
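# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original file); it assumes the usual
# Flask-SQLAlchemy app/session setup that NESi provides elsewhere:
#     route = Route(dst='10.0.0.0/24', gw='192.168.1.1', metric=1, box_id=1)
#     db.session.add(route)
#     db.session.commit()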
|
[
"janis.gross.jg@gmail.com"
] |
janis.gross.jg@gmail.com
|
208410d3e358a10f563e5f103349fd22130cf43d
|
aae3d55b9d2004e04c5917a31408384a4269a425
|
/astrodash/save_binned_templates_as_arrays.py
|
46d645225b46ac3a4d4829533285989d5f651758
|
[
"MIT"
] |
permissive
|
daniel-muthukrishna/astrodash
|
5b1ee330d2ae2d9cc43f5c52d0765359aa40673f
|
acc241ad73133894d93ef16733cf0f1fb4ca7b87
|
refs/heads/master
| 2023-04-04T03:27:07.480846
| 2023-03-22T17:02:08
| 2023-03-22T17:02:08
| 75,250,754
| 23
| 12
|
MIT
| 2019-04-26T15:27:30
| 2016-12-01T03:19:18
|
Python
|
UTF-8
|
Python
| false
| false
| 4,018
|
py
|
import numpy as np
import pickle
import os
from astrodash.create_arrays import AgeBinning
from astrodash.helpers import temp_list
from astrodash.combine_sn_and_host import BinTemplate
def create_sn_and_host_arrays(snTemplateDirectory, snTempFileList, galTemplateDirectory, galTempFileList, paramsFile):
snTemplates = {}
galTemplates = {}
snList = temp_list(snTempFileList)
galList = temp_list(galTempFileList)
with open(paramsFile, 'rb') as f:
pars = pickle.load(f)
w0, w1, nw, snTypes, galTypes, minAge, maxAge, ageBinSize = pars['w0'], pars['w1'], pars['nw'], pars['typeList'], \
pars['galTypeList'], pars['minAge'], pars['maxAge'], \
pars['ageBinSize']
ageBinning = AgeBinning(minAge, maxAge, ageBinSize)
ageLabels = ageBinning.age_labels()
# Create dictionary of dictionaries for type and age of SN
for snType in snTypes:
snTemplates[snType] = {}
for ageLabel in ageLabels:
snTemplates[snType][ageLabel] = {}
snTemplates[snType][ageLabel]['snInfo'] = []
snTemplates[snType][ageLabel]['names'] = []
for galType in galTypes:
galTemplates[galType] = {}
galTemplates[galType]['galInfo'] = []
galTemplates[galType]['names'] = []
for snFile in snList:
snBinTemplate = BinTemplate(snTemplateDirectory + snFile, 'sn', w0, w1, nw)
nAges = snBinTemplate.nCols
ages = snBinTemplate.ages
snType = snBinTemplate.tType
filename = snBinTemplate.filename
for ageIdx in range(nAges):
age = ages[ageIdx]
if minAge < age < maxAge:
ageBin = ageBinning.age_bin(age)
ageLabel = ageLabels[ageBin]
snInfo = snBinTemplate.bin_template(ageIdx)
snTemplates[snType][ageLabel]['snInfo'].append(snInfo)
snTemplates[snType][ageLabel]['names'].append("%s_%s" % (filename, age))
print("Reading {} {} out of {}".format(snFile, ageIdx, nAges))
for galFile in galList:
galBinTemplate = BinTemplate(galTemplateDirectory + galFile, 'gal', w0, w1, nw)
galType = galBinTemplate.tType
filename = galBinTemplate.filename
galInfo = galBinTemplate.bin_template()
galTemplates[galType]['galInfo'].append(galInfo)
galTemplates[galType]['names'].append(filename)
print("Reading {}".format(galFile))
# Convert lists in dictionaries to numpy arrays
for snType in snTypes:
for ageLabel in ageLabels:
snTemplates[snType][ageLabel]['snInfo'] = np.array(snTemplates[snType][ageLabel]['snInfo'])
snTemplates[snType][ageLabel]['names'] = np.array(snTemplates[snType][ageLabel]['names'])
for galType in galTypes:
galTemplates[galType]['galInfo'] = np.array(galTemplates[galType]['galInfo'])
galTemplates[galType]['names'] = np.array(galTemplates[galType]['names'])
return snTemplates, galTemplates
def save_templates():
scriptDirectory = os.path.dirname(os.path.abspath(__file__))
parameterFile = 'models_v06/models/zeroZ/training_params.pickle'
snTemplateDirectory = os.path.join(scriptDirectory, "../templates/training_set/")
snTempFileList = snTemplateDirectory + 'templist.txt'
galTemplateDirectory = os.path.join(scriptDirectory, "../templates/superfit_templates/gal/")
galTempFileList = galTemplateDirectory + 'gal.list'
saveFilename = 'models_v06/models/sn_and_host_templates.npz'
snTemplates, galTemplates = create_sn_and_host_arrays(snTemplateDirectory, snTempFileList, galTemplateDirectory,
galTempFileList, parameterFile)
np.savez_compressed(saveFilename, snTemplates=snTemplates, galTemplates=galTemplates)
return saveFilename
if __name__ == "__main__":
unCombinedTemplates = save_templates()
|
[
"daniel.muthukrishna@gmail.com"
] |
daniel.muthukrishna@gmail.com
|
4db43b3627ce71b65078c3610a3ad71319c4c739
|
a512b8893b0d2de827d6292e810f3a98b41e132c
|
/Week4/Day6/Solutions/Python/prog3.py
|
e8ebb18c93788e86255dbf1b31875bd34116bfa1
|
[] |
no_license
|
Audarya07/Daily-Flash-Codes
|
d771079fd0d470e2d3e05679f17f32fb64b4f426
|
cf96ca2b1676b038e243fac67be778381492ffeb
|
refs/heads/master
| 2022-11-06T15:37:47.180729
| 2020-06-25T16:20:55
| 2020-06-25T16:20:55
| 274,960,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
for num in range(1, 101):
    divisor_sum = 0
    for i in range(1, num):
        if num % i == 0:
            divisor_sum += i
    # print every number in 1..100 that is not a perfect number
    if divisor_sum != num:
        print(num, end=" ")
print()
|
[
"audiuttarwar2000@gmail.com"
] |
audiuttarwar2000@gmail.com
|
fb30f63ea2395b0dcca9405b88c567a7a4bb60d6
|
e7dd192123f404367e9623a357366643742fa723
|
/kubernetes/test/test_scheduling_v1beta1_api.py
|
700a3b463994104380586917c18869a0959fe020
|
[
"Apache-2.0"
] |
permissive
|
itholic/python
|
1772725582f28af445efb233eca6c9139da3ae49
|
dffe577a062e17057270ae80fa677ffd83e9d183
|
refs/heads/master
| 2020-09-12T08:59:16.847326
| 2019-11-15T20:40:32
| 2019-11-15T20:40:32
| 222,375,164
| 0
| 0
|
Apache-2.0
| 2019-11-18T06:05:45
| 2019-11-18T06:05:43
| null |
UTF-8
|
Python
| false
| false
| 1,718
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.api.scheduling_v1beta1_api import SchedulingV1beta1Api # noqa: E501
from kubernetes.client.rest import ApiException
class TestSchedulingV1beta1Api(unittest.TestCase):
"""SchedulingV1beta1Api unit test stubs"""
def setUp(self):
self.api = kubernetes.client.api.scheduling_v1beta1_api.SchedulingV1beta1Api() # noqa: E501
def tearDown(self):
pass
def test_create_priority_class(self):
"""Test case for create_priority_class
"""
pass
def test_delete_collection_priority_class(self):
"""Test case for delete_collection_priority_class
"""
pass
def test_delete_priority_class(self):
"""Test case for delete_priority_class
"""
pass
def test_get_api_resources(self):
"""Test case for get_api_resources
"""
pass
def test_list_priority_class(self):
"""Test case for list_priority_class
"""
pass
def test_patch_priority_class(self):
"""Test case for patch_priority_class
"""
pass
def test_read_priority_class(self):
"""Test case for read_priority_class
"""
pass
def test_replace_priority_class(self):
"""Test case for replace_priority_class
"""
pass
if __name__ == '__main__':
unittest.main()
|
[
"haoweic@google.com"
] |
haoweic@google.com
|
c288c52f8ee60885fe587a639279c2976ed3966e
|
9d67cd5f8d3e0ffdd4334a6b9b67c93f8deca100
|
/configs/example_old_map_1228.py
|
5fa96132f84104c6dbd4a5e0ca228a05b0c82a8a
|
[] |
no_license
|
SiyuanLee/caps
|
0c300a8e5a9a661eca4b2f59cd38125ddc35b6d3
|
476802e18ca1c7c88f1e29ed66a90c350aa50c1f
|
refs/heads/master
| 2021-06-20T22:48:16.230354
| 2021-02-22T13:21:57
| 2021-02-22T13:21:57
| 188,695,489
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,754
|
py
|
"""
This is the example config file
larger lr
beta no bias
lower explr
comment: too small!
not target beta
"""
import numpy as np
# More one-char representation will be added in order to support
# other objects.
# The following a=10 is an example although it does not work now
# as I have not included a '10' object yet.
a = 10
# This is the map array that represents the map
# You have to fill the array into a (m x n) matrix with all elements
# not None. A strange shape of the array may cause malfunction.
# Currently available object indices are # they can fill more than one element in the array.
# 0: nothing
# 1: wall
# 2: ladder
# 3: coin
# 4: spike
# 5: triangle -------source
# 6: square ------ source
# 7: coin -------- target
# 8: princess -------source
# 9: player # elements(possibly more than 1) filled will be selected randomly to place the player
# unsupported indices will work as 0: nothing
map_array = [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 5, 1, 0, 0, 0, 6, 0, 1],
[1, 9, 9, 9, 1, 9, 9, 9, 9, 9, 1],
[1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1],
[1, 0, 2, 0, 0, 0, 2, 0, 7, 0, 1],
[1, 0, 2, 0, 0, 0, 2, 0, 0, 0, 1],
[1, 9, 2, 9, 9, 9, 2, 9, 9, 9, 1],
[1, 2, 1, 1, 1, 2, 1, 1, 1, 2, 1],
[1, 2, 0, 1, 0, 2, 0, 1, 0, 2, 1],
[1, 2, 9, 1, 9, 2, 8, 1, 9, 2, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
# set to true -> win when touching the object
# 0, 1, 2, 3, 4, 9 are not possible
end_game = {
8: True,
}
rewards = {
"positive": 0, # when collecting a coin
"win": 1, # endgame (win)
"negative": -25, # endgame (die)
"tick": 0 # living
}
######### dqn only #########
# ensure correct import
import os
import sys
__file_path = os.path.abspath(__file__)
__dqn_dir = '/'.join(str.split(__file_path, '/')[:-2]) + '/'
sys.path.append(__dqn_dir)
__cur_dir = '/'.join(str.split(__file_path, '/')[:-1]) + '/'
from dqn_utils import PiecewiseSchedule
# load the random sampled obs
import pickle
pkl_file = __cur_dir + 'eval_obs_array_random_old_map.pkl'
with open(pkl_file, 'rb') as f:
eval_obs_array = pickle.loads(f.read())
def seed_func():
return np.random.randint(0, 1000)
num_timesteps = 2e6 # 40 epoch
learning_freq = 4
# training iterations to go
num_iter = num_timesteps / learning_freq
# piecewise learning rate
lr_multiplier = 1.0
learning_rate = PiecewiseSchedule([
(0, 1e-4 * lr_multiplier),
(num_iter / 10, 1e-4 * lr_multiplier),
(num_iter / 2, 5e-5 * lr_multiplier),
], outside_value=5e-5 * lr_multiplier)
learning_rate_term = PiecewiseSchedule([
(0, 2e-4 * lr_multiplier),
(num_iter / 40, 1e-3 * lr_multiplier),
(num_iter / 20, 1e-2 * lr_multiplier),
(num_iter / 10, 5e-2 * lr_multiplier),
(num_iter * 3 / 4, 5e-3 * lr_multiplier),
(num_iter * 7 / 8, 5e-4 * lr_multiplier),
], outside_value=5e-4 * lr_multiplier)
# piecewise exploration rate
exploration = PiecewiseSchedule([
(0, 1.0),
(num_iter / 40, 0.97),
(num_iter * 3 / 8, 0.7),
(num_iter * 7 / 8, 0.05),
], outside_value=0.05)
######### transfer only #########
import tensorflow as tf
source_dirs = [
# an old map policy
'/home/beeperman/Project/ple-monsterkong/examples/dqn_new/logs/old_map_mod_target_1c_12_07_17_22:15:51/dqn',
'/home/beeperman/Project/ple-monsterkong/examples/dqn_new/logs/old_map_mod_target_2_12_13_17_19:12:07/dqn',
#'/home/beeperman/Project/ple-monsterkong/examples/dqn_new/logs/old_map_mod_target_3_12_13_17_19:13:03/dqn',
'/home/beeperman/Project/ple-monsterkong/examples/dqn_new/logs/old_map_mod_target_4_12_23_17_16:20:56/dqn',
]
transfer_config = {
'source_dirs': source_dirs,
'online_q_omega': False, # default false off policy with experience replay
'q_omega_uniform_sample': False, # default false
'four_to_two': True, # default false frame_history_len must be 4!
'source_noop': False, # default false (false means source policies HAS noop action)
'no_share_para': True, # default false set to true to stop sharing parameter between q network and q_omega/term
'xi': 0.005, # default none you may specify a constant. none means xi = 0.5 (q_omega_val - q_omega_second_max)
'target_beta': False, # default false (true means using target beta)
'termination_stop': True, # default false train cnn when training beta online
'learning_rate_term': learning_rate_term,
'beta_no_bias': True, # default false prune bias for termination function
}
dqn_config = {
'seed': seed_func, # will override game settings
'num_timesteps': num_timesteps,
'replay_buffer_size': 1000000,
'batch_size': 32,
'gamma': 0.99,
'learning_starts': 50000,
'learning_freq': learning_freq,
'frame_history_len': 4,
'target_update_freq': 10000,
'grad_norm_clipping': 10,
'learning_rate': learning_rate,
'exploration': exploration,
'eval_obs_array': eval_obs_array, # TODO: construct some eval_obs_array
'room_q_interval': 1e5, # q_vals will be evaluated every room_q_interval steps
'epoch_size': 5e4, # you decide any way
'config_name': str.split(__file_path, '/')[-1].replace('.py', ''), # the config file name
'transfer_config': transfer_config,
}
map_config = {
'map_array': map_array,
'rewards': rewards,
'end_game': end_game,
'init_score': 0,
'init_lives': 1, # please don't change, not going to work
# configs for dqn
'dqn_config': dqn_config,
# work automatically only for aigym wrapped version
'fps': 1000,
'frame_skip': 1,
'force_fps': True, # set to true to make the game run as fast as possible
'display_screen': True,
'episode_length': 1200,
'episode_end_sleep': 0., # sec
}
|
[
"lisiyuan@bupt.edu.cn"
] |
lisiyuan@bupt.edu.cn
|
07ff6980884d70cacc711dfc287bfbf96c7c733e
|
f4b694982027ac362de1e9d6755f2943d0355a06
|
/DECSKS-03 -- Convergence of FD formulation of high order CS/pyfiles/plots_df9_comparison.py
|
6ef2da6f2b41cabe799772b8df49ec3244e370d7
|
[] |
no_license
|
dsirajud/IPython-notebooks
|
55275e44191c16f5393571522787993f931cfd98
|
6ad9d978c611558525fc9d716af101dc841a393b
|
refs/heads/master
| 2021-01-15T15:33:57.119172
| 2016-07-13T20:08:29
| 2016-07-13T20:08:29
| 35,054,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
import matplotlib.pyplot as plt
import numpy as np
from convergence_routines import *
Nx = 2488
x, dx, L = domain(_Nx = Nx)
L2error, df9_approx = FD_derivative_matrix_formulation(_dn = 9, _p = 3, _Nx = Nx)
df9_exact = df9(x)
plt.plot(x, df9_exact, label = 'exact df9', linewidth = 3)
plt.plot(x, df9_approx, label = 'approx df9', linewidth = 1, color = "red")
# compare with the function whose derivative this is
df8_exact = df8(x)
plt.plot(x, df8_exact * np.abs(np.min(df9_approx)) / np.abs(np.min(df8_exact)), label = 'exact df8', linewidth = 1, color = "cyan")
plt.legend(loc = 'best')
plt.grid()
plt.show()
|
[
"sirajuddin@wisc.edu"
] |
sirajuddin@wisc.edu
|
d59800358316a58679932c187a9225e40f43364e
|
b08d42933ac06045905d7c005ca9c114ed3aecc0
|
/src/learningCurve/leaveOneOut/lrClassifierF.py
|
36c020e785dfac7d8a00613b3398404787143651
|
[] |
no_license
|
TanemuraKiyoto/PPI-native-detection-via-LR
|
d148d53f5eb60a4dda5318b371a3048e3f662725
|
897e7188b0da94e87126a4acc0c9a6ff44a64574
|
refs/heads/master
| 2022-12-05T11:59:01.014309
| 2020-08-10T00:41:17
| 2020-08-10T00:41:17
| 225,272,083
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,134
|
py
|
# 9 September 2019
# Kiyoto Aramis Tanemura
# I modified the rfClassifier.py script to implement a logistic regression classifier. This classifier runs faster than the random forest classifier and Jun previously observed comparable results between logistic regression and random forest classifiers for the protein folding system. Due to the lesser time cost, I may sample a greater hyperparameter space using the logistic regression classifier. If the sampling yields a region in which overfitting is not observed, then I can refine the search. If the results are similar to that of the random forest classifier, then I may have exhausted the dataset for generalizability.
# Modified 26 October 2019 by Kiyoto Aramis Tanemura. Apply logistic regression classifier to CASF-PPI dataset.
# Modified 2020-02-09 by KAT. Code generalized for public use on GitHub.
import pandas as pd
import numpy as np
import os
import json
import pickle
#from multiprocessing import Pool
from time import time
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import RandomizedSearchCV
from sklearn.preprocessing import StandardScaler
from random import shuffle, random
#os.chdir('/mnt/scratch/tanemur1/')
toc = time()
# Randomize input file orders
pathToInput = 'data/comparison_descriptors/'
pathToOutput = 'results/learningCurve/'
fileNames = [x for x in os.listdir(pathToInput) if '.csv' in x]
shuffle(fileNames) # note: shuffle is in-place. Do not assign to variable
# Specify training set fraction
train_fraction = 0.99
if len(fileNames) * train_fraction == int(len(fileNames) * train_fraction):
train_file_number = int(len(fileNames) * train_fraction)
else:
train_file_number = int(len(fileNames) * train_fraction + 1)
x_train = pd.DataFrame()
y_train = pd.DataFrame()
# Read individual csv for comparison descriptors, append to train_data, and partition to x_train, y_train
fileNamesWithPath = [pathToInput + fileName for fileName in fileNames]
def read_csv(filePath):
return pd.read_csv(filePath, index_col = 0)
print('begin read training set')
#with Pool(np.min([train_file_number, 28])) as p:
# train_dataList = list(p.map(read_csv, fileNamesWithPath[:train_file_number]))
train_dataList = list(map(read_csv, fileNamesWithPath[:train_file_number]))
print('begin append DF | ', (time() - toc) / 60, ' min')
# Append DataFrames into one. While loop used to reduce append operations. Iteratively, DFs in a list are appended
# to the following DF.
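# After one merging pass, index 2*i holds the merged frame and index 2*i+1 is
# stale; deleting index j+1 for j = 0..number-1 (the list shrinks after each
# delete) removes exactly those stale frames. Note: pd.DataFrame.append was
# removed in pandas 2.0; pd.concat is the modern replacement.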
while len(train_dataList) != 1:
number = int(len(train_dataList) / 2)
for i in range(number):
train_dataList[2 * i] = train_dataList[2 * i].append(train_dataList[2 * i + 1], sort = True)
for j in range(number):
del train_dataList[j + 1]
x_train = train_dataList[0]
del train_dataList
print('train_data dimensions', x_train.shape, ' | ', (time() - toc) / 60, ' min')
y_train = x_train['class']
x_train = x_train.drop('class', axis = 1) # x_train contains only nonbonding descriptors
feature_names = x_train.columns
scaler = StandardScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
y_train = y_train.values
print('Dimensions x_train ', x_train.shape, ' | y_train', y_train.shape)
# Define a logistic regression classifier along with pertinent hyperparameters. Here, default values are used.
clf = LogisticRegression(penalty='l2', verbose = 1)
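# The helper below draws values log-uniformly between minVal and maxVal
# (uniform in log2 space), the usual way to sample scale-type hyperparameters
# such as C.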
def sampleRationalVals(minVal, maxVal):
return 2 ** (random() * (np.log2(maxVal) - np.log2(minVal)) + np.log2(minVal))
def sampleRationalList(minVal, maxVal):
theList = []
for i in range(int(2 * np.log2(maxVal - minVal) + 1)):
theVal = sampleRationalVals(minVal, maxVal)
theList.append(theVal)
return theList
parameters = {
# include any hyperparameters to sample. Otherwise, leave empty to perform five fold cross validation with default values. For example:
# 'C': sampleRationalList(0.001, 1000),
# 'solver': ['newton-cg', 'lbfgs', 'sag','saga']
}
print('begin RandomizedSearchCV | ' + str((time() - toc)/60) + ' mins')
randomized_search = RandomizedSearchCV(estimator = clf, param_distributions = parameters, n_iter = 1, scoring = 'accuracy', refit = True, cv = 5, verbose = 1, n_jobs = 1, pre_dispatch = 'n_jobs', return_train_score=True)
randomized_search.fit(x_train, y_train)
print('begin output | ', (time() - toc) / 60 / 60, ' hours')
tic = time()
with open(pathToOutput + 'bestParamF.json', 'w') as g:
json.dump(randomized_search.best_estimator_.get_params(), g)
with open(pathToOutput + 'modelF.pkl', 'wb') as h:
pickle.dump(randomized_search, h)
with open(pathToOutput + 'trainingSetF.txt', 'w') as i:
i.write('Training set:\n')
for pdbID in fileNames[:train_file_number]:
i.write(pdbID + '\n')
i.write('\nJob time: ' + str((tic - toc) / 60 / 60) + ' hours')
with open(pathToOutput + 'standardScalerF.pkl', 'wb') as j:
pickle.dump(scaler, j)
bestCoefficient = randomized_search.best_estimator_.coef_
coefDf = pd.DataFrame(bestCoefficient, columns = feature_names)
with open(pathToOutput + 'coefficientsF.csv', 'w') as f:
coefDf.to_csv(f)
|
[
"tanemur1@msu.edu"
] |
tanemur1@msu.edu
|
18a28d5e4e839646f65336d3d49006c5a957223d
|
de0584cdd6a0b452efa3c8bd0e1e43286853c814
|
/preprocess/huff/clean_huffpost.py
|
a2a2d91bc756e5a1c5826ea7fe1277733daea635
|
[] |
no_license
|
johnsonice/triplet-loss
|
a325ecd229b5346aaca4cb0556bbc18e9e4eae26
|
71c13dfa7631ec93c564d9dc9da4fcf667eb9500
|
refs/heads/master
| 2023-08-24T17:49:01.593415
| 2021-10-23T16:27:26
| 2021-10-23T16:27:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,823
|
py
|
import json
from random import shuffle
#cleaning up text
import re
def get_only_chars(line):
clean_line = ""
line = line.replace("’", "")
line = line.replace("'", "")
line = line.replace("-", " ") #replace hyphens with spaces
line = line.replace("\t", " ")
line = line.replace("\n", " ")
line = line.lower()
for char in line:
if char in 'qwertyuiopasdfghjklzxcvbnm ':
clean_line += char
else:
clean_line += ' '
clean_line = re.sub(' +',' ',clean_line) #delete extra spaces
    if clean_line.startswith(' '):  # startswith is safe even if the line emptied out
        clean_line = clean_line[1:]
return clean_line
def clean_dataset(file_path, output_path_train, output_path_test):
lines = open(file_path, 'r').readlines()
category_to_headlines = {}
for line in lines:
d = json.loads(line[:-1])
category = d['category']
headline = d['headline']
if len(headline) > 10:
if category in category_to_headlines:
category_to_headlines[category].append(headline)
else:
category_to_headlines[category] = [headline]
category_to_id = {category: i for i, category in enumerate(list(sorted(list(category_to_headlines.keys()))))}
train_writer = open(output_path_train, 'w')
test_writer = open(output_path_test, 'w')
for category, headlines in category_to_headlines.items():
_id = category_to_id[category]
shuffle(headlines)
test_headlines = headlines[:300]
train_headlines = headlines[300:1000]
for train_headline in train_headlines:
train_writer.write('\t'.join([str(_id), get_only_chars(train_headline)]) + '\n')
for test_headline in test_headlines:
test_writer.write('\t'.join([str(_id), get_only_chars(test_headline)]) + '\n')
if __name__ == "__main__":
clean_dataset('News_Category_dataset_v2.json', 'huffpost/train.txt', 'huffpost/test.txt')
|
[
"jason.weng.wei@gmail.com"
] |
jason.weng.wei@gmail.com
|
3d725712e172cee8591768772262237bc21dcaae
|
830465731dfda87b4141546262f20d74c29297bf
|
/GENERAL/RADARCTF/Logo/sol.py
|
d32c2f2933fdf57751dd6485d243603bc52c9566
|
[] |
no_license
|
jchen8tw-research/CTF
|
f559d7ca0e16a730335b11caeeae208c42e8bf17
|
f49615c24437a9cc6a2c20d6b30cb5abf7a32b71
|
refs/heads/master
| 2023-03-17T12:29:08.630613
| 2021-03-23T06:31:26
| 2021-03-23T06:31:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
py
|
import os
import binascii
import struct
misc = open("logo.png","rb").read()
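# PNG layout: bytes 12-16 hold the IHDR chunk type, 16-20 the width and 20-24
# the height; the IHDR CRC covers the type plus the 13 data bytes (12:29).
# The loop brute-forces the 4-byte width until the CRC matches the 0xB65879B0
# taken from the file.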
for i in range(1024):
data = misc[12:16] + struct.pack('>i',i)+ misc[20:29]
crc32 = binascii.crc32(data) & 0xffffffff
    if crc32 == 0xB65879B0:
        print(i)
|
[
"cpr1014@gmail.com"
] |
cpr1014@gmail.com
|
a0f042399c854efeeae2f22745708993359d89e0
|
8a11814f757b22cacd89ae618265d6705393ba78
|
/amplify/agent/data/statsd.py
|
8c17a990d29c16671f7bda85bf50d173b786d17e
|
[
"BSD-2-Clause"
] |
permissive
|
ngonsol/nginx-amplify-agent
|
e763bfcc82cf103b4eb2ce49269dfccaec0cb9af
|
c711579208465578b03dda5db40ccc7dc8f31b81
|
refs/heads/master
| 2021-01-18T03:17:04.494068
| 2016-05-18T20:17:25
| 2016-05-18T20:17:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,307
|
py
|
# -*- coding: utf-8 -*-
import copy
import time
from collections import defaultdict
__author__ = "Mike Belov"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__credits__ = ["Mike Belov", "Andrei Belov", "Ivan Poluyanov", "Oleg Mamontov", "Andrew Alexeev", "Grant Hulegaard"]
__license__ = ""
__maintainer__ = "Mike Belov"
__email__ = "dedm@nginx.com"
class StatsdClient(object):
def __init__(self, address=None, port=None, interval=None, object=None):
# Import context as a class object to avoid circular import on statsd. This could be refactored later.
from amplify.agent.common.context import context
self.context = context
self.address = address
self.port = port
self.object = object
self.interval = interval
self.current = defaultdict(dict)
self.delivery = defaultdict(dict)
def average(self, metric_name, value):
"""
Same thing as histogram but without p95
:param metric_name: metric name
:param value: metric value
"""
if metric_name in self.current['average']:
self.current['average'][metric_name].append(value)
else:
self.current['average'][metric_name] = [value]
def timer(self, metric_name, value):
"""
        Histogram with 95th percentile
The algorithm is as follows:
Collect all the data samples for a period of time (commonly a day, a week, or a month).
Sort the data set by value from highest to lowest and discard the highest 5% of the sorted samples.
The next highest sample is the 95th percentile value for the data set.
:param metric_name: metric name
:param value: metric value
"""
if metric_name in self.current['timer']:
self.current['timer'][metric_name].append(value)
else:
self.current['timer'][metric_name] = [value]
def incr(self, metric_name, value=None, rate=None, stamp=None):
"""
Simple counter with rate
:param metric_name: metric name
:param value: metric value
:param rate: rate
:param stamp: timestamp (current timestamp will be used if this is not specified)
"""
timestamp = stamp or int(time.time())
if value is None:
value = 1
# new metric
if metric_name not in self.current['counter']:
self.current['counter'][metric_name] = [[timestamp, value]]
return
# metric exists
slots = self.current['counter'][metric_name]
last_stamp, last_value = slots[-1]
# if rate is set then check it's time
if self.interval and rate:
sample_duration = self.interval * rate
# write to current slot
if timestamp < last_stamp + sample_duration:
self.current['counter'][metric_name][-1] = [last_stamp, last_value + value]
else:
self.current['counter'][metric_name].append([last_stamp, value])
else:
self.current['counter'][metric_name][-1] = [last_stamp, last_value + value]
def agent(self, metric_name, value, stamp=None):
"""
Agent metrics
:param metric_name: metric
:param value: value
:param stamp: timestamp (current timestamp will be used if this is not specified)
"""
timestamp = stamp or int(time.time())
self.current['gauge'][metric_name] = [(timestamp, value)]
def gauge(self, metric_name, value, delta=False, prefix=False, stamp=None):
"""
Gauge
:param metric_name: metric name
:param value: metric value
:param delta: metric delta (applicable only if we have previous values)
:param stamp: timestamp (current timestamp will be used if this is not specified)
"""
timestamp = stamp or int(time.time())
if metric_name in self.current['gauge']:
if delta:
last_stamp, last_value = self.current['gauge'][metric_name][-1]
new_value = last_value + value
else:
new_value = value
self.current['gauge'][metric_name].append((timestamp, new_value))
else:
self.current['gauge'][metric_name] = [(timestamp, value)]
def flush(self):
if not self.current:
return
results = {}
delivery = copy.deepcopy(self.current)
self.current = defaultdict(dict)
# histogram
if 'timer' in delivery:
timers = {}
timestamp = int(time.time())
            for metric_name, metric_values in delivery['timer'].items():
if len(metric_values):
metric_values.sort()
length = len(metric_values)
timers['G|%s' % metric_name] = [[timestamp, sum(metric_values) / float(length)]]
timers['C|%s.count' % metric_name] = [[timestamp, length]]
timers['G|%s.max' % metric_name] = [[timestamp, metric_values[-1]]]
timers['G|%s.median' % metric_name] = [[timestamp, metric_values[int(round(length / 2 - 1))]]]
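                    # p95: discard the top 5% of the sorted samples; the next
                    # highest sample is the 95th percentile.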
timers['G|%s.pctl95' % metric_name] = [[timestamp, metric_values[-int(round(length * .05))]]]
results['timer'] = timers
# counters
if 'counter' in delivery:
counters = {}
            for k, v in delivery['counter'].items():
                # Aggregate all observed counters into a single record.
                last_stamp = v[-1][0]  # Use the most recent timestamp.
total_value = 0
for timestamp, value in v:
total_value += value
# Condense the list of lists 'v' into a list of a single element. Remember that we are using lists
# instead of tuples because we need mutability during self.incr().
counters['C|%s' % k] = [[last_stamp, total_value]]
results['counter'] = counters
# gauges
if 'gauge' in delivery:
gauges = {}
            for k, v in delivery['gauge'].items():
                # Aggregate all observed gauges into a single record.
                last_stamp = v[-1][0]  # Use the most recent timestamp.
total_value = 0
for timestamp, value in v:
total_value += value
# Condense list of tuples 'v' into a list of a single tuple using an average value.
gauges['G|%s' % k] = [(last_stamp, float(total_value)/len(v))]
results['gauge'] = gauges
# avg
if 'average' in delivery:
averages = {}
timestamp = int(time.time()) # Take a new timestamp here because it is not collected previously.
            for metric_name, metric_values in delivery['average'].items():
if len(metric_values):
length = len(metric_values)
averages['G|%s' % metric_name] = [[timestamp, sum(metric_values) / float(length)]]
results['average'] = averages
return {
'metrics': copy.deepcopy(results),
'object': self.object.definition
}
|
[
"dedm@nginx.com"
] |
dedm@nginx.com
|
0e37bf6580d3248893e8a1c0e5dd6d1ebbe57409
|
8efe56ee34c455a6b1336897f6d457acbc9c10f9
|
/tests/metarl/tf/models/test_cnn_model.py
|
6d8f599ade02b39db52ef608f933167b30287246
|
[
"MIT"
] |
permissive
|
neurips2020submission11699/metarl
|
ab18d11e708bf569d76cb2fab2bcce089badd111
|
ae4825d21478fa1fd0aa6b116941ea40caa152a5
|
refs/heads/master
| 2022-10-15T22:03:09.948673
| 2020-06-11T19:22:55
| 2020-06-11T19:30:58
| 268,410,657
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,567
|
py
|
import pickle
import numpy as np
import pytest
import tensorflow as tf
from metarl.tf.models import CNNModel
from metarl.tf.models import CNNModelWithMaxPooling
from tests.fixtures import TfGraphTestCase
class TestCNNModel(TfGraphTestCase):
def setup_method(self):
super().setup_method()
self.batch_size = 5
self.input_width = 10
self.input_height = 10
self.obs_input = np.ones(
(self.batch_size, self.input_width, self.input_height, 3))
input_shape = self.obs_input.shape[1:] # height, width, channel
self._input_ph = tf.compat.v1.placeholder(tf.float32,
shape=(None, ) + input_shape,
name='input')
# yapf: disable
@pytest.mark.parametrize('filters, in_channels, strides', [
(((32, (1, 1)),), (3, ), (1, )), # noqa: E122
(((32, (3, 3)),), (3, ), (1, )),
(((32, (3, 3)),), (3, ), (2, )),
(((32, (1, 1)), (64, (1, 1))), (3, 32), (1, 1)),
(((32, (3, 3)), (64, (3, 3))), (3, 32), (1, 1)),
(((32, (3, 3)), (64, (3, 3))), (3, 32), (2, 2)),
])
# yapf: enable
def test_output_value(self, filters, in_channels, strides):
model = CNNModel(filters=filters,
strides=strides,
name='cnn_model',
padding='VALID',
hidden_w_init=tf.constant_initializer(1),
hidden_nonlinearity=None)
outputs = model.build(self._input_ph)
output = self.sess.run(outputs,
feed_dict={self._input_ph: self.obs_input})
filter_sum = 1
# filter value after 3 layers of conv
for filter_iter, in_channel in zip(filters, in_channels):
filter_sum *= filter_iter[1][0] * filter_iter[1][1] * in_channel
height_size = self.input_height
width_size = self.input_width
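        # Expected output size of each VALID-padding conv layer:
        #     out = floor((in - kernel) / stride) + 1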
for filter_iter, stride in zip(filters, strides):
height_size = int((height_size - filter_iter[1][0]) / stride) + 1
width_size = int((width_size - filter_iter[1][1]) / stride) + 1
flatten_shape = height_size * width_size * filters[-1][0]
# flatten
expected_output = np.full((self.batch_size, flatten_shape),
filter_sum,
dtype=np.float32)
assert np.array_equal(output, expected_output)
# yapf: disable
@pytest.mark.parametrize(
'filters, in_channels, strides, pool_strides, pool_shapes',
[
(((32, (1, 1)), ), (3, ), (1, ), (1, 1), (1, 1)), # noqa: E122
(((32, (3, 3)), ), (3, ), (1, ), (2, 2), (1, 1)),
(((32, (3, 3)), ), (3, ), (1, ), (1, 1), (2, 2)),
(((32, (3, 3)), ), (3, ), (1, ), (2, 2), (2, 2)),
(((32, (3, 3)), ), (3, ), (2, ), (1, 1), (2, 2)),
(((32, (3, 3)), ), (3, ), (2, ), (2, 2), (2, 2)),
(((32, (1, 1)), (64, (1, 1))), (3, 32), (1, 1), (1, 1), (1, 1)),
(((32, (3, 3)), (64, (3, 3))), (3, 32), (1, 1), (1, 1), (1, 1)),
(((32, (3, 3)), (64, (3, 3))), (3, 32), (2, 2), (1, 1), (1, 1)),
])
# yapf: enable
def test_output_value_max_pooling(self, filters, in_channels, strides,
pool_strides, pool_shapes):
model = CNNModelWithMaxPooling(
filters=filters,
strides=strides,
name='cnn_model',
padding='VALID',
pool_strides=pool_strides,
pool_shapes=pool_shapes,
hidden_w_init=tf.constant_initializer(1),
hidden_nonlinearity=None)
outputs = model.build(self._input_ph)
output = self.sess.run(outputs,
feed_dict={self._input_ph: self.obs_input})
filter_sum = 1
# filter value after 3 layers of conv
for filter_iter, in_channel in zip(filters, in_channels):
filter_sum *= filter_iter[1][0] * filter_iter[1][1] * in_channel
height_size = self.input_height
width_size = self.input_width
for filter_iter, stride in zip(filters, strides):
height_size = int((height_size - filter_iter[1][0]) / stride) + 1
height_size = int(
(height_size - pool_shapes[0]) / pool_strides[0]) + 1
width_size = int((width_size - filter_iter[1][1]) / stride) + 1
width_size = int(
(width_size - pool_shapes[1]) / pool_strides[1]) + 1
flatten_shape = height_size * width_size * filters[-1][0]
# flatten
expected_output = np.full((self.batch_size, flatten_shape),
filter_sum,
dtype=np.float32)
assert np.array_equal(output, expected_output)
# yapf: disable
@pytest.mark.parametrize('filters, strides', [
(((32, (1, 1)),), (1, )), # noqa: E122
(((32, (3, 3)),), (1, )),
(((32, (3, 3)),), (2, )),
(((32, (1, 1)), (64, (1, 1))), (1, 1)),
(((32, (3, 3)), (64, (3, 3))), (1, 1)),
(((32, (3, 3)), (64, (3, 3))), (2, 2)),
])
# yapf: enable
def test_is_pickleable(self, filters, strides):
model = CNNModel(filters=filters,
strides=strides,
name='cnn_model',
padding='VALID',
hidden_w_init=tf.constant_initializer(1),
hidden_nonlinearity=None)
outputs = model.build(self._input_ph)
with tf.compat.v1.variable_scope('cnn_model/cnn/h0', reuse=True):
bias = tf.compat.v1.get_variable('bias')
bias.load(tf.ones_like(bias).eval())
output1 = self.sess.run(outputs,
feed_dict={self._input_ph: self.obs_input})
h = pickle.dumps(model)
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
model_pickled = pickle.loads(h)
input_shape = self.obs_input.shape[1:] # height, width, channel
input_ph = tf.compat.v1.placeholder(tf.float32,
shape=(None, ) + input_shape,
name='input')
outputs = model_pickled.build(input_ph)
output2 = sess.run(outputs, feed_dict={input_ph: self.obs_input})
assert np.array_equal(output1, output2)
|
[
"neurips2020submission11699@gmail.com"
] |
neurips2020submission11699@gmail.com
|
f38cf335b8fab60a2d1b2f67a4620fe3e0c47847
|
dfe3191eee14251b958589f9b383fd5f8798d47e
|
/habanero/__init__.py
|
7fae4d724c918f2b4ae26eb4c4d44980330b35cc
|
[
"MIT"
] |
permissive
|
kyleniemeyer/habanero
|
39257428cc442ec764edd3616749db10af783262
|
6338f22f06912a4f1af5f0459ff8329906442489
|
refs/heads/master
| 2021-01-14T11:53:45.396972
| 2016-07-11T15:59:41
| 2016-07-11T15:59:41
| 66,299,090
| 0
| 0
| null | 2016-08-22T18:47:17
| 2016-08-22T18:47:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,192
|
py
|
# -*- coding: utf-8 -*-
# habanero
'''
habanero library
~~~~~~~~~~~~~~~~~~~~~
habanero is a low-level client for the Crossref search API.
Usage::
from habanero import Crossref
cr = Crossref()
# setup a different base URL
Crossref(base_url = "http://some.other.url")
# setup an api key
Crossref(api_key = "123456")
# Make request against works route
cr.works(ids = '10.1371/journal.pone.0033693')
# curl options
## For example, set a timeout
cr.works(query = "ecology", timeout=0.1)
## advanced logging
### setup first
import requests
import logging
import httplib as http_client
http_client.HTTPConnection.debuglevel = 1
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
### then make request
cr.works(query = "ecology")
'''
__title__ = 'habanero'
__version__ = '0.2.6'
__author__ = 'Scott Chamberlain'
__license__ = 'MIT'
from .crossref import Crossref
from .cn import content_negotiation, csl_styles
from .counts import citation_count
from .exceptions import *
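# Minimal illustrative usage sketch (not part of the original module; assumes
# network access and mirrors the docstring above):
#
#   from habanero import Crossref, citation_count
#   cr = Crossref()
#   res = cr.works(ids = '10.1371/journal.pone.0033693')
#   n = citation_count(doi = '10.1371/journal.pone.0033693')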
|
[
"myrmecocystus@gmail.com"
] |
myrmecocystus@gmail.com
|
6f4c7736b4f7b3b3be54a806fa5fed52f9e446db
|
e3c6dcf5a77ae0b930087bb5849352a088dbc2e4
|
/hamon_shu/segments/segment_03/.handlers.py
|
4687984f7cf378e302baf025675dc29baf63361d
|
[] |
no_license
|
Catsvilles/hamon_shu
|
684cda44661ba18724af6719e4efc5f763c3cf61
|
35b377074cff9900193018446668aeb5440475be
|
refs/heads/master
| 2022-12-04T08:00:46.779614
| 2020-08-26T21:25:57
| 2020-08-26T21:25:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,913
|
py
|
import abjad
handler_to_value = abjad.OrderedDict(
[
(
'violin_1_pitch_handler_three',
abjad.OrderedDict(
[
('pitch_count', 38),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'violin_1_pitch_handler_one',
abjad.OrderedDict(
[
('pitch_count', 45),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'violin_1_pitch_handler_two',
abjad.OrderedDict(
[
('pitch_count', 59),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'violin_1_pitch_handler_four',
abjad.OrderedDict(
[
('pitch_count', 34),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'violin_2_pitch_handler_three',
abjad.OrderedDict(
[
('pitch_count', 45),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'violin_2_pitch_handler_one',
abjad.OrderedDict(
[
('pitch_count', 25),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'violin_2_pitch_handler_two',
abjad.OrderedDict(
[
('pitch_count', 52),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'violin_2_pitch_handler_four',
abjad.OrderedDict(
[
('pitch_count', 26),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'viola_pitch_handler_three',
abjad.OrderedDict(
[
('pitch_count', 72),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'viola_pitch_handler_one',
abjad.OrderedDict(
[
('pitch_count', 24),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'viola_pitch_handler_two',
abjad.OrderedDict(
[
('pitch_count', 57),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'viola_pitch_handler_four',
abjad.OrderedDict(
[
('pitch_count', 38),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'cello_pitch_handler_three',
abjad.OrderedDict(
[
('pitch_count', 44),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'cello_pitch_handler_one',
abjad.OrderedDict(
[
('pitch_count', 34),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'cello_pitch_handler_two',
abjad.OrderedDict(
[
('pitch_count', 55),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'cello_pitch_handler_four',
abjad.OrderedDict(
[
('pitch_count', 14),
('chord_boolean_count', -1),
('chord_groups_count', -1),
]
),
),
(
'dynamic_handler_one',
abjad.OrderedDict(
[
('count_1', 39),
('count_2', 12),
('count_3', 26),
('count_4', 12),
('count_5', 39),
]
),
),
(
'dynamic_handler_two',
abjad.OrderedDict(
[
('count_1', 10),
('count_2', 3),
('count_3', 6),
('count_4', 3),
('count_5', 10),
]
),
),
(
'articulation_handler_three',
abjad.OrderedDict(
[
('count', 92),
('vector_count', 92),
]
),
),
(
'articulation_handler_two',
abjad.OrderedDict(
[
('count', 19),
('vector_count', 19),
]
),
),
]
)
|
[
"gregoryrowlandevans@gmail.com"
] |
gregoryrowlandevans@gmail.com
|
f29f4d3f9eb00ed98d6c9da648caeb5da3c9d380
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/f3jX2BwzAuR8DXsy4_22.py
|
e469acc301be3f4807256f980cb528fa19e2fb93
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
"""
Create a function that takes an integer `n` and returns the **factorial of
factorials**. See below examples for a better understanding:
### Examples
fact_of_fact(4) ➞ 288
# 4! * 3! * 2! * 1! = 288
fact_of_fact(5) ➞ 34560
fact_of_fact(6) ➞ 24883200
### Notes
N/A
"""
import math
from functools import reduce
def fact_of_fact(n):
    m = [math.factorial(i) for i in range(1, n + 1)]
    return reduce(lambda x, y: x * y, m)
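# Illustrative sanity check (not part of the original submission):
# fact_of_fact(4) == 288, since 4! * 3! * 2! * 1! = 24 * 6 * 2 * 1 = 288.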
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
5c11d0ef4c5a83f6c0d971af6f4730a9a6fe1a67
|
c1e488789b41a714cdd37525d7e71815753c21d9
|
/atcoder/beginners/chap1/PASTFILES/ABC088A_1.py
|
041c7c7cbb41d815d7d2848a46a3bce2ad8a670a
|
[] |
no_license
|
happyhappyhappyhappy/pythoncode
|
638a0cbeb94ec04829c1c4e216fb200863cd7a4e
|
247b8346a503cab272043c20e6210ee03cfdd8c4
|
refs/heads/master
| 2023-08-31T20:54:06.144750
| 2023-08-30T08:33:15
| 2023-08-30T08:33:15
| 223,697,956
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
# Problem https://atcoder.jp/contests/abc088/tasks/abc088_a
# Python 1st Try
if __name__ == "__main__":
yes = "Yes"
no = "No"
answer = ""
N = int(input().strip())
A = int(input().strip())
chargeCoin = N % 500
if chargeCoin <= A:
answer = yes
else:
answer = no
print(answer)
|
[
"ymnkkj@gmail.com"
] |
ymnkkj@gmail.com
|
5937a083574b20b77de3073d1b7317e4f94be9ec
|
c9cf4e7acd3ff09412610965dc83988b3f501e5e
|
/utils/readWrite/read.py
|
2fe030fe3668a47d797bc8bc787023f8779bee51
|
[] |
no_license
|
Noba1anc3/General-Doc-SemSeg
|
31df6cc0c747c5586fbbeb9dace6170d3fbef4bd
|
27d9761fd45b2d5d52cfe3ed50413f902912b238
|
refs/heads/master
| 2021-05-19T04:15:42.604378
| 2020-03-31T06:59:45
| 2020-03-31T06:59:45
| 251,524,481
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,247
|
py
|
import os
import sys
from configparser import ConfigParser
from utils.logging.syslog import Logger
class Configuration():
def __init__(self):
self.logging = Logger(__name__)
Logger.get_log(self.logging).info('Start processing ConfigFile')
self.config()
Logger.get_log(self.logging).info('ConfigFile Processed\n')
def config(self):
cp = ConfigParser()
cp.read('conf.cfg')
self.folder = cp.get('configuration', 'folder')
self.filename = cp.get('configuration', 'filename')
self.tit_choice = cp.getint('configuration', 'tit_choice')
self.text_level = cp.getint('configuration', 'text_level')
self.table_level = cp.getint('configuration', 'table_level')
self.save_text = cp.getboolean('configuration', 'save_text')
self.save_image = cp.getboolean('configuration', 'save_image')
self.configCheck()
self.output_folder = 'output/'
if not os.path.exists(self.output_folder):
os.mkdir(self.output_folder)
if self.save_text or self.save_image:
self.prediction_folder = self.output_folder + 'prediction/'
if not os.path.exists(self.prediction_folder):
os.mkdir(self.prediction_folder)
if self.save_text == True:
self.json_folder = self.prediction_folder + 'json/'
if not os.path.exists(self.json_folder):
os.mkdir(self.json_folder)
if self.save_image == True:
self.img_folder = self.prediction_folder + 'image/'
if not os.path.exists(self.img_folder):
os.mkdir(self.img_folder)
if self.filename == 'all':
self.fileList = sorted(os.listdir(self.folder))
else:
self.fileList = [self.filename]
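    # Illustrative conf.cfg sketch (section and option names are taken from
    # the reads above; the values shown are assumptions):
    #
    #   [configuration]
    #   folder = pdfs/
    #   filename = all
    #   tit_choice = 1
    #   text_level = 1
    #   table_level = 1
    #   save_text = true
    #   save_image = false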
def configCheck(self):
if not self.folder[-1] == '/':
Logger.get_log(self.logging).critical('Configuration - Folder Format Error')
print("Configuration - Folder may loss '/' to the end of the path")
y_n = input("Do you want system add '/' to the end of path ? (Y/N)\n")
if y_n.lower() == 'y' or y_n.lower() == 'yes':
self.folder += '/'
else:
sys.exit()
if not self.filename == 'all' and not self.filename[-4:] == '.pdf':
Logger.get_log(self.logging).critical('Configuration - FileName Not End With .pdf ')
print('Configuration - FileName Not End With \'.pdf\'')
y_n = input("Do you want system add '.pdf' to the end of filename ? (Y/N)\n")
if y_n.lower() == 'y' or y_n.lower() == 'yes':
self.filename += '.pdf'
else:
sys.exit()
        if self.tit_choice not in (0, 1, 2, 3):
            Logger.get_log(self.logging).critical('Configuration - tit_choice Format Error ')
            while True:
                print('Configuration - tit_choice Format Error')
                tit_choice = input("Please press 0/1/2/3 to specify a tit_choice \n")
                if tit_choice in ('0', '1', '2', '3'):
                    # store as int, matching cp.getint() in config()
                    self.tit_choice = int(tit_choice)
                    break
        if self.text_level not in (1, 2):
            Logger.get_log(self.logging).critical('Configuration - text_level Format Error ')
            while True:
                print('Configuration - text_level Format Error ')
                text_level = input("Please press 1/2 to specify a text_level \n")
                if text_level in ('1', '2'):
                    self.text_level = int(text_level)
                    break
        if self.table_level not in (1, 2):
            Logger.get_log(self.logging).critical('Configuration - table_level Format Error ')
            while True:
                print('Configuration - table_level Format Error ')
                table_level = input("Please press 1/2 to specify a table_level \n")
                if table_level in ('1', '2'):
                    self.table_level = int(table_level)
                    break
|
[
"zxryhjp@yahoo.co.jp"
] |
zxryhjp@yahoo.co.jp
|
f79103b6166bbcddf98f63d0c258951fb19b31eb
|
28280d1c7ca06f89906e811f3b7311a5e8a0046b
|
/ecoz2py/__init__.py
|
5bffc1a049e5d658a7b607a7b4e2c48e1360e361
|
[] |
no_license
|
mbari-org/ecoz2py
|
e5e96ba127a397c7d319a15ca13889f724943ba5
|
00d17b1696debc3aff7da37f0e4be316de70c3a7
|
refs/heads/master
| 2022-09-03T20:59:18.927539
| 2020-05-03T02:06:51
| 2020-05-03T02:06:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,691
|
py
|
import os
from _ecoz2_extension import ffi
from _ecoz2_extension.lib import ecoz2_hmm_learn
from _ecoz2_extension.lib import ecoz2_prd_show_file
from _ecoz2_extension.lib import ecoz2_set_random_seed
from _ecoz2_extension.lib import ecoz2_version
from _ecoz2_extension.lib import ecoz2_vq_learn
def get_version():
return ffi.string(ecoz2_version())
def prd_show_file(filename,
show_reflections=False,
from_=-1,
to=-1,
):
ecoz2_prd_show_file(filename, show_reflections, from_, to)
def set_random_seed(seed):
ecoz2_set_random_seed(seed)
def hmm_learn(N,
sequence_filenames,
model_type=3,
hmm_epsilon=1.e-5,
val_auto=0.3,
max_iterations=-1,
hmm_learn_callback=None
):
c_sequence_filenames_keepalive = [ffi.new("char[]", _to_bytes(s)) for s in sequence_filenames]
c_sequence_filenames = ffi.new("char *[]", c_sequence_filenames_keepalive)
# for (i, c_sequence_filename) in enumerate(c_sequence_filenames):
# print('SEQ {} => {}'.format(i, ffi.string(c_sequence_filename)))
@ffi.callback("void(char*, double)")
def callback(c_variable, c_value):
if hmm_learn_callback:
variable = _to_str(ffi.string(c_variable))
value = float(c_value)
hmm_learn_callback(variable, value)
ecoz2_hmm_learn(N,
model_type,
c_sequence_filenames,
len(c_sequence_filenames),
hmm_epsilon,
val_auto,
max_iterations,
callback
)
def vq_learn(prediction_order,
predictor_filenames,
codebook_class_name='_',
epsilon=0.05,
vq_learn_callback=None
):
c_codebook_class_name = ffi.new("char []", _to_bytes(codebook_class_name))
c_predictor_filenames_keepalive = [ffi.new("char[]", _to_bytes(s)) for s in predictor_filenames]
c_predictor_filenames = ffi.new("char *[]", c_predictor_filenames_keepalive)
@ffi.callback("void(int, double, double, double)")
def callback(m, avg_distortion, sigma, inertia):
if vq_learn_callback:
vq_learn_callback(m, avg_distortion, sigma, inertia)
return ecoz2_vq_learn(prediction_order,
epsilon,
c_codebook_class_name,
c_predictor_filenames,
len(c_predictor_filenames),
callback
)
def get_actual_filenames(filenames, file_ext):
"""
    Returns the given list of files, expanding any directories it contains.
"""
files = []
for path in filenames:
if os.path.isdir(path):
dir_files = list_files(path, file_ext)
files = files + dir_files
elif os.path.isfile(path) and path.endswith(file_ext):
files.append(path)
return files
def list_files(directory, file_ext):
"""
    Lists all files under the given directory that have the given extension.
"""
files = []
for e in os.listdir(directory):
f = "{}/{}".format(directory, e)
# print(f)
if os.path.isdir(f):
files = files + list_files(f, file_ext)
elif os.path.isfile(f) and f.endswith(file_ext):
files.append(f)
return files
# ---------
def _to_bytes(s):
return s if isinstance(s, bytes) else str(s).encode("utf-8")
def _to_str(s):
return s if isinstance(s, str) else bytes(s).decode("utf-8")
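# Minimal illustrative usage sketch (not part of the original module; the
# directory name and '.seq' extension below are assumptions):
#
#   def on_progress(variable, value):
#       print(variable, value)
#
#   seqs = get_actual_filenames(['sequences/'], '.seq')
#   hmm_learn(5, seqs, hmm_learn_callback=on_progress)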
|
[
"carueda@mbari.org"
] |
carueda@mbari.org
|
bd37d6634f405523c79a877228689da80f242c6a
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_89/46.py
|
2b2b33f6462a2c18f37ba5fc20391f0621f9a50f
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,842
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
Waiters en LCM
"""
import sys, time, copy
from pdb import set_trace as DEBUG
def p(*s):
print >> sys.stderr, s
def gcd(a, b):
while b:
a, b = b, a % b
return a
def lcm(a, b):
return a * b // gcd(a, b)
def lcmm(*args):
return reduce(lcm, args)
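# Illustrative check (not part of the original file): lcmm(*range(1, 5)) == 12,
# since lcm(1, 2, 3, 4) = 12.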
def factors(n):
fact={1:1}
check=2
while check<=n:
if n%check==0:
n/=check
t = fact.get(check, 0)
fact[check] = t+1
else:
check+=1
return fact
#problem specific functions
def parseInput(f):
return int(f.readline())
def main(N):
if N ==1: return 0
l = lcmm(*range(1,N+1))
f = factors(l)
facts = {1:1}
maxturns = 0
for i in range(1,N+1):
fact = factors(i)
contribute = 0
for k,v in fact.items():
if k not in facts:
contribute+=1
if facts.get(k,0)<v:
facts[k] = v
maxturns+=contribute
return sum(f.values()) - maxturns
#for i in range(N, 0, -1):
#fact = factors(i)
#for k,v in fact.items():
#fk = facts.get(k,0)
#if fk>v:
#facts[k]-=v
#elif fk==v:
#del(facts[k])
#else:
#continue
#pass
#maxturns = i
#return maxturns
if __name__ == "__main__":
if len(sys.argv)==1:
filename = 'test.in'
else:
filename = sys.argv[1]
f = open('primes.txt')
primes = f.read().split()
primes = map(int, primes)
f.close()
#print primes
f = open(filename)
cases = int(f.readline())
for case in range(cases):
#p("Case #%i" % (case+1))
args = parseInput(f)
print "Case #%i: %s" % (case+1, main(args))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
0fbaab7562dfc9e920f442142b34da9865161986
|
7fdff3ab45f5fef05cc76f97ee44e44779f87120
|
/peerloan/migrations/0018_auto_20160912_1536.py
|
e45c005b5ff05f11df1b1b9a437414fdb3067bda
|
[] |
no_license
|
Calvin66der/project_peerloan
|
4a132c7464b21e75a80f091d44c389cbd10c2cc5
|
99a02843addbfcffec5c7d7a964f0b3347a03962
|
refs/heads/master
| 2021-01-12T07:45:00.811952
| 2016-12-20T08:44:42
| 2016-12-20T08:44:42
| 77,006,043
| 0
| 0
| null | 2016-12-21T01:47:48
| 2016-12-21T01:47:48
| null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('peerloan', '0017_borrowrequest_overpay_amount'),
]
operations = [
migrations.AlterField(
model_name='loanschedule',
name='received_amount',
field=models.FloatField(default=0),
),
]
|
[
"15113029g@connect.polyu.hk"
] |
15113029g@connect.polyu.hk
|
72c850969dfe5e6528309e706ffd673c82f7a44c
|
5b93930ce8280b3cbc7d6b955df0bfc5504ee99c
|
/nodes/VanderPlas17Python/E_Chapter4/E_VisualizingErrors/index.py
|
c2c770e9b709e97993efbbfb79962c767157f91e
|
[] |
no_license
|
nimra/module_gen
|
8749c8d29beb700cac57132232861eba4eb82331
|
2e0a4452548af4fefd4cb30ab9d08d7662122cf4
|
refs/heads/master
| 2022-03-04T09:35:12.443651
| 2019-10-26T04:40:49
| 2019-10-26T04:40:49
| 213,980,247
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,314
|
py
|
# Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.HierBlock import HierBlock as hbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.ListBlock import ListBlock as lbk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
from .A_BasicErrorbars.index import BasicErrorbars as A_BasicErrorbars
from .B_ContinuousErrors.index import ContinuousErrors as B_ContinuousErrors
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
blocks = [
# Figure 4-26. Using point properties to encode features of the Iris data
#
# We can see that this scatter plot has given us the ability to simultaneously explore
# four different dimensions of the data: the (x, y) location of each point corresponds to
# the sepal length and width, the size of the point is related to the petal width, and the
# color is related to the particular species of flower. Multicolor and multifeature scatter
# plots like this can be useful for both exploration and presentation of data.
#
# plot Versus scatter: A Note on Efficiency
# Aside from the different features available in plt.plot and plt.scatter, why might
# you choose to use one over the other? While it doesn’t matter as much for small
# amounts of data, as datasets get larger than a few thousand points, plt.plot can be
# noticeably more efficient than plt.scatter. The reason is that plt.scatter has the
# capability to render a different size and/or color for each point, so the renderer must
# do the extra work of constructing each point individually. In plt.plot, on the other
# hand, the points are always essentially clones of each other, so the work of
# determining the appearance of the points is done only once for the entire set of data. For large
# datasets, the difference between these two can lead to vastly different performance,
# and for this reason, plt.plot should be preferred over plt.scatter for large
# datasets.
#
# Visualizing Errors
# For any scientific measurement, accurate accounting for errors is nearly as important,
# if not more important, than accurate reporting of the number itself. For example,
# imagine that I am using some astrophysical observations to estimate the Hubble
# Constant, the local measurement of the expansion rate of the universe. I know that the
# current literature suggests a value of around 71 (km/s)/Mpc, and I measure a value of
# 74 (km/s)/Mpc with my method. Are the values consistent? The only correct answer,
# given this information, is this: there is no way to know.
#
#
# Visualizing Errors | 237
#
# Suppose I augment this information with reported uncertainties: the current
# literature suggests a value of around 71 ± 2.5 (km/s)/Mpc, and my method has measured a
# value of 74 ± 5 (km/s)/Mpc. Now are the values consistent? That is a question that
# can be quantitatively answered.
# In visualization of data and results, showing these errors effectively can make a plot
# convey much more complete information.
#
# Basic Errorbars
# A basic errorbar can be created with a single Matplotlib function call (Figure 4-27):
# In[1]: %matplotlib inline
# import matplotlib.pyplot as plt
# plt.style.use('seaborn-whitegrid')
# import numpy as np
# In[2]: x = np.linspace(0, 10, 50)
# dy = 0.8
# y = np.sin(x) + dy * np.random.randn(50)
#
# plt.errorbar(x, y, yerr=dy, fmt='.k');
#
#
#
#
# Figure 4-27. An errorbar example
#
# Here the fmt is a format code controlling the appearance of lines and points, and has
# the same syntax as the shorthand used in plt.plot, outlined in “Simple Line Plots”
# on page 224 and “Simple Scatter Plots” on page 233.
# In addition to these basic options, the errorbar function has many options to
# fine-tune the outputs. Using these additional options you can easily customize the
# aesthetics of your errorbar plot. I often find it helpful, especially in crowded plots, to make
# the errorbars lighter than the points themselves (Figure 4-28):
# In[3]: plt.errorbar(x, y, yerr=dy, fmt='o', color='black',
# ecolor='lightgray', elinewidth=3, capsize=0);
#
#
#
#
# 238 | Chapter 4: Visualization with Matplotlib
#
]
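# Illustrative runnable version of the errorbar snippet quoted above (kept as a
# comment so that importing this module stays free of plotting side effects):
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   plt.style.use('seaborn-whitegrid')
#   x = np.linspace(0, 10, 50)
#   dy = 0.8
#   y = np.sin(x) + dy * np.random.randn(50)
#   plt.errorbar(x, y, yerr=dy, fmt='.k')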
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
def __init__(self):
super().__init__(
"Visualizing Errors",
# Stage.REMOVE_EXTRANEOUS,
# Stage.ORIG_BLOCKS,
# Stage.CUSTOM_BLOCKS,
# Stage.ORIG_FIGURES,
# Stage.CUSTOM_FIGURES,
# Stage.CUSTOM_EXERCISES,
)
[self.add(a) for a in blocks]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class VisualizingErrors(HierNode):
def __init__(self):
super().__init__("Visualizing Errors")
self.add(Content())
self.add(A_BasicErrorbars())
self.add(B_ContinuousErrors())
# eof
|
[
"lawrence.mcafee@gmail.com"
] |
lawrence.mcafee@gmail.com
|
3037cc9f0d5675cef844ea03c08be30f015cdeb3
|
fe7996f7110211e8c2df7cd7a4d81cc572204a70
|
/synthetic-enumeration/sprint-12/03-collect-experimental-data-from-Lauren-assignments.py
|
afb9cf0e066c49396bcbc2bd77a5215fad858d7a
|
[
"MIT"
] |
permissive
|
FoldingAtHome/covid-moonshot
|
78c2bc7e6d00f371d626fcb0a4381cf528413eef
|
814189c239f8f0189c6cc48afcbca1f96c87dd09
|
refs/heads/master
| 2023-02-23T04:23:00.064389
| 2023-02-19T23:18:10
| 2023-02-19T23:18:10
| 249,626,873
| 62
| 11
|
MIT
| 2022-03-01T20:43:56
| 2020-03-24T06:07:39
|
Python
|
UTF-8
|
Python
| false
| false
| 5,603
|
py
|
#!/bin/env python
"""
Collect experimental data from Lauren's reassignments via CSV file
"""
import numpy as np
import json
import math
import itertools
import datetime
from rich.progress import track
from openeye import oechem
xchem_project = 'Mpro'
creator = 'John Chodera <john.chodera@choderalab.org>'
creation_date = datetime.datetime.now()
prefix = 'sprint-12'
description = 'COVID Moonshot Sprint 12 for optimizing 5-spiro compounds'
csv_filename = 'experimental-data/Fl_agg_data_all_data_11_01_2022_11_13_20-cleaned-reassigned_isomers.csv'
#
# Now pull in all submitted designs
#
def smiles_is_racemic(suspected_smiles):
"""
Return True if compound is racemic.
Examples:
"CNC(=O)CN1Cc2ccc(Cl)cc2[C@@]2(CCN(c3cncc4c3CCCC4)C2=O)C1 |o1:14|" : compound is enantiopure, but stereochemistry is uncertain
"CNC(=O)CN1Cc2ccc(Cl)cc2[C@@]2(CCN(c3cncc4c3CCCC4)C2=O)C1" : compound is enantiopure, stereochemistry is certain
"CNC(=O)CN1Cc2ccc(Cl)cc2[C]2(CCN(c3cncc4c3CCCC4)C2=O)C1" : compound is racemic
"""
smiles = suspected_smiles.split()[0] # truncate suffix
return stereochemistry_is_uncertain(smiles)
def stereochemistry_is_uncertain(suspected_smiles):
"""
Return True if there is uncertainty in the enantiopure compound or mixture is racemic.
"""
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.EnumerateStereoisomers import EnumerateStereoisomers, StereoEnumerationOptions
rdmol = Chem.MolFromSmiles(suspected_smiles)
smi_list = []
opts = StereoEnumerationOptions(unique=True)
isomers = tuple(EnumerateStereoisomers(rdmol, options=opts))
for smi in sorted(Chem.MolToSmiles(isomer, isomericSmiles=True) for isomer in isomers):
smi_list.append(smi)
if len(smi_list) > 1:
return True
else:
return False
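# Illustrative behaviour sketch (the SMILES below are assumptions, chosen to
# mirror the smiles_is_racemic docstring above):
#   stereochemistry_is_uncertain('C[C@H](N)C(=O)O')  # assigned centre -> False
#   stereochemistry_is_uncertain('CC(N)C(=O)O')      # unassigned centre -> True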
# Read all submitted designs
print('Reading CSV export...')
compounds_with_experimental_data = list()
# Drop columns that cause trouble for OpenEye
import pandas as pd
df = pd.read_csv(csv_filename, dtype=str)
# Drop columns
#drop_columns = []
#df.drop(columns=drop_columns, inplace=True)
# Replace suspected_SMILES with SMILES
#df['suspected_SMILES'].fillna(df['SMILES'], inplace=True)
# Exchange columns so suspected_SMILES is first
#title_column_index = df.columns.get_loc("Canonical PostEra ID")
#smiles_column_index = df.columns.get_loc("suspected_SMILES")
#cols = df.columns.tolist()
#cols = cols[smiles_column_index:(smiles_column_index+1)] + cols[title_column_index:(title_column_index+1)] + cols[:]
#df = df[cols]
# Replace < and > with limits
#df.applymap(lambda x: str(x))
#df.applymap(lambda x: 0.050 if "<" in str(x) else x)
#df.applymap(lambda x: 99.0 if ">" in str(x) else x)
# Eliminate stuff after spaces
#df = df.applymap(lambda x: str(x).split()[0])
ncompounds_dropped_due_to_uncertain_stereochemistry = 0
ncompounds_racemic = 0
# Iterate over molecules
# Fields: compound_name,compound_structure,measurement,qualifier,reassigned_structure
# Format: PostEra ID,SMILES,pIC50,comparator,reassigned_structure
delta_pIC50 = 0.2 # 95% CI is this many units in either direction
from fah_xchem.schema import ExperimentalCompoundData, ExperimentalCompoundDataUpdate
for index, row in df.iterrows():
row = row.to_dict()
suspected_smiles = row['compound_structure']
compound_id = row['compound_name']
is_racemic = smiles_is_racemic(suspected_smiles)
# Skip inequalities
if row['qualifier'] != '=':
continue
pIC50 = float(row['measurement'])
pIC50_lower = pIC50 - delta_pIC50
pIC50_upper = pIC50 + delta_pIC50
# Canonicalize with OpenEye SMILES
suspected_smiles = suspected_smiles.split()[0] # truncate stuff after whitespace
oemol = oechem.OEGraphMol()
oechem.OESmilesToMol(oemol, suspected_smiles)
suspected_smiles = oechem.OEMolToSmiles(oemol)
experimental_data = dict()
experimental_data['pIC50'] = pIC50
experimental_data['pIC50_lower'] = pIC50_lower
experimental_data['pIC50_upper'] = pIC50_upper
if is_racemic:
ncompounds_racemic += 1
# Store compound experimental data
experimental_compound_data = ExperimentalCompoundData(
compound_id=compound_id,
smiles=suspected_smiles,
is_racemic=is_racemic,
experimental_data=experimental_data,
)
compounds_with_experimental_data.append(experimental_compound_data)
print(f'{len(compounds_with_experimental_data)} measurements read and retained')
print(f'{ncompounds_dropped_due_to_uncertain_stereochemistry} enantiopure compounds with uncertain stereochemistry dropped.')
print(f'{ncompounds_racemic} compounds assayed as racemates')
dataset = ExperimentalCompoundDataUpdate(compounds=compounds_with_experimental_data)
print(f'There are {len(compounds_with_experimental_data)} compounds in this sprint with in-range IC50 measurements')
# Write JSON
def write_json(compound_series, json_filename):
print(f'Writing JSON to {json_filename}')
if '.bz2' in json_filename:
import bz2
with bz2.open(json_filename, "wt") as f:
f.write(compound_series.json())
elif '.gz' in json_filename:
import gzip
with gzip.open(json_filename, "wt") as f:
f.write(compound_series.json())
else:
with open(json_filename, "wt") as f:
f.write(compound_series.json())
import os
os.makedirs('json', exist_ok=True)
print(f'Generating experimental data JSON for {prefix}...')
json_filename = f'json/{prefix}-experimental-data.json' # output filename
write_json(dataset, json_filename)
|
[
"john.chodera@choderalab.org"
] |
john.chodera@choderalab.org
|
db61be2c3b26ca80b961f9b324f981d7de1be14a
|
99361c45166c3e39bdc1e5e7ff796b60e5edc20e
|
/setup.py
|
59352d3cc65d0277c567478e0470ebd9187c11c0
|
[] |
no_license
|
wkcn/WorldCup
|
2b358b73aab5496b3f7e209dc615c97c0181abff
|
1acef2d2cadf5e8cbb911b05a8ecfd98aa43920d
|
refs/heads/master
| 2020-03-08T10:38:08.558059
| 2018-04-04T15:03:07
| 2018-04-04T15:03:07
| 128,077,995
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
# -*- coding: utf-8 -*-
from distutils.core import setup
import py2exe
import sys
sys.argv.append('py2exe')
setup(
windows=[
{"script":"run.py","icon_resources":[(1,"logo.ico"),]}],
options={
"py2exe":{"includes":["sip"],"dll_excludes":["MSVCP90.dll"],\
"bundle_files": 3,"optimize": 2,
}},
data_files=[
("image", ["./logo.ico",])]
)
|
[
"wkcn@live.cn"
] |
wkcn@live.cn
|
eca69742d6ec30ac047d2b79b46fa7b0ddc3cf56
|
237cc38de0cf7a6e3661ed552ae771bd972d7438
|
/base/obj2_demo.py
|
ce08920ba539aeb6829dc7a411f369bec63a4e60
|
[] |
no_license
|
chydream/python
|
af5ad8a98c78de71e255f7b776f936c4b89c616e
|
e5bfef53a7770d4f323bd2877f93c8166c563695
|
refs/heads/master
| 2020-05-07T17:00:33.558178
| 2020-05-05T13:45:19
| 2020-05-05T13:45:19
| 180,708,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,949
|
py
|
class Point(object):
    # Custom constructor (initializer) for the Point class
    def __init__(self, x, y):
        self.x = x
        self.y = y
    # Formatted-output method (string()) for Point objects
    def string(self):
        print("{{X:{0},Y:{1}}}".format(self.x, self.y))
class Circle(Point):
    # Custom constructor (initializer) for the Circle class
    def __init__(self, x, y, radius):
        Point.__init__(self, x, y)
        # super().__init__(x, y)
        self.radius = radius
    # Formatted-output method (string()) for Circle objects
    def string(self):
        print("Initial point of this shape: {{X:{0},Y:{1}}}; {{Radius: {2}}}".format(self.x, self.y, self.radius))
class Size(object):
    # Custom constructor (initializer) for the Size class
    def __init__(self, width, height):
        self.width = width
        self.height = height
    # Formatted-output method (string()) for Size objects
    def string(self):
        print("{{Width:{0},Height:{1}}}".format(self.width, self.height))
class Rectangle(Point, Size):
    # Custom constructor (initializer) for the Rectangle class; it calls both
    # parent initializers to complete initialization
    def __init__(self, x, y, width, height):
        Point.__init__(self, x, y)
        Size.__init__(self, width, height)
    # Formatted-output method (string()) for Rectangle objects
    def string(self):
        print("Initial point of this shape: {{X:{0},Y:{1}}}; width and height: {{Width:{2}, Height:{3}}}".format(self.x, self.y, self.width, self.height))
if __name__ == "__main__":
    # Instantiate a Circle object with center (5, 5) and radius 8
    c = Circle(5, 5, 8)
    c.string()
    # Instantiate a Rectangle object with vertex (15, 15), width 15 and height 15
    r1 = Rectangle(15, 15, 15, 15)
    r1.string()
    # Instantiate a Rectangle object with vertex (40, 30), width 11 and height 14
    r2 = Rectangle(40, 30, 11, 14)
    r2.string()
|
[
"yong.chen@doone.com.cn"
] |
yong.chen@doone.com.cn
|
f31a50aaf5650420eddc7d4b4b4b0b17edbae209
|
3fd7adb56bf78d2a5c71a216d0ac8bc53485b034
|
/experiments/cem_exp/benchmarks_goalimage/hor15_easygoal/mod_hyper.py
|
1060f0f55147f0e67cf53d1bef3020b1c04858e0
|
[] |
no_license
|
anair13/lsdc
|
6d1675e493f183f467cab0bfe9b79a4f70231e4e
|
7760636bea24ca0231b4f99e3b5e8290c89b9ff5
|
refs/heads/master
| 2021-01-19T08:02:15.613362
| 2017-05-12T17:13:54
| 2017-05-12T17:13:54
| 87,596,344
| 0
| 0
| null | 2017-04-08T00:18:55
| 2017-04-08T00:18:55
| null |
UTF-8
|
Python
| false
| false
| 640
|
py
|
current_dir = '/'.join(str.split(__file__, '/')[:-1])
bench_dir = '/'.join(str.split(__file__, '/')[:-2])
from lsdc.algorithm.policy.cem_controller_goalimage import CEM_controller
policy = {
'type' : CEM_controller,
'use_goalimage':"",
'low_level_ctrl': None,
'usenet': True,
'nactions': 5,
'repeat': 3,
'initial_std': 7,
'netconf': current_dir + '/conf.py',
    'use_first_plan': False,   # execute MPC instead of using only the first plan
'iterations': 5,
'load_goal_image':'make_easy_goal',
}
agent = {
'T': 25,
'use_goalimage':"",
'start_confs': bench_dir + '/make_easy_goal/configs_easy_goal'
}
|
[
"frederik.ebert@mytum.de"
] |
frederik.ebert@mytum.de
|
e17f92d3d343d5272ea4fbcebd7c5a86df5c6a2d
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2811/60768/235290.py
|
44942420c792f233946644b79e4acce40a08ea76
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
pAndn = input().split(' ')
map = int(pAndn[0]) * ['']
num = int(pAndn[1])
conflict = False
for i in range(num):
index = int(input())
if map[index % len(map)] == '':
map[index % len(map)] = index
else:
print(i + 1)
conflict = True
break
if not conflict:
print(-1)
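# Illustrative behaviour (comment added; not part of the original submission):
# the table has p slots addressed by value % p; the program prints the 1-based
# position of the first insertion that hits an occupied slot, or -1 if all n
# insertions land in empty slots.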
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
376f82bf1be280037aaad21374b43a1e4dce82eb
|
69889d51e933b4e8a1d4c8397a317aa1d1365a5a
|
/Stack/17299.py
|
3de2e8eff8d86d4a1485e3e058e23e566d2857dc
|
[] |
no_license
|
ddraa/Algorithm
|
a35c87631420ceccec6f7094da6f2b22ddb66c8c
|
a97c6628d5389f7f93603a2e95ac3b569057f556
|
refs/heads/master
| 2023-06-25T17:12:39.925821
| 2021-07-18T05:53:28
| 2021-07-18T05:53:28
| 279,240,088
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
import sys
input = sys.stdin.readline
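# BOJ 17299: for each element, find the nearest element to its right whose
# frequency in the whole sequence is strictly greater (-1 if none), using the
# frequency table F and a monotonic stack scanned right to left.
# (Descriptive comment added; not part of the original submission.)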
N = int(input())
F, stack = {}, []
arr = list(map(int, input().split()))
res = [-1 for _ in range(N)]
for n in arr:
if n in F:
F[n] += 1
else:
F[n] = 1
for i in range(N - 1, -1, -1):
while stack and stack[-1][0] <= F[arr[i]]:
stack.pop()
if stack:
res[i] = stack[-1][1]
stack.append((F[arr[i]], arr[i]))
print(*res)
|
[
"ruuddyd@gmail.com"
] |
ruuddyd@gmail.com
|
bfb31bbaa48485e6c87d4b9683dbf6fc1c4d2f7b
|
91a9f5a7afb398f4238527708cbc155dc972cbfa
|
/older/Grapher_app0/Names_Module.py
|
1ff52a69c3e05a1e89a15ebd6b1cc78a4dd3597e
|
[] |
no_license
|
bddmodelcar/kzpy3.2
|
cd6f9bf6b7b8b920c79b4ee36c2592b992ae4332
|
b044b26649b19b240bd580feca20424a237374b1
|
refs/heads/master
| 2021-01-19T21:01:58.687712
| 2017-08-23T22:39:56
| 2017-08-23T22:39:56
| 101,243,308
| 0
| 1
| null | 2017-08-24T02:04:50
| 2017-08-24T02:04:50
| null |
UTF-8
|
Python
| false
| false
| 1,681
|
py
|
from Paths_Module import *
exec(identify_file_str)
for _name in [
'pts_plot','img','purpose','name','xyz_sizes','data_type','x','y',
'xmin','ymin','xmax','ymax','xscale','yscale','floats_to_pixels',
'pixels_to_floats','ysize','xsize','lines_plot','color',
'reject_run',
'left',
'out1_in2',
'dic',
'name',
'test',
'dic_type',
'purpose',
'batch_size',
'net',
'camera_data',
'metadata',
'target_data',
'names',
'states',
'loss_dic',
'train',
'val',
'ctr',
'all_steer',
'epoch_counter',
'get_data',
'next',
'run_code',
'seg_num',
'offset',
'all_data_moment_id_codes',
'left',
'right',
'fill',
'clear',
'forward',
'backward',
'display',
'GPU',
'BATCH_SIZE',
'DISPLAY',
'VERBOSE',
'LOAD_ARUCO',
'BAIR_CAR_DATA_PATH',
'RESUME',
'IGNORE',
'REQUIRE_ONE',
'USE_STATES',
'N_FRAMES',
'N_STEPS',
'STRIDE',
'save_net_timer',
'print_timer',
'epoch_timer',
'WEIGHTS_FILE_PATH',
'SAVE_FILE_NAME',
'mode',
'criterion',
'optimizer',
'data_ids',
'data_moment',
'racing',
'caffe',
'follow',
'direct',
'play',
'furtive',
'labels',
'LCR',
'data_moment_loss_record',
'loss',
'outputs',
'print_now',
'network',
'metadata',
'steer',
'motor',
'data',
'NETWORK_OUTPUT_FOLDER',
'code','data_moment_loss_records','loss_history','weights',
'save_net',
'CODE_PATH',
'rate_ctr',
'rate_timer',
'step',
'rate_counter',
'loss_record',
'add','loss',
'TRAIN_TIME',
'VAL_TIME','INITIAL_WEIGHTS_FOLDER',
'activiations',
'moment_index', 'imgs', 'view','camera_input','final_output',
'pre_metadata_features','pre_metadata_features_metadata','post_metadata_features','scales','delay'
]:exec(d2n(_name,'=',"'",_name,"'"))
#
#EOF
|
[
"karlzipser@berkeley.edu"
] |
karlzipser@berkeley.edu
|
7540b3442e53b36dbb55bce5a3c058d967207818
|
296d4fec38b2a5ec2f4eb402d1b2145980dd184b
|
/aliens.py
|
ac6d5f3a8d3bb0c5203dcb6a7cf851111dbd07b3
|
[] |
no_license
|
RayGutt/python
|
9464ae7c63850240df58ff78c6050bc6e1d35b3e
|
a9b68d43923f13b58e7d59fdabf649820d48bd52
|
refs/heads/master
| 2020-11-27T01:17:57.136062
| 2020-01-22T14:36:25
| 2020-01-22T14:36:25
| 229,254,199
| 0
| 0
| null | 2020-01-05T19:47:27
| 2019-12-20T11:39:53
|
HTML
|
UTF-8
|
Python
| false
| false
| 844
|
py
|
alien_0 = {'color': 'green', 'points': 5}
alien_1 = {'color': 'yellow', 'points': 10}
alien_2 = {'color': 'yellow', 'points': 15}
aliens = [alien_0, alien_1, alien_2]
for alien in aliens:
print(alien)
print("_________")
# Make an empty list for storing aliens.
aliens = []
# Make 30 green aliens.
for alien_number in range(30):
new_alien = {'color': 'green', 'points': 5, 'speed': 'slow'}
aliens.append(new_alien)
for alien in aliens[0:3]:
if alien['color'] == 'green':
alien['color'] = 'yellow'
alien['speed'] = 'medium'
alien['points'] = 10
elif alien['color'] == 'yellow':
alien['color'] = 'red'
alien['speed'] = 'fast'
alien['points'] = 15
# Show the first 5 aliens.
for alien in aliens[:5]:
print(alien)
print("...")
# Show how many aliens have been created.
print("Total number of aliens: " + str(len(aliens)))
|
[
"le.caribou@gmail.com"
] |
le.caribou@gmail.com
|
a232ab5e7b7b3938334e7d69911f01ae956a17eb
|
4fdaa61e2fb2d320a0903e17024598c6a67ab0fb
|
/python/Vaav/kitchen.py
|
9ffefc461390bce99d81d0b9e5536c9669c10b11
|
[] |
no_license
|
khans/ProgrammingAndDataStructures
|
10d5cd5f30f703298ba132be4dfba828f3a0e9e1
|
58c1d822fa5eab17485369bc40dd1376db389f44
|
refs/heads/master
| 2021-01-25T14:03:40.616633
| 2018-06-19T23:02:44
| 2018-06-19T23:02:44
| 123,643,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 894
|
py
|
from collections import deque,defaultdict
class Table:
capacity = 0
availability = True
occupancy = 0
def __init__(self,number):
self.number = number
def addOccupant(self):
self.occupancy += 1
self.availability = False
def setCapacity(self,capacity):
self.capacity = capacity
def getTableNumber(self):
return self.number
class Order:
def __init__(self):
self.orderList = {}
def addOrder(self,item,count):
self.orderList[item] = count
class Kitchen:
    def __init__(self):
        # per-instance queue so separate Kitchen objects do not share orders
        self.queue = deque()
        self.free = False
    def make(self, order):
        self.queue.append(order)
    def isReady(self, order):
        return order not in self.queue
    def getFood(self):
        self.queue.popleft()
    def getQueue(self):
        return self.queue
    def doneDish(self):
        self.queue.popleft()
|
[
"isafakhan@gmail.com"
] |
isafakhan@gmail.com
|
7fbc8d5ca1d93c1ff42c22beefc7772cb15d39ca
|
2f8f8171b3b996b0c866ede72367ec26f64eae39
|
/sampleproject/book/BeginningPython3_O_REILLY/chapter10/10-8.py
|
659dc821caed89f2f69b939227a7fca816939de1
|
[] |
no_license
|
kabaksh0507/exercise_python_it-1
|
da46edce09301b03a5351ee1885fb01eb69d8240
|
2b6c80a79494c9981e51bd03696c3aa19d6625ec
|
refs/heads/main
| 2023-03-04T03:12:44.188468
| 2021-02-08T08:55:36
| 2021-02-08T08:55:36
| 337,014,697
| 0
| 0
| null | 2021-02-08T08:57:30
| 2021-02-08T08:57:30
| null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
from datetime import date
birth_day = date(1987, 8, 9)
print(birth_day)
fmt = 'year = %Y , month = %B , day = %d , day of the week = %A'
print(birth_day.strftime(fmt))
|
[
"kazkitou9080@gmail.com"
] |
kazkitou9080@gmail.com
|
597f6e44b90374e56fd32df848bc609cc1e37273
|
733496067584ee32eccc333056c82d60f673f211
|
/idfy_rest_client/models/signer_info.py
|
be68e6b47d5cff31143fcbe749d6914360bfe06d
|
[
"MIT"
] |
permissive
|
dealflowteam/Idfy
|
90ee5fefaa5283ce7dd3bcee72ace4615ffd15d2
|
fa3918a6c54ea0eedb9146578645b7eb1755b642
|
refs/heads/master
| 2020-03-07T09:11:15.410502
| 2018-03-30T08:12:40
| 2018-03-30T08:12:40
| 127,400,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,191
|
py
|
# -*- coding: utf-8 -*-
"""
idfy_rest_client.models.signer_info
This file was automatically generated for Idfy by APIMATIC v2.0 ( https://apimatic.io )
"""
import idfy_rest_client.models.mobile
import idfy_rest_client.models.organization_info
class SignerInfo(object):
"""Implementation of the 'SignerInfo' model.
TODO: type model description here.
Attributes:
        first_name (string): The signer's first name
        last_name (string): The signer's last name
        email (string): The signer's email address; define this if you are
            using notifications
        mobile (Mobile): The signer's mobile number; define this if you are
            using notifications
        organization_info (OrganizationInfo): The signer's organization info
"""
# Create a mapping from Model property names to API property names
_names = {
"first_name":'firstName',
"last_name":'lastName',
"email":'email',
"mobile":'mobile',
"organization_info":'organizationInfo'
}
def __init__(self,
first_name=None,
last_name=None,
email=None,
mobile=None,
organization_info=None,
additional_properties = {}):
"""Constructor for the SignerInfo class"""
# Initialize members of the class
self.first_name = first_name
self.last_name = last_name
self.email = email
self.mobile = mobile
self.organization_info = organization_info
# Add additional model properties to the instance
self.additional_properties = additional_properties
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
first_name = dictionary.get('firstName')
last_name = dictionary.get('lastName')
email = dictionary.get('email')
mobile = idfy_rest_client.models.mobile.Mobile.from_dictionary(dictionary.get('mobile')) if dictionary.get('mobile') else None
organization_info = idfy_rest_client.models.organization_info.OrganizationInfo.from_dictionary(dictionary.get('organizationInfo')) if dictionary.get('organizationInfo') else None
# Clean out expected properties from dictionary
for key in cls._names.values():
if key in dictionary:
del dictionary[key]
# Return an object of this model
return cls(first_name,
last_name,
email,
mobile,
organization_info,
dictionary)
|
[
"runes@unipluss.no"
] |
runes@unipluss.no
|
bf6f30ccfa37f9d4acc212e1f4ec33d7b4457052
|
09fd456a6552f42c124c148978289fae1af2d5c3
|
/Greedy/1282.py
|
0aeb767815ec62b1439482c75e3f15c26f9a4fc9
|
[] |
no_license
|
hoang-ng/LeetCode
|
60b4e68cbcf54cbe763d1f98a70f52e628ab32fb
|
5407c6d858bfa43325363503c31134e560522be3
|
refs/heads/master
| 2021-04-10T11:34:35.310374
| 2020-07-28T10:22:05
| 2020-07-28T10:22:05
| 248,932,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,391
|
py
|
# 1282. Group the People Given the Group Size They Belong To
# There are n people whose IDs go from 0 to n - 1 and each person belongs exactly to one group. Given the array groupSizes of length n telling the group size each person belongs to, return the groups there are and the people's IDs each group includes.
# You can return any solution in any order and the same applies for IDs. Also, it is guaranteed that there exists at least one solution.
# Example 1:
# Input: groupSizes = [3,3,3,3,3,1,3]
# Output: [[5],[0,1,2],[3,4,6]]
# Explanation:
# Other possible solutions are [[2,1,6],[5],[0,4,3]] and [[5],[0,6,2],[4,3,1]].
# Example 2:
# Input: groupSizes = [2,1,3,3,3,2]
# Output: [[1],[0,5],[2,3,4]]
# Constraints:
# groupSizes.length == n
# 1 <= n <= 500
# 1 <= groupSizes[i] <= n
import collections
class Solution(object):
def groupThePeople(self, groupSizes):
dic = collections.defaultdict(list)
for i in range(len(groupSizes)):
dic[groupSizes[i]].append(i)
rs = []
for key in dic.keys():
count = 0
subArr = []
for i in range(len(dic[key])):
subArr.append(dic[key][i])
count += 1
if count == key:
rs.append(subArr)
subArr = []
count = 0
return rs
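# Illustrative usage (mirrors Example 1 above; not part of the original file):
#   Solution().groupThePeople([3, 3, 3, 3, 3, 1, 3])
#   returns [[0, 1, 2], [3, 4, 6], [5]] (any valid grouping is accepted)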
|
[
"hoang2109@gmail.com"
] |
hoang2109@gmail.com
|
a728af285352f2bc6175af70b01b5f0761313a71
|
acf5a0ea75b92eb8d082f04961a7646d8ccf7b32
|
/passpie/database.py
|
cf67372c689b4ec9ef0d8045fd5348f847a064c1
|
[
"MIT"
] |
permissive
|
mauriciovieira/passpie
|
6f9c98ba086bfe10a9d2c964c473507feba22586
|
bd0f5cca6ce12fc4469f4007199bef7ab3b8980e
|
refs/heads/master
| 2021-01-18T08:56:23.853489
| 2016-01-26T07:03:26
| 2016-01-26T07:03:26
| 50,439,403
| 0
| 0
| null | 2016-01-26T15:49:43
| 2016-01-26T15:49:43
| null |
UTF-8
|
Python
| false
| false
| 3,841
|
py
|
from datetime import datetime
import logging
import os
import shutil
from tinydb import TinyDB, Storage, where, Query
import yaml
from .utils import mkdir_open
from .credential import split_fullname, make_fullname
class PasspieStorage(Storage):
extension = ".pass"
def __init__(self, path):
super(PasspieStorage, self).__init__()
self.path = path
def delete(self, credentials):
for cred in credentials:
dirname, filename = cred["name"], cred["login"] + self.extension
credpath = os.path.join(self.path, dirname, filename)
os.remove(credpath)
if not os.listdir(os.path.dirname(credpath)):
shutil.rmtree(os.path.dirname(credpath))
def read(self):
elements = []
for rootdir, dirs, files in os.walk(self.path):
filenames = [f for f in files if f.endswith(self.extension)]
for filename in filenames:
docpath = os.path.join(rootdir, filename)
with open(docpath) as f:
elements.append(yaml.load(f.read()))
return {"_default":
{idx: elem for idx, elem in enumerate(elements, start=1)}}
def write(self, data):
deleted = [c for c in self.read()["_default"].values()
if c not in data["_default"].values()]
self.delete(deleted)
for eid, cred in data["_default"].items():
dirname, filename = cred["name"], cred["login"] + self.extension
credpath = os.path.join(self.path, dirname, filename)
with mkdir_open(credpath, "w") as f:
f.write(yaml.dump(dict(cred), default_flow_style=False))
class Database(TinyDB):
def __init__(self, path, extension='.pass', storage=PasspieStorage):
self.path = path
PasspieStorage.extension = extension
super(Database, self).__init__(self.path, storage=storage)
def has_keys(self):
return os.path.exists(os.path.join(self.path, '.keys'))
def credential(self, fullname):
login, name = split_fullname(fullname)
return self.get((where("login") == login) & (where("name") == name))
def add(self, fullname, password, comment):
login, name = split_fullname(fullname)
if login is None:
logging.error('Cannot add credential with empty login. use "@<name>" syntax')
return None
credential = dict(fullname=fullname,
name=name,
login=login,
password=password,
comment=comment,
modified=datetime.now())
self.insert(credential)
return credential
def update(self, fullname, values):
values['fullname'] = make_fullname(values["login"], values["name"])
values['modified'] = datetime.now()
self.table().update(values, (where("fullname") == fullname))
def credentials(self, fullname=None):
if fullname:
login, name = split_fullname(fullname)
Credential = Query()
if login is None:
creds = self.search(Credential.name == name)
else:
creds = self.search((Credential.login == login) & (Credential.name == name))
else:
creds = self.all()
return sorted(creds, key=lambda x: x["name"] + x["login"])
def remove(self, fullname):
self.table().remove(where('fullname') == fullname)
def matches(self, regex):
Credential = Query()
credentials = self.search(
Credential.name.matches(regex) |
Credential.login.matches(regex) |
Credential.comment.matches(regex)
)
return sorted(credentials, key=lambda x: x["name"] + x["login"])
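# Illustrative note (not part of the original module): fullnames follow the
# "login@name" convention handled by split_fullname/make_fullname, e.g.
# db.credential("john@example.com") looks up login "john" under name "example.com".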
|
[
"marcwebbie@gmail.com"
] |
marcwebbie@gmail.com
|
11fad38dc34588ed44dd250c8b3bee034cee5107
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03495/s959914177.py
|
79888ecf7a1c1e5617d415a8a5f3fbe869a319b8
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
from collections import Counter
N,K=map(int,input().split())
A=list(map(int,input().split()))
c = Counter(A)
val = sorted(c.values())
if len(val) <= K:
print(0)
exit()
print(sum(val[:len(val)-K]))
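# Illustrative reasoning (not part of the original submission): to leave at
# most K distinct values, rewrite every occurrence of the len(val) - K rarest
# values, so the minimum number of rewrites is the sum of their counts.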
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
9728d3469911e999ed53abd170b3c8608947e880
|
caaf9046de59559bb92641c46bb8ab00f731cb46
|
/Configuration/Generator/python/Upsilon1SToMuMu_forSTEAM_13TeV_TuneCUETP8M1_cfi.py
|
eaeffad1236fe5b17d942a6e9bfb79db3a17feaa
|
[] |
no_license
|
neumeist/cmssw
|
7e26ad4a8f96c907c7373291eb8df205055f47f0
|
a7061201efe9bc5fa3a69069db037d572eb3f235
|
refs/heads/CMSSW_7_4_X
| 2020-05-01T06:10:08.692078
| 2015-01-11T22:57:32
| 2015-01-11T22:57:32
| 29,109,257
| 1
| 1
| null | 2015-01-11T22:56:51
| 2015-01-11T22:56:49
| null |
UTF-8
|
Python
| false
| false
| 3,453
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
source = cms.Source("EmptySource")
generator = cms.EDFilter("Pythia8GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(0),
filterEfficiency = cms.untracked.double(0.53),
pythiaHepMCVerbosity = cms.untracked.bool(False),
crossSection = cms.untracked.double(9090000.0),
comEnergy = cms.double(13000.0),
maxEventsToPrint = cms.untracked.int32(0),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'Bottomonium:states(3S1) = 553', # filter on 553 and prevents other onium states decaying to 553, so we should turn the others off
'Bottomonium:O(3S1)[3S1(1)] = 9.28',
'Bottomonium:O(3S1)[3S1(8)] = 0.15',
'Bottomonium:O(3S1)[1S0(8)] = 0.02',
'Bottomonium:O(3S1)[3P0(8)] = 0.02',
'Bottomonium:gg2bbbar(3S1)[3S1(1)]g = on',
'Bottomonium:gg2bbbar(3S1)[3S1(8)]g = on',
'Bottomonium:qg2bbbar(3S1)[3S1(8)]q = on',
'Bottomonium:qqbar2bbbar(3S1)[3S1(8)]g = on',
'Bottomonium:gg2bbbar(3S1)[1S0(8)]g = on',
'Bottomonium:qg2bbbar(3S1)[1S0(8)]q = on',
'Bottomonium:qqbar2bbbar(3S1)[1S0(8)]g = on',
'Bottomonium:gg2bbbar(3S1)[3PJ(8)]g = on',
'Bottomonium:qg2bbbar(3S1)[3PJ(8)]q = on',
'Bottomonium:qqbar2bbbar(3S1)[3PJ(8)]g = on',
'553:onMode = off', # ignore cross-section re-weighting (CSAMODE=6) since selecting wanted decay mode
'553:onIfAny = 13',
'PhaseSpace:pTHatMin = 20.',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters',
)
)
)
oniafilter = cms.EDFilter("PythiaFilter",
Status = cms.untracked.int32(2),
MaxEta = cms.untracked.double(1000.0),
MinEta = cms.untracked.double(-1000.0),
MinPt = cms.untracked.double(0.0),
ParticleID = cms.untracked.int32(553)
)
mumugenfilter = cms.EDFilter("MCParticlePairFilter",
Status = cms.untracked.vint32(1, 1),
MinPt = cms.untracked.vdouble(0.5, 0.5),
MinP = cms.untracked.vdouble(2.7, 2.7),
MaxEta = cms.untracked.vdouble(2.5, 2.5),
MinEta = cms.untracked.vdouble(-2.5, -2.5),
MinInvMass = cms.untracked.double(5.0),
MaxInvMass = cms.untracked.double(20.0),
ParticleCharge = cms.untracked.int32(-1),
ParticleID1 = cms.untracked.vint32(13),
ParticleID2 = cms.untracked.vint32(13)
)
ProductionFilterSequence = cms.Sequence(generator*oniafilter*mumugenfilter)
|
[
"you@somedomain.com"
] |
you@somedomain.com
|
0a3953d0402b818210f35ac3401f274eb0d96b78
|
cae8adc520ee71ffd9cfc82418152b4ec63f9302
|
/template_wsgi/demo1.py
|
98b1b0acbce69b38b641792d1f5dcb3850bfeb56
|
[] |
no_license
|
dong-c-git/WSGIServer
|
55111c04f4bbefe239949ddaea16c71221b7f795
|
1f0b58977e2a951f3c6dec335854dd9d6e31cdfd
|
refs/heads/master
| 2020-08-01T17:03:30.307962
| 2019-11-09T01:45:30
| 2019-11-09T01:45:30
| 211,054,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
#coding:utf-8
import time
def application(environ,start_response):
status = '200 OK'
response_headers = [('Content-Type','text/html')]
start_response(status,response_headers)
return str(environ)+'==Hello world from a simple WSGI application!-->%s\n'%time.ctime()
|
[
"dc111000@hotmail.com"
] |
dc111000@hotmail.com
|
f8202764eacbf21b84e1afab879c8f6bea7c9820
|
ec6f83a3636fdb0d6f2266c56b58ac294eb2a945
|
/ntut python/associationRule.py
|
5c174b680a9cb0dfb518b5d31898b1cfb5313f2c
|
[] |
no_license
|
jack20951948/Python-Learning
|
f65c2aacea6cbe61a8be2539f2959202546adb7d
|
d683790ba47b73c6360f5f804700c664d40777c9
|
refs/heads/main
| 2023-06-26T03:43:47.395088
| 2021-07-18T08:00:28
| 2021-07-18T08:00:28
| 387,111,521
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,601
|
py
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import networkx as nx
from apyori import apriori
# pip install apyori
from wordcloud import WordCloud
#pip install wordcloud
def testTensorflow():
hello = tf.constant('hello tensorflow!')
sess = tf.Session()
print("hello")
print(sess.run(hello))
#conda install -c conda-forge wordcloud
#pip install wordcloud
def wordCloud():
plt.figure(figsize=(9,6))
data=np.array([
['Milk','Bread','Apple'],
['Milk','Bread'],
['Milk','Bread','Apple', 'Banana'],
['Milk', 'Banana','Rice','Chicken'],
['Apple','Rice','Chicken'],
['Milk','Bread', 'Banana'],
['Rice','Chicken'],
['Bread','Apple', 'Chicken'],
['Bread','Chicken'],
['Apple', 'Banana']])
#convert the array to text
text_data=[]
for i in data:
for j in i:
text_data.append(j)
products=' '.join(map(str, text_data))
print(products)
wordcloud = WordCloud(relative_scaling = 1.0,stopwords = {}).generate(products)
plt.imshow(wordcloud)
plt.axis("off")
plt.show()
def draw(df):
plt.style.use('ggplot')
plt.figure(figsize=(9,6))
print(df.iloc[6:19][['items','support']]) # Only get items with two pair sets. They start from index 6 to 19
ar=(df.iloc[6:19]['items'])
G = nx.Graph()
G.add_edges_from(ar)
pos = nx.spring_layout(G)
nx.draw(G, pos, font_size=16, with_labels=False, edge_color='green',node_size=800,node_color=['red','green','blue','cyan','orange','magenta'])
for p in pos:
pos[p][1] += 0.07
nx.draw_networkx_labels(G, pos)
plt.show()
def simple_bar_chart(support,products):
labels=np.array(products)
colors = ['#008000','#808000','#FFFF00','#000000','#FF0000','#00FF00','#0000FF','#008080','#aa22ff','#aa22ff','#dd0022','#ff00cc','#eeaa22','#22bbaa','#C0C0C0']
y_pos = np.arange(len(labels))
x_pos = np.array(support)
plt.barh(y_pos, x_pos, color=colors, align='center' ,edgecolor='green')
plt.yticks(y_pos, labels)
plt.ylabel('Products',fontsize=18)
plt.xlabel('Support',fontsize=18)
plt.title('Consumer Buying Behaviour\n',fontsize=20)
plt.show()
def testApriori_s():
data=np.array([
['Milk','Bread','Apple'],
['Milk','Bread'],
['Milk','Bread','Apple', 'Banana'],
['Milk', 'Banana','Rice','Chicken'],
['Apple','Rice','Chicken'],
['Milk','Bread', 'Banana'],
['Rice','Chicken'],
['Bread','Apple', 'Chicken'],
['Bread','Chicken'],
['Apple', 'Banana']])
for i in data:
print(i)
print("\n\n")
result=list(apriori(data))
df=pd.DataFrame(result)
    df.to_csv("appriori_results.csv")  # Save to csv format for detailed view
print(df.head()) # Print the first 5 items
#print(df)
draw(df)
support=df.iloc[0:19]['support']*100
products=df.iloc[0:19]['items']
simple_bar_chart(support,products)
def testApriori():
records = []
store_data = pd.read_csv('e:\\Datasets\\store_data.csv', header=None)
#print(store_data)
print(store_data.head())
    # preprocessing
#convert our pandas dataframe into a list of lists
for i in range(0, 7501):
#records.append([str(store_data.values[i,j]) for j in range(0, 20)])
records.append([str(store_data.values[i,j]) for j in range(0, 20) if str(store_data.values[i,j]) != 'nan'])
# remove NaN value
#print(records)
association_rules = apriori(records, min_support=0.0045, min_confidence=0.2, min_lift=3, min_length=2)
    # min_length: at least 2 products in the rule
association_results = list(association_rules)
print(len(association_results))
#print(association_results)
print(association_results[0])
for item in association_results:
# first index of the inner list
# Contains base item and add item
pair = item[0]
items = [x for x in pair]
print("Rule: " + items[0] + " -> " + items[1])
#second index of the inner list
print("Support: " + str(item[1]))
#third index of the list located at 0th
#of the third index of the inner list
print("Confidence: " + str(item[2][0][2]))
print("Lift: " + str(item[2][0][3]))
print("=====================================")
def main():
testApriori()
#testApriori_s()
wordCloud()
main()
|
[
"j20951948@gmail.com"
] |
j20951948@gmail.com
|
fbee478ecc1dd477bdebf5a09cd472cb2d0ebc20
|
c42a085521cec895fac0021eb1638d6f077eadf7
|
/PYTHON_FUNDAMENTALS_May_August_2020/Exersice_Objects_And_Classes_26_06_2020/Storage.py
|
88a44072c4d4c8e1b26fab959fea06bf9c937ddf
|
[] |
no_license
|
vasil-panoff/Python_Fundamentals_SoftUni_May_2020
|
f645ce85efa6db047b52a8b63d411d2e5bd5bd9a
|
daf1a27ff1a4684d51cf875ee0a4c0706a1a4404
|
refs/heads/main
| 2023-01-06T22:20:30.151249
| 2020-11-03T22:56:24
| 2020-11-03T22:56:24
| 309,818,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 490
|
py
|
class Storage:
def __init__(self, capacity):
self.capacity = capacity
self.storage = []
def add_product(self, product):
if len(self.storage) < self.capacity:
self.storage.append(product)
def get_products(self):
return self.storage
storage = Storage(4)
storage.add_product("apple")
storage.add_product("banana")
storage.add_product("potato")
storage.add_product("tomato")
storage.add_product("bread")
print(storage.get_products())
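# Note (illustrative, not part of the original exercise file): capacity is 4,
# so the fifth add_product call ("bread") is silently ignored and this prints
# ['apple', 'banana', 'potato', 'tomato'].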
|
[
"vasil.panov@gmail.com"
] |
vasil.panov@gmail.com
|
0cd87e0d9eca96df30c68ee957e543ea4bf80730
|
08ee36e0bb1c250f7f2dfda12c1a73d1984cd2bc
|
/src/mnistk/networks/linearrelu_19.py
|
7b3a00c5cb35632a17f95048599dcdc9247a02b4
|
[] |
no_license
|
ahgamut/mnistk
|
58dadffad204602d425b18549e9b3d245dbf5486
|
19a661185e6d82996624fc6fcc03de7ad9213eb0
|
refs/heads/master
| 2021-11-04T07:36:07.394100
| 2021-10-27T18:37:12
| 2021-10-27T18:37:12
| 227,103,881
| 2
| 1
| null | 2020-02-19T22:07:24
| 2019-12-10T11:33:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,091
|
py
|
# -*- coding: utf-8 -*-
"""
linearrelu_19.py
:copyright: (c) 2019 by Gautham Venkatasubramanian.
:license: MIT
"""
import torch
from torch import nn
class LinearReLU_19(nn.Module):
def __init__(self):
nn.Module.__init__(self)
self.f0 = nn.Linear(in_features=784, out_features=75, bias=False)
self.f1 = nn.ReLU(inplace=False)
self.f2 = nn.Linear(in_features=75, out_features=43, bias=True)
self.f3 = nn.ReLU(inplace=False)
self.f4 = nn.Linear(in_features=43, out_features=34, bias=True)
self.f5 = nn.ReLU(inplace=False)
self.f6 = nn.Linear(in_features=34, out_features=10, bias=True)
self.f7 = nn.Linear(in_features=10, out_features=10, bias=False)
self.f8 = nn.LogSoftmax(dim=1)
def forward(self, *inputs):
x = inputs[0]
x = x.view(x.shape[0],784)
x = self.f0(x)
x = self.f1(x)
x = self.f2(x)
x = self.f3(x)
x = self.f4(x)
x = self.f5(x)
x = self.f6(x)
x = self.f7(x)
x = self.f8(x)
return x
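# Illustrative shape trace (comment added; not part of the original file):
# an MNIST batch (N, 1, 28, 28) is flattened to (N, 784), then mapped
# 784 -> 75 -> 43 -> 34 -> 10 -> 10, ending in log-probabilities over 10 classes.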
|
[
"41098605+ahgamut@users.noreply.github.com"
] |
41098605+ahgamut@users.noreply.github.com
|