Dataset schema (each record below lists these fields in this order, delimited by `|`; ⌀ marks a nullable column):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 3 to 288)
- content_id: string (length 40)
- detected_licenses: list (length 0 to 112)
- license_type: string (2 classes)
- repo_name: string (length 5 to 115)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (684 classes)
- visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
- revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
- committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
- github_id: int64 (4.92k to 681M, ⌀)
- star_events_count: int64 (0 to 209k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (22 classes)
- gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, ⌀)
- gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, ⌀)
- gha_language: string (147 classes)
- src_encoding: string (25 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (128 to 12.7k)
- extension: string (142 classes)
- content: string (length 128 to 8.19k)
- authors: list (length 1 to 1)
- author_id: string (length 1 to 132)
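The schema above is the shape the Hugging Face dataset viewer emits for a code dataset. As a minimal loading sketch (the dataset id "bigcode/the-stack" and the split name are illustrative assumptions, not stated by this dump, and `IterableDataset.take` needs a recent `datasets` release):

from datasets import load_dataset

# Stream records instead of downloading the whole dataset; the field names
# mirror the schema above. The dataset id is hypothetical.
ds = load_dataset("bigcode/the-stack", split="train", streaming=True)
for row in ds.take(1):
    print(row["repo_name"], row["path"], row["length_bytes"])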
a406e2a620162230e1e7cc273a3998b61cf94a92
|
74926d4145b9cd91bd040a7887d6baef838865d3
|
/autoencoder/metrics.py
|
3ad28ecd5bf3da0615cf6a06bd1ac56acd7e6403
|
[
"MIT"
] |
permissive
|
Elaine0/Anomaly-Detection
|
3837b602c6c8ba12fb2df7170292ebded893bbe0
|
45ab34235fd865006292a6645bbf2fc8bed9e959
|
refs/heads/master
| 2023-06-16T16:27:12.675954
| 2021-07-13T09:06:19
| 2021-07-13T09:06:19
| 282,931,372
| 0
| 0
| null | 2020-07-27T15:02:17
| 2020-07-27T15:02:17
| null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
import tensorflow as tf
from tensorflow.keras import backend as K  # tf.keras backend, so K.mean and tf.image ops share one graph
def ssim_metric(dynamic_range):
def ssim(imgs_true, imgs_pred):
return K.mean(tf.image.ssim(imgs_true, imgs_pred, dynamic_range), axis=-1)
return ssim
def mssim_metric(dynamic_range):
def mssim(imgs_true, imgs_pred):
return K.mean(
tf.image.ssim_multiscale(imgs_true, imgs_pred, dynamic_range), axis=-1
)
return mssim
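# Usage sketch (an assumption, not part of the original file): with images
# scaled to [0, 1], dynamic_range is 1.0, and the factory's closure is passed
# to Keras like any named metric:
# model.compile(optimizer="adam", loss="mse", metrics=[ssim_metric(1.0)])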
|
[
"google-dl-platform@googlegroups.com"
] |
google-dl-platform@googlegroups.com
|
c5b06264124b13023d9bd904c223f6a20f2da8ab
|
d49cfe38764aa35992ba5cf65655a6a45d9487c8
|
/旋转图像.py
|
1660cfd80a5903f2b51d956378ca51aa57d5f90a
|
[] |
no_license
|
getabear/leetcode
|
fc0797f664ab4052aa2635341f4bbe40b74ec2b8
|
4af6608166f2e4cdfcfb0bbb92133b4a0f90ea34
|
refs/heads/master
| 2021-07-15T11:05:23.049235
| 2020-11-15T12:57:14
| 2020-11-15T12:57:14
| 224,601,175
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
from typing import List
class Solution:
def rotate(self, matrix: List[List[int]]) -> None:
if not matrix:
return
        m, n = len(matrix), len(matrix[0])
        rotated = [[0] * n for _ in range(m)]  # 90-degree clockwise copy
        for h in range(m):
            for x in range(n):
                rotated[x][m - h - 1] = matrix[h][x]
        matrix[:] = rotated
return
a=Solution()
matrix=[[1,2,3],[4,5,6],[7,8,9]]
a.rotate(matrix)
|
[
"1874178998@qq.com"
] |
1874178998@qq.com
|
e97231aa59386188f10462edf9ebb223d62915b0
|
7d99c16d3222dd09d2358dac17d693deb7ed8dfd
|
/mwk_converters/mwk_to_sqlite3.py
|
a662445947c9bf94dfd56abd7f356d3172ba54b1
|
[] |
no_license
|
afcarl/mw_data_analysis_helpers
|
55c287daa06ef398e25ee9a8ecb290fc1f58c4dc
|
88e8eaae3b26f2ce7c482585414340c8e59f6ed2
|
refs/heads/master
| 2020-03-17T14:56:10.483526
| 2011-07-26T17:50:01
| 2011-07-26T17:50:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,031
|
py
|
#!/usr/bin/env python
import logging, os, sys
import sqlite3
import json
#import mwk
import mworks.data as mwk
def mwk_to_sqlite3(inFile, outFile, blacklist=[]):
m = mwk.MWKFile(inFile)
m.open()
# fix codec
codec = m.codec
codec[0], codec[1], codec[2], codec[3] = ('#codec', '#systemEvent', '#components', '#termination')
revCodec = {}
    for k, v in codec.items():
revCodec[v] = k
evs = m.get_events()
# open sqlite3 database
logging.debug("opening sqlite3 database: %s" % outFile)
conn = sqlite3.connect(outFile)
c = conn.cursor()
# # make table to add to data files table
# logging.debug("adding information to db")
# c.execute('''create table datafiles
# (animal text, day text)''')
# make table for new data
# tableName = os.path.splitext(os.path.basename(inFile))[0]
# cmd = "create table %s (code int, time int, value text)" % tableName
# c.execute(cmd)
c.execute('''create table events
(code int, time int, value text)''')
# make table for codec
# codecTableName = "%s_codec" % tableName
# cmd = "create table %s (code int, name text)" % codecTableName
# c.execute(cmd)
c.execute('''create table codec
(code int, name text)''')
# # add information to datafiles table
# animal = tableName.split('_')[0].lower()
# day = tableName.split('_')[1]
# c.execute('''insert into datafiles
# values(?,?)''', (animal, day))
# add codec to database
#codec = m.codec
# cmd = "insert into %s values(?,?)" % codecTableName
    for (k, v) in codec.items():
# c.execute(cmd,(k,v))
c.execute('''insert into codec values (?,?)''',(k,v))
# add events to database
logging.debug("adding events to db")
# cmd = "insert into %s values(?,?,?)" % tableName
for e in evs:
if codec[e.code] in blacklist:
continue
# c.execute(cmd, (e.code, e.time, json.dumps(e.value)))
c.execute('''insert into events
values(?,?,?)''', (e.code, e.time, json.dumps(e.value)))
logging.debug("cleaning up")
# close database connection
conn.commit()
c.close()
# close mworks file
m.close()
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
    eventsBlacklist = ['#announceCurrentState', '#codec', '#systemEvent', '#components', '#termination']  # events to skip when copying
# parse command line arguments
logging.debug("Parsing command line arguments")
if len(sys.argv) == 3:
inFile = sys.argv[1]
outFile = sys.argv[2]
elif len(sys.argv) == 2:
inFile = sys.argv[1]
outFile = '%s.sqlite3' % os.path.splitext(os.path.basename(inFile))[0]
else:
print "Usage: %s input_mwk_file (output_sqlite3_file)" % __file__
sys.exit(1)
# open up and read mwks file
logging.debug("opening and reading mwks file: %s" % inFile)
mwk_to_sqlite3(inFile, outFile, eventsBlacklist)
# exit nicely
sys.exit(0)
|
[
"graham@rowland.harvard.edu"
] |
graham@rowland.harvard.edu
|
30d71a0f811024388f46fa160a7fb991a7429ec3
|
76e9afdf16eabcc9e1a3facd308e56362112efc4
|
/20210222_ls/auto_chmod.py
|
84ae38d02246d5ec3b8c66e17a9fcebb764dc397
|
[] |
no_license
|
rerejii/pwb_work_2021
|
c65c5e787ad98b7d847cb63ebadc24a02f001e90
|
8ecfb2a98d9d396ed505ecc939e384cf6400412d
|
refs/heads/main
| 2023-03-30T10:43:18.115386
| 2021-03-24T05:38:41
| 2021-03-24T05:38:41
| 350,954,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
import time
import sys
import os
# args = sys.argv
path = '/nas-homes/krlabmember/hayakawa/binary/20210115'
while True:
os.chmod(path, 0o755)
time.sleep(10)
|
[
"hayakawa.shinya.kochi@gmail.com"
] |
hayakawa.shinya.kochi@gmail.com
|
7cd8898e0e3005975525306f1622e0d54d94136b
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/140_gui/pyqt_pyside/examples/PyQt5/Chapter13_Running Python Scripts on Android and iOS/demoMultipleSelection.py
|
96a2d86b0e8e60e19544ee49d3e46b244fb8b9fb
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 573
|
py
|
import android
app = android.Android()
app.dialogCreateAlert("Select your food items")
app.dialogSetMultiChoiceItems(['Pizza', 'Burger', 'Hot Dog'])
app.dialogSetPositiveButtonText('Done')
app.dialogShow()
app.dialogGetResponse()
response = app.dialogGetSelectedItems()
print(response)
selectedResult=response[1]
n=len(selectedResult)
print("You have selected following food items: ")
for i in range(0, n):
if selectedResult[i]==0:
print("Pizza")
elif selectedResult[i]==1:
print("Burger")
elif selectedResult[i]==2:
print("Hot Dog")
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
a3976a033b050da9d584f2ee555049bc57e48660
|
5fe194b477ba8af8acc846db2dfc961ad2a57013
|
/.metadata/.plugins/org.eclipse.core.resources/.history/5/c0e3511f98fb001611e7bcc2e467e756
|
55afe0616ecc9457ecdb14d257b97cdcb7536b33
|
[] |
no_license
|
robotanica/ExcavatorROS
|
6a129e302f0a288d198e3f720a78610e6333f7d3
|
db0bdf0f9988ebf910f832e22f46f679e936cdcb
|
refs/heads/master
| 2023-03-17T23:49:58.762691
| 2017-09-29T16:31:33
| 2017-09-29T16:31:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,974
|
#!/usr/bin/python
'''
Created on Feb 21, 2017
@author: yutak
'''
import rospy
import sensor_msgs.msg as smsg
import exp_excavator.msg as cmsg
class JoyMsgManager:
def __init__(self):
rospy.init_node('joy_msg_manager', anonymous=True)
self.test = rospy.get_param('~test', False)
self.joy_val_msg = cmsg.JointValues()
self.joy_val_msg.boom = 0.0
self.joy_val_msg.arm = 0.0
self.joy_val_msg.bucket = 0.0
self.joy_val_msg.swing = 0.0
self.sub_spd_com_bucket = rospy.Subscriber('joy_right', smsg.Joy,
self.cb_joy_right)
self.sub_joy_left = rospy.Subscriber('joy_left', smsg.Joy,
self.cb_joy_left)
self.pub_joy_values = rospy.Publisher('joy_values', cmsg.JointValues,
queue_size= 10)
def cb_joy_right(self, joy):
self.joy_val_msg.boom = joy.axes[1]
self.joy_val_msg.bucket = joy.axes[0]
if self.test:
rospy.loginfo('Boom Joystick Value:%f' %self.joy_val_msg.boom)
rospy.loginfo('Bucket Joystick Value:%f'
%self.joy_val_msg.bucket)
self.pub_joy_values.publish(self.joy_val_msg)
def cb_joy_left(self, joy):
self.joy_val_msg.arm = joy.axes[1]
self.joy_val_msg.swing = joy.axes[0]
if self.test:
rospy.loginfo('Arm Joystick Value:%f' %self.joy_val_msg.arm)
rospy.loginfo('Swing Joystick Value:%f'
%self.joy_val_msg.swing)
self.pub_joy_values.publish(self.joy_val_msg)
if __name__ == '__main__':
jm = JoyMsgManager()
try:
rospy.spin()
except rospy.ROSInterruptException:
pass
|
[
"filippos.sotiropoulos@gmail.com"
] |
filippos.sotiropoulos@gmail.com
|
|
44260eaf54611020edd327e194c75925b182f899
|
d1a5e15463623d75560659481e8277a516a8e280
|
/Website_Project/mysite/account/views.py
|
c6f37f13125fb41f2acb3bc18dc0c8c6497fd11f
|
[] |
no_license
|
LeeSungRyul/KD_AI
|
fac54863d6eb49fa8fe96dbb700e9279d2a1f0fb
|
560e4c2e777ab5f5ee6a2a31f17372f58ea483ef
|
refs/heads/master
| 2023-06-27T00:06:47.433793
| 2021-07-27T14:59:25
| 2021-07-27T14:59:25
| 339,369,278
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,931
|
py
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth.hashers import make_password, check_password
from .models import Account
# Create your views here.
def register(request):  # view that renders the signup page
if request.method == "GET":
return render(request, 'register.html')
elif request.method == "POST":
        userID = request.POST.get('userID', None)  # dictionary-style access
userPW = request.POST.get('userPW', None)
re_password = request.POST.get('re_password', None)
userMail = request.POST.get('userMail', None)
userPhone = request.POST.get('userPhone', None)
res_data = {}
if not (userID and userPW and re_password and userMail and userPhone):
res_data['error'] = "All values must be entered."
return render(request, 'register.html', res_data)
if userPW != re_password:
            # return HttpResponse('Passwords do not match.')
res_data['error'] = 'Confirm password does not match.'
return render(request, 'register.html', res_data)
else:
account = Account(userID=userID, userPW=make_password(userPW), userMail=userMail, userPhone=userPhone)
account.save()
return redirect('/login/')
# When /register/ is requested, respond with register.html: return render(request, 'register.html')
# res_data is mapped onto {{error}} in the html file, i.e. whichever if branch fires has its message string rendered
def login(request):
response_data = {}
if request.method == "GET":
return render(request, 'login.html')
elif request.method == "POST":
if '_login' in request.POST:
login_userID = request.POST.get('userID', None)
login_userPW = request.POST.get('userPW', None)
            # if either the ID or the password was left blank
if not (login_userID and login_userPW):
response_data['error'] = "All values must be entered."
else:
account = Account.objects.get(userID=login_userID)
                # fetch from the db: look up the row whose userID matches the one received via POST
if check_password(login_userPW, account.userPW):
request.session['account'] = account.userID
                    # a session is used just like a dictionary variable:
                    # the id that just logged in is stored under the session key 'account'
                    return redirect('/')  # go to the logged-in home screen
else:
response_data['error'] = "Invalid username or password."
return render(request, 'login.html', response_data)
elif '_register' in request.POST:
return redirect('/login/register/')
|
[
"airtrack03@naver.com"
] |
airtrack03@naver.com
|
f8720cb40162973a04d2461826c550fb6a66e68e
|
9672e0b45f72261c069aa8140a01e861b8f8db45
|
/query/migrations/0005_auto_20150526_1736.py
|
970fbb78131ebda45f4baae6af990612482e6cf4
|
[] |
no_license
|
KeleiAzz/SCRC_server
|
40882c0d5804b0488dd31f4d4db353616d318e48
|
669d45e4d5059cfc766a2a0852d23522d2af7d84
|
refs/heads/master
| 2020-04-06T10:18:40.817298
| 2016-12-04T06:07:53
| 2016-12-04T06:07:53
| 35,839,109
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,782
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('query', '0004_evidence'),
]
operations = [
migrations.AlterField(
model_name='evidence',
name='h1',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h10',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h11',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h12',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h13',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h14',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h15',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h16',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h17',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h18',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h19',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h2',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h20',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h21',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h22',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h23',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h3',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h4',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h5',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h6',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h7',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h8',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='evidence',
name='h9',
field=models.IntegerField(blank=True, null=True),
preserve_default=True,
),
]
|
[
"kgong@ncsu.edu"
] |
kgong@ncsu.edu
|
1049caf450a5e4ca1eedc4aec81d6fe28ca216eb
|
bca6e5728aa041d348482e4265fd2c6f1f4a67d3
|
/ucsmsdk/mometa/storage/StorageSasPort.py
|
6a17becbc5ebeaadbdf5cee82300824e1e91d16c
|
[
"Apache-2.0"
] |
permissive
|
psterdale/ucsmsdk
|
fc7c519ea1a43c5e77a015e3605bc2acfe3c917a
|
821b805c18ad7652a79d4f581f4695558f17e943
|
refs/heads/master
| 2020-12-03T08:11:54.382427
| 2017-06-28T12:46:52
| 2017-06-28T12:46:52
| 95,667,187
| 1
| 0
| null | 2017-06-28T12:33:27
| 2017-06-28T12:33:27
| null |
UTF-8
|
Python
| false
| false
| 3,873
|
py
|
"""This module contains the general information for StorageSasPort ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class StorageSasPortConsts:
LC_ALLOCATED = "allocated"
LC_AVAILABLE = "available"
LC_DEALLOCATED = "deallocated"
LC_REPURPOSED = "repurposed"
LINK_SPEED_1_5_GBPS = "1-5-gbps"
LINK_SPEED_12_GBPS = "12-gbps"
LINK_SPEED_3_GBPS = "3-gbps"
LINK_SPEED_6_GBPS = "6-gbps"
LINK_SPEED_DISABLED = "disabled"
LINK_SPEED_DOWN = "down"
LINK_SPEED_HOST_POWER_OFF = "host-power-off"
LINK_SPEED_UNKNOWN = "unknown"
LINK_SPEED_UNSUPPORTED_DEVICE = "unsupported-device"
class StorageSasPort(ManagedObject):
"""This is StorageSasPort class."""
consts = StorageSasPortConsts()
naming_props = set([u'id'])
mo_meta = MoMeta("StorageSasPort", "storageSasPort", "sas-port-[id]", VersionMeta.Version312b, "InputOutput", 0x3f, [], ["read-only"], [u'storageEnclosureLocalDiskConfig', u'storageLocalDisk'], [], ["Get"])
prop_meta = {
"address": MoPropertyMeta("address", "address", "string", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version312b, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"endpoint": MoPropertyMeta("endpoint", "endpoint", "uint", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version312b, MoPropertyMeta.NAMING, 0x8, None, None, None, [], ["0-4294967295"]),
"lc": MoPropertyMeta("lc", "lc", "string", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["allocated", "available", "deallocated", "repurposed"], []),
"link_descr": MoPropertyMeta("link_descr", "linkDescr", "string", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"link_speed": MoPropertyMeta("link_speed", "linkSpeed", "string", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["1-5-gbps", "12-gbps", "3-gbps", "6-gbps", "disabled", "down", "host-power-off", "unknown", "unsupported-device"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version312b, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"address": "address",
"childAction": "child_action",
"dn": "dn",
"endpoint": "endpoint",
"id": "id",
"lc": "lc",
"linkDescr": "link_descr",
"linkSpeed": "link_speed",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.address = None
self.child_action = None
self.endpoint = None
self.lc = None
self.link_descr = None
self.link_speed = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "StorageSasPort", parent_mo_or_dn, **kwargs)
|
[
"vijayvikrant84@gmail.com"
] |
vijayvikrant84@gmail.com
|
4b655608f8398692c28ca98e39291340429ff692
|
ba949e02c0f4a7ea0395a80bdc31ed3e5f5fcd54
|
/problems/greedy/Solution621.py
|
bbd433a5a095fd0e364fc666dd13252734d0de78
|
[
"MIT"
] |
permissive
|
akaliutau/cs-problems-python
|
6bc0a74064f6e9687fe58b13763da1fdf2e1f626
|
9b1bd8e3932be62135a38a77f955ded9a766b654
|
refs/heads/master
| 2023-05-11T22:19:06.711001
| 2021-06-04T11:14:42
| 2021-06-04T11:14:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,141
|
py
|
""" Given a characters array tasks, representing the tasks a CPU needs to do,
where each letter represents a different task. Tasks could be done in any
order. Each task is done in one unit of time. For each unit of time, the CPU
could complete either one task or just be idle.
However, there is a non-negative integer n that represents the cooldown
period between two same tasks (the same letter in the array); that is,
there must be at least n units of time between any two same tasks.
Return the least number of units of time that the CPU will take to finish
all the given tasks.
IDEA:
The total number of CPU intervals we need consists of busy and idle slots.
Number of busy slots is defined by the number of tasks to execute:
len(tasks). The problem is to compute a number of idle slots.
Maximum possible number of idle slots is defined by the frequency of the most
frequent task: idle_time <= (f_max - 1) * n.
Example:
ABCA with cooling n = 2 can be scheduled as A B C A: 4 slots in total, the
two A's sitting 3 slots apart, which satisfies the gap. In short: answer =
busy slots + idle slots.
"""
class Solution621:
    # Filled in following the IDEA above (the original left the class empty):
    # busy slots equal len(tasks), and idle slots are bounded by the frame
    # built around the most frequent task.
    def leastInterval(self, tasks, n):
        from collections import Counter
        freqs = Counter(tasks)
        f_max = max(freqs.values())
        # tasks that share the maximal frequency fill the final frame row
        n_max = sum(1 for f in freqs.values() if f == f_max)
        return max(len(tasks), (f_max - 1) * (n + 1) + n_max)
# Docstring example: Solution621().leastInterval(list("ABCA"), 2) -> 4
|
[
"aliaksei.kaliutau@gmail.com"
] |
aliaksei.kaliutau@gmail.com
|
46938072fdd88d92a0daec7ee0a0b4f408d355c2
|
1498148e5d0af365cd7fd16197174174a7fa9800
|
/t000766_2.py
|
d782134c3d7d1053b4073e2e8647e4a5474ab4d1
|
[] |
no_license
|
feiyanshiren/myAcm
|
59a2b80fe7e02787defcb152eee3eae26135322a
|
00c7082d5143ddf87aeeafbdb6ce29da46dc8a12
|
refs/heads/master
| 2023-09-01T12:12:19.866447
| 2023-09-01T09:09:56
| 2023-09-01T09:09:56
| 148,560,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,681
|
py
|
import time
time1 = time.time()
h01 = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
h1 = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
h2 = ["11", "22", "33", "44",
"55", "66", "77", "88", "99"]
hx3 = ["1x1", "2x2", "3x3", "4x4", "5x5",
"6x6", "7x7", "8x8", "9x9"]
hx4 = ["1xx1", "2xx2", "3xx3", "4xx4", "5xx5",
"6xx6", "7xx7", "8xx8", "9xx9"]
hx5 = ["1xyx1", "2xyx2", "3xyx3", "4xyx4", "5xyx5",
"6xyx6", "7xyx7", "8xyx8", "9xyx9"]
hx6 = ["1xyyx1", "2xyyx2", "3xyyx3", "4xyyx4", "5xyyx5",
"6xyyx6", "7xyyx7", "8xyyx8", "9xyyx9"]
h3 = []
h4 = []
h5 = []
h6 = []
hy5 = []
hy6 = []
for hx3_in in hx3:
for h in h01:
s = hx3_in.replace("x", h)
h3.append(s)
for hx4_in in hx4:
for h in h01:
s = hx4_in.replace("x", h)
h4.append(s)
for hx5_in in hx5:
for h in h01:
s = hx5_in.replace("x", h)
hy5.append(s)
for hx6_in in hx6:
for h in h01:
s = hx6_in.replace("x", h)
hy6.append(s)
for hy5_in in hy5:
for h in h01:
s = hy5_in.replace("y", h)
h5.append(s)
for hy6_in in hy6:
for h in h01:
s = hy6_in.replace("y", h)
h6.append(s)
h = h1 + h2 + h3 + h4 + h5 + h6
hh = []
for i in h:
d = str(int(i) ** 2)
k = str(int(i) ** 3)
dd = d[::-1]
kk = k[::-1]
if d == dd and k == kk:
hh.append(i)
hhh = []
ss = ""
k = 0
for h in hh:
if k == 5:
hhh.append(ss.strip())
ss = h + " "
k = 1
else:
ss = ss + h + " "
k = k + 1
hhh.append(ss.strip())
for i in hhh:
print(i)
print(time.time() - time1)
|
[
"feiyanshiren@163.com"
] |
feiyanshiren@163.com
|
923176b05b13547f26b54b29c28090ef780edb2a
|
017a57c810ad08ecff84652a252656afa3173e17
|
/odin/utils/shape_calculation.py
|
d2740e55a36f13457b42b10d7a8e3a26cad4ac51
|
[
"MIT"
] |
permissive
|
SmartArduino/odin
|
0189e0b71ccac311887f0fda6bafb96ca9c53a88
|
1706c91c2fbafd23018ce98bf87b3928935b2466
|
refs/heads/master
| 2021-01-17T08:22:48.073115
| 2017-03-04T12:15:03
| 2017-03-04T12:15:03
| 83,890,654
| 0
| 0
| null | 2017-03-04T12:07:20
| 2017-03-04T12:07:20
| null |
UTF-8
|
Python
| false
| false
| 7,382
|
py
|
from __future__ import print_function, division, absolute_import
from math import ceil
import numpy as np
# ===========================================================================
# Shape calculation for Pooling
# Contain code from theano: theano/tensor/signal/pool.py
# Copyright (c) 2008--2016, Theano Development Team
# ===========================================================================
def get_pool_output_shape(imgshape, ws, ignore_border=False,
strides=None, pad=None):
"""
Parameters
----------
imgshape : tuple, list, or similar of integer or scalar Theano variable
order: (samples, pool_dim1, pool_dim2, pool_dim3, ..., input_depth)
(i.e tensorflow-NHWC format)
ws : list or tuple of N ints
        Downsample factor over rows and columns.
ws indicates the pool region size.
ignore_border : bool
If ws doesn't divide imgshape, do we include an extra row/col/slice
of partial downsampling (False) or ignore it (True).
strides : list or tuple of N ints or None
Stride size, which is the number of shifts over rows/cols/slices to get the
next pool region. If stride is None, it is considered equal to ws
(no overlap on pooling regions).
pad : tuple of N ints or None
For each downsampling dimension, this specifies the number of zeros to
        add as padding on both sides. For 2D, with pad = (pad_h, pad_w), pad_h specifies the
size of the top and bottom margins, pad_w specifies the size of the left and
right margins. No padding is added if pad is None.
"""
# convert tensorflow shape to theano shape
imgshape = (imgshape[0], imgshape[-1]) + tuple(imgshape[1:-1])
    ndim = len(ws)
    if strides is None:
        strides = ws  # moved ahead of the pad handling: the 'same' branch divides by strides
    # check valid pad (list or tuple of int)
if isinstance(pad, str):
if 'valid' in pad.lower():
pad = (0,) * ndim
elif 'same' in pad.lower():
out_shape = tuple([int(ceil(float(i) / float(j)))
for i, j in zip(imgshape[-ndim:], strides)])
return (imgshape[0],) + imgshape[2:-ndim] + out_shape + (imgshape[1],)
def compute_out(v, downsample, stride):
if ignore_border:
if downsample == stride:
return v // stride
else:
out = (v - downsample) // stride + 1
return np.maximum(out, 0)
else:
if stride >= downsample:
return (v - 1) // stride + 1
else:
return max(0, (v - 1 - downsample + stride) // stride) + 1
    # ====== check input arguments ====== #
    if len(imgshape) < ndim:
        raise TypeError('imgshape must have at least {} dimensions'.format(ndim))
    if pad is None:
        pad = (0,) * ndim
patch_shape = tuple(imgshape[-ndim + i] + pad[i] * 2
for i in range(ndim))
out_shape = [compute_out(patch_shape[i], ws[i], strides[i])
for i in range(ndim)]
rval = tuple(imgshape[:-ndim]) + tuple(out_shape)
# convert theano shape to tensorflow shape
rval = (rval[0],) + rval[2:] + (rval[1],)
return rval
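# Worked example of the pooling arithmetic above (illustrative values, not
# from the original file): a batch of 8 NHWC feature maps of 32x32x3, pooled
# 2x2 with stride 2 and no padding. compute_out(32, 2, 2) gives 32 // 2 = 16:
#   get_pool_output_shape((8, 32, 32, 3), ws=(2, 2), ignore_border=True,
#                         strides=(2, 2))  # -> (8, 16, 16, 3)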
# ===========================================================================
# Shape calculation for Convolution
# Contain code from theano: theano/tensor/nnet/abstract_conv.py
# Copyright (c) 2008--2016, Theano Development Team
# ===========================================================================
def __get_conv_shape_1axis(image_shape, kernel_shape, border_mode,
subsample, dilation=1):
if None in [image_shape, kernel_shape, border_mode,
subsample, dilation]:
return None
# Implicit dilated kernel shape
dil_kernel_shape = (kernel_shape - 1) * dilation + 1
if isinstance(border_mode, str):
border_mode = border_mode.lower()
if border_mode == "half" or border_mode == "same":
pad = dil_kernel_shape // 2
elif border_mode == "full":
pad = dil_kernel_shape - 1
elif border_mode == "valid":
pad = 0
else:
pad = border_mode
if pad < 0:
raise ValueError("border_mode must be >= 0")
# In case of symbolic shape, we want to build the smallest graph
# (image_shape + 2 * pad - dil_kernel_shape) // subsample + 1
if pad == 0:
out_shp = (image_shape - dil_kernel_shape)
else:
out_shp = (image_shape + 2 * pad - dil_kernel_shape)
if subsample != 1:
out_shp = out_shp // subsample
out_shp = out_shp + 1
# ====== get exact same border_mode for theano ====== #
if (border_mode == 'half' or border_mode == 'same') and \
kernel_shape % 2 == 0:
out_shp = (image_shape + subsample - 1) // subsample
return out_shp
def get_conv_output_shape(image_shape, kernel_shape,
border_mode, subsample,
filter_dilation=None):
"""
This function compute the output shape of convolution operation.
original code: abstract_conv.py (theano)
Parameters
----------
image_shape: tuple of int (symbolic or numeric) corresponding to the input
order: (samples, conv_dim1, conv_dim2, conv_dim3, ..., input_depth)
(i.e tensorflow-NHWC format)
kernel_shape: tuple of int (symbolic or numeric) corresponding to the
order: (kernel_dim1, kernel_dim2, kernel_dim3, ..., input_depth, out_depth)
(i.e tensorflow-NHWC format)
border_mode: string, int (symbolic or numeric) or tuple of int (symbolic
or numeric). If it is a string, it must be 'valid', 'half' or 'full'.
If it is a tuple, its two (or three) elements respectively correspond
to the padding on height and width (and possibly depth) axis.
    subsample: tuple of int (symbolic or numeric). Its two or three elements
        respectively correspond to the subsampling on height and width (and
        possibly depth) axis.
filter_dilation: tuple of int (symbolic or numeric). Its two elements
correspond respectively to the dilation on height and width axis.
Returns
-------
    output_shape: tuple of int corresponding to the output image shape. Its
        elements correspond respectively to: batch size, height and width (and
        possibly depth) of the image, and number of output channels, matching
        the tensorflow-NHWC tuple returned below. None where undefined.
"""
# ====== convert tensorflow shape to theano shape ====== #
image_shape = (image_shape[0], image_shape[-1]) + tuple(image_shape[1:-1])
kernel_shape = (kernel_shape[-1], kernel_shape[-2]) + tuple(kernel_shape[:-2])
# ====== infer shape ====== #
bsize, imshp = image_shape[0], image_shape[2:]
nkern, kshp = kernel_shape[0], kernel_shape[2:]
if filter_dilation is None:
filter_dilation = np.ones(len(subsample), dtype='int')
if isinstance(border_mode, tuple):
out_shp = tuple(__get_conv_shape_1axis(
imshp[i], kshp[i], border_mode[i],
subsample[i], filter_dilation[i]) for i in range(len(subsample)))
else:
out_shp = tuple(__get_conv_shape_1axis(
imshp[i], kshp[i], border_mode,
subsample[i], filter_dilation[i]) for i in range(len(subsample)))
# ====== convert theano to tensorflow shape ====== #
return (bsize, ) + out_shp + (nkern,)
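# Worked example (illustrative values): a 32x32x3 NHWC input with a 3x3
# kernel producing 16 channels, 'valid' padding and unit strides gives
# (32 - 3) + 1 = 30 along each spatial axis:
#   get_conv_output_shape((8, 32, 32, 3), (3, 3, 3, 16), 'valid', (1, 1))
#   # -> (8, 30, 30, 16)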
|
[
"nickartin13@gmail.com"
] |
nickartin13@gmail.com
|
a3cee10d2c3fa7bcdffc20880585935069d651fc
|
4910c0f3d03935fc8ee03f1e9dc20dfdb2c7c04b
|
/Codigos estudiantes por lenguaje/PY/Bryann Valderrama/Algoritmos de Busqueda/DifferencePairSearch.py
|
6c885a6c2d463ae002f1c7a54ec826b5b9e9f0a1
|
[] |
no_license
|
roca12/gpccodes
|
ab15eeedc0cadc0735651262887b44f1c2e65b93
|
aa034a3014c6fb879ec5392c51f9714bdc5b50c2
|
refs/heads/master
| 2023-02-01T13:49:27.563662
| 2023-01-19T22:50:58
| 2023-01-19T22:50:58
| 270,723,328
| 3
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 775
|
py
|
'''Given an array and a number n, check whether a pair exists whose
difference is n. The two-pointer sweep below assumes the array is sorted
(sorting is what the stated bound accounts for).
- Time complexity: O(n log n)
'''
from sys import stdin, stdout
rl = stdin.readline
wr = stdout.write
def findPair(arr, n):
    size = len(arr)
    i, j = 0, 1
    found = False
    while i < size and j < size:
        if i != j and arr[j] - arr[i] == n:
            wr(f'Pair found: {arr[i]} - {arr[j]}\n')
            found = True
            # return True  # to stop at the first pair
            i += 1  # keep sweeping to report every pair
            j += 1
        elif arr[j] - arr[i] < n:
            j += 1
        else:
            i += 1
    if not found:  # only report failure when no pair was printed
        wr('Pair not found\n')
    return found
arr = list(map(int, rl().split()))  # 1 2 3 4 5 6 7
n = int(rl())  # 5
findPair(arr, n)  # 1 - 6 | 2 - 7
|
[
"noreply@github.com"
] |
roca12.noreply@github.com
|
804b09be82a5890f8223579c5cca30c08fbd1e24
|
7ae32748fb910d2542e35c57543fc89f98cd2b1d
|
/tests/runtime/runtime.py
|
abd1a564bef4b7fae1348ad52b7bc9b326046667
|
[
"Apache-2.0"
] |
permissive
|
sanjaymsh/dtfabric
|
451c87d987f438fccfbb999079d2f55d01650b68
|
9e216f90b70d8a3074b2125033e0773e3e482355
|
refs/heads/master
| 2022-12-19T09:13:02.370724
| 2020-09-27T05:11:25
| 2020-09-27T05:11:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,589
|
py
|
# -*- coding: utf-8 -*-
"""Tests for the run-time object."""
from __future__ import unicode_literals
import unittest
from dtfabric.runtime import runtime
from tests import test_lib
class StructureValuesClassFactoryTest(test_lib.BaseTestCase):
"""Structure values class factory tests."""
# pylint: disable=protected-access
@test_lib.skipUnlessHasTestFile(['structure.yaml'])
def testCreateClassTemplate(self):
"""Tests the _CreateClassTemplate function."""
definitions_file = self._GetTestFilePath(['structure.yaml'])
definitions_registry = self._CreateDefinitionRegistryFromFile(
definitions_file)
data_type_definition = definitions_registry.GetDefinitionByName('point3d')
class_template = runtime.StructureValuesClassFactory._CreateClassTemplate(
data_type_definition)
self.assertIsNotNone(class_template)
# TODO: implement error conditions.
def testIsIdentifier(self):
"""Tests the _IsIdentifier function."""
result = runtime.StructureValuesClassFactory._IsIdentifier('valid')
self.assertTrue(result)
result = runtime.StructureValuesClassFactory._IsIdentifier('_valid')
self.assertTrue(result)
result = runtime.StructureValuesClassFactory._IsIdentifier('valid1')
self.assertTrue(result)
result = runtime.StructureValuesClassFactory._IsIdentifier('')
self.assertFalse(result)
result = runtime.StructureValuesClassFactory._IsIdentifier('0invalid')
self.assertFalse(result)
result = runtime.StructureValuesClassFactory._IsIdentifier('in-valid')
self.assertFalse(result)
def testValidateDataTypeDefinition(self):
"""Tests the _ValidateDataTypeDefinition function."""
definitions_file = self._GetTestFilePath(['structure.yaml'])
definitions_registry = self._CreateDefinitionRegistryFromFile(
definitions_file)
data_type_definition = definitions_registry.GetDefinitionByName('point3d')
runtime.StructureValuesClassFactory._ValidateDataTypeDefinition(
data_type_definition)
# TODO: implement error conditions.
def testCreateClass(self):
"""Tests the CreateClass function."""
definitions_file = self._GetTestFilePath(['structure.yaml'])
definitions_registry = self._CreateDefinitionRegistryFromFile(
definitions_file)
data_type_definition = definitions_registry.GetDefinitionByName('point3d')
structure_values_class = runtime.StructureValuesClassFactory.CreateClass(
data_type_definition)
self.assertIsNotNone(structure_values_class)
if __name__ == '__main__':
unittest.main()
|
[
"joachim.metz@gmail.com"
] |
joachim.metz@gmail.com
|
7b964f61670ccb5db77a15f8f6c355bc59266f51
|
e8f88fa5c7ca0263be5958d85a36b855976d4b0f
|
/LAB_EXAM_QUESTIONS/Solutions/string_apps/string_operation.py
|
5f8da9ca2b6b403bbd4ca01c630c4963429c11e9
|
[] |
no_license
|
sxb42660/MachineLearning_Fall2019
|
67bb471e79608b17a57ac1fabc9f6de1e455a015
|
b256a6961d30918611ecbda6961d5938b1291864
|
refs/heads/master
| 2022-07-13T10:50:46.646541
| 2020-05-15T18:59:19
| 2020-05-15T18:59:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,143
|
py
|
'''
Question 1:
Write a program that finds the longest substring without repeating characters in a string read from the console.
Sample Input: 'ababcdxa'
Sample Output: abcdx
'''
class StringOperations:
def longest_substring(self, input_string):
temp_string = ""
longest_substring = ""
char_list = []
for a in input_string:
if a in char_list:
longest_substring = self.clean_list(char_list, a, temp_string, longest_substring)
char_list.append(a)
if a not in char_list:
char_list.append(a)
for a in char_list:
temp_string += a
if len(longest_substring) < len(temp_string):
longest_substring = temp_string
print(longest_substring)
def clean_list(self, input_list, char, temp_string, longest_substring):
for a in input_list:
temp_string += a
for i in range(input_list.index(char) + 1):
del input_list[0]
if len(longest_substring) < len(temp_string):
longest_substring = temp_string
return longest_substring
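# An alternative sliding-window sketch of the same idea (an addition, not
# part of the original solution): keep the window start just past the
# previous occurrence of the current character.
def longest_unique_substring(s):
    start, best = 0, ""
    last_seen = {}
    for i, ch in enumerate(s):
        if ch in last_seen and last_seen[ch] >= start:
            start = last_seen[ch] + 1  # drop the repeated prefix
        last_seen[ch] = i
        if i - start + 1 > len(best):
            best = s[start:i + 1]
    return best
# longest_unique_substring('ababcdxa') -> 'abcdx'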
|
[
"sivakumar.umkc.fall2019@gmail.com"
] |
sivakumar.umkc.fall2019@gmail.com
|
c35db6386f83d6038856651b1a5d0577fc8afc98
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-dcs/huaweicloudsdkdcs/v2/model/list_bigkey_scan_tasks_response.py
|
a6bf9b01d77cf1b7325ac90267c21449802d43a3
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,639
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListBigkeyScanTasksResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str',
'count': 'int',
'records': 'list[RecordsResponse]'
}
attribute_map = {
'instance_id': 'instance_id',
'count': 'count',
'records': 'records'
}
def __init__(self, instance_id=None, count=None, records=None):
"""ListBigkeyScanTasksResponse - a model defined in huaweicloud sdk"""
super(ListBigkeyScanTasksResponse, self).__init__()
self._instance_id = None
self._count = None
self._records = None
self.discriminator = None
if instance_id is not None:
self.instance_id = instance_id
if count is not None:
self.count = count
if records is not None:
self.records = records
@property
def instance_id(self):
"""Gets the instance_id of this ListBigkeyScanTasksResponse.
        Instance ID
:return: The instance_id of this ListBigkeyScanTasksResponse.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this ListBigkeyScanTasksResponse.
        Instance ID
:param instance_id: The instance_id of this ListBigkeyScanTasksResponse.
:type: str
"""
self._instance_id = instance_id
@property
def count(self):
"""Gets the count of this ListBigkeyScanTasksResponse.
        Total count
:return: The count of this ListBigkeyScanTasksResponse.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this ListBigkeyScanTasksResponse.
        Total count
:param count: The count of this ListBigkeyScanTasksResponse.
:type: int
"""
self._count = count
@property
def records(self):
"""Gets the records of this ListBigkeyScanTasksResponse.
        List of big-key analysis records
:return: The records of this ListBigkeyScanTasksResponse.
:rtype: list[RecordsResponse]
"""
return self._records
@records.setter
def records(self, records):
"""Sets the records of this ListBigkeyScanTasksResponse.
        List of big-key analysis records
:param records: The records of this ListBigkeyScanTasksResponse.
:type: list[RecordsResponse]
"""
self._records = records
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListBigkeyScanTasksResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
62ec98c11c353c33219211cd44afe625c192ecf4
|
09cead98874a64d55b9e5c84b369d3523c890442
|
/py200620_python1_chen/py200703_06/quiz3_chen.py
|
037893db6af7902ecea02b99d135e9937ab38ff1
|
[] |
no_license
|
edu-athensoft/stem1401python_student
|
f12b404d749286036a090e941c0268381ce558f8
|
baad017d4cef2994855b008a756758d7b5e119ec
|
refs/heads/master
| 2021-08-29T15:01:45.875136
| 2021-08-24T23:03:51
| 2021-08-24T23:03:51
| 210,029,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
"""
quiz 2.
date: 2020-07-03
student name: QiJun Chen
"""
"""
q8.
your answer:
"""
"""
q7.
your answer:
"""
"""
q6.
your answer:
a
"""
"""
q5.
your answer:
"""
"""
q4.
your answer:
"""
"""
q3.
your answer: -
a = 1 ; b = 2
"""
a = 1
b = 2
a = 1 ; b = 2
# a = 1 b = 2
"""
q2.
your answer:b
b, e, f, g
"""
"""
q1.
your answer:d
c and d
"""
|
[
"lada314@gmail.com"
] |
lada314@gmail.com
|
71d1e1bd7fd8420c085463ad1438045a67a185a2
|
c88aa1d1f85d58226015510537153daa73358dce
|
/13/ex3.py
|
350553daddfe47db637c2269b69f59f65552c088
|
[] |
no_license
|
kmollee/2014_fall_cp
|
e88ca3acf347a9f49c8295690e4ef81c828cec6b
|
fff65200333af8534ce23da8bdb97ed904cc71dc
|
refs/heads/master
| 2021-01-01T17:56:33.442405
| 2015-01-07T10:44:32
| 2015-01-07T10:44:32
| 24,130,641
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,123
|
py
|
# coding: utf-8
# The line above declares the encoding used by this source file
# import the cherrypy module
import cherrypy
# import Python's built-in os module
import os
# The Guess class follows; the use of object means Guess inherits all of
# object's features, including its methods and attributes
class Guess(object):
    # cherrypy.expose, the decorator starting with @, marks the member method
    # that follows as directly reachable by users via a URL
    @cherrypy.expose
    # index is the default member method of every CherryPy class: when a
    # user's request names no method, the system runs index first
    # A method taking self is a member method; Python passes the object's
    # state between member methods through this self
    def index(self, name="John"):
        return "hello, " + name
    @cherrypy.expose
    def saygoodbye(self, name="John"):
        return "goodbye," + name
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
    # running on OpenShift
    application = cherrypy.Application(Guess())
else:
    # running locally
    cherrypy.quickstart(Guess())
|
[
"10073105@gm.nfu.edu.tw"
] |
10073105@gm.nfu.edu.tw
|
7682f8e46a7452dbb09d77d81b83c9ddd544deee
|
a3c662a5eda4e269a8c81c99e229879b946a76f6
|
/.venv/lib/python3.7/site-packages/pylint/test/input/func_bug113231.py
|
6334ff9c8ff8d817b27865e568d5dba02d72af51
|
[
"MIT"
] |
permissive
|
ahmadreza-smdi/ms-shop
|
0c29da82c58b243507575672bbc94fb6e8068aeb
|
65ba3f3061e2ac5c63115b08dadfe7d67f645fb6
|
refs/heads/master
| 2023-04-27T19:51:34.858182
| 2019-11-24T20:57:59
| 2019-11-24T20:57:59
| 223,616,552
| 6
| 2
|
MIT
| 2023-04-21T20:51:21
| 2019-11-23T16:09:03
|
Python
|
UTF-8
|
Python
| false
| false
| 605
|
py
|
# pylint: disable=E1101
# pylint: disable=C0103
# pylint: disable=R0903, useless-object-inheritance, unnecessary-pass
"""test bugfix for #113231 in logging checker
"""
from __future__ import absolute_import
# Muck up the names in an effort to confuse...
import logging as renamed_logging
__revision__ = ''
class Logger(object):
"""Fake logger"""
pass
logger = renamed_logging.getLogger(__name__)
fake_logger = Logger()
# Statements that should be flagged:
renamed_logging.warning('%s, %s' % (4, 5))
logger.warning('%s' % 5)
# Statements that should not be flagged:
fake_logger.warn('%s' % 5)
|
[
"ahmadreza.smdi@gmail.com"
] |
ahmadreza.smdi@gmail.com
|
9a70ed43d1cd64c0b0ca1d2c6fd5864c04128087
|
14373275670c1f3065ce9ae195df142146e2c1a4
|
/stubs/influxdb-client/influxdb_client/domain/bucket_retention_rules.pyi
|
48fc2554304ebe4cd9f9322e99473fb4876e26ed
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
sobolevn/typeshed
|
eb7af17c06a9722f23c337e6b9a4726223155d58
|
d63a82640390a9c130e0fe7d409e8b0b836b7c31
|
refs/heads/master
| 2023-08-04T05:59:29.447015
| 2023-06-14T21:27:53
| 2023-06-14T21:27:53
| 216,265,622
| 2
| 0
|
Apache-2.0
| 2022-02-08T10:40:53
| 2019-10-19T20:21:25
|
Python
|
UTF-8
|
Python
| false
| false
| 876
|
pyi
|
from _typeshed import Incomplete
class BucketRetentionRules:
openapi_types: Incomplete
attribute_map: Incomplete
discriminator: Incomplete
def __init__(
self,
type: str = "expire",
every_seconds: Incomplete | None = None,
shard_group_duration_seconds: Incomplete | None = None,
) -> None: ...
@property
def type(self): ...
@type.setter
def type(self, type) -> None: ...
@property
def every_seconds(self): ...
@every_seconds.setter
def every_seconds(self, every_seconds) -> None: ...
@property
def shard_group_duration_seconds(self): ...
@shard_group_duration_seconds.setter
def shard_group_duration_seconds(self, shard_group_duration_seconds) -> None: ...
def to_dict(self): ...
def to_str(self): ...
def __eq__(self, other): ...
def __ne__(self, other): ...
|
[
"noreply@github.com"
] |
sobolevn.noreply@github.com
|
1160913b4e15aef699a5ac91d1ceb88cdfc89fbd
|
a6ed990fa4326c625a2a02f0c02eedf758ad8c7b
|
/meraki/sdk/python/getNetworkMerakiAuthUser.py
|
dd485a53b639bbcae0a844de8d441be562d0bd1c
|
[] |
no_license
|
StevenKitavi/Meraki-Dashboard-API-v1-Documentation
|
cf2352976c6b6c00c17a5f6442cedf0aeed46c22
|
5ed02a7def29a2ce455a3f2cfa185f76f44789f5
|
refs/heads/main
| 2023-03-02T08:49:34.846055
| 2021-02-05T10:31:25
| 2021-02-05T10:31:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
import meraki
# Defining your API key as a variable in source code is not recommended
API_KEY = '6bec40cf957de430a6f1f2baa056b99a4fac9ea0'
# Instead, use an environment variable as shown under the Usage section
# @ https://github.com/meraki/dashboard-api-python/
dashboard = meraki.DashboardAPI(API_KEY)
network_id = 'L_646829496481105433'
meraki_auth_user_id = ''
response = dashboard.networks.getNetworkMerakiAuthUser(
network_id, meraki_auth_user_id
)
print(response)
|
[
"shiychen@cisco.com"
] |
shiychen@cisco.com
|
c67931ef41e293a2d99a642e5d4acb3c14fba88e
|
01b759bfa841e601bebb560d49f7b33add6a6756
|
/sources/listen/liste3.py
|
eff7b61c7cedb3e01bc4c845708d487337d11c6a
|
[
"MIT"
] |
permissive
|
kantel/python-schulung
|
dd5469d77b48da5ee13d240ca54632c8191e4e27
|
c319125c4a6f8479aff5ca5e66f3bbfbf48eb22c
|
refs/heads/master
| 2021-01-21T15:21:52.719327
| 2018-09-23T16:28:12
| 2018-09-23T16:28:12
| 95,450,919
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
fruits = ["Apple", "Tomato", "Banana", "Orange", "Lemon"]
print(fruits)
for i in range(len(fruits) - 1, -1, -1):
if fruits[i] == "Banana":
fruits.pop(i)
print(fruits)
|
[
"joerg@kantel.de"
] |
joerg@kantel.de
|
7f0dbdd8bbddfe74ffae5412b96cb85bfa3d079e
|
33f805792e79a9ef1d577699b983031521d5b6c9
|
/tapiriik/web/templatetags/displayutils.py
|
8d12cc795fab8e114bb4aa56ce04880e311873f4
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
cpfair/tapiriik
|
0dce9599400579d33acbbdaba16806256270d0a3
|
c67e9848e67f515e116bb19cd4dd479e8414de4d
|
refs/heads/master
| 2023-08-28T10:17:11.070324
| 2023-07-25T00:59:33
| 2023-07-25T00:59:33
| 7,812,229
| 1,519
| 343
|
Apache-2.0
| 2022-10-24T16:52:34
| 2013-01-25T02:43:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,560
|
py
|
from django import template
from django.utils.timesince import timesince
from datetime import datetime, date
import json
register = template.Library()
@register.filter(name="utctimesince")
def utctimesince(value):
if not value:
return ""
return timesince(value, now=datetime.utcnow())
@register.filter(name="fractional_hour_duration")
def fractional_hour_duration(value):
if value is None:
return ""
return "%2.f hours" % (value / 60 / 60)
@register.filter(name="format_fractional_percentage")
def fractional_percentage(value):
try:
return "%d%%" % round(value * 100)
except:
return "NaN"
@register.filter(name="format_meters")
def meters_to_kms(value):
try:
return round(value / 1000)
except:
return "NaN"
@register.filter(name="format_daily_meters_hourly_rate")
def meters_per_day_to_km_per_hour(value):
try:
return (value / 24) / 1000
except:
return "0"
@register.filter(name="format_seconds_minutes")
def meters_to_kms(value):
try:
return round(value / 60, 3)
except:
return "NaN"
@register.filter(name='json')
def jsonit(obj):
dthandler = lambda obj: obj.isoformat() if isinstance(obj, datetime) or isinstance(obj, date) else None
return json.dumps(obj, default=dthandler)
@register.filter(name='dict_get')
def dict_get(tdict, key):
if type(tdict) is not dict:
tdict = tdict.__dict__
return tdict.get(key, None)
@register.filter(name='format')
def format(format, var):
return format.format(var)
@register.simple_tag
def stringformat(value, *args):
return value.format(*args)
@register.filter(name="percentage")
def percentage(value, *args):
if not value:
return "NaN"
try:
return str(round(float(value) * 100)) + "%"
except ValueError:
return value
def do_infotip(parser, token):
tagname, infotipId = token.split_contents()
nodelist = parser.parse(('endinfotip',))
parser.delete_first_token()
return InfoTipNode(nodelist, infotipId)
class InfoTipNode(template.Node):
def __init__(self, nodelist, infotipId):
self.nodelist = nodelist
self.infotipId = infotipId
def render(self, context):
hidden_infotips = context.get('hidden_infotips', None)
if hidden_infotips and self.infotipId in hidden_infotips:
return ""
output = self.nodelist.render(context)
return "<p class=\"infotip\" id=\"%s\">%s</p>" % (self.infotipId, output)
register.tag("infotip", do_infotip)
|
[
"cpf@cpfx.ca"
] |
cpf@cpfx.ca
|
f1aeb93cd0ccc135f2f13a0e71519123c29394e4
|
32226e72c8cbaa734b2bdee081c2a2d4d0322702
|
/experiments/ashvin/rss/pusher1/scale/rl.py
|
fe1b316d1274746e7760e94c9705a061398925ae
|
[
"MIT"
] |
permissive
|
Asap7772/rail-rl-franka-eval
|
2b1cbad7adae958b3b53930a837df8a31ab885dc
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
refs/heads/master
| 2022-11-15T07:08:33.416025
| 2020-07-12T22:05:32
| 2020-07-12T22:05:32
| 279,155,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,316
|
py
|
from railrl.launchers.experiments.murtaza.multiworld import her_td3_experiment
import railrl.misc.hyperparameter as hyp
from multiworld.envs.mujoco.cameras import sawyer_pusher_camera_upright_v2
from multiworld.envs.mujoco.sawyer_xyz.sawyer_push_and_reach_env import (
SawyerPushAndReachXYEnv
)
from multiworld.envs.mujoco.sawyer_xyz.sawyer_push_multiobj import SawyerMultiobjectEnv
from railrl.launchers.launcher_util import run_experiment
from railrl.launchers.arglauncher import run_variants
import numpy as np
if __name__ == "__main__":
# noinspection PyTypeChecker
x_low = -0.2
x_high = 0.2
y_low = 0.5
y_high = 0.7
t = 0.03
variant = dict(
algo_kwargs=dict(
base_kwargs=dict(
num_epochs=501,
num_steps_per_epoch=1000,
num_steps_per_eval=1000,
max_path_length=100,
num_updates_per_env_step=4,
batch_size=128,
discount=0.99,
min_num_steps_before_training=4000,
reward_scale=1.0,
render=False,
collection_mode='online',
tau=1e-2,
parallel_env_params=dict(
num_workers=1,
),
),
her_kwargs=dict(
observation_key='state_observation',
desired_goal_key='state_desired_goal',
),
td3_kwargs=dict(),
),
replay_buffer_kwargs=dict(
max_size=int(1E6),
fraction_goals_rollout_goals=0.1,
fraction_goals_env_goals=0.5,
ob_keys_to_save=[],
),
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
algorithm='HER-TD3',
version='normal',
es_kwargs=dict(
max_sigma=.2,
),
exploration_type='ou',
observation_key='state_observation',
desired_goal_key='state_desired_goal',
init_camera=sawyer_pusher_camera_upright_v2,
do_state_exp=True,
save_video=True,
imsize=84,
snapshot_mode='gap_and_last',
snapshot_gap=50,
env_class=SawyerMultiobjectEnv,
env_kwargs=dict(
num_objects=1,
preload_obj_dict=[
dict(color2=(0.1, 0.1, 0.9)),
],
),
num_exps_per_instance=1,
region="us-west-2",
)
search_space = {
'seedid': range(5),
'algo_kwargs.base_kwargs.num_updates_per_env_step': [4, ],
'replay_buffer_kwargs.fraction_goals_rollout_goals': [0.1, ],
'replay_buffer_kwargs.fraction_goals_env_goals': [0.5, ],
'env_kwargs.action_repeat': [1, 5, 25],
'algo_kwargs.base_kwargs.max_path_length': [10, 20, 100],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
# n_seeds = 1
# mode = 'local'
# exp_prefix = 'test'
n_seeds = 1
mode = 'ec2'
exp_prefix = 'sawyer_pusher_state_final'
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(her_td3_experiment, variants, run_id=0)
|
[
"asap7772@berkeley.edu"
] |
asap7772@berkeley.edu
|
0c7e08fb553b03c40e35d8862537c388fe27ad46
|
0e25329bb101eb7280a34f650f9bd66ed002bfc8
|
/vendor/sat-solvers/simplesat/repository.py
|
e544bdb4aa6f264f1cf16b9c2b4d754e4b14d0f6
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
enthought/enstaller
|
2a2d433a3b83bcf9b4e3eaad59d952c531f36566
|
9c9f1a7ce58358b89352f4d82b15f51fbbdffe82
|
refs/heads/master
| 2023-08-08T02:30:26.990190
| 2016-01-22T17:51:35
| 2016-01-22T17:51:35
| 17,997,072
| 3
| 4
| null | 2017-01-13T19:22:10
| 2014-03-21T23:03:58
|
Python
|
UTF-8
|
Python
| false
| false
| 3,337
|
py
|
from __future__ import absolute_import
import bisect
import collections
import operator
import six
from .errors import NoPackageFound
class Repository(object):
"""
    A Repository is a set of packages, and knows which packages it
contains.
It also supports the iterator protocol. Iteration is guaranteed to be
deterministic and independent of the order in which packages have been
added.
"""
def __init__(self, packages=None):
self._name_to_packages = collections.defaultdict(list)
# Sorted list of keys in self._name_to_packages, to keep iteration
# over a repository reproducible
self._names = []
packages = packages or []
for package in packages:
self.add_package(package)
def __len__(self):
return sum(
len(packages) for packages in six.itervalues(self._name_to_packages)
)
def __iter__(self):
for name in self._names:
for package in self._name_to_packages[name]:
yield package
def add_package(self, package_metadata):
""" Add the given package to this repository.
Parameters
----------
package : PackageMetadata
The package metadata to add. May be a subclass of PackageMetadata.
Note
----
If the same package is added multiple times to a repository, every copy
will be available when calling find_package or when iterating.
"""
if package_metadata.name not in self._name_to_packages:
bisect.insort(self._names, package_metadata.name)
self._name_to_packages[package_metadata.name].append(package_metadata)
# Fixme: this should not be that costly as long as we don't have
# many versions for a given package.
self._name_to_packages[package_metadata.name].sort(
key=operator.attrgetter("version")
)
def find_package(self, name, version):
"""Search for the first match of a package with the given name and
version.
Parameters
----------
name : str
The package name to look for.
version : EnpkgVersion
The version to look for.
Returns
-------
package : PackageMetadata
The corresponding metadata.
"""
candidates = self._name_to_packages[name]
for candidate in candidates:
if candidate.version == version:
return candidate
raise NoPackageFound(
"Package '{0}-{1}' not found".format(name, str(version))
)
def find_packages(self, name):
""" Returns an iterable of package metadata with the given name, sorted
from lowest to highest version.
Parameters
----------
name : str
The package's name
Returns
-------
packages : iterable
Iterable of PackageMetadata instances (order is from lower to
higher version)
"""
return tuple(self._name_to_packages[name])
def update(self, iterable):
""" Add the packages from the given iterable into this repository.
Parameters
        ----------
        iterable : iterable of PackageMetadata
            The packages to add to this repository.
        """
for package in iterable:
self.add_package(package)
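# Usage sketch (illustration only: PackageMetadata's real constructor lives
# elsewhere in simplesat, so a stand-in with the two attributes Repository
# relies on, name and version, is used here):
#   from collections import namedtuple
#   Pkg = namedtuple("Pkg", ["name", "version"])
#   repo = Repository([Pkg("numpy", "1.9.1"), Pkg("numpy", "1.8.0")])
#   [p.version for p in repo.find_packages("numpy")]  # -> ['1.8.0', '1.9.1']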
|
[
"cournape@gmail.com"
] |
cournape@gmail.com
|
ba4fc1788c1dbf1d553af32f6f90a91f2aaa3485
|
dd80a584130ef1a0333429ba76c1cee0eb40df73
|
/external/chromium_org/chrome/chrome_repack_pseudo_locales.gypi
|
340ed191955ce1651ad7f722c9614b277d497f92
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
karunmatharu/Android-4.4-Pay-by-Data
|
466f4e169ede13c5835424c78e8c30ce58f885c1
|
fcb778e92d4aad525ef7a995660580f948d40bc9
|
refs/heads/master
| 2021-03-24T13:33:01.721868
| 2017-02-18T17:48:49
| 2017-02-18T17:48:49
| 81,847,777
| 0
| 2
|
MIT
| 2020-03-09T00:02:12
| 2017-02-13T16:47:00
| null |
UTF-8
|
Python
| false
| false
| 1,247
|
gypi
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'action_name': 'repack_pseudo_locales',
'variables': {
'conditions': [
['branding=="Chrome"', {
'branding_flag': ['-b', 'google_chrome',],
}, { # else: branding!="Chrome"
'branding_flag': ['-b', 'chromium',],
}],
],
},
'inputs': [
'tools/build/repack_locales.py',
'<!@pymod_do_main(repack_locales -i -p <(OS) <(branding_flag) -g <(grit_out_dir) -s <(SHARED_INTERMEDIATE_DIR) -x <(INTERMEDIATE_DIR) <(pseudo_locales))'
],
'conditions': [
['OS == "mac" or OS == "ios"', {
'outputs': [
'<!@pymod_do_main(repack_locales -o -p <(OS) -g <(grit_out_dir) -s <(SHARED_INTERMEDIATE_DIR) -x <(SHARED_INTERMEDIATE_DIR) <(pseudo_locales))'
],
}, { # else 'OS != "mac"'
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/<(pseudo_locales).pak'
],
}],
],
'action': [
'<@(repack_locales_cmd)',
'<@(branding_flag)',
'-p', '<(OS)',
'-g', '<(grit_out_dir)',
'-s', '<(SHARED_INTERMEDIATE_DIR)',
'-x', '<(SHARED_INTERMEDIATE_DIR)/.',
'<@(pseudo_locales)',
],
}
|
[
"karun.matharu@gmail.com"
] |
karun.matharu@gmail.com
|
8265b22feff8988d5a78fd85d1d9fc43f915de26
|
b76615ff745c6d66803506251c3d4109faf50802
|
/pyobjc-framework-Cocoa/PyObjCTest/test_nsnumberformatter.py
|
903f5e2dffeb62d1bb0af948a9b0ab4af0a16d49
|
[
"MIT"
] |
permissive
|
danchr/pyobjc-git
|
6ef17e472f54251e283a0801ce29e9eff9c20ac0
|
62b787fddeb381184043c7ff136f1c480755ab69
|
refs/heads/master
| 2021-01-04T12:24:31.581750
| 2020-02-02T20:43:02
| 2020-02-02T20:43:02
| 240,537,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,347
|
py
|
from PyObjCTools.TestSupport import *
from Foundation import *
class TestNSNumberFormatter(TestCase):
def testConstants(self):
self.assertEqual(NSNumberFormatterNoStyle, kCFNumberFormatterNoStyle)
self.assertEqual(NSNumberFormatterDecimalStyle, kCFNumberFormatterDecimalStyle)
self.assertEqual(
NSNumberFormatterCurrencyStyle, kCFNumberFormatterCurrencyStyle
)
self.assertEqual(NSNumberFormatterPercentStyle, kCFNumberFormatterPercentStyle)
self.assertEqual(
NSNumberFormatterScientificStyle, kCFNumberFormatterScientificStyle
)
self.assertEqual(
NSNumberFormatterSpellOutStyle, kCFNumberFormatterSpellOutStyle
)
self.assertEqual(NSNumberFormatterBehaviorDefault, 0)
self.assertEqual(NSNumberFormatterBehavior10_0, 1000)
self.assertEqual(NSNumberFormatterBehavior10_4, 1040)
self.assertEqual(
NSNumberFormatterPadBeforePrefix, kCFNumberFormatterPadBeforePrefix
)
self.assertEqual(
NSNumberFormatterPadAfterPrefix, kCFNumberFormatterPadAfterPrefix
)
self.assertEqual(
NSNumberFormatterPadBeforeSuffix, kCFNumberFormatterPadBeforeSuffix
)
self.assertEqual(
NSNumberFormatterPadAfterSuffix, kCFNumberFormatterPadAfterSuffix
)
self.assertEqual(NSNumberFormatterRoundCeiling, kCFNumberFormatterRoundCeiling)
self.assertEqual(NSNumberFormatterRoundFloor, kCFNumberFormatterRoundFloor)
self.assertEqual(NSNumberFormatterRoundDown, kCFNumberFormatterRoundDown)
self.assertEqual(NSNumberFormatterRoundUp, kCFNumberFormatterRoundUp)
self.assertEqual(
NSNumberFormatterRoundHalfEven, kCFNumberFormatterRoundHalfEven
)
self.assertEqual(
NSNumberFormatterRoundHalfDown, kCFNumberFormatterRoundHalfDown
)
self.assertEqual(NSNumberFormatterRoundHalfUp, kCFNumberFormatterRoundHalfUp)
@min_os_level("10.11")
    def testConstants10_11(self):  # renamed: a second testConstants would shadow the first
self.assertEqual(NSNumberFormatterOrdinalStyle, kCFNumberFormatterOrdinalStyle)
self.assertEqual(
NSNumberFormatterCurrencyISOCodeStyle,
kCFNumberFormatterCurrencyISOCodeStyle,
)
self.assertEqual(
NSNumberFormatterCurrencyPluralStyle, kCFNumberFormatterCurrencyPluralStyle
)
self.assertEqual(
NSNumberFormatterCurrencyAccountingStyle,
kCFNumberFormatterCurrencyAccountingStyle,
)
def testOutput(self):
self.assertResultIsBOOL(NSNumberFormatter.getObjectValue_forString_range_error_)
self.assertArgIsOut(NSNumberFormatter.getObjectValue_forString_range_error_, 0)
self.assertArgIsInOut(
NSNumberFormatter.getObjectValue_forString_range_error_, 2
)
self.assertArgIsOut(NSNumberFormatter.getObjectValue_forString_range_error_, 3)
self.assertResultIsBOOL(NSNumberFormatter.generatesDecimalNumbers)
self.assertArgIsBOOL(NSNumberFormatter.setGeneratesDecimalNumbers_, 0)
self.assertResultIsBOOL(NSNumberFormatter.allowsFloats)
self.assertArgIsBOOL(NSNumberFormatter.setAllowsFloats_, 0)
self.assertResultIsBOOL(NSNumberFormatter.alwaysShowsDecimalSeparator)
self.assertArgIsBOOL(NSNumberFormatter.setAlwaysShowsDecimalSeparator_, 0)
self.assertResultIsBOOL(NSNumberFormatter.usesGroupingSeparator)
self.assertArgIsBOOL(NSNumberFormatter.setUsesGroupingSeparator_, 0)
self.assertResultIsBOOL(NSNumberFormatter.isLenient)
self.assertArgIsBOOL(NSNumberFormatter.setLenient_, 0)
self.assertResultIsBOOL(NSNumberFormatter.usesSignificantDigits)
self.assertArgIsBOOL(NSNumberFormatter.setUsesSignificantDigits_, 0)
self.assertResultIsBOOL(NSNumberFormatter.isPartialStringValidationEnabled)
self.assertArgIsBOOL(NSNumberFormatter.setPartialStringValidationEnabled_, 0)
self.assertResultIsBOOL(NSNumberFormatter.hasThousandSeparators)
self.assertArgIsBOOL(NSNumberFormatter.setHasThousandSeparators_, 0)
self.assertResultIsBOOL(NSNumberFormatter.localizesFormat)
self.assertArgIsBOOL(NSNumberFormatter.setLocalizesFormat_, 0)
if __name__ == "__main__":
main()
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
1217867d6445957c4cf47d6409ea0cceff370ef9
|
783244556a7705d99662e0b88872e3b63e3f6301
|
/denzo/migrations(second attempt)/0017_auto_20160224_0953.py
|
41cd71d62a6d23ebf83e8ef0f9f1d7495859f9c9
|
[] |
no_license
|
KobiBeef/eastave_src
|
bf8f2ce9c99697653d36ca7f0256473cc25ac282
|
dfba594f3250a88d479ccd9f40fefc907a269857
|
refs/heads/master
| 2021-01-10T16:49:14.933424
| 2016-03-08T13:30:48
| 2016-03-08T13:30:48
| 51,752,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('denzo', '0016_auto_20160224_0951'),
]
operations = [
migrations.AlterField(
model_name='patientinfo',
name='attending_physician',
field=models.ManyToManyField(related_name='attending_phycisian1', to='denzo.PhysicianInfo'),
),
migrations.AlterField(
model_name='patientinfo',
name='physician',
field=models.ManyToManyField(related_name='attending_physician2', to='denzo.PhysicianInfo'),
),
]
|
[
"ezekielbacungan@gmail.com"
] |
ezekielbacungan@gmail.com
|
1e6f70eec09a530a4c3db0e9939343b1075d7a12
|
3971979d46959636ee2a7a68d72428b1d7fd9853
|
/elasticsearch_django/management/commands/__init__.py
|
5c823387e8298bac9a333b02cb017b59c389c852
|
[
"MIT"
] |
permissive
|
vryazanov/elasticsearch-django
|
818b302d53cdbf9c2c1ee05255170710e450186d
|
adc8328ca3e6ef5d21cb53447e3a2e0663d770d4
|
refs/heads/master
| 2020-03-16T16:11:19.230648
| 2018-02-23T14:50:36
| 2018-02-23T14:50:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,834
|
py
|
# -*- coding: utf-8 -*-
"""Base command for search-related management commands."""
import logging
import builtins
from django.core.management.base import BaseCommand
from elasticsearch.exceptions import TransportError
logger = logging.getLogger(__name__)
class BaseSearchCommand(BaseCommand):
"""Base class for commands that interact with the search index."""
description = "Base search command."
def _confirm_action(self):
"""Return True if the user confirms the action."""
msg = "Are you sure you wish to continue? [y/N] "
return builtins.input(msg).lower().startswith('y')
def add_arguments(self, parser):
"""Add default base options of --noinput and indexes."""
parser.add_argument(
'--noinput',
action='store_false',
dest='interactive',
default=True,
help='Do no display user prompts - may affect data.'
)
parser.add_argument(
'indexes',
nargs='*',
help="Names of indexes on which to run the command."
)
def do_index_command(self, index, interactive):
"""Run a command against a named index."""
raise NotImplementedError()
def handle(self, *args, **options):
"""Run do_index_command on each specified index and log the output."""
for index in options.pop('indexes'):
data = {}
try:
data = self.do_index_command(index, **options)
except TransportError as ex:
logger.warning("ElasticSearch threw an error: %s", ex)
data = {
"index": index,
"status": ex.status_code,
"reason": ex.error,
}
finally:
logger.info(data)
|
[
"hugo@yunojuno.com"
] |
hugo@yunojuno.com
|
96d23b92026e5ac28fb9bdcdb0b268cbd883af0d
|
938a496fe78d5538af94017c78a11615a8498682
|
/algorithms/901-/1030.matrix-cells-in-distance-order.py
|
56b21b33d5488d66697d5f02c50976a56e730edd
|
[] |
no_license
|
huilizhou/Leetcode-pyhton
|
261280044d15d0baeb227248ade675177efdb297
|
6ae85bf79c5a21735e3c245c0c256f29c1c60926
|
refs/heads/master
| 2020-03-28T15:57:52.762162
| 2019-11-26T06:14:13
| 2019-11-26T06:14:13
| 148,644,059
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 661
|
py
|
# Matrix cells in distance order
class Solution(object):
def allCellsDistOrder(self, R, C, r0, c0):
"""
:type R: int
:type C: int
:type r0: int
:type c0: int
:rtype: List[List[int]]
"""
# res = [[i, j] for i in range(R) for j in range(C)]
# res.sort(key=lambda x: abs(x[0] - r0) + abs(x[1] - c0))
# return res
res = []
for i in range(R):
for j in range(C):
res.append((abs(i - r0) + abs(j - c0), [i, j]))
res.sort()
return list(item[1] for item in res)
print(Solution().allCellsDistOrder(R=2, C=2, r0=0, c0=1))
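# Expected output for the call above: [[0, 1], [0, 0], [1, 1], [1, 0]]
# (Manhattan distances from (0, 1) are 0, 1, 1, 2; Python's tuple sort breaks
# the tie between [0, 0] and [1, 1] by comparing the coordinates themselves.)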
|
[
"2540278344@qq.com"
] |
2540278344@qq.com
|
347e83a77741b61d8657d3cb2a0956f362fe55e5
|
426f216e3d38d2030d337c8be6463cc4cd7af6c3
|
/day07/mul/multipro8_pool.py
|
ddfcdc95d2e644dd1b33c973e98dbae909fcf69d
|
[
"Apache-2.0"
] |
permissive
|
zhangyage/Python-oldboy
|
c7b43801935fc9e08e973ee0b852daa8e8667fb7
|
a95c1b465929e2be641e425fcb5e15b366800831
|
refs/heads/master
| 2021-01-23T02:59:37.574638
| 2019-10-27T05:35:58
| 2019-10-27T05:35:58
| 86,039,220
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Process pool
'''
from multiprocessing import Process,Pool
import time
def f(x):
#print x*x
time.sleep(1)
return x*x
if __name__ == '__main__':
    pool = Pool(processes=5) # run 5 worker processes in parallel
res_list = []
for i in range(10):
        res = pool.apply_async(f,[i,]) # apply_async runs asynchronously; pool.apply would run synchronously, i.e. serially
        res_list.append(res) # collect the results; note each element is actually an AsyncResult instance
    for r in res_list: # the elements are AsyncResult instances, so fetch each value with get()
print r.get()
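    # A more concise way to get the same values (a sketch; map blocks until all
    # results are ready):
    # print pool.map(f, range(10))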
|
[
"zhangyage2015@163.com"
] |
zhangyage2015@163.com
|
0cc2e6b5afbcc6588596c4313893d206bcec3465
|
930309163b930559929323647b8d82238724f392
|
/abc104_c.v2.py
|
1c56b7dd77d336db84c82ba6490babad3fded1b6
|
[] |
no_license
|
GINK03/atcoder-solvers
|
874251dffc9f23b187faa77c439b445e53f8dfe1
|
b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7
|
refs/heads/master
| 2021-11-07T14:16:52.138894
| 2021-09-12T13:32:29
| 2021-09-12T13:32:29
| 11,724,396
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 726
|
py
|
import math
N,G=map(int,input().split())
A=[]
for i in range(1,N+1):
score = i*100
num,comp=map(int,input().split())
A.append((score, num, comp))
P = 1<<len(A)
ans = math.inf
for i in range(P):
num = 0
score = 0
for j in range(len(A)):
if i&(1<<j) > 0:
score += A[j][2] + A[j][0]*A[j][1]
num += A[j][1]
if G > score:
for j in reversed(range(len(A))):
if i&(1<<j) == 0:
for _ in range(A[j][1]):
score += A[j][0]
num += 1
if G <= score:
break
if G <= score:
break
ans = min(ans, num)
print(ans)
|
[
"gim.kobayashi@gmail.com"
] |
gim.kobayashi@gmail.com
|
e82273798d8afec26681b06523573669549ef37e
|
ff6248be9573caec94bea0fa2b1e4b6bf0aa682b
|
/raw_scripts/132.230.102.123-10.21.9.51/1569576154.py
|
df8c1f338e4ae9d73116683d95475c2e528dc852
|
[] |
no_license
|
LennartElbe/codeEvo
|
0e41b1a7705204e934ef71a5a28c047366c10f71
|
e89b329bc9edd37d5d9986f07ca8a63d50686882
|
refs/heads/master
| 2020-12-21T17:28:25.150352
| 2020-03-26T10:22:35
| 2020-03-26T10:22:35
| 236,498,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,688
|
py
|
import functools
import typing
import string
import random
import pytest
## Solution part 1.
def is_palindromic(n: int):
if not n > 0:
return False
else:
x = str(n)
y = reversed(x)
        x = ''.join(y)  # str(y) would give the iterator's repr, not the reversed digits
if n == int(x):
return True
else:
return False
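# Quick sanity checks for the fixed helper (kept as comments):
#   is_palindromic(121) -> True
#   is_palindromic(123) -> False
#   is_palindromic(0)   -> False  (non-positive inputs are rejected)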
######################################################################
## hidden code
def mk_coverage():
covered = set()
target = set(range(6))
count = 0
original = None
def coverage(func):
nonlocal covered, target, count, original
def wrapper(n):
nonlocal covered, count
s = str (n)
lens = len (s)
if lens == 1:
covered.add(0)
if lens == 2:
covered.add(1)
            if lens > 2 and lens % 2 == 0:  # 'lenr' here was an undefined name; 'lens' is meant
                covered.add(2)
            if lens > 2 and lens % 2 == 1:
covered.add(3)
r = func (n)
if r:
covered.add (4)
else:
covered.add (5)
count += 1
return r
if func == "achieved": return len(covered)
if func == "required": return len(target)
if func == "count" : return count
if func == "original": return original
original = func
if func.__doc__:
wrapper.__doc__ = func.__doc__
wrapper.__hints__ = typing.get_type_hints (func)
return wrapper
return coverage
coverage = mk_coverage()
try:
is_palindromic = coverage(is_palindromic)
except:
pass
## Solution part 2. (Tests)
is_palindromic(1)
def test_is_palindromic():
assert(is_palindromic(0)) == False
assert(is_palindromic(1)) == True
assert(is_palindromic(1)) == True
######################################################################
## hidden restores unadorned function
is_palindromic = coverage ("original")
## Solution part 3.
## Solution part 4.
######################################################################
## test code
pytest.main (["-v", "--assert=plain", "-p", "no:cacheprovider"])
from inspect import getfullargspec
class TestNames:
def test_is_palindromic(self):
assert is_palindromic
assert 'n' in getfullargspec(is_palindromic).args
def test_gen_palindromic(self):
assert gen_palindromic
assert 'n' in getfullargspec(gen_palindromic).args
def test_represent(self):
assert represent
assert 'n' in getfullargspec(represent).args
class TestGrades:
def test_docstring_present(self):
assert is_palindromic.__doc__ is not None
assert gen_palindromic.__doc__ is not None
assert represent.__doc__ is not None
def test_typing_present(self):
assert is_palindromic.__hints__ == typing.get_type_hints(self.is_palindromic_oracle)
assert typing.get_type_hints (gen_palindromic) == typing.get_type_hints (self.gen_palindromic_oracle)
assert typing.get_type_hints (represent) == typing.get_type_hints (self.represent_oracle)
def test_coverage(self):
assert coverage("achieved") == coverage("required")
def is_palindromic_oracle(self, n:int)->list:
s = str(n)
while len (s) > 1:
if s[0] != s[-1]:
return False
s = s[1:-1]
return True
def gen_palindromic_oracle (self, n:int):
return (j for j in range (n + 1, 0, -1) if self.is_palindromic_oracle (j))
def represent_oracle (self, n:int) -> list:
for n1 in self.gen_palindromic_oracle (n):
if n1 == n:
return [n1]
for n2 in self.gen_palindromic_oracle (n - n1):
if n2 == n - n1:
return [n1, n2]
for n3 in self.gen_palindromic_oracle (n - n1 - n2):
if n3 == n - n1 - n2:
return [n1, n2, n3]
# failed to find a representation
return []
def test_is_palindromic(self):
## fill in
for i in range (100):
self.check_divisors (i)
n = random.randrange (10000)
self.check_divisors (n)
def test_gen_palindromic(self):
## fill in
pass
def test_represent (self):
def check(n, r):
for v in r:
assert self.is_palindromic_oracle (v)
assert n == sum (r)
for n in range (1,100):
r = represent (n)
check (n, r)
for i in range (100):
n = random.randrange (10000)
r = represent (n)
check (n, r)
|
[
"lenni.elbe@gmail.com"
] |
lenni.elbe@gmail.com
|
934fe6bcebc4666c645ddb6e55d9d59459a6fbc4
|
ffe2e0394c3a386b61e0c2e1876149df26c64970
|
/mobile.py
|
610210dbea874a2893e22f8ffea4e82ccb93aab5
|
[] |
no_license
|
garethpaul/WillBeOut
|
202e0ad7a12800c6008ec106c67ee7d23d256a07
|
c8c40f2f71238c5a5ac6f5ce0cfb3a07e166b341
|
refs/heads/master
| 2016-09-05T14:02:15.648358
| 2013-01-16T17:26:43
| 2013-01-16T17:26:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,344
|
py
|
import tornado.auth
import tornado.web
import base
import json
import urllib
class IndexHandler(base.BaseHandler, tornado.auth.FacebookGraphMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
self.render('mobile_index.html')
class EventsHandler(base.BaseHandler, tornado.auth.FacebookGraphMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
_id = self.get_current_user()['id']
events = self.db.query(
"SELECT * FROM willbeout_events WHERE userid = %s AND DATE(f) >= DATE(NOW())", int(_id))
self.render('mobile_events.html', events=events)
class EventHandler(base.BaseHandler, tornado.auth.FacebookGraphMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
_id = self.get_argument('id')
event = self.db.get(
"SELECT * FROM willbeout_events WHERE id = %s", int(_id))
places = self.db.query("""select a.id, a.event_id, a.address, a.city, a.name, a.url, a.user_id, a.user_name, count(b.suggestion_id) as friends from willbeout_suggest as a
LEFT JOIN willbeout_votes as b ON a.id = b.suggestion_id
WHERE a.event_id = %s
GROUP BY a.id ORDER BY friends DESC;""", int(_id))
self.render('mobile_event.html', event=event, places=places)
|
[
"gareth@garethpaul.com"
] |
gareth@garethpaul.com
|
6f6d3de7df03b19b23300c93004ab7cdd98c3362
|
7da5bb08e161395e06ba4283e0b64676f362435c
|
/stackstrom/st2/bin/st2-migrate-datastore-scopes.py
|
404be7add19d12826953893b8610e0c1f575a948
|
[
"LicenseRef-scancode-generic-cla",
"curl",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
devopszone/sensu_st2_backup
|
b25a061d21c570a7ce5c020fa8bd82ea4856c9f6
|
2aae0801c35c209fb33fed90b936a0a35ccfacdb
|
refs/heads/master
| 2020-03-22T09:32:20.970848
| 2018-07-05T13:17:30
| 2018-07-05T13:17:30
| 139,843,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,580
|
py
|
#!/opt/stackstorm/st2/bin/python
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
import traceback as tb
from st2common import config
from st2common.constants.keyvalue import FULL_SYSTEM_SCOPE, SYSTEM_SCOPE
from st2common.constants.keyvalue import FULL_USER_SCOPE, USER_SCOPE
from st2common.models.db.keyvalue import KeyValuePairDB
from st2common.persistence.keyvalue import KeyValuePair
from st2common.service_setup import db_setup
from st2common.service_setup import db_teardown
def migrate_datastore():
key_value_items = KeyValuePair.get_all()
try:
for kvp in key_value_items:
kvp_id = getattr(kvp, 'id', None)
secret = getattr(kvp, 'secret', False)
scope = getattr(kvp, 'scope', SYSTEM_SCOPE)
if scope == USER_SCOPE:
scope = FULL_USER_SCOPE
if scope == SYSTEM_SCOPE:
scope = FULL_SYSTEM_SCOPE
new_kvp_db = KeyValuePairDB(id=kvp_id, name=kvp.name,
expire_timestamp=kvp.expire_timestamp,
value=kvp.value, secret=secret,
scope=scope)
KeyValuePair.add_or_update(new_kvp_db)
    except Exception:  # narrowed from a bare except so KeyboardInterrupt still propagates
print('ERROR: Failed migrating datastore item with name: %s' % kvp.name)
tb.print_exc()
raise
def main():
config.parse_args()
# Connect to db.
db_setup()
# Migrate rules.
try:
migrate_datastore()
print('SUCCESS: Datastore items migrated successfully.')
exit_code = 0
    except Exception:
print('ABORTED: Datastore migration aborted on first failure.')
exit_code = 1
# Disconnect from db.
db_teardown()
sys.exit(exit_code)
if __name__ == '__main__':
main()
|
[
"root@stackstrom.c.glassy-hue-205107.internal"
] |
root@stackstrom.c.glassy-hue-205107.internal
|
e13c2b343d21f52283ccae9e50298c08e8d346bc
|
1733c48ea06f8265835b11dc5d2770a6ad5b23ac
|
/tests/device/test_measure_voltage.py
|
2373c86a8f3a626eb237656b65debee411eae59f
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Sensirion/python-shdlc-sensorbridge
|
c16374d018ca149c2e2a907929129e310e367252
|
c441c17d89697ecf0f7b61955f54c3da195e30e6
|
refs/heads/master
| 2021-06-30T07:49:24.032949
| 2021-03-19T10:00:04
| 2021-03-19T10:00:04
| 224,618,118
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 862
|
py
|
# -*- coding: utf-8 -*-
# (c) Copyright 2020 Sensirion AG, Switzerland
from __future__ import absolute_import, division, print_function
from sensirion_shdlc_sensorbridge import SensorBridgePort
import pytest
@pytest.mark.needs_device
@pytest.mark.parametrize("port", [
SensorBridgePort.ONE,
SensorBridgePort.TWO,
])
def test_valid_port(device, port):
"""
Test if the measure_voltage() function works when passing a valid port.
"""
voltage = device.measure_voltage(port)
assert type(voltage) is float
@pytest.mark.needs_device
@pytest.mark.parametrize("port", [
2,
SensorBridgePort.ALL,
])
def test_invalid_port(device, port):
"""
Test if the measure_voltage() function raises the correct exception when
passing an invalid port.
"""
with pytest.raises(ValueError):
device.measure_voltage(port)
|
[
"urban.bruhin@sensirion.com"
] |
urban.bruhin@sensirion.com
|
dad18d91eddb0dbdb35c8fae190c95de6cdec4bd
|
6858b0e8da83676634e6208829ada13d1ea46bd1
|
/vendor/pathtools/scripts/nosy.py
|
ad97406cc752bb38380997533f187eed485af9fc
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
iVerb/armada-pipeline
|
452045da1b9dfc85c5d0bb4350feeee2061f761d
|
9f0d0fd7c23fe382ca9c9ea1d44fcbb3dd5cbf01
|
refs/heads/master
| 2023-05-02T00:52:19.209982
| 2021-05-14T14:57:06
| 2021-05-14T14:57:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,569
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# nosy: continuous integration for watchdog
#
# Copyright (C) 2010 Yesudeep Mangalapilly <yesudeep@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
:module: nosy
:synopsis: Rewrite of Jeff Winkler's nosy script tailored to testing watchdog
:platform: OS-independent
"""
import os.path
import sys
import stat
import time
import subprocess
from fnmatch import fnmatch
def match_patterns(pathname, patterns):
"""Returns ``True`` if the pathname matches any of the given patterns."""
for pattern in patterns:
if fnmatch(pathname, pattern):
return True
return False
def filter_paths(pathnames, patterns=None, ignore_patterns=None):
"""Filters from a set of paths based on acceptable patterns and
ignorable patterns."""
result = []
if patterns is None:
patterns = ['*']
if ignore_patterns is None:
ignore_patterns = []
for pathname in pathnames:
if match_patterns(pathname, patterns) and not match_patterns(pathname, ignore_patterns):
result.append(pathname)
return result
def absolute_walker(pathname, recursive):
if recursive:
walk = os.walk
else:
        def walk(_path):
            # wrap the single (root, dirs, files) triple in a list so the
            # caller's "for root, directories, filenames in ..." still unpacks
            try:
                return [next(os.walk(_path))]
            except NameError:  # Python < 2.6 has no next() builtin
                return [os.walk(_path).next()]
for root, directories, filenames in walk(pathname):
yield root
for directory in directories:
yield os.path.abspath(os.path.join(root, directory))
for filename in filenames:
yield os.path.abspath(os.path.join(root, filename))
def glob_recursive(pathname, patterns=None, ignore_patterns=None):
full_paths = []
for root, _, filenames in os.walk(pathname):
for filename in filenames:
full_path = os.path.abspath(os.path.join(root, filename))
full_paths.append(full_path)
filepaths = filter_paths(full_paths, patterns, ignore_patterns)
return filepaths
def check_sum(pathname='.', patterns=None, ignore_patterns=None):
checksum = 0
for f in glob_recursive(pathname, patterns, ignore_patterns):
stats = os.stat(f)
checksum += stats[stat.ST_SIZE] + stats[stat.ST_MTIME]
return checksum
if __name__ == "__main__":
if len(sys.argv) > 1:
path = sys.argv[1]
else:
path = '.'
if len(sys.argv) > 2:
command = sys.argv[2]
else:
commands = [
# Build documentation automatically as well as the armada code
# changes.
"make SPHINXBUILD=../bin/sphinx-build -C docs html",
# The reports coverage generates all by itself are more
# user-friendly than the ones which `nosetests --with-coverage`
# generates. Therefore, we call `coverage` explicitly to
# generate reports, and to keep the reports in synchronization
# with the armada code, we erase all coverage information
# before regenerating reports or running `nosetests`.
"bin/coverage erase",
"bin/python-tests tests/run_tests.py",
"bin/coverage html",
]
command = '; '.join(commands)
previous_checksum = 0
while True:
calculated_checksum = check_sum(path, patterns=['*.py', '*.rst', '*.rst.inc'])
if calculated_checksum != previous_checksum:
previous_checksum = calculated_checksum
subprocess.Popen(command, shell=True)
time.sleep(2)
|
[
"borbs727@gmail.com"
] |
borbs727@gmail.com
|
33a7a7621b037b3e0b1dcd9b23bca3b857ee8c29
|
8bf8ab29cb25de00c6a799d1f58610528b810592
|
/파이썬 SW 문제해결 기본/4861. [파이썬 SW 문제해결 기본] 3일차 - 회문/main.py
|
4c37bd74d29cd8d1697e936726dc4dd023cdfd8d
|
[] |
no_license
|
mgh3326/sw_expert_academy_algorithm
|
fa93fb68862cabeba8f9f5fff00a87f26a014afc
|
97cbd2a1845e42f142d189e9121c3cd5822fc8d8
|
refs/heads/master
| 2020-07-03T21:40:29.948233
| 2019-11-23T07:26:15
| 2019-11-23T07:26:15
| 202,058,567
| 0
| 0
| null | 2019-11-30T06:11:34
| 2019-08-13T03:40:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,214
|
py
|
import sys
sys.stdin = open("./input.txt")
def is_palindrome(input_str):
if len(input_str) < 2:
return False
for i in range(len(input_str) // 2):
if input_str[i] != input_str[len(input_str) - 1 - i]:
return False
return True
def generate_substr(input_str):
global result
global is_end
for i in range(len(input_str) - m + 1):
substr = input_str[i:i + m]
if is_palindrome(substr):
result = substr
is_end = True
return
test_case_num = int(input())
for test_case_index in range(test_case_num):
result = ""
is_end = False
n, m = map(int, input().split())
board_list = []
for _ in range(n):
temp_str = input()
board_list.append(temp_str)
# substring
for board in board_list:
generate_substr(board)
if is_end:
break
if not is_end:
for w in range(n):
temp_str = ""
for h in range(n):
temp_str += board_list[h][w]
generate_substr(temp_str)
if is_end:
break
    # is_palindrome above is the function that checks whether a string is a palindrome
print("#%d %s" % (test_case_index + 1, result))
|
[
"mgh3326@naver.com"
] |
mgh3326@naver.com
|
afe1ff7d941c26b1091c800390340a09b4dbfa91
|
b2750720aee1300f46fd8e21038719693f6f4204
|
/gestao_RH/urls.py
|
9f1d60bd53b755609a9aba765eb37223783d633c
|
[] |
no_license
|
matheuskaio/ProjetoDjangoGestaoRH
|
458393c9b39c8ebdf99e4fee206b3d0a1cdbad7f
|
8a3541c60bd71bfa72eb2d1e0d14e9a24c8d1bbb
|
refs/heads/master
| 2020-04-11T10:26:52.488685
| 2018-12-14T22:11:49
| 2018-12-14T22:11:49
| 161,713,119
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('', include('apps.core.urls')),
path('funcionarios/', include('apps.funcionarios.urls')),
path('empresas/', include('apps.empresas.urls')),
path('documentos/', include('apps.documentos.urls')),
path('departamentos/', include('apps.departamentos.urls')),
path('horas-extras/', include('apps.registro_hora_extra.urls')),
path('admin/', admin.site.urls),
path('accounts/', include('django.contrib.auth.urls')),
# ] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"m.k.m.p.2000@gmail.com"
] |
m.k.m.p.2000@gmail.com
|
d174feba10caf4afadcac27921d4f36fc6d5cf8c
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/sdssj9-10_014908.87+011342.6/sdB_SDSSJ910_014908.87+011342.6_lc.py
|
805c78e017b0ef45b1ed827d291be153bd5923f0
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[27.286958,1.2285], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_SDSSJ910_014908.87+011342.6 /sdB_SDSSJ910_014908.87+011342.6_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
2ec998d98e1fe7fd656f310f8b2b22f355e7b9f3
|
b891f38eb12eeafdbcec9deee2320acfaac3a7ad
|
/0x11-python-network_1/2-post_email.py
|
8c341dd0f2a209939ee201a9cbc73cde4e7f02c1
|
[] |
no_license
|
davixcky/holbertonschool-higher_level_programming
|
bb112af3e18994a46584ac3e78385e46c3d918f6
|
fe4cd0e95ee976b93bd47c85c2bc810049f568fa
|
refs/heads/master
| 2023-01-11T00:41:03.145968
| 2020-09-22T22:55:53
| 2020-09-22T22:55:53
| 259,390,611
| 0
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
#!/usr/bin/python3
""" Fetches https://intranet.hbtn.io/status"""
from urllib import request, parse
import sys
if __name__ == '__main__':
url, email = sys.argv[1:]
body = {'email': email}
data = parse.urlencode(body)
data = data.encode('ascii')
req = request.Request(url, data)
with request.urlopen(req) as response:
html = response.read()
print(html.decode('utf-8'))
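# Usage sketch (URL and address below are placeholders):
#   ./2-post_email.py http://example.com/post_email me@example.com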
|
[
"dvdizcky@gmail.com"
] |
dvdizcky@gmail.com
|
328fe766830ed081ceaf0df646470ba614068b59
|
4476597f6af6b9cd4614bf558553a7eb57c9f993
|
/tensorflow学习/tensorflow_learn.py
|
28dbea053a72c051c37b9874fa968b88f2678813
|
[] |
no_license
|
zhengziqiang/mypython
|
07dff974f475d1b9941b33518af67ece9703691a
|
7a2b419ff59a31dc937666e515490295f6be8a08
|
refs/heads/master
| 2021-07-14T20:01:34.231842
| 2017-04-19T01:18:25
| 2017-04-19T01:18:25
| 56,583,430
| 3
| 1
| null | 2020-07-23T11:46:35
| 2016-04-19T09:26:39
|
Python
|
UTF-8
|
Python
| false
| false
| 404
|
py
|
#coding=utf-8
import tensorflow as tf
x=tf.placeholder(tf.float32,[None,784])
w=tf.Variable(tf.zeros([784,10]))
b=tf.Variable(tf.zeros([10]))
y=tf.nn.softmax(tf.matmul(x,w)+b)
y_=tf.placeholder("float",[None,10])
cross_entropy=-tf.reduce_sum(y_*tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
init = tf.initialize_all_variables()
sess=tf.Session()
sess.run(init)
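# A training loop would typically follow this setup; a commented sketch, assuming
# the classic TF 0.x/1.x MNIST helper (not imported above):
#   from tensorflow.examples.tutorials.mnist import input_data
#   mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
#   for _ in range(1000):
#       batch_xs, batch_ys = mnist.train.next_batch(100)
#       sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})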
|
[
"1174986943@qq.com"
] |
1174986943@qq.com
|
993fdb71b1cfd755ab19dfa75580530c9d7055fc
|
c6548d34568618afa7edc4bfb358d7f22426f18b
|
/project-addons/acp_contrato_bufete/__init__.py
|
8d0e6954a36c6247a7571913dfbe95f5bf9a15b6
|
[] |
no_license
|
Comunitea/CMNT_00071_2016_JUA
|
77b6cbb6ec8624c8ff7d26b5833b57b521d8b2a4
|
206b9fb2d4cc963c8b20001e46aa28ad38b2f7f0
|
refs/heads/master
| 2020-05-21T16:22:32.569235
| 2017-10-04T12:10:00
| 2017-10-04T12:10:00
| 62,816,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,212
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import acp_contrato
import res_partner
import sale_order
import wizard
import account_voucher
import account_invoice
import product
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"javierjcf@gmail.com"
] |
javierjcf@gmail.com
|
1eb337a91fba49e0d21bb0111796ad7754e21348
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_340/ch40_2020_03_25_17_24_54_674946.py
|
90382adbb982876f65ccf3ac36f1ba2dc7c75c02
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
def soma_valores(elementos):
s=0
i=0
while i<len(elementos):
s+=elementos[i]
i+=1
return s
|
[
"you@example.com"
] |
you@example.com
|
f3fa6a313038553ec69e6b0fac7b52445884eef9
|
5a394c53a7099bc871401e32cf3fc782546f9f7d
|
/.history/lab1_d/lab1/exam/test_20210203181948.py
|
73102cdae2950dabaa44d91e8cca8d6dfdad27c3
|
[] |
no_license
|
ajaygc95/advPy
|
fe32d67ee7910a1421d759c4f07e183cb7ba295b
|
87d38a24ef02bcfe0f050840179c6206a61384bd
|
refs/heads/master
| 2023-03-27T10:10:25.668371
| 2021-03-23T08:28:44
| 2021-03-23T08:28:44
| 334,614,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
from collections import defaultdict, namedtuple
class Temperature:
def __init__(self):
self.data = defaultdict()
def readdata(self):
        with open('temperature.csv', 'r') as f:  # 'r' was an undefined bare name; quoted, and a target added
            pass  # row parsing was left unwritten in this snapshot
|
[
"gcajay95@gmail.com"
] |
gcajay95@gmail.com
|
2f096768afa9f6f0836a187e39994e461fe13b6e
|
d5a3c744b70c9a68c8efcf4252c9f13eb9c9b551
|
/动态下拉刷新页面爬取-demo45练习1.py
|
0145b9812bcd692c970958bc542fe29ad9355c65
|
[] |
no_license
|
zhouf1234/untitled8
|
9689b33aa53c49fcd4e704976a79b1a65578f137
|
c54634398800ba3c85f91885e6cf990e3645b2f6
|
refs/heads/master
| 2020-05-05T02:42:23.034426
| 2019-04-05T08:49:07
| 2019-04-05T08:49:07
| 179,648,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,519
|
py
|
import requests
import re
# Posts, repost counts, comment counts and like counts for one Sina Weibo account.
# Unfinished: extracting the post text is flaky here; demo45 exercise 2 is recommended instead.
page = 1  # which results page to fetch
uid = 1669879400  # uid of the account page, i.e. the 1669879400 in https://m.weibo.cn/profile/1669879400
nurl = '/api/container/getIndex?containerid=230413'
nurl = nurl+str(uid)+'_-_WEIBO_SECOND_PROFILE_WEIBO&page_type=03&page='+str(page)
# print('https://m.weibo.cn'+nurl)  # URL concatenation
# Fetch the page; Chinese text comes back as unicode escape sequences
header = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"}
request = requests.get('https://m.weibo.cn'+nurl,headers=header)
c = request.text
# print(c)
# like count of each post
# patt=re.compile('"created_at".*?"attitudes_count":(\d+)',re.S)
# titles=re.findall(patt,c)
# print(len(titles))
#
# # comment count of each post
# pat=re.compile('"created_at".*?"comments_count":(\d+)',re.S)
# title=re.findall(pat,c)
# print(len(title))
#
# # repost count of each post
# pa=re.compile('"created_at".*?"reposts_count":(\d+)',re.S)
# titl=re.findall(pa,c)
# print(len(titl))
# Post text: of 10 posts only 8 are captured; some are missing and some stick to
# the previous one. Not recommended for extracting content this way.
p = re.sub('<a.*?>|<.*?a>|@','',c)
# print(p)
p2 = re.compile('"text":"(.*?)"',re.S)
tit = re.findall(p2,p)
print(len(tit))
for i in tit:
print(i.encode('latin-1').decode('unicode_escape'))
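# A sturdier alternative (the payload shape is an assumption, untested): the
# endpoint returns JSON, so requests can decode it directly instead of regexing
# the raw text, e.g.
#   cards = request.json().get('data', {}).get('cards', [])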
|
[
"="
] |
=
|
ef83acb1830849c0e46fdb0f33f0b4ee6b03c16e
|
e7efae2b83216d9621bd93390959d652de779c3d
|
/vsphere/tests/legacy/test_metadata_cache.py
|
04695b37ae258f148227b4f2b37cb78669509635
|
[
"BSD-3-Clause",
"MIT",
"BSD-3-Clause-Modification",
"Unlicense",
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0"
] |
permissive
|
DataDog/integrations-core
|
ee1886cc7655972b2791e6ab8a1c62ab35afdb47
|
406072e4294edff5b46b513f0cdf7c2c00fac9d2
|
refs/heads/master
| 2023-08-31T04:08:06.243593
| 2023-08-30T18:22:10
| 2023-08-30T18:22:10
| 47,203,045
| 852
| 1,548
|
BSD-3-Clause
| 2023-09-14T16:39:54
| 2015-12-01T16:41:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,545
|
py
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.vsphere.legacy.metadata_cache import MetadataCache, MetadataNotFoundError
@pytest.fixture
def cache():
return MetadataCache()
def test_contains(cache):
with pytest.raises(KeyError):
cache.contains("instance", "foo")
cache._metadata["instance"] = {"foo_id": {}}
assert cache.contains("instance", "foo_id") is True
assert cache.contains("instance", "foo") is False
def test_set_metadata(cache):
cache._metadata["foo_instance"] = {}
cache.set_metadata("foo_instance", {"foo_id": {}})
assert "foo_id" in cache._metadata["foo_instance"]
def test_set_metrics(cache):
cache._metric_ids["foo_instance"] = []
cache.set_metric_ids("foo_instance", ["foo"])
assert "foo" in cache._metric_ids["foo_instance"]
assert len(cache._metric_ids["foo_instance"]) == 1
def test_get_metadata(cache):
with pytest.raises(KeyError):
cache.get_metadata("instance", "id")
cache._metadata["foo_instance"] = {"foo_id": {"name": "metric_name"}}
assert cache.get_metadata("foo_instance", "foo_id")["name"] == "metric_name"
with pytest.raises(MetadataNotFoundError):
cache.get_metadata("foo_instance", "bar_id")
def test_get_metrics(cache):
with pytest.raises(KeyError):
cache.get_metric_ids("instance")
cache._metric_ids["foo_instance"] = ["foo"]
assert cache.get_metric_ids("foo_instance") == ["foo"]
|
[
"noreply@github.com"
] |
DataDog.noreply@github.com
|
d8539f1bf6ab8cbfd8fbabe5ef96bacc654049b3
|
0e5f7fbea53b56ddeb0905c687aff43ae67034a8
|
/src/port_adapter/api/grpc/listener/BaseListener.py
|
fe0dc7ab7c4c74bdb126db44e244dc94027a5174
|
[] |
no_license
|
arkanmgerges/cafm.identity
|
359cdae2df84cec099828719202b773212549d6a
|
55d36c068e26e13ee5bae5c033e2e17784c63feb
|
refs/heads/main
| 2023-08-28T18:55:17.103664
| 2021-07-27T18:50:36
| 2021-07-27T18:50:36
| 370,453,892
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
"""
@author: Arkan M. Gerges<arkan.m.gerges@gmail.com>
"""
from src.resource.logging.decorator import debugLogger
class BaseListener:
@debugLogger
def _token(self, context) -> str:
metadata = context.invocation_metadata()
for key, value in metadata:
if "token" == key:
return value
return ""
|
[
"arkan.m.gerges@gmail.com"
] |
arkan.m.gerges@gmail.com
|
93c6c7dd56c60fb13f08f2d97e65e9d1e39305a3
|
c7cce6315bf8439faedbe44e2f35e06087f8dfb3
|
/Lab_Excercises/Lab_06/task_1.py
|
509866639df36f81fb0e45767cd470a3ad2b40b5
|
[] |
no_license
|
sipakhti/code-with-mosh-python
|
d051ab7ed1153675b7c44a96815c38ed6b458d0f
|
d4baa9d7493a0aaefefa145bc14d8783ecb20f1b
|
refs/heads/master
| 2020-12-26T13:05:06.783431
| 2020-07-08T07:00:59
| 2020-07-08T07:00:59
| 237,517,762
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
str1 = list(input("Please input the string: "))
encrypted_string = ""
for i in range(len(str1)):
if str1[i].lower() in "aeiou" and i % 2 != 0:
str1[i] = "_"
for char in str1:
encrypted_string = encrypted_string + char
print(encrypted_string)
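# Worked example: for input "beautiful", the vowels at odd indices (e, u, i, u)
# are masked, giving "b_a_t_f_l".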
|
[
"476061@gmail.com"
] |
476061@gmail.com
|
08e57f662a5ed15727ebead11dbee1da91274819
|
f0ee987789f5a6fe8f104890e95ee56e53f5b9b2
|
/pythia-0.8/packages/pyre/pyre/odb/fs/CodecODB.py
|
2d64e24630bc15ec801aa5092f03fd4056070e69
|
[] |
no_license
|
echoi/Coupling_SNAC_CHILD
|
457c01adc439e6beb257ac8a33915d5db9a5591b
|
b888c668084a3172ffccdcc5c4b8e7fff7c503f2
|
refs/heads/master
| 2021-01-01T18:34:00.403660
| 2015-10-26T13:48:18
| 2015-10-26T13:48:18
| 19,891,618
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,236
|
py
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from pyre.odb.common.Codec import Codec
class CodecODB(Codec):
def open(self, db, mode='r'):
"""open the file <db> in mode <mode> and place its contents in a shelf"""
filename = self.resolve(db)
import os
exists = os.path.isfile(filename)
if mode in ['w'] and not exists:
raise IOError("file not found: '%s'" % filename)
shelf = self._shelf(filename, False)
self._decode(shelf)
if mode == 'r':
shelf._const = True
else:
shelf._const = False
return shelf
def resolve(self, db):
return db + '.' + self.extension
def __init__(self, encoding, extension=None):
if extension is None:
extension = encoding
Codec.__init__(self, encoding, extension)
# public data
self.renderer = self._createRenderer()
# private data
self._locker = self._createLocker()
return
def _shelf(self, filename, const):
"""create a shelf for the contents of the db file"""
from Shelf import Shelf
return Shelf(filename, const, self)
def _decode(self, shelf):
"""lock and then read the contents of the file into the shelf"""
stream = file(shelf.name)
self._locker.lock(stream, self._locker.LOCK_EX)
exec stream in shelf
self._locker.unlock(stream)
return
def _createRenderer(self):
"""create a weaver for storing shelves"""
from pyre.weaver.Weaver import Weaver
weaver = Weaver()
return weaver
def _createLocker(self):
from FileLocking import FileLocking
return FileLocking()
# version
__id__ = "$Id: CodecODB.py,v 1.1.1.1 2005/03/08 16:13:41 aivazis Exp $"
# End of file
|
[
"echoi2@memphis.edu"
] |
echoi2@memphis.edu
|
482d7b56dd358e962f6dedb3cd96e67a87f389dd
|
f6f1e8b6bf2bde4e3b9eef80cc7e942854bd2e83
|
/bin_search.py
|
e502e1d9e10705648fe00c1841a0103e926de0a0
|
[] |
no_license
|
stevekutz/django_algo_exp1
|
178d84bda0520db39273b8f38b070c30758e222a
|
ef4e56b4f443868350deab7913b77678d093c6d6
|
refs/heads/master
| 2021-09-28T18:45:12.955842
| 2020-01-31T04:45:43
| 2020-01-31T04:45:43
| 236,881,770
| 0
| 0
| null | 2021-09-22T18:34:18
| 2020-01-29T01:35:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,339
|
py
|
dict_history = []
def binary_search(list, item):
# array indices
low = 0
high = len(list) - 1
    global dict_history
    def print_dic(history):  # renamed parameter to avoid shadowing the dict builtin
        for val in history:
# print(val) # prints dict at index # {'item': 9, 'low': 0, 'high': 100, 'mid': 50, 'guess': 50}
# prints in nicely formatted python2
# search val: 9 low: 0 high: 100 mid: 50 guess: 50
print('search val: %s \t low: %s \t high: %s \t mid: %s \t guess: %s' % (val['item'], val['low'], val['high'], val['mid'], val['guess']))
# this will print out val
for k, v in val.items(): # we can use any variable for key, value positions
# print(f'\t Key: {k} \t Value: {v}') # python 3
print("\t Key: %s \t Value: %i " % (k, v))
while low <= high:
# print(f'search val: {item} low: {low} high: {high} mid: {mid}')
mid = (low + high) // 2 # gives floor, rounded down val
guess = list[mid] # check the middle val
# python3 syntax
# print(f'search val: {item} \t low: {low} \t high: {high} \t mid: {mid} \t guess: {guess}')
# python2 syntax
# print('search val: %s \t low: %s \t high: %s \t mid: %s \t guess: %s' % (item, low, high, mid, guess))
# dict_history.append({item: item, low: low ,high: high, mid: mid, guess: guess}) # saves k &v as same e.g. { 9: 9, 50: 50, ...}
dict_history.append({'item': item, 'low': low, 'high': high, 'mid': mid, 'guess': guess})
if guess == item:
# return mid # middle is actual item --> use with # print(binary_search(test_list, find))
# python 3 syntax
#return print(f' item located: {guess}')
# python 2 syntax
print("item located {} after {} iterations".format(guess, len(dict_history)) )
print_dic(dict_history)
return None
elif guess > item:
high = mid - 1 # look in lower half
else:
low = mid + 1 # look in upper half
return None
test_list = list(range(0,101)) # generate list 1 to 100
find = 9
# print(binary_search(test_list, find))
binary_search(test_list, find)
|
[
"stkutz@gmail.com"
] |
stkutz@gmail.com
|
9b3ac2d7b530dc6140c8a3ba781a071f7dc425e8
|
db2ae9b2d769d768f685be8a1e830ad3f71e4ad3
|
/torch_scatter/utils.py
|
fd4be5998ae87a9d375db9fc0e77353725ae74d2
|
[
"MIT"
] |
permissive
|
hmaarrfk/pytorch_scatter
|
45623acd179bc309474f492d8d2358e0a9556b09
|
8d05f6108105d02b53b8fba35f28006cfdd1539f
|
refs/heads/master
| 2023-08-24T17:32:08.331457
| 2021-10-22T13:45:02
| 2021-10-22T13:45:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 340
|
py
|
import torch
def broadcast(src: torch.Tensor, other: torch.Tensor, dim: int):
if dim < 0:
dim = other.dim() + dim
if src.dim() == 1:
for _ in range(0, dim):
src = src.unsqueeze(0)
for _ in range(src.dim(), other.dim()):
src = src.unsqueeze(-1)
src = src.expand_as(other)
return src
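# Shape example (a sketch): src of shape [N], other of shape [B, N, C], dim=1
# -> src is unsqueezed to [1, N, 1] and expanded to [B, N, C], the layout
# scatter-style ops expect for their index tensor.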
|
[
"matthias.fey@tu-dortmund.de"
] |
matthias.fey@tu-dortmund.de
|
bb0bc1b070cdb39864536526363f9329311660dd
|
a5f0e7c09c36bb2fc91f95e5f3ec7f95c0ed305e
|
/cafe_backend/core/constants/sizes.py
|
962e36be4636824dd4958f081aa06e3356612c30
|
[] |
no_license
|
ecmascriptguru/cafe_backend
|
e703047c7f04d68596f76dcbff06828afbf5cc68
|
0c4152692d68e951481b39f0789bc58e94e0d20c
|
refs/heads/master
| 2022-10-26T00:31:50.070430
| 2020-06-18T15:30:02
| 2020-06-18T15:30:02
| 184,465,639
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
MAX_IMAGE_WIDTH = 1920
MAX_IMAGE_HEIGHT = 768
class DEFAULT_IMAGE_SIZE:
tiny = (int(MAX_IMAGE_WIDTH / 30), int(MAX_IMAGE_HEIGHT / 16))
small = (int(MAX_IMAGE_WIDTH / 10), int(MAX_IMAGE_HEIGHT / 8))
normal = (int(MAX_IMAGE_WIDTH / 4), int(MAX_IMAGE_HEIGHT / 4))
big = (int(MAX_IMAGE_WIDTH / 2), int(MAX_IMAGE_HEIGHT / 2))
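    # Resulting sizes: tiny=(64, 48), small=(192, 96), normal=(480, 192), big=(960, 384)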
|
[
"ecmascript.guru@gmail.com"
] |
ecmascript.guru@gmail.com
|
c934cbc782156852dd476482a5d236715cf5ff97
|
552a6f227dea50887a4bcbf1a120289f3ae90fc0
|
/pandas/tests/tseries/frequencies/test_freq_code.py
|
0aa29e451b1ba4513e1beb37ec82f83724339f9d
|
[
"BSD-3-Clause"
] |
permissive
|
Lucifer82/pandas
|
bbf6132e84585aebcfefe098d14ab6fa9adcf6d3
|
cdfdd77b65df350386ce27142ef3babd9e5186d2
|
refs/heads/master
| 2020-04-30T15:30:27.180080
| 2019-03-21T03:07:43
| 2019-03-21T03:07:43
| 176,922,084
| 1
| 0
|
BSD-3-Clause
| 2019-03-21T10:26:38
| 2019-03-21T10:26:38
| null |
UTF-8
|
Python
| false
| false
| 4,707
|
py
|
import pytest
from pandas._libs.tslibs import frequencies as libfrequencies, resolution
from pandas._libs.tslibs.frequencies import (
FreqGroup, _period_code_map, get_freq, get_freq_code)
import pandas.compat as compat
import pandas.tseries.offsets as offsets
@pytest.fixture(params=list(compat.iteritems(_period_code_map)))
def period_code_item(request):
return request.param
@pytest.mark.parametrize("freqstr,expected", [
("A", 1000), ("3A", 1000), ("-1A", 1000),
("Y", 1000), ("3Y", 1000), ("-1Y", 1000),
("W", 4000), ("W-MON", 4001), ("W-FRI", 4005)
])
def test_freq_code(freqstr, expected):
assert get_freq(freqstr) == expected
def test_freq_code_match(period_code_item):
freqstr, code = period_code_item
assert get_freq(freqstr) == code
@pytest.mark.parametrize("freqstr,expected", [
("A", 1000), ("3A", 1000), ("-1A", 1000), ("A-JAN", 1000),
("A-MAY", 1000), ("Y", 1000), ("3Y", 1000), ("-1Y", 1000),
("Y-JAN", 1000), ("Y-MAY", 1000), (offsets.YearEnd(), 1000),
(offsets.YearEnd(month=1), 1000), (offsets.YearEnd(month=5), 1000),
("W", 4000), ("W-MON", 4000), ("W-FRI", 4000), (offsets.Week(), 4000),
(offsets.Week(weekday=1), 4000), (offsets.Week(weekday=5), 4000),
("T", FreqGroup.FR_MIN),
])
def test_freq_group(freqstr, expected):
assert resolution.get_freq_group(freqstr) == expected
def test_freq_group_match(period_code_item):
freqstr, code = period_code_item
str_group = resolution.get_freq_group(freqstr)
code_group = resolution.get_freq_group(code)
assert str_group == code_group == code // 1000 * 1000
@pytest.mark.parametrize("freqstr,exp_freqstr", [
("D", "D"), ("W", "D"), ("M", "D"),
("S", "S"), ("T", "S"), ("H", "S")
])
def test_get_to_timestamp_base(freqstr, exp_freqstr):
tsb = libfrequencies.get_to_timestamp_base
assert tsb(get_freq_code(freqstr)[0]) == get_freq_code(exp_freqstr)[0]
_reso = resolution.Resolution
@pytest.mark.parametrize("freqstr,expected", [
("A", "year"), ("Q", "quarter"), ("M", "month"),
("D", "day"), ("H", "hour"), ("T", "minute"),
("S", "second"), ("L", "millisecond"),
("U", "microsecond"), ("N", "nanosecond")
])
def test_get_str_from_freq(freqstr, expected):
assert _reso.get_str_from_freq(freqstr) == expected
@pytest.mark.parametrize("freq", ["A", "Q", "M", "D", "H",
"T", "S", "L", "U", "N"])
def test_get_freq_roundtrip(freq):
result = _reso.get_freq(_reso.get_str_from_freq(freq))
assert freq == result
@pytest.mark.parametrize("freq", ["D", "H", "T", "S", "L", "U"])
def test_get_freq_roundtrip2(freq):
result = _reso.get_freq(_reso.get_str(_reso.get_reso_from_freq(freq)))
assert freq == result
@pytest.mark.parametrize("args,expected", [
((1.5, "T"), (90, "S")), ((62.4, "T"), (3744, "S")),
((1.04, "H"), (3744, "S")), ((1, "D"), (1, "D")),
((0.342931, "H"), (1234551600, "U")), ((1.2345, "D"), (106660800, "L"))
])
def test_resolution_bumping(args, expected):
# see gh-14378
assert _reso.get_stride_from_decimal(*args) == expected
@pytest.mark.parametrize("args", [
(0.5, "N"),
# Too much precision in the input can prevent.
(0.3429324798798269273987982, "H")
])
def test_cat(args):
msg = "Could not convert to integer offset at any resolution"
with pytest.raises(ValueError, match=msg):
_reso.get_stride_from_decimal(*args)
@pytest.mark.parametrize("freq_input,expected", [
# Frequency string.
("A", (get_freq("A"), 1)),
("3D", (get_freq("D"), 3)),
("-2M", (get_freq("M"), -2)),
# Tuple.
(("D", 1), (get_freq("D"), 1)),
(("A", 3), (get_freq("A"), 3)),
(("M", -2), (get_freq("M"), -2)),
((5, "T"), (FreqGroup.FR_MIN, 5)),
# Numeric Tuple.
((1000, 1), (1000, 1)),
# Offsets.
(offsets.Day(), (get_freq("D"), 1)),
(offsets.Day(3), (get_freq("D"), 3)),
(offsets.Day(-2), (get_freq("D"), -2)),
(offsets.MonthEnd(), (get_freq("M"), 1)),
(offsets.MonthEnd(3), (get_freq("M"), 3)),
(offsets.MonthEnd(-2), (get_freq("M"), -2)),
(offsets.Week(), (get_freq("W"), 1)),
(offsets.Week(3), (get_freq("W"), 3)),
(offsets.Week(-2), (get_freq("W"), -2)),
(offsets.Hour(), (FreqGroup.FR_HR, 1)),
# Monday is weekday=0.
(offsets.Week(weekday=1), (get_freq("W-TUE"), 1)),
(offsets.Week(3, weekday=0), (get_freq("W-MON"), 3)),
(offsets.Week(-2, weekday=4), (get_freq("W-FRI"), -2)),
])
def test_get_freq_code(freq_input, expected):
assert get_freq_code(freq_input) == expected
def test_get_code_invalid():
with pytest.raises(ValueError, match="Invalid frequency"):
get_freq_code((5, "baz"))
|
[
"jeff@reback.net"
] |
jeff@reback.net
|
867849b4a1a74bad8e87de49c3ee8b8079072654
|
3b78d0d2dda1e316d9be02ad05884102422484cf
|
/exercises/19_1_blog/blogs/models.py
|
fd86a36ea7f815487a3761af65455c2f3bf251a8
|
[] |
no_license
|
xerifeazeitona/PCC_WebApp
|
4d28caedf44f5a5b6617a75393256bb0eb9d436c
|
26f73805bf20a01f3879a05bf96e8ff6db0449fe
|
refs/heads/main
| 2023-03-06T08:40:18.422416
| 2021-02-22T21:21:38
| 2021-02-22T21:21:38
| 340,138,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
from django.db import models
from django.contrib.auth.models import User
class BlogPost(models.Model):
"""Simple model of a basic blog post."""
title = models.CharField(max_length=200)
text = models.TextField()
date_added = models.DateTimeField(auto_now_add=True)
author = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
"""Return a string representation of the model."""
return self.title
|
[
"juliano.amaral@gmail.com"
] |
juliano.amaral@gmail.com
|
0362844276cdcce64c66e350f09c947d57457c2f
|
264ce32d9eebb594cc424ecb3b8caee6cb75c2f3
|
/content/hw/02_bootstrap/ok/tests/q9.py
|
b2cd04951595906ae26cf1f60d6afb44d329d8d3
|
[] |
no_license
|
anhnguyendepocen/psych101d
|
a1060210eba2849f371d754e8f79e416754890f9
|
41057ed5ef1fd91e243ab41040f71b51c6443924
|
refs/heads/master
| 2022-03-24T02:20:32.268048
| 2019-12-21T02:51:02
| 2019-12-21T02:51:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,709
|
py
|
test = {
"name": "Putting It All Together",
"points": 1,
"suites": [
{
"cases": [
{
"code": r"""
>>> ## Did you define the right variables?
>>> "easy_boot_delta_means" in globals().keys()
True
>>> "hard_boot_delta_means" in globals().keys()
True
>>> "no_difference_easy" in globals().keys()
True
>>> "no_difference_hard" in globals().keys()
True
""",
"hidden": False,
"locked": False
},
{
"code": r"""
>>> ## Are the left sides located in the right spot relative to 0?
>>> np.percentile(easy_boot_delta_means, 5) < 0
True
>>> np.percentile(hard_boot_delta_means, 5) < 0
False
>>> ## Are the means reasonable?
>>> np.mean(easy_boot_delta_means) > 0.15
True
>>> np.mean(hard_boot_delta_means) > 1.5
True
>>> ## Are the final inferences correct?
>>> no_difference_easy, no_difference_hard
(True, False)
""",
"hidden": False,
"locked": False
}
],
"setup": r"""
>>> eps = 1e-5
""",
"teardown": r"""
""",
"type": "doctest"}]
}
|
[
"charlesfrye@berkeley.edu"
] |
charlesfrye@berkeley.edu
|
0811d6891a523db246ae901e3caaa94f48a7ec08
|
8fc999f5262b5a2dadc830f1cc345f51b6dde862
|
/samples/conceptual_samples/remaining/tuple.py
|
cc388fd0fa5107256c5ce382ac3135df515ef79c
|
[] |
no_license
|
pandiyan07/python_2.x_tutorial_for_beginners_and_intermediate
|
5ca5cb5fcfe7ce08d109fb32cdf8138176ac357a
|
a4c14deaa518fea1f8e95c2cc98783c8ca3bd4ae
|
refs/heads/master
| 2022-04-09T20:33:28.527653
| 2020-03-27T06:35:50
| 2020-03-27T06:35:50
| 250,226,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
# this sample python script program is been created to demonstrate the tuple packing and tuple unpacking.
data=("Name: pandiyan","Wannabe: I want to be a pythoneer","Nationality: indian","Proffession: hacker","Mothertounge: tamil")
name,wannabe,nationality,proffession,mothertounge=data
def details():
print name
print wannabe
print nationality
print proffession
print mothertounge
print"Are you sure that you want to see my details ..??\t(y/n)"
option=raw_input("> ")
if option=='y':
details()
elif option=='n':
print'thank you for opening this file \n now just get lost..!!'
else:
print"please enter 'y' for yes or enter 'n' for no"
#the end of the program file . happy coding..!!
|
[
"becool.pandiyan@gmail.com"
] |
becool.pandiyan@gmail.com
|
b49cc96396beee95aa535d05b7ed2be3897f7ec1
|
1160a607aa9b445ba96674f4e3b86079fede9bdc
|
/fichasManage/utils.py
|
bedae7e98cc8ae74ed1ceee6136f557b2de2618a
|
[] |
no_license
|
astandre/fichas-geologicas-cliente
|
70820bca77c9ffa4de28d207ff84490205a8cc56
|
90ae40afd6aa4a331316e5106950a8406a38cf1f
|
refs/heads/master
| 2022-12-12T03:05:29.945240
| 2019-02-04T18:46:44
| 2019-02-04T18:46:44
| 165,874,901
| 0
| 0
| null | 2021-06-10T21:07:43
| 2019-01-15T15:26:21
|
Python
|
UTF-8
|
Python
| false
| false
| 6,608
|
py
|
from .constants import *
def build_ficha_geologica(ficha):
if "nomenclaturaUnidadGeologica" in ficha:
try:
ficha["nomenclaturaUnidadGeologica"] = UNIDAD_GEOLOGICA[ficha["nomenclaturaUnidadGeologica"]]
except KeyError:
print("Key error")
if "tipoContactoGeo" in ficha:
try:
ficha["tipoContactoGeo"] = UNIDAD_GEOLOGICA[ficha["tipoContactoGeo"]]
except KeyError:
print("Key error")
if "limiteContactoGeo" in ficha:
try:
ficha["limiteContactoGeo"] = UNIDAD_GEOLOGICA[ficha["limiteContactoGeo"]]
except KeyError:
print("Key error")
if "certezaContactoGeo" in ficha:
try:
ficha["certezaContactoGeo"] = UNIDAD_GEOLOGICA[ficha["certezaContactoGeo"]]
except KeyError:
print("Key error")
if "origenRoca" in ficha:
try:
ficha["origenRoca"] = UNIDAD_GEOLOGICA[ficha["origenRoca"]]
except KeyError:
print("Key error")
if "estructuraRoca" in ficha:
try:
ficha["estructuraRoca"] = UNIDAD_GEOLOGICA[ficha["estructuraRoca"]]
except KeyError:
print("Key error")
if "pliegue" in ficha:
if "tipo" in ficha["pliegue"]:
try:
ficha["pliegue"]["tipo"] = PLIEGUE_TIPO[ficha["pliegue"]["tipo"]]
except KeyError:
print("Key error")
if "posicion" in ficha["pliegue"]:
try:
ficha["posicion"] = PLIEGUE_POSICION[ficha["pliegue"]["posicion"]]
except KeyError:
print("Key error")
if "anguloEntreFlancos" in ficha["pliegue"]:
try:
ficha["pliegue"]["anguloEntreFlancos"] = PLIEGUE_ANGULO_ENTRE_FLANCOS[
ficha["pliegue"]["anguloEntreFlancos"]]
except KeyError:
print("Key error")
if "perfil" in ficha["pliegue"]:
try:
ficha["pliegue"]["perfil"] = PLIEGUE_PERFIL[ficha["pliegue"]["perfil"]]
except KeyError:
print("Key error")
if "sistema" in ficha["pliegue"]:
try:
ficha["pliegue"]["sistema"] = PLIEGUE_SISTEMA[ficha["pliegue"]["sistema"]]
except KeyError:
print("Key error")
if "eslineal" in ficha:
if "lineacion" in ficha["eslineal"]:
try:
ficha["eslineal"]["lineacion"] = EST_LINEAL_LINEAMIENTO[ficha["eslineal"]["lineacion"]]
except KeyError:
print("Key error")
if "claseEstrLineal" in ficha["eslineal"]:
try:
ficha["eslineal"]["claseEstrLineal"] = EST_LINEAL_CLASE[ficha["eslineal"]["claseEstrLineal"]]
except KeyError:
print("Key error")
if "buzamiento" in ficha["eslineal"]:
try:
ficha["eslineal"]["buzamiento"] = EST_LINEAL_BUZAMIENTO[ficha["eslineal"]["buzamiento"]]
except KeyError:
print("Key error")
if "asociacion" in ficha["eslineal"]:
try:
ficha["eslineal"]["asociacion"] = EST_LINEAL_ASOCIACION[ficha["eslineal"]["asociacion"]]
except KeyError:
print("Key error")
if "formacion" in ficha["eslineal"]:
try:
ficha["eslineal"]["formacion"] = EST_LINEAL_FORMACION[ficha["eslineal"]["formacion"]]
except KeyError:
print("Key error")
if "diaclasaClase" in ficha["eslineal"]:
try:
ficha["eslineal"]["diaclasaClase"] = EST_LINEAL_DIACLASA_OR_ROCAS[ficha["eslineal"]["diaclasaClase"]]
except KeyError:
print("Key error")
if "esplanar" in ficha:
if "buzamientoIntensidad" in ficha["esplanar"]:
try:
ficha["esplanar"]["buzamientoIntensidad"] = EST_PLANAR_BUZ_INTEN[
ficha["esplanar"]["buzamientoIntensidad"]]
except KeyError:
print("Key error")
if "clivaje" in ficha["esplanar"]:
try:
ficha["esplanar"]["clivaje"] = EST_PLANAR_CLIVAJE[ficha["esplanar"]["clivaje"]]
except KeyError:
print("Key error")
if "estratificacion" in ficha["esplanar"]:
try:
ficha["esplanar"]["estratificacion"] = EST_PLANAR_ESTRAT[ficha["esplanar"]["estratificacion"]]
except KeyError:
print("Key error")
if "fotogeologia" in ficha["esplanar"]:
try:
ficha["esplanar"]["fotogeologia"] = EST_PLANAR_FOTO[ficha["esplanar"]["fotogeologia"]]
except KeyError:
print("Key error")
if "zonaDeCizalla" in ficha["esplanar"]:
try:
ficha["esplanar"]["zonaDeCizalla"] = EST_PLANAR_ZONA[ficha["esplanar"]["zonaDeCizalla"]]
except KeyError:
print("Key error")
if "rocasMetaforicas" in ficha["esplanar"]:
try:
ficha["esplanar"]["rocasMetaforicas"] = EST_LINEAL_DIACLASA_OR_ROCAS[
ficha["esplanar"]["rocasMetaforicas"]]
except KeyError:
print("Key error")
if "rocasIgneas" in ficha["esplanar"]:
try:
ficha["esplanar"]["rocasIgneas"] = EST_LINEAL_DIACLASA_OR_ROCAS[
ficha["esplanar"]["rocasIgneas"]]
except KeyError:
print("Key error")
if "afloramiento" in ficha:
if "dimension" in ficha["afloramiento"]:
try:
ficha["afloramiento"]["dimension"] = AFL_DIMEN[
ficha["afloramiento"]["dimension"]]
except KeyError:
print("Key error")
if "origen" in ficha["afloramiento"]:
try:
ficha["afloramiento"]["origen"] = AFL_ORIGEN_ROCA[
ficha["afloramiento"]["origen"]]
except KeyError:
print("Key error")
if "tipoRoca" in ficha["afloramiento"]:
try:
ficha["afloramiento"]["tipoRoca"] = AFL_TIPO_ROCA[
ficha["afloramiento"]["tipoRoca"]]
except KeyError:
print("Key error")
if "sitio" in ficha["afloramiento"]:
try:
ficha["afloramiento"]["sitio"] = AFL_SITIO[
ficha["afloramiento"]["sitio"]]
except KeyError:
print("Key error")
return ficha
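# Hedged refactor sketch (editorial suggestion, not part of the original
# module): the repeated try/except lookups above could collapse into one
# helper. '_map_field' is a hypothetical name; the mappings are the constants
# already imported from .constants.
def _map_field(container, field, mapping):
    if field in container:
        try:
            container[field] = mapping[container[field]]
        except KeyError:
            print("Key error")

# Example: _map_field(ficha, "origenRoca", UNIDAD_GEOLOGICA)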
|
[
"andreherrera97@hotmail.com"
] |
andreherrera97@hotmail.com
|
d52becaa8c882ebedbde683171421ae43a6d6d7b
|
79d3fd089addc6a13ff1a83617398ffd1a0880b0
|
/topics/complex_numbers.py
|
5ecc6a01cc16be43797347bd88d1af7ab792b75a
|
[] |
no_license
|
stoeckley/manim
|
1ee27f5c73d028b5b1bd948c6067508a9e393d7b
|
0af9b3005cb659c98226c8ad737bfc1e7b97517f
|
refs/heads/master
| 2021-05-31T19:34:34.098497
| 2016-01-17T02:08:51
| 2016-01-17T02:08:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,362
|
py
|
from helpers import *
from number_line import NumberPlane
from animation.transform import ApplyPointwiseFunction
from animation.simple_animations import Homotopy
from scene import Scene
def complex_string(complex_num):
return filter(lambda c : c not in "()", str(complex_num))
class ComplexPlane(NumberPlane):
DEFAULT_CONFIG = {
"color" : GREEN,
"unit_to_spatial_width" : 1,
"line_frequency" : 1,
"faded_line_frequency" : 0.5,
"number_at_center" : complex(0),
}
def __init__(self, **kwargs):
digest_config(self, kwargs)
kwargs.update({
"x_unit_to_spatial_width" : self.unit_to_spatial_width,
"y_unit_to_spatial_height" : self.unit_to_spatial_width,
"x_line_frequency" : self.line_frequency,
"x_faded_line_frequency" : self.faded_line_frequency,
"y_line_frequency" : self.line_frequency,
"y_faded_line_frequency" : self.faded_line_frequency,
"num_pair_at_center" : (self.number_at_center.real,
self.number_at_center.imag),
})
NumberPlane.__init__(self, **kwargs)
def number_to_point(self, number):
number = complex(number)
return self.num_pair_to_point((number.real, number.imag))
def get_coordinate_labels(self, *numbers):
result = []
nudge = 0.1*(DOWN+RIGHT)
if len(numbers) == 0:
numbers = range(-int(self.x_radius), int(self.x_radius))
numbers += [
complex(0, y)
for y in range(-int(self.y_radius), int(self.y_radius))
]
for number in numbers:
point = self.number_to_point(number)
if number == 0:
num_str = "0"
else:
num_str = str(number).replace("j", "i")
num = TexMobject(num_str)
num.scale(self.number_scale_factor)
num.shift(point-num.get_corner(UP+LEFT)+nudge)
result.append(num)
return result
def add_coordinates(self, *numbers):
self.add(*self.get_coordinate_labels(*numbers))
return self
def add_spider_web(self, circle_freq = 1, angle_freq = np.pi/6):
self.fade(self.fade_factor)
config = {
"color" : self.color,
"density" : self.density,
}
for radius in np.arange(circle_freq, SPACE_WIDTH, circle_freq):
self.add(Circle(radius = radius, **config))
for angle in np.arange(0, 2*np.pi, angle_freq):
end_point = np.cos(angle)*RIGHT + np.sin(angle)*UP
end_point *= SPACE_WIDTH
self.add(Line(ORIGIN, end_point, **config))
return self
class ComplexFunction(ApplyPointwiseFunction):
def __init__(self, function, mobject = ComplexPlane, **kwargs):
if "path_func" not in kwargs:
self.path_func = path_along_arc(
np.log(function(complex(1))).imag
)
ApplyPointwiseFunction.__init__(
self,
lambda (x, y, z) : complex_to_R3(function(complex(x, y))),
instantiate(mobject),
**kwargs
)
class ComplexHomotopy(Homotopy):
def __init__(self, complex_homotopy, mobject = ComplexPlane, **kwargs):
"""
        Complex homotopy: a function from C x [0, 1] to C.
"""
def homotopy((x, y, z, t)):
c = complex_homotopy((complex(x, y), t))
return (c.real, c.imag, z)
        Homotopy.__init__(self, homotopy, mobject, **kwargs)
class ComplexMultiplication(Scene):
@staticmethod
def args_to_string(multiplier, mark_one = False):
num_str = complex_string(multiplier)
arrow_str = "MarkOne" if mark_one else ""
return num_str + arrow_str
@staticmethod
def string_to_args(arg_string):
parts = arg_string.split()
multiplier = complex(parts[0])
mark_one = len(parts) > 1 and parts[1] == "MarkOne"
return (multiplier, mark_one)
def construct(self, multiplier, mark_one = False, **plane_config):
norm = np.linalg.norm(multiplier)
arg = np.log(multiplier).imag
plane_config["faded_line_frequency"] = 0
plane_config.update(DEFAULT_PLANE_CONFIG)
if norm > 1 and "density" not in plane_config:
plane_config["density"] = norm*DEFAULT_POINT_DENSITY_1D
if "radius" not in plane_config:
radius = SPACE_WIDTH
if norm > 0 and norm < 1:
radius /= norm
else:
radius = plane_config["radius"]
plane_config["x_radius"] = plane_config["y_radius"] = radius
plane = ComplexPlane(**plane_config)
self.plane = plane
self.add(plane)
# plane.add_spider_web()
self.anim_config = {
"run_time" : 2.0,
"path_func" : path_along_arc(arg)
}
plane_config["faded_line_frequency"] = 0.5
background = ComplexPlane(color = "grey", **plane_config)
# background.add_spider_web()
labels = background.get_coordinate_labels()
self.paint_into_background(background, *labels)
self.mobjects_to_move_without_molding = []
if mark_one:
self.draw_dot("1", 1, True)
self.draw_dot("z", multiplier)
self.mobjects_to_multiply = [plane]
self.additional_animations = []
self.multiplier = multiplier
if self.__class__ == ComplexMultiplication:
self.apply_multiplication()
def draw_dot(self, tex_string, value, move_dot = False):
dot = Dot(
self.plane.number_to_point(value),
radius = 0.1*self.plane.unit_to_spatial_width,
color = BLUE if value == 1 else YELLOW
)
label = TexMobject(tex_string)
label.shift(dot.get_center()+1.5*UP+RIGHT)
arrow = Arrow(label, dot)
self.add(label)
self.play(ShowCreation(arrow))
self.play(ShowCreation(dot))
self.dither()
self.remove(label, arrow)
if move_dot:
self.mobjects_to_move_without_molding.append(dot)
return dot
def apply_multiplication(self):
def func((x, y, z)):
complex_num = self.multiplier*complex(x, y)
return (complex_num.real, complex_num.imag, z)
mobjects = self.mobjects_to_multiply
mobjects += self.mobjects_to_move_without_molding
mobjects += [anim.mobject for anim in self.additional_animations]
self.add(*mobjects)
full_multiplications = [
ApplyMethod(mobject.apply_function, func, **self.anim_config)
for mobject in self.mobjects_to_multiply
]
movements_with_plane = [
ApplyMethod(
mobject.shift,
func(mobject.get_center())-mobject.get_center(),
**self.anim_config
)
for mobject in self.mobjects_to_move_without_molding
]
self.dither()
self.play(*reduce(op.add, [
full_multiplications,
movements_with_plane,
self.additional_animations
]))
self.dither()
|
[
"grantsanderson7@gmail.com"
] |
grantsanderson7@gmail.com
|
41409f82ccd2588398fdf051d1696b159d04542a
|
b122b0d43455c6af3344e4319bead23bb9162dac
|
/instagram/insta_hossem.py
|
2dea5a7824669a6cb69d3c39770d92c21c404dde
|
[] |
no_license
|
firchatn/scripts-python
|
85c7704170404f8a2e531164258f6c8b7e0d27f8
|
25a6a298aae279f23f08c2ce4674d866c2fca0ef
|
refs/heads/master
| 2021-03-16T06:09:23.484585
| 2018-11-02T10:09:36
| 2018-11-02T10:09:36
| 105,776,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,239
|
py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import time
browser = webdriver.Firefox()
browser.get('http://instagram.com')
time.sleep(10)
browser.find_element_by_xpath("//a[contains(@class, '_b93kq')]").click()
compte = ''
password = ''
follow = 'css'
user_name = browser.find_element_by_name('username')
user_name.clear()
user_name.send_keys(compte)
password_el = browser.find_element_by_name('password')
password_el.clear()
password_el.send_keys(password)
password_el.send_keys(Keys.RETURN)
time.sleep(5)
search = browser.find_element_by_xpath("//input[contains(@class, '_avvq0 _o716c')]")
time.sleep(5)
search.send_keys(follow)
time.sleep(5)
browser.find_element_by_xpath("//span[contains(@class, '_sgi9z')]").click()
time.sleep(5)
browser.find_element_by_xpath("//div[contains(@class, '_mck9w _gvoze _f2mse')]").click()
time.sleep(5)
browser.find_element_by_xpath("//a[contains(@class, '_nzn1h _gu6vm')]").click()
time.sleep(3)
print("list now")
follow_buttons = browser.find_elements(By.XPATH, "//button[contains(@class, '_qv64e _gexxb _4tgw8 _njrw0')]")
time.sleep(3)
for i in range(5):
    if follow_buttons[i].text == 'Follow':
        follow_buttons[i].click()
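# Hedged addendum (editorial sketch, not part of the original script):
# explicit waits are generally more robust than fixed time.sleep() calls.
# This reuses the 'browser' driver created above; the XPath is the same
# brittle class-based selector the script already relies on.
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

wait = WebDriverWait(browser, 10)
# Block until the search box exists instead of sleeping a fixed interval.
search_box = wait.until(
    EC.presence_of_element_located(
        (By.XPATH, "//input[contains(@class, '_avvq0 _o716c')]")))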
|
[
"firaschaabencss@gmail.com"
] |
firaschaabencss@gmail.com
|
d25c31f1bf4a4fe5bfe3e31be5b3e8435213d236
|
ad38d8b669a6e173773ee4eb61ace40d6b508e21
|
/setup.py
|
25a29626aa99ce9d64ae330b3062737e5c27f025
|
[] |
no_license
|
CJWorkbench/intercom
|
c3bf3eb407ea7c36460cb3ada8359e42938f31c9
|
c8da8e94584af7d41e350b9bf580bcebc035cbc1
|
refs/heads/main
| 2021-06-19T01:16:32.996932
| 2021-03-19T20:47:58
| 2021-03-19T20:47:58
| 192,569,734
| 0
| 0
| null | 2021-03-19T20:48:41
| 2019-06-18T15:44:12
|
Python
|
UTF-8
|
Python
| false
| false
| 392
|
py
|
#!/usr/bin/env python
from setuptools import setup
setup(
name="intercom",
version="0.0.1",
description="Download user lists from Intercom",
author="Adam Hooper",
author_email="adam@adamhooper.com",
url="https://github.com/CJWorkbench/intercom",
packages=[""],
py_modules=["libraryofcongress"],
install_requires=["pandas==0.25.0", "cjwmodule>=1.3.0"],
)
|
[
"adam@adamhooper.com"
] |
adam@adamhooper.com
|
29ef7a6457b0ff26e9d975f5624e21f36614095c
|
6bd94dab1b8b4fc0827bf0de9a405234d4e52bf6
|
/prototype/database/db.py
|
ab8c327d25436a630561f4be50bb53ac4841bf33
|
[
"BSD-3-Clause"
] |
permissive
|
shenghuatang/ApiLogicServer
|
4f11d512bb72a504120e12684168f1ca932d83ea
|
ea5134d907b3ccf03f6514a2c9a1c25b5a737c68
|
refs/heads/main
| 2023-06-22T20:27:41.125374
| 2021-07-17T15:22:12
| 2021-07-17T15:22:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
py
|
from sqlalchemy.orm import Session
from sqlalchemy.ext.declarative import declarative_base
import safrs
db = safrs.DB
Base: declarative_base = db.Model
session: Session = db.session
print("got session: " + str(session))
def remove_session():
db.session.remove()
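# Hedged usage note (editorial; assumes safrs runs on Flask): the scoped
# session should be removed when each request ends. 'app' is a hypothetical
# Flask application object.
# app.teardown_appcontext(lambda exc: remove_session())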
|
[
"valjhuber@gmail.com"
] |
valjhuber@gmail.com
|
caa066ac6f9008d3b8b8f1ba7e83cfe88b54852e
|
ac2c3e8c278d0aac250d31fd023c645fa3984a1b
|
/saleor/saleor/shipping/__init__.py
|
92a86a08b6125f404c3f263a17cdc15aa85fe5f4
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] |
permissive
|
jonndoe/saleor-test-shop
|
152bc8bef615382a45ca5f4f86f3527398bd1ef9
|
1e83176684f418a96260c276f6a0d72adf7dcbe6
|
refs/heads/master
| 2023-01-21T16:54:36.372313
| 2020-12-02T10:19:13
| 2020-12-02T10:19:13
| 316,514,489
| 1
| 1
|
BSD-3-Clause
| 2020-11-27T23:29:20
| 2020-11-27T13:52:33
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 199
|
py
|
class ShippingMethodType:
PRICE_BASED = "price"
WEIGHT_BASED = "weight"
CHOICES = [
(PRICE_BASED, "Price based shipping"),
(WEIGHT_BASED, "Weight based shipping"),
]
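# Hedged usage sketch (editorial): these constants are shaped for a Django
# choices field; the 'ShippingMethod' model below is an assumption, not part
# of this module.
# class ShippingMethod(models.Model):
#     type = models.CharField(
#         max_length=30,
#         choices=ShippingMethodType.CHOICES,
#         default=ShippingMethodType.PRICE_BASED,
#     )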
|
[
"testuser@151-248-122-3.cloudvps.regruhosting.ru"
] |
testuser@151-248-122-3.cloudvps.regruhosting.ru
|
86c2f3c1a2765b31b3aaed5f9b777ff8028bc955
|
6f8266e36a252e3d0c3c5ec94d2238b21325af3e
|
/unsupervised_learning/1.Clustering/n3_inspectClustering.py
|
d7af304d5a1891720eb395184e707584d049a141
|
[] |
no_license
|
ptsouth97/DeepLearn
|
c64b9b36850deb075020276d2b01c833c4b70c7d
|
e1eede4beb645c43545264a7a3ab828ae00c2a4f
|
refs/heads/master
| 2020-03-22T06:41:42.046163
| 2019-01-25T18:31:43
| 2019-01-25T18:31:43
| 139,651,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 942
|
py
|
#!/usr/bin/python3
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import n1_clusters, n2_clustering2Dpoints
def main():
''' create a KMeans model to find 3 clusters, and fit it to the data '''
# Create array of data
points, new_points = n1_clusters.make_points()
# Assign the columns of new_points: xs and ys
xs = new_points[:,0]
ys = new_points[:,1]
# Get the labels and the model
labels, model = n2_clustering2Dpoints.get_labels()
# Make a scatter plot of xs and ys, using labels to define the colors
plt.scatter(xs, ys, c=labels, alpha=0.5)
# Assign the cluster centers: centroids
centroids = model.cluster_centers_
# Assign the columns of centroids: centroids_x, centroids_y
centroids_x = centroids[:,0]
centroids_y = centroids[:,1]
# Make a scatter plot of centroids_x and centroids_y
plt.scatter(centroids_x, centroids_y, marker='D', s=50)
plt.show()
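# Hedged addendum (editorial sketch): an inertia "elbow" scan is a common way
# to sanity-check the fixed choice of 3 clusters used in this series.
def elbow_scan(points, max_k=8):
    inertias = []
    for k in range(1, max_k + 1):
        model = KMeans(n_clusters=k).fit(points)
        inertias.append(model.inertia_)  # within-cluster sum of squares
    plt.plot(range(1, max_k + 1), inertias, marker='o')
    plt.xlabel('k')
    plt.ylabel('inertia')
    plt.show()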
if __name__ == '__main__':
main()
|
[
"ptsouth97@gmail.com"
] |
ptsouth97@gmail.com
|
152a43e71398d137edfc3a6bce005472d32e4ccf
|
2dfa9a135508d0c123fe98f81e4b2598e14e0dc0
|
/pytorch_translate/dual_learning/dual_learning_models.py
|
ad8d19ccdf8808e2d2311f20804192e8adec49a5
|
[
"BSD-3-Clause"
] |
permissive
|
Meteorix/translate
|
1ee46c6b8787a18f43f49de4f56871c2aa9660f7
|
1a40e4ae440118bb108d52f7888cad29b154defd
|
refs/heads/master
| 2020-07-02T02:28:34.013666
| 2019-08-07T16:51:44
| 2019-08-07T16:55:38
| 201,386,214
| 0
| 0
|
BSD-3-Clause
| 2019-08-09T03:58:59
| 2019-08-09T03:58:59
| null |
UTF-8
|
Python
| false
| false
| 7,109
|
py
|
#!/usr/bin/env python3
import logging
import torch.nn as nn
from fairseq.models import BaseFairseqModel, register_model
from pytorch_translate import rnn
from pytorch_translate.rnn import (
LSTMSequenceEncoder,
RNNDecoder,
RNNEncoder,
RNNModel,
base_architecture,
)
from pytorch_translate.tasks.pytorch_translate_task import PytorchTranslateTask
logger = logging.getLogger(__name__)
@register_model("dual_learning")
class DualLearningModel(BaseFairseqModel):
"""
An architecture to jointly train primal model and dual model by leveraging
distribution duality, which exist for both parallel data and monolingual
data.
"""
def __init__(self, args, task, primal_model, dual_model, lm_model=None):
super().__init__()
self.args = args
self.task_keys = ["primal", "dual"]
self.models = nn.ModuleDict(
{"primal": primal_model, "dual": dual_model, "lm": lm_model}
)
def forward(self, src_tokens, src_lengths, prev_output_tokens=None):
"""
If batch is monolingual, need to run beam decoding to generate
fake prev_output_tokens.
"""
# TODO: pass to dual model too
primal_encoder_out = self.models["primal"].encoder(src_tokens, src_lengths)
primal_decoder_out = self.models["primal"].decoder(
prev_output_tokens, primal_encoder_out
)
return primal_decoder_out
def max_positions(self):
return {
"primal_source": (
self.models["primal"].encoder.max_positions(),
self.models["primal"].decoder.max_positions(),
),
"dual_source": (
self.models["dual"].encoder.max_positions(),
self.models["dual"].decoder.max_positions(),
),
"primal_parallel": (
self.models["primal"].encoder.max_positions(),
self.models["primal"].decoder.max_positions(),
),
"dual_parallel": (
self.models["dual"].encoder.max_positions(),
self.models["dual"].decoder.max_positions(),
),
}
@register_model("dual_learning_rnn")
class RNNDualLearningModel(DualLearningModel):
"""Train two models for a task and its duality jointly.
    This class uses RNN arch, but can be extended to take arch as an argument.
This class takes translation as a task, but the framework is intended
to be general enough to be applied to other tasks as well.
"""
def __init__(self, args, task, primal_model, dual_model, lm_model=None):
super().__init__(args, task, primal_model, dual_model, lm_model)
@staticmethod
def add_args(parser):
rnn.RNNModel.add_args(parser)
parser.add_argument(
"--unsupervised-dual",
default=False,
action="store_true",
help="Train with dual loss from monolingual data.",
)
parser.add_argument(
"--supervised-dual",
default=False,
action="store_true",
help="Train with dual loss from parallel data.",
)
@classmethod
def build_model(cls, args, task):
""" Build both the primal and dual models.
For simplicity, both models share the same arch, i.e. the same model
params would be used to initialize both models.
Support for different models/archs would be added in further iterations.
"""
base_architecture(args)
if args.sequence_lstm:
encoder_class = LSTMSequenceEncoder
else:
encoder_class = RNNEncoder
decoder_class = RNNDecoder
encoder_embed_tokens, decoder_embed_tokens = RNNModel.build_embed_tokens(
args, task.primal_src_dict, task.primal_tgt_dict
)
primal_encoder = encoder_class(
task.primal_src_dict,
embed_dim=args.encoder_embed_dim,
embed_tokens=encoder_embed_tokens,
cell_type=args.cell_type,
num_layers=args.encoder_layers,
hidden_dim=args.encoder_hidden_dim,
dropout_in=args.encoder_dropout_in,
dropout_out=args.encoder_dropout_out,
residual_level=args.residual_level,
bidirectional=bool(args.encoder_bidirectional),
)
primal_decoder = decoder_class(
src_dict=task.primal_src_dict,
dst_dict=task.primal_tgt_dict,
embed_tokens=decoder_embed_tokens,
vocab_reduction_params=args.vocab_reduction_params,
encoder_hidden_dim=args.encoder_hidden_dim,
embed_dim=args.decoder_embed_dim,
out_embed_dim=args.decoder_out_embed_dim,
cell_type=args.cell_type,
num_layers=args.decoder_layers,
hidden_dim=args.decoder_hidden_dim,
attention_type=args.attention_type,
dropout_in=args.decoder_dropout_in,
dropout_out=args.decoder_dropout_out,
residual_level=args.residual_level,
averaging_encoder=args.averaging_encoder,
)
primal_task = PytorchTranslateTask(
args, task.primal_src_dict, task.primal_tgt_dict
)
primal_model = rnn.RNNModel(primal_task, primal_encoder, primal_decoder)
encoder_embed_tokens, decoder_embed_tokens = RNNModel.build_embed_tokens(
args, task.dual_src_dict, task.dual_tgt_dict
)
dual_encoder = encoder_class(
task.dual_src_dict,
embed_dim=args.encoder_embed_dim,
embed_tokens=encoder_embed_tokens,
cell_type=args.cell_type,
num_layers=args.encoder_layers,
hidden_dim=args.encoder_hidden_dim,
dropout_in=args.encoder_dropout_in,
dropout_out=args.encoder_dropout_out,
residual_level=args.residual_level,
bidirectional=bool(args.encoder_bidirectional),
)
dual_decoder = decoder_class(
src_dict=task.dual_src_dict,
dst_dict=task.dual_tgt_dict,
embed_tokens=decoder_embed_tokens,
vocab_reduction_params=args.vocab_reduction_params,
encoder_hidden_dim=args.encoder_hidden_dim,
embed_dim=args.decoder_embed_dim,
out_embed_dim=args.decoder_out_embed_dim,
cell_type=args.cell_type,
num_layers=args.decoder_layers,
hidden_dim=args.decoder_hidden_dim,
attention_type=args.attention_type,
dropout_in=args.decoder_dropout_in,
dropout_out=args.decoder_dropout_out,
residual_level=args.residual_level,
averaging_encoder=args.averaging_encoder,
)
dual_task = PytorchTranslateTask(args, task.dual_src_dict, task.dual_tgt_dict)
dual_model = rnn.RNNModel(dual_task, dual_encoder, dual_decoder)
        # TODO (T36875783): instantiate a language model
lm_model = None
return RNNDualLearningModel(args, task, primal_model, dual_model, lm_model)
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
438d4d3c754121c02e3148d534a7f49f501ba665
|
bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d
|
/lib/third_party/google/cloud/pubsublite/internal/wire/default_routing_policy.py
|
9d0428069840f818b4cf68ad25018cf2cc91f352
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
05fbb473d629195f25887fc5bfaa712f2cbc0a24
|
392abf004b16203030e6efd2f0af24db7c8d669e
|
refs/heads/master
| 2023-08-31T05:40:41.317697
| 2023-08-23T18:23:16
| 2023-08-23T18:23:16
| 335,182,594
| 9
| 2
|
NOASSERTION
| 2022-10-29T20:49:13
| 2021-02-02T05:47:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,811
|
py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import random
from google.cloud.pubsublite.internal.wire.routing_policy import RoutingPolicy
from google.cloud.pubsublite.types.partition import Partition
from google.cloud.pubsublite_v1.types import PubSubMessage
class DefaultRoutingPolicy(RoutingPolicy):
"""
The default routing policy which routes based on sha256 % num_partitions using the key if set or round robin if
unset.
"""
_num_partitions: int
_current_round_robin: Partition
def __init__(self, num_partitions: int):
self._num_partitions = num_partitions
self._current_round_robin = Partition(random.randint(0, num_partitions - 1))
def route(self, message: PubSubMessage) -> Partition:
"""Route the message using the key if set or round robin if unset."""
if not message.key:
result = Partition(self._current_round_robin.value)
self._current_round_robin = Partition(
(self._current_round_robin.value + 1) % self._num_partitions
)
return result
sha = hashlib.sha256()
sha.update(message.key)
as_int = int.from_bytes(sha.digest(), byteorder="big")
return Partition(as_int % self._num_partitions)
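# Hedged usage sketch (editorial, not part of the library module): identical
# keys always hash to the same partition, while unkeyed messages rotate
# round-robin. Constructing PubSubMessage(key=...) directly is an assumption
# about the proto-plus type.
if __name__ == "__main__":
    policy = DefaultRoutingPolicy(num_partitions=4)
    p1 = policy.route(PubSubMessage(key=b"user-123"))
    p2 = policy.route(PubSubMessage(key=b"user-123"))
    assert p1.value == p2.value  # keyed routing is deterministic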
|
[
"cloudsdk.mirror@gmail.com"
] |
cloudsdk.mirror@gmail.com
|
bd34f0c623410189976e30913ec1b9bcb28f7ef9
|
cf9f61e354658d92cd4f8fadc5ca6f1eb8dff9c1
|
/chicagoID.py
|
7197e04eddba0c5620f32e71e8afa5ce24d3c78e
|
[] |
no_license
|
cphalen/IDCard
|
c183bd383c935ae9b4d4cecb07fc07d937e92229
|
78c7c044a25f363182b0b52927df127866e82925
|
refs/heads/master
| 2021-05-04T20:49:46.855457
| 2018-09-29T02:47:55
| 2018-09-29T02:47:55
| 119,841,029
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 507
|
py
|
from sharedCode import send
print("Type 'exit' to exit the program")
f = open('ISO-Dump.csv')
lines = f.read().split("\n")
chiID = {}
for line in lines:
if line != "":
cols = line.replace(' ', '').split(",")
chiID[cols[5]] = cols[:]
while True:
input_var = str(raw_input("Enter ID #: "))
if input_var == 'exit':
exit()
    try:
        record = chiID[input_var]
        send(record)
    except KeyError:
        # A bare 'except' here would also hide failures inside send();
        # only the unknown-ID case is handled.
        print("error reading ID, try again")
|
[
"="
] |
=
|
8fc0b4eca57a8acee9d14094dab8fc6f6b7ff91f
|
f48f9798819b12669a8428f1dc0639e589fb1113
|
/desktop/kde/l10n/kde-l10n-de/actions.py
|
fe0c826ffa47e8dea11b10ba4cb6c4f14a5849cd
|
[] |
no_license
|
vdemir/PiSiPackages-pardus-2011-devel
|
781aac6caea2af4f9255770e5d9301e499299e28
|
7e1867a7f00ee9033c70cc92dc6700a50025430f
|
refs/heads/master
| 2020-12-30T18:58:18.590419
| 2012-03-12T03:16:34
| 2012-03-12T03:16:34
| 51,609,831
| 1
| 0
| null | 2016-02-12T19:05:41
| 2016-02-12T19:05:40
| null |
UTF-8
|
Python
| false
| false
| 936
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2008-2009 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import shelltools
from pisi.actionsapi import cmaketools
from pisi.actionsapi import get
from pisi.actionsapi import kde4
from pisi.actionsapi import pisitools
WorkDir="%s-%s" % (get.srcNAME(), get.srcVERSION())
shelltools.export("HOME", get.workDIR())
def setup():
# Remove after switching to new KDEPIM 4.6 tree
# We already have those translations in kdepim-4.4 package
pisitools.dosed("messages/CMakeLists.txt", "add_subdirectory\\(kdepim\\-runtime\\)", "#add_subdirectory(kdepim-runtime)")
pisitools.dosed("messages/CMakeLists.txt", "add_subdirectory\\(kdepim\\)", "#add_subdirectory(kdepim)")
kde4.configure()
def build():
kde4.make()
def install():
kde4.install()
|
[
"kaptan@pisipackages.org"
] |
kaptan@pisipackages.org
|
2047511f9cf33dec55bba5391866c91495f9d24d
|
981fcfe446a0289752790fd0c5be24020cbaee07
|
/python2_Grammer/src/ku/matplot_/dot/2设置点的样式.py
|
03d7118fd8acf4abea24a3c319252e4eff548180
|
[] |
no_license
|
lijianbo0130/My_Python
|
7ba45a631049f6defec3977e680cd9bd75d138d1
|
8bd7548c97d2e6d2982070e949f1433232db9e07
|
refs/heads/master
| 2020-12-24T18:42:19.103529
| 2016-05-30T03:03:34
| 2016-05-30T03:03:34
| 58,097,799
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
#coding=utf-8
'''
Created on 2015-05-04
@author: Administrator
'''
def scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None, vmin=None,
vmax=None, alpha=None, linewidths=None, verts=None, hold=None,
**kwargs):
'''
    :param x: X vector
    :param y: Y vector
    :param s: size of the points
    :param c: color of the points
    :param marker: marker style of the points
:param cmap:
:param norm:
:param vmin:
:param vmax:
:param alpha:
:param linewidths:
:param verts:
:param hold:
'''
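# Hedged usage example (editorial): calling the real matplotlib API that the
# stub above documents.
import numpy as np
import matplotlib.pyplot as plt

xs = np.random.rand(50)
ys = np.random.rand(50)
# s/c/marker/alpha are the parameters described in the stub's docstring.
plt.scatter(xs, ys, s=80, c='red', marker='^', alpha=0.6)
plt.show()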
|
[
"lijianbo@zhongbiao.mobi"
] |
lijianbo@zhongbiao.mobi
|
9a96ee3e3890d83296ae2133c7c9d1595dd7c3c6
|
00b762e37ecef30ed04698033f719f04be9c5545
|
/scripts/test_results/scikit-learn_test_results/files/4_plot_roc_local.py
|
43df54f29e33f98eb8290a943711f1a858793715
|
[] |
no_license
|
kenji-nicholson/smerge
|
4f9af17e2e516333b041727b77b8330e3255b7c2
|
3da9ebfdee02f9b4c882af1f26fe2e15d037271b
|
refs/heads/master
| 2020-07-22T02:32:03.579003
| 2018-06-08T00:40:53
| 2018-06-08T00:40:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,373
|
py
|
"""
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging). Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
# Plot ROC curve
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]))
# Compute macro-average ROC curve and ROC area
fpr["macro"] = np.mean([fpr[i] for i in range(n_classes)], axis=0)
tpr["macro"] = np.mean([tpr[i] for i in range(n_classes)], axis=0)
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot ROC curve
plt.clf()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]))
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]))
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
|
[
"srhee4@cs.washington.edu"
] |
srhee4@cs.washington.edu
|
e001f4413a6b84f5417a4140027ceaf7b64db41c
|
82be39549f5d90b1ca1bb65407ae7695e1686ed8
|
/code_challenges/264/clamy_fernet.py
|
1b42ee8ab2c6228e9691947ac3cc2b4f0b6f4467
|
[] |
no_license
|
dcribb19/bitesofpy
|
827adc9a8984d01c0580f1c03855c939f286507f
|
a1eb0a5553e50e88d3568a36b275138d84d9fb46
|
refs/heads/master
| 2023-03-02T02:04:46.865409
| 2021-02-12T01:20:30
| 2021-02-12T01:20:30
| 259,764,008
| 1
| 0
| null | 2020-10-06T13:48:16
| 2020-04-28T22:16:38
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,146
|
py
|
import base64
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from dataclasses import dataclass
from os import urandom
from typing import ByteString
@dataclass
class ClamyFernet:
"""Fernet implementation by clamytoe
Takes a bytestring as a password and derives a Fernet
key from it. If a key is provided, that key will be used.
:param password: ByteString of the password to use
:param key: ByteString of the key to use, else defaults to None
Other class variables that you should implement that are hard set:
salt, algorithm, length, iterations, backend, and generate a base64
urlsafe_b64encoded key using self.clf().
"""
password: ByteString = b"pybites"
key: ByteString = None
algorithm = hashes.SHA256()
length: int = 32
    salt: ByteString = urandom(16)
iterations: int = 100000
backend = default_backend()
def __post_init__(self):
if not self.key:
self.key = base64.urlsafe_b64encode(self.kdf.derive(self.password))
@property
def kdf(self):
"""Derives the key from the password
Uses PBKDF2HMAC to generate a secure key. This is where you will
use the salt, algorithm, length, iterations, and backend variables.
"""
kdf = PBKDF2HMAC(
algorithm=self.algorithm,
length=self.length,
salt=self.salt,
iterations=self.iterations,
backend=self.backend
)
return kdf
@property
def clf(self):
"""Generates a Fernet object
        Uses the key derived above with cryptography's Fernet.
"""
return Fernet(self.key)
def encrypt(self, message: str) -> ByteString:
"""Encrypts the message passed to it"""
return self.clf.encrypt(message.encode('utf-8'))
def decrypt(self, token: ByteString) -> str:
"""Decrypts the encrypted message passed to it"""
return self.clf.decrypt(token).decode()
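# Hedged usage sketch (editorial): an encrypt/decrypt round trip with the
# defaults defined above.
if __name__ == "__main__":
    cf = ClamyFernet()
    token = cf.encrypt("hello bites")          # ByteString token
    assert cf.decrypt(token) == "hello bites"  # round-trips back to str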
|
[
"daniel.cribb.10@gmail.com"
] |
daniel.cribb.10@gmail.com
|
a68dd80c57ee01d1dce08e0314b100dd89bc0f14
|
5ecfa1bf82a7a9fcb542f5063e0ef1c439e0607d
|
/chapter_14/tally_predictions.py
|
8f225d50913c2f317809641b1695bea200917665
|
[
"MIT"
] |
permissive
|
alexiewx/PracticalDeepLearningPython
|
54256e1e973d30d4290ae9346ee2d314ab6f59c8
|
7466bac89e6e8e1e491dcacc5172598a05920d79
|
refs/heads/main
| 2023-08-30T22:39:04.848840
| 2021-10-24T17:47:39
| 2021-10-24T17:47:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
import numpy as np  # required by np.zeros below


def tally_predictions(model, x, y):
pp = model.predict(x)
p = np.zeros(pp.shape[0], dtype="uint8")
for i in range(pp.shape[0]):
p[i] = 0 if (pp[i,0] > pp[i,1]) else 1
tp = tn = fp = fn = 0
for i in range(len(y)):
if (p[i] == 0) and (y[i] == 0):
tn += 1
elif (p[i] == 0) and (y[i] == 1):
fn += 1
elif (p[i] == 1) and (y[i] == 0):
fp += 1
else:
tp += 1
score = float(tp+tn) / float(tp+tn+fp+fn)
return [tp, tn, fp, fn, score]
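# Hedged usage note (editorial): deriving standard metrics from the tallies;
# 'model', 'x_test', and 'y_test' are hypothetical stand-ins.
# tp, tn, fp, fn, score = tally_predictions(model, x_test, y_test)
# precision = tp / float(tp + fp)
# recall = tp / float(tp + fn)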
|
[
"oneelkruns@hotmail.com"
] |
oneelkruns@hotmail.com
|
c8038c6ab4252fe32f824e803f86d7d7591701ef
|
bcc7011cb121e653d831e77206e541675e348337
|
/Ugly_Number_II.py
|
afcfbde2313398bcaea4e0213ada86f9ccf34003
|
[] |
no_license
|
Built00/Leetcode
|
2115c20bf91e9f9226ce952293132bc7a852fe86
|
ec3c0d4bd368dd1039f0fed2a07bf89e645a89c3
|
refs/heads/master
| 2020-11-24T09:12:08.172973
| 2018-03-27T01:23:08
| 2018-03-27T01:23:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,405
|
py
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Write a program to find the n-th ugly number.
# Ugly numbers are positive numbers whose prime factors only include 2, 3, 5.
# For example, 1, 2, 3, 4, 5, 6, 8, 9, 10, 12 is the sequence of the first 10 ugly numbers.
# Note that 1 is typically treated as an ugly number, and n does not exceed 1690.
# Credits:
# Special thanks to @jianchao.li.fighter for adding this problem and creating all test cases.
# 596 / 596 test cases passed.
# Status: Accepted
# Runtime: 199 ms
# Your runtime beats 52.37 % of python submissions.
class Solution(object):
def nthUglyNumber(self, n):
"""
:type n: int
:rtype: int
"""
if n <= 0:
return False
ans = [1]
factor2 = 0
factor3 = 0
factor5 = 0
for i in range(1, n):
ans += min(
ans[factor2] * 2,
ans[factor3] * 3,
ans[factor5] * 5,
),
if ans[-1] == ans[factor2] * 2:
factor2 += 1
if ans[-1] == ans[factor3] * 3:
factor3 += 1
if ans[-1] == ans[factor5] * 5:
factor5 += 1
return ans[-1]
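# Editorial note on the approach above: factor2/factor3/factor5 each point at
# the smallest ugly number whose multiple (by 2, 3, or 5) has not yet been
# appended. Every step appends the minimum candidate and advances every
# pointer that produced it, which avoids duplicates and runs in O(n).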
# 596 / 596 test cases passed.
# Status: Accepted
# Runtime: 52 ms
# Your runtime beats 97.52 % of python submissions.
class Solution(object):
ans = sorted(
(2 ** i) * (3 ** j) * (5 ** k)
for i in range(32)
for j in range(20)
for k in range(14)
)
def nthUglyNumber(self, n):
"""
:type n: int
:rtype: int
"""
        # self.ans is a class attribute, computed once at class-definition
        # time and shared by every instance.
return self.ans[n - 1]
# 596 / 596 test cases passed.
# Status: Accepted
# Runtime: 459 ms
# Your runtime beats 15.13 % of python submissions.
import heapq
class Solution(object):
def nthUglyNumber(self, n):
"""
:type n: int
:rtype: int
"""
q2, q3, q5 = [2], [3], [5]
ugly = 1
for u in heapq.merge(q2, q3, q5):
if n == 1:
print(q2, q3, q5)
return ugly
if u > ugly:
ugly = u
n -= 1
q2 += 2 * u,
q3 += 3 * u,
q5 += 5 * u,
if __name__ == '__main__':
print(Solution().nthUglyNumber(10))
print(Solution().nthUglyNumber(1))
|
[
"binwengan@gmail.com"
] |
binwengan@gmail.com
|
005133cbc738ddb6c51030f6fb44f6dae7a2faf9
|
6d7678e3d79c97ddea2e2d65f2c2ef03b17f88f6
|
/venv/lib/python3.6/site-packages/pysmi/searcher/__init__.py
|
cd95edb37e9541b91f70578821dc87adce859e79
|
[
"MIT"
] |
permissive
|
PitCoder/NetworkMonitor
|
b47d481323f26f89be120c27f614f2a17dc9c483
|
36420ae48d2b04d2cc3f13d60d82f179ae7454f3
|
refs/heads/master
| 2020-04-25T11:48:08.718862
| 2019-03-19T06:19:40
| 2019-03-19T06:19:40
| 172,757,390
| 2
| 0
|
MIT
| 2019-03-15T06:07:27
| 2019-02-26T17:26:06
|
Python
|
UTF-8
|
Python
| false
| false
| 355
|
py
|
#
# This file is part of pysmi software.
#
# Copyright (c) 2015-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysmi/license.html
#
from pysmi.searcher.pyfile import PyFileSearcher
from pysmi.searcher.pypackage import PyPackageSearcher
from pysmi.searcher.stub import StubSearcher
from pysmi.searcher.anyfile import AnyFileSearcher
|
[
"overlord.lae@gmail.com"
] |
overlord.lae@gmail.com
|
83aaca0d2c081e8e5f248cf12397bcef49910b51
|
9452f681ea486fc53ad88d05392aed5fc450805c
|
/data_language_all/python/python_358.txt
|
3202680a961720d40a417dbb85b675ccb1b70be6
|
[] |
no_license
|
CoryCollins/src-class
|
11a6df24f4bd150f6db96ad848d7bfcac152a695
|
f08a2dd917f740e05864f51ff4b994c368377f97
|
refs/heads/master
| 2023-08-17T11:53:28.754781
| 2021-09-27T21:13:23
| 2021-09-27T21:13:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 603
|
txt
|
#!/usr/bin/env python
# coding:utf-8
import time
from gae import __version__
def application(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/plain; charset=UTF-8')])
if environ['PATH_INFO'] == '/robots.txt':
yield '\n'.join(['User-agent: *', 'Disallow: /'])
else:
timestamp = long(environ['CURRENT_VERSION_ID'].split('.')[1])/2**28
ctime = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(timestamp+8*3600))
yield "GoAgent 服务端已经在 %s 升级到 %s 版本, 请更新您的客户端。" % (ctime, __version__)
|
[
"znsoft@163.com"
] |
znsoft@163.com
|
1dcd4823d7cd977207da1e30b6e356b847c5aaac
|
81fbbf5a2c34b6d48aafa90b05faf0f56db892e1
|
/lib/fileop/errors.py
|
5a9f6ba40301635909a3731013191234f0129063
|
[
"MIT"
] |
permissive
|
ellenlee1/dynamo
|
d1ea05a6f93668875576520d2e62cd3174120e4f
|
1d42ef49f523f5bc56ff2cf42fdf8e47a4108e2e
|
refs/heads/master
| 2020-03-25T00:37:09.660391
| 2018-08-01T18:50:12
| 2018-08-01T18:50:12
| 143,195,297
| 0
| 0
|
MIT
| 2018-08-01T18:47:01
| 2018-08-01T18:47:01
| null |
UTF-8
|
Python
| false
| false
| 3,738
|
py
|
import errno
# For whatever reason, the following codes are missing from python errno
errno.ENOMEDIUM = 123
errno.EMEDIUMTYPE = 124
errno.ECANCELED = 125
errno.ENOKEY = 126
errno.EKEYEXPIRED = 127
errno.EKEYREVOKED = 128
errno.EKEYREJECTED = 129
# message, error code
msg_to_code = [
('performance marker', errno.ETIMEDOUT),
('Name or service not known', errno.EHOSTUNREACH),
('Connection timed out', errno.ETIMEDOUT),
('Operation timed out', errno.ETIMEDOUT),
('Idle Timeout', errno.ETIMEDOUT),
('end-of-file was reached', errno.EREMOTEIO),
('end of file occurred', errno.EREMOTEIO),
('SRM_INTERNAL_ERROR', errno.ECONNABORTED),
('Internal server error', errno.ECONNABORTED),
('was forcefully killed', errno.ECONNABORTED),
('operation timeout', errno.ETIMEDOUT),
('proxy expired', errno.EKEYEXPIRED),
('with an error 550 File not found', errno.ENOENT),
('ile exists and overwrite', errno.EEXIST),
('No such file', errno.ENOENT),
('SRM_INVALID_PATH', errno.ENOENT),
('The certificate has expired', errno.EKEYEXPIRED),
('The available CRL has expired', errno.EKEYEXPIRED),
('SRM Authentication failed', errno.EKEYREJECTED),
('SRM_DUPLICATION_ERROR', errno.EKEYREJECTED),
('SRM_AUTHENTICATION_FAILURE', errno.EKEYREJECTED),
('SRM_AUTHORIZATION_FAILURE', errno.EKEYREJECTED),
('Authentication Error', errno.EKEYREJECTED),
('SRM_NO_FREE_SPACE', errno.ENOSPC),
('digest too big for rsa key', errno.EMSGSIZE),
('Can not determine address of local host', errno.ENONET),
('Permission denied', errno.EACCES),
('System error in write', errno.EIO),
('File exists', errno.EEXIST),
('checksum do not match', errno.EIO),
('CHECKSUM MISMATCH', errno.EIO),
('gsiftp performance marker', errno.ETIMEDOUT),
('Could NOT load client credentials', errno.ENOKEY),
('Error reading host credential', errno.ENOKEY),
('File not found', errno.ENOENT),
('SRM_FILE_UNAVAILABLE', errno.ENOENT),
('Unable to connect', errno.ENETUNREACH),
('could not open connection to', errno.ENETUNREACH),
('user not authorized', errno.EACCES),
('Broken pipe', errno.EPIPE),
('limit exceeded', errno.EREMOTEIO),
('write denied', errno.EACCES),
('error in reading', errno.EIO),
('over-load', errno.EREMOTEIO),
('connection limit', errno.EMFILE)
]
def find_msg_code(msg):
for m, c in msg_to_code:
if m in msg:
return c
return None
# from FTS3 heuristics.cpp + some originals
irrecoverable_errors = set([
-1, # Job was not even submitted
errno.ENOENT, # No such file or directory
errno.EPERM, # Operation not permitted
errno.EACCES, # Permission denied
errno.EEXIST, # Destination file exists
errno.EISDIR, # Is a directory
errno.ENAMETOOLONG, # File name too long
errno.E2BIG, # Argument list too long
errno.ENOTDIR, # Part of the path is not a directory
errno.EFBIG, # File too big
errno.ENOSPC, # No space left on device
errno.EROFS, # Read-only file system
errno.EPROTONOSUPPORT, # Protocol not supported by gfal2 (plugin missing?)
errno.ECANCELED, # Canceled
errno.EIO, # I/O error
errno.EMSGSIZE, # Message too long
errno.ENONET, # Machine is not on the network
errno.ENOKEY, # Could not load key
errno.EKEYEXPIRED, # Key has expired
errno.EKEYREJECTED, # Key was rejected by service
errno.ENETUNREACH, # Network is unreachable
errno.ECOMM # Communication error (may be recoverable in some cases?)
])
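# Hedged usage sketch (editorial): classify a transfer-tool error message and
# decide whether a retry could ever succeed.
if __name__ == '__main__':
    code = find_msg_code('SRM Authentication failed for user')
    can_retry = code is not None and code not in irrecoverable_errors
    print("%s %s" % (code, can_retry))  # 129 (EKEYREJECTED) and False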
|
[
"yiiyama@mit.edu"
] |
yiiyama@mit.edu
|
5cdc0af5edab6e038cb4eeb8d2e7b703f2d36c10
|
4c7baee40b96e6499f96d6fe81935437264c9c88
|
/stock_scraper/MainDriver/IndicatorTester/ADXIndicatorTesting.py
|
e6644bf00c73eb419ffdcb89a30ffbd596017f75
|
[
"MIT"
] |
permissive
|
webclinic017/Stock-Analysis
|
083d376484adebcad2d52113749a513aa48b09a8
|
eea8cb5bcb635f12eb15ac13306ef16e2892cd92
|
refs/heads/master
| 2022-04-13T00:20:54.287730
| 2020-03-29T21:05:22
| 2020-03-29T21:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,729
|
py
|
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
sys.path.append( path.dirname( path.dirname( path.dirname( path.abspath(__file__) ) ) ) )
import pandas as pd
from scraper.utility import *
from MainDriver.IndicatorTester.IndicatorTesterClass import kIndicatorTesterClass
from multiprocessing import Process, Queue
class kADXIndicatorTesting(kIndicatorTesterClass):
def __init__(self, symbol, noOfDays=3):
self.symbol = symbol
self.filename = CSV_DATA_DIRECTORY + symbol + CSV_INDICATOR_EXTENSTION
kIndicatorTesterClass.__init__(self, self.symbol, "ADX", noOfDays)
self.noOfDays = noOfDays
# creating percentage dataframe
self.perPassFailFile = CSV_SINGAL_DATA_DIRECTORY + "ADX" + "PercentagePassFail" + CSV_INDICATOR_EXTENSTION
try:
self.perPassFailDF = pd.read_csv(self.perPassFailFile, index_col=0)
except Exception as e:
print "Exception while reading signal test file"
print e
    # Use this to test the efficiency of this indicator
def testBackData(self):
startIndex = START_INDEX
endIndex = END_INDEX
df = self.getDataFrame()
#self.df.drop(self.df.index[range(28)], inplace=True)
plusDIs = df["+DI14"]
minusDIs = df["-DI14"]
adxs = df["ADX"]
close = df["Close"]
flag = True
if minusDIs[startIndex] > plusDIs[startIndex]:
flag = False
for i in range(startIndex + 1, plusDIs.count() - endIndex):
if minusDIs[i-1] > plusDIs[i-1]:
flag = False
else:
flag = True
if flag:
if minusDIs[i] > plusDIs[i] and adxs[i] > 20:
#print "plusDI: "+ str(plusDIs[i]) + " minusDI: " + str(minusDIs[i]) + " adx: " + str(adxs[i])
self.sellSignal(i)
flag = False
else:
if plusDIs[i] > minusDIs[i] and adxs[i] > 20:
#print "plusDI: "+ str(plusDIs[i]) + " minusDI: " + str(minusDIs[i]) + " adx: " + str(adxs[i])
self.buySignal(i)
flag = True
print "====== Test result b/w +DI14 & -DI14 having ADX>20 ======"
self.buySignalResult()
print "===================================="
self.sellSignalResult()
def __call__(self):
startIndex = START_INDEX
df = self.getDataFrame()
#self.df.drop(self.df.index[range(28)], inplace=True)
plusDIs = df["+DI14"]
minusDIs = df["-DI14"]
adxs = df["ADX"]
close = df["Close"]
flag = True
lastIndex = plusDIs.count()-1
if (minusDIs[lastIndex-1] > plusDIs[lastIndex-1]):
flag = False
else:
flag = True
if flag:
if minusDIs[lastIndex] > plusDIs[lastIndex] and adxs[lastIndex] > 20:
for i in range(self.perPassFailDF["Symbol"].__len__()):
if self.perPassFailDF["Symbol"][i+1] == self.symbol and str(self.perPassFailDF["Type"][i+1])=="Sell":
if self.perPassFailDF["Normal %"][i+1] < thresholdPassFailPer:
return
else:
break
ltp = close[lastIndex]
tp1 = ltp*(1-TARGET_PRICE_1*0.01)
tp2 = ltp*(1-TARGET_PRICE_2*0.01)
tp3 = ltp*(1-TARGET_PRICE_3*0.01)
sl = ltp*(1+STOP_LOSS*0.01)
subject = "Stock Alert | Date " + str(df.index[lastIndex])
content = "Sell signal for " + self.symbol + ". LTP: " + str(close[lastIndex])
content += "\n\tTarget prices: " + str(tp1) + ", " + str(tp2) + ", " + str(tp3)
content += "\n\tStoploss: " + str(sl)
content += "\n\nADX Indicator"
SENDMAIL(subject, content)
print "\n\n\n########################### Sell signal " + self.symbol + " on " + str(df.index[lastIndex]) + ": LTP " + str(close[lastIndex]) + "Target prices: " + str(tp1) + ", " + str(tp2) + ", " + str(tp3) + "Stoploss: " + str(sl) + " ###########################\n\n\n"
else:
print "No Sell signal for " + self.symbol + " on " + str(df.index[lastIndex])
else:
if plusDIs[lastIndex] > minusDIs[lastIndex] and adxs[lastIndex] > 20:
for i in range(self.perPassFailDF["Symbol"].__len__()):
if self.perPassFailDF["Symbol"][i+1] == self.symbol and str(self.perPassFailDF["Type"][i+1])=="Buy":
if self.perPassFailDF["Normal %"][i+1] < thresholdPassFailPer:
return
else:
break
ltp = close[lastIndex]
tp1 = ltp*(1+TARGET_PRICE_1*0.01)
tp2 = ltp*(1+TARGET_PRICE_2*0.01)
tp3 = ltp*(1+TARGET_PRICE_3*0.01)
sl = ltp*(1-STOP_LOSS*0.01)
subject = "Stock Alert | Date " + str(df.index[lastIndex])
content = "Buy signal for " + self.symbol + ". LTP: " + str(close[lastIndex])
content += "\n\tTarget prices: " + str(tp1) + ", " + str(tp2) + ", " + str(tp3)
content += "\n\tStoploss: " + str(sl)
content += "\n\nADX Indicator"
SENDMAIL(subject, content)
print "\n\n\n########################### Buy Signal for " + self.symbol + " on " + str(df.index[lastIndex]) + ": LTP " + str(close[lastIndex]) + "Target prices: " + str(tp1) + ", " + str(tp2) + ", " + str(tp3) + "Stoploss: " + str(sl) +" ###########################\n\n\n"
else:
print "No Buy signal for " + self.symbol + " on " + str(df.index[lastIndex])
class kCommand:
def __init__(self, *args):
self.args = args
def run_job(self, queue, args):
try:
adxIndicatorTesting = kADXIndicatorTesting(symbol=self.args[0][0])
adxIndicatorTesting()
queue.put(None)
except Exception as e:
queue.put(e)
def do(self):
queue = Queue()
process = Process(target=self.run_job, args=(queue, self.args))
process.start()
result = queue.get()
process.join()
if result is not None:
raise result
def get_name(self):
return "ADX indicator testing command"
if __name__ == "__main__":
adxTesting = kADXIndicatorTesting("SBIN", 3)
#adxTesting.testBackData()
adxTesting()
|
[
"singhanurag50@gmail.com"
] |
singhanurag50@gmail.com
|
a48890bd444f48bc8314e5f840ce4da4bb1a5bb1
|
c2849586a8f376cf96fcbdc1c7e5bce6522398ca
|
/ch28/ex28-5.py
|
199e45d67884a4e186e1a99aebbe52ea8959076e
|
[] |
no_license
|
freebz/Learning-Python
|
0559d7691517b4acb0228d1cc76de3e93915fb27
|
7f577edb6249f4bbcac4f590908b385192dbf308
|
refs/heads/master
| 2020-09-23T01:48:24.009383
| 2019-12-02T12:26:40
| 2019-12-02T12:26:40
| 225,371,155
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
# Version portability: the print function
# py -2 person.py
# ('Bob Smith', 0)
# ('Sue Jones', 100000)

from __future__ import print_function

# 'bob' is assumed to be the instance defined in the chapter's person.py.
print('{0} {1}'.format(bob.name, bob.pay))  # format method
print('%s %s' % (bob.name, bob.pay))        # format expression
|
[
"freebz@hananet.net"
] |
freebz@hananet.net
|
dbf9e6875fa49ed080fb938a4779cee26ebb7abd
|
1c9c0918637209b31fae10fec8329a864f4ddf2a
|
/lib/fabio/test/testGEimage.py
|
b59504e1c386bef104ac73221b7488a5d34e3eb0
|
[] |
no_license
|
albusdemens/astrarecon_tests
|
224f2695ba14e4e6c8a2173132c1d30edba24e1b
|
6b0ee69a2357eb568e2fde1deccfa8b6dd998496
|
refs/heads/master
| 2021-01-18T19:43:17.485309
| 2017-07-21T12:17:54
| 2017-07-21T12:17:54
| 100,536,451
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,873
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fable Input Output
# https://github.com/silx-kit/fabio
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
# Unit tests
# builds on stuff from ImageD11.test.testpeaksearch
28/11/2014
"""
from __future__ import print_function, with_statement, division, absolute_import
import unittest
import sys
import os
import numpy
import gzip
import bz2
if __name__ == '__main__':
import pkgutil
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "fabio.test")
from .utilstest import UtilsTest
logger = UtilsTest.get_logger(__file__)
fabio = sys.modules["fabio"]
from fabio.GEimage import GEimage
# filename dim1 dim2 min max mean stddev
TESTIMAGES = """GE_aSI_detector_image_1529 2048 2048 1515 16353 1833.0311 56.9124
GE_aSI_detector_image_1529.gz 2048 2048 1515 16353 1833.0311 56.9124
GE_aSI_detector_image_1529.bz2 2048 2048 1515 16353 1833.0311 56.9124"""
class TestGE(unittest.TestCase):
def setUp(self):
"""
download images
"""
self.GE = UtilsTest.getimage("GE_aSI_detector_image_1529.bz2")
def test_read(self):
for line in TESTIMAGES.split("\n"):
vals = line.split()
name = vals[0]
dim1, dim2 = [int(x) for x in vals[1:3]]
mini, maxi, mean, stddev = [float(x) for x in vals[3:]]
obj = GEimage()
obj.read(os.path.join(os.path.dirname(self.GE), name))
self.assertAlmostEqual(mini, obj.getmin(), 4, "getmin")
self.assertAlmostEqual(maxi, obj.getmax(), 4, "getmax")
self.assertAlmostEqual(mean, obj.getmean(), 4, "getmean")
self.assertAlmostEqual(stddev, obj.getstddev(), 4, "getstddev")
self.assertEqual(dim1, obj.dim1, "dim1")
self.assertEqual(dim2, obj.dim2, "dim2")
def suite():
testsuite = unittest.TestSuite()
testsuite.addTest(TestGE("test_read"))
return testsuite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
|
[
"mannaro85@gmail.com"
] |
mannaro85@gmail.com
|
d05dba8c42acde9c067414753a54c7e22f014b8b
|
d5a5ff1ed1f508c47e9506a552bf44844bcdc071
|
/labor/business.py
|
30a68565c63be37f889370060e9f8f8d0cbd6cb1
|
[] |
no_license
|
sintaxyzcorp/prometeus
|
5c9dc20e3c2f33ea6b257b850ff9505621302c47
|
2508603b6692023e0a9e40cb6cd1f08465a33f1c
|
refs/heads/master
| 2021-09-01T09:31:36.868784
| 2017-12-26T07:58:27
| 2017-12-26T07:58:27
| 113,787,842
| 0
| 1
| null | 2017-12-18T08:25:31
| 2017-12-10T22:16:28
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,230
|
py
|
# -*- coding: utf-8 -*-
# Django's Libraries
from django.shortcuts import get_object_or_404
from django.db.models import Q
from django.core.paginator import Paginator
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
# Own Libraries
from .models import IncidentReport
class IncidentReportBusiness(object):
@classmethod
def get(self, _pk):
requisition = get_object_or_404(IncidentReport, pk=_pk)
return requisition
@classmethod
def get_No_Pendings(self):
quantity = IncidentReport.objects.filter(
status__in=["pen", 'inc']).count()
return quantity
@classmethod
def get_FilterByEmployee(self, _value, _profile):
if _value:
records = IncidentReport.objects \
.filter(employee=_profile) \
.filter(
Q(type__name__icontains=_value) |
Q(reason__icontains=_value)
).order_by("-created_date")
else:
records = IncidentReport.objects \
.filter(employee=_profile) \
.order_by("-created_date")
return records
@classmethod
def get_Pending(self, _value, _profile=None):
if _value:
records = IncidentReport.objects \
.filter(
Q(type__name__icontains=_value) |
Q(pk__icontains=_value) |
Q(reason__icontains=_value) |
Q(response__icontains=_value) |
Q(employee__user__first_name__icontains=_value) |
Q(employee__user__last_name__icontains=_value)
) \
.filter(status__in=["pen", 'inc']) \
.order_by("-created_date")
else:
records = IncidentReport.objects \
.filter(status__in=["pen", 'inc']) \
.order_by("-created_date")
if _profile:
records = records.filter(employee=_profile)
return records
@classmethod
def get_All(self, _value, _profile=None):
if _value:
records = IncidentReport.objects \
.filter(
Q(type__name__icontains=_value) |
Q(pk__icontains=_value) |
Q(reason__icontains=_value) |
Q(response__icontains=_value) |
Q(employee__user__first_name__icontains=_value) |
Q(employee__user__last_name__icontains=_value)
) \
.order_by("-created_date")
else:
records = IncidentReport.objects \
.order_by("-created_date")
if _profile:
records = records.filter(employee=_profile)
return records
@classmethod
def get_Paginated(self, _records, _current_page):
paginator = Paginator(_records, 10)
current_pagina = _current_page
try:
_records = paginator.page(current_pagina)
except PageNotAnInteger:
_records = paginator.page(1)
except EmptyPage:
            _records = paginator.page(paginator.num_pages)
return _records
|
[
"carloxdev@gmail.com"
] |
carloxdev@gmail.com
|
65d52cdedac7d0a6460e1e1980f18c2c594b6c1b
|
aa1972e6978d5f983c48578bdf3b51e311cb4396
|
/nitro-python-1.0/nssrc/com/citrix/netscaler/nitro/resource/stat/cluster/clusternode_stats.py
|
abe2d6e3e45fa778aaa226fb9adbf4931f703567
|
[
"Python-2.0",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
MayankTahil/nitro-ide
|
3d7ddfd13ff6510d6709bdeaef37c187b9f22f38
|
50054929214a35a7bb19ed10c4905fffa37c3451
|
refs/heads/master
| 2020-12-03T02:27:03.672953
| 2017-07-05T18:09:09
| 2017-07-05T18:09:09
| 95,933,896
| 2
| 5
| null | 2017-07-05T16:51:29
| 2017-07-01T01:03:20
|
HTML
|
UTF-8
|
Python
| false
| false
| 7,729
|
py
|
#
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class clusternode_stats(base_resource) :
r""" Statistics for cluster node resource.
"""
def __init__(self) :
self._nodeid = None
self._clearstats = None
self._clsyncstate = ""
self._clnodeeffectivehealth = ""
self._clnodeip = ""
self._clmasterstate = ""
self._cltothbtx = 0
self._cltothbrx = 0
self._nnmcurconn = 0
self._nnmtotconntx = 0
self._nnmtotconnrx = 0
self._clptpstate = ""
self._clptptx = 0
self._clptprx = 0
self._nnmerrmsend = 0
@property
def nodeid(self) :
r"""ID of the cluster node for which to display statistics. If an ID is not provided, statistics are shown for all nodes.<br/>Minimum value = 0<br/>Maximum value = 31.
"""
try :
return self._nodeid
except Exception as e:
raise e
@nodeid.setter
def nodeid(self, nodeid) :
r"""ID of the cluster node for which to display statistics. If an ID is not provided, statistics are shown for all nodes.
"""
try :
self._nodeid = nodeid
except Exception as e:
raise e
@property
def clearstats(self) :
r"""Clear the statsistics / counters.<br/>Possible values = basic, full.
"""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
r"""Clear the statsistics / counters
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def clnodeip(self) :
r"""NSIP address of the cluster node.
"""
try :
return self._clnodeip
except Exception as e:
raise e
@property
def clsyncstate(self) :
r"""Sync state of the cluster node.
"""
try :
return self._clsyncstate
except Exception as e:
raise e
@property
def nnmcurconn(self) :
r"""Number of connections open for node-to-node communication.
"""
try :
return self._nnmcurconn
except Exception as e:
raise e
@property
def nnmerrmsend(self) :
r"""Number of errors in sending node-to-node multicast/broadcast messages. When executed from the NSIP address, shows the statistics for local node only. For remote node it shows a value of 0. When executed from the cluster IP address, shows all the statistics.
"""
try :
return self._nnmerrmsend
except Exception as e:
raise e
@property
def clnodeeffectivehealth(self) :
r"""Health of the cluster node.
"""
try :
return self._clnodeeffectivehealth
except Exception as e:
raise e
@property
def nnmtotconnrx(self) :
r"""Number of node-to-node messages received. When executed from the NSIP address, shows the statistics for local node only. For remote node it shows a value of 0. When executed from the cluster IP address, shows all the statistics.
"""
try :
return self._nnmtotconnrx
except Exception as e:
raise e
@property
def cltothbrx(self) :
r"""Number of heartbeats received. When executed from the NSIP address, shows the statistics for local node only. For remote node it shows a value of 0. When executed from the cluster IP address, shows all the statistics.
"""
try :
return self._cltothbrx
except Exception as e:
raise e
@property
def clptprx(self) :
r"""Number of PTP packets received on the node. When executed from the NSIP address, shows the statistics for local node only. For remote node it shows a value of 0. When executed from the cluster IP address, shows all the statistics.
"""
try :
return self._clptprx
except Exception as e:
raise e
@property
def nnmtotconntx(self) :
r"""Number of node-to-node messages sent. When executed from the NSIP address, shows the statistics for local node only. For remote node it shows a value of 0. When executed from the cluster IP address, shows all the statistics.
"""
try :
return self._nnmtotconntx
except Exception as e:
raise e
@property
def clmasterstate(self) :
r"""Operational state of the cluster node.
"""
try :
return self._clmasterstate
except Exception as e:
raise e
@property
def clptptx(self) :
r"""Number of PTP packets transmitted by the node. When executed from the NSIP address, shows the statistics for local node only. For remote node it shows a value of 0. When executed from the cluster IP address, shows all the statistics.
"""
try :
return self._clptptx
except Exception as e:
raise e
@property
def clptpstate(self) :
r"""PTP state of the node. This state is Master for one node and Slave for the rest. When executed from the NSIP address, shows the statistics for local node only. For remote node it shows UNKNOWN. When executed from the cluster IP address, shows all the statistics.
"""
try :
return self._clptpstate
except Exception as e:
raise e
@property
def cltothbtx(self) :
r"""Number of heartbeats sent. When executed from the NSIP address, shows the statistics for local node only. For remote node it shows a value of 0. When executed from the cluster IP address, shows all the statistics.
"""
try :
return self._cltothbtx
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(clusternode_response, response, self.__class__.__name__.replace('_stats',''))
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.clusternode
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.nodeid is not None :
return str(self.nodeid)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
r""" Use this API to fetch the statistics of all clusternode_stats resources that are configured on netscaler.
set statbindings=True in options to retrieve bindings.
"""
try :
obj = clusternode_stats()
if not name :
response = obj.stat_resources(service, option_)
else :
obj.nodeid = name
response = obj.stat_resource(service, option_)
return response
except Exception as e:
raise e
class Clearstats:
basic = "basic"
full = "full"
class clusternode_response(base_response) :
def __init__(self, length=1) :
self.clusternode = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.clusternode = [clusternode_stats() for _ in range(length)]
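# Usage sketch (added for illustration; the connection details below are
# assumptions, not part of this module):
# from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
# client = nitro_service("10.0.0.1", "http")
# client.login("nsroot", "password")
# all_nodes = clusternode_stats.get(client)             # stats for every node
# one_node = clusternode_stats.get(client, name="0")    # stats for node id 0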
|
[
"Mayank@Mandelbrot.local"
] |
Mayank@Mandelbrot.local
|
fdecc8791d1fe84d3a7a0b13de466a0681441769
|
97af32be868ceba728202fe122852e887841e20c
|
/posts/viewsets.py
|
70d9194711b064d33b77fc0e298efcb2fb906e53
|
[] |
no_license
|
Paguru/paguru_challenge_api
|
40e5b52300a260d463a19685addb50786f3acbe6
|
8f08a5f0e9f957402a7722dd9aa3846cdd018725
|
refs/heads/main
| 2023-01-13T19:15:11.720923
| 2020-11-23T14:25:30
| 2020-11-23T14:25:30
| 313,945,901
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 885
|
py
|
from rest_framework import viewsets, permissions
from .serializers import PostSerializer
from .models import Post
class IsAuthor(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.user:
if request.user.is_superuser:
return True
else:
return obj.author == request.user
else:
return False
class PostViewSet(viewsets.ModelViewSet):
queryset = Post.objects.all()
serializer_class = PostSerializer
def perform_create(self, serializer):
serializer.save(author=self.request.user)
def get_permissions(self):
self.permission_classes = [permissions.IsAuthenticated]
if self.action in ['update', 'destroy']:
self.permission_classes = [IsAuthor]
        return super().get_permissions()  # super(self.__class__, self) recurses if subclassed
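# Wiring sketch (added for illustration; this would normally live in urls.py):
# from rest_framework.routers import DefaultRouter
# router = DefaultRouter()
# router.register(r'posts', PostViewSet)
# urlpatterns = router.urls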
|
[
"leonardofreitasdev@gmail.com"
] |
leonardofreitasdev@gmail.com
|
8891c63d771f6a79d7d21a6d4e1aa2b6b160da21
|
a9fc606f6d86d87fe67290edc49265986a89b882
|
/0x01-challenge/square.py
|
968a4ca633933d88ff03bcf97ff32862dd42e645
|
[] |
no_license
|
Jilroge7/Fix_My_Code_Challenge
|
5a4a1aae18ebc600fdb327bcf8958e67475562f5
|
b9309addde32a714a533cbb43e87ba180b19e67a
|
refs/heads/master
| 2022-12-25T22:23:09.815638
| 2020-10-01T18:28:50
| 2020-10-01T18:28:50
| 287,828,672
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 608
|
py
|
#!/usr/bin/python3
class square():
width = 0
height = 0
def __init__(self, *args, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def area_of_my_square(self):
""" Area of the square """
return self.width * self.height
    def perimeter_of_my_square(self):
        """ Perimeter of the square """
        return (self.width * 2) + (self.height * 2)
def __str__(self):
return "{}/{}".format(self.width, self.height)
if __name__ == "__main__":
s = square(width=12, height=9)
print(s)
print(s.area_of_my_square())
    print(s.perimeter_of_my_square())
|
[
"1672@holbertonschool.com"
] |
1672@holbertonschool.com
|
e1bbf8182e588f5f52ffb8cadb97eaa892c613ba
|
1e263d605d4eaf0fd20f90dd2aa4174574e3ebce
|
/plugins/support-acl/setup.py
|
f1476d2d80a32ac08318c6525eb595d35a343679
|
[] |
no_license
|
galiminus/my_liveblog
|
698f67174753ff30f8c9590935d6562a79ad2cbf
|
550aa1d0a58fc30aa9faccbfd24c79a0ceb83352
|
refs/heads/master
| 2021-05-26T20:03:13.506295
| 2013-04-23T09:57:53
| 2013-04-23T09:57:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 878
|
py
|
'''
Created on June 14, 2012
@package: support acl
@copyright: 2012 Sourcefabric o.p.s.
@license: http://www.gnu.org/licenses/gpl-3.0.txt
@author: Gabriel Nistor
'''
# --------------------------------------------------------------------
from setuptools import setup, find_packages
# --------------------------------------------------------------------
setup(
name="support_acl",
version="1.0",
packages=find_packages(),
install_requires=['ally_core >= 1.0'],
platforms=['all'],
zip_safe=True,
# metadata for upload to PyPI
author="Gabriel Nistor",
author_email="gabriel.nistor@sourcefabric.org",
description="Support for acl",
long_description='Support for acl definitions',
license="GPL v3",
keywords="Ally REST framework support acl plugin",
url="http://www.sourcefabric.org/en/superdesk/", # project home page
)
|
[
"etienne@spillemaeker.com"
] |
etienne@spillemaeker.com
|
2e8b12f1688ccdc4e2dbffa82c03704c1569082b
|
48832d27da16256ee62c364add45f21b968ee669
|
/res/scripts/client/messenger/gui/scaleform/channels/bw/__init__.py
|
baf94f407f20cf5ee2e4f40fcc8f9542bff22cb2
|
[] |
no_license
|
webiumsk/WOT-0.9.15.1
|
0752d5bbd7c6fafdd7f714af939ae7bcf654faf7
|
17ca3550fef25e430534d079876a14fbbcccb9b4
|
refs/heads/master
| 2021-01-20T18:24:10.349144
| 2016-08-04T18:08:34
| 2016-08-04T18:08:34
| 64,955,694
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 492
|
py
|
# 2016.08.04 19:54:00 Central Europe (summer time)
# Embedded file name: scripts/client/messenger/gui/Scaleform/channels/bw/__init__.py
from messenger.gui.Scaleform.channels.bw.factories import LobbyControllersFactory
__all__ = 'LobbyControllersFactory'
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\messenger\gui\scaleform\channels\bw\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 19:54:00 Central Europe (summer time)
|
[
"info@webium.sk"
] |
info@webium.sk
|
f5414a7b48746434a7782d7536d9dbd9b7408df0
|
a5e71a333a86476b9cb1bdf6989bb5f47dd5e409
|
/ScrapePlugins/M/MangaStreamLoader/ContentLoader.py
|
a34c254432bb1bbfe774256e4929aa4e787bc847
|
[] |
no_license
|
GDXN/MangaCMS
|
0e797299f12c48986fda5f2e7de448c2934a62bd
|
56be0e2e9a439151ae5302b3e6ceddc7868d8942
|
refs/heads/master
| 2021-01-18T11:40:51.993195
| 2017-07-22T12:55:32
| 2017-07-22T12:55:32
| 21,105,690
| 6
| 1
| null | 2017-07-22T12:55:33
| 2014-06-22T21:13:19
|
Python
|
UTF-8
|
Python
| false
| false
| 4,185
|
py
|
import logSetup
import runStatus
import bs4
import nameTools as nt
import os
import os.path
import processDownload
import ScrapePlugins.RetreivalBase
import settings
import traceback
import urllib.parse
import webFunctions
import zipfile
class ContentLoader(ScrapePlugins.RetreivalBase.RetreivalBase):
loggerPath = "Main.Manga.Ms.Cl"
	pluginName = "MangaStream.com Content Retriever"
tableKey = "ms"
dbName = settings.DATABASE_DB_NAME
tableName = "MangaItems"
wg = webFunctions.WebGetRobust(logPath=loggerPath+".Web")
retreivalThreads = 1
def getImage(self, imageUrl, referrer):
if imageUrl.startswith("//"):
imageUrl = "http:" + imageUrl
content, handle = self.wg.getpage(imageUrl, returnMultiple=True, addlHeaders={'Referer': referrer})
if not content or not handle:
raise ValueError("Failed to retreive image from page '%s'!" % referrer)
fileN = urllib.parse.unquote(urllib.parse.urlparse(handle.geturl())[2].split("/")[-1])
fileN = bs4.UnicodeDammit(fileN).unicode_markup
self.log.info("retreived image '%s' with a size of %0.3f K", fileN, len(content)/1000.0)
return fileN, content
def getImageUrls(self, baseUrl):
pages = set()
nextUrl = baseUrl
chapBase = baseUrl.rstrip('0123456789.')
imnum = 1
while 1:
soup = self.wg.getSoup(nextUrl)
imageDiv = soup.find('div', class_='page')
if not imageDiv.a:
raise ValueError("Could not find imageDiv?")
pages.add((imnum, imageDiv.img['src'], nextUrl))
nextUrl = imageDiv.a['href']
if not chapBase in nextUrl:
break
imnum += 1
self.log.info("Found %s pages", len(pages))
return pages
def getLink(self, link):
sourceUrl = link["sourceUrl"]
seriesName = link["seriesName"]
chapterVol = link["originName"]
try:
self.log.info( "Should retreive url - %s", sourceUrl)
self.updateDbEntry(sourceUrl, dlState=1)
imageUrls = self.getImageUrls(sourceUrl)
if not imageUrls:
self.log.critical("Failure on retreiving content at %s", sourceUrl)
self.log.critical("Page not found - 404")
self.updateDbEntry(sourceUrl, dlState=-1)
return
self.log.info("Downloading = '%s', '%s' ('%s images)", seriesName, chapterVol, len(imageUrls))
dlPath, newDir = self.locateOrCreateDirectoryForSeries(seriesName)
if link["flags"] == None:
link["flags"] = ""
if newDir:
self.updateDbEntry(sourceUrl, flags=" ".join([link["flags"], "haddir"]))
chapterName = nt.makeFilenameSafe(chapterVol)
fqFName = os.path.join(dlPath, chapterName+" [MangaStream.com].zip")
loop = 1
while os.path.exists(fqFName):
fqFName, ext = os.path.splitext(fqFName)
fqFName = "%s (%d)%s" % (fqFName, loop, ext)
loop += 1
self.log.info("Saving to archive = %s", fqFName)
images = []
for imgNum, imgUrl, referrerUrl in imageUrls:
imageName, imageContent = self.getImage(imgUrl, referrerUrl)
images.append([imgNum, imageName, imageContent])
if not runStatus.run:
self.log.info( "Breaking due to exit flag being set")
self.updateDbEntry(sourceUrl, dlState=0)
return
self.log.info("Creating archive with %s images", len(images))
if not images:
self.updateDbEntry(sourceUrl, dlState=-1, seriesName=seriesName, originName=chapterVol, tags="error-404")
return
#Write all downloaded files to the archive.
arch = zipfile.ZipFile(fqFName, "w")
for imgNum, imageName, imageContent in images:
arch.writestr("{:03} - {}".format(imgNum, imageName), imageContent)
arch.close()
dedupState = processDownload.processDownload(seriesName, fqFName, deleteDups=True, rowId=link['dbId'])
self.log.info( "Done")
filePath, fileName = os.path.split(fqFName)
self.updateDbEntry(sourceUrl, dlState=2, downloadPath=filePath, fileName=fileName, seriesName=seriesName, originName=chapterVol, tags=dedupState)
return
except Exception:
self.log.critical("Failure on retreiving content at %s", sourceUrl)
self.log.critical("Traceback = %s", traceback.format_exc())
self.updateDbEntry(sourceUrl, dlState=-1)
if __name__ == '__main__':
import utilities.testBase as tb
with tb.testSetup():
cl = ContentLoader()
cl.do_fetch_content()
|
[
"something@fake-url.com"
] |
something@fake-url.com
|
a18d6a2dec529d3ec7e607d68fa268b6e10ab14f
|
fea398a9638acdfa2fb06e7a9695d5894452ded7
|
/0x03-python-data_structures/6-print_matrix_integer.py
|
94690f8578ca1d676f2b335b5640454178a148b3
|
[] |
no_license
|
OscarDRT/holbertonschool-higher_level_programming
|
d15585aa93ced9bc04464ced9bfd4197e73c42fa
|
f57ef3344df6350bded78ffce975eea693e67727
|
refs/heads/master
| 2020-09-30T19:56:30.788311
| 2020-05-14T19:52:10
| 2020-05-14T19:52:10
| 227,360,960
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
#!/usr/bin/python3
def print_matrix_integer(matrix=[[]]):
if (matrix):
for i in range(len(matrix)):
for j in range(len(matrix[i])):
print("{:d}".format(matrix[i][j]), end='')
if (j < len(matrix[i]) - 1):
print(' ', end='')
print()
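# Example (added for illustration):
# print_matrix_integer([[1, 2, 3], [4, 5, 6]])
# -> 1 2 3
#    4 5 6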
|
[
"oscarnetworkingpro@gmail.com"
] |
oscarnetworkingpro@gmail.com
|
fd7d056dca6eb683dac51fb9acb8977975310b3c
|
872cd13f25621825db0c598268ecd21b49cc2c79
|
/Lesson_11/unit_tests/test_client.py
|
10a7ec604a3caf9f471dcc27973a5e6aa6a5b511
|
[] |
no_license
|
ss2576/client_server_applications_Python
|
c4e9ebe195d23c8ca73211894aa50a74014013d5
|
9b599e37e5dae5af3dca06e197916944f12129d5
|
refs/heads/master
| 2022-12-15T10:40:22.935880
| 2020-08-12T11:02:21
| 2020-08-12T11:02:21
| 271,764,749
| 0
| 0
| null | 2020-06-12T10:05:00
| 2020-06-12T09:52:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,934
|
py
|
import sys
import os
sys.path.append(os.path.join(os.getcwd(), '..'))
from unittest import TestCase, main
from common.classes import *
from common.variables import *
from common.utils import *
from common.codes import *
class TestJimClasses(TestCase):
def test_request_dict(self):
body = 'test'
time = dt.timestamp(dt.now())
request = Request(RequestAction.PRESENCE, body)
self.assertEqual(request.get_dict(), {ACTION: RequestAction.PRESENCE, TIME: time, BODY: body})
request = Request(RequestAction.QUIT)
self.assertEqual(request.get_dict(), {ACTION: RequestAction.QUIT, TIME: time, BODY: ''})
self.assertRaises(TypeError, Request)
def test_response_dict(self):
time = dt.timestamp(dt.now())
response = Response(OK)
self.assertEqual(response.get_dict(), {CODE: 200, TIME: time, MESSAGE: 'OK'})
self.assertRaises(TypeError, Response)
class TestJimFunctions(TestCase):
class TestSocket:
encoded_data = None
request = None
def __init__(self, data):
self.data = data
def send(self, request):
json_str = json.dumps(self.data)
self.encoded_data = json_str.encode(ENCODING)
self.request = request
def recv(self, buf):
json_str = json.dumps(self.data)
return json_str.encode(ENCODING)
def test_send_request(self):
request = Request(RequestAction.MESSAGE)
socket = self.TestSocket(request.get_dict())
send_data(socket, request)
self.assertEqual(socket.encoded_data, socket.request)
def test_get_data(self):
response = Response(BASIC)
socket = self.TestSocket(response.get_dict())
self.assertEqual(get_data(socket), response.get_dict())
self.assertEqual(Response.from_dict(get_data(socket)), response)
if __name__ == '__main__':
main()
|
[
"ss2576@mail.ru"
] |
ss2576@mail.ru
|
2f07645844113c62897b33114cef7c03ca4b7b31
|
7d172bc83bc61768a09cc97746715b8ec0e13ced
|
/facebook/views.py
|
bc76a115e91554ace708d6e9fc2227bacf2b21cf
|
[] |
no_license
|
shivam1111/jjuice
|
a3bcd7ee0ae6647056bdc62ff000ce6e6af27594
|
6a2669795ed4bb4495fda7869eeb221ed6535582
|
refs/heads/master
| 2020-04-12T05:01:27.981792
| 2018-11-08T13:00:49
| 2018-11-08T13:00:49
| 81,114,622
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 581
|
py
|
from django.shortcuts import render
from django.views import View
from django.http import JsonResponse
import requests
from django.conf import settings
class Ratings(View):
def get(self, request, template_name="index.html"):
response = {}
        payload = {
            'access_token': settings.FACEBOOK_ACCESSS_TOKEN,
            'fields': "has_rating,has_review,rating,review_text,reviewer"
        }
        res = requests.get('https://graph.facebook.com/v2.9/vapejjuice/ratings', params=payload)
return JsonResponse(data=res.json(), status=200, safe=False)
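# URLconf sketch (added for illustration; the route below is an assumption):
# from django.urls import path
# urlpatterns = [path('facebook/ratings/', Ratings.as_view())]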
|
[
"shivam1111@gmail.com"
] |
shivam1111@gmail.com
|
1dc802022a2096fe6390e9c8c00491b79e22fd57
|
c7a5448821669b2fdebf5c2a4eb0ea70bba545d3
|
/creme/optim/adam.py
|
3c29444f912d89f1b786e209b735cfb90c961960
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
brp-sara/creme
|
e5eb44e5d75cea0120c8fd17c20a963a1fe6c153
|
56c3baf6ee160015b72ab8ebedc0e03da32a6eae
|
refs/heads/master
| 2020-09-08T17:10:18.903069
| 2019-11-11T12:14:32
| 2019-11-11T12:14:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,834
|
py
|
import collections
from . import base
__all__ = ['Adam']
class Adam(base.Optimizer):
"""Adam optimizer.
Example:
::
>>> from creme import linear_model
>>> from creme import metrics
>>> from creme import model_selection
>>> from creme import optim
>>> from creme import preprocessing
>>> from creme import stream
>>> from sklearn import datasets
>>> X_y = stream.iter_sklearn_dataset(
... dataset=datasets.load_breast_cancer(),
... shuffle=True,
... random_state=42
... )
>>> optimizer = optim.Adam()
>>> model = (
... preprocessing.StandardScaler() |
... linear_model.LogisticRegression(optimizer)
... )
>>> metric = metrics.F1()
>>> model_selection.online_score(X_y, model, metric)
F1: 0.959554
References:
1. `Adam: A method for stochastic optimization <https://arxiv.org/pdf/1412.6980.pdf>`_
"""
def __init__(self, lr=0.1, beta_1=0.9, beta_2=0.999, eps=1e-8):
super().__init__(lr)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.eps = eps
self.m = collections.defaultdict(float)
self.v = collections.defaultdict(float)
    def _update_after_pred(self, w, g):
        for i, gi in g.items():
            # Update the biased first (mean) and second (uncentered variance)
            # moment estimates of the gradient
            self.m[i] = self.beta_1 * self.m[i] + (1 - self.beta_1) * gi
            self.v[i] = self.beta_2 * self.v[i] + (1 - self.beta_2) * gi ** 2
            # Bias correction: both moments start at 0, so early iterations
            # are rescaled by 1 / (1 - beta ** t)
            m = self.m[i] / (1 - self.beta_1 ** (self.n_iterations + 1))
            v = self.v[i] / (1 - self.beta_2 ** (self.n_iterations + 1))
            # Parameter step with the adaptive per-coordinate learning rate
            w[i] -= self.learning_rate * m / (v ** 0.5 + self.eps)
        return w
|
[
"maxhalford25@gmail.com"
] |
maxhalford25@gmail.com
|
0c84a9d6e3298e137bf520780a4fa47a312b78ad
|
2324d8e4544a9b813153ce0ed0f858972ea7f909
|
/135-分发糖果.py
|
fc857a4ba5410fc1316af0a1170fd5c03458002d
|
[] |
no_license
|
Terry-Ma/Leetcode
|
af8a4ad8059975f8d12b0351610336f1f5f01097
|
cc7f41e2fb3ed5734c2a5af97e49a5bc17afbceb
|
refs/heads/master
| 2021-08-10T16:40:20.482851
| 2021-07-03T08:35:56
| 2021-07-03T08:35:56
| 225,814,239
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 490
|
py
|
class Solution:
def candy(self, ratings: List[int]) -> int:
left = [1] * len(ratings)
right = 1
for i in range(1, len(ratings)):
if ratings[i] > ratings[i - 1]:
left[i] = left[i - 1] + 1
res = left[-1]
for i in range(len(ratings) - 2, -1, -1):
if ratings[i] > ratings[i + 1]:
right += 1
else:
right = 1
res += max(right, left[i])
return res
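# Worked example (added for illustration, not part of the original solution):
# ratings = [1, 0, 2]
# left-to-right pass:  left = [1, 1, 2]
# right-to-left pass:  res starts at left[-1] = 2
#   i = 1: right = 1, res += max(1, left[1]) = 1 -> res = 3
#   i = 0: ratings[0] > ratings[1], so right = 2, res += max(2, left[0]) = 2 -> res = 5
# Minimum candies for [1, 0, 2] is 5.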
|
[
"rssmyq@aliyun.com"
] |
rssmyq@aliyun.com
|
f61caaf7302bda93ce12e0e98e8ec61ca87ffdfc
|
cde11aea86ce9e1e370b02fb14553358b4aaab8b
|
/practice/hard/_51_disk_stacking.py
|
69b5dbd133b93eaa83f31887401f81f3562c17be
|
[] |
no_license
|
pavankumarag/ds_algo_problem_solving_python
|
56f9a2bb64dd62f16028c3f49a72542b8588369a
|
cbd323de31f2f4a4b35334ce3249bb3e9525dbf8
|
refs/heads/master
| 2023-06-21T20:29:41.317005
| 2023-06-10T18:11:39
| 2023-06-10T18:11:39
| 223,919,558
| 2
| 1
| null | 2023-06-10T18:11:40
| 2019-11-25T10:16:27
|
Python
|
UTF-8
|
Python
| false
| false
| 780
|
py
|
"""
Tower of Honoi
we have three rods and n disks the objective of the puzzle is to move the entire stack to another rod,
obeying the following simple rules:
1) Only one disk can be moved at a time.
2) Each move consists of taking the upper disk from one of the stacks and placing it on top of another stack i.e.
a disk can only be moved if it is the uppermost disk on a stack.
3) No disk may be placed on top of a smaller disk.
"""
def tower_of_honoi(n, from_rod, to_rod, aux_rod):
if n == 1:
print "Move disk 1 from ", from_rod, "to ", to_rod
return
tower_of_honoi(n-1, from_rod, aux_rod, to_rod)
print "Move disk",n, "from ", from_rod, "to ", to_rod
tower_of_honoi(n-1, aux_rod, to_rod, from_rod)
if __name__ == "__main__":
n = 4
tower_of_honoi(n, 'A', 'C', 'B')
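# Sanity check (added for illustration): tower_of_hanoi(2, 'A', 'C', 'B') prints
# Move disk 1 from A to B
# Move disk 2 from A to C
# Move disk 1 from B to C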
|
[
"pavan.govindraj@nutanix.com"
] |
pavan.govindraj@nutanix.com
|
ec25fbfa0846875e29b7c321050a45e0d6c05ffb
|
65e54ca14ac21d2c2572ba35ba351df5903cb667
|
/src/petronia/core/layout/binding/bootstrap.py
|
7fc0858f4fa68c43fe1e660bdcc50a8a0f177cf0
|
[
"MIT"
] |
permissive
|
groboclown/petronia
|
29b93e88b82d2732bb529621ad8bff50334d36b9
|
486338023d19cee989e92f0c5692680f1a37811f
|
refs/heads/master
| 2022-07-25T10:08:58.468385
| 2020-01-23T14:59:03
| 2020-01-23T14:59:03
| 71,741,212
| 22
| 3
|
NOASSERTION
| 2022-07-13T15:27:32
| 2016-10-24T01:30:01
|
Python
|
UTF-8
|
Python
| false
| false
| 7,016
|
py
|
"""
Bootstrap the hotkey bindings for the layout events.
"""
from typing import List
from ....aid.std import i18n as _
from ....aid.std import (
EventBus,
EventId,
ParticipantId,
ErrorReport,
report_error,
create_user_error,
)
from ....aid.bootstrap import (
ANY_VERSION,
create_singleton_identity,
)
from ....aid.lifecycle import create_module_listener_helper
from ....base.internal_.internal_extension import petronia_extension
from ....base.util.simple_type import (
PersistTypeSchemaItem,
PERSISTENT_TYPE_SCHEMA_NAME__DOC,
PERSISTENT_TYPE_SCHEMA_TYPE__BOOL,
PERSISTENT_TYPE_SCHEMA_TYPE__STR,
PERSISTENT_TYPE_SCHEMA_TYPE__FLOAT,
optional_str, optional_int, optional_bool,
collect_errors,
)
from ...hotkeys.api import (
HotkeyEventTriggeredEvent,
BoundServiceActionSchema,
as_hotkey_event_triggered_listener,
)
from ..tile.api import (
RequestMoveResizeFocusedWindowEvent,
send_request_move_resize_focused_window_event,
RequestShiftLayoutFocusEvent,
send_request_shift_layout_focus_event,
RequestSetFocusedWindowVisibilityEvent,
send_request_set_window_visibility_event,
)
from ..window.api import (
)
from ..navigation.api import (
)
TARGET_ID_LAYOUT_HOTKEYS = create_singleton_identity("core.layout.binding")
HOTKEY_ACTION_MOVE_ACTIVE = 'move-active'
HOTKEY_ACTION_SHIFT_FOCUS = 'shift-focus'
HOTKEY_ACTION_SET_VISIBILITY = 'set-visible'
def bootstrap_layout_handlers(bus: EventBus) -> None:
listeners = create_module_listener_helper(bus, TARGET_ID_LAYOUT_HOTKEYS)
def handler(
_event_id: EventId,
_target_id: ParticipantId,
event_obj: HotkeyEventTriggeredEvent
) -> None:
errors: List[ErrorReport] = []
# -------------------------------------------------------------------
if event_obj.data.action == HOTKEY_ACTION_MOVE_ACTIVE:
dx = collect_errors(errors, optional_int(
event_obj.data.parameters, 'dx',
lambda: create_user_error(handler, _('"dx" must be a number'))
)) or 0
dy = collect_errors(errors, optional_int(
event_obj.data.parameters, 'dy',
lambda: create_user_error(handler, _('"dy" must be a number'))
)) or 0
dw = collect_errors(errors, optional_int(
event_obj.data.parameters, 'dw',
lambda: create_user_error(handler, _('"dw" must be a number'))
)) or 0
dh = collect_errors(errors, optional_int(
event_obj.data.parameters, 'dh',
lambda: create_user_error(handler, _('"dh" must be a number'))
)) or 0
dz = collect_errors(errors, optional_int(
event_obj.data.parameters, 'dz',
lambda: create_user_error(handler, _('"dz" must be a number'))
)) or 0
send_request_move_resize_focused_window_event(bus, dx, dy, dw, dh, dz)
# -------------------------------------------------------------------
elif event_obj.data.action == HOTKEY_ACTION_SHIFT_FOCUS:
name = collect_errors(errors, optional_str(
event_obj.data.parameters, 'name',
lambda: create_user_error(handler, _('"name" must be a string'))
)) or ''
index = collect_errors(errors, optional_int(
event_obj.data.parameters, 'index',
lambda: create_user_error(handler, _('"index" must be a number'))
)) or 0
print("DEBUG data {0} -> {1}/{2}".format(event_obj.data.parameters, name, index))
send_request_shift_layout_focus_event(bus, name, index)
# -------------------------------------------------------------------
elif event_obj.data.action == HOTKEY_ACTION_SET_VISIBILITY:
visible = collect_errors(errors, optional_bool(
event_obj.data.parameters, 'visible',
lambda: create_user_error(handler, _('"visible" must be true or false'))
)) or False
send_request_set_window_visibility_event(bus, visible)
for error in errors:
report_error(bus, error)
listeners.listen(TARGET_ID_LAYOUT_HOTKEYS, as_hotkey_event_triggered_listener, handler)
listeners.bind_hotkey(
BoundServiceActionSchema(
TARGET_ID_LAYOUT_HOTKEYS, HOTKEY_ACTION_MOVE_ACTIVE, {
PERSISTENT_TYPE_SCHEMA_NAME__DOC: PersistTypeSchemaItem(
RequestMoveResizeFocusedWindowEvent.__doc__ or '',
PERSISTENT_TYPE_SCHEMA_TYPE__STR
),
"dx": PersistTypeSchemaItem(
"Change in window x position (move)", PERSISTENT_TYPE_SCHEMA_TYPE__FLOAT
),
"dy": PersistTypeSchemaItem(
"Change in window y position (move)", PERSISTENT_TYPE_SCHEMA_TYPE__FLOAT
),
"dw": PersistTypeSchemaItem(
"Change in window width (resize)", PERSISTENT_TYPE_SCHEMA_TYPE__FLOAT
),
"dh": PersistTypeSchemaItem(
"Change in window height (resize)", PERSISTENT_TYPE_SCHEMA_TYPE__FLOAT
),
"dz": PersistTypeSchemaItem(
"Change in window z-order (focus)", PERSISTENT_TYPE_SCHEMA_TYPE__FLOAT
),
}
)
)
listeners.bind_hotkey(
BoundServiceActionSchema(
TARGET_ID_LAYOUT_HOTKEYS, HOTKEY_ACTION_SHIFT_FOCUS, {
PERSISTENT_TYPE_SCHEMA_NAME__DOC: PersistTypeSchemaItem(
RequestShiftLayoutFocusEvent.__doc__ or '',
PERSISTENT_TYPE_SCHEMA_TYPE__STR
),
"name": PersistTypeSchemaItem(
"Layout focus shift name", PERSISTENT_TYPE_SCHEMA_TYPE__STR
),
"index": PersistTypeSchemaItem(
"Layout focus shift index", PERSISTENT_TYPE_SCHEMA_TYPE__FLOAT
),
}
)
)
listeners.bind_hotkey(
BoundServiceActionSchema(
TARGET_ID_LAYOUT_HOTKEYS, HOTKEY_ACTION_SET_VISIBILITY, {
PERSISTENT_TYPE_SCHEMA_NAME__DOC: PersistTypeSchemaItem(
RequestSetFocusedWindowVisibilityEvent.__doc__ or '',
PERSISTENT_TYPE_SCHEMA_TYPE__STR
),
"visible": PersistTypeSchemaItem(
"True to make the window visible, False to make it hidden", PERSISTENT_TYPE_SCHEMA_TYPE__BOOL
),
}
)
)
EXTENSION_METADATA = petronia_extension({
"name": "core.layout.binding",
"type": "standalone",
"version": (1, 0, 0,),
"depends": ({
"extension": "core.hotkeys.api",
"minimum": ANY_VERSION,
}, {
"extension": "core.layout.api",
"minimum": ANY_VERSION,
},),
})
|
[
"matt@groboclown.net"
] |
matt@groboclown.net
|
c1d39ebc5f1174152c28d88c2a6e92745f8fea7c
|
1e35944fcd9a0e2209e069fb0056f23597e3196c
|
/0x02-python-import_modules/4-hidden_discovery.py
|
9f95073b69cc6970c576aeb2f8a13779a4a17885
|
[] |
no_license
|
sonnentag/holbertonschool-higher_level_programming
|
1496be9390f557cfa7a3e31bb74b208a7dfbb98f
|
5992e3c7ff97ab3fefe33bec5632bdca4d3d8a05
|
refs/heads/master
| 2022-12-23T12:47:02.957781
| 2020-09-25T04:01:27
| 2020-09-25T04:01:27
| 259,382,948
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
#!/usr/bin/python3
if __name__ == "__main__":
import hidden_4
for func in dir(hidden_4):
if func[1] != "_":
print(func)
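# Illustration (hidden_4 ships as a compiled module in the exercise; the names
# below are hypothetical): if hidden_4.py defined my_secret_santa() and
# print_hidden(), running this script would print
# my_secret_santa
# print_hidden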
|
[
"zocle@zocle.net"
] |
zocle@zocle.net
|
b970cb7a9421a179fb53f5272a8b21908a4e9e7e
|
8b81588cea990aca1ecc4ce3fe45847cc46e7d00
|
/x11/library/libXScrnSaver/actions.py
|
676e427c37687edf597e963446797e167056b929
|
[] |
no_license
|
Zaryob/SulinRepository
|
67a4a6d15d909422f73d5ec4bbc8bd16f40057a9
|
c89c643b9773d191996d721b262dd739e4203bc0
|
refs/heads/main
| 2021-06-12T19:30:34.281242
| 2019-04-18T17:56:24
| 2019-04-18T17:56:24
| 201,469,580
| 11
| 2
| null | 2021-06-02T16:51:13
| 2019-08-09T13:08:57
|
Roff
|
UTF-8
|
Python
| false
| false
| 507
|
py
|
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from inary.actionsapi import autotools
from inary.actionsapi import inarytools
from inary.actionsapi import get
def setup():
autotools.autoreconf("-vif")
autotools.configure("--disable-static")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
inarytools.dodoc("ChangeLog", "COPYING", "README")
|
[
"zaryob.dev@gmail.com"
] |
zaryob.dev@gmail.com
|
2cacc35dad927239826dea74300b3926c7cc1092
|
cbca22133ba7c02ba0532bc046d7e6b0524c2f4c
|
/Matplotlib_With_PYQT/封装toolbar功能/fnag.py
|
5b9a1a4d3a75baf71b162e3e9c3e93eb74751638
|
[] |
no_license
|
Inc175/ll_crowluya-Matplotlib_With_PYQT-master
|
a923c195121f5e1d382b702b6a9ea0732c60c204
|
dcf1fd6725f4fffd0b7ff6b9298cc3635735b30d
|
refs/heads/master
| 2021-09-24T23:58:02.044255
| 2018-10-05T16:00:11
| 2018-10-05T16:00:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,145
|
py
|
import sys
import matplotlib
import PyQt5.sip
# hook matplotlib key-press events into PyQt5
# from matplotlib.backend_bases import key_press_handler
matplotlib.use("Qt5Agg")
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import (QApplication, QMainWindow, QVBoxLayout, QSizePolicy, QAction, QLabel,
QWidget,QStackedWidget, QPushButton,QTabWidget, QAction, QMessageBox, QFileDialog, QHBoxLayout)
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
from MatploWidget import PlotCanvas  # class that renders matplotlib figures in Qt
from mainFrom import Ui_MainWindow  # main window centered on the screen
from loadlines import load_all_lines  # load the line data
# add curves to the canvas
from PyQt5.QtWidgets import QDesktopWidget  # remaining name not already imported above
from utils import log
fig = plt.figure()
ax = fig.add_subplot(111)
lines = load_all_lines()
tab1 = PlotCanvas(width=9, height=6, dpi=100)
tab1.draw_one_line(lines[0])
# fig.add_subplot(tab1)
tab1.draw()
# plt.show()
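# Embedding sketch (added for illustration; assumes PlotCanvas subclasses
# FigureCanvasQTAgg, as is typical for Qt/matplotlib widgets):
# app = QApplication(sys.argv)
# win = QMainWindow()
# central = QWidget()
# layout = QVBoxLayout(central)
# layout.addWidget(NavigationToolbar(tab1, win))  # reuse the imported toolbar
# layout.addWidget(tab1)
# win.setCentralWidget(central)
# win.show()
# sys.exit(app.exec_())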
|
[
"s1107308633@gmail.com"
] |
s1107308633@gmail.com
|
42e748ffc45d9278916009d2483b54f316602368
|
7133de159c5cdc06b92bc5b168fe193caf0bea2a
|
/packages/grid_control/parameters/psource_data.py
|
f2f1003f2b7798eb21834106677ade5e27e87a17
|
[] |
no_license
|
thomas-mueller/grid-control
|
fac566c21bb79b0bd4439d36421a0c0b14bc8776
|
36f01d19b71c41c8dd55eddd190181db8849f920
|
refs/heads/master
| 2020-12-28T23:34:59.983357
| 2016-04-22T06:28:57
| 2016-04-22T06:28:57
| 56,689,010
| 0
| 0
| null | 2016-04-20T13:26:29
| 2016-04-20T13:26:29
| null |
UTF-8
|
Python
| false
| false
| 4,408
|
py
|
# | Copyright 2009-2016 Karlsruhe Institute of Technology
# |
# | Licensed under the Apache License, Version 2.0 (the "License");
# | you may not use this file except in compliance with the License.
# | You may obtain a copy of the License at
# |
# | http://www.apache.org/licenses/LICENSE-2.0
# |
# | Unless required by applicable law or agreed to in writing, software
# | distributed under the License is distributed on an "AS IS" BASIS,
# | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# | See the License for the specific language governing permissions and
# | limitations under the License.
import os, time
from grid_control import utils
from grid_control.datasets import DataProvider
from grid_control.gc_exceptions import UserError
from grid_control.parameters.psource_base import ParameterSource
from python_compat import md5_hex
class DataParameterSource(ParameterSource):
def __init__(self, dataDir, srcName, dataProvider, dataSplitter, dataProc, keepOld = True):
ParameterSource.__init__(self)
(self._dataDir, self._srcName, self._dataProvider, self._dataSplitter, self._part_proc) = \
(dataDir, srcName, dataProvider, dataSplitter, dataProc)
if not dataProvider:
pass # debug mode - used by scripts - disables resync
	elif os.path.exists(self.getDataPath('cache.dat')) and os.path.exists(self.getDataPath('map.tar')):
self._dataSplitter.importPartitions(self.getDataPath('map.tar'))
else:
DataProvider.saveToFile(self.getDataPath('cache.dat'), self._dataProvider.getBlocks(silent = False))
self._dataSplitter.splitDataset(self.getDataPath('map.tar'), self._dataProvider.getBlocks())
self._maxN = self._dataSplitter.getMaxJobs()
self._keepOld = keepOld
def getNeededDataKeys(self):
return self._part_proc.getNeededKeys(self._dataSplitter)
def getMaxParameters(self):
return self._maxN
def fillParameterKeys(self, result):
result.extend(self._part_proc.getKeys())
def fillParameterInfo(self, pNum, result):
splitInfo = self._dataSplitter.getSplitInfo(pNum)
self._part_proc.process(pNum, splitInfo, result)
def getHash(self):
return md5_hex(str(self._srcName) + str(self._dataSplitter.getMaxJobs()) + str(self.resyncEnabled()))
def show(self):
return ['%s: src = %s' % (self.__class__.__name__, self._srcName)]
def __repr__(self):
return 'data(%s)' % utils.QM(self._srcName == 'data', '', self._srcName)
def getDataPath(self, postfix):
return os.path.join(self._dataDir, self._srcName + postfix)
def resync(self):
(result_redo, result_disable, result_sizeChange) = ParameterSource.resync(self)
if self.resyncEnabled() and self._dataProvider:
# Get old and new dataset information
old = DataProvider.loadFromFile(self.getDataPath('cache.dat')).getBlocks()
self._dataProvider.clearCache()
new = self._dataProvider.getBlocks()
self._dataProvider.saveToFile(self.getDataPath('cache-new.dat'), new)
# Use old splitting information to synchronize with new dataset infos
jobChanges = self._dataSplitter.resyncMapping(self.getDataPath('map-new.tar'), old, new)
if jobChanges:
# Move current splitting to backup and use the new splitting from now on
def backupRename(old, cur, new):
if self._keepOld:
os.rename(self.getDataPath(cur), self.getDataPath(old))
os.rename(self.getDataPath(new), self.getDataPath(cur))
backupRename( 'map-old-%d.tar' % time.time(), 'map.tar', 'map-new.tar')
backupRename('cache-old-%d.dat' % time.time(), 'cache.dat', 'cache-new.dat')
old_maxN = self._dataSplitter.getMaxJobs()
self._dataSplitter.importPartitions(self.getDataPath('map.tar'))
self._maxN = self._dataSplitter.getMaxJobs()
result_redo.update(jobChanges[0])
result_disable.update(jobChanges[1])
result_sizeChange = result_sizeChange or (old_maxN != self._maxN)
self.resyncFinished()
return (result_redo, result_disable, result_sizeChange)
def create(cls, pconfig = None, src = 'data'): # pylint:disable=arguments-differ
if src not in DataParameterSource.datasetsAvailable:
raise UserError('Dataset parameter source "%s" not setup!' % src)
result = DataParameterSource.datasetsAvailable[src]
DataParameterSource.datasetsUsed.append(result)
return result
create = classmethod(create)
DataParameterSource.datasetsAvailable = {}
DataParameterSource.datasetsUsed = []
ParameterSource.managerMap['data'] = 'DataParameterSource'
|
[
"stober@cern.ch"
] |
stober@cern.ch
|
819f916451d212969a294520210767ee7b4da40d
|
b3c47795e8b6d95ae5521dcbbb920ab71851a92f
|
/Leetcode/Algorithm/python/3000/02079-Watering Plants.py
|
4e375badaaf013ccb48f4140475ac47e3102f9c7
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
Wizmann/ACM-ICPC
|
6afecd0fd09918c53a2a84c4d22c244de0065710
|
7c30454c49485a794dcc4d1c09daf2f755f9ecc1
|
refs/heads/master
| 2023-07-15T02:46:21.372860
| 2023-07-09T15:30:27
| 2023-07-09T15:30:27
| 3,009,276
| 51
| 23
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
class Solution(object):
def wateringPlants(self, plants, capacity):
cur = capacity
step = 0
for i, plant in enumerate(plants):
if plant > cur:
cur = capacity - plant
step += i * 2 + 1
else:
cur -= plant
step += 1
return step
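# Worked example (added for illustration, not part of the original solution):
# plants = [2, 2, 3, 3], capacity = 5
# i = 0: 2 <= 5, cur = 3, step = 1
# i = 1: 2 <= 3, cur = 1, step = 2
# i = 2: 3 > 1, refill: cur = 5 - 3 = 2, step += 2*2 + 1 -> step = 7
# i = 3: 3 > 2, refill: cur = 5 - 3 = 2, step += 3*2 + 1 -> step = 14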
|
[
"noreply@github.com"
] |
Wizmann.noreply@github.com
|
4045d144a83b1c65582baa5d98f4ceece2698cd4
|
498a2d08c19eaf36945468e11fad1be97d62135b
|
/yaml_lsp/main.py
|
125cdb4ed5c7ac43fcdd5ddc1769dfca7aed8329
|
[
"BSD-3-Clause"
] |
permissive
|
martinRenou/yaml-lsp
|
94f4dc1744b5e8a4763983725cf482a5ab3f1207
|
79186d50289d172d2dc5a8420f1dc2cad1046ce7
|
refs/heads/master
| 2023-08-25T08:40:39.172933
| 2021-04-08T14:37:04
| 2021-04-08T14:37:04
| 417,399,907
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 864
|
py
|
import pathlib
import shutil
import subprocess
import sys
NODE_LOCATION = (
shutil.which("node") or
shutil.which("node.exe") or
shutil.which("node.cmd")
)
NODE = str(pathlib.Path(NODE_LOCATION).resolve())
PATH_TO_BIN_JS = str(
(
pathlib.Path(__file__).parent /
'node_modules' / 'yaml-language-server' /
'bin' / 'yaml-language-server'
).resolve()
)
def main():
p = subprocess.Popen(
[NODE, PATH_TO_BIN_JS, '--stdio', *sys.argv[1:]],
stdin=sys.stdin, stdout=sys.stdout
)
sys.exit(p.wait())
def load(app):
return {
"yaml-language-server": {
"version": 2,
"argv": ['yaml-lsp'],
"languages": ["yaml"],
"mime_types": [
"text/x-yaml", "text/yaml"
]
}
}
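# Note (assumption, added for illustration): the dict returned by load() has the
# shape of a jupyter-lsp language-server spec (version/argv/languages/mime_types),
# so this module can double as a jupyter-lsp entry point; "argv" re-invokes the
# yaml-lsp console script defined by main() above.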
if __name__ == "__main__":
main()
|
[
"martin.renou@gmail.com"
] |
martin.renou@gmail.com
|
34df80d44954fbb824a9dad7091e6ee2e6eb9a0a
|
ac235a23f22be0d6f1818bb53902177f9969813a
|
/tests/datastreams/test_processor.py
|
d8b3879b0af52c14c14035455326ecafe89c7cd8
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
DataDog/dd-trace-py
|
f09d6d48c4c69aea68f999fc8a458ade5c6150cf
|
1e3bd6d4edef5cda5a0831a6a7ec8e4046659d17
|
refs/heads/1.x
| 2023-09-01T20:25:26.746324
| 2023-09-01T18:54:37
| 2023-09-01T18:54:37
| 61,572,326
| 461
| 426
|
NOASSERTION
| 2023-09-14T20:38:57
| 2016-06-20T18:52:23
|
Python
|
UTF-8
|
Python
| false
| false
| 3,013
|
py
|
import time
from ddtrace.internal.datastreams.processor import ConsumerPartitionKey
from ddtrace.internal.datastreams.processor import DataStreamsProcessor
from ddtrace.internal.datastreams.processor import PartitionKey
def test_data_streams_processor():
processor = DataStreamsProcessor("http://localhost:8126")
now = time.time()
processor.on_checkpoint_creation(1, 2, ["direction:out", "topic:topicA", "type:kafka"], now, 1, 1)
processor.on_checkpoint_creation(1, 2, ["direction:out", "topic:topicA", "type:kafka"], now, 1, 2)
processor.on_checkpoint_creation(1, 2, ["direction:out", "topic:topicA", "type:kafka"], now, 1, 4)
processor.on_checkpoint_creation(2, 4, ["direction:in", "topic:topicA", "type:kafka"], now, 1, 2)
now_ns = int(now * 1e9)
bucket_time_ns = int(now_ns - (now_ns % 1e10))
aggr_key_1 = (",".join(["direction:out", "topic:topicA", "type:kafka"]), 1, 2)
aggr_key_2 = (",".join(["direction:in", "topic:topicA", "type:kafka"]), 2, 4)
assert processor._buckets[bucket_time_ns].pathway_stats[aggr_key_1].full_pathway_latency.count == 3
assert processor._buckets[bucket_time_ns].pathway_stats[aggr_key_2].full_pathway_latency.count == 1
assert (
abs(processor._buckets[bucket_time_ns].pathway_stats[aggr_key_1].full_pathway_latency.get_quantile_value(1) - 4)
<= 4 * 0.008
) # relative accuracy of 0.00775
assert (
abs(processor._buckets[bucket_time_ns].pathway_stats[aggr_key_2].full_pathway_latency.get_quantile_value(1) - 2)
<= 2 * 0.008
) # relative accuracy of 0.00775
def test_data_streams_loop_protection():
processor = DataStreamsProcessor("http://localhost:8126")
ctx = processor.set_checkpoint(["direction:in", "topic:topicA", "type:kafka"])
parent_hash = ctx.hash
processor.set_checkpoint(["direction:out", "topic:topicB", "type:kafka"])
# the application sends data downstream to two different places.
# Use the consume checkpoint as the parent
child_hash = processor.set_checkpoint(["direction:out", "topic:topicB", "type:kafka"]).hash
expected_child_hash = ctx._compute_hash(["direction:out", "topic:topicB", "type:kafka"], parent_hash)
assert child_hash == expected_child_hash
def test_kafka_offset_monitoring():
processor = DataStreamsProcessor("http://localhost:8126")
now = time.time()
processor.track_kafka_commit("group1", "topic1", 1, 10, now)
processor.track_kafka_commit("group1", "topic1", 1, 14, now)
processor.track_kafka_produce("topic1", 1, 34, now)
processor.track_kafka_produce("topic1", 2, 10, now)
now_ns = int(now * 1e9)
bucket_time_ns = int(now_ns - (now_ns % 1e10))
assert processor._buckets[bucket_time_ns].latest_produce_offsets[PartitionKey("topic1", 1)] == 34
assert processor._buckets[bucket_time_ns].latest_produce_offsets[PartitionKey("topic1", 2)] == 10
assert processor._buckets[bucket_time_ns].latest_commit_offsets[ConsumerPartitionKey("group1", "topic1", 1)] == 14
|
[
"noreply@github.com"
] |
DataDog.noreply@github.com
|