| blob_id (string, length 40) | directory_id (string, length 40) | path (string, length 3 to 288) | content_id (string, length 40) | detected_licenses (list, length 0 to 112) | license_type (string, 2 classes) | repo_name (string, length 5 to 115) | snapshot_id (string, length 40) | revision_id (string, length 40) | branch_name (string, 684 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 147 classes) | src_encoding (string, 25 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 128 to 12.7k) | extension (string, 142 classes) | content (string, length 128 to 8.19k) | authors (list, length 1) | author_id (string, length 1 to 132) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6a817ecffb86c41f42b6deea0488e93cad9bc8f1
|
0760b6a6912914dda2923f0fa79fc1a6571ef376
|
/turtle/graphics pattern.py
|
23c8f87a8e6efd6e58bebc6843f0815740aa7cc1
|
[] |
no_license
|
PrateekJain999/Python-Codes
|
030cc8aedcea52e3142d545fb92aeff5f895ca5f
|
21e2f5a135e9646ac8fb5845ad10bc6bbf3c23c7
|
refs/heads/main
| 2023-02-26T11:11:28.951296
| 2021-02-04T11:36:09
| 2021-02-04T11:36:09
| 331,254,596
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
import turtle as t
t.setpos(0,0)
t.circle(50)
t.penup()
t.setpos(-50,50)
t.pendown()
t.fd(100)
t.penup()
t.rt(90)
t.setpos(0,100)
t.pendown()
t.fd(100)
t.penup()
t.setpos(0,-50)
t.lt(90)
t.pendown()
t.circle(100)
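# Added note: the script draws two concentric circles centred at (0, 50)
# (radii 50 and 100), a horizontal diameter of the small circle from
# (-50, 50) to (50, 50), and a vertical line from (0, 100) down to (0, 0).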
|
[
"jain22719@gmail.com"
] |
jain22719@gmail.com
|
62696409136a5cc3fdb711a033171a0ac2283072
|
8799cbe3a261fea3ff05af2fba7e3eade40b57f5
|
/SocialMedia/chat/migrations/0003_message.py
|
8d63257134147432e000872e0dc0071bfe366b5a
|
[] |
no_license
|
Anoop-Suresh/Training
|
83b5759db0d2113bb90731b243a1dd2d5be5992f
|
e6f4dd8a77fec058917dd25c424a1f3afc7df236
|
refs/heads/master
| 2022-11-30T08:18:21.432284
| 2019-10-13T03:48:15
| 2019-10-13T03:48:15
| 190,737,085
| 0
| 0
| null | 2022-11-22T04:17:20
| 2019-06-07T12:05:47
|
Python
|
UTF-8
|
Python
| false
| false
| 843
|
py
|
# Generated by Django 2.2.4 on 2019-08-19 09:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('chat', '0002_delete_message'),
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField()),
('timestamp', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
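# For reference, the model this migration materializes would look roughly
# like the following (an assumed reconstruction, not part of the original file):
#
#     class Message(models.Model):
#         content = models.TextField()
#         timestamp = models.DateTimeField(auto_now_add=True)
#         author = models.ForeignKey(settings.AUTH_USER_MODEL,
#                                    on_delete=models.CASCADE)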
|
[
"anoopsuresh.sayonetech@gmail.com"
] |
anoopsuresh.sayonetech@gmail.com
|
11334580d227b390aefdefb498b228ab139c24cd
|
b5f9f93a415a5cc0117a580c5da12804e68c141d
|
/scripts/motions/test/follow1.py
|
2f7db1b8b9f8c5fa6b6ae576d5ae6484649a95d7
|
[] |
no_license
|
akihikoy/lfd_trick
|
71f89d80abc27ffc6fbd5bc609322918a4f8264e
|
b7bf0189db7bcef07772db17de29302d6e8ba2bf
|
refs/heads/master
| 2021-01-10T14:22:53.341666
| 2016-03-29T18:16:15
| 2016-03-29T18:16:15
| 50,623,958
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,335
|
py
|
#!/usr/bin/python
from core_tool import *
def Help():
    return '''Test: follow a q_traj on which we experienced a failure.
Usage: test.follow1'''
def Run(t,*args):
if not t.robot.Is('Baxter'):
CPrint(4,'This test is only for Baxter.')
return
q_traj= [
[-1.0544096854806089, -0.2776862141411402, 0.28087579936236179, 1.6743394798167504, -0.56010443409106647, -1.4960295455121526, 0.57304766831757548],
[-1.0159048510309103, -0.35670424794068467, 0.33155411397969409, 1.8633858091147795, -0.47760512948645528, -1.5669787327781961, 0.42943210292684869],
[-0.96476264324992855, -0.42311836836398564, 0.39891331930441265, 2.0439506789702975, -0.37623150979886999, -1.6560476331050802, 0.28551453075495919],
[-0.92135636878025318, -0.46074409246091941, 0.45219267225159882, 2.1723939241120336, -0.29270704439420275, -1.7386809973087964, 0.17859944748797052],
[-0.91656936668207734, -0.46815040865258956, 0.44552228082284107, 2.23785467540214, -0.27927621904754196, -1.7900609544614621, 0.13306757513154097],
[-0.93221128726280056, -0.46074694640818792, 0.4028452571883796, 2.2804654161510274, -0.30677314754121038, -1.8313667753638549, 0.11292969674597057],
[-0.95616162214784528, -0.4438852029022185, 0.3410417135902617, 2.3111201017741747, -0.35492921493141205, -1.8713039748545086, 0.10151482877769019],
[-0.98726375374869657, -0.41888678102704663, 0.26351591185117751, 2.3249457989019198, -0.4190347361664789, -1.9063122558890606, 0.097811529213398987],
[-1.0242863194214729, -0.38779126582438822, 0.17653166339142254, 2.3179885163791289, -0.49143987933855837, -1.9329493608376833, 0.1015981943603622],
[-1.0656439567205895, -0.35291260947696607, 0.088150274951116991, 2.288431318010566, -0.5628497638992821, -1.9487387446436377, 0.11298602776161597],
[-1.1093022995220323, -0.31625308645432337, 0.0060286379659039453, 2.2371535204199069, -0.62494896087120411, -1.9526402401261265, 0.13156378816155415],
[-1.1529369448473732, -0.27921815728965166, -0.064511176363831246, 2.1674541197858996, -0.67262675398564087, -1.9448884257954011, 0.15577138596582199],
[-1.1941583016955246, -0.2427746643154888, -0.12106907666635845, 2.0842902079877921, -0.70442681842261712, -1.9265182356842492, 0.18293576997005481],
[-1.2306277028657173, -0.2078345031326814, -0.16355796294151093, 1.9935418251652242, -0.72147025495074257, -1.8989935296867262, 0.20976421147725308],
[-1.2601494641579252, -0.1754772449445727, -0.19278311631736536, 1.8987549292218027, -0.72543267928147837, -1.8618017119766488, 0.2344030341002333],
[-1.2820829098828739, -0.140350093340425, -0.20468057608344964, 1.7555429674580416, -0.71194974213929796, -1.7803069844426007, 0.27883255052236938],
[-1.2963234838184399, -0.091146422224025386, -0.20502880942036245, 1.5646316591788036, -0.69127656886362843, -1.6637240498207164, 0.33486503311347571],
[-1.3040405608865098, -0.027555656757953781, -0.20189327344255442, 1.3558203264485023, -0.67360914409511163, -1.5378054333056337, 0.38594834680744561],
[-1.3052683310380491, 0.032722855229927496, -0.19958386393719424, 1.1805017632013304, -0.66021822984803624, -1.4315895338502982, 0.4147167986184731],
[-1.2965664001224004, 0.051749212231033355, -0.19847320614447614, 1.1254808297736065, -0.64272551631330899, -1.386163254253725, 0.39388220133306295]]
t_traj= [1.779648830049163, 3.2565732776900154, 4.1970153081875035, 4.69874673452241, 4.94874673452241, 5.19874673452241, 5.44874673452241,
5.69874673452241, 5.94874673452241, 6.19874673452241, 6.44874673452241, 6.69874673452241, 6.94874673452241, 7.19874673452241, 7.44874673452241,
7.896284115034163, 8.642031412999936, 9.729590437636922, 11.099266713005452, 11.958968797813638]
x_traj= [t.robot.FK(q,arm=LEFT).tolist() for q in q_traj]
#['s0', 's1', 'e0', 'e1', 'w0', 'w1', 'w2']
#qvel_limits= [0.5, 0.5, 0.8, 0.8, 0.8, 0.8, 0.8] #ORIGINAL
#qvel_limits= [0.5, 0.5, 0.6, 0.6, 0.6, 0.6, 0.6]
#qvel_limits= [0.1]*7
#LimitQTrajVel(q_start=q_traj[0], q_traj=q_traj, t_traj=t_traj, qvel_limits=qvel_limits)
#print 'Modified q_traj:',q_traj
#print 'Modified t_traj:',t_traj
'''WARNING: In the following code, following the trajectory always fails.
This is because a joint angle, e.g.
'w1' (the 6th element) in
[-1.0242863194214729, -0.38779126582438822, 0.17653166339142254, 2.3179885163791289, -0.49143987933855837, -1.9329493608376833, 0.1015981943603622],
exceeds the joint limits.
The trajectory was originally generated by IK with KDL, which is the cause of this issue.
NOTE: The IK in baxter_pykdl does not take the joint limits into account.
There was an error from /joint_trajectory_action_server:
[ERROR] [WallTime: 1452044236.093979] /joint_trajectory_action_server: Exceeded Error Threshold on left_w1: -0.350152589571
NOTE: This code has been fixed by myself@2016-01-06
With this new baxter_pykdl code, IK fails in FollowXTraj.
'''
t.robot.MoveToQ(q_traj[0])
CPrint(1,'Follow the trajectory. Ready?')
if not t.AskYesNo(): return
#t.robot.FollowQTraj(q_traj, t_traj, arm=LEFT, blocking=True)
t.robot.FollowXTraj(x_traj, t_traj, arm=LEFT, blocking=True)
#t.robot.MoveToQ(q_traj[0])
#for q,tm in zip(q_traj,t_traj):
#CPrint(1,'Move to next point?',q)
#if not t.AskYesNo(): break
#t.robot.MoveToQ(q)
|
[
"info@akihikoy.net"
] |
info@akihikoy.net
|
fba4aaf68de22858534fd2c92373acfe6ed79e88
|
4b0e25df6e219ed71689405f371f097b715869ee
|
/scripts/util/dump_yearly.py
|
767b09337b14fc709769b3a1c64ccdb89e76213d
|
[
"MIT"
] |
permissive
|
geogismx/dep
|
4ce9be3c6a42c3ad8dd1762d7819ab19404fae3f
|
303c715e70000b48c5c71df0b59e259b8c246e9c
|
refs/heads/master
| 2020-04-16T17:50:11.805351
| 2019-01-09T16:21:59
| 2019-01-09T16:21:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,670
|
py
|
"""Dump some monthly data"""
from __future__ import print_function
import pandas as pd
from pandas.io.sql import read_sql
from pyiem.util import get_dbconn
# East Nish (note: DATA is reassigned several times below; only the final
# block, North Raccoon, is actually used by the query)
DATA = """102400030603
102400030704
102400030701
102400030702
102400030601
102400030602
102400030303
102400030304
102400030406
102400030502
102400030302
102400030104
102400030301
102400030404
102400030405
102400030102
102400030501
102400030103
102400030206
102400030402
102400030101
102400030401
102400030403
102400030204
102400030205
102400030203
102400030202
102400030201
102400030707
102400030705
102400030708
102400030703
102400030706"""
DATA = """102400020402
102400020607
102400020505
102400020703
102400020606
102400020804
102400020803
102400020705
102400020802
102400020704
102400020805
102400020801
102400020303
102400020504
102400020401
102400020503
102400020702
102400020301
102400020605
102400020502
102400020603
102400020302
102400020501
102400020602
102400020105
102400020604
102400020701
102400020106
102400020209
102400020601
102400020104
102400020103
102400020208
102400020207
102400020102
102400020101
102400020203
102400020205
102400020202
102400020206
102400020204
102400020201
102400020806
102400020706"""
# Beaver Creek
DATA = """071000040901
071000040902
071000040903
071000040904
071000040905
071000040906
071000040907
071000040908
071000040909
071000040910
071000040911"""
# North Raccoon
DATA = """071000061502
071000061602
071000060605
071000061201
071000060401
071000061501
071000060802
071000060208
071000060403
071000061202
071000060602
071000060207
071000060502
071000061004
071000061402
071000061204
071000060805
071000060201
071000061001
071000060904
071000060702
071000061002
071000060203
071000060205
071000061703
071000060304
071000060601
071000060310
071000061405
071000061203
071000060804
071000060903
071000060604
071000060803
071000060505
071000061701
071000060303
071000061702
071000061301
071000061302
071000061005
071000061401
071000060308
071000061504
071000060306
071000060301
071000061003
071000061102
071000060902
071000060901
071000060603
071000060305
071000060701
071000060503
071000060101
071000060103
071000060204
071000061403
071000061404
071000060206
071000060307
071000061503
071000060309
071000060302
071000060202
071000060801
071000061406
071000060504
071000060501
071000061601
071000061505
071000060402
071000061101
071000060806
071000060102"""
HUCS = [x.strip() for x in DATA.split("\n")]
def main():
"""Go Main Go"""
pgconn = get_dbconn('idep', user='nobody')
df = read_sql("""
SELECT huc_12, extract(year from valid) as year,
sum(avg_loss) * 4.463 as loss_ton_per_acre,
sum(avg_delivery) * 4.463 as delivery_ton_per_acre,
sum(qc_precip) / 25.4 as precip_inch,
sum(avg_runoff) / 25.4 as runoff_inch
from results_by_huc12 WHERE
scenario = 0 and huc_12 in %s and valid >= '2007-01-01'
and valid < '2018-01-01' GROUP by huc_12, year
""", pgconn, params=(tuple(HUCS), ))
writer = pd.ExcelWriter(
'dep_yearly.xlsx', options={'remove_timezone': True})
df.to_excel(writer, 'Yearly Totals', index=False)
gdf = df.groupby('huc_12').mean()
gdf[['loss_ton_per_acre', 'delivery_ton_per_acre', 'precip_inch',
'runoff_inch']].to_excel(writer, 'Yearly Averages')
format1 = writer.book.add_format({'num_format': '0.00'})
worksheet = writer.sheets['Yearly Totals']
worksheet.set_column('A:A', 18)
worksheet.set_column('C:F', 20, format1)
worksheet = writer.sheets['Yearly Averages']
worksheet.set_column('A:A', 18)
worksheet.set_column('B:E', 20, format1)
writer.save()
if __name__ == '__main__':
main()
|
[
"akrherz@iastate.edu"
] |
akrherz@iastate.edu
|
9236262b222aaefac7e5c01449e61de6661ebb41
|
bab1fb6a6879574f010d79b0bccd3ad2681a0034
|
/forumsite/forum/post/admin.py
|
6f314e5be0a43b8dfa0f652393edec73842f96d2
|
[
"Unlicense"
] |
permissive
|
lyjhj/dj
|
ee5e2234d1e1347e5bdffeffcc7a176bd47934a2
|
867ae008a3a65fb38fb0ed95b93c616e753f3903
|
refs/heads/master
| 2020-03-23T18:00:15.498064
| 2018-06-13T11:17:38
| 2018-06-13T11:17:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Category, Item, Comment
admin.site.register(Category)
class CommentInline(admin.TabularInline):
model = Comment
@admin.register(Item)
class ItemAdmin(admin.ModelAdmin):
list_display = ['title', 'publish', 'created', 'cat']
list_filter = ['publish', 'cat']
    search_fields = ['title', 'body']  # was 'list_search', which is not a ModelAdmin option
inlines = [ CommentInline, ]
|
[
"ch1huizong@gmail.com"
] |
ch1huizong@gmail.com
|
ed97e7bb8c4a7062ac88f90ec47428281e6b77b5
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/bob/6666dc4eec284bd286b22d2f63110743.py
|
2ead89f1a06dfe3793ffe43dd3eb7edce4e3206a
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
#
# Skeleton file for the Python "Bob" exercise.
#
def is_question(what):
    return what.rstrip().endswith('?')  # safe on empty input; tolerates trailing whitespace
def is_yelling(what):
return what.isupper()
def is_empty(what):
return not what.strip()
def hey(what):
if is_yelling(what):
return 'Whoa, chill out!'
elif is_empty(what):
return 'Fine. Be that way!'
elif is_question(what):
return 'Sure.'
else:
return 'Whatever.'
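# Quick sanity checks (added for illustration, not part of the original skeleton):
#   hey('WATCH OUT!')  -> 'Whoa, chill out!'
#   hey('Is this ok?') -> 'Sure.'
#   hey('   ')         -> 'Fine. Be that way!'
#   hey('Hm.')         -> 'Whatever.'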
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
a4f3850908d168eab535b69ca348c26c249e1c88
|
0cc7fbe68074113b3db7a6b42a303dcd970da326
|
/exercises/reinforcement_learning/monte_carlo_policy_iteration.py
|
6187b706ca8664251e6a496def7406e0349a1dde
|
[] |
no_license
|
jay-woo/comprobo2014
|
a7c32a37b56933635ece69821b00f0f93df83d15
|
4e8b77bb5a9926b3b7735020fc24f294f61533b8
|
refs/heads/master
| 2020-04-01T20:03:23.113694
| 2015-09-01T00:41:41
| 2015-09-01T00:41:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,893
|
py
|
#!/usr/bin/env python
"""
This Python script demonstrates the basics of the Monte-Carlo Policy Iteration algorithm with exploring starts
"""
import numpy as np
import pdb
from copy import copy
def evaluate_policy_montecarlo(policy,p,r,gamma,n):
""" computes v^policy and q^policy using monte-carlo evaluation
"""
# compute the q function as well
q = np.zeros((p.shape[1],p.shape[0]))
v = np.zeros((p.shape[1],1))
for i in range(q.shape[0]):
for j in range(q.shape[1]):
returns = []
for trial in range(n):
x = i
u = j
rsum = r[x,u]
probs = p[u,x,:]
# 100 is an arbitrary threshold where gamma**100 is sufficiently low
for t in range(1,100):
probs = p[u,x,:]
x = np.random.choice(np.arange(p.shape[1]),p=probs)
u = policy[x]
rsum += r[x,u]*gamma**t
returns.append(rsum)
q[i,j] = sum(returns)/n
for i in range(q.shape[0]):
v[i] = q[i,policy[i]]
return v,q
def improve_policy(policy,q):
newpolicy = copy(policy)
for i in range(len(policy)):
newpolicy[i] = np.argmax(q[i,:])
return newpolicy
# encodes p(x_{t+1} | x_t, u_t), the first dimension is u_t, next is x_t, and the third is x_{t+1}
p = np.array([[[0.6, .25, .15],
[0.0, 1.0, 0.0],
[0.3, 0.0, 0.7]],
[[0.1, .8, .1],
[0.0, 0.0, 1.0],
[0.0, 0.5, 0.5]]])
# encodes r(x_t, u_t), the first dimension is x_t, and the second is u_t
r = np.array([[0.0, 0.0],
[0.0, 0.0],
[1.0, 1.0]])
# the discount factor for the MDP
gamma = 0.9
# initialize the policy (at first always execute action 0)
policy = [0, 0, 0]
print "policy is ", policy
converged = False
while not(converged):
# evaluate the policy
v,q = evaluate_policy_montecarlo(policy,p,r,gamma,100)
print "value function is", v
oldpolicy = policy
# improve the policy
policy = improve_policy(policy,q)
converged = oldpolicy == policy
print "new policy is ", policy
|
[
"paullundyruvolo@gmail.com"
] |
paullundyruvolo@gmail.com
|
fb2755119d89487927d6f007973870fb4c5e228f
|
83316dd8a01070711fe8c42cd38d245da9a4711e
|
/testmethodology/results/ResultUtils.py
|
836fe4807b83806f812225e9b4955097b55d74b9
|
[] |
no_license
|
CmWork/STAKCommands
|
fa46d561d0a85ac49c14f1b1fc6c014d2e0955bc
|
8b3fb68912116f7973fa9b3677d4e3d43c92f194
|
refs/heads/master
| 2020-05-02T00:57:20.940300
| 2015-07-10T04:32:37
| 2015-07-10T04:32:37
| 38,860,960
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,669
|
py
|
import os
import json
from StcIntPythonPL import *
from spirent.methodology.results.Status import Status
from spirent.methodology.results.ResultEnum import (
EnumVerdict,
EnumDataClass,
EnumDataFormat
)
import spirent.methodology.results.LogUtils as logger
from spirent.methodology.results.ResultConst import ResultConst
from spirent.methodology.results.ProviderConst import ProviderConst as pc
import copy
import datetime
import time
def summarize_status(obj):
verdict = EnumVerdict.none
verdict_text = ResultConst.NONE
for result in obj._data:
if Status.get_dict_name() in result:
if Status.get_apply_verdict_dict_name() in result[Status.get_dict_name()]:
if result[Status.get_dict_name()][Status.get_apply_verdict_dict_name()] is False:
continue
new_verdict = result[Status.get_dict_name()][Status.get_verdict_dict_name()]
if EnumVerdict.do_override_verdict(verdict, new_verdict):
verdict = new_verdict
verdict_text = \
result[Status.get_dict_name()][Status.get_verdict_text_dict_name()]
obj._status.verdict = verdict
if verdict == EnumVerdict.passed:
obj._status.verdict_text = ResultConst.TEST_PASS_VERDICT_TEXT
else:
obj._status.verdict_text = verdict_text
def generate_report_file(report_name, data):
filename = os.path.join(CTestResultSettingExt.GetResultDbBaseDirectory(), report_name)
logger.info("Saving file:" + filename)
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
    with open(filename, "w") as f:  # context manager ensures the file is closed
        f.write(json.dumps(data, separators=(',', ':'), sort_keys=False))
CFileManager.AddFile(filename, 'RESULT')
return filename
def wrap_data_as_single_group(data, group_tag=ResultConst.ALL_GROUPS):
groupdata = {}
groupdata[ResultConst.TAG] = group_tag
if not isinstance(data, list):
groupdata[ResultConst.CHILDREN] = []
groupdata[ResultConst.CHILDREN].append(data)
else:
groupdata[ResultConst.CHILDREN] = data
return groupdata
def report_group_comparator(data1, data2):
value1 = CMeta.GetEnumerationValue('spirent.methodology.ResultBaseCommand',
'ReportGroup',
str(data1[pc.INFO][pc.REPORT_GROUP]))
value2 = CMeta.GetEnumerationValue('spirent.methodology.ResultBaseCommand',
'ReportGroup',
str(data2[pc.INFO][pc.REPORT_GROUP]))
return value1 - value2
def group_data_using_report_group(data):
data.sort(report_group_comparator)
groupdata = {}
groupdata[ResultConst.TAG] = ResultConst.ALL_GROUPS
groupdata[ResultConst.CHILDREN] = []
result_group = {}
result_group[pc.CLASS] = EnumDataClass.result_group
result_group[pc.DATA_FORMAT] = EnumDataFormat.group
mydata = {}
mydata[ResultConst.TAG] = 'ResultGroup'
mydata[ResultConst.CHILDREN] = []
result_group[pc.DATA] = mydata
cgdata = copy.deepcopy(result_group)
cdata = cgdata[pc.DATA]
cgroup = pc.DEFAULT_REPORT_GROUP
for pdata in data:
if cdata[ResultConst.CHILDREN]:
if pdata[pc.INFO][pc.REPORT_GROUP] == cgroup:
cdata[ResultConst.CHILDREN].append(pdata)
continue
else:
groupdata[ResultConst.CHILDREN].append(cgdata)
cgdata = copy.deepcopy(result_group)
cdata = cgdata[pc.DATA]
cdata[ResultConst.CHILDREN].append(pdata)
cgroup = pdata[pc.INFO][pc.REPORT_GROUP]
cdata[ResultConst.TAG] = "Report Group " + cgroup
if cdata[ResultConst.CHILDREN]:
groupdata[ResultConst.CHILDREN].append(cgdata)
return groupdata
def validate_report_group(stringValue):
try:
CMeta.GetEnumerationValue('spirent.methodology.ResultBaseCommand',
'ReportGroup',
str(stringValue))
return stringValue
    except Exception:  # fall back to the default group on any lookup error
return pc.DEFAULT_REPORT_GROUP
def insert_report_group_if_not_defined(dict_data):
if not (pc.INFO in dict_data):
dict_data[pc.INFO] = {}
if not (pc.REPORT_GROUP in dict_data[pc.INFO]):
dict_data[pc.INFO][pc.REPORT_GROUP] = pc.DEFAULT_REPORT_GROUP
else:
dict_data[pc.INFO][pc.REPORT_GROUP] = \
validate_report_group(dict_data[pc.INFO][pc.REPORT_GROUP])
return dict_data
def get_current_time_string():
return datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
|
[
"c@m.com"
] |
c@m.com
|
e40bad0cc4a7daf31b43b5798a29c426db6e2f2a
|
65dce36be9eb2078def7434455bdb41e4fc37394
|
/234 Palindrome Linked List.py
|
57debdf93004c385c901d8d4fc1ac7a1101d855a
|
[] |
no_license
|
EvianTan/Lintcode-Leetcode
|
9cf2d2f6a85c0a494382b9c347bcdb4ee0b5d21a
|
d12dd31e98c2bf24acc20c5634adfa950e68bd97
|
refs/heads/master
| 2021-01-22T08:13:55.758825
| 2017-10-20T21:46:23
| 2017-10-20T21:46:23
| 92,607,185
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 540
|
py
|
'''
Given a singly linked list, determine if it is a palindrome.
Follow up:
Could you do it in O(n) time and O(1) space?
'''
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
"""
nodes = []
while head:
nodes.append(head.val)
head = head.next
return nodes == nodes[::-1]
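    # A sketch of the O(1)-space follow-up mentioned in the docstring (added
    # for illustration, not part of the original submission): find the middle
    # with slow/fast pointers, reverse the second half in place, then compare.
    def isPalindromeConstantSpace(self, head):
        slow = fast = head
        while fast and fast.next:   # slow ends at the middle node
            slow = slow.next
            fast = fast.next.next
        prev = None
        while slow:                 # reverse the second half
            slow.next, prev, slow = prev, slow, slow.next
        left, right = head, prev
        while right:                # compare the halves from both ends
            if left.val != right.val:
                return False
            left, right = left.next, right.next
        return True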
|
[
"yiyun.tan@uconn.edu"
] |
yiyun.tan@uconn.edu
|
8d7b3d53d319729a792f4c4c9540b11d79a2188e
|
1646b3fe9000c3109695e99b4bb75679577906ff
|
/187.RepeatedDNASequences.py
|
4c2cf6b3b75206c0e73051291ec5b5a969f38cf3
|
[] |
no_license
|
yao9208/lc
|
5ecf6720886beb951c9a70433f53a0ec0bcb74dc
|
024c1b5c98a9e85706e110fc2be8dcebf0f460c3
|
refs/heads/master
| 2020-04-03T20:55:40.199637
| 2017-02-10T08:30:46
| 2017-02-10T08:30:46
| 56,478,941
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 639
|
py
|
from sets import Set
class Solution(object):
def findRepeatedDnaSequences(self, s):
"""
:type s: str
:rtype: List[str]
"""
result = []
dic = Set()
resultSet = Set()
for i in range(len(s)-9):
sub = s[i:i+10]
#key = self.transform(sub)
if sub in dic:
resultSet.add(sub)
else:
dic.add(sub)
return list(resultSet)
def transform(self, s):
dic = {'A':0, 'T':1, 'C':2, 'G':3}
result = 0
for ch in s:
            result = (result << 2) + dic[ch]  # e.g. 'AT' -> (0 << 2) + 1 = 1; '<<' binds looser than '+'
return result
|
[
"yao9208@foxmail.com"
] |
yao9208@foxmail.com
|
cc0b564573152e35e57785774f3f8f3b7ae477e6
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/pdMwiMpYkJkn8WY83_4.py
|
f98843713468da39e6fdfb127999e93b793e8ef5
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
"""
Write a function that **recursively** determines if a string is a palindrome.
### Examples
is_palindrome("abcba") ➞ True
is_palindrome("b") ➞ True
is_palindrome("") ➞ True
is_palindrome("ad") ➞ False
### Notes
An empty string counts as a palindrome.
"""
def is_palindrome(w):
if len(w) <= 1:
return True
elif w[0] == w[-1]:
return is_palindrome(w[1:-1])
else:
return False
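# An alternative sketch (added for illustration, not part of the original
# submission): the same recursion expressed on indices, which avoids copying
# an O(n) slice at every step.
def is_palindrome_indexed(w, i=0, j=None):
    if j is None:
        j = len(w) - 1
    if i >= j:
        return True
    return w[i] == w[j] and is_palindrome_indexed(w, i + 1, j - 1)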
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
0189ebc033ef3f0dba5aa432cc867ce49bd47944
|
3da15577cf3faeeab75cf48f6230372e22e1ae39
|
/shop/api/permissions.py
|
08b7c9799ae7bdca64b6519bd140092e074eec4e
|
[] |
no_license
|
deepdik/cityapl
|
991428e52f0bd33ba48bf42391244661512edd17
|
9e62ce2924018b0ca5e4d2e884279128605d5e0c
|
refs/heads/master
| 2020-04-04T22:37:42.507940
| 2018-11-06T06:02:05
| 2018-11-06T06:02:05
| 156,329,922
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 584
|
py
|
from rest_framework.permissions import BasePermission,SAFE_METHODS
class IsOwnerOrReadOnly(BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in SAFE_METHODS:
return True
return obj.user == request.user
class IsUser(BasePermission):
def has_permission(self, request, view):
print("hello")
if 'HTTP_USER_AGENT' in request.META:
# print (request.META[''])
if 'Mozilla' in request.META['HTTP_USER_AGENT']:
if(request.META.get('HTTP_REFERER')):
return True
return False
|
[
"dk5f95@gmail.com"
] |
dk5f95@gmail.com
|
270980529a30f03e325df9ebbce376402f7393dd
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_fop.py
|
6e20e2da412efa43b68bdc5ddc1b75f6b64b011f
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
# class header
class _FOP():
def __init__(self,):
self.name = "FOP"
self.definitions = [u'(especially in the past) a man who is extremely interested in his appearance and who wears very decorative clothes']
self.parents = []
    self.children = []  # fixed typo: was 'childen'
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
c030633d3f8acd75c40c4fc8a8369f4a9c37a819
|
334dea3e7941871a6b23be65cfc9b14d6be49db0
|
/apps/master/migrations/0011_auto_20210414_2324.py
|
ff34056bdb39745aa68bcc063db3a7463c01b49c
|
[] |
no_license
|
HilmiZul/walikelas
|
e2f3d06dfab3ab48373eda2b1b363fe1e64caef6
|
3febaf97272c78310e488c883a9647b269e25930
|
refs/heads/master
| 2023-08-15T20:56:58.011519
| 2021-10-07T04:32:43
| 2021-10-07T04:32:43
| 367,083,389
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
# Generated by Django 2.2.17 on 2021-04-14 16:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('master', '0010_gurumapel_rombel'),
]
operations = [
migrations.RenameField(
model_name='rombel',
old_name='guru',
new_name='walikelas',
),
]
|
[
"netspytux@gmail.com"
] |
netspytux@gmail.com
|
cb4b2fa92a7b554dd223c4162d6d61fa9d634a54
|
4eddf6a34715752dc652571b1ab274f51ceb5da0
|
/.history/yjs/test_20210606212007.py
|
7d6be688304d2c14aecf1dd4e6af756385e35e82
|
[] |
no_license
|
Suelt/Hust-SE-introduction-to-ML
|
649aba0e5b41363ceac03330ef02982982a0615d
|
a66785c3085da573f5748d13608eabf02e616321
|
refs/heads/master
| 2023-05-27T13:13:41.058545
| 2021-06-10T05:44:02
| 2021-06-10T05:44:02
| 375,582,438
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,503
|
py
|
import numpy as np
from tensorflow.keras.datasets import mnist
import random
class NumpyinMnist():
def __init__(self):
self.layers=2
self.weight=[]
self.weight.append(np.random.randn(30,784))
self.weight.append(np.random.randn(10,30))
self.bias=[]
self.bias.append(np.random.rand(30,1))
self.bias.append(np.random.rand(10,1))
# size[784,30,10]
# w:[output, input]
# b:[output]
def forward(self,x):
for i in range(2):
b=self.bias[i]
w=self.weight[i]
# b_axis1=[]
# for i in range(len(b)):
# b_axis1.append(b[i][0])
z = w@x+b
x = sigmoid(z)
return x
def backpropagation(self, x, y):
x=x.reshape(784,1)
gradient_w = [np.zeros(w.shape) for w in self.weight]
gradient_b = [np.zeros(b.shape) for b in self.bias]
intermediate_list = []
zs = []
intermediate = x
for i in range(2):
b=self.bias[i]
w=self.weight[i]
z = w@intermediate + b
intermediate = sigmoid(z)
zs.append(z)
intermediate_list.append(intermediate)
        # hidden layer -> output layer
        loss = np.power((intermediate_list[-1] - y), 2).sum()
        # delta = dC/dz for a sigmoid output with squared error (the constant
        # factor 2 from differentiating (a - y)^2 is folded into the learning rate)
        delta = intermediate_list[-1] * (1 - intermediate_list[-1]) * (intermediate_list[-1] - y)
gradient_b[-1] = delta
intermediate_output=intermediate_list[-2].T
delta_w=delta@intermediate_output
gradient_w[-1] = delta_w
        # hidden layer -> input layer
z = zs[-2]
a = intermediate_list[-2]
delta = np.dot(self.weight[-1].T, delta) * a * (1 - a)
gradient_b[-2] = delta
delta_w=delta@x.T
gradient_w[-2] = delta_w
return gradient_w, gradient_b,loss
def train(self, training_data,test_data, epoches, batch_size, lr):
n = 60000
for j in range(epoches):
#random.shuffle(train_data)
batches = [
training_data[k:k + batch_size]
for k in range(0, n, batch_size)]
for batch in batches:
batch_gradient_w = [np.zeros(w.shape) for w in self.weight]
batch_gradient_b = [np.zeros(b.shape) for b in self.bias]
batch_loss=0
for x, y in batch:
gradient_w, gradient_b,loss = self.backpropagation(x, y)
batch_gradient_w = [batch_w + w for batch_w, w in zip(batch_gradient_w, gradient_w)]
batch_gradient_b = [batch_b + b for batch_b, b in zip(batch_gradient_b, gradient_b)]
batch_loss+=loss
batch_gradient_w = [w / len(batch) for w in batch_gradient_w]
batch_gradient_b = [b / len(batch) for b in batch_gradient_b]
batch_loss=batch_loss/len(batch)
self.weight = [w - lr * batch_w for w,batch_w in zip(self.weight, batch_gradient_w)]
self.bias = [b - lr * batch_b for b, batch_b in zip(self.bias, batch_gradient_b)]
loss=batch_loss
if test_data:
n_test = len(test_data)
print("Epoch {0}:{1}/{2}".format(j, self.evaluate(test_data), n_test),loss)
else:
print("Epoch {0} complete".format(j))
# def update_mini_batch(self, batch, lr):
# batch_gradient_w = [np.zeros(w.shape) for w in self.weight]
# batch_gradient_b = [np.zeros(b.shape) for b in self.bias]
# batch_loss=0
# # for every sample in current batch
# for x, y in batch:
# # list of every w/b gradient
# # [w1,w2,w3]
# gradient_w, gradient_b,loss = self.backpropagation(x, y)
# batch_gradient_w = [batch_w + w for batch_w, w in zip(batch_gradient_w, gradient_w)]
# batch_gradient_b = [batch_b + b for batch_b, b in zip(batch_gradient_b, gradient_b)]
# batch_loss+=loss
# batch_gradient_w = [w / len(batch) for w in batch_gradient_w]
# batch_gradient_b = [b / len(batch) for b in batch_gradient_b]
# batch_loss=batch_loss/len(batch)
# # w = w - lr * nabla_w
# self.weight = [w - lr * batch_w for w,batch_w in zip(self.weight, batch_gradient_w)]
# self.bias = [b - lr * batch_b for b, batch_b in zip(self.bias, batch_gradient_b)]
# return batch_loss
def evaluate(self, test_data):
sum=0
for x,y in test_data:
pred=np.argmax(self.forward(x.reshape([784,1])))
if(pred==y):
sum+=1
return sum
def convert_to_one_hot(y, C):
return np.eye(C)[y.reshape(-1)].T
def sigmoid(X):
return 1.0 / (1 + np.exp(-X))
if __name__ == '__main__':
(x_train, y_train), (x_test, y_test) = mnist.load_data()
train_data = []
train_x = x_train.reshape([x_train.shape[0], x_train.shape[1]*x_train.shape[2]])
test_data = []
test_x = x_test.reshape([x_test.shape[0], x_test.shape[1]*x_test.shape[2]])
for i in range(train_x.shape[0]):
train_data.append([train_x[i]/255, convert_to_one_hot(y_train[i], 10)])
for i in range(test_x.shape[0]):
test_data.append([test_x[i]/255, y_test[i]])
demo=NumpyinMnist()
demo.train(train_data,test_data,10,100,0.1)
|
[
"2552925383@qq.com"
] |
2552925383@qq.com
|
2f84b258aeb88034ffb315eab22e7c3a81441c17
|
c09a8ed8cc41f1c60341aaa4a6c267950022d336
|
/database-test/code/app.py
|
2bfc3c647d519e86dfbc6a90541e1e17c4f6e196
|
[] |
no_license
|
chuiizeet/Flask-bois
|
796abc1c135dd2d0032179a818a9227ee919d86e
|
a7c06e5167e169bc57a83bb4e17bef85e620c2fb
|
refs/heads/master
| 2020-05-30T02:06:43.128860
| 2019-06-25T22:08:12
| 2019-06-25T22:08:12
| 189,491,906
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
from flask import Flask
from flask_restful import Api
from flask_jwt import JWT
from secure import authenticate, identity
from user import UserRegister
from item import Item, ItemList
app = Flask(__name__)
app.secret_key = 'chuy'
api = Api(app)
jwt = JWT(app, authenticate, identity)
api.add_resource(Item, '/item/<string:name>')
api.add_resource(ItemList, '/items')
api.add_resource(UserRegister, '/register')
if __name__ == "__main__":
app.run(port=5000, debug=True)
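# Illustrative request flow (added; endpoints inferred from the code above,
# /auth is flask_jwt's default authentication URL):
#   POST /register        -> create a user (UserRegister)
#   POST /auth            -> exchange credentials for a JWT
#   /item/<name>          -> single-item resource (Item)
#   GET  /items           -> list items (ItemList)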
|
[
"chuy.guerra19@outlook.es"
] |
chuy.guerra19@outlook.es
|
98084a7476472c8403acef49cb2182be1e15d07c
|
e1867a1c31a4b8c61ab04e2e9b20750fe4481137
|
/test/test_ibnode.py
|
512d3bd9751bd59b55cf26fdef4f4860bab67d84
|
[
"MIT"
] |
permissive
|
tmsincomb/pyontutils
|
4c0235178818adae9b971d0ffa85726dacba7f79
|
4444ec1e8d903fdf61465b19c4157d52376e866a
|
refs/heads/master
| 2023-03-20T00:51:47.392754
| 2023-03-06T16:58:03
| 2023-03-06T16:58:03
| 97,636,258
| 0
| 0
| null | 2017-07-18T19:32:22
| 2017-07-18T19:32:22
| null |
UTF-8
|
Python
| false
| false
| 7,689
|
py
|
import unittest
from pathlib import Path
import rdflib
from pyontutils.core import yield_recursive
from pyontutils.identity_bnode import bnodes, IdentityBNode
from .common import temp_path
class TestIBNode(unittest.TestCase):
def setUp(self):
self.graph1 = rdflib.Graph()
file = Path('ttlser/test/nasty.ttl')
with open(file.as_posix(), 'rb') as f:
self.ser1 = f.read()
self.graph1.parse(data=self.ser1, format='turtle')
g2format = 'nt'
        # broken serialization :/ with full length prefixes
self.ser2 = self.graph1.serialize(format=g2format, encoding='utf-8')
with open('test_ser2.ttl', 'wb') as f:
f.write(self.ser2)
self.graph2 = rdflib.Graph()
self.graph2.parse(data=self.ser2, format=g2format)
# FIXME this doesn't account for changes in identity
        # under normalization, for example by ttlser
        # IBNode should not do the normalization itself
        # because we do want normalized forms to have a
        # different identity; the question does arise, however,
        # about where symmetric predicates fit ... I think those
        # are not a normalization-of-representation case; I think
        # they are clearly an ordering case and thus in scope for
        # IBNode, in the same way that reordering lists is in scope
def test_bytes(self):
test = b'hello'
ident = IdentityBNode(test).identity
m = IdentityBNode.cypher()
m.update(test)
h = m.digest()
assert ident == h, ident
def test_string(self):
test = 'hello'
ident = IdentityBNode(test).identity
m = IdentityBNode.cypher()
m.update(test.encode(IdentityBNode.encoding))
h = m.digest()
assert ident == h, ident
def test_pair(self):
test = 'hello', 'world'
ibn = IdentityBNode(test)
ident = ibn.identity
m = IdentityBNode.cypher()
for i, t in enumerate(test):
m.update(t.encode(IdentityBNode.encoding))
if not i % 2:
m.update(ibn.cypher_field_separator_hash)
h = m.digest()
assert ident == h, ident
def test_ser(self):
assert IdentityBNode(self.ser1) != IdentityBNode(self.ser2), 'serialization matches!'
def test_nodes(self):
assert IdentityBNode('hello there') == IdentityBNode('hello there')
assert IdentityBNode(b'hello there') == IdentityBNode(b'hello there')
try:
assert IdentityBNode(rdflib.BNode()) != IdentityBNode(rdflib.BNode())
# TODO consider returning the bnode itself?
raise AssertionError('identity bnode returned identity for bnode')
except ValueError as e:
pass
try:
bnode = rdflib.BNode()
assert IdentityBNode(bnode) == IdentityBNode(bnode)
raise AssertionError('identity bnode returned identity for bnode')
except ValueError as e:
pass
lit1 = rdflib.Literal('hello there')
lit2 = rdflib.Literal('hello there', datatype=rdflib.XSD.string)
lit3 = rdflib.Literal('hello there', lang='klingon')
assert IdentityBNode(lit1) == IdentityBNode(lit1)
assert IdentityBNode(lit2) == IdentityBNode(lit2)
assert IdentityBNode(lit3) == IdentityBNode(lit3)
assert IdentityBNode(lit1) != IdentityBNode(lit2)
assert IdentityBNode(lit1) != IdentityBNode(lit3)
assert IdentityBNode(lit2) != IdentityBNode(lit3)
uri1 = rdflib.URIRef('http://example.org/1')
uri2 = rdflib.URIRef('http://example.org/2')
assert IdentityBNode(uri1) == IdentityBNode(uri1)
assert IdentityBNode(uri2) == IdentityBNode(uri2)
assert IdentityBNode(uri1) != IdentityBNode(uri2)
def test_bnodes(self):
assert sorted(bnodes(self.graph1)) != sorted(bnodes(self.graph2)), 'bnodes match!'
def test_nifttl(self):
fmt = 'nifttl'
s1 = self.graph1.serialize(format=fmt)
g2 = rdflib.Graph()
[g2.add(t) for t in self.graph1]
[g2.namespace_manager.bind(k, str(v)) for k, v in self.graph1.namespaces()]
s2 = g2.serialize(format=fmt)
try:
assert s1 == s2
except AssertionError as e:
with open(temp_path / 'f1.ttl', 'wb') as f1, open(temp_path / 'f2.ttl', 'wb') as f2:
f1.write(s1)
f2.write(s2)
raise e
def test_ibnode(self):
def sbs(l1, l2):
for a, b in zip(l1, l2):
print('', a[:5], a[-5:], '\n', b[:5], b[-5:], '\n\n')
def ds(d1, d2):
for (k1, v1), (k2, v2) in sorted(zip(sorted(d1.items()), sorted(d2.items()))):
if k1 != k2:
# TODO len t1 != len t2
for t1, t2 in sorted(zip(sorted(v1), sorted(v2))):
print(tuple(e[:5] if type(e) == bytes else e for e in t1))
print(tuple(e[:5] if type(e) == bytes else e for e in t2))
print()
id1 = IdentityBNode(self.graph1, debug=True)
id2 = IdentityBNode(self.graph2, debug=True)
idni1 = sorted(id1.named_identities)
idni2 = sorted(id2.named_identities)
assert idni1 == idni2, 'named identities do not match'
idli1 = sorted(id1.connected_identities)
idli2 = sorted(id2.connected_identities)
assert idli1 == idli2, 'linked identities do not match'
idfi1 = sorted(id1.free_identities)
idfi2 = sorted(id2.free_identities)
try:
assert idfi1 == idfi2, 'free identities do not match'
except AssertionError as e:
_ = [[print(e[:10]) for e in t] and print() for t in zip(idfi1, idfi2)]
lu1 = {v:k for k, v in id1.unnamed_subgraph_identities.items()}
lu2 = {v:k for k, v in id2.unnamed_subgraph_identities.items()}
s1 = set(id1.unnamed_subgraph_identities.values())
s2 = set(id2.unnamed_subgraph_identities.values())
diff = (s1 | s2) - (s1 & s2)
for d in diff:
if d in lu1:
s = lu1[d]
p, o = next(id1._thing[s])
print('id1 extra')
[print(t)
for t in sorted(yield_recursive(s, p, o, id1._thing),
key=lambda t:t[::-1])]
else:
s = lu2[d]
p, o = next(id2._thing[s])
print('id2 extra')
[print(t)
for t in sorted(yield_recursive(s, p, o, id2._thing),
key=lambda t:t[::-1])]
assert len(set(idfi1)) == len(idfi1), 'HRM 1'
assert len(set(idfi2)) == len(idfi2), 'HRM 2'
print(len(idfi1), len(idfi2)) # wow... terrifying that these don't match
print(e)
            from IPython import embed  # embed() was previously undefined; import it for the debug drop-in
            embed()
raise e
assert id1.identity == id2.identity, 'identities do not match'
def test_symmetric(self):
msp = 'my-sym-pred'
forward = 'a', msp, 'b'
backward = tuple(reversed(forward))
f = IdentityBNode([forward], symmetric_predicates=[msp])
b = IdentityBNode([backward], symmetric_predicates=[msp])
assert f == b
def test_check(self):
id1 = IdentityBNode(self.graph1)
assert id1.check(self.graph2), 'check failed!'
def test_dropout(self):
# TODO
# test dropout of all but one subgraphs that share an identity
pass
|
[
"tgbugs@gmail.com"
] |
tgbugs@gmail.com
|
753998dd45ddabf269b8d6490c60b19f915a0d0d
|
74230f68a1bb947c88b3448e537fe41ce2c820b9
|
/2018/xman/rmd5.py
|
b8eb76b7ee8e429b6d3f7a489f189d28b76eecaa
|
[] |
no_license
|
virink/ctflog
|
98934a3d6af470ed9eb338beefc7487dbdaf3b3f
|
e5ed8f7fdf7945f9aa781824184131b00529d073
|
refs/heads/master
| 2020-05-02T13:15:08.256096
| 2020-03-09T03:48:05
| 2020-03-09T03:48:05
| 177,979,168
| 49
| 10
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,267
|
py
|
import multiprocessing
from concurrent import futures
import sys
import os
import md5
BASENUM = 1000000
global encoded
global stop
global result
stop = False
def md5x(str):
m1 = md5.new()
m1.update(str)
return m1.hexdigest()
class Multicpu():
def __init__(self, cpu_num, thread_num):
self.cpu_num = cpu_num
self.thread_num = thread_num
def _multi_cpu(self, func, job_queue, timeout):
if getLen(job_queue) == 0:
return []
index = get_index(job_queue, self.cpu_num)
cpu_pool = multiprocessing.Pool(processes=self.cpu_num)
mgr = multiprocessing.Manager()
process_bar = mgr.list()
for i in range(self.cpu_num):
process_bar.append(0)
result_queue = cpu_pool.map(_multi_thread, [[func, self.cpu_num, self.thread_num, job_queue[
index[i][0]: index[i][1] + 1], timeout, process_bar, i] for i in range(len(index))])
result = []
for rl in result_queue:
for r in rl:
result.append(r)
return result
def _func(argv):
argv[2][argv[3]] = round((argv[4] * 100.0 / argv[5]), 2)
sys.stdout.write(str(argv[2]) + ' ||' + '->' + "\r")
sys.stdout.flush()
return argv[0](argv[1])
def _multi_thread(argv):
thread_num = argv[2]
if getLen(argv[3]) < thread_num:
thread_num = argv[3]
func_argvs = [[argv[0], argv[3][i], argv[5], argv[6],
i, len(argv[3])] for i in range(len(argv[3]))]
result = []
if thread_num == 1:
for func_argv in func_argvs:
result.append(_func(func_argv))
return result
# else
thread_pool = futures.ThreadPoolExecutor(max_workers=thread_num)
result = thread_pool.map(_func, func_argvs, timeout=argv[4])
return [r for r in result]
def get_index(job_queue, split_num):
job_num = getLen(job_queue)
if job_num < split_num:
split_num = job_num
each_num = job_num / split_num
index = [[i * each_num, i * each_num + each_num - 1]
for i in range(split_num)]
residual_num = job_num % split_num
for i in range(residual_num):
index[split_num - residual_num + i][0] += i
index[split_num - residual_num + i][1] += i + 1
return index
def getLen(_list):
if _list == None:
return 0
return len(_list)
def multi_cpu(func, job_queue, cpu_num=1, thread_num=1, timeout=None):
multicpu_instance = Multicpu(cpu_num, thread_num)
return multicpu_instance._multi_cpu(func, job_queue, timeout)
def runmd5(num):
global encoded
global stop
start = BASENUM * (int(num))
i = start
while i <= start * 10:
        if os.getenv('runmd5') == '1' or stop:  # getenv returns a string; '0' would be truthy without the comparison
break
if md5x(str(i))[0:6] == encoded:
print('DeCode : %d\n' % i)
            os.environ['runmd5'] = '1'  # os.setenv does not exist; assign through os.environ
stop = True
return i
i += 1
return False
if __name__ == '__main__':
global encoded
encoded = raw_input('code : ')
while encoded:
        os.environ['runmd5'] = '0'  # os.setenv does not exist; assign through os.environ
print('Runing... %s' % encoded)
m = multi_cpu(runmd5, [i for i in range(1, 100)], 5, 10)
print(m)
encoded = raw_input('code : ')
|
[
"virink@outlook.com"
] |
virink@outlook.com
|
3f87bea7d7804e1c019ab879e8ee5c1cbb99215c
|
c359314f459e613e2d7ff20efe3998f3b5267f0c
|
/put_repo_requests_in_db.py
|
15a68523ac47d991ad008f8ccdf19b10cb241b28
|
[
"MIT"
] |
permissive
|
curiousleo/oadoi
|
30a247796982dd467f5536735c1be7ebbc447331
|
bd63cb3840cc08cc1846b1baf121bea0a84af079
|
refs/heads/master
| 2020-04-17T12:47:27.959453
| 2019-01-18T21:09:50
| 2019-01-18T21:09:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,844
|
py
|
import csv
import os
import json
import gspread
import datetime
import unicodecsv as csv
from oauth2client.service_account import ServiceAccountCredentials
from app import db
from util import safe_commit
from repository import Endpoint
from repository import Repository
from repository import RepoRequest
# this file inspired by https://www.twilio.com/blog/2017/02/an-easy-way-to-read-and-write-to-a-google-spreadsheet-in-python.html
# use creds to create a client to interact with the Google Drive API
scopes = ['https://spreadsheets.google.com/feeds']
json_creds = os.getenv("GOOGLE_SHEETS_CREDS_JSON")
creds_dict = json.loads(json_creds)
# hack to get around ugly new line escaping issues
# this works for me, but later found links to what might be cleaner solutions:
# use ast.literal_eval? https://github.com/googleapis/google-api-go-client/issues/185#issuecomment-422732250
# or maybe dumping like this might fix it? https://coreyward.svbtle.com/how-to-send-a-multiline-file-to-heroku-config
creds_dict["private_key"] = creds_dict["private_key"].replace("\\\\n", "\n")
# now continue
creds = ServiceAccountCredentials.from_json_keyfile_dict(creds_dict, scopes)
client = gspread.authorize(creds)
# Find a workbook by url
spreadsheet = client.open_by_url("https://docs.google.com/spreadsheets/d/1RcQuetbKVYRRf0GhGZQi38okY8gT1cPUs6l3RM94yQo/edit#gid=704459328")
sheet = spreadsheet.sheet1
# Extract and print all of the values
rows = sheet.get_all_values()
print(rows[0:1])
with open('out.csv','wb') as f:
w = csv.DictWriter(f, fieldnames=RepoRequest.list_fieldnames(), encoding='utf-8-sig')
for row in rows[1:]: # skip header row
my_repo_request = RepoRequest()
my_repo_request.set_id_seed(row[0])
column_num = 0
for fieldname in RepoRequest.list_fieldnames():
if fieldname != "id":
setattr(my_repo_request, fieldname, row[column_num])
column_num += 1
w.writerow(my_repo_request.to_dict())
db.session.merge(my_repo_request)
safe_commit(db)
#
# my_requests = RepoRequest.query.all()
# for my_request in my_requests:
# matching_repo = None
# matching_endpoint = None
#
# endpoint_matches = my_request.matching_endpoints()
# print u"\n"
# if endpoint_matches:
# matching_endpoint = endpoint_matches[0]
# matching_repo = matching_endpoint.meta
# else:
# print u"no matching endpoint for {}".format(my_request.pmh_url)
# matching_endpoint = Endpoint()
# matching_endpoint.pmh_url = my_request.pmh_url
# # db.session.add(matching_endpoint)
#
# if matching_repo:
# print u"yay! for {} matches {}".format(my_request.pmh_url, matching_endpoint.pmh_url)
# print u"has repository '{}'".format(matching_repo)
# else:
# repo_matches = my_request.matching_repositories()
# if repo_matches:
# matching_repo = repo_matches[0]
# print u"yay! for {} {} matches repository {}".format(
# my_request.institution_name, my_request.repo_name, matching_repo)
# else:
# print u"no matching repository for {}: {}".format(
# my_request.institution_name, my_request.repo_name)
# matching_repo = Repository()
# # db.session.add(matching_repo)
#
# # overwrite stuff with request
# matching_repo.institution_name = my_request.institution_name
# matching_repo.repository_name = my_request.repo_name
# matching_repo.home_page = my_request.repo_home_page
# matching_endpoint.repo_unique_id = matching_repo.id
# matching_endpoint.email = my_request.email
# matching_endpoint.repo_request_id = my_request.id
#
# db.session.merge(matching_endpoint)
# db.session.merge(matching_repo)
#
# safe_commit(db)
|
[
"hpiwowar@gmail.com"
] |
hpiwowar@gmail.com
|
6d2e13a3dc13e31fe80281f8964313acebd61442
|
78e60a7d8a67ed76244004e8a3ed573fbf396e41
|
/samples/get_sq_state.py
|
64970c14e689bd3ca55171246f996d154cc4c310
|
[
"MIT"
] |
permissive
|
Crivez/apiclient-python
|
837a9f7cc0453ccd3121311adc7920b5fe6b3e33
|
860fc054f546152a101e29b1af388c381075ac47
|
refs/heads/master
| 2023-06-08T13:24:09.249704
| 2021-06-17T12:16:35
| 2021-06-17T12:16:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
from voximplant.apiclient import VoximplantAPI, VoximplantException
if __name__ == "__main__":
voxapi = VoximplantAPI("credentials.json")
# Gets the current state of the smart queue with id = 1.
APPLICATION_ID = 1
SQ_QUEUE_ID = 1
try:
res = voxapi.get_sq_state(application_id=APPLICATION_ID,
sq_queue_id=SQ_QUEUE_ID)
print(res)
except VoximplantException as e:
print("Error: {}".format(e.message))
|
[
"andrey@voximplant.com"
] |
andrey@voximplant.com
|
4a54783d2c394f8ba238fbd5d64ef46f8dbfddc5
|
09b8638b41ada6d8341920f88642d829fa45fe34
|
/blog/migrations/0001_initial.py
|
517d17b4e456a459c9352af883d1d8e30761c004
|
[] |
no_license
|
HanHyunsoo/Django_Blog
|
31bf85616ed4956c894f0f6830428a29e9926be4
|
ef6b1eb3f8f03fddce1a08f2b10353ee3b529e47
|
refs/heads/master
| 2022-12-14T22:56:26.761304
| 2019-09-08T12:16:31
| 2019-09-08T12:16:31
| 157,580,624
| 0
| 1
| null | 2022-12-08T01:40:54
| 2018-11-14T16:47:45
|
Python
|
UTF-8
|
Python
| false
| false
| 614
|
py
|
# Generated by Django 2.1.3 on 2019-03-06 15:11
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
('body', models.TextField()),
],
),
]
|
[
"gustn4563@gmail.com"
] |
gustn4563@gmail.com
|
4e5c72caa25fd48fe9c962c1cc3b9048f5ac051c
|
cb2a40b70bc21d0057c96ddb2c86edceffe19707
|
/studioadmin/views/waiting_list.py
|
9382d6b048ddc7a4a35e7e40495f5bebbbae5b0f
|
[] |
no_license
|
rebkwok/pipsevents
|
ceed9f420b08cd1a3fa418800c0870f5a95a4067
|
c997349a1b4f3995ca4bb3a897be6a73001c9810
|
refs/heads/main
| 2023-08-09T14:11:52.227086
| 2023-07-27T20:21:01
| 2023-07-27T20:21:01
| 29,796,344
| 1
| 1
| null | 2023-09-13T14:32:16
| 2015-01-24T23:53:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,813
|
py
|
import logging
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib import messages
from django.template.response import TemplateResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import HttpResponseRedirect
from django.urls import reverse
from booking.models import Event, WaitingListUser
from studioadmin.views.helpers import is_instructor_or_staff
from studioadmin.views.helpers import url_with_querystring
from activitylog.models import ActivityLog
logger = logging.getLogger(__name__)
@login_required
@is_instructor_or_staff
def event_waiting_list_view(request, event_id):
event = get_object_or_404(Event, id=event_id)
waiting_list_users = WaitingListUser.objects.filter(
event__id=event_id).order_by('user__username')
ev_type = 'lessons' if event.event_type.event_type == 'CL' else 'events'
template = 'studioadmin/event_waiting_list.html'
if request.method == 'POST' and 'remove_user' in request.POST:
remove_wluser_id = request.POST.getlist('remove_user')[0]
wl_user_to_remove = WaitingListUser.objects.get(id=remove_wluser_id)
        waiting_list_users = waiting_list_users.exclude(id=remove_wluser_id)  # exclude() returns a new queryset; reassign or the removal is lost
user_to_remove = User.objects.get(id=wl_user_to_remove.user.id)
wl_user_to_remove.delete()
messages.success(
request,
"{} {} ({}) has been removed from the waiting list".format(
user_to_remove.first_name,
user_to_remove.last_name,
user_to_remove.username
)
)
ActivityLog.objects.create(
log="{} {} ({}) removed from the waiting list "
"by admin user {}".format(
user_to_remove.first_name,
user_to_remove.last_name,
user_to_remove.username,
request.user.username
)
)
return TemplateResponse(
request, template, {
'waiting_list_users': waiting_list_users, 'event': event,
'sidenav_selection': '{}_register'.format(ev_type)
}
)
@login_required
@is_instructor_or_staff
def email_waiting_list(request, event_id):
event = get_object_or_404(Event, id=event_id)
waiting_list_users = WaitingListUser.objects.filter(
event__id=event_id).values_list("user_id", flat=True)
request.session['users_to_email'] = list(waiting_list_users)
if event.event_type.event_type == "CL":
lesson_ids = [event.id]
event_ids = []
else:
event_ids = [event.id]
lesson_ids = []
return HttpResponseRedirect(
url_with_querystring(
reverse('studioadmin:email_users_view'),
events=event_ids, lessons=lesson_ids)
)
|
[
"rebkwok@gmail.com"
] |
rebkwok@gmail.com
|
8af051b8ebbdbb0c18dadaa9850c01bee070223c
|
d3cbf02ebab6a3748cceaf56757b0ec7390921ce
|
/investment/migrations/0025_auto_20201123_1602.py
|
ffd8700b5acf84e40c35d8f91796581b223201ac
|
[] |
no_license
|
rexcorp01/inv
|
69d6ec96c1f9206b3ae14b6b13bd3123e13ed3a6
|
99462cea1f8b027bc9e38d79a99e9194d1e72548
|
refs/heads/master
| 2023-09-05T11:46:48.804587
| 2021-11-04T06:34:16
| 2021-11-04T06:34:16
| 426,082,304
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,587
|
py
|
# Generated by Django 3.1.2 on 2020-11-23 16:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('investment', '0024_fundpurchasefee_operate'),
]
operations = [
migrations.AlterModelOptions(
name='fundpurchasefee',
options={'verbose_name': '基金申购赎回费率(天天基金网)', 'verbose_name_plural': '基金申购赎回费率(天天基金网)'},
),
migrations.AlterField(
model_name='fundpurchasefee',
name='operate',
field=models.CharField(choices=[('buy', '买'), ('sell', '卖')], max_length=4, verbose_name='交易方向'),
),
migrations.CreateModel(
name='StockRealtimePrice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('price', models.DecimalField(decimal_places=4, max_digits=10, null=True, verbose_name='实时价格')),
('date', models.DateField(verbose_name='日期')),
('time', models.TimeField(verbose_name='时间')),
('secucode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='investment.stock', verbose_name='股票代码')),
],
options={
'verbose_name': '股票实时价格',
'verbose_name_plural': '股票实时价格',
'db_table': 'sma_stocks_realtime_price',
},
),
]
|
[
"31942935+PengchuanC@users.noreply.github.com"
] |
31942935+PengchuanC@users.noreply.github.com
|
5f7f0eecae2ec43241c24ac83f12dedb2313b931
|
f0066a2eb7b2f92d7c04dc314af6be320724c614
|
/nova/pci/request.py
|
f9703b9962b6f268bb4bf5ab9bfa4f2a87ef5df5
|
[
"Apache-2.0"
] |
permissive
|
hyphon81/nova-for-gpu-passthrough
|
80392ea7462ade8457e77843482387d8f6593797
|
7c164980d7355d8fc40a6b155e31e325191b6a5e
|
refs/heads/master
| 2021-01-20T14:10:38.016142
| 2017-02-10T08:03:45
| 2017-02-10T08:03:45
| 82,746,438
| 0
| 1
|
Apache-2.0
| 2020-07-24T00:41:48
| 2017-02-22T01:31:23
|
Python
|
UTF-8
|
Python
| false
| false
| 5,951
|
py
|
# Copyright 2013 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Example of a PCI alias::
| pci_alias = '{
| "name": "QuicAssist",
| "product_id": "0443",
| "vendor_id": "8086",
| "device_type": "type-PCI",
| }'
Aliases with the same name and the same device_type are OR operation::
| pci_alias = '{
| "name": "QuicAssist",
| "product_id": "0442",
| "vendor_id": "8086",
| "device_type": "type-PCI",
| }'
These 2 aliases define a device request meaning: vendor_id is "8086" and
product id is "0442" or "0443".
"""
import copy
import jsonschema
from oslo_serialization import jsonutils
import six
import nova.conf
from nova import exception
from nova.i18n import _
from nova.network import model as network_model
from nova import objects
from nova.objects import fields as obj_fields
from nova.pci import utils
PCI_NET_TAG = 'physical_network'
PCI_DEVICE_TYPE_TAG = 'dev_type'
DEVICE_TYPE_FOR_VNIC_TYPE = {
network_model.VNIC_TYPE_DIRECT_PHYSICAL: obj_fields.PciDeviceType.SRIOV_PF
}
CONF = nova.conf.CONF
_ALIAS_DEV_TYPE = [obj_fields.PciDeviceType.STANDARD,
obj_fields.PciDeviceType.SRIOV_PF,
obj_fields.PciDeviceType.SRIOV_VF]
_ALIAS_CAP_TYPE = ['pci']
_ALIAS_SCHEMA = {
"type": "object",
"additionalProperties": False,
"properties": {
"name": {
"type": "string",
"minLength": 1,
"maxLength": 256,
},
"capability_type": {
"type": "string",
"enum": _ALIAS_CAP_TYPE,
},
"product_id": {
"type": "string",
"pattern": utils.PCI_VENDOR_PATTERN,
},
"vendor_id": {
"type": "string",
"pattern": utils.PCI_VENDOR_PATTERN,
},
"device_type": {
"type": "string",
"enum": _ALIAS_DEV_TYPE,
},
},
"required": ["name"],
}
def _get_alias_from_config():
"""Parse and validate PCI aliases from the nova config."""
jaliases = CONF.pci_alias
aliases = {} # map alias name to alias spec list
try:
for jsonspecs in jaliases:
spec = jsonutils.loads(jsonspecs)
jsonschema.validate(spec, _ALIAS_SCHEMA)
# It should keep consistent behaviour in configuration
# and extra specs to call strip() function.
name = spec.pop("name").strip()
dev_type = spec.pop('device_type', None)
if dev_type:
spec['dev_type'] = dev_type
if name not in aliases:
aliases[name] = [spec]
else:
if aliases[name][0]["dev_type"] == spec["dev_type"]:
aliases[name].append(spec)
else:
reason = _("Device type mismatch for alias '%s'") % name
raise exception.PciInvalidAlias(reason=reason)
except exception.PciInvalidAlias:
raise
except Exception as e:
raise exception.PciInvalidAlias(reason=six.text_type(e))
return aliases
def _translate_alias_to_requests(alias_spec):
"""Generate complete pci requests from pci aliases in extra_spec."""
pci_aliases = _get_alias_from_config()
pci_requests = [] # list of a specs dict
for name, count in [spec.split(':') for spec in alias_spec.split(',')]:
name = name.strip()
if name not in pci_aliases:
raise exception.PciRequestAliasNotDefined(alias=name)
else:
request = objects.InstancePCIRequest(
count=int(count),
spec=copy.deepcopy(pci_aliases[name]),
alias_name=name)
pci_requests.append(request)
return pci_requests
def get_pci_requests_from_flavor(flavor):
"""Get flavor's pci request.
The pci_passthrough:alias scope in flavor extra_specs
describes the flavor's pci requests, the key is
'pci_passthrough:alias' and the value has format
'alias_name_x:count, alias_name_y:count, ... '. The alias_name is
defined in 'pci_alias' configurations.
The flavor's requirement is translated into pci requests list,
each entry in the list is a dictionary. The dictionary has
three keys. The 'specs' gives the pci device properties
requirement, the 'count' gives the number of devices, and the
optional 'alias_name' is the corresponding alias definition name.
Example:
Assume alias configuration is::
| {'vendor_id':'8086',
| 'device_id':'1502',
| 'name':'alias_1'}
The flavor extra specs includes: 'pci_passthrough:alias': 'alias_1:2'.
The returned pci_requests are::
| pci_requests = [{'count':2,
| 'specs': [{'vendor_id':'8086',
| 'device_id':'1502'}],
| 'alias_name': 'alias_1'}]
:param flavor: the flavor to be checked
:returns: a list of pci requests
"""
pci_requests = []
if ('extra_specs' in flavor and
'pci_passthrough:alias' in flavor['extra_specs']):
pci_requests = _translate_alias_to_requests(
flavor['extra_specs']['pci_passthrough:alias'])
return objects.InstancePCIRequests(requests=pci_requests)
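# --- Hedged usage sketch (illustrative only, not nova code; 'QuicAssist' and
# 'OtherDev' are made-up alias names). It mirrors just the 'name:count'
# parsing that _translate_alias_to_requests() performs on the extra spec:
if __name__ == '__main__':
    def _demo_parse_alias_spec(alias_spec):
        return [(name.strip(), int(count))
                for name, count in (entry.split(':') for entry in alias_spec.split(','))]
    assert _demo_parse_alias_spec('QuicAssist:2, OtherDev:1') == \
        [('QuicAssist', 2), ('OtherDev', 1)]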
|
[
"zero812n@gmail.com"
] |
zero812n@gmail.com
|
8ca2e20d5c2475857408ec3a9a8133b6303b1e7a
|
ef187d259d33e97c7b9ed07dfbf065cec3e41f59
|
/work/atcoder/abc/abc092/C/answers/257583_yurutechdon.py
|
a3ac0a13cdaaa6ce8a218886ee08a47873ccc801
|
[] |
no_license
|
kjnh10/pcw
|
847f7295ea3174490485ffe14ce4cdea0931c032
|
8f677701bce15517fb9362cc5b596644da62dca8
|
refs/heads/master
| 2020-03-18T09:54:23.442772
| 2018-07-19T00:26:09
| 2018-07-19T00:26:09
| 134,586,379
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 580
|
py
|
def calc(N, A, i):
x = 0
r = 0
for j, a in enumerate(A):
if j == i:
continue
r += abs(x - a)
x = a
r += abs(x)
return r
def calc0(N, A):
x = 0
r = 0
for a in A:
r += abs(x - a)
x = a
return r
def calc1(N, A, ra, i):
x0 = A[i - 1]
x1 = A[i]
x2 = A[i + 1]
return ra - abs(x0 - x1) - abs(x1 - x2) + abs(x0 - x2)
#N = 1000
#A = list(range(N))
N = int(input())
A = [int(_) for _ in input().split()]
A += [0]
ra = calc0(N, A)
for i in range(N):
print(calc1(N, A, ra, i))
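# Hedged sanity check on made-up data: the O(1) update calc1() must agree
# with the O(N) recomputation calc() for every skipped stop.
_N = 3
_A = [3, 5, -1] + [0]           # visit 3, 5, -1, then return to the origin
_ra = calc0(_N, _A)
for _i in range(_N):
    assert calc(_N, _A, _i) == calc1(_N, _A, _ra, _i)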
|
[
"kojinho10@gmail.com"
] |
kojinho10@gmail.com
|
9de8a8cb58a3e52341efd46b1d033cd4c7310da3
|
2612f762ec75a0723a4d12ae1d63a30792e4c236
|
/build/dynamic_tutorials/catkin_generated/pkg.installspace.context.pc.py
|
7c78d468565d0bbaaf124e072286ccc7dfeb1f09
|
[] |
no_license
|
aransena/catkin_ws
|
efdf1a52b7dbbefbfa9cb748630f7be1ffd7f628
|
eae6b83c80803a718a8e41569d3b4e7c1c838926
|
refs/heads/master
| 2021-01-18T21:12:48.557260
| 2016-06-03T13:39:22
| 2016-06-03T13:39:22
| 52,208,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/aransena/catkin_ws/install/include".split(';') if "/home/aransena/catkin_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "dynamic_tutorials"
PROJECT_SPACE_DIR = "/home/aransena/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
|
[
"aransena@gmail.com"
] |
aransena@gmail.com
|
fbcd67d7d74c3ecae26dd5bef37a5504acbf0d17
|
183e4126b2fdb9c4276a504ff3ace42f4fbcdb16
|
/I семестр/Програмування (Python)/Лабораторні/Братун 6305/Labs/PRES_12/Q-q19.py
|
d6abad875de624e2a57c49fc115606475640bb15
|
[] |
no_license
|
Computer-engineering-FICT/Computer-engineering-FICT
|
ab625e2ca421af8bcaff74f0d37ac1f7d363f203
|
80b64b43d2254e15338060aa4a6d946e8bd43424
|
refs/heads/master
| 2023-08-10T08:02:34.873229
| 2019-06-22T22:06:19
| 2019-06-22T22:06:19
| 193,206,403
| 3
| 0
| null | 2023-07-22T09:01:05
| 2019-06-22T07:41:22
|
HTML
|
UTF-8
|
Python
| false
| false
| 371
|
py
|
def func(e1, e2, e3):
    return e1 + e2 + e3  # Return the new value
arr1 = [1, 2, 3, 4, 5]
arr2 = [10, 20, 30, 40, 50]
arr3 = [100, 200, 300, 400, 500]
print(list(map(func, arr1, arr2, arr3)))
#[111, 222, 333, 444, 555]
|
[
"mazanyan027@gmail.com"
] |
mazanyan027@gmail.com
|
d0d370e8a80da8ba1d93250ca1666c0f4f25f327
|
414393a5048e5212223051d6a5541ecb873bcc53
|
/imagenet_VGG16/imagenet_custom_dataset.py
|
e2a6edb648b40588e771ff29562d5403fb9103e3
|
[] |
no_license
|
byh1321/CIFAR100_Distorted_Channel_Selective
|
5a0fc1107ab9d60ce12504a8e474144762eda8df
|
897f2dea4e645329dfc3bf3df6b147c783bfa83f
|
refs/heads/master
| 2020-03-21T02:31:24.024771
| 2019-08-12T05:59:53
| 2019-08-12T05:59:53
| 138,002,631
| 0
| 0
| null | 2019-08-02T02:26:49
| 2018-06-20T08:26:51
|
Python
|
UTF-8
|
Python
| false
| false
| 2,174
|
py
|
from torch.utils.data.dataset import Dataset
from torchvision import transforms
import pandas as pd
import numpy as np
from PIL import Image
'''
class IMAGENET2010VAL(Dataset):
def __init__(self, csv_path):
self.transformations = transforms.Compose([transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])])
self.data_info = pd.read_csv(csv_path, header=None)
self.image_arr = np.asarray(self.data_info.iloc[:, 0])
self.label_arr = np.asarray(self.data_info.iloc[:, 1])
self.data_len = len(self.data_info.index)
def __getitem__(self, index): # returns the data and labels. This function is called from dataloader like this
single_image_name = self.image_arr[index]
img_as_img = Image.open(single_image_name)
img_as_tensor = self.transformations(img_as_img)
single_image_label = np.int(self.label_arr[index])
return (img_as_tensor, single_image_label)
def __len__(self):
return self.data_len
if __name__ == '__main__':
cifar100_dirty = CIFAR100DIRTY_TEST('/home/mhha/A2S/cifar100_test_targets.csv')
'''
#'''
class IMAGENET2010VAL(Dataset):
def __init__(self, csv_path):
self.transformations = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])])
self.data_info = pd.read_csv(csv_path, header=None)
self.image_arr = np.asarray(self.data_info.iloc[:, 0])
self.label_arr = np.asarray(self.data_info.iloc[:, 1])
self.data_len = len(self.data_info.index)
def __getitem__(self, index): # returns the data and labels. This function is called from dataloader like this
single_image_name = self.image_arr[index]
img_as_img = Image.open(single_image_name)
img_as_tensor = self.transformations(img_as_img)
        single_image_label = int(self.label_arr[index])  # np.int was removed in NumPy 1.24
return (img_as_tensor, single_image_label)
def __len__(self):
return self.data_len
if __name__ == '__main__':
    # CIFAR100DIRTY_TEST is not defined in this file; instantiate the class above instead
    val_dataset = IMAGENET2010VAL('/home/mhha/A2S/cifar100_test_targets.csv')
#'''
|
[
"byh1321@naver.com"
] |
byh1321@naver.com
|
8bd6b805e0f95619642fbddbf479b7164451afec
|
fcd64a87118a8c1e060449d8fd5b02034ac3dea7
|
/test/test_v1launchpadauthorization_merchant_data_customer.py
|
6349a458649f0386383fb31524ad115dcc8dd808
|
[] |
no_license
|
carlosgalvez-tiendeo/python-paycomet_client
|
2b68e4e1f7cfbab81d50357513f79753cf8c2f0e
|
71f1fe29495ce67e37aaed4ecc9acf5994de011a
|
refs/heads/master
| 2023-08-03T02:27:50.857164
| 2021-06-16T13:04:46
| 2021-06-16T13:04:46
| 377,492,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,147
|
py
|
# coding: utf-8
"""
PAYCOMET REST API
PAYCOMET API REST for customers. # noqa: E501
OpenAPI spec version: 2.28.0
Contact: tecnico@paycomet.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import paycomet_client
from paycomet_client.models.v1launchpadauthorization_merchant_data_customer import V1launchpadauthorizationMerchantDataCustomer # noqa: E501
from paycomet_client.rest import ApiException
class TestV1launchpadauthorizationMerchantDataCustomer(unittest.TestCase):
"""V1launchpadauthorizationMerchantDataCustomer unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1launchpadauthorizationMerchantDataCustomer(self):
"""Test V1launchpadauthorizationMerchantDataCustomer"""
# FIXME: construct object with mandatory attributes with example values
# model = paycomet_client.models.v1launchpadauthorization_merchant_data_customer.V1launchpadauthorizationMerchantDataCustomer() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"carlos.galvez@tiendeo.com"
] |
carlos.galvez@tiendeo.com
|
953b99ea8392e243fb3aede49205985f3a58c2e2
|
ad8ec2bdb69f50768cb3faeac5865f1da6db43d5
|
/virtual/bin/gunicorn_paster
|
54c4570989b837f12d17815b4850ff67b3eb0c57
|
[
"MIT"
] |
permissive
|
RisperAkinyi/NewsHighlight
|
319fcca85fd1af6980a6ee66e14caf6d09767865
|
a2e941f8158862f2b6f874458b651dfaa743c1d0
|
refs/heads/master
| 2020-06-26T16:28:05.935334
| 2019-07-31T08:21:40
| 2019-07-31T08:21:40
| 199,684,005
| 0
| 0
|
MIT
| 2019-08-01T06:48:05
| 2019-07-30T15:57:46
|
Python
|
UTF-8
|
Python
| false
| false
| 271
|
#!/home/moringa/Desktop/projects/NewsHighlight/virtual/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.pasterapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
|
[
"email@example.com"
] |
email@example.com
|
|
9bde30845ac14b116df8313ee40cac471bd37dbc
|
f6fa3e49fa292cb50ae5b795c52c486cd471c3e1
|
/crawl/test/testss.py
|
7ca0c0c829f3cdd27e601f629f30ef4956290019
|
[] |
no_license
|
longhui001/stockAnalysis
|
32e84ea7ed5e21258c2b7316fb77d7b599d65ce4
|
2cf5c073f98ed4b36b7d048c3df577ce2fc00bc2
|
refs/heads/master
| 2021-01-20T02:54:11.604864
| 2015-07-30T01:17:59
| 2015-07-30T01:17:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
# Example shell commands for running these spiders (notes, not Python code):
# scrapy crawl sinabbsCrawler -a start_url="http://guba.sina.com.cn/?s=bar&name=sz000415"
# scrapy shell "http://guba.sina.com.cn/?s=bar&name=sz000415"
# scrapy crawl sinaCrawler -a start_url="http://vip.stock.finance.sina.com.cn/corp/go.php/vCB_AllNewsStock/symbol/sz000415.phtml"
|
[
"liyi193328@gmail.com"
] |
liyi193328@gmail.com
|
cab0e28f8d9618731251bbf91595ef878c2c5d8b
|
5af75a70f82257b7808246cfeea1988a3235155d
|
/BigGAN and DiffAugGAN/utils/load_checkpoint.py
|
10c47ce163f42de8f2829cf86cd511006071737f
|
[
"MIT"
] |
permissive
|
MannyKayy/Ultra-Data-Efficient-GAN-Training
|
00be93333ecf1d1f658951582f4de32c7b676de7
|
11267b560a3a285582eae40d0bdcba87168f679f
|
refs/heads/main
| 2023-03-12T18:51:31.864943
| 2021-03-02T04:53:13
| 2021-03-02T04:53:13
| 343,878,169
| 1
| 0
|
MIT
| 2021-03-02T18:47:45
| 2021-03-02T18:47:45
| null |
UTF-8
|
Python
| false
| false
| 1,900
|
py
|
# PyTorch StudioGAN: https://github.com/POSTECH-CVLab/PyTorch-StudioGAN
# The MIT License (MIT)
# See license file or visit https://github.com/POSTECH-CVLab/PyTorch-StudioGAN for details
# utils/load_checkpoint.py
import os
import torch
import torch.nn as nn
import torch.nn.utils.prune as prune
def pruning_generate(model, state_dict):
    # prune.custom_from_mask re-parametrizes each conv in place, attaching the
    # stored binary mask, so nothing needs to be collected into a list here.
    for (name, m) in model.named_modules():
        if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
            m = prune.custom_from_mask(m, name='weight', mask=state_dict[name + ".weight_mask"])
def load_checkpoint(model, optimizer, filename, metric=False, ema=False):
# Note: Input model & optimizer should be pre-defined. This routine only updates their states.
start_step = 0
if ema:
checkpoint = torch.load(filename)
#pruning_generate(model, checkpoint['state_dict'])
model.load_state_dict(checkpoint['state_dict'])
return model
else:
checkpoint = torch.load(filename)
seed = checkpoint['seed']
run_name = checkpoint['run_name']
start_step = checkpoint['step']
#pruning_generate(model, checkpoint['state_dict'])
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
ada_p = checkpoint['ada_p']
for state in optimizer.state.values():
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.cuda()
if metric:
best_step = checkpoint['best_step']
best_fid = checkpoint['best_fid']
best_fid_checkpoint_path = checkpoint['best_fid_checkpoint_path']
return model, optimizer, seed, run_name, start_step, ada_p, best_step, best_fid, best_fid_checkpoint_path
return model, optimizer, seed, run_name, start_step, ada_p
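# Hedged sketch of the re-parametrization pruning_generate() applies per layer
# (runnable on its own with the imports above; the all-zero mask is made up):
if __name__ == '__main__':
    conv = nn.Conv2d(1, 1, kernel_size=1)
    mask = torch.zeros_like(conv.weight)           # zero out every weight
    prune.custom_from_mask(conv, name='weight', mask=mask)
    print(conv.weight)                             # masked view: all zeros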
|
[
"wiwjp619@gmail.com"
] |
wiwjp619@gmail.com
|
3655136ac48e59cbf41490ad21958f3bb469b27c
|
e11dff811ca981f428644fd70d10a7369c671bcb
|
/src/tools/ecos/cvxpy/examples/extensions/mixed_integer/integer.py
|
4a7d49126f1aa2305ef4a38eff9d78707bca0801
|
[
"GPL-3.0-only",
"GPL-3.0-or-later",
"MIT"
] |
permissive
|
riadnassiffe/Simulator
|
3c4a036b5635534929fdb04b0e9c96d64c0da71f
|
7d9ff09f26367d3714e3d10be3dd4a9817b8ed6b
|
refs/heads/master
| 2021-06-20T09:31:36.033427
| 2021-04-17T00:03:17
| 2021-04-17T00:03:17
| 16,033,879
| 0
| 0
|
MIT
| 2021-03-22T23:20:34
| 2014-01-18T20:58:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,062
|
py
|
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from noncvx_variable import NonCvxVariable
class IntVar(NonCvxVariable):
""" An integer variable. """
# All values set rounded to the nearest integer.
def _round(self, matrix):
for i,v in enumerate(matrix):
matrix[i] = round(v)
return matrix
# Constrain all entries to be the value in the matrix.
def _fix(self, matrix):
return [self == matrix]
|
[
"riad.nassiffe@gmail.com"
] |
riad.nassiffe@gmail.com
|
576cc242fbb413901eaf07b20357bfdae1a3385d
|
4664328482163fd927603d66f47209b28471cf0f
|
/venv/lib/python3.7/site-packages/patoolib/programs/unadf.py
|
496cbd68757468f0b3e1f3f569b858d1f2dc4cda
|
[
"MIT"
] |
permissive
|
emmetaobrien/dats-validator
|
08706ddab795d272391b3611cd3ba0de8c4a91a1
|
fb25f97a32119c2bce4eb50dc080a93d5ee77141
|
refs/heads/master
| 2020-12-19T05:03:17.179117
| 2020-01-22T17:28:38
| 2020-01-22T17:28:38
| 235,626,049
| 0
| 0
|
MIT
| 2020-01-22T17:24:56
| 2020-01-22T17:24:56
| null |
UTF-8
|
Python
| false
| false
| 1,056
|
py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2015 Bastian Kleineidam
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Archive commands for the unadf program."""
def extract_adf (archive, compression, cmd, verbosity, interactive, outdir):
"""Extract an ADF archive."""
return [cmd, archive, '-d', outdir]
def list_adf (archive, compression, cmd, verbosity, interactive):
"""List an ADF archive."""
return [cmd, '-l', archive]
test_adf = list_adf
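# Quick check of the argv lists these helpers build (paths are illustrative):
if __name__ == '__main__':
    print(extract_adf('disk.adf', None, 'unadf', 0, False, '/tmp/out'))
    # -> ['unadf', 'disk.adf', '-d', '/tmp/out']
    print(test_adf('disk.adf', None, 'unadf', 0, False))
    # -> ['unadf', '-l', 'disk.adf']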
|
[
"giulia.ippoliti@mail.mcgill.ca"
] |
giulia.ippoliti@mail.mcgill.ca
|
8ba7a124b9cc544202f061e3a6b1a5171c7de535
|
7a6aca7d300c0752f2a73730b743a1a7361e941b
|
/tensorflow_graphics/projects/radiance_fields/sharf/geometry_net/model.py
|
00ce6b103e63258cebfce5f79b8131029bc38dc8
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/graphics
|
ef0abe102398a58eb7c41b709393df3d0b0a2811
|
1b0203eb538f2b6a1013ec7736d0d548416f059a
|
refs/heads/master
| 2023-09-03T20:41:25.992578
| 2023-08-08T21:16:36
| 2023-08-08T21:17:31
| 164,626,274
| 2,920
| 413
|
Apache-2.0
| 2023-08-27T14:26:47
| 2019-01-08T10:39:44
|
Python
|
UTF-8
|
Python
| false
| false
| 7,406
|
py
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the geometry network."""
from absl import logging
import tensorflow as tf
import tensorflow_graphics.projects.radiance_fields.sharf.geometry_net.layers as geometry_layers
import tensorflow_graphics.projects.radiance_fields.sharf.voxel_functions as voxel_functions
class GeometryNetwork:
"""Network for generating voxels from latent codes."""
def __init__(self,
num_latent_codes=4371,
latent_code_dim=256,
fc_channels=512,
fc_activation='relu',
conv_size=4,
norm3d='batchnorm',
bce_gamma=0.8,
proj_weight=0.01,
mirror_weight=1.0):
self.num_latent_codes = num_latent_codes
self.latent_code_dim = latent_code_dim
self.fc_channels = fc_channels
self.fc_activation = fc_activation
self.conv_size = conv_size
self.norm3d = norm3d
self.model = None
self.model_backup = None
self.latent_code_vars = None
self.network_vars = None
self.global_step = None
self.latest_epoch = None
self.optimizer_network = None
self.optimizer_latent = None
self.checkpoint = None
self.manager = None
self.summary_writer = None
self.bce_gamma = bce_gamma
self.proj_weight = proj_weight
self.mirror_weight = mirror_weight
self.mask_voxels = voxel_functions.get_mask_voxels()
def get_model(self):
"""Voxel GLO network."""
fc_channels = self.fc_channels
norm3d = self.norm3d
activation = self.fc_activation
with tf.name_scope('Network/'):
latent_code = tf.keras.layers.Input(shape=(self.latent_code_dim,))
with tf.name_scope('FC_layers'):
fc0 = tf.keras.layers.Dense(fc_channels,
activation=activation)(latent_code)
fc1 = tf.keras.layers.Dense(fc_channels, activation=activation)(fc0)
fc2 = tf.keras.layers.Dense(fc_channels, activation=activation)(fc1)
fc2_as_volume = tf.keras.layers.Reshape((1, 1, 1, fc_channels))(fc2)
with tf.name_scope('GLO_VoxelDecoder'):
decoder_1 = geometry_layers.conv_t_block_3d(fc2_as_volume,
num_filters=32,
size=self.conv_size,
strides=2,
normalization=norm3d) # 2
decoder_2 = geometry_layers.conv_t_block_3d(decoder_1,
num_filters=32,
size=self.conv_size,
strides=2,
normalization=norm3d) # 4
decoder_3 = geometry_layers.conv_t_block_3d(decoder_2,
num_filters=32,
size=self.conv_size,
strides=2,
normalization=norm3d) # 8
decoder_4 = geometry_layers.conv_t_block_3d(decoder_3,
num_filters=16,
size=self.conv_size,
strides=2,
normalization=norm3d) # 16
decoder_5 = geometry_layers.conv_t_block_3d(decoder_4,
num_filters=8,
size=self.conv_size,
strides=2,
normalization=norm3d) # 32
decoder_6 = geometry_layers.conv_t_block_3d(decoder_5,
num_filters=4,
size=self.conv_size,
strides=2,
normalization=norm3d) # 64
volume_out = tf.keras.layers.Conv3DTranspose(
1,
self.conv_size,
strides=2,
padding='same',
kernel_initializer=tf.keras.initializers.glorot_normal(),
use_bias=False)(decoder_6) # 128
return tf.keras.Model(inputs=[latent_code], outputs=[volume_out])
def init_model(self):
"""Initialize models and variables."""
self.model = self.get_model()
self.model_backup = self.get_model()
self.latest_epoch = tf.Variable(0, trainable=False, dtype=tf.int64)
self.global_step = tf.Variable(0, trainable=False, dtype=tf.int64)
init_latent_code = tf.random.normal((self.num_latent_codes,
self.latent_code_dim))
self.latent_code_vars = tf.Variable(init_latent_code, trainable=True)
self.network_vars = self.model.trainable_variables
def init_optimizer(self, learning_rate_network=0.0001,
learning_rate_codes=0.0001):
"""Initialize the optimizers with a scheduler."""
self.optimizer_network = tf.keras.optimizers.Adam(
learning_rate=learning_rate_network)
self.optimizer_latent = tf.keras.optimizers.Adam(
learning_rate=learning_rate_codes)
def init_checkpoint(self, checkpoint_dir, checkpoint=None):
"""Initialize the checkpoints."""
self.summary_writer = tf.summary.create_file_writer(checkpoint_dir)
self.checkpoint = tf.train.Checkpoint(
model=self.model,
latent_code_var=self.latent_code_vars,
optimizer_network=self.optimizer_network,
optimizer_latent=self.optimizer_latent,
epoch=self.latest_epoch,
global_step=self.global_step)
self.manager = tf.train.CheckpointManager(checkpoint=self.checkpoint,
directory=checkpoint_dir,
max_to_keep=2)
self.load_checkpoint(checkpoint=checkpoint)
def load_checkpoint(self, checkpoint=None):
"""Load checkpoints."""
if checkpoint is None:
latest_checkpoint = self.manager.latest_checkpoint
else:
latest_checkpoint = checkpoint
if latest_checkpoint is not None:
logging.info('Checkpoint %s restored', latest_checkpoint)
_ = self.checkpoint.restore(latest_checkpoint).expect_partial()
for a, b in zip(self.model_backup.variables,
self.model.variables):
a.assign(b)
else:
logging.warning('No checkpoint was restored.')
def reset_models(self):
for a, b in zip(self.model.variables,
self.model_backup.variables):
a.assign(b)
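# Hedged usage sketch (assumes the project imports above resolve; sizes follow
# the defaults in __init__):
if __name__ == '__main__':
    net = GeometryNetwork(num_latent_codes=8)
    net.init_model()
    voxels = net.model(tf.random.normal((1, net.latent_code_dim)))
    print(voxels.shape)  # (1, 128, 128, 128, 1): seven stride-2 upsamplings of 1x1x1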
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
825534e31d01bd229b80de104734afbb4af5bb62
|
ea85e903db500eee66fe70ed3029b05577494d9d
|
/2020-12/1143. 最长公共子序列.py
|
c18e12ab3d0bf3adfc179ad5b9dcb2118d11acc3
|
[] |
no_license
|
baolibin/leetcode
|
fcd975eb23e5ca3fc7febbd6c47ec833595b5a51
|
bc0540ec42131439be144cca19f6355a01de992a
|
refs/heads/master
| 2021-08-15T20:40:25.580955
| 2021-01-20T09:57:21
| 2021-01-20T09:57:21
| 76,557,864
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,467
|
py
|
# coding:utf-8
'''
1143. Longest Common Subsequence
Given two strings text1 and text2, return the length of their longest common subsequence.
A subsequence of a string is a new string generated from the original string by deleting
some characters (possibly none) without changing the relative order of the remaining ones.
For example, "ace" is a subsequence of "abcde", but "aec" is not.
A common subsequence of two strings is a subsequence they both share.
If the two strings have no common subsequence, return 0.
Example 1:
Input: text1 = "abcde", text2 = "ace"
Output: 3
Explanation: the longest common subsequence is "ace"; its length is 3.
Example 2:
Input: text1 = "abc", text2 = "abc"
Output: 3
Explanation: the longest common subsequence is "abc"; its length is 3.
Example 3:
Input: text1 = "abc", text2 = "def"
Output: 0
Explanation: the two strings share no common subsequence, so return 0.
Constraints:
1 <= text1.length <= 1000
1 <= text2.length <= 1000
The input strings contain only lowercase English characters.
'''
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
m, n = len(text1), len(text2)
        # build the DP table and the base case
dp = [[0] * (n + 1) for _ in range(m + 1)]
        # run the state transitions
for i in range(1, m + 1):
for j in range(1, n + 1):
if text1[i - 1] == text2[j - 1]:
                    # found a character that belongs to the LCS
dp[i][j] = 1 + dp[i - 1][j - 1]
else:
dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
return dp[-1][-1]
'''
For detailed walkthroughs, see:
https://leetcode-cn.com/problems/longest-common-subsequence/solution/dong-tai-gui-hua-zhi-zui-chang-gong-gong-zi-xu-lie/
https://leetcode-cn.com/problems/longest-common-subsequence/solution/dong-tai-gui-hua-tu-wen-jie-xi-by-yijiaoqian/
Approach:
Longest Common Subsequence (LCS) is a classic interview problem because its solution is
textbook two-dimensional dynamic programming, and most of the harder string problems follow
the same pattern -- edit distance, for example. With small modifications the same algorithm
solves other problems as well, so LCS is worth mastering.
Dynamic programming here is exhaustive search plus pruning -- the two go hand in hand; once a
problem involves subsequences, dynamic programming is almost always the tool to reach for.
'''
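# Worked example matching Example 1 and Example 3 above:
if __name__ == '__main__':
    solver = Solution()
    print(solver.longestCommonSubsequence("abcde", "ace"))  # 3 ("ace")
    print(solver.longestCommonSubsequence("abc", "def"))    # 0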
|
[
"yangfengling@inttech.cn"
] |
yangfengling@inttech.cn
|
cd87b175cdbc12d805759c77827d25017d9a2df3
|
5182b22e2262dabfb11f0b89a8512de7491791b0
|
/ceska/spiders/spider.py
|
947674400c7d713bd221011e9012960641942fc4
|
[] |
no_license
|
SimeonYS/ceska
|
10d6bd8510012862a4c8ef536b9f5a816e73a9cf
|
7ccea2162639f8dc2c54f7f47e7da7660cead430
|
refs/heads/main
| 2023-03-24T18:38:29.212626
| 2021-03-19T12:36:51
| 2021-03-19T12:36:51
| 349,417,726
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,913
|
py
|
import re
import scrapy
from scrapy.loader import ItemLoader
from ..items import CeskaItem
from itemloaders.processors import TakeFirst
pattern = r'(\xa0)?'
base = 'https://www.cnb.cz/system/modules/org.opencms.apollo/elements/list-ajax.jsp?contentpath=/cs/.content/lists/l_00014.xml&instanceId=li_793d48a5&elementId=le_3f676017&sitepath=/cs/cnb-news/aktuality/&subsite=/sites/cnb/cs/&__locale=cs&loc=cs&option=append&hideOptions=true&reloaded&sort=priority_d&page={}'
class CeskaSpider(scrapy.Spider):
name = 'ceska'
page = 1
start_urls = [base.format(page)]
def parse(self, response):
articles = response.xpath('//div[@class="teaser "]')
for article in articles:
date = ''.join(article.xpath('.//div[@class="date"]/text()').get().split())
post_links = article.xpath('.//h2/a/@href').get()
try:
if not 'pdf' in post_links:
yield response.follow(post_links, self.parse_post, cb_kwargs=dict(date=date))
except TypeError:
print("Article not available")
if response.xpath('//h2/a').get():
self.page += 1
yield response.follow(base.format(self.page), self.parse)
def parse_post(self, response, date):
title = response.xpath('//h1/text()').get()
content = response.xpath('//div[@class="text"]//text() | //div[@class="block news"]/div/p//text() | //main//div[@class="ap-section "]//text()[not (ancestor::div[@class="ap-panel panel-group"])] | //table//text() | //div//em//text()|//div[@class="boarddecision-record"]/div//following-sibling::p|//div[@class="block vlog"]/div//text()').getall()
content = [p.strip() for p in content if p.strip()]
content = re.sub(pattern, "",' '.join(content))
item = ItemLoader(item=CeskaItem(), response=response)
item.default_output_processor = TakeFirst()
item.add_value('title', title)
item.add_value('link', response.url)
item.add_value('content', content)
item.add_value('date', date)
yield item.load_item()
|
[
"simeon.simeonov@ADPVT.com"
] |
simeon.simeonov@ADPVT.com
|
6a4a56e8e4f6c66d3f76478204297dccc60f4250
|
358519772669c73092f625f630722c38e1d33783
|
/ctools/Testing/Types/CrossBondAngleAngleType.py
|
339e54158084f93f3295be2ef32cb069ec76645f
|
[] |
no_license
|
minghao2016/mmtools
|
e7e61aca084498408ceae965dd6c9450ad89eafa
|
3ade988afb51cd54ee5a4067d8deaad88afbb0fe
|
refs/heads/master
| 2021-09-21T01:02:22.522187
| 2014-09-19T03:40:03
| 2014-09-19T03:40:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
import sys
sys.path.append('..')
from Decorators import *
from Types.AbstractAngleType import *
class CrossBondAngleAngleType(AbstractAngleType):
@accepts_compatible_units(None,
None,
None,
None,
units.nanometers,
units.nanometers,
units.nanometers,
units.kilojoules_per_mole * units.nanometers**(-2))
def __init__(self, atom1, atom2, atom3, type, r1, r2, r3, k):
AbstractAngleType.__init__(self, atom1, atom2, atom3, type)
self.r1 = r1
self.r2 = r2
self.r3 = r3
self.k = k
|
[
"choderaj@mskcc.org"
] |
choderaj@mskcc.org
|
96d971c15a7a79c262bd96806888c96e132fc4fa
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_chaperons.py
|
4d072675f3dfa5acff95f8dd2a06dec5ec22139e
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
# class header
class _CHAPERONS():
    def __init__(self,):
        self.name = "CHAPERONS"
        self.definitions = 'chaperon'  # was a bare undefined name; quoted to avoid a NameError
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['chaperon']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
1aba486226c54e541a0a920e558d5808fe2eb584
|
0b358a0d64eb03655c030b36c0ae87880b153951
|
/mmcv-1.4.7/tests/test_ops/test_roiaware_pool3d.py
|
1d63e398dabaf0ac6cfdf28bcb617a18368f6fd5
|
[
"Apache-2.0"
] |
permissive
|
jshilong/DDQ
|
db05ff309d63316c62faa59b28c66d65eef973d1
|
de9331e4579aaafab4d69e3a9a3c6638efc5392c
|
refs/heads/main
| 2023-06-03T15:02:09.949907
| 2023-05-24T03:32:12
| 2023-05-24T03:32:12
| 498,974,099
| 199
| 6
|
Apache-2.0
| 2022-06-02T05:01:53
| 2022-06-02T03:10:25
| null |
UTF-8
|
Python
| false
| false
| 6,209
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmcv.ops import (RoIAwarePool3d, points_in_boxes_all, points_in_boxes_cpu,
points_in_boxes_part)
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
def test_RoIAwarePool3d():
roiaware_pool3d_max = RoIAwarePool3d(
out_size=4, max_pts_per_voxel=128, mode='max')
roiaware_pool3d_avg = RoIAwarePool3d(
out_size=4, max_pts_per_voxel=128, mode='avg')
rois = torch.tensor(
[[1.0, 2.0, 3.0, 5.0, 4.0, 6.0, -0.3 - np.pi / 2],
[-10.0, 23.0, 16.0, 20.0, 10.0, 20.0, -0.5 - np.pi / 2]],
dtype=torch.float32).cuda(
) # boxes (m, 7) with bottom center in lidar coordinate
pts = torch.tensor(
[[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
[0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
[4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20], [-16, -18, 9],
[-21.3, -52, -5], [0, 0, 0], [6, 7, 8], [-2, -3, -4]],
dtype=torch.float32).cuda() # points (n, 3) in lidar coordinate
pts_feature = pts.clone()
pooled_features_max = roiaware_pool3d_max(
rois=rois, pts=pts, pts_feature=pts_feature)
assert pooled_features_max.shape == torch.Size([2, 4, 4, 4, 3])
assert torch.allclose(pooled_features_max.sum(),
torch.tensor(51.100).cuda(), 1e-3)
pooled_features_avg = roiaware_pool3d_avg(
rois=rois, pts=pts, pts_feature=pts_feature)
assert pooled_features_avg.shape == torch.Size([2, 4, 4, 4, 3])
assert torch.allclose(pooled_features_avg.sum(),
torch.tensor(49.750).cuda(), 1e-3)
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
def test_points_in_boxes_part():
boxes = torch.tensor(
[[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3]],
[[-10.0, 23.0, 16.0, 10, 20, 20, 0.5]]],
dtype=torch.float32).cuda(
) # boxes (b, t, 7) with bottom center in lidar coordinate
pts = torch.tensor(
[[[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
[0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
[4.7, 3.5, -12.2]],
[[3.8, 7.6, -2], [-10.6, -12.9, -20], [-16, -18, 9], [-21.3, -52, -5],
[0, 0, 0], [6, 7, 8], [-2, -3, -4], [6, 4, 9]]],
dtype=torch.float32).cuda() # points (b, m, 3) in lidar coordinate
point_indices = points_in_boxes_part(points=pts, boxes=boxes)
expected_point_indices = torch.tensor(
[[0, 0, 0, 0, 0, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1, -1]],
dtype=torch.int32).cuda()
assert point_indices.shape == torch.Size([2, 8])
assert (point_indices == expected_point_indices).all()
boxes = torch.tensor([[[0.0, 0.0, 0.0, 1.0, 20.0, 1.0, 0.523598]]],
dtype=torch.float32).cuda() # 30 degrees
pts = torch.tensor(
[[[4, 6.928, 0], [6.928, 4, 0], [4, -6.928, 0], [6.928, -4, 0],
[-4, 6.928, 0], [-6.928, 4, 0], [-4, -6.928, 0], [-6.928, -4, 0]]],
dtype=torch.float32).cuda()
point_indices = points_in_boxes_part(points=pts, boxes=boxes)
expected_point_indices = torch.tensor([[-1, -1, 0, -1, 0, -1, -1, -1]],
dtype=torch.int32).cuda()
assert (point_indices == expected_point_indices).all()
def test_points_in_boxes_cpu():
boxes = torch.tensor(
[[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
[-10.0, 23.0, 16.0, 10, 20, 20, 0.5]]],
dtype=torch.float32
) # boxes (m, 7) with bottom center in lidar coordinate
pts = torch.tensor(
[[[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
[0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
[4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20], [
-16, -18, 9
], [-21.3, -52, -5], [0, 0, 0], [6, 7, 8], [-2, -3, -4]]],
dtype=torch.float32) # points (n, 3) in lidar coordinate
point_indices = points_in_boxes_cpu(points=pts, boxes=boxes)
expected_point_indices = torch.tensor(
[[[1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [0, 1], [0, 0], [0, 0],
[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]],
dtype=torch.int32)
assert point_indices.shape == torch.Size([1, 15, 2])
assert (point_indices == expected_point_indices).all()
boxes = torch.tensor([[[0.0, 0.0, 0.0, 1.0, 20.0, 1.0, 0.523598]]],
dtype=torch.float32) # 30 degrees
pts = torch.tensor(
[[[4, 6.928, 0], [6.928, 4, 0], [4, -6.928, 0], [6.928, -4, 0],
[-4, 6.928, 0], [-6.928, 4, 0], [-4, -6.928, 0], [-6.928, -4, 0]]],
dtype=torch.float32)
point_indices = points_in_boxes_cpu(points=pts, boxes=boxes)
expected_point_indices = torch.tensor(
[[[0], [0], [1], [0], [1], [0], [0], [0]]], dtype=torch.int32)
assert (point_indices == expected_point_indices).all()
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
def test_points_in_boxes_all():
boxes = torch.tensor(
[[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
[-10.0, 23.0, 16.0, 10, 20, 20, 0.5]]],
dtype=torch.float32).cuda(
) # boxes (m, 7) with bottom center in lidar coordinate
pts = torch.tensor(
[[[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
[0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
[4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20], [
-16, -18, 9
], [-21.3, -52, -5], [0, 0, 0], [6, 7, 8], [-2, -3, -4]]],
dtype=torch.float32).cuda() # points (n, 3) in lidar coordinate
point_indices = points_in_boxes_all(points=pts, boxes=boxes)
expected_point_indices = torch.tensor(
[[[1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [0, 1], [0, 0], [0, 0],
[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]],
dtype=torch.int32).cuda()
assert point_indices.shape == torch.Size([1, 15, 2])
assert (point_indices == expected_point_indices).all()
|
[
"2392587229zsl@gmail.com"
] |
2392587229zsl@gmail.com
|
1f06329bdb958c952b45f9999504208e107b2d5f
|
9df795e57589a99838199f97945e96811e288e75
|
/W1H6.py
|
c7697994bf32feea0b63e1fc816073e065b0b847
|
[] |
no_license
|
JakeAttard/2810ICTPythonExercises
|
945783908a6bf981fc8128a5fc0b4bda6fd52eea
|
199cc42402a5cf4d8b86060af377d3906af00429
|
refs/heads/master
| 2020-06-17T19:07:46.788283
| 2019-07-16T11:22:15
| 2019-07-16T11:22:15
| 196,018,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 933
|
py
|
from PyTest import *
##//////////////////////////// PROBLEM STATEMENT //////////////////////////
## Given a list of ints, decide which is larger of the first and //
## last elements in the list, and set all the other elements to         //
## that value. Print the changed list. Implement functions for:         //
## - reading the list //
## - finding the maximum of 2 integers //
## - setting all elements of a list to a single value //
## - printing a list //
## 1, 2, 3 -> 3, 3, 3 //
## 11, 5, 9 -> 11, 11, 11 //
## 2, 11, 3 -> 3, 3, 3 //
##/////////////////////////////////////////////////////////////////////////
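## A hedged sketch of one possible solution; the helper names below are my own
## choice, since the exercise ships without an implementation:
def read_list(line):
    return [int(x) for x in line.split(',')]

def max_of_two(a, b):
    return a if a > b else b

def set_all(nums, value):
    return [value] * len(nums)

def print_list(nums):
    print(', '.join(str(n) for n in nums))

nums = read_list('2, 11, 3')
print_list(set_all(nums, max_of_two(nums[0], nums[-1])))  # 3, 3, 3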
|
[
"jakeattard18@gmail.com"
] |
jakeattard18@gmail.com
|
5111a6203d84626a25438f3983595a8afe6d5062
|
6679fd1102802bf190294ef43c434b6047840dc2
|
/openconfig_bindings/bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_evpn/prefix_limit/__init__.py
|
d39885c64343ce820dca9f5c1b53b778f4985ee5
|
[] |
no_license
|
robshakir/pyangbind-openconfig-napalm
|
d49a26fc7e38bbdb0419c7ad1fbc590b8e4b633e
|
907979dc14f1578f4bbfb1c1fb80a2facf03773c
|
refs/heads/master
| 2023-06-13T17:17:27.612248
| 2016-05-10T16:46:58
| 2016-05-10T16:46:58
| 58,091,515
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,293
|
py
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import config
import state
class prefix_limit(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-bgp - based on the path /bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/prefix-limit. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configure the maximum number of prefixes that will be
accepted from a peer
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__config','__state',)
_yang_name = 'prefix-limit'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
helper = kwargs.pop("path_helper", None)
if helper is False:
self._path_helper = False
elif helper is not None and isinstance(helper, xpathhelper.YANGPathHelper):
self._path_helper = helper
elif hasattr(self, "_parent"):
helper = getattr(self._parent, "_path_helper", False)
self._path_helper = helper
else:
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=True)
self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'bgp', u'neighbors', u'neighbor', u'afi-safis', u'afi-safi', u'l2vpn-evpn', u'prefix-limit']
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_evpn/prefix_limit/config (container)
YANG Description: Configuration parameters relating to the prefix
limit for the AFI-SAFI
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_evpn/prefix_limit/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to the prefix
limit for the AFI-SAFI
"""
try:
t = YANGDynClass(v,base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """config must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=True)""",
})
self.__config = t
if hasattr(self, '_set'):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=True)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_evpn/prefix_limit/state (container)
YANG Description: State information relating to the prefix-limit for the
AFI-SAFI
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /bgp/neighbors/neighbor/afi_safis/afi_safi/l2vpn_evpn/prefix_limit/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State information relating to the prefix-limit for the
AFI-SAFI
"""
try:
t = YANGDynClass(v,base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """state must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=True)""",
})
self.__state = t
if hasattr(self, '_set'):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/bgp', defining_module='openconfig-bgp', yang_type='container', is_config=True)
config = property(_get_config, _set_config)
state = property(_get_state, _set_state)
_pyangbind_elements = {'config': config, 'state': state, }
|
[
"rjs@jive.com"
] |
rjs@jive.com
|
58d84b967fea13f23dfd606dbbbaff2c1b0c6fca
|
eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429
|
/data/input/arq5x/gemini/gemini/gemini_subjects.py
|
8e37ac5264f4dcab2ca74e6bbb8e117536329421
|
[] |
no_license
|
bopopescu/pythonanalyzer
|
db839453bde13bf9157b76e54735f11c2262593a
|
8390a0139137574ab237b3ff5fe8ea61e8a0b76b
|
refs/heads/master
| 2022-11-22T02:13:52.949119
| 2019-05-07T18:42:52
| 2019-05-07T18:42:52
| 282,079,884
| 0
| 0
| null | 2020-07-23T23:46:09
| 2020-07-23T23:46:08
| null |
UTF-8
|
Python
| false
| false
| 5,028
|
py
|
#!/usr/bin/env python
import sys
from collections import defaultdict
from compiler import compile
from inheritance import Family
import sqlalchemy as sql
import database
from gemini_constants import *
import GeminiQuery
from functools import wraps
def compile_decorator(f):
"""decorator to automatically compile the eval strings returned from
the filter methods"""
@wraps(f)
def wrapper(*args, **kwargs):
query_string = f(*args, **kwargs)
if query_string == "False" or query_string == {"any": "False"}:
return None
if not isinstance(query_string, dict):
return compile(query_string, "<string>", "eval")
query_dict = query_string
for k, stmt in query_dict.iteritems():
query_dict[k] = compile(stmt, "<string>", "eval")
return query_dict
return wrapper
def get_phred_query(sample_id, gt_ll, genotype, prefix=" and ", invert=False):
"""Default is to test < where a low value phred-scale is high
confidence for that genotype
>>> get_phred_query(2, 22, "het")
' and gt_phred_ll_het[1] < 22'
>>> get_phred_query(2, 22, "het", prefix="")
'gt_phred_ll_het[1] < 22'
>>> get_phred_query(2, 22, "het", prefix="", invert=True)
'gt_phred_ll_het[1] > 22'
"""
assert genotype in ("het", "homref", "homalt")
if not gt_ll: return ""
# they passed in the subject:
if hasattr(sample_id, "sample_id"):
sample_id = sample_id.sample_id
sign = ["<", ">"][int(invert)]
s = "gt_phred_ll_{genotype}[{sample_id}] {sign} {gt_ll}"\
.format(sample_id=sample_id-1, genotype=genotype,
gt_ll=gt_ll, sign=sign)
return prefix + s
class Subject(object):
"""
    Describe a single subject in the samples table.
"""
def __init__(self, row):
self._set_fields_from_row(row)
def __repr__(self):
return "\t".join(map(str, [self.name, self.paternal_id,
self.maternal_id, self.phenotype]))
def set_father(self):
self.father = True
def set_mother(self):
self.mother = True
def _set_fields_from_row(self, row):
self.__dict__.update(row)
#for k, v in zip(row.keys(), row):
# self.__dict__[k] = v
self.phenotype = int(self.phenotype) if self._has_phenotype() else None
self._set_affected_status()
def _has_phenotype(self):
if hasattr(self, 'phenotype') and self.phenotype is not None:
return True
def _set_affected_status(self):
# 1 = unaffected
# 2 = affected
# 0 or -9 is unknown.
# http://pngu.mgh.harvard.edu/~purcell/plink/data.shtml#ped
pheno = str(self.phenotype)
if pheno == "2":
self.affected = True
elif pheno == "1":
self.affected = False
# distinguish unknown from known to be unaffected.
else:
self.affected = None
def get_families(db, selected_families=None):
"""
Query the samples table to return a list of Family
objects that each contain all of the Subjects in a Family.
"""
conn, metadata = database.get_session_metadata(db)
families_dict = Family.from_cursor(conn)
# if the user has specified a set of selected families
# to which the analysis should be restricted, then
# first sanity check that the family ids they specified are valid.
if selected_families is not None:
for family in selected_families.split(','):
if family not in families_dict:
sys.exit("ERROR: family \"%s\" is not a valid family_id\n" % family)
families = []
for fam in families_dict:
if selected_families is None or fam in selected_families:
families.append(families_dict[fam])
return families
def get_family_dict(args):
families = defaultdict(list)
subjects = get_subjects(args)
for subject in subjects.values():
families[subject.family_id].append(subject)
return families
def get_subjects(args, skip_filter=False):
"""
return a dictionary of subjects, optionally using the
subjects_query argument to filter them.
"""
gq = GeminiQuery.GeminiQuery(args.db)
#query = "SELECT * FROM samples"
query = ""
if not skip_filter:
if hasattr(args, 'sample_filter') and args.sample_filter:
query += args.sample_filter
res = gq.metadata.tables["samples"].select().where(sql.text(query)).execute()
samples_dict = {}
for row in res:
subject = Subject(row)
samples_dict[subject.name] = subject
return samples_dict
def get_subjects_in_family(args, family):
subjects = get_subjects(args)
family_names = [f.name for f in family]
subject_dict = {}
for subject in subjects:
if subject in family_names:
subject_dict[subject] = subjects[subject]
return subject_dict
if __name__ == "__main__":
import doctest
doctest.testmod()
|
[
"rares.begu@gmail.com"
] |
rares.begu@gmail.com
|
846c2d99b09c295dd8697264c90fbd338692b861
|
cdca191b2a50d173dc2768d2401b069983b4bc5a
|
/log_entries/core/test/test_models.py
|
dcfe95f2012cd39abbc739917742869882ad5220
|
[] |
no_license
|
lffsantos/demo_django_rest_framework
|
20bdefd7ab302eba54034ee52e361a8a38d4bc3d
|
f8eb5d23187a7de83254b2ff15f18135312d8d64
|
refs/heads/master
| 2021-01-21T10:46:26.734931
| 2017-03-08T02:35:42
| 2017-03-08T02:35:42
| 83,486,265
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 881
|
py
|
from datetime import datetime
from django.contrib.auth.models import User
from django.test import TestCase
from log_entries.core.models import Category, Event
class CategoryModelTest(TestCase):
def setUp(self):
self.category = Category.objects.create(name='Category Name')
def test_create(self):
self.assertTrue(Category.objects.exists())
class EventModelTest(TestCase):
def setUp(self):
self.user = User.objects.create(username='lffsantos', password='test',
email='lffsantos@gmail.com')
self.category = Category.objects.create(name='Category Name')
self.event = Event.objects.create(
start_date=datetime.now(), note='small note', category=self.category,
user=self.user
)
def test_create(self):
self.assertTrue(Category.objects.exists())
|
[
"lffsantos@gmail.com"
] |
lffsantos@gmail.com
|
96c4c58bea2421f64d9d6901a99f7c763bf37060
|
5dd7c4ec44b76180040badc67849ad44f81690f9
|
/unittests/test_taskbar.py
|
4b2756dc29ed07e37d2acf6e41035bf09ba17dfb
|
[] |
no_license
|
myluco/Phoenix
|
68f9abe15a673fe56da6ef4375849ba6a642622d
|
2de746beda35b8b5db547658cae1c65cfe164039
|
refs/heads/master
| 2021-01-18T15:59:05.001240
| 2016-12-04T00:08:36
| 2016-12-04T00:08:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,202
|
py
|
import unittest
from unittests import wtc
import wx
import wx.adv
import os
icoFile = os.path.join(os.path.dirname(__file__), 'mondrian.ico')
#---------------------------------------------------------------------------
class taskbar_Tests(wtc.WidgetTestCase):
def test_taskbar1(self):
icon = wx.adv.TaskBarIcon(wx.adv.TBI_DOCK)
icon.SetIcon(wx.Icon(icoFile), "The tip string")
self.assertTrue(icon.IsOk())
icon.Destroy()
self.myYield()
def test_taskbar2(self):
wx.adv.TBI_DOCK
wx.adv.TBI_CUSTOM_STATUSITEM
wx.adv.TBI_DEFAULT_TYPE
wx.adv.TaskBarIconEvent
wx.adv.wxEVT_TASKBAR_MOVE
wx.adv.wxEVT_TASKBAR_LEFT_DOWN
wx.adv.wxEVT_TASKBAR_LEFT_UP
wx.adv.wxEVT_TASKBAR_RIGHT_DOWN
wx.adv.wxEVT_TASKBAR_RIGHT_UP
wx.adv.wxEVT_TASKBAR_LEFT_DCLICK
wx.adv.wxEVT_TASKBAR_RIGHT_DCLICK
wx.adv.wxEVT_TASKBAR_CLICK
wx.adv.wxEVT_TASKBAR_BALLOON_TIMEOUT
wx.adv.wxEVT_TASKBAR_BALLOON_CLICK
#---------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
[
"robin@alldunn.com"
] |
robin@alldunn.com
|
8d9aa0ed19ba794ae0ed1f5ff5699dc0c7a128fc
|
a93cf8f695c1d1d97b8d0b9ccec6932c5e499ff8
|
/프로그래머스/프로그래머스/seungjun/더맵게.py
|
416fd1611e932a873f6f1a24c27cc884e86eb9dc
|
[] |
no_license
|
tmdwns1101/AlgorithmStudy
|
0abc108a7a73895934da2633998c7a90137d49ea
|
e8ca069e202f40e074b7311626fe8da8af057589
|
refs/heads/master
| 2021-07-22T11:32:30.817108
| 2020-05-20T16:28:27
| 2020-05-20T16:28:27
| 165,377,838
| 0
| 1
| null | 2020-02-07T07:56:16
| 2019-01-12T11:02:59
|
C++
|
UTF-8
|
Python
| false
| false
| 521
|
py
|
import heapq
def more_spicy(scoville, K):
ans = 0
heapq.heapify(scoville)
print(scoville)
while True:
first_low = heapq.heappop(scoville)
if first_low >= K:
break
ans += 1
if len(scoville) < 1:
ans = -1
break
second_low = heapq.heappop(scoville)
new_scoville = first_low + (second_low *2)
heapq.heappush(scoville, new_scoville)
return ans
res = more_spicy([1, 12, 3, 9, 10, 2], 7)
print(res)
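# Expected output of the demo above: the two weakest scovilles are mixed twice
# (1 + 2*2 = 5, then 3 + 5*2 = 13) before the minimum reaches K=7, so res == 2.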
|
[
"tmdwns1101@naver.com"
] |
tmdwns1101@naver.com
|
7c6fbf8d50d58b7f0a0e3921338317cc87a7d3ef
|
6c8579c6d825ba6bb8ca6eee3bbdd49149989eca
|
/second_phase/ex02_Greedy.py
|
f82876045c9a7e044e31bb99505c592ed3e7f808
|
[
"MIT"
] |
permissive
|
kapuni/exercise_py
|
d7fc137d6da332174e86b28a4bec9596efca17c9
|
b60ba8462d2545cae57483bcb0b3428b03c5d522
|
refs/heads/master
| 2020-06-24T00:56:20.566689
| 2019-09-28T02:22:44
| 2019-09-28T02:22:44
| 198,801,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,247
|
py
|
"""
Greedy method: when solving a problem, always make the choice that looks best right now;
instead of chasing the optimal solution, quickly find a satisfactory one.
Input:
20 6
computer 200 20
radio 20 4
clock 175 10
vase 50 2
book 10 1
painting 90 9
"""
class Thing(object):
    """An item."""
def __init__(self, name, price, weight):
self.name = name
self.price = price
self.weight = weight
@property
    def value(self):
        """Price-to-weight ratio."""
return self.price / self.weight
def input_thing():
    """Read one item's info from a line of input."""
name_str, price_str, weight_str = input().split()
return name_str, int(price_str), int(weight_str)
def main():
    """Main entry point."""
max_weight, num_of_things = map(int, input().split())
all_things = []
for _ in range(num_of_things):
all_things.append(Thing(*input_thing()))
all_things.sort(key=lambda x: x.value, reverse=True)
total_weight = 0
total_price = 0
for thing in all_things:
if total_weight + thing.weight <= max_weight:
            print(f'The thief takes the {thing.name}')
total_weight += thing.weight
total_price += thing.price
    print(f'Total value: {total_price} dollars')
if __name__ == '__main__':
main()
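# Hedged standalone check on the docstring's sample data (no stdin needed;
# call demo() by hand to verify). Sorting by price/weight ratio makes the
# greedy take the vase, clock, book and radio, for a total value of 255:
def demo():
    things = [Thing('computer', 200, 20), Thing('radio', 20, 4),
              Thing('clock', 175, 10), Thing('vase', 50, 2),
              Thing('book', 10, 1), Thing('painting', 90, 9)]
    things.sort(key=lambda x: x.value, reverse=True)
    total_weight = total_price = 0
    for thing in things:
        if total_weight + thing.weight <= 20:
            total_weight += thing.weight
            total_price += thing.price
    assert total_price == 255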
|
[
"910048757@qq.com"
] |
910048757@qq.com
|
727ffe3c5be6860e52c9b43e1c5b8da8a1c06e48
|
bc539788b876773e294383863252c1637de9eb7f
|
/scrapy/PycharmProjects/Reptile/ven/renrenche.py
|
9d7e185cb2e3ca6d2f245a584b281c79b339c800
|
[] |
no_license
|
umsung/scrapy
|
4eb56bf74f3e617e49dcdec61cf77010eb912f4f
|
deacd9f289159c5af114b0dd3110448ad7eb43e8
|
refs/heads/master
| 2020-05-31T14:11:46.530793
| 2019-10-16T01:32:25
| 2019-10-16T01:32:25
| 190,321,772
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,628
|
py
|
# -*- coding: utf-8 -*-
import scrapy
import re
from fontTools.ttLib import TTFont
import requests
class RenrencheSpider(scrapy.Spider):
name = 'renrenche'
start_urls = ['https://www.renrenche.com/gz/ershouche/?&plog_id=bd49e5ed507aebb8599cdde8188a3eef']
def start_requests(self):
# for i in range(1, 5, 1):
# self.start_urls.append(
# 'https://www.renrenche.com/gz/ershouche/p{}/?&plog_id=79d79d263044559732d687b64c258ab4'.format(i))
for url in self.start_urls:
yield scrapy.Request(url=url, callback=self.parse)
    def parse(self, response):
        """
        https://www.renrenche.com/gz/ershouche/p1/?&plog_id=79d79d263044559732d687b64c258ab4
        At first glance, nothing between the list page and the detail page is loaded via ajax,
        so extracting the fields with XPath is enough. Opening the page source, however, shows
        the numbers have been "poisoned": the source data differs from the displayed data --
        a custom font used as an anti-scraping measure.
        """
# html = response.body.decode('utf-8')
# select = etree.HTML(html)
# style = select.xpath('//style/text()')[0]
# # url('https://misc.rrcimg.com/ttf/rrcttf86293b76594a1bf9ace9fd979b62db63.woff') format('woff')
# font_url = re.search(r"url\('(.*?\.woff)'\) format\('woff'\),", str(style)).group(1)
        # get the font url
font_url = re.findall('(https://misc.rrcimg.com.*\.ttf)', response.body.decode('utf-8'))[0]
        # download the font file
with open('人人车.ttf', 'wb') as f:
f.write(requests.get(font_url).content)
font_dict = font_name('人人车.ttf')
node_list = response.xpath('//*[@id="search_list_wrapper"]/div/div/div[1]/ul//li')
for node in node_list:
item = {}
            # car name
item['car_name'] = node.xpath('./a/h3/text()').extract_first('')
item['car_name'] = base_font(font_dict, item['car_name']), response.url
            # car info
item['car_info'] = node.xpath('./a/div[2]/span').xpath('string(.)').extract_first('')
item['car_info'] = re.sub('\s', '', item['car_info'])
item['car_info'] = base_font(font_dict, item['car_info']), response.url
            # car price
item['car_price'] = node.xpath('./a/div[4]/div/text()').extract_first('')
item['car_price'] = re.sub('\s', '', item['car_price'])
            # down payment
item['car_down_payment'] = node.xpath('./a/div[4]//div[@class="m-l"]/text()').extract_first('')
            # link
item['car_link'] = node.xpath('./a/@href').extract_first('')
item['car_link'] = response.urljoin(item['car_link'])
yield scrapy.Request(url=item['car_link'], callback=self.parse_item, meta={'item': item})
next_pages = response.xpath('//ul[@class="pagination js-pagination"]/li[last()]/a/@href').extract_first('')
next_pages = response.urljoin(next_pages)
yield scrapy.Request(url=next_pages, callback=self.parse)
def parse_item(self, response):
item = response.meta['item']
        # new-car purchase tax
item['car_tax'] = response.xpath('//div[@class="middle-content"]/div/div').xpath('string(.)').extract_first('')
item['car_tax'] = re.sub('\s', '', item['car_tax'])
        # purchase method
item['car_method'] = response.xpath('//div[@class="list payment-list"]/p[1]/text()').extract_first('')
        # down payment
item['car_payment'] = response.xpath('//div[@class="list payment-list"]/p[2]/text()').extract_first('')
        # monthly payment
item['car_month'] = response.xpath('//div[@class="list payment-list"]/p[3]/text()').extract_first('')
        # service fee
item['car_fee'] = response.xpath('//div[@class="detail-version3-service"]/p[2]').xpath(
'string(.)').extract_first('')
item['car_fee'] = re.sub('\s', '', item['car_fee'])
        # license-plate city
item['car_location'] = response.xpath('//div[@class="licensed-city"]/p/strong/text()').extract_first('')
        # out-of-town transfer check
item['car_find'] = response.xpath('//li[@class="span5 car-fluid-standard"]/div/p/strong/text()').extract_first(
'')
        # # vehicle inspection expiry date
# item['car_annual'] = response.xpath('//div[@class="info-about-car"]/div/ul/li[2]/text()').extract_first('')
# item['car_annual'] = re.sub('\s', '', item['car_annual'])
        # # commercial insurance expiry date
# item['car_insurance'] = response.xpath('//div[@class="info-about-car"]/div/ul/li[4]/text()').extract_first(
# default='')
# item['car_insurance'] = re.sub('\s', '', item['car_insurance'])
        # # invoice available or not
# item['car_invoice'] = response.xpath('//div[@class="info-about-car"]/div/ul/li[6]/text()').extract_first(
# default='')
# item['car_invoice'] = re.sub('\s', '', item['car_invoice'])
# # 是否保养
# item['car_maintenance'] = response.xpath('//div[@class="info-about-car"]/div/ul/li[8]/text()').extract_first(
# default='')
# item['car_maintenance'] = re.sub('\s', '', item['car_maintenance'])
yield item
def font_name(name):
    '''
    Parse the font file via a hand-built mapping from glyph names to digits.
    '''
    number_map = {'eight': '8', 'five': '5', 'one': '1', 'nine': '9', 'period': '.', 'three': '3', 'six': '6',
                  'two': '2', 'seven': '7', 'four': '4', 'zero': '0'}
    # the downloaded font file
    font = TTFont(name)
    num = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    # pull the zero..nine glyph names out of the font, skipping index 0 (.notdef)
    font_num = font.getGlyphOrder()[1:]
# print('--------------',font_num) # ['zero', 'one', 'two', 'three', 'four', 'five', 'seven', 'eight', 'six', 'nine']
dic_font = dict(zip(num, font_num))
# print('**************',dic_font) # {'0': 'zero', '1': 'one', '2': 'two', '3': 'three', '4': 'four', '5': 'five', '6': 'seven', '7': 'eight', '8': 'six', '9': 'nine'}
dict_num = {}
for k, v in dic_font.items():
for x, y in number_map.items():
if dic_font[k] == x:
dict_num[y] = k
return dict_num
def base_font(font_dict, base_str):
    '''
    Decode a string by mapping obfuscated characters through the glyph dict.
    :param font_dict: mapping produced by font_name()
    :param base_str: the obfuscated string scraped from the page
    :return: the decoded string
    '''
    str_lis = []
    num_lis = list(font_dict.keys())
    for i in base_str:
        if i in num_lis:
            str_lis.append(font_dict[i])
        else:
            str_lis.append(i)
    str_ = ''.join(str_lis)
    return str_
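# Self-contained sketch of how font_name()/base_font() compose (the mapping
# below is hypothetical -- a real run derives it from the downloaded .ttf):
# demo_map = {'0': '6', '6': '0'}   # pretend the font swaps these two digits
# base_font(demo_map, '2016')       # -> '2610'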
|
[
"545699233@qq.com"
] |
545699233@qq.com
|
a2345ab272adc99d54adbc6b432d99beb050fffd
|
19c198a6b7c39d5d5bf617071ff4e04da00bc37c
|
/utils/__init__.py
|
a6109e2f30dbf1816ac7f9be98bee1326e09776b
|
[
"MIT"
] |
permissive
|
corenel/lintcode
|
3bed15e023468ab748f81bdad0f57288c90a10e7
|
e985cc8541ad26352a4ae2c8c8e4a572a5368e43
|
refs/heads/master
| 2020-04-24T04:11:16.034144
| 2019-03-30T05:33:48
| 2019-03-30T05:33:48
| 171,694,555
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
from .linked_list import ListNode, DListNode, LinkedList
from .binary_tree import TreeNode, BinaryTree
__all__ = (
    'ListNode', 'DListNode', 'LinkedList',
    'TreeNode', 'BinaryTree',
)
|
[
"xxdsox@gmail.com"
] |
xxdsox@gmail.com
|
9d3bdd21b443a2196b748c9140a0d1065a874532
|
a0c030be3f64e854fb93f9068463574a71445409
|
/smartlink2/base/management/commands/load_initial_data.py
|
d2bf603b0eeb17a67532ac42300e285127ba126d
|
[
"Apache-2.0"
] |
permissive
|
quanpower/smartlink2
|
a233178fb5cecd95fcbcb7f29819035e05a31aee
|
051d4a1b63de7885f47d141d37f1dd8e667bc977
|
refs/heads/master
| 2020-03-12T03:54:57.119170
| 2018-05-16T07:49:18
| 2018-05-16T07:49:18
| 130,433,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 999
|
py
|
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from django.core.management import call_command
from wagtail.core.models import Site, Page
class Command(BaseCommand):
def handle(self, **options):
fixtures_dir = os.path.join(settings.BASE_DIR, 'base', 'fixtures')
fixture_file = os.path.join(fixtures_dir, 'smartlink2.json')
# Wagtail creates default Site and Page instances during install, but we already have
# them in the data load. Remove the auto-generated ones.
if Site.objects.filter(hostname='localhost').exists():
Site.objects.get(hostname='localhost').delete()
if Page.objects.filter(title='Welcome to your new Wagtail site!').exists():
Page.objects.get(title='Welcome to your new Wagtail site!').delete()
call_command('loaddata', fixture_file, verbosity=0)
print("Awesome. Your data is loaded! The bakery's doors are almost ready to open...")
|
[
"quanpower@gmail.com"
] |
quanpower@gmail.com
|
c8b517029204f44f4f96df1948befb308303c2a2
|
7b3711d4c6d7284255ba0270d49d120f984bf7c6
|
/problems/993_cousins_in_binary_tree.py
|
5f16f8c11d24961eae2193364d16e0279402ba9f
|
[] |
no_license
|
loganyu/leetcode
|
2d336f30feb55379aaf8bf0273d00e11414e31df
|
77c206305dd5cde0a249365ce7591a644effabfc
|
refs/heads/master
| 2023-08-18T09:43:10.124687
| 2023-08-18T00:44:51
| 2023-08-18T00:44:51
| 177,875,222
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,844
|
py
|
'''
In a binary tree, the root node is at depth 0, and children of each depth k node are at depth k+1.
Two nodes of a binary tree are cousins if they have the same depth, but have different parents.
We are given the root of a binary tree with unique values, and the values x and y of two different nodes in the tree.
Return true if and only if the nodes corresponding to the values x and y are cousins.
Example 1:
Input: root = [1,2,3,4], x = 4, y = 3
Output: false
Example 2:
Input: root = [1,2,3,null,4,null,5], x = 5, y = 4
Output: true
Example 3:
Input: root = [1,2,3,null,4], x = 2, y = 3
Output: false
Note:
The number of nodes in the tree will be between 2 and 100.
Each node has a unique integer value from 1 to 100.
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import collections
class Solution:
def isCousins(self, root: TreeNode, x: int, y: int) -> bool:
x_found = False
y_found = False
queue = collections.deque([root])
while queue:
for _ in range(len(queue)):
node = queue.popleft()
if node.val == x:
x_found = True
elif node.val == y:
y_found = True
if node.left and node.right:
if node.left.val == x and node.right.val == y:
return False
elif node.left.val == y and node.right.val == x:
return False
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
if x_found or y_found:
return x_found and y_found
return False
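# Minimal local harness (sketch; on LeetCode the judge supplies the TreeNode
# class above and the collections import):
# root = TreeNode(1); root.left = TreeNode(2); root.right = TreeNode(3)
# root.left.right = TreeNode(4); root.right.right = TreeNode(5)
# Solution().isCousins(root, 5, 4)  # -> True (Example 2 above)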
|
[
"yu.logan@gmail.com"
] |
yu.logan@gmail.com
|
137f03b89a24f65eb9a523a4f90a4cc5e1d107ba
|
4806f706b703a4b9ccd8cbca8fcbf300621c32ec
|
/easy/Remove Duplicates from Sorted List/solution.py
|
1ee8ac610e9793d7cf1a9a39cada702b76117f65
|
[
"MIT"
] |
permissive
|
vishsanghishetty/LC-Python
|
aa46151162d74ae3d1edb89462848c56e6e39575
|
65f99a3694549af88c7702b598de1a8ccb7db5fb
|
refs/heads/main
| 2023-07-19T00:08:50.813224
| 2021-09-14T15:32:09
| 2021-09-14T15:32:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 607
|
py
|
# Time complexity: O(n)
# Approach: Two pointer Solution
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:
nHead = ListNode(-1)
nHead.next = head
tmp, tmp2 = nHead.next, nHead.next
while tmp and tmp2:
while tmp and tmp2 and tmp.val==tmp2.val:
tmp2 = tmp2.next
tmp.next = tmp2
tmp = tmp.next
return nHead.next
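# Quick local check (sketch; needs the ListNode class above uncommented and
# `from typing import Optional`):
# head = ListNode(1, ListNode(1, ListNode(2)))
# out = Solution().deleteDuplicates(head)  # out traverses 1 -> 2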
|
[
"ashutoshhathidara98@gmail.com"
] |
ashutoshhathidara98@gmail.com
|
23c0c6b7b562579f9c4d1030cb28c3548153bc47
|
15fb5a41109e43fb185fad66b8d452f177d1d24c
|
/conf.py
|
87f80182776cd8fd60f5599ec19aea5f74ce31c2
|
[
"MIT"
] |
permissive
|
Rgveda/QUANTAXIS
|
2f546a3de00f2aacfd09dc257c39bafbdbc41be0
|
70a0ff84774253b0c23a5d0e39b7e772df440b7c
|
refs/heads/master
| 2021-06-20T21:49:59.500785
| 2021-01-29T03:52:51
| 2021-01-29T03:52:51
| 177,425,581
| 20
| 7
|
MIT
| 2020-02-26T15:24:55
| 2019-03-24T14:26:17
|
Python
|
UTF-8
|
Python
| false
| false
| 6,131
|
py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import sphinx_rtd_theme
import os
import sys
import QUANTAXIS as QA
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
sys.path.insert(0, os.path.abspath('QUANTAXIS'))
project = 'QUANTAXIS'
copyright = '2018, yutiansut'
author = 'yutiansut'
# The short X.Y version
version = QA.__version__
# The full version, including alpha/beta/rc tags
release = QA.__version__
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
#'sphinx_automodapi.automodapi'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'python'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
#html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'QUANTAXISdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'QUANTAXIS.tex', 'QUANTAXIS Documentation',
'yutiansut', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'quantaxis', 'QUANTAXIS Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'QUANTAXIS', 'QUANTAXIS Documentation',
author, 'QUANTAXIS', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
|
[
"yutiansut@qq.com"
] |
yutiansut@qq.com
|
42bdc6b8462003678eef904f90fc3fb8a5bebc0b
|
3cefad27ca9f6ba70cd76237d320f402299ac6ac
|
/antlir/tests/test_subvol_utils_inner.py
|
3595ec4b74c93008399eed4a9a5d662e40cb3954
|
[
"MIT"
] |
permissive
|
lhl2617/antlir
|
1880be26c1d5aa46d8e516dd294f3eb040b75847
|
1041732e8163c1316d3e45c0ba4db7937faa4809
|
refs/heads/main
| 2023-05-29T03:25:17.558306
| 2021-06-12T23:44:59
| 2021-06-12T23:45:50
| 376,619,288
| 0
| 0
|
MIT
| 2021-06-13T21:15:55
| 2021-06-13T18:58:56
| null |
UTF-8
|
Python
| false
| false
| 1,939
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import platform
import unittest
from ..fs_utils import temp_dir
from ..subvol_utils import (
Subvol,
volume_dir,
)
class InnerSubvolTestCase(unittest.TestCase):
def test_delete_inner_subvols(self):
# This branch is used for testing inside an image via the
# `:test-subvol-utils-inner` test. The hostname is set in the
# test definition.
if platform.node() == "test-subvol-utils-inner":
volume_tmp_dir = b"/"
# This branch is used for "non-image" testing, ie: when the test is run
# in the context of the host via a standard `python_unittest`.
else:
volume_tmp_dir = volume_dir() / "tmp"
try:
os.mkdir(volume_tmp_dir)
except FileExistsError:
pass
with temp_dir(
dir=volume_tmp_dir.decode(), prefix="delete_recursive"
) as td:
try:
outer = Subvol(td / "outer")
outer.create()
inner1 = Subvol(td / "outer/inner1")
inner1.create()
inner2 = Subvol(td / "outer/inner1/inner2")
inner2.create()
inner3 = Subvol(td / "outer/inner3")
inner3.create()
outer.delete()
self.assertEqual([], td.listdir())
except BaseException: # Clean up even on Ctrl-C
try:
inner2.delete()
finally:
try:
inner1.delete()
finally:
try:
inner3.delete()
finally:
outer.delete()
raise
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
a015801564b213bcb89b27edf3d4d90abb2ecac3
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/maxProduct_20200731211104.py
|
6ad7e36b9ea31311c74a36dc56eac3420a783a37
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
import sys
def maxThree(arr):
if len(arr) < 3:
return -1
maxProduct = -(sys.maxsize -1)
print(maxProduct)
maxThree([-3, 1, 2])
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
bdd51190ad4dd56994b752ddc0e68c516bb6af37
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_084/ch88_2020_06_22_17_23_50_676212.py
|
adf3e7aaf91173c5155e038a15e213075660b05a
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 322
|
py
|
from math import *
class retangulo:
def __init__(self,P1,P2):
self.IE=P1
self.SD=P2
def calcula_perimetro(self):
        P=2*(abs(self.IE.x-self.SD.x)+abs(self.IE.y-self.SD.y))
return P
def calcula_area(self):
A=abs(self.IE.x-self.SD.x)*abs(self.IE.y-self.SD.y)
return A
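# Sanity check (sketch; a Point class is not defined in this file -- any
# object with .x and .y attributes works, e.g. a namedtuple):
# from collections import namedtuple
# Ponto = namedtuple('Ponto', 'x y')
# r = retangulo(Ponto(0, 0), Ponto(3, 4))
# r.calcula_perimetro()  # -> 14
# r.calcula_area()       # -> 12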
|
[
"you@example.com"
] |
you@example.com
|
e61fc4c01995d4e2b99acccdde8d6e7b89fbbcbd
|
f0120ec71e44284f59f1fca92e41ff6d05df4d9b
|
/production/server/server_interface.py
|
fea942e61f1141957fcfcdf264f06ab0b0f4c4d7
|
[] |
no_license
|
Vlad-Shcherbina/icfpc2017-tbd
|
55d8c7022485843d47723c4c0c9fddbba5b8ee56
|
695e9e525e460b231c75461294176d6ebc0a6f3d
|
refs/heads/master
| 2021-03-30T15:46:19.387400
| 2017-10-14T16:01:04
| 2017-10-14T16:01:04
| 80,864,945
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,138
|
py
|
from typing import NamedTuple, List, Dict
from datetime import datetime
from time import time
from production.bot_interface import Map, Settings
INFINITY = 3600
class PlayerStats(NamedTuple):
ID: int
name: str
token: str
games: int
mu: float
sigma: float
class GameStats(NamedTuple):
ID: int
mapname: str
futures: bool
options: bool
splurges: bool
participants: List[str]
scores: List[int]
timestamp: int # TODO: datetime?
class MatchInfo(NamedTuple):
participants: List[str]
map: Map
mapname: str
settings: Settings
class ServerInterrupt(KeyboardInterrupt):
pass
class Estimation:
    def __init__(self):
        self.estimation = time()
def start(self, N, turns):
self.timestart = time()
self.turns = turns
self.estimation = time() + 10 * N + turns
def set(self, i):
if i == 0:
return
self.estimation = (time() - self.timestart) / i * self.turns + self.timestart
def get(self):
return self.estimation
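    # usage sketch: start() seeds the estimate when a game launches, set(i)
    # refines it from the elapsed time after turn i, and get() returns the
    # predicted finish time in time.time() seconds.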
class Player:
def __init__(self, stats: PlayerStats):
self.stats = stats
# self.lastgame = lastgame
self.waiting = []
self.ongoing = []
def new_conn(self, deadline):
self.waiting.append(deadline)
def dead_conn(self, deadline):
self.waiting.remove(deadline)
def in_game(self, deadline, time_estimation):
self.waiting.remove(deadline)
self.ongoing.append(time_estimation)
def end_game(self, time_estimation):
while time_estimation in self.ongoing:
self.ongoing.remove(time_estimation)
def count(self):
return len(self.waiting) + len(self.ongoing)
def first_deadline(self):
return min(self.waiting) if self.waiting else time() + INFINITY
def first_finish(self):
return min(x.get() for x in self.ongoing) if self.ongoing else time() + INFINITY
def copy(self):
newplayer = Player(self.stats)
newplayer.waiting = self.waiting[:]
newplayer.ongoing = self.ongoing[:]
return newplayer
|
[
"kyra-mensk@yandex.ru"
] |
kyra-mensk@yandex.ru
|
baeb1d0716b3fe402977b96429be369bb60a25c8
|
c49590eb7f01df37c8ec5fef00d0ffc7250fa321
|
/openapi_client/models/sell.py
|
b079fbb0b63c1a32d9a27046bc53008c0892fdb4
|
[] |
no_license
|
harshad5498/ks-orderapi-python
|
373a4b85a56ff97e2367eebd076f67f972e92f51
|
237da6fc3297c02e85f0fff1a34857aaa4c1d295
|
refs/heads/master
| 2022-12-09T19:55:21.938764
| 2020-09-03T05:22:51
| 2020-09-03T05:22:51
| 293,533,651
| 0
| 0
| null | 2020-09-07T13:19:25
| 2020-09-07T13:19:24
| null |
UTF-8
|
Python
| false
| false
| 4,247
|
py
|
# coding: utf-8
"""
KS Trade API's
The version of the OpenAPI document: 1.0
"""
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class Sell(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'price': 'float',
'quantity': 'int',
'orders': 'int'
}
attribute_map = {
'price': 'price',
'quantity': 'quantity',
'orders': 'orders'
}
def __init__(self, price=None, quantity=None, orders=None, local_vars_configuration=None): # noqa: E501
"""Sell - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._price = None
self._quantity = None
self._orders = None
self.discriminator = None
if price is not None:
self.price = price
if quantity is not None:
self.quantity = quantity
if orders is not None:
self.orders = orders
@property
def price(self):
"""Gets the price of this Sell. # noqa: E501
:return: The price of this Sell. # noqa: E501
:rtype: float
"""
return self._price
@price.setter
def price(self, price):
"""Sets the price of this Sell.
:param price: The price of this Sell. # noqa: E501
:type price: float
"""
self._price = price
@property
def quantity(self):
"""Gets the quantity of this Sell. # noqa: E501
:return: The quantity of this Sell. # noqa: E501
:rtype: int
"""
return self._quantity
@quantity.setter
def quantity(self, quantity):
"""Sets the quantity of this Sell.
:param quantity: The quantity of this Sell. # noqa: E501
:type quantity: int
"""
self._quantity = quantity
@property
def orders(self):
"""Gets the orders of this Sell. # noqa: E501
:return: The orders of this Sell. # noqa: E501
:rtype: int
"""
return self._orders
@orders.setter
def orders(self, orders):
"""Sets the orders of this Sell.
:param orders: The orders of this Sell. # noqa: E501
:type orders: int
"""
self._orders = orders
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Sell):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Sell):
return True
return self.to_dict() != other.to_dict()
|
[
"thebhushanp@gmail.com"
] |
thebhushanp@gmail.com
|
6c4ccb2da13c5f4a8c909a769b8db1f7f5e11b6a
|
62e45255088abb536e9ea6fcbe497e83bad171a0
|
/ippython/funciones_duplica_348.py
|
7e6eeddb5b9a16c421c5e012523191dca58e6cca
|
[] |
no_license
|
jmery24/python
|
a24f562c8d893a97a5d9011e9283eba948b8b6dc
|
3e35ac9c9efbac4ff20374e1dfa75a7af6003ab9
|
refs/heads/master
| 2020-12-25T21:56:17.063767
| 2015-06-18T04:59:05
| 2015-06-18T04:59:05
| 36,337,473
| 0
| 0
| null | 2015-05-27T02:26:54
| 2015-05-27T02:26:54
| null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 5 07:30:41 2013
@author: daniel
"""
#program: funciones_duplica_numero_348.py
#define a function that doubles the values of the original list,
#modifying the original list in place
#exercise 348
#define function <duplica>
def duplica(lista):
    for i in range(len(lista)):
        lista[i] = lista[i]*2
    return lista
#main program
#input
numeros = [1, 6, 15, 25, 30, 40]
#run the procedure
print 'Original numbers: ', numeros
print
print 'Doubled numbers: ', duplica(numeros)
print
print 'original list, modified: ', numeros
|
[
"danmery@gmail.com"
] |
danmery@gmail.com
|
83ceaca4cedb1cf4a5f043b17207f25b96b7b66c
|
ab72563047515d98cd43481d8c42b4be73a9e7ae
|
/tests/trac/test-trac-0179.py
|
30e2ee5a8132e3a0e2351f6fdeabecfebef6ecc4
|
[
"Python-2.0",
"MIT",
"Apache-2.0"
] |
permissive
|
CantemoInternal/pyxb
|
08924a260886106cf499617a225de71cbf075a84
|
6979a4d3c13e10059da2e2d096acef6c06fc1ced
|
refs/heads/master
| 2021-01-18T10:52:39.712997
| 2014-10-19T11:18:38
| 2014-10-19T11:18:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,454
|
py
|
# -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.utils.domutils
from xml.dom import Node
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<xsd:complexType name="tAny">
<xsd:all minOccurs="0">
<xsd:element type="xsd:int" name="a" minOccurs="1"/>
<xsd:element type="xsd:int" name="b" minOccurs="1"/>
</xsd:all>
</xsd:complexType>
<xsd:element name="eAny" type="tAny"/>
</xsd:schema>'''
code = pyxb.binding.generate.GeneratePython(schema_text=xsd)
#open('code.py', 'w').write(code)
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
class TestTrac0179 (unittest.TestCase):
def testBasic (self):
instance = CreateFromDocument("<eAny/>")
self.assertTrue(instance.a is None)
self.assertTrue(instance.b is None)
instance = CreateFromDocument("<eAny><a>1</a><b>2</b></eAny>")
self.assertEqual(instance.a, 1)
self.assertEqual(instance.b, 2)
instance = CreateFromDocument("<eAny><b>2</b><a>1</a></eAny>")
self.assertEqual(instance.a, 1)
self.assertEqual(instance.b, 2)
self.assertRaises(pyxb.IncompleteElementContentError, CreateFromDocument, "<eAny><a>1</a></eAny>")
if __name__ == '__main__':
unittest.main()
|
[
"pab@pabigot.com"
] |
pab@pabigot.com
|
5754ed817e041ad412ca2d519e7f16fc4c733763
|
deaf519d1ee104784a56df98a7eb3705efbd1120
|
/carsir_test/easy_sell/test/test_log_control.py
|
b2644c4aff10847586c5947bca5e5578339e16e4
|
[] |
no_license
|
606keng/weeds_study
|
6391fbb47fb06abf67b7651250373f169fbdfd3e
|
df9d96009cbdf84176efbf4b02f43cb1d5208524
|
refs/heads/master
| 2021-04-23T07:43:52.472097
| 2021-04-07T13:10:58
| 2021-04-07T13:10:58
| 249,910,294
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 527
|
py
|
import time
import logging
logging.basicConfig(level=logging.DEBUG)
def test_1():
log = logging.getLogger('test_1')
time.sleep(1)
log.debug('after 1 sec')
time.sleep(1)
log.debug('after 2 sec')
time.sleep(1)
log.debug('after 3 sec')
assert 1, 'should pass'
def test_2():
log = logging.getLogger('test_2')
time.sleep(1)
log.debug('after 1 sec')
time.sleep(1)
log.debug('after 2 sec')
time.sleep(1)
log.debug('after 3 sec')
assert 0, 'failing for demo purposes'
|
[
"18791076614@163.com"
] |
18791076614@163.com
|
009d0651c358c2e02347a5fb7444f1ed0a5ab0c3
|
23a4dcb819f4cc7fa0b046f8804be35b05b2779a
|
/modules/debugger/breakpoints/function_breakpoints.py
|
815e220b5e77546d5613b4d4ae2115ae0eb6b3dd
|
[
"MIT"
] |
permissive
|
quycao/sublime_debugger
|
07784b5892af9d3eacd5c9df59af541b268f4f42
|
2a168e409300fc260c85f3e2c8786577223e4232
|
refs/heads/master
| 2020-09-13T20:38:28.677494
| 2019-11-26T03:59:22
| 2019-11-26T03:59:22
| 222,896,722
| 0
| 0
|
MIT
| 2019-11-20T09:12:56
| 2019-11-20T09:12:55
| null |
UTF-8
|
Python
| false
| false
| 3,660
|
py
|
from ... typecheck import *
from ... import core
from ... import ui
from ... import dap
class FunctionBreakpoint:
def __init__(self, dap: dap.FunctionBreakpoint, enabled: bool = True) -> None:
self.enabled = enabled
self.dap = dap
self.result = None #type: Optional[dap.BreakpointResult]
def into_json(self) -> dict:
return {
'dap': self.dap.into_json(),
'enabled': self.enabled
}
@staticmethod
def from_json(json: dict) -> 'FunctionBreakpoint':
return FunctionBreakpoint(
dap.FunctionBreakpoint.from_json(json['dap']),
json['enabled']
)
@property
def image(self) -> ui.Image:
if not self.enabled:
return ui.Images.shared.dot_disabled
if not self.verified:
return ui.Images.shared.dot_emtpy
return ui.Images.shared.dot
@property
def tag(self) -> Optional[str]:
return 'ƒn'
@property
def name(self):
return self.dap.name
@property
def condition(self):
return self.dap.condition
@property
def hitCondition(self):
return self.dap.hitCondition
@property
def verified(self):
if self.result:
return self.result.verified
return True
class FunctionBreakpoints:
def __init__(self):
self.breakpoints = [] #type: List[FunctionBreakpoint]
self.on_updated = core.Event() #type: core.Event[List[FunctionBreakpoint]]
self.on_send = core.Event() #type: core.Event[List[FunctionBreakpoint]]
def __iter__(self):
return iter(self.breakpoints)
def into_json(self) -> list:
return list(map(lambda b: b.into_json(), self.breakpoints))
def load_json(self, json: list):
self.breakpoints = list(map(lambda j: FunctionBreakpoint.from_json(j), json))
self.on_updated(self.breakpoints)
def clear_session_data(self):
for breakpoint in self.breakpoints:
breakpoint.result = None
self.updated(send=False)
def updated(self, send: bool=True):
self.on_updated(self.breakpoints)
if send:
self.on_send(self.breakpoints)
def set_result(self, breakpoint: FunctionBreakpoint, result: dap.BreakpointResult) -> None:
breakpoint.result = result
self.updated(send=False)
def toggle(self, breakpoint: FunctionBreakpoint):
breakpoint.enabled = not breakpoint.enabled
self.updated()
def edit(self, breakpoint: FunctionBreakpoint):
def set_name(value: str):
if value:
breakpoint.dap.name = value
self.updated()
def set_condition(value: str):
breakpoint.dap.condition = value or None
self.updated()
def set_hit_condition(value: str):
breakpoint.dap.hitCondition = value or None
self.updated()
def toggle_enabled():
self.toggle(breakpoint)
def remove():
self.breakpoints.remove(breakpoint)
self.updated()
return ui.InputList([
ui.InputListItemCheckedText(
set_name,
"Function",
"Name of function to break on",
breakpoint.dap.name,
),
ui.InputListItemCheckedText(
set_condition,
"Condition",
"Breaks when expression is true",
breakpoint.dap.condition,
),
ui.InputListItemCheckedText(
set_hit_condition,
"Count",
"Breaks when hit count condition is met",
breakpoint.dap.hitCondition,
),
ui.InputListItemChecked (
toggle_enabled,
"Enabled",
"Disabled",
breakpoint.enabled,
),
ui.InputListItem(
remove,
"Remove"
),
], placeholder= "Edit Breakpoint on function {}".format(breakpoint.name))
def add_command(self) -> None:
ui.InputText(self.add, "Name of function to break on").run()
def add(self, name: str):
self.breakpoints.append(
FunctionBreakpoint(
dap=dap.FunctionBreakpoint(name, None, None),
enabled=True
)
)
self.updated()
def remove_all(self):
self.breakpoints = []
self.updated()
|
[
"2889367+daveleroy@users.noreply.github.com"
] |
2889367+daveleroy@users.noreply.github.com
|
e5dea951b67d3261a0a4358277126467dce5125d
|
7087a5dd1772c9456f098bc024a894dcaeef5432
|
/backup/file.py
|
d15661444d72f8287c7ccfca0d168d0ba5d63f21
|
[] |
no_license
|
santhoshchami/kubecctl-python
|
5be7a5a17cc6f08ec717b3eb1c11719ef7653aba
|
cd45af465e25b0799d65c573e841e2acb983ee68
|
refs/heads/master
| 2021-06-23T11:00:43.615062
| 2019-07-10T16:57:06
| 2019-07-10T16:57:06
| 145,669,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
import sys
import re
file_name = sys.argv[1]
with open(file_name) as fh:
    def svcFile(value):
        for line in fh:
            if re.search(r"^name=", line):
                data = re.findall(r'".*"', line)
                name = re.sub('"', '', data[0])
                print(name)
    svcFile('name')
|
[
"root@kube-node02.local"
] |
root@kube-node02.local
|
72d0e3a9aa321c3bce22f15f1bc1f9a7979fafaa
|
3ee85c3a459ac04cb28da0a40dc50c88aaee7fd3
|
/src/python3/sdp/scripts/animation.py
|
d9e4d415957dc00f5311ae8318787f509e3d05fb
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
LeiShi/Synthetic-Diagnostics-Platform
|
48b6f99da6f9a1c70cd1c1387745e76fbcee4ef4
|
870120d3fd14b2a3c89c6e6e85625d1e9109a2de
|
refs/heads/master
| 2021-03-30T17:36:13.497935
| 2020-04-11T23:45:43
| 2020-04-11T23:45:43
| 52,027,597
| 7
| 5
|
BSD-3-Clause
| 2018-04-09T22:50:08
| 2016-02-18T18:08:18
|
Python
|
UTF-8
|
Python
| false
| false
| 889
|
py
|
"""
Short script providing simple animation creation functions
"""
import numpy as np
import matplotlib.animation as anim
import matplotlib.pyplot as plt
class MovieMaker:
"""create 2D movie from data
Inputs:
    data: 3-dimensional ndarray of shape (NT, NY, NX); NT is the number of time steps, NY and NX the vertical and horizontal pixel counts
"""
def __init__(self,data,interval=50,**im_para):
self.data = data
self.interval = interval
self.frame = 0
self.fig = plt.figure()
self.im = plt.imshow(data[0],**im_para)
self.t_tag = self.fig.text(0.9,0.9,'t=0',ha = 'right',va='top')
def updatefig(self,t):
self.im.set_array(self.data[t])
self.t_tag.set_text('t={0}'.format(t))
    def showmovie(self):
        # bound frames to NT so the animation does not index past the data
        ani = anim.FuncAnimation(self.fig, self.updatefig, frames=len(self.data),
                                 interval=self.interval)
        plt.show()
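# Usage sketch (hypothetical data; any (NT, NY, NX) array works, and extra
# keyword arguments are forwarded to plt.imshow):
# import numpy as np
# MovieMaker(np.random.rand(20, 64, 64), interval=100, cmap='viridis').showmovie()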
|
[
"shilei8583@gmail.com"
] |
shilei8583@gmail.com
|
7f8a1c183fc1bc1863fe7a285aad59a164b6bb76
|
fdb9bdc6c4ab2f14ba71e544493706d5e275899f
|
/fhir/resources/R4B/tests/test_enrollmentrequest.py
|
4470f6d2976fc1a82c1c5d502126c8e936f53030
|
[
"BSD-3-Clause"
] |
permissive
|
nazrulworld/fhir.resources
|
6ae8aea8180c611b0c5050759c6dcdf63e4cb061
|
1fd6ea476b27b3fcb8c4ef8f23bc51cf161e69e3
|
refs/heads/main
| 2023-08-30T18:27:27.277249
| 2023-07-03T19:57:06
| 2023-07-03T19:57:06
| 165,297,877
| 256
| 83
|
NOASSERTION
| 2023-08-24T15:34:05
| 2019-01-11T19:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,958
|
py
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/EnrollmentRequest
Release: R4B
Version: 4.3.0
Build ID: c475c22
Last updated: 2022-05-28T12:47:40.239+10:00
"""
from pydantic.validators import bytes_validator # noqa: F401
from .. import fhirtypes # noqa: F401
from .. import enrollmentrequest
def impl_enrollmentrequest_1(inst):
assert inst.candidate.reference == "Patient/1"
assert inst.coverage.reference == "Coverage/9876B1"
assert inst.created == fhirtypes.DateTime.validate("2014-08-16")
assert inst.id == "22345"
assert inst.identifier[0].system == "http://happyvalley.com/enrollmentrequest"
assert inst.identifier[0].value == "EN22345"
assert inst.insurer.reference == "Organization/2"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.provider.reference == "Organization/1"
assert inst.status == "active"
assert inst.text.div == (
'<div xmlns="http://www.w3.org/1999/xhtml">A human-readable'
" rendering of the EnrollmentRequest.</div>"
)
assert inst.text.status == "generated"
def test_enrollmentrequest_1(base_settings):
"""No. 1 tests collection for EnrollmentRequest.
Test File: enrollmentrequest-example.json
"""
filename = base_settings["unittest_data_dir"] / "enrollmentrequest-example.json"
inst = enrollmentrequest.EnrollmentRequest.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "EnrollmentRequest" == inst.resource_type
impl_enrollmentrequest_1(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "EnrollmentRequest" == data["resourceType"]
inst2 = enrollmentrequest.EnrollmentRequest(**data)
impl_enrollmentrequest_1(inst2)
|
[
"connect2nazrul@gmail.com"
] |
connect2nazrul@gmail.com
|
673687f08641540fd9ab5e89b4947b4216c2a265
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-1/2c581ccde8187fbaad06bf1a57c4d61576a13386-<_meijerint_indefinite_1>-bug.py
|
1f2dc6ecd5e2843ba758fb7e0f266e0b13a1513c
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,405
|
py
|
def _meijerint_indefinite_1(f, x):
' Helper that does not attempt any substitution. '
from sympy import Integral, piecewise_fold, nan, zoo
_debug('Trying to compute the indefinite integral of', f, 'wrt', x)
gs = _rewrite1(f, x)
if (gs is None):
return None
(fac, po, gl, cond) = gs
_debug(' could rewrite:', gs)
res = S(0)
for (C, s, g) in gl:
(a, b) = _get_coeff_exp(g.argument, x)
(_, c) = _get_coeff_exp(po, x)
c += s
fac_ = ((fac * C) / (b * (a ** ((1 + c) / b))))
rho = (((c + 1) / b) - 1)
t = _dummy('t', 'meijerint-indefinite', S(1))
def tr(p):
return [((a + rho) + 1) for a in p]
if any(((b.is_integer and ((b <= 0) == True)) for b in tr(g.bm))):
r = (- meijerg(tr(g.an), (tr(g.aother) + [1]), (tr(g.bm) + [0]), tr(g.bother), t))
else:
r = meijerg((tr(g.an) + [1]), tr(g.aother), tr(g.bm), (tr(g.bother) + [0]), t)
place = 0
if ((b < 0) or f.subs(x, 0).has(nan, zoo)):
place = None
r = hyperexpand(r.subs(t, (a * (x ** b))), place=place)
res += powdenest((fac_ * r), polar=True)
def _clean(res):
"This multiplies out superfluous powers of x we created, and chops off\n constants:\n\n >> _clean(x*(exp(x)/x - 1/x) + 3)\n exp(x)\n\n cancel is used before mul_expand since it is possible for an\n expression to have an additive constant that doesn't become isolated\n with simple expansion. Such a situation was identified in issue 6369:\n\n\n >>> from sympy import sqrt, cancel\n >>> from sympy.abc import x\n >>> a = sqrt(2*x + 1)\n >>> bad = (3*x*a**5 + 2*x - a**5 + 1)/a**2\n >>> bad.expand().as_independent(x)[0]\n 0\n >>> cancel(bad).expand().as_independent(x)[0]\n 1\n "
from sympy import cancel
res = expand_mul(cancel(res), deep=False)
return Add._from_args(res.as_coeff_add(x)[1])
res = piecewise_fold(res)
if res.is_Piecewise:
newargs = []
for (expr, cond) in res.args:
expr = _my_unpolarify(_clean(expr))
newargs += [(expr, cond)]
res = Piecewise(*newargs)
else:
res = _my_unpolarify(_clean(res))
return Piecewise((res, _my_unpolarify(cond)), (Integral(f, x), True))
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
1fe5a25fee8855277893c53410b7569c72bd622f
|
ce08ceeeb6a681db53c7dbd07398e885e4367f8c
|
/lunmap.py
|
b1509024b070bdfaeb7e00cc41587bba5fce23ab
|
[] |
no_license
|
FengZiQ/cli
|
f4efb5caaeec5fae6b5abfdca0046316033413f7
|
49b14acb6bb04ecfc757918835e71397794f66c0
|
refs/heads/master
| 2021-09-07T14:52:11.143413
| 2018-02-24T08:20:57
| 2018-02-24T08:20:57
| 99,288,642
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,901
|
py
|
# -*- coding: utf-8 -*-
# 2018.01.23
from ssh_connect import ssh_conn
from cli_test import *
from remote import server
import json
from find_unconfigured_pd_id import find_pd_id
data = 'data/lunmap.xlsx'
def precondition():
try:
clean_up_environment()
pdId = find_pd_id()
# create pool
server.webapi('post', 'pool', {"name": "T_lunMap_P0", "pds": pdId[6:9], "raid_level": "raid5"})
# create volume and export it
for i in range(3):
server.webapi('post', 'volume', {'pool_id': 0, 'name': 'T_lunMap_V' + str(i), 'capacity': '100GB'})
server.webapi('post', 'volume/' + str(i) + '/export')
# create snapshot and export it
for i in range(3):
server.webapi('post', 'snapshot', {"name": "T_lunMap_SS" + str(i), "type": 'volume', "source_id": 2})
server.webapi('post', 'snapshot/' + str(i) + '/export')
# create clone and export it
for i in range(3):
server.webapi('post', 'clone', {"name": "T_lunMap_C" + str(i), "source_id": 2})
server.webapi('post', 'clone/' + str(i) + '/export')
# create initiator
for i in range(4):
server.webapi('post', 'initiator', {'type': 'iSCSI', 'name': 'T.com' + str(i)})
server.webapi('post', 'initiator', {'type': 'fc', 'name': '00-11-22-33-00-00-11-1' + str(i)})
except:
tolog("precondition is failed\r\n")
return
def clean_up_environment():
initiator_request = server.webapi('get', 'initiator')
try:
initiator_info = json.loads(initiator_request["text"])
for initiator in initiator_info:
# delete all initiator
server.webapi('delete', 'initiator/' + str(initiator['id']))
except:
tolog("precondition is failed\r\n")
return
def enable_lmm(c):
cli_setting = cli_test_setting()
cli_setting.setting(c, data, 'enable_lmm')
return cli_setting.FailFlag
def add_lunmap(c):
# precondition
precondition()
cli_setting = cli_test_setting()
cli_setting.setting(c, data, 'add_lunmap')
return cli_setting.FailFlag
def addun_lunmap(c):
cli_setting = cli_test_setting()
cli_setting.setting(c, data, 'addun_lunmap')
return cli_setting.FailFlag
def list_lunmap(c):
cli_list = cli_test_list()
cli_list.list(c, data, 'list_lunmap')
return cli_list.FailFlag
def del_lunmap(c):
cli_delete = cli_test_delete()
cli_delete.delete(c, data, 'del_lunmap', 5)
return cli_delete.FailFlag
def dellun_lunmap(c):
cli_delete = cli_test_delete()
cli_delete.delete(c, data, 'dellun_lunmap')
return cli_delete.FailFlag
def disable_lmm(c):
cli_setting = cli_test_setting()
cli_setting.setting(c, data, 'disable_lmm')
return cli_setting.FailFlag
def invalid_setting_for_lunmap(c):
cli_failed_test = cli_test_failed_test()
cli_failed_test.failed_test(c, data, 'invalid_setting_for_lunmap')
return cli_failed_test.FailFlag
def invalid_option_for_lunmap(c):
cli_failed_test = cli_test_failed_test()
cli_failed_test.failed_test(c, data, 'invalid_option_for_lunmap')
return cli_failed_test.FailFlag
def missing_parameter_for_lunmap(c):
cli_failed_test = cli_test_failed_test()
cli_failed_test.failed_test(c, data, 'missing_parameter_for_lunmap')
# clean up environment
clean_up_environment()
find_pd_id()
return cli_failed_test.FailFlag
if __name__ == "__main__":
start = time.clock()
c, ssh = ssh_conn()
enable_lmm(c)
add_lunmap(c)
addun_lunmap(c)
list_lunmap(c)
del_lunmap(c)
dellun_lunmap(c)
disable_lmm(c)
invalid_setting_for_lunmap(c)
invalid_option_for_lunmap(c)
missing_parameter_for_lunmap(c)
ssh.close()
    elapsed = time.clock() - start
    print "Elapsed %s" % elapsed
|
[
"feng1025352529@qq.com"
] |
feng1025352529@qq.com
|
0fd640f856ffa0cc035d95776d7187fcc91031a4
|
1c7fa268ee031395806f38d52b6a7282ba5a4633
|
/hr_python/regex_parse/easy/DetectFloat.py
|
b2754c393f1d48d70ee773ddd148ef937800d8f8
|
[] |
no_license
|
murugesan-narayan/hr_python
|
d2f562ecd2aa6c4eef4aab8363d5d040447ed727
|
86542342fc77cf7c95ebd08e5142186410f6385d
|
refs/heads/master
| 2022-04-12T14:59:16.293611
| 2020-03-24T14:25:30
| 2020-03-24T14:25:30
| 249,729,320
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 152
|
py
|
import re
if __name__ == '__main__':
for i in range(int(input())):
f = input()
print(re.match(r"^[+-]?\d*\.\d+$", f) is not None)
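# Examples of what the pattern accepts (illustrative, not in the original):
# '+4.50' -> True, '-.5' -> True, '1.' -> False, 'abc' -> False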
|
[
"murugesan.narayan@gmail.com"
] |
murugesan.narayan@gmail.com
|
b3733a246955de76c0490f84a680b6c786c3c7c3
|
1511782b2cc3dcf1f7e058e5046ec67a5561ba51
|
/2020/0310-0312_DP_gasshuku/abc007c.py
|
f12f7a270968711d5e8f016e556d2c6e22b48622
|
[] |
no_license
|
keiouok/atcoder
|
7d8a053b0cf5b42e71e265450121d1ad686fee6d
|
9af301c6d63b0c2db60ac8af5bbe1431e14bb289
|
refs/heads/master
| 2021-09-07T11:48:55.953252
| 2021-07-31T15:29:50
| 2021-07-31T15:29:50
| 186,214,079
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,447
|
py
|
import sys, re, os
from collections import deque, defaultdict, Counter
from math import ceil, sqrt, hypot, factorial, pi, sin, cos, radians
from itertools import permutations, combinations, product, accumulate
from operator import itemgetter, mul
from copy import deepcopy
from string import ascii_lowercase, ascii_uppercase, digits
from fractions import gcd
def input(): return sys.stdin.readline().strip()
def INT(): return int(input())
def MAP(): return map(int, input().split())
def S_MAP(): return map(str, input().split())
def LIST(): return list(map(int, input().split()))
def S_LIST(): return list(map(str, input().split()))
sys.setrecursionlimit(10 ** 9)
INF = float('inf')
mod = 10 ** 9 + 7
H, W = MAP()
sy, sx = MAP()
gy, gx = MAP()
sy = sy - 1
sx = sx - 1
gy = gy - 1
gx = gx - 1
visited = [[False] * W for i in range(H)]
C = [list(input()) for i in range(H)]
dx = [-1, 0, 1, 0]
dy = [0, 1, 0, -1]
def bfs():
que = deque([(sy, sx)])
C[sy][sx] = 0
visited[sy][sx] = True
while que:
y, x = que.popleft()
for i in range(4):
nx = x + dx[i]
ny = y + dy[i]
if 0 <= nx < W and 0 <= ny < H and C[ny][nx] != "#" and visited[ny][nx] == False:
C[ny][nx] = C[y][x] + 1
visited[ny][nx] = True
que.append((ny, nx))
print(C[gy][gx])
if __name__ == "__main__":
bfs()
|
[
"hu.youjia.hx2@is.naist.jp"
] |
hu.youjia.hx2@is.naist.jp
|
d73ea65a3ef7e920759b7d60129d750bb29618ce
|
87dd9022421d5f23f815ca6d794af48c007579dc
|
/items/admin.py
|
afe7d4f0b45ead1a4305f077baf2f05e1e4e2b22
|
[] |
no_license
|
iKenshu/todo-list
|
c94fc0558ef00625d9a59a45cbed7510947ff2b3
|
f115d4035b9f7fc82253388a51982637fcd98276
|
refs/heads/master
| 2020-03-19T18:37:21.852860
| 2018-06-19T13:13:36
| 2018-06-19T13:13:36
| 112,334,181
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
from django.contrib import admin
from .models import Item
@admin.register(Item)
class ItemAdmin(admin.ModelAdmin):
list_display = ('name', 'description', 'pub_date')
list_editable = ('description',)
prepopulated_fields = {'slug': ('name',)}
|
[
"undefined.hlo.o@gmail.com"
] |
undefined.hlo.o@gmail.com
|
4f86f318e0f21834f77d5873ea25905b86d87f12
|
d227fb26e33128afe868bef60e3042f7c6576643
|
/editor/Welder/src/Core/MapEditor/BrushPanels.py
|
c8b08cf9ca3a4fbf50aa695014c5d2459f9d58bb
|
[] |
no_license
|
boisei0/arcreator
|
1e57b9cc61d5b38bfd0d62237592cfd9f371eca9
|
555739cafdeeed19d3c25c4948416a6ecb7697d5
|
refs/heads/master
| 2020-12-02T05:02:36.242572
| 2014-08-05T19:25:41
| 2014-08-05T19:25:41
| 22,642,617
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,801
|
py
|
import wx
from Boot import WelderImport
Kernel = WelderImport('Kernel')
Core = WelderImport('Core')
KM = Kernel.Manager
Panels = Core.Panels
class TilesetPanel(wx.ScrolledWindow, Panels.PanelBase):
_arc_panel_info_string = "Name Caption Left CloseB BestS MinimizeM Layer Row Pos MinimizeB DestroyOC"
_arc_panel_info_data = {
"Name": "Tileset",
"Caption": "Tileset",
"CloseB": False,
"BestS": (32 * 8, 32 * 12),
"MinimizeM": ["POS_SMART", "CAPT_SMART"],
"Layer": 1,
"Row": 1,
"Pos": 1,
"MinimizeB": True
}
def __init__(self, parent):
wx.ScrolledWindow.__init__(self, parent, wx.ID_ANY)
self.bindFocus()
self.panel = wx.Panel(self, wx.ID_ANY)
self.SetScrollbars(32, 32, 8, 50)
class MapTreePanel(wx.Panel, Panels.PanelBase):
_arc_panel_info_string = "Name Caption Left CloseB BestS MinimizeM Layer Row Pos MinimizeB IconARCM DestroyOC"
_arc_panel_info_data = {
"Name": "Maps",
"Caption": "Maps",
"CloseB": False,
"BestS": (32 * 8, 32 * 4),
"MinimizeM": ["POS_SMART", "CAPT_SMART"],
"Layer": 1,
"Row": 1,
"Pos": 1,
"MinimizeB": True,
'IconARCM': 'project_icon'
}
def __init__(self, parent, mapEditerPanel=None):
wx.Panel.__init__(self, parent)
self.mapEditerPanel = mapEditerPanel
#set up Sizer
box = wx.BoxSizer(wx.VERTICAL)
#set up tree
mapTreeCtrl = KM.get_component("MapTreeCtrl").object
self.treectrl = mapTreeCtrl(self, -1, wx.Point(0, 0), wx.Size(160, 250), True)
#add ctrls to sizer
box.Add(self.treectrl, 1, wx.ALL | wx.EXPAND)
#set sizer
self.SetSizerAndFit(box)
#bind events
self.treectrl.Bind(wx.EVT_LEFT_DCLICK, self.TreeLeftDClick)
def TreeLeftDClick(self, event):
pt = event.GetPosition()
item, flags = self.treectrl.HitTest(pt)
if item:
data = self.treectrl.GetItemData(item).GetData()
if data:
map_id, name = data
project = Kernel.GlobalObjects.get_value("PROJECT")
map_ = project.getMapData(map_id)
tilesets = project.getData("Tilesets")
if Kernel.GlobalObjects.has_key("PanelManager"):
Kernel.GlobalObjects.get_value("PanelManager")\
.dispatch_panel("MapEditorPanel", "Map Editor Panel " + str(map_id), arguments=[map_, tilesets],
info="Name Caption", data={"Name": "[" + str(map_id) + "] " + name,
"Caption": "[" + str(map_id) + "] " + name})
event.Skip()
|
[
"boisei0@hubsec.eu"
] |
boisei0@hubsec.eu
|
527ab3f5e0e2ee94f044e89480b14ae779c9d6ae
|
b87b80f29d0012827582890d6e47ec98f85376a5
|
/prediction-models/app.py
|
151dc0fe2b45c3559a1822c92234f2fab0d2ebee
|
[] |
no_license
|
amlannandy/FitnessLive
|
843058a6b84029fc91ef53d0731804b715ceaf19
|
8514520c00804a42c5e46fc4dbdaff57de56a591
|
refs/heads/master
| 2023-03-08T02:02:22.036686
| 2021-02-19T07:43:20
| 2021-02-19T07:43:20
| 331,222,344
| 0
| 0
| null | 2021-02-19T06:36:59
| 2021-01-20T07:07:45
|
Dart
|
UTF-8
|
Python
| false
| false
| 1,484
|
py
|
from flask import Flask, jsonify, request
from scripts.chd import run_chd_model
from scripts.diabetes import run_diabetes_model
app = Flask(__name__)
@app.route('/')
def home():
return 'Flask server is working!', 200
@app.route('/diabetes', methods=['POST'])
def run_diabetes():
try:
data = request.get_json()
except:
response = {
'success': False,
'msg': 'Health data missing',
}
return jsonify(response), 400
try:
glucose = data['glucose']
blood_pressure = data['bloodPressure']
res = run_diabetes_model(glucose, blood_pressure)
response = {
'success': True,
'result': res,
}
return jsonify(response), 200
except:
response = {
'success': False,
'msg': 'Server Error'
}
return jsonify(response), 500
@app.route('/coronary-heart-disease', methods=['POST'])
def run_chd():
try:
data = request.get_json()
except:
response = {
'success': False,
'msg': 'Health data missing',
}
return jsonify(response), 400
try:
res = run_chd_model()
response = {
'success': True,
'result': res,
}
return jsonify(response), 200
except:
response = {
'success': False,
'msg': 'Server Error'
}
return jsonify(response), 500
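# Manual smoke test (sketch; assumes a local `flask run` on the default
# port 5000):
# import requests
# r = requests.post('http://localhost:5000/diabetes',
#                   json={'glucose': 120, 'bloodPressure': 80})
# print(r.status_code, r.json())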
|
[
"amlannandy5@gmail.com"
] |
amlannandy5@gmail.com
|
ec558fecae74fcd4bc138c1706c2befffa2bf1a1
|
0abd5799f42e169ecd7eb25970d32121cd483bfd
|
/xpdtools/pipelines/save_tiff.py
|
c0682d205300daa24f1e67311d1d99acbf40bd0f
|
[] |
no_license
|
CJ-Wright/xpdtools
|
7ecede857d8e7b91e58861b064c53d5ab88ef540
|
680c79bb30d7b58c9ed03eb286b5a438161c39df
|
refs/heads/master
| 2021-06-24T07:14:04.308324
| 2017-12-05T18:54:50
| 2017-12-05T18:54:50
| 113,220,820
| 0
| 0
| null | 2017-12-05T18:55:09
| 2017-12-05T18:55:09
| null |
UTF-8
|
Python
| false
| false
| 7,878
|
py
|
"""Save tiff pipeline"""
import os
from operator import sub
import shed.event_streams as es
import tifffile
from shed.event_streams import star
from bluesky.callbacks.broker import LiveImage
from streamz import Stream
from xpdan.db_utils import query_dark, temporal_prox
from xpdan.dev_utils import _timestampstr
from xpdan.formatters import render_and_clean
from xpdan.io import dump_yml
from xpdan.pipelines.pipeline_utils import (if_dark, base_template)
_s = set()
# TODO: refactor templating
def conf_save_tiff_pipeline(db, save_dir, *, write_to_disk=False, vis=True,
image_data_key='pe1_image'):
"""Total data processing pipeline for XPD
Parameters
----------
db: databroker.broker.Broker instance
The databroker holding the data, this must be specified as a `db=` in
the function call (keyword only argument)
write_to_disk: bool, optional
If True write files to disk, defaults to False
save_dir: str
The folder in which to save the data, this must be specified as a
`save_dir=` in the function call (keyword only argument)
vis: bool, optional
If True visualize the data. Defaults to False
image_data_key: str, optional
The key for the image data, defaults to `pe1_image`
Returns
-------
source: Stream
The source for the graph
See also
--------
xpdan.tools.mask_img
"""
print('start pipeline configuration')
light_template = os.path.join(save_dir, base_template)
raw_source = Stream(stream_name='Raw Data') # raw data
source = es.fill_events(db, raw_source) # filled raw data
# DARK PROCESSING
# if not dark do dark subtraction
if_not_dark_stream = es.filter(lambda x: not if_dark(x), source,
input_info={0: ((), 0)},
document_name='start',
stream_name='If not dark',
full_event=True)
eventify_raw_start = es.Eventify(if_not_dark_stream,
stream_name='eventify raw start')
h_timestamp_stream = es.map(_timestampstr, if_not_dark_stream,
input_info={0: 'time'},
output_info=[('human_timestamp',
{'dtype': 'str'})],
full_event=True,
stream_name='human timestamp')
# only the primary stream
if_not_dark_stream_primary = es.filter(lambda x: x[0]['name'] == 'primary',
if_not_dark_stream,
document_name='descriptor',
stream_name='Primary')
dark_query = es.Query(db,
if_not_dark_stream,
query_function=query_dark,
query_decider=temporal_prox,
stream_name='Query for FG Dark')
dark_query_results = es.QueryUnpacker(db, dark_query,
stream_name='Unpack FG Dark')
# Do the dark subtraction
zlid = es.zip_latest(if_not_dark_stream_primary,
dark_query_results,
stream_name='Combine darks and lights')
dark_sub_fg = es.map(sub,
zlid,
input_info={0: (image_data_key, 0),
1: (image_data_key, 1)},
output_info=[('img', {'dtype': 'array',
'source': 'testing'})],
md=dict(stream_name='Dark Subtracted Foreground',
analysis_stage='dark_sub'))
if vis:
dark_sub_fg.sink(star(LiveImage('img')))
if write_to_disk:
eventify_raw_descriptor = es.Eventify(
if_not_dark_stream, stream_name='eventify raw descriptor',
document='descriptor')
exts = ['.tiff']
eventify_input_streams = [dark_sub_fg]
input_infos = [
{'data': ('img', 0), 'file': ('filename', 1)},
]
saver_kwargs = [{}]
eventifies = [es.Eventify(
s, stream_name='eventify {}'.format(s.stream_name)) for s in
eventify_input_streams]
mega_render = [
es.map(render_and_clean,
es.zip_latest(
es.zip(h_timestamp_stream,
# human readable event timestamp
if_not_dark_stream, # raw events,
stream_name='mega_render zip'
),
eventify_raw_start,
eventify_raw_descriptor,
analysed_eventify
),
string=light_template,
input_info={
'human_timestamp': (('data', 'human_timestamp'), 0),
'raw_event': ((), 1),
'raw_start': (('data',), 2),
'raw_descriptor': (('data',), 3),
'analyzed_start': (('data',), 4)
},
ext=ext,
full_event=True,
output_info=[('filename', {'dtype': 'str'})],
stream_name='mega render '
'{}'.format(analysed_eventify.stream_name)
)
for ext, analysed_eventify in zip(exts, eventifies)]
streams_to_be_saved = [dark_sub_fg]
save_callables = [tifffile.imsave]
md_render = es.map(render_and_clean,
eventify_raw_start,
string=light_template,
input_info={'raw_start': (('data',), 0), },
output_info=[('filename', {'dtype': 'str'})],
ext='.yml',
full_event=True,
stream_name='MD render')
make_dirs = [es.map(lambda x: os.makedirs(os.path.split(x)[0],
exist_ok=True),
cs,
input_info={0: 'filename'},
output_info=[('filename', {'dtype': 'str'})],
stream_name='Make dirs {}'.format(cs.stream_name)
) for cs in mega_render]
_s.update([es.map(writer_templater,
es.zip_latest(
es.zip(s1, s2, stream_name='zip render and data',
zip_type='truncate'), made_dir,
stream_name='zl dirs and render and data'
),
input_info=ii,
output_info=[('final_filename', {'dtype': 'str'})],
stream_name='Write {}'.format(s1.stream_name),
**kwargs) for
s1, s2, made_dir, ii, writer_templater, kwargs
in
zip(
streams_to_be_saved,
mega_render,
make_dirs, # prevent run condition btwn dirs and files
input_infos,
save_callables,
saver_kwargs
)])
_s.add(es.map(dump_yml, es.zip(eventify_raw_start, md_render),
input_info={0: (('data', 'filename'), 1),
1: (('data',), 0)},
full_event=True,
stream_name='dump yaml'))
return raw_source
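# Hypothetical wiring sketch (assumes a configured databroker `db` and a
# bluesky-style (name, document) stream; names are illustrative only):
# src = conf_save_tiff_pipeline(db=db, save_dir='/tmp/xpd', write_to_disk=True)
# for name, doc in db[-1].documents():
#     src.emit((name, doc))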
|
[
"cjwright4242@gmail.com"
] |
cjwright4242@gmail.com
|
f30fe56073906398d23caa61ccf0a8e2ab8e16db
|
323f58ecefddd602431eeb285b60ac81316b774a
|
/aioreactive/operators/map.py
|
fabc6f774561b5bc2cf3945aa07c3b09a5d2753b
|
[
"MIT"
] |
permissive
|
tr11/aioreactive
|
aa9798ee5c2f98c0f5301111732e72093232ab8e
|
6219f9a0761f69fa1765129b990762affdf661c8
|
refs/heads/master
| 2021-01-25T13:58:51.892021
| 2018-03-02T22:01:23
| 2018-03-02T22:01:23
| 123,635,129
| 0
| 0
|
MIT
| 2018-03-02T21:56:46
| 2018-03-02T21:56:46
| null |
UTF-8
|
Python
| false
| false
| 1,727
|
py
|
from asyncio import iscoroutinefunction
from typing import Callable, TypeVar
from aioreactive.abc import AsyncDisposable
from aioreactive.core import AsyncObserver, AsyncObservable
from aioreactive.core import AsyncSingleStream, chain, AsyncCompositeDisposable
T1 = TypeVar('T1')
T2 = TypeVar('T2')
class Map(AsyncObservable[T2]):
def __init__(self, mapper: Callable[[T1], T2], source: AsyncObservable[T1]) -> None:
self._source = source
self._mapper = mapper
async def __asubscribe__(self, observer: AsyncObserver[T2]) -> AsyncDisposable:
sink = Map.Sink(self) # type: AsyncSingleStream[T2]
down = await chain(sink, observer)
up = await chain(self._source, sink) # type: AsyncDisposable
return AsyncCompositeDisposable(up, down)
class Sink(AsyncSingleStream[T2]):
def __init__(self, source: "Map") -> None:
super().__init__()
self._mapper = source._mapper
async def asend_core(self, value: T1) -> None:
try:
result = self._mapper(value)
except Exception as err:
await self._observer.athrow(err)
else:
await self._observer.asend(result)
def map(mapper: Callable[[T1], T2], source: AsyncObservable[T1]) -> AsyncObservable[T2]:
"""Project each item of the source observable.
xs = map(lambda value: value * value, source)
Keyword arguments:
mapper: A transform function to apply to each source item.
Returns an observable sequence whose elements are the result of
invoking the mapper function on each element of source.
"""
assert not iscoroutinefunction(mapper)
return Map(mapper, source)
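# Usage sketch (imports follow the project's README-style API of this era --
# treat the exact names as assumptions):
# from aioreactive.core import AsyncObservable, AsyncAnonymousObserver, subscribe
# async def main():
#     xs = map(lambda x: x * 10, AsyncObservable.from_iterable([1, 2, 3]))
#     async def asend(value):
#         print(value)  # 10, 20, 30
#     await subscribe(xs, AsyncAnonymousObserver(asend))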
|
[
"dag@brattli.net"
] |
dag@brattli.net
|
00f660fdd4366d928e2b66f54f12ac2b923b5079
|
ce55c319f5a78b69fefc63595d433864a2e531b5
|
/后端知识/houduan/MyBlog/userapp/migrations/0001_initial.py
|
abcc3a807d851aa0ec3562c9d6c944221c0501b8
|
[] |
no_license
|
Suijng/1809_data
|
a072c875e8746190e3b715e53f1afe3323f4666b
|
45f8a57089f5c30ccc1a3cddb03b76dc95355417
|
refs/heads/master
| 2022-12-21T12:38:30.458291
| 2019-09-27T01:14:41
| 2019-09-27T01:14:41
| 211,207,071
| 0
| 0
| null | 2022-11-22T03:16:18
| 2019-09-27T00:55:21
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,860
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-12-03 08:10
from __future__ import unicode_literals
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='BlogUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('nickname', models.CharField(default='', max_length=20)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name_plural': 'users',
'verbose_name': 'user',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='EmailVerifyRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(default='', max_length=50, verbose_name='验证码')),
('email', models.EmailField(max_length=50, verbose_name='邮箱')),
('send_type', models.CharField(choices=[('register', '注册'), ('forget', '找回密码'), ('update_email', '修改邮箱')], max_length=30, verbose_name='验证码类型')),
('send_time', models.DateTimeField(auto_now_add=True, verbose_name='发送时间')),
],
options={
'verbose_name_plural': '邮箱验证码',
'verbose_name': '邮箱验证码',
},
),
]
|
[
"1627765913@qq.com"
] |
1627765913@qq.com
|
2d4d044affb7d0cc3e77b5f6551c82aae70f4994
|
588b28e9be045543a22f0a8ca99ba527199dc429
|
/chapter18/__init__.py
|
29bb6dee7092d73411e10f9fd9bbd06c99d000f3
|
[
"MIT"
] |
permissive
|
NetworkRanger/python-spider-project
|
7404d4e36ac6a2cd70584d78dd81fec65a320628
|
f501e331a59608d9a321a0d7254fcbcf81b50ec2
|
refs/heads/master
| 2020-04-15T06:05:53.374965
| 2019-01-28T13:35:14
| 2019-01-28T13:35:14
| 164,448,396
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
#!/usr/bin/python2.7
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2019/1/28 9:31 PM
import logging
LOG_FORMAT = "[%(asctime)s] [%(processName)s:%(threadName)s] [%(levelname)s:%(filename)s:%(funcName)s:%(lineno)d] %(message)s"
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
Logger = logging.getLogger(__name__)
class Demo(object):
def __init__(self):
pass
def step1(self):
Logger.debug('step1')
def step2(self):
Logger.debug('step2')
def run(self):
self.step1()
self.step2()
def main():
Demo().run()
if __name__ == '__main__':
main()
|
[
"17346503142@163.com"
] |
17346503142@163.com
|
1eb949c67d59888a91f020f8cad66698876c2348
|
130daef988750806f3d6ad66f94a4ff9ee4d8edd
|
/brambling/auth_backends.py
|
2a91a4a33e3d7f973717ff7c858fe9a43be17b65
|
[
"BSD-3-Clause"
] |
permissive
|
dancerfly/django-brambling
|
01866bc57add5b82d93e3c6c869906ec8c864b23
|
69aa3f5f702814969b41d62c19cd53db1f164397
|
refs/heads/master
| 2023-07-26T21:58:09.606139
| 2023-07-17T20:35:54
| 2023-07-17T20:35:54
| 16,976,982
| 2
| 1
|
BSD-3-Clause
| 2023-07-19T20:26:59
| 2014-02-19T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 972
|
py
|
class BramblingBackend(object):
"""
Handles object-based permissions for brambling models.
"""
def authenticate(self, **kwargs):
pass
def get_user(self, user_id):
pass
def get_all_permissions(self, user_obj, obj=None):
if obj is None:
return set()
if not user_obj.is_authenticated() or not user_obj.is_active:
return set()
if not hasattr(user_obj, '_brambling_perm_cache'):
user_obj._brambling_perm_cache = {}
perm_cache = user_obj._brambling_perm_cache
cls_name = obj.__class__.__name__
if cls_name not in perm_cache:
perm_cache[cls_name] = {}
if obj.pk not in perm_cache[cls_name]:
perm_cache[cls_name][obj.pk] = set(obj.get_permissions(user_obj))
return perm_cache[cls_name][obj.pk]
def has_perm(self, user_obj, perm, obj=None):
return perm in self.get_all_permissions(user_obj, obj)
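# Usage sketch: register the backend in Django settings (dotted path inferred
# from this module's location), after which object-level checks delegate to
# obj.get_permissions(user) and are cached per user:
#
#     AUTHENTICATION_BACKENDS = ['brambling.auth_backends.BramblingBackend']
#     user.has_perm('edit', event)  # True iff 'edit' in event.get_permissions(user)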
|
[
"stephen.r.burrows@gmail.com"
] |
stephen.r.burrows@gmail.com
|
afc46575b44bee62133c56169fa4210dd6ad9eb5
|
8760f182049d4caf554c02b935684f56f6a0b39a
|
/fabfile.py
|
518eb36179dfc6b586e00e71d9f2ce269767debe
|
[
"BSD-3-Clause"
] |
permissive
|
boar/boar
|
c674bc65623ee361af31c7569dd16c6eb8da3b03
|
6772ad31ee5bb910e56e650cc201a476adf216bc
|
refs/heads/master
| 2020-06-09T06:59:31.658154
| 2012-02-28T19:28:58
| 2012-02-28T19:28:58
| 1,734,103
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| true
| false
| 2,939
|
py
|
from fabric.api import *
from fabric.contrib.files import exists, upload_template
from fabric.contrib.project import rsync_project
import time
env.user = 'root'
env.project_name = 'boar'
env.hosts = ['137.205.98.90']
env.path = '/var/www/theboar.org'
env.version = 'current'
######################################
# Helpers
######################################
def version_path():
return '%s/releases/%s' % (env.path, env.version)
def version(v):
env.version = v
def wwwrun(c):
sudo(c, user='www-data')
######################################
# Tasks
######################################
def bootstrap():
sudo('apt-get install -y puppet rsync')
def deploy():
require('hosts', 'path')
if not exists(env.path):
sudo('mkdir -p "%s"' % env.path)
sudo('chown -R www-data:www-data "%s"' % env.path)
version(time.strftime('%Y%m%d%H%M%S'))
with cd(env.path):
#if exists('releases/current'):
# wwwrun('cp -a releases/current releases/%s' % env.version)
#else:
wwwrun('mkdir -p releases/%s' % env.version)
rsync_project(
local_dir=".",
remote_dir=version_path(),
delete=True,
extra_opts='--exclude=static_root --exclude=".git*" --exclude="*.pyc" --exclude="apache-solr-*" --exclude="*.pyc" --exclude="*~" --exclude="._*" --exclude="boar/media" --exclude=".*.swp" --exclude=".DS_Store"'
)
with cd(version_path()):
revision = local(
'git rev-list HEAD | head -1 | cut -c-20',
capture=True
).strip()
upload_template('boar/settings/live.py', 'boar/settings/live.py', {
'GIT_REVISION': revision
})
sudo('chown -R www-data:www-data .')
with cd('deploy'):
sudo('puppet --templatedir=. deps.pp')
if exists('ve'):
run('rm -rf ve')
run('mkdir ve')
run('virtualenv ve')
run('pip install --upgrade -E ve -r requirements.txt', shell=True)
manage('collectstatic --noinput')
manage('compress')
with cd(env.path):
if exists('releases/previous'):
run('rm releases/previous')
if exists('releases/current'):
run('mv releases/current releases/previous')
run('ln -s %s releases/current' % version_path())
sudo('cp %s/solr/conf/schema.xml /etc/solr/conf/schema.xml' % version_path())
restart()
def restart():
sudo('/etc/init.d/supervisor stop')
env.warn_only = True
while True:
out = sudo('/etc/init.d/supervisor start')
if not out.failed:
break
time.sleep(1)
env.warn_only = False
sudo('/etc/init.d/nginx reload')
sudo('/etc/init.d/jetty force-reload')
def manage(c):
with cd(version_path()):
wwwrun('ve/bin/python boar/manage.py %s --settings=boar.settings.live' % c)
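# Invocation sketch (Fabric 1.x CLI; the manage.py command name below is just
# an example argument):
#
#     fab bootstrap       # one-off: install puppet and rsync on the host
#     fab deploy          # rsync a timestamped release and restart services
#     fab manage:syncdb   # run a manage.py command with the live settings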
|
[
"ben@firshman.co.uk"
] |
ben@firshman.co.uk
|
c67ddc97da2553f8afb6a05eabb283fa8e04bd6a
|
df0afd5c143fbd799e47b2c1113c75edd16861dc
|
/train_config/training_server/image_classification/tensornet/data/utils.py
|
ae15a4c7718907e0a7194ee741d20850ca81224f
|
[
"MIT"
] |
permissive
|
amitkayal/Flash
|
7ee0021e6ebd143d250f702465b038bd081ef115
|
14a8b61e34b97d06aa9c9d1870ec37dc5a16e907
|
refs/heads/master
| 2023-02-17T06:50:05.146681
| 2021-01-16T09:09:25
| 2021-01-16T09:09:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,173
|
py
|
import torch
import numpy as np
def unnormalize(image, mean, std, transpose=False):
"""Un-normalize a given image.
Args:
image (numpy.ndarray or torch.Tensor): A ndarray
or tensor. If tensor, it should be in CPU.
mean (float or tuple): Mean. It can be a single value or
a tuple with 3 values (one for each channel).
std (float or tuple): Standard deviation. It can be a single
value or a tuple with 3 values (one for each channel).
transpose (bool, optional): If True, transposed output will
be returned. This param is effective only when image is
a tensor. If tensor, the output will have channel number
as the last dim. (default: False)
Returns:
Unnormalized image
"""
# Check if image is tensor, convert to numpy array
tensor = False
if type(image) == torch.Tensor: # tensor
tensor = True
if len(image.size()) == 3:
image = image.transpose(0, 1).transpose(1, 2)
image = np.array(image)
# Perform normalization
image = image * std + mean
# Convert image back to its original data type
if tensor:
if not transpose and len(image.shape) == 3:
image = np.transpose(image, (2, 0, 1))
image = torch.Tensor(image)
return image
def normalize(image, mean, std, transpose=False):
"""Normalize a given image.
Args:
image (numpy.ndarray or torch.Tensor): A ndarray
or tensor. If tensor, it should be in CPU.
mean (float or tuple): Mean. It can be a single value or
a tuple with 3 values (one for each channel).
std (float or tuple): Standard deviation. It can be a single
value or a tuple with 3 values (one for each channel).
transpose (bool, optional): If True, transposed output will
be returned. This param is effective only when image is
a tensor. If tensor, the output will have channel number
as the last dim. (default: False)
Returns:
Normalized image
"""
# Check if image is tensor, convert to numpy array
tensor = False
if type(image) == torch.Tensor: # tensor
tensor = True
if len(image.size()) == 3:
image = image.transpose(0, 1).transpose(1, 2)
image = np.array(image)
# Perform normalization
image = (image - mean) / std
# Convert image back to its original data type
if tensor:
if not transpose and len(image.shape) == 3:
image = np.transpose(image, (2, 0, 1))
image = torch.Tensor(image)
return image
def to_numpy(tensor):
"""Convert 3-D torch tensor to a 3-D numpy array.
Args:
tensor (torch.Tensor): Tensor to be converted.
Returns:
numpy.ndarray
"""
return tensor.transpose(0, 1).transpose(1, 2).clone().numpy()
def to_tensor(ndarray):
"""Convert 3-D numpy array to 3-D torch tensor.
Args:
ndarray (numpy.ndarray): Array to be converted.
Returns:
torch.Tensor
"""
return torch.Tensor(np.transpose(ndarray, (2, 0, 1)))
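# Minimal round-trip sketch (mean/std values below are illustrative):
if __name__ == "__main__":
    img = torch.rand(3, 8, 8)  # fake CHW image in [0, 1]
    norm = normalize(img, mean=0.5, std=0.5)     # roughly [-1, 1]
    back = unnormalize(norm, mean=0.5, std=0.5)  # undoes the normalization
    assert torch.allclose(back, img, atol=1e-6)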
|
[
"thegeek.004@gmail.com"
] |
thegeek.004@gmail.com
|
8e11f65766869a66e9111df36118b83b5dd1434f
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/artificial/transf_Quantization/trend_MovingMedian/cycle_5/ar_12/test_artificial_32_Quantization_MovingMedian_5_12_100.py
|
40e8975b9b2b2180572bfb7c615e4fcc63f2f0b4
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970
| 2018-12-17T22:08:11
| 2018-12-17T22:08:11
| 137,104,552
| 0
| 0
|
BSD-3-Clause
| 2018-12-17T22:08:12
| 2018-06-12T17:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 276
|
py
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 5, transform = "Quantization", sigma = 0.0, exog_count = 100, ar_order = 12);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
52acc46cc82e17279e6e3e8f336bb42e466867ba
|
522cbeb324df9843f9433d190af60f090119b4b1
|
/PythonAulas_Desafios/Desafios/aula006desafio3.py
|
3ecff1916da58627f403711a9dd57a67c1a2dbf0
|
[] |
no_license
|
Andremarcucci98/CursoEmVideo
|
52d7c78d42b9dc2a28eef8f7db77972c774dece6
|
751d1fcd0f37da1201218d23828a6cf526f9d029
|
refs/heads/master
| 2023-05-29T03:38:36.002154
| 2021-06-23T14:28:50
| 2021-06-23T14:28:50
| 353,194,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
n1 = int(input('Digite um numero:'))
n2 = int(input('Digite outro:'))
soma = n1 + n2
print (f'A soma de {n1} e {n2} vale {soma}')
|
[
"marcuccimaciel.andre@gmail.com"
] |
marcuccimaciel.andre@gmail.com
|
a81c6aaf8228b7c68e349d1d0a4adb1928bc36d4
|
a2e638cd0c124254e67963bda62c21351881ee75
|
/Extensions/_am_utils_py/FPythonCode/FValidator.py
|
4f4b74aa575bf4b403617ac0957f80f30b42d05e
|
[] |
no_license
|
webclinic017/fa-absa-py3
|
1ffa98f2bd72d541166fdaac421d3c84147a4e01
|
5e7cc7de3495145501ca53deb9efee2233ab7e1c
|
refs/heads/main
| 2023-04-19T10:41:21.273030
| 2021-05-10T08:50:05
| 2021-05-10T08:50:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,323
|
py
|
""" Compiled: 2020-09-18 10:38:50 """
#__src_file__ = "extensions/AMUtils/./etc/FValidator.py"
import collections.abc
class ValidationError(object):
INFO = 0
WARN = 1
ERROR = 2
FATAL = 3
def __init__(self, errorMsg, obj, errorLevel=None, **kwargs):
self._errorMsg = errorMsg
self._obj = obj
if errorLevel is None:
self._errorLevel = self.ERROR
else:
self._errorLevel = errorLevel
self._additionalArguments = kwargs
def FormatObject(self):
return str(self.Object())
def LongMessage(self):
return self.FormatErrorMessage()
def FormatErrorMessage(self):
errorLevel = {self.INFO:'INFO', self.WARN:'WARN', self.ERROR:'ERROR', self.FATAL:'FATAL'}[self.ErrorLevel()]
return '{0} {1}: {2}'.format(errorLevel, self.FormatObject(), self.ErrorMessage())
def __str__(self):
return self.FormatErrorMessage()
def ErrorMessage(self):
return self._errorMsg
def Object(self):
return self._obj
def ErrorLevel(self):
return self._errorLevel
class ErrorList(list):
def __init__(self, parameter, sourceChain=None, errorClass=ValidationError):
list.__init__(self)
self._parameter = parameter
self._sourceChain = sourceChain
self._errorClass = errorClass
def AddError(self, msg, errorLevel=None, **kwargs):
error = self._errorClass(msg, self._parameter, errorLevel, **kwargs)
self.append(error)
class Validator(object):
@classmethod
def Validate(cls, obj, validObjectTypes=None):
errors = []
if isinstance(validObjectTypes, str):
validObjectTypes = [validObjectTypes]
        elif isinstance(validObjectTypes, collections.abc.Iterable):
validObjectTypes = list(validObjectTypes)
else:
validObjectTypes = None
try:
obj = cls.Object(obj)
except Exception as e:
return [cls.CreateError(str(e), obj)]
objType = cls.GetType(obj)
if not objType:
return [cls.CreateError('No Type', obj)]
elif validObjectTypes is not None and not objType in validObjectTypes:
return [cls.CreateError('Type "{0}" is not in the valid object types {1}'.format(objType, validObjectTypes), obj)]
try:
#Should use a deque or something instead
function = getattr(cls, 'Validate'+objType)
except AttributeError:
            return [cls.CreateError('Validator function "{0}" not available'.format('Validate'+objType), obj, ValidationError.WARN)]
try:
errors.extend(function(obj))
except Exception as e:
return [cls.CreateError('Could not run validator function {0}: {1}'.format('Validate'+objType, e), obj)]
return errors
@classmethod
def CreateError(cls, errorMsg, obj, errorLevel=None, **kwargs):
return ValidationError(errorMsg, obj, errorLevel, **kwargs)
@classmethod
def GetType(cls, parameter):
return type(parameter).__name__
@classmethod
def Object(cls, obj):
return obj
@classmethod
def _ErrorList(cls, parameter, sourceChain):
return ErrorList(parameter, sourceChain, cls.CreateError)
|
[
"nencho.georogiev@absa.africa"
] |
nencho.georogiev@absa.africa
|
b96b505e0cb1220ac2b4625113c4bcca0a413048
|
59b72b8f662cd605b3ce31f54779c17e5ca066d0
|
/interview_q/剑指offer/10_斐波那契数列.py
|
de292e4f6715a9faab8360d5bd0767643da352c0
|
[] |
no_license
|
dongyang2/hello-world
|
c1f5853ccafd6b8f23836192547ab36f898e0891
|
1f859b53e2b21ed5a648da09b84950f03ec1b370
|
refs/heads/master
| 2022-12-11T22:07:22.853912
| 2022-11-24T03:52:35
| 2022-11-24T03:52:35
| 119,025,960
| 0
| 0
| null | 2018-01-26T10:09:58
| 2018-01-26T08:28:10
| null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
# Problems 6-9 revolve around data-structure operations that only exist in C; the Python implementations and operations differ quite a bit, so they are skipped for now
# coding: utf-8
# Python 3
# This is the legendary stair-jumping problem, the absolute classic of dynamic programming.
def jump(n: int):
if n < 0:
raise ValueError()
if n == 0:
return 0
if n == 1:
return 1
fn_2 = 0
fn_1 = 1
f = 0
for i in range(1, n):
f = fn_1 + fn_2
        fn_2, fn_1 = fn_1, f
return f
def main():
print(jump(4))
if __name__ == '__main__':
import time
print('-' * 15, 'Start', time.ctime(), '-' * 15, '\n')
main()
print('%s%s %s %s %s' % ('\n', '-' * 16, 'End', time.ctime(), '-' * 16))
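# Sanity check for jump(): jump(2) == 1, jump(3) == 2, jump(4) == 3,
# jump(5) == 5 (plain Fibonacci with F(1) = 1).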
|
[
"dongyangzhao@outlook.com"
] |
dongyangzhao@outlook.com
|
26990edf4fc3ae419d9308a62e95b9b113c53e05
|
89c9142a8d6e004f28cd0d22aa455d8571035ec4
|
/PyMOTW/datetime/datetime_datetime_strptime.py
|
6c457710fddeecaf1aefd1959bb80bc758485840
|
[] |
no_license
|
pkueecslibo/amw-python-study
|
c8d34f2151877fe098c06d71c51563dafd71e652
|
ff789e148158bfa8f7ae2b749240b868eed1e0bc
|
refs/heads/master
| 2021-01-20T17:27:16.262750
| 2014-08-19T15:18:54
| 2014-08-19T15:18:54
| 28,998,135
| 1
| 0
| null | 2015-01-09T03:09:50
| 2015-01-09T03:09:48
| null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
#!/usr/bin/python
#!-*- coding:utf-8 -*-
'''
Formatting and Parsing
format: render a datetime as a formatted string
parse : parse a formatted string back into a datetime
'''
import datetime
format = '%a %b %d %H:%M:%S %Y'
today = datetime.datetime.today()
print 'ISO :', today
s = today.strftime(format)
print 'strftime :', s
d = datetime.datetime.strptime(s, format)
print 'strptime :', d.strftime(format)
|
[
"huangxiaobo.routon@gmail.com@cdea0e49-4e81-d697-b58f-f95e96613c0c"
] |
huangxiaobo.routon@gmail.com@cdea0e49-4e81-d697-b58f-f95e96613c0c
|
37221c2bd8bec9865778877ca06c82ed0f57ca5a
|
531c47c15b97cbcb263ec86821d7f258c81c0aaf
|
/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_12_01/aio/operations_async/_usage_operations_async.py
|
7c8a86de0c22b7f573b55b71abac3155b9dcafb2
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
YijunXieMS/azure-sdk-for-python
|
be364d3b88204fd3c7d223df23756386ff7a3361
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
refs/heads/master
| 2021-07-15T18:06:28.748507
| 2020-09-04T15:48:52
| 2020-09-04T15:48:52
| 205,457,088
| 1
| 2
|
MIT
| 2020-06-16T16:38:15
| 2019-08-30T21:08:55
|
Python
|
UTF-8
|
Python
| false
| false
| 5,107
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class UsageOperations:
"""UsageOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location: str,
**kwargs
) -> AsyncIterable["models.ListUsagesResult"]:
"""Gets, for the specified location, the current compute resource usage information as well as the
limits for compute resources under the subscription.
:param location: The location for which resource usage is queried.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListUsagesResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_12_01.models.ListUsagesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ListUsagesResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListUsagesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages'} # type: ignore
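# Consumption sketch (assumes an authenticated ComputeManagementClient from
# this package, construction omitted; the location is illustrative):
#
#     async for usage in compute_client.usage.list("eastus"):
#         print(usage)
#
# AsyncItemPaged follows next_link between pages transparently.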
|
[
"noreply@github.com"
] |
YijunXieMS.noreply@github.com
|
87335b3cb32c9e311cccd7caea020d28d7f292f7
|
d3f4da28261585201e151270a344ae8d07d55392
|
/app/views.py
|
b184b72bd30079103e0e687dabc96ba264d6617b
|
[] |
no_license
|
405102091/flask
|
abe4f44b36c9f3549f1cab4d82fe3b9f33536232
|
b1b5c74a652c269769a1551ce689885e5af9ee21
|
refs/heads/master
| 2020-03-06T18:24:50.352066
| 2018-04-03T12:00:27
| 2018-04-03T12:00:27
| 127,006,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 931
|
py
|
#coding:utf-8
from app import app
from flask import request,render_template,send_from_directory,url_for,redirect
@app.route('/')
def index_page():
return render_template('index.html')
@app.route('/<path:path>')
def other_page(path):
if path=='favicon.ico':
return redirect(url_for('send_icons',path='favicon.ico'))
return render_template(path) # auto search in templates folder
@app.route('/file/<path:path>')
def send_file(path):
return send_from_directory('./file',path)
@app.route('/js/<path:path>')
def send_js(path):
return send_from_directory('./static/js',path)
@app.route('/css/<path:path>')
def send_css(path):
return send_from_directory('./static/css',path)
@app.route('/images/<path:path>')
def send_images(path):
return send_from_directory('./static/images',path)
@app.route('/icons/<path:path>')
def send_icons(path):
return send_from_directory('./static/icons',path)
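# Run sketch (assumes the conventional `app` package exporting `app`, as the
# import above suggests): start the dev server with `FLASK_APP=app flask run`,
# after which /js, /css, /images and /icons resolve through the routes here.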
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
ceb91a2b78e5a5ea055566071356b0781e484914
|
a8be4698c0a43edc3622837fbe2a98e92680f48a
|
/Programmers/DynamicProgramming/등굣길.py
|
1a6dab6d68269c829b646266b58b5d87d7d4cd50
|
[] |
no_license
|
blueboy1593/algorithm
|
fa8064241f7738a12b33544413c299e7c1e1a908
|
9d6fdd82b711ba16ad613edcc041cbecadd85e2d
|
refs/heads/master
| 2021-06-23T22:44:06.120932
| 2021-02-21T10:44:16
| 2021-02-21T10:44:16
| 199,543,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,339
|
py
|
from heapq import heappop, heappush
def solution(n, m, puddles):
answer = 0
road_map = [ [ 0 ] * n for _ in range(m) ]
for puddle in puddles:
road_map[puddle[1] - 1][puddle[0] - 1] = 1
visited = [ [ (float('inf'), 1) ] * n for _ in range(m) ]
print(*road_map, sep='\n')
print(*visited, sep='\n')
D = [(0, 1), (1, 0), (0, -1), (-1, 0)]
heap_stack = []
    # [shortest distance, i, j]
heappush(heap_stack, [0, 0, 0])
while heap_stack:
print(heap_stack)
a = heappop(heap_stack)
dis, i, j = a
for k in range(4):
idy = i + D[k][0]
jdx = j + D[k][1]
if 0 <= idy < m and 0 <= jdx < n:
if road_map[idy][jdx] == 0:
if dis + 1 < visited[idy][jdx][0]:
visited[idy][jdx] = (dis + 1, visited[i][j][1])
heappush(heap_stack, [dis + 1, idy, jdx])
elif dis + 1 == visited[idy][jdx][0]:
visited[idy][jdx] = (dis + 1, visited[idy][jdx][1] + visited[i][j][1])
# print(visited)
if visited[m-1][n-1][0] == float('inf'):
return 0
answer = visited[m-1][n-1][1] % 1000000007
print(answer)
print(*visited, sep='\n')
return answer
solution(4,3,[[2, 2]])
solution(4,3,[[1,2],[2, 2]])
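# For the classic Programmers test case solution(4, 3, [[2, 2]]) the expected
# answer is 4 (the number of shortest paths, modulo 1000000007).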
|
[
"snb0303@naver.com"
] |
snb0303@naver.com
|
145bcf7fdca12ee0dff9f5c8c86ce0531ba16418
|
8f68af7b8854d8c5000f8ecbe3a3c4330b4d6a7c
|
/docs/interviewPrep/designPatterns/Behavioral_patterns/Visitor/python/Component.py
|
ef9ff0e13ced6286d405b95e35de59790d077f6c
|
[] |
no_license
|
reshinto/reshinto.github.io
|
7590d0fb26cbf239b2545fd3b745416ab31aa7aa
|
71e5b82d49a11d9a9171a38bcb3ac23dd07ee62f
|
refs/heads/dev
| 2022-12-05T13:45:53.578262
| 2022-12-01T15:34:59
| 2022-12-01T15:34:59
| 211,689,735
| 6
| 0
| null | 2022-08-07T22:07:36
| 2019-09-29T16:11:25
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 261
|
py
|
from abc import ABC, abstractmethod
class Component(ABC):
"""
The Component interface declares an `accept` method that should take the
base visitor interface as an argument.
"""
@abstractmethod
def accept(self, visitor):
pass
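# A concrete component sketch (class and visitor method names here are
# hypothetical, not taken from this repo):
class ConcreteComponentA(Component):
    def accept(self, visitor):
        # Double dispatch: call the visitor method matching this concrete type.
        visitor.visit_concrete_component_a(self)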
|
[
"terencekong2002@gmail.com"
] |
terencekong2002@gmail.com
|
32b4f98ea46f4d147614fd31e0b5a7c4576d7bea
|
1d7a40fde9c4da1cdb70f7e641a7dd65a8eba1a8
|
/libretto/migrations/0031_add_distribution_constraint.py
|
763b24cf80de146c9d943f4073935520cbb3a53c
|
[
"BSD-3-Clause"
] |
permissive
|
dezede/dezede
|
675c5f6c05beffa5ad855ab521c19c077a188039
|
f50d2d478b473ac2acce1c5f6f9748c6211d593c
|
refs/heads/master
| 2023-08-25T13:26:47.939378
| 2023-08-14T20:24:51
| 2023-08-14T20:25:02
| 9,086,660
| 18
| 6
|
BSD-3-Clause
| 2023-06-05T16:32:57
| 2013-03-28T21:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 645
|
py
|
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('libretto', '0030_auto_20150626_1634'),
]
operations = [
migrations.RunSQL("""
DELETE FROM libretto_elementdedistribution
WHERE individu_id IS NULL AND ensemble_id IS NULL;
ALTER TABLE libretto_elementdedistribution
ADD CONSTRAINT individu_xor_ensemble
CHECK (individu_id IS NOT NULL <> ensemble_id IS NOT NULL);
""", """
ALTER TABLE libretto_elementdedistribution
DROP CONSTRAINT individu_xor_ensemble;
""")
]
|
[
"bordage.bertrand@gmail.com"
] |
bordage.bertrand@gmail.com
|
ee8246209d3f04aeb9f85bc70bc5260d1e00824d
|
1dd7fecaa182c1d7a29460dc5385066b68bcf676
|
/Add or Subtract Leading Spaces/add_spaces.py
|
cd0734d7a6b693d7a4c01b14afb10a06c50c1dcd
|
[] |
no_license
|
mainka1f/PythonUtilities
|
f081df31e6ea4311d4973ef7ba6bc0ff6be75fb1
|
f310d088a7a7a5f2c95c27cba3a7985207568d62
|
refs/heads/master
| 2021-12-02T19:21:11.915510
| 2012-05-01T21:43:57
| 2012-05-01T21:43:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,598
|
py
|
# filename: add_leading_spaces.py
# date of creation:
# Oct 9, 2008
# purpose:
# to add a specified number of spaces in
# front of designated lines; these occur when
# python code from one program is copied to
# a module, for example, where the defs start in
# column 1.
import os # to get current directory, etc.
import string # filenames are strings, dude
from tkFileDialog import * # for 'askopenfilename()', etc
import sys # for sys.exit()
from tkMessageBox import * # askokcancel, showinfo, showerror, etc.
def say_goodbye():
print '\nYou have elected to end this session.'
print 'Program terminated\n\n'
sys.exit()
currentDirectory = os.getcwd()
print '\n ** Welcome to "add_leading_spaces.py"'
print '\nThis program adds a user-specified amount'
print ' of leading spaces to each line of a code.'
print
print ' "Starting line" and "Ending line" are line numbers'
print ' as shown in any text editor.'
# define dictionary of options for askopenfilename(); open only python extensions initially
options = {}
options = {
'defaultextension' : '',
'filetypes' : [('All files','.*')],
# 'initialdir' : currentDirectory,
'initialfile' : '',
# 'parent' : root,
'title' : 'Open any text file...'
}
# get filename
# dirname, filename = os.path.split(askopenfilename(**options))
inputFile = askopenfilename(**options)
file=open(inputFile,'rU').readlines()
print '\nInput file:',inputFile
lenFile=len(file)
print '\nInput file has %s lines.' % len(file)
lineStart=int(raw_input('\nStarting line: '))
lineEnd=int(raw_input('Ending line: '))
numSpaces=int(raw_input('Number of leading white spaces to add: '))
if lineStart > lineEnd:
print '\nWARNING - "Ending line" cannot be less than "Starting line" '
print '\n Starting line =',lineStart
print ' Ending line =',lineEnd
print ' Check your input and start over.\n'
sys.exit()
if lineStart > lenFile or lineEnd > lenFile:
print '\nWARNING - Starting or Ending line numbers out of range'
print ' Max line number:',lenFile
print ' Starting line:',lineStart
print ' Ending line:',lineEnd
print ' Check your input and start over.\n'
sys.exit()
ans = askokcancel(
'start and end line numbers ok?',
'File = ' + inputFile + '\n' +
'\nStarting line = ' + str(lineStart) + '\n' +
'Ending line = ' + str(lineEnd) + '\n' +
'Leading white spaces to add per line = ' + str(numSpaces) + '\n\n' +
'Is this ok?'
)
if ans:
diff = lineEnd - lineStart + 1
print
for i in range(diff):
lineNumber=lineStart+i
line = file[lineNumber-1]
print '\nBefore change ...'
print '%s. %s' % (lineNumber,line)
    if line[0] != '#':
lineNew=' '*numSpaces + line
print ' After change ...'
print '%s. %s' % (lineNumber,lineNew)
file[lineNumber-1]=lineNew
else:
print ' Comment - no change'
# check if ok to write file back out to disk
ans=askokcancel(
'Write file...',
'OK to write file out to disk?'
)
if ans:
options={'title' : 'Save a file...'}
outputFile=asksaveasfilename(**options)
fileOut=open(outputFile,'w')
fileOut.writelines(file)
print '\nOutput file:',outputFile
print ' successfully written!\n'
print ' Program ended\n'
else:
say_goodbye()
else:
say_goodbye()
|
[
"dwbarne@gmail.com"
] |
dwbarne@gmail.com
|
8a3fa06daa3e454e257d5ac5eb773cb0916a5d79
|
704976ea552111c6a5af9cd7cb62b9d9abaf3996
|
/pypy/objspace/std/test/test_setstrategies.py
|
1088d8c734cb348fdbe86ccf222f729d02453262
|
[
"BSD-3-Clause"
] |
permissive
|
mesalock-linux/mesapy
|
4f02c5819ce7f2f6e249d34840f1aa097577645d
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
refs/heads/mesapy2.7
| 2023-08-16T21:33:02.239581
| 2019-08-13T10:29:43
| 2019-08-13T18:06:45
| 136,080,721
| 396
| 33
|
NOASSERTION
| 2020-04-01T03:05:18
| 2018-06-04T20:45:17
|
Python
|
UTF-8
|
Python
| false
| false
| 6,034
|
py
|
from pypy.objspace.std.setobject import W_SetObject
from pypy.objspace.std.setobject import (
BytesIteratorImplementation, BytesSetStrategy, EmptySetStrategy,
IntegerIteratorImplementation, IntegerSetStrategy, ObjectSetStrategy,
UnicodeIteratorImplementation, UnicodeSetStrategy)
from pypy.objspace.std.listobject import W_ListObject
class TestW_SetStrategies:
def wrapped(self, l):
return W_ListObject(self.space, [self.space.wrap(x) for x in l])
def test_from_list(self):
s = W_SetObject(self.space, self.wrapped([1,2,3,4,5]))
assert s.strategy is self.space.fromcache(IntegerSetStrategy)
s = W_SetObject(self.space, self.wrapped([1,"two",3,"four",5]))
assert s.strategy is self.space.fromcache(ObjectSetStrategy)
s = W_SetObject(self.space)
assert s.strategy is self.space.fromcache(EmptySetStrategy)
s = W_SetObject(self.space, self.wrapped([]))
assert s.strategy is self.space.fromcache(EmptySetStrategy)
s = W_SetObject(self.space, self.wrapped(["a", "b"]))
assert s.strategy is self.space.fromcache(BytesSetStrategy)
s = W_SetObject(self.space, self.wrapped([u"a", u"b"]))
assert s.strategy is self.space.fromcache(UnicodeSetStrategy)
def test_switch_to_object(self):
s = W_SetObject(self.space, self.wrapped([1,2,3,4,5]))
s.add(self.space.wrap("six"))
assert s.strategy is self.space.fromcache(ObjectSetStrategy)
s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5]))
s2 = W_SetObject(self.space, self.wrapped(["six", "seven"]))
s1.update(s2)
assert s1.strategy is self.space.fromcache(ObjectSetStrategy)
def test_switch_to_unicode(self):
s = W_SetObject(self.space, self.wrapped([]))
s.add(self.space.wrap(u"six"))
assert s.strategy is self.space.fromcache(UnicodeSetStrategy)
def test_symmetric_difference(self):
s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5]))
s2 = W_SetObject(self.space, self.wrapped(["six", "seven"]))
s1.symmetric_difference_update(s2)
assert s1.strategy is self.space.fromcache(ObjectSetStrategy)
def test_intersection(self):
s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5]))
s2 = W_SetObject(self.space, self.wrapped([4,5, "six", "seven"]))
s3 = s1.intersect(s2)
skip("for now intersection with ObjectStrategy always results in another ObjectStrategy")
assert s3.strategy is self.space.fromcache(IntegerSetStrategy)
def test_clear(self):
s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5]))
s1.clear()
assert s1.strategy is self.space.fromcache(EmptySetStrategy)
def test_remove(self):
s1 = W_SetObject(self.space, self.wrapped([1]))
self.space.call_method(s1, 'remove', self.space.wrap(1))
assert s1.strategy is self.space.fromcache(EmptySetStrategy)
def test_union(self):
s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5]))
s2 = W_SetObject(self.space, self.wrapped([4,5,6,7]))
s3 = W_SetObject(self.space, self.wrapped([4,'5','6',7]))
s4 = s1.descr_union(self.space, [s2])
s5 = s1.descr_union(self.space, [s3])
assert s4.strategy is self.space.fromcache(IntegerSetStrategy)
assert s5.strategy is self.space.fromcache(ObjectSetStrategy)
def test_discard(self):
class FakeInt(object):
def __init__(self, value):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if other == self.value:
return True
return False
s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5]))
s1.descr_discard(self.space, self.space.wrap("five"))
skip("currently not supported")
assert s1.strategy is self.space.fromcache(IntegerSetStrategy)
set_discard__Set_ANY(self.space, s1, self.space.wrap(FakeInt(5)))
assert s1.strategy is self.space.fromcache(ObjectSetStrategy)
def test_has_key(self):
class FakeInt(object):
def __init__(self, value):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if other == self.value:
return True
return False
s1 = W_SetObject(self.space, self.wrapped([1,2,3,4,5]))
assert not s1.has_key(self.space.wrap("five"))
skip("currently not supported")
assert s1.strategy is self.space.fromcache(IntegerSetStrategy)
assert s1.has_key(self.space.wrap(FakeInt(2)))
assert s1.strategy is self.space.fromcache(ObjectSetStrategy)
def test_iter(self):
space = self.space
s = W_SetObject(space, self.wrapped([1,2]))
it = s.iter()
assert isinstance(it, IntegerIteratorImplementation)
assert space.unwrap(it.next()) == 1
assert space.unwrap(it.next()) == 2
#
s = W_SetObject(space, self.wrapped(["a", "b"]))
it = s.iter()
assert isinstance(it, BytesIteratorImplementation)
assert space.unwrap(it.next()) == "a"
assert space.unwrap(it.next()) == "b"
#
s = W_SetObject(space, self.wrapped([u"a", u"b"]))
it = s.iter()
assert isinstance(it, UnicodeIteratorImplementation)
assert space.unwrap(it.next()) == u"a"
assert space.unwrap(it.next()) == u"b"
def test_listview(self):
space = self.space
s = W_SetObject(space, self.wrapped([1,2]))
assert sorted(space.listview_int(s)) == [1, 2]
#
s = W_SetObject(space, self.wrapped(["a", "b"]))
assert sorted(space.listview_bytes(s)) == ["a", "b"]
#
s = W_SetObject(space, self.wrapped([u"a", u"b"]))
assert sorted(space.listview_unicode(s)) == [u"a", u"b"]
|
[
"mssun@mesalock-linux.org"
] |
mssun@mesalock-linux.org
|
2c41931f9bfd7eaf9c22cbb2f15ada81301aaba1
|
84e661d5d293ec0c544fedab7727767f01e7ddcf
|
/gallery/migrations-old/0008_auto_20161104_1658.py
|
72efb6c44b6a86b56afd429493b3b02f9b29ad1d
|
[
"BSD-3-Clause"
] |
permissive
|
groundupnews/gu
|
ea6734fcb9509efc407061e35724dfe8ba056044
|
4c036e79fd735dcb1e5a4f15322cdf87dc015a42
|
refs/heads/master
| 2023-08-31T13:13:47.178119
| 2023-08-18T11:42:58
| 2023-08-18T11:42:58
| 48,944,009
| 21
| 23
|
BSD-3-Clause
| 2023-09-14T13:06:42
| 2016-01-03T11:56:48
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 408
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-11-04 14:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('gallery', '0007_auto_20161011_0201'),
]
operations = [
migrations.AlterModelOptions(
name='album',
options={'ordering': ['name']},
),
]
|
[
"nathangeffen@gmail.com"
] |
nathangeffen@gmail.com
|
58fcbce7e64476764524eb41a4a3d7d43b76f249
|
95495baeb47fd40b9a7ecb372b79d3847aa7a139
|
/test/test_ftd_cluster_device_container_list_container.py
|
f3f0da9a2931d619b0f6edf9e2553da51679d8ac
|
[] |
no_license
|
pt1988/fmc-api
|
b1d8ff110e12c13aa94d737f3fae9174578b019c
|
075f229585fcf9bd9486600200ff9efea5371912
|
refs/heads/main
| 2023-01-07T09:22:07.685524
| 2020-10-30T03:21:24
| 2020-10-30T03:21:24
| 308,226,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,392
|
py
|
# coding: utf-8
"""
Cisco Firepower Management Center Open API Specification
**Specifies the REST URLs and methods supported in the Cisco Firepower Management Center API. Refer to the version specific [REST API Quick Start Guide](https://www.cisco.com/c/en/us/support/security/defense-center/products-programming-reference-guides-list.html) for additional information.** # noqa: E501
OpenAPI spec version: 1.0.0
Contact: tac@cisco.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.ftd_cluster_device_container_list_container import FTDClusterDeviceContainerListContainer # noqa: E501
from swagger_client.rest import ApiException
class TestFTDClusterDeviceContainerListContainer(unittest.TestCase):
"""FTDClusterDeviceContainerListContainer unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testFTDClusterDeviceContainerListContainer(self):
"""Test FTDClusterDeviceContainerListContainer"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.ftd_cluster_device_container_list_container.FTDClusterDeviceContainerListContainer() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"pt1988@gmail.com"
] |
pt1988@gmail.com
|
0a58061500b7599271d8283323f62de5837f949c
|
c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce
|
/flask/flaskenv/Lib/site-packages/keras_applications/__init__.py
|
f55ee1563dfe72a1e4e57e1da0889e2100ce2a61
|
[] |
no_license
|
AhsonAslam/webapi
|
54cf7466aac4685da1105f9fb84c686e38f92121
|
1b2bfa4614e7afdc57c9210b0674506ea70b20b5
|
refs/heads/master
| 2020-07-27T06:05:36.057953
| 2019-09-17T06:35:33
| 2019-09-17T06:35:33
| 208,895,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:fd8175358cf1d2bbbe0840de053501eb7f1fe237337da13b20799adfe278cfe5
size 1846
|
[
"github@cuba12345"
] |
github@cuba12345
|
470ce8862eb06700eb7ec3afcc26e0ae4b1527eb
|
fd7ed3266a6f802289508e5721b2bcd354763748
|
/notification_manager.py
|
fc2cf1ea95668dd6ad20c23f5b0c8a9269fcf244
|
[] |
no_license
|
ec500-software-engineering/exercise-1-modularity-jbw0410
|
817628ca81cb784c6da372ed3637bace4f36180b
|
9bbc6beb322c245fd4bca1d2d355861fd16d9d98
|
refs/heads/master
| 2020-04-22T02:57:27.675676
| 2019-02-14T19:28:11
| 2019-02-14T19:28:11
| 170,068,823
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,273
|
py
|
from common_types import MessageUrgency
class NotificationManager(object):
"""
:developer: Josh
This class uses urgency to dictate which messaging mediums get used
LOW_URGENCY = 0
MEDIUM_URGENCY = 1
HIGH_URGENCY = 2
"""
def __init__(self, main_contact):
self._main_contact = main_contact
def send_message(self, msg):
raise NotImplementedError
class FlexibleNotificationManager(NotificationManager):
def __init__(self, main_contact, sms_sender, telegram_sender, email_sender):
super().__init__(main_contact)
self._sms_sender = sms_sender
self._telegram_sender = telegram_sender
self._email_sender = email_sender
def send_message(self, msg):
if msg.get_urgency() == MessageUrgency.HIGH_URGENCY:
self._sms_sender.send_notification(msg, self._main_contact)
self._telegram_sender.send_notification(msg, self._main_contact)
elif msg.get_urgency() == MessageUrgency.MEDIUM_URGENCY:
self._telegram_sender.send_notification(msg, self._main_contact)
elif msg.get_urgency() == MessageUrgency.LOW_URGENCY:
self._email_sender.send_notification(msg, self._main_contact)
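# Wiring sketch (the sender objects are assumed to expose
# send_notification(msg, contact), matching the calls above):
#
#     manager = FlexibleNotificationManager(
#         main_contact, sms_sender, telegram_sender, email_sender)
#     manager.send_message(msg)  # routes on msg.get_urgency()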
|
[
"noreply@github.com"
] |
ec500-software-engineering.noreply@github.com
|
4a4353352eab6a8cc98894df3052e0b68d2b8232
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_121/ch35_2020_09_21_13_25_01_007144.py
|
62b79e7010a777c8f5bf1e3e2107e925f0d9c7cc
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
num=int(input('Digite seu número para adicionar: '))
while num!=0:
numero=int(input('Digite seu número para adicionar ou 0 para somar: '))
if numero==0:
print(num)
break
else:
num+=numero
|
[
"you@example.com"
] |
you@example.com
|
2dc0b1f1f82ad169a5d289da8a02dec624e5d2d1
|
92a619c043e0c26fb65e58619a0e1c5090a9efe0
|
/Useful_Code_Snippets/pramp_k_messed_array_sort.py
|
17542c02521c6a1bc4bb97355d7544669599c472
|
[] |
no_license
|
curieshicy/My_Utilities_Code
|
39150171f8e0aa4971cfc3d7adb32db7f45e6733
|
8b14a5c1112794d3451486c317d5e3c73efcd3b5
|
refs/heads/master
| 2022-06-22T06:06:39.901008
| 2022-06-20T16:00:51
| 2022-06-20T16:00:51
| 177,379,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 647
|
py
|
'''
0 1 2 3 4 5 6 7 8 9
arr = [1, 4, 5, 2, 3, 7, 8, 6, 10, 9] k = 2
i i
'''
import heapq
def sort_k_messed_array(arr, k):
heap = [i for i in arr[:k + 1]]
heapq.heapify(heap)
res = []
i = k + 1
while heap and i < len(arr):
min_val = heapq.heappop(heap)
res.append(min_val)
heapq.heappush(heap, arr[i])
i += 1
while heap:
min_val = heapq.heappop(heap)
res.append(min_val)
return res
arr = [1, 4, 5, 2, 3, 7, 8, 6, 10, 9]
k = 2
print (sort_k_messed_array(arr, k))
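# Expected output: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]. The (k+1)-sized min-heap
# makes this O(n log k) instead of a full O(n log n) sort.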
|
[
"noreply@github.com"
] |
curieshicy.noreply@github.com
|
2fb286b4006fe0afb28353f2573285435e790b8a
|
194a1e2ac246c5f9926b014c00d4c733f0cdaf0c
|
/btcgreen/wallet/did_wallet/did_info.py
|
553e00112df673b80a38510c0e18345282c7f978
|
[
"Apache-2.0"
] |
permissive
|
chia-os/btcgreen-blockchain
|
03e889cd0268284b7673917ab725ad71f980b650
|
2688e74de423ec59df302299e993b4674d69489e
|
refs/heads/main
| 2023-08-29T14:40:11.962821
| 2021-08-17T06:33:34
| 2021-08-17T06:33:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 926
|
py
|
from dataclasses import dataclass
from typing import List, Optional, Tuple
from btcgreen.types.blockchain_format.sized_bytes import bytes32
from btcgreen.util.ints import uint64
from btcgreen.util.streamable import streamable, Streamable
from btcgreen.wallet.lineage_proof import LineageProof
from btcgreen.types.blockchain_format.program import Program
from btcgreen.types.blockchain_format.coin import Coin
@dataclass(frozen=True)
@streamable
class DIDInfo(Streamable):
origin_coin: Optional[Coin] # puzzlehash of this coin is our DID
backup_ids: List[bytes]
num_of_backup_ids_needed: uint64
parent_info: List[Tuple[bytes32, Optional[LineageProof]]] # {coin.name(): LineageProof}
current_inner: Optional[Program] # represents a Program as bytes
temp_coin: Optional[Coin] # partially recovered wallet uses these to hold info
temp_puzhash: Optional[bytes32]
temp_pubkey: Optional[bytes]
|
[
"svginsomnia@gmail.com"
] |
svginsomnia@gmail.com
|
d1f5e089482a623098d9cb6844fe079e8a317992
|
707d67f58b55cae19d3b1431d3c1fb2d5f283800
|
/withoutrest/test.py
|
7440a0d374410355bceeefda45974cbe4c0cced9
|
[] |
no_license
|
lipun111/API
|
c52954cfd604ee567e4033ba971ca2f2fd8fb48d
|
57e66c74ef8e0bb519fe9ba316c1cb2503d901ad
|
refs/heads/master
| 2021-01-06T08:16:05.342444
| 2020-02-23T10:32:49
| 2020-02-23T10:32:49
| 241,257,877
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 341
|
py
|
import requests
BASE_URL='http://127.0.0.1:8000/'
ENDPOINT='apijson'
resp = requests.get(BASE_URL+ENDPOINT)
data = resp.json()
print('Data From Django Application:')
print('#'*50)
print('Employee Number:', data['eno'])
print('Employee Name:', data['ename'])
print('Employee Salary:', data['esal'])
print('Employee Address:', data['eaddr'])
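# Expected shape of the JSON served by the Django side (field values here are
# illustrative, not taken from the source):
#
#     {"eno": 100, "ename": "...", "esal": 1000.0, "eaddr": "..."}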
|
[
"lipunkumarmajhi95@gmail.com"
] |
lipunkumarmajhi95@gmail.com
|
e94e070e67a00687f1e3cae94b5554d4983e1d11
|
10cb11f83e1c8b51b9d72c28d6259a56ff1a97c8
|
/samcli/commands/pipeline/init/pipeline_templates_manifest.py
|
e9729511faa9af0042539a4c326e6ef6b34eece4
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-2-Clause"
] |
permissive
|
aws/aws-sam-cli
|
6d4411aacf7f861e75e5cf4882a32858797a276d
|
b297ff015f2b69d7c74059c2d42ece1c29ea73ee
|
refs/heads/develop
| 2023-08-30T23:28:36.179932
| 2023-08-30T21:58:26
| 2023-08-30T21:58:26
| 92,205,085
| 1,402
| 470
|
Apache-2.0
| 2023-09-14T21:14:23
| 2017-05-23T18:16:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,398
|
py
|
"""
Represents a manifest that lists the available SAM pipeline templates.
Example:
providers:
- displayName: Jenkins
  id: jenkins
- displayName: Gitlab CI/CD
  id: gitlab
- displayName: Github Actions
  id: github-actions
templates:
- displayName: jenkins-two-environments-pipeline
provider: Jenkins
location: templates/cookiecutter-jenkins-two-environments-pipeline
- displayName: gitlab-two-environments-pipeline
provider: Gitlab
location: templates/cookiecutter-gitlab-two-environments-pipeline
- displayName: Github-Actions-two-environments-pipeline
provider: Github Actions
location: templates/cookiecutter-github-actions-two-environments-pipeline
"""
from pathlib import Path
from typing import Dict, List
import yaml
from samcli.commands.exceptions import AppPipelineTemplateManifestException
from samcli.yamlhelper import parse_yaml_file
class Provider:
"""CI/CD system such as Jenkins, Gitlab and GitHub-Actions"""
def __init__(self, manifest: Dict) -> None:
self.id: str = manifest["id"]
self.display_name: str = manifest["displayName"]
class PipelineTemplateMetadata:
"""The metadata of a Given pipeline template"""
def __init__(self, manifest: Dict) -> None:
self.display_name: str = manifest["displayName"]
self.provider: str = manifest["provider"]
self.location: str = manifest["location"]
class PipelineTemplatesManifest:
"""The metadata of the available CI/CD systems and the pipeline templates"""
def __init__(self, manifest_path: Path) -> None:
try:
manifest: Dict = parse_yaml_file(file_path=str(manifest_path))
self.providers: List[Provider] = list(map(Provider, manifest["providers"]))
self.templates: List[PipelineTemplateMetadata] = list(map(PipelineTemplateMetadata, manifest["templates"]))
except (FileNotFoundError, KeyError, TypeError, yaml.YAMLError) as ex:
raise AppPipelineTemplateManifestException(
"SAM pipeline templates manifest file is not found or ill-formatted. This could happen if the file "
f"{manifest_path} got deleted or modified."
"If you believe this is not the case, please file an issue at https://github.com/aws/aws-sam-cli/issues"
) from ex
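# Loading sketch (the manifest path is illustrative):
#
#     manifest = PipelineTemplatesManifest(Path("manifest.yaml"))
#     for template in manifest.templates:
#         print(template.display_name, "->", template.location)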
|
[
"noreply@github.com"
] |
aws.noreply@github.com
|