blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
705bb752a9258e3bc2c8ee9f16145cfd532bc894
|
60c0ca4ef3ad20bad04311473b2f4044f54739d2
|
/store/api/migrations/0005_order_sold_at.py
|
af648d3a29d3d09976b8254d48088c4f4700c7c2
|
[] |
no_license
|
Jimiliani/rainforest
|
361915024cc2a93a9bb8621372627b2d84176271
|
b1bf65ee4441d1a4980a2e65ce2cfc629b9d6a7a
|
refs/heads/main
| 2023-06-19T18:10:38.879924
| 2021-07-21T14:54:05
| 2021-07-21T14:54:05
| 387,679,460
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
# Generated by Django 3.1.5 on 2021-07-20 19:10
from django.db import migrations, models


class Migration(migrations.Migration):
    """Add the nullable ``sold_at`` date column to the ``Order`` model."""

    dependencies = [
        # Must run after the previous migration of the `api` app.
        ('api', '0004_auto_20210720_2159'),
    ]

    operations = [
        migrations.AddField(
            model_name='order',
            name='sold_at',
            # Nullable so existing rows need no backfill value.
            field=models.DateField(null=True),
        ),
    ]
|
[
"dikorolyov@mail.ru"
] |
dikorolyov@mail.ru
|
c724c19fb17cb22589d49e60505ecf79ee04e7c5
|
d1742451b25705fc128acc245524659628ab3e7d
|
/Data Structure & Algorithm/Disjoint Set Union/10685 - Nature.py
|
b0b9ec8421bcfe7e9e623074eb4e6f6e4a873ba0
|
[] |
no_license
|
Shovon588/Programming
|
ebab793a3c97aedddfcad5ea06e7e22f5c54a86e
|
e4922c9138998358eed09a1be7598f9b060c685f
|
refs/heads/master
| 2022-12-23T18:29:10.141117
| 2020-10-04T17:29:32
| 2020-10-04T17:29:32
| 256,915,133
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
def makeset(n):
    """Initialize element n as its own parent in the global `par` array."""
    par[n] = n


def find(r):
    """Return the representative (root) of r's set, compressing the path.

    Bug fix: the original ended with ``return find(par[r])``, recursing a
    second full time per call even though the preceding line had already
    compressed the path; after compression ``par[r]`` IS the root, so it
    can be returned directly.
    """
    if par[r] == r:
        return r
    par[r] = find(par[r])
    return par[r]


def joint(a, b):
    """Union the sets containing a and b (no union by rank/size)."""
    u = find(a)
    v = find(b)
    if u != v:
        par[u] = v


def generate_result(dic):
    """Return the size of the largest set among elements 1..n (global).

    `dic` maps root -> member count and is mutated in place; returns -1
    when n == 0 (no elements counted).
    """
    res = -1
    for i in range(1, n + 1):
        temp = find(i)
        if temp in dic:
            dic[temp] += 1
        else:
            dic[temp] = 1
        res = max(res, dic[temp])
    return res
# Driver for UVa 10685 "Nature": read test cases until the "0 0" line,
# union related creatures, and print the size of the largest group.
while(1):
    n,m = map(int,input().split())
    if n==0 and m==0:
        break
    par = [None]*(n+1)  # 1-based parent array consumed by the DSU helpers
    animals = {}        # creature name -> DSU index (1..n)
    for i in range(n):
        animal = input()
        animals[animal]=i+1
        makeset(i+1)
    for i in range(m):
        first, second = map(str,input().split())
        a = animals[first]
        b = animals[second]
        joint(a,b)
    dic = {}
    result = generate_result(dic)
    print(result)
    s = input()  # consume the blank line separating test cases
|
[
"mainulislam588@gmail.com"
] |
mainulislam588@gmail.com
|
76eb0ff4bccebf9ef628e4a625ec26945dffb10d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02573/s607211232.py
|
a15ccc098a97d0f2076b85071f021635d77845ac
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 735
|
py
|
class UnionFind:
    """Disjoint-set union with union by size and path compression.

    ``self.r[i]`` stores -(component size) when i is a root, otherwise
    the index of i's parent.
    """

    def __init__(self, n):
        # Every element begins as the root of a singleton component.
        self.r = [-1] * n

    def root(self, x):
        """Return the representative of x, flattening the path as we go."""
        if self.r[x] < 0:
            return x
        top = self.root(self.r[x])
        self.r[x] = top
        return top

    def merge(self, x, y):
        """Join the components of x and y; False if already joined."""
        rx, ry = self.root(x), self.root(y)
        if rx == ry:
            return False
        # Keep the larger tree (more negative count) as the new root.
        if self.r[rx] > self.r[ry]:
            rx, ry = ry, rx
        self.r[rx] += self.r[ry]
        self.r[ry] = rx
        return True

    def size(self, x):
        """Number of elements in x's component."""
        return -self.r[self.root(x)]
# Read an N-vertex, M-edge graph and print the largest component's size.
N, M = map(int, input().split())
f, uf = [set() for i in range(N)], UnionFind(N)  # NOTE: `f` is built but never used
for _ in range(M):
    A, B = map(lambda x: int(x)-1, input().split())  # 1-based input -> 0-based
    uf.merge(A, B)
print(max([uf.size(i) for i in range(N)]))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
762b1c64e435700c7347877040a1ae4aaaaabfe8
|
f51a03fee097195911c1577e8510908d02784853
|
/src/data/reg_ex/poker_888.py
|
9df00e106415573e9384ed7598704598977a71a5
|
[] |
no_license
|
aaaaaa2493/poker-engine
|
fc04cc4b93ad73189adf99b2f864d12a99a34dce
|
52aebf8572f87378fa78c999c252d60fcc80f5ce
|
refs/heads/master
| 2020-08-31T17:38:28.477260
| 2019-10-31T12:16:40
| 2019-10-31T12:16:40
| 218,746,090
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,701
|
py
|
from re import compile
class Poker888:
    """Compiled regular expressions for parsing 888poker / Snap Poker
    hand-history text files.

    ``name`` is a (very permissive) character class matching any player
    nickname 888poker allows, including accented letters and punctuation.
    Patterns with a ``_2`` suffix match the alternative currency layout
    ("123 $" instead of "$123"); higher-numbered ``game_info_N`` variants
    cover further buy-in formats.
    """

    name = '[a-zA-Z0-9_\-@\'.,$*`áàåäãçéèêíîóöôõšüúžÄÁÃÅÉÍÖÔÓÜØø´<^>+&' \
           '\\\/()Ѐ£¼ñ®™~#!%\[\]|°¿?:"=ß{}æ©«»¯²¡; ]+'

    # --- File identification and hand boundaries ---
    identifier = compile('^\*\*\*\*\* 888poker Hand History')
    identifier_snap = compile('^Snap Poker Hand History')
    hand_border = compile('^$')
    hand_border_888 = compile(r'\*\*\*\*\* 888poker Hand History for ')
    hand_border_snap = compile(r'Snap Poker Hand History for ')
    find_hand_id = compile(r'^Game ([0-9]+) \*\*\*\*\*$')
    step_border = compile(r'\*\* [DSa-z ]+ \*\*')

    # --- Header lines: blinds, date, tournament info ---
    blinds_and_date = compile(r'^\$([0-9,]+)/\$([0-9,]+) Blinds No Limit Holdem - \*\*\* '
                              r'(.. .. ....) ([0-9:]+)$')
    blinds_and_ante_2 = compile(r'^([0-9 ]+) \$/([0-9 ]+) \$ Blinds No Limit Holdem - \*\*\* '
                                r'(.. .. ....) ([0-9:]+)$')
    game_info = compile(r'^Tournament #([0-9]+) (\$[0-9.]+ \+ \$[0-9.]+) - '
                        r'Table #([0-9]+) ([0-9]+) Max \(Real Money\)$')
    game_info_2 = compile(r'^Tournament #([0-9]+) ([0-9,]+ \$ \+ [0-9,]+ \$) - '
                          r'Table #([0-9]+) ([0-9]+) Max \(Real Money\)$')
    game_info_3 = compile(r'^Tournament #([0-9]+) (\$[0-9.]+) - '
                          r'Table #([0-9]+) ([0-9]+) Max \(Real Money\)$')
    game_info_4 = compile(r'^Tournament #([0-9]+) ([0-9,]+ \$) - '
                          r'Table #([0-9]+) ([0-9]+) Max \(Real Money\)$')
    game_info_5 = compile(r'^Tournament #([0-9]+) (Бесплатно) - '
                          r'Table #([0-9]+) ([0-9]+) Max \(Real Money\)$')

    # --- Seats, blinds and antes ---
    find_button_seat = compile(r'^Seat ([0-9]+) is the button$')
    player_init = compile(r'^Seat ([0-9]+): (' + name + r') \( \$([0-9,]+) \)$')
    player_init_2 = compile(r'^Seat ([0-9]+): (' + name + r') \( ([0-9 ]+) \$ \)$')
    empty_init = compile(r'^Seat ([0-9]+):[ ]{2}\( ([0-9,$ ]+) \)$')
    find_ante = compile(r'^(' + name + r') posts ante \[\$([0-9,]+)\]$')
    find_ante_2 = compile(r'^(' + name + r') posts ante \[([0-9 ]+) \$\]$')
    find_small_blind = compile(r'^(' + name + ') posts small blind \[\$([0-9,]+)\]$')
    find_small_blind_2 = compile(r'^(' + name + r') posts small blind \[([0-9 ]+) \$\]$')
    find_big_blind = compile(r'^(' + name + ') posts big blind \[\$([0-9,]+)\]$')
    find_big_blind_2 = compile(r'^(' + name + r') posts big blind \[([0-9 ]+) \$\]$')

    # --- Board cards ---
    # NOTE(review): find_turn and find_river are identical patterns; they are
    # presumably kept separate for call-site readability.
    find_flop = compile(r'^\[ (..), (..), (..) \]$')
    find_turn = compile(r'^\[ (..) \]$')
    find_river = compile(r'^\[ (..) \]$')
    skip_total_number_of_players = compile(r'^Total number of players : [0-9]+$')

    # actions
    find_dealt_cards = compile(r'^Dealt to (' + name + ') \[ (..), (..) \]$')
    find_fold = compile(r'^(' + name + ') folds$')
    find_call = compile(r'^(' + name + ') calls \[\$([0-9,]+)\]$')
    find_call_2 = compile(r'^(' + name + r') calls \[([0-9 ]+) \$\]$')
    find_check = compile(r'^(' + name + ') checks$')
    find_bet = compile(r'^(' + name + ') bets \[\$([0-9,]+)\]$')
    find_bet_2 = compile(r'^(' + name + r') bets \[([0-9 ]+) \$\]$')
    find_raise = compile(r'^(' + name + ') raises \[\$([0-9,]+)\]$')
    find_raise_2 = compile(r'^(' + name + ') raises \[([0-9 ]+) \$\]$')
    find_did_not_show = compile(r'^(' + name + r') did not show his hand$')
    find_win_money = compile(r'^(' + name + ') collected \[ \$([0-9,]+) \]$')
    find_win_money_2 = compile(r'^(' + name + r') collected \[ ([0-9 ]+) \$ \]$')
    find_show_cards = compile(r'^(' + name + ') shows \[ (..), (..) \]$')
    find_muck_cards = compile(r'^(' + name + ') mucks \[ (..), (..) \]$')
|
[
"aaaaaa2493@yandex.ru"
] |
aaaaaa2493@yandex.ru
|
992d9b74e952ecd7516429a0554f8e5e86d3a855
|
6f594cc963795c69d8da3c30ca580c0405ef2d6e
|
/other/57InsertInterval.py
|
d1fb32163788f8998b4a82b8be2a45e9a2d0316a
|
[] |
no_license
|
lo-tp/leetcode
|
25933c5b25f64f881d43748d8b2763f69614a97f
|
4cc4d76c64e9d9aa3f53c5e9574e488c93e10a50
|
refs/heads/master
| 2022-09-07T20:32:58.487759
| 2022-09-05T03:39:50
| 2022-09-07T13:39:50
| 116,555,892
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 789
|
py
|
class Solution(object):
    """LeetCode 57 — insert a new interval into a sorted, disjoint list."""

    def insert(self, intervals, newInterval):
        """Merge ``newInterval`` into ``intervals`` and return the result.

        ``intervals`` is assumed sorted, pairwise disjoint and non-negative;
        a start value of -1 is used as a sentinel meaning "the new interval
        has already been emitted".
        """
        if not intervals:
            return [newInterval]

        merged = []
        lo, hi = newInterval
        for cur_s, cur_e in intervals:
            if lo == -1:
                # New interval already placed — copy the rest through.
                merged.append([cur_s, cur_e])
            elif cur_e < lo:
                # Current interval ends strictly before the new one begins.
                merged.append([cur_s, cur_e])
            elif hi < cur_s:
                # Current interval starts after the new one ends: emit both.
                merged.append([lo, hi])
                merged.append([cur_s, cur_e])
                lo = -1
            else:
                # Overlap: grow the pending merged interval.
                lo, hi = min(lo, cur_s), max(hi, cur_e)
        if lo != -1:
            # New interval extends past every existing one.
            merged.append([lo, hi])
        return merged
|
[
"regesteraccount@hotmail.com"
] |
regesteraccount@hotmail.com
|
7e072a572581f6627fca07bcdcad06f5612d2500
|
44990e9f4630aa9efc8e0fa56f2c5dbd836cddc6
|
/nao_vacila/wsgi.py
|
b4a7dcfe74eda774e16c8596df6bc9f14e247473
|
[] |
no_license
|
kallebefelipe/webserver-nao-vacila
|
33c61461d73b7f9e649a93406eb032014f3b983c
|
57e972a44a4eb68e5253d38d320051723d33a924
|
refs/heads/master
| 2022-12-14T19:18:22.670018
| 2017-09-06T13:00:02
| 2017-09-06T13:00:02
| 95,972,976
| 0
| 0
| null | 2022-12-07T23:58:58
| 2017-07-01T15:40:23
|
Python
|
UTF-8
|
Python
| false
| false
| 489
|
py
|
"""
WSGI config for nao_vacila project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nao_vacila.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
|
[
"kallebefelipe@gmail.com"
] |
kallebefelipe@gmail.com
|
c2ea2cc2352bfd9d8b9ad888ff3c0fb82997b816
|
22954a0c13d7bf1824320802e802aa8166f16d76
|
/web_scraping/rhiphopheads/items.py
|
ca9af6e29328930a755afe5a2a604dbaed917dd5
|
[] |
no_license
|
luke-zhu/cs1951a-data
|
e0c7a96c7e100c278722419ba3bc845f6a5326c4
|
925c3263988db1de815589c5e47ddd918c345b25
|
refs/heads/master
| 2021-01-20T07:40:21.372377
| 2017-05-02T21:47:08
| 2017-05-02T21:47:08
| 90,025,042
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy


class RhiphopheadsItem(scrapy.Item):
    """Placeholder scrapy Item for the rhiphopheads spider; no fields yet."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
|
[
"luke_zhu@brown.edu"
] |
luke_zhu@brown.edu
|
dc8835c6dec0140fcb1852faa09d8e70a7cdeaaf
|
c397d4899fbb5e34b90a2650be2e6aa6f5725972
|
/blog/migrations/0037_reviewimage_thumbnail.py
|
805c17f73adb43f3b5a343a908788a464aa1d064
|
[] |
no_license
|
CCCodes/ProConDuck
|
aa68e6e89c3c71ddf7832d35f51688fddc379b10
|
c4ce19e62d5b50b3da9d258fa4e40831e159f2f7
|
refs/heads/master
| 2023-02-16T18:55:27.766465
| 2021-01-17T16:49:37
| 2021-01-17T16:49:37
| 96,048,162
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-23 00:31
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Add the boolean ``thumbnail`` flag to the ``ReviewImage`` model."""

    dependencies = [
        # Must run after the previous migration of the `blog` app.
        ('blog', '0036_auto_20170822_0803'),
    ]

    operations = [
        migrations.AddField(
            model_name='reviewimage',
            name='thumbnail',
            # Defaults to False so existing images are not marked thumbnails.
            field=models.BooleanField(default=False),
        ),
    ]
|
[
"caitlinchou@gmail.com"
] |
caitlinchou@gmail.com
|
a91390c161f40656d0f323b1525d55125c72c02a
|
0b279c246179bc6a76ad17f055ad1dce3402b045
|
/private_production/eft/2018/crab_INT_MINIAODSIM.py
|
e9a3be5b74809d1efea5713aa88eb353e315e3d7
|
[] |
no_license
|
UniMiBAnalyses/CMSSWGeneration
|
a55e6ad840e4f7f9fae6b46a4bb939a288492f10
|
a7acf1a780eeb30e14616fef90ccf389e4367668
|
refs/heads/master
| 2023-09-01T02:01:44.746469
| 2022-01-31T11:01:29
| 2022-01-31T11:01:29
| 212,852,677
| 0
| 2
| null | 2022-06-16T15:23:25
| 2019-10-04T15:57:27
|
Python
|
UTF-8
|
Python
| false
| false
| 838
|
py
|
from CRABClient.UserUtilities import config, getUsernameFromSiteDB

# CRAB3 configuration for producing the VBS SSWW interference (INT)
# MINIAODSIM sample of the 2018 EFT private production.
config = config()

# --- General: request naming and transfer behaviour ---
config.General.requestName = 'VBS_SSWW_INT_MINIAODSIM'
config.General.workArea = 'crab_projects'
config.General.transferOutputs = True
config.General.transferLogs = False

# --- Job type: CMSSW parameter set and resources per job ---
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'SMP-RunIIAutumn18MiniAOD-00048_1_cfg.py'
config.JobType.numCores = 4
config.JobType.maxMemoryMB = 6000

# --- Data: input dataset, splitting and output destination ---
config.Data.inputDataset = '/Bulk/jixiao-VBS_SSWW_INT_Premix_2-7c74ac161ee1f5c5534fed7a9685e204/USER'
config.Data.inputDBS = 'phys03'  # user-produced datasets live in the phys03 DBS instance
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 1
config.Data.outLFNDirBase = '/store/user/%s/eft2018' % (getUsernameFromSiteDB())
config.Data.publication = True
config.Data.outputDatasetTag = 'VBS_SSWW_INT_MINIAODSIM'

# --- Site: where the output is stored ---
config.Site.storageSite = 'T2_CN_Beijing'
|
[
"jiexiao@pku.edu.cn"
] |
jiexiao@pku.edu.cn
|
8bc9ba267ab55211234f1b8531b5d213ec6c7238
|
2315afb8435de656afcc5789ec1ddde21135f658
|
/todo_project/todo_app/models.py
|
dbe2eb3e5521a50750030086908baa842542c537
|
[] |
no_license
|
DeanDupalov/Front-End-Basics
|
9754315cce8417cb86fbe33c76886df70e9d8ea4
|
acac5b03f55aff03620bd2d527a96c0d453e07d9
|
refs/heads/master
| 2023-04-22T08:58:28.124375
| 2021-05-13T13:11:18
| 2021-05-13T13:11:18
| 357,648,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
from django.db import models

# Create your models here.


class Todo(models.Model):
    """A single to-do entry."""
    # Short task title (also used as the string representation below).
    title = models.CharField(max_length=10)
    description = models.TextField(max_length=100)
    is_done = models.BooleanField(default=False)

    def __str__(self):
        # Shown in the admin and anywhere the object is rendered as text.
        return self.title
|
[
"75751527+DeanDupalov@users.noreply.github.com"
] |
75751527+DeanDupalov@users.noreply.github.com
|
48887c30ff50b09604e6af7c99af845d18f9c3aa
|
8dca64dd11b23a7d59413ac8e28e92a0ab80c49c
|
/504. Base 7/solution.py
|
298b59aa9f66b91ba715bc108c1bf1b2171775ae
|
[] |
no_license
|
huangruihaocst/leetcode-python
|
f854498c0a1d257698e10889531c526299d47e39
|
8f88cae7cc982ab8495e185914b1baeceb294060
|
refs/heads/master
| 2020-03-21T20:52:17.668477
| 2018-10-08T20:29:35
| 2018-10-08T20:29:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 579
|
py
|
class Solution(object):
    """LeetCode 504 — convert an integer to its base-7 string."""

    def convertToBase7(self, num):
        """
        :type num: int
        :rtype: str

        Builds the digit string iteratively, least-significant digit first,
        handling the sign separately.
        """
        if num == 0:
            return '0'
        negative = num < 0
        magnitude = -num if negative else num
        digits = []
        while magnitude:
            digits.append(str(magnitude % 7))
            magnitude //= 7
        base7 = ''.join(reversed(digits))
        return '-' + base7 if negative else base7
if __name__ == '__main__':
    # Quick manual check: -7 in base 7 is "-10".
    print(Solution().convertToBase7(-7))
|
[
"huangruihaocst@126.com"
] |
huangruihaocst@126.com
|
b059b880de3f859a5969b06be9518974df6aa833
|
f6d08b29b76713165fcdb50f78bd9c74b6b38c22
|
/Collect/S30/DataAccess.py
|
b4ea6f8e898d457761a400b8cb3c13d44347eeb3
|
[
"Apache-2.0"
] |
permissive
|
joan-gathu/watertools
|
b4b22071897e21d2fb306344f9ace42511e9f3ef
|
55e383942ed3ddb3ba1d26596badc69922199300
|
refs/heads/master
| 2022-04-11T17:49:06.324478
| 2020-03-13T11:11:16
| 2020-03-13T11:11:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,459
|
py
|
# -*- coding: utf-8 -*-
"""
WaterSat
author: Tim Martijn Hessels
Created on Sun Feb 10 18:26:30 2019
"""
import datetime
import pandas as pd
import numpy as np
import os
import urllib
import gdal
#S2_tile = "10SGE"
#output_folder = "F:\Project_Jain\Case_California\Input_Data\S30"
#Startdate = "2018-03-01"
#Enddate = "2018-10-31"
def DownloadData(Dir, Startdate, Enddate, S2_tile):
    """Download daily HLS S30 (Sentinel-2, v1.4) tiles and unpack to GeoTIFF.

    Parameters
    ----------
    Dir : str
        Output root directory; a subfolder named after the tile is created.
    Startdate, Enddate : str
        Date range ("YYYY-mm-dd"), expanded at daily frequency.
    S2_tile : str
        Sentinel-2 tile code, e.g. "10SGE".

    Days whose download or conversion fails are skipped (best effort, as in
    the original implementation).
    """
    # Bug fix: `import urllib` alone does not load the `request` submodule;
    # import it explicitly before using urllib.request.urlretrieve below.
    import urllib.request

    # Import watertools modules
    import watertools.General.data_conversions as DC

    # Define the dates
    Dates = pd.date_range(Startdate, Enddate, freq="D")

    # Create output folder
    output_folder_end = os.path.join(Dir, S2_tile)
    if not os.path.exists(output_folder_end):
        os.makedirs(output_folder_end)

    # Loop over the days
    for Date in Dates:

        # Get the datum
        doy = int(Date.dayofyear)
        year = Date.year

        try:
            # Create the right url
            url = "https://hls.gsfc.nasa.gov/data/v1.4/S30/%s/%s/%s/%s/%s/HLS.S30.T%s.%s%03d.v1.4.hdf" % (year, S2_tile[0:2], S2_tile[2:3], S2_tile[3:4], S2_tile[4:5], S2_tile, year, doy)
            filename_out = os.path.join(output_folder_end, "HLS.S30.T%s.%s%03d.v1.4.hdf" % (S2_tile, year, doy))

            # Download the data
            urllib.request.urlretrieve(url, filename=filename_out)

            # Create a folder for the end results
            folder_tile = os.path.join(output_folder_end, "HLS_S30_T%s_%s%03d_v1_4" % (S2_tile, year, doy))
            if not os.path.exists(folder_tile):
                os.makedirs(folder_tile)

            # Write the hdf file in tiff files and store it in the folder
            dataset = gdal.Open(filename_out)
            sdsdict = dataset.GetMetadata('SUBDATASETS')
            for Band in range(1, 15):
                dest = gdal.Open(sdsdict["SUBDATASET_%d_NAME" % Band])
                Array = dest.GetRasterBand(1).ReadAsArray()

                # Bands 1-13 are scaled by 1e-4 with -0.1 (raw -1000) as the
                # nodata marker; band 9 is renamed B8A, bands 10-13 shift
                # down one number, and band 14 is the QC layer (255 nodata).
                if Band < 9:
                    Array = Array * 0.0001
                    Array[Array == -0.1] = -9999.
                    Band_name = "B%02d" % (Band)
                if Band == 9:
                    Band_name = "B8A"
                    Array = Array * 0.0001
                    Array[Array == -0.1] = -9999.
                if 10 <= Band < 14:
                    Band_name = "B%02d" % (Band - 1)
                    Array = Array * 0.0001
                    Array[Array == -0.1] = -9999.
                if Band == 14:
                    Array[Array == 255] = -9999.
                    Band_name = "QC"

                # Georeference the tile from the HDF metadata.
                meta = dataset.GetMetadata()
                ulx = int(meta["ULX"])
                uly = int(meta["ULY"])
                size = int(meta["SPATIAL_RESOLUTION"])
                projection = int(meta["HORIZONTAL_CS_CODE"].split(":")[-1])
                time_string = meta["SENSING_TIME"].split(";")[0]
                Time = datetime.datetime.strptime(time_string[:-5], "%Y-%m-%dT%H:%M:%S")
                hour = int(Time.hour)
                minute = int(Time.minute)
                geo = tuple([ulx, size, 0, uly, 0, -size])
                DC.Save_as_tiff(os.path.join(folder_tile, "HLS_S30_T%s_%s%03d_H%02dM%02d_%s.tif" % (S2_tile, year, doy, hour, minute, Band_name)), Array, geo, projection)

        except Exception:
            # Best effort: skip days whose file is missing or malformed.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit.)
            pass

    return
|
[
"timhessels@hotmail.com"
] |
timhessels@hotmail.com
|
6ce225b27e57b222c5803bee8ef647f9c9f5b6e1
|
aaa762ce46fa0347cdff67464f56678ea932066d
|
/AppServer/lib/django-1.3/tests/regressiontests/test_client_regress/views.py
|
c40f34fe563a797d6d3a7c364eeb614697b044db
|
[
"Apache-2.0",
"BSD-3-Clause",
"LGPL-2.1-or-later",
"MIT",
"GPL-2.0-or-later",
"MPL-1.1"
] |
permissive
|
obino/appscale
|
3c8a9d8b45a6c889f7f44ef307a627c9a79794f8
|
be17e5f658d7b42b5aa7eeb7a5ddd4962f3ea82f
|
refs/heads/master
| 2022-10-01T05:23:00.836840
| 2019-10-15T18:19:38
| 2019-10-15T18:19:38
| 16,622,826
| 1
| 0
|
Apache-2.0
| 2022-09-23T22:56:17
| 2014-02-07T18:04:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,115
|
py
|
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.core.exceptions import SuspiciousOperation
from django.shortcuts import render_to_response
from django.utils import simplejson
from django.utils.encoding import smart_str
from django.core.serializers.json import DjangoJSONEncoder
from django.test.client import CONTENT_TYPE_RE
from django.template import RequestContext
def no_template_view(request):
    "A simple view that expects a GET request, and returns a rendered template"
    # NOTE(review): despite the docstring, the body says no template is used;
    # the response is a fixed string the test suite matches against.
    return HttpResponse("No template used. Sample content: twice once twice. Content ends.")
def staff_only_view(request):
    """Allow staff users through; reject everyone else with SuspiciousOperation."""
    if not request.user.is_staff:
        raise SuspiciousOperation()
    return HttpResponse('')
@login_required
def get_view(request):
    "A simple login protected view"
    return HttpResponse("Hello world")
def request_data(request, template='base.html', data='sausage'):
    "A simple view that returns the request data in the context"
    # Surfaces GET, POST and the merged REQUEST dicts side by side so tests
    # can verify which source each parameter was read from.
    return render_to_response(template, {
        'get-foo':request.GET.get('foo',None),
        'get-bar':request.GET.get('bar',None),
        'post-foo':request.POST.get('foo',None),
        'post-bar':request.POST.get('bar',None),
        'request-foo':request.REQUEST.get('foo',None),
        'request-bar':request.REQUEST.get('bar',None),
        'data': data,
    })
def view_with_argument(request, name):
    """A view that takes a string argument

    The purpose of this view is to check that if a space is provided in
    the argument, the test framework unescapes the %20 before passing
    the value to the view.
    """
    greeting = 'Hi, Arthur' if name == 'Arthur Dent' else 'Howdy, %s' % name
    return HttpResponse(greeting)
@login_required
def login_protected_redirect_view(request):
    "A view that redirects all requests to the GET view"
    return HttpResponseRedirect('/test_client_regress/get_view/')
def set_session_view(request):
    "A view that sets a session variable"
    request.session['session_var'] = 'YES'
    return HttpResponse('set_session')


def check_session_view(request):
    "A view that reads a session variable"
    # Falls back to 'NO' when set_session_view has not run in this session.
    return HttpResponse(request.session.get('session_var', 'NO'))


def request_methods_view(request):
    "A view that responds with the request method"
    return HttpResponse('request method: %s' % request.method)


def return_unicode(request):
    # Renders a template with non-ASCII content (encoding regression check).
    return render_to_response('unicode.html')
def return_json_file(request):
    "A view that parses and returns a JSON string as a file."
    # Pull the charset out of the Content-Type header; fall back to the
    # site-wide default when none was supplied.
    match = CONTENT_TYPE_RE.match(request.META['CONTENT_TYPE'])
    if match:
        charset = match.group(1)
    else:
        charset = settings.DEFAULT_CHARSET

    # This just checks that the uploaded data is JSON
    obj_dict = simplejson.loads(request.raw_post_data.decode(charset))
    obj_json = simplejson.dumps(obj_dict, encoding=charset,
                                cls=DjangoJSONEncoder,
                                ensure_ascii=False)
    # Echo the JSON back as a downloadable attachment in the same charset.
    response = HttpResponse(smart_str(obj_json, encoding=charset), status=200,
                            mimetype='application/json; charset=' + charset)
    response['Content-Disposition'] = 'attachment; filename=testfile.json'
    return response
def check_headers(request):
    "A view that responds with value of the X-ARG-CHECK header"
    return HttpResponse('HTTP_X_ARG_CHECK: %s' % request.META.get('HTTP_X_ARG_CHECK', 'Undefined'))


def raw_post_data(request):
    "A view that is requested with GET and accesses request.raw_post_data. Refs #14753."
    return HttpResponse(request.raw_post_data)


def request_context_view(request):
    # Special attribute that won't be present on a plain HttpRequest
    request.special_path = request.path
    return render_to_response('request_context.html', context_instance=RequestContext(request, {}))
|
[
"root@lucid64.hsd1.ca.comcast.net"
] |
root@lucid64.hsd1.ca.comcast.net
|
5f0505f36af2d39f955a7c0c374ddee7f52a9465
|
024ad288e3e8c4407c147d3e5a3cef9c97ddecce
|
/keras/keras98_randomsearch.py
|
3188609d3745a71add73ce93d8b27576dc61a945
|
[] |
no_license
|
keumdohoon/STUDY
|
a17f62549e5dc59640875970b79b41ba8f62932c
|
83a1369d8e93767ebc445a443d8f55921cd984ce
|
refs/heads/master
| 2022-12-15T17:25:00.809774
| 2020-09-07T02:00:30
| 2020-09-07T02:00:30
| 264,050,749
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,264
|
py
|
# Originally keras97 converted to RandomizedSearchCV; a score step is added
# at the end.
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential, Model
from keras.layers import Input, Dropout, Conv2D, Flatten, Dense
from keras.layers import MaxPooling2D
import numpy as np

# 1. Data
# Loading the dataset already splits it into train/test x and y.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape)#(60000, 28, 28)
print(x_test.shape)#(10000, 28, 28)
# x_train = x_train.reshape(x_train[0], 28,28,1)/255
# x_test = x_test.reshape(x_test[0], 28,28,1)/255
# Pixel values are 0..255, so dividing by 255 is effectively min-max scaling.
# Flattened to 784 features because the model below is Dense-only.
x_train = x_train.reshape(x_train.shape[0], 28 * 28)/255
x_test = x_test.reshape(x_test.shape[0], 28 * 28)/255
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
# Keras labels start at 0, so check the y dimensions before one-hot encoding.
print(y_train.shape)
#########################################################################################################
# model = GridSearchCV(model_to_use, parameter_dict, cv=kfold_or_plain_int)
###################################################################################################
# Build the three ingredients for the search: model, parameters, and cv.
# 2. Model
# Mind the order of arguments that grid search expects.
def build_model(drop=0.5, optimizer= 'adam'):
    """Build and compile a Dense 784->512->256->128->10 MNIST classifier.

    drop: dropout rate applied after each hidden layer.
    optimizer: any optimizer name/instance accepted by Keras compile().
    """
    inputs = Input(shape=(28*28, ), name = 'input')
    x = Dense(512, activation = 'relu', name= 'hidden1')(inputs)
    x= Dropout(drop)(x)
    x = Dense(256, activation = 'relu', name= 'hidden2')(x)
    x= Dropout(drop)(x)
    x = Dense(128, activation = 'relu', name= 'hidden3')(x)
    x= Dropout(drop)(x)
    output = Dense(10, activation = 'softmax', name= 'outputs')(x)
    model = Model(inputs =inputs, outputs = output)
    # Compile only — fitting happens later inside the randomized search.
    model.compile (optimizer, metrics =["acc"], loss = 'categorical_crossentropy')
    return model
# A functional-API model can be hand-built this way as well.
# Grid search takes a model as its first argument, so we define and reuse
# our own model builder.
# Compile only here — fit runs later inside the random/grid search.
# The model is ready; next build the second ingredient: the parameters.
def create_hyperparameters():
    """Return the hyper-parameter search space for RandomizedSearchCV.

    Keys must match what KerasClassifier/build_model accept:
    ``batch_size``, ``optimizer`` and ``drop``.
    """
    batches = [10, 20, 30, 40, 50]
    optimizers = ['rmsprop', 'adam', 'adadelta']
    dropout = np.linspace(0.1, 0.5, 5)
    # Bug fix: the original returned the undefined name `linspace` for the
    # "drop" entry, raising NameError at call time; return `dropout`.
    return {"batch_size": batches, "optimizer": optimizers,
            "drop": dropout}
# The search space above is a dict — the search CV parameter argument must
# be dict-shaped. The cv/kfold argument only needs a number, so that part is
# already covered. Epochs, node counts, activations, etc. could be added to
# the space as well.
# A plain Keras model cannot be used directly: Keras ships a scikit-learn
# wrapper so the model can sit inside scikit-learn's search utilities.
from keras.wrappers.scikit_learn import KerasClassifier
# Always mind classification vs. regression when picking the wrapper;
# wrapping makes the Keras model usable from scikit-learn.
model= KerasClassifier(build_fn= build_model, verbose= 1)
# Instantiate the search space built above.
hyperparameters = create_hyperparameters()
# From here on is the model-fit part.
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
search = RandomizedSearchCV(model, hyperparameters, cv = 3 , n_jobs =-1)
search.fit(x_train, y_train)
print(search.best_params_)
# Always double-check the source and the hyper-parameters in this folder.
# acc: 0.9311
# {'optimizer': 'rmsprop', 'drop': 0.1, 'batch_size': 50}
# score
# Added: evaluate the best found model on the held-out test set.
acc = search.score(x_test, y_test, verbose=0)
print(search.best_params_)
print("acc :", acc)
# acc: 0.9143
# {'optimizer': 'adadelta', 'drop': 0.30000000000000004, 'batch_size': 20}
# Traceback (most recent call last):
# File "d:\Study\Bitcamp\keras\keras98_randomsearch.py", line 105, in <module>
# print("최적의 매개변수 :", model.best_params_)
[
"58944180+keumdohoon@users.noreply.github.com"
] |
58944180+keumdohoon@users.noreply.github.com
|
ea0437398c5d2f0e423bd627eaa886ffd929f096
|
64bf39b96a014b5d3f69b3311430185c64a7ff0e
|
/intro-ansible/venv2/lib/python3.8/site-packages/ansible/modules/database/postgresql/postgresql_ext.py
|
97bd549f21b13edb26860e7beac097a8cf22f526
|
[
"MIT"
] |
permissive
|
SimonFangCisco/dne-dna-code
|
7072eba7da0389e37507b7a2aa5f7d0c0735a220
|
2ea7d4f00212f502bc684ac257371ada73da1ca9
|
refs/heads/master
| 2023-03-10T23:10:31.392558
| 2021-02-25T15:04:36
| 2021-02-25T15:04:36
| 342,274,373
| 0
| 0
|
MIT
| 2021-02-25T14:39:22
| 2021-02-25T14:39:22
| null |
UTF-8
|
Python
| false
| false
| 5,700
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata consumed by ansible-doc and Ansible's CI tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: postgresql_ext
short_description: Add or remove PostgreSQL extensions from a database.
description:
- Add or remove PostgreSQL extensions from a database.
version_added: "1.9"
options:
name:
description:
- name of the extension to add or remove
required: true
default: null
db:
description:
- name of the database to add or remove the extension to/from
required: true
default: null
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- Host running the database
required: false
default: localhost
port:
description:
- Database port to connect to.
required: false
default: 5432
state:
description:
- The database extension state
required: false
default: present
choices: [ "present", "absent" ]
notes:
- The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host.
- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed
on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using
this module.
requirements: [ psycopg2 ]
author: "Daniel Schep (@dschep)"
'''
EXAMPLES = '''
# Adds postgis to the database "acme"
- postgresql_ext:
name: postgis
db: acme
'''
import traceback

# psycopg2 is optional at import time; main() fails with a friendly message
# when it is missing instead of crashing on import.
try:
    import psycopg2
    import psycopg2.extras
except ImportError:
    postgresqldb_found = False
else:
    postgresqldb_found = True

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class NotSupportedError(Exception):
    """Raised for operations the target PostgreSQL server cannot perform."""
    pass
# ===========================================
# PostgreSQL module specific support methods.
#
def ext_exists(cursor, ext):
    """Return True when extension ``ext`` is installed in the connected DB."""
    # Parameterized query — `ext` is never interpolated into the SQL text.
    cursor.execute("SELECT * FROM pg_extension WHERE extname=%(ext)s", {'ext': ext})
    return cursor.rowcount == 1
def ext_delete(cursor, ext):
    """Drop extension ``ext`` if present; return whether anything changed."""
    if not ext_exists(cursor, ext):
        return False
    # Identifier, not a value — quoted inline because DDL arguments cannot
    # be parameterized.
    cursor.execute('DROP EXTENSION "%s"' % ext)
    return True
def ext_create(cursor, ext):
    """Create extension ``ext`` if absent; return whether anything changed."""
    if ext_exists(cursor, ext):
        return False
    # Identifier, not a value — quoted inline because DDL arguments cannot
    # be parameterized.
    cursor.execute('CREATE EXTENSION "%s"' % ext)
    return True
# ===========================================
# Module execution.
#
def main():
    """Entry point: ensure the requested extension is present or absent.

    Connects via psycopg2 in autocommit mode, honours Ansible check mode,
    and reports ``changed`` back through module.exit_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            login_user=dict(default="postgres"),
            login_password=dict(default="", no_log=True),
            login_host=dict(default=""),
            port=dict(default="5432"),
            db=dict(required=True),
            ext=dict(required=True, aliases=['name']),
            state=dict(default="present", choices=["absent", "present"]),
        ),
        supports_check_mode=True
    )

    if not postgresqldb_found:
        module.fail_json(msg="the python psycopg2 module is required")

    db = module.params["db"]
    ext = module.params["ext"]
    state = module.params["state"]
    changed = False

    # To use defaults values, keyword arguments must be absent, so
    # check which values are empty and don't include in the **kw
    # dictionary
    params_map = {
        "login_host": "host",
        "login_user": "user",
        "login_password": "password",
        "port": "port"
    }
    kw = dict((params_map[k], v) for (k, v) in module.params.items()
              if k in params_map and v != '')
    try:
        db_connection = psycopg2.connect(database=db, **kw)
        # Enable autocommit so we can create databases
        # NOTE(review): string comparison of version numbers — '2.10.0'
        # would sort before '2.4.2'; works only while 2.x minor stays < 10.
        if psycopg2.__version__ >= '2.4.2':
            db_connection.autocommit = True
        else:
            db_connection.set_isolation_level(psycopg2
                                              .extensions
                                              .ISOLATION_LEVEL_AUTOCOMMIT)
        cursor = db_connection.cursor(
            cursor_factory=psycopg2.extras.DictCursor)
    except Exception as e:
        module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())

    try:
        if module.check_mode:
            # Check mode: report what WOULD change without touching the DB.
            if state == "present":
                changed = not ext_exists(cursor, ext)
            elif state == "absent":
                changed = ext_exists(cursor, ext)
        else:
            if state == "absent":
                changed = ext_delete(cursor, ext)
            elif state == "present":
                changed = ext_create(cursor, ext)
    except NotSupportedError as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    except Exception as e:
        module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())

    module.exit_json(changed=changed, db=db, ext=ext)


if __name__ == '__main__':
    main()
|
[
"sifang@cisco.com"
] |
sifang@cisco.com
|
1657f962a8133600e36bf5a5651983e5160d9d34
|
9f2f386a692a6ddeb7670812d1395a0b0009dad9
|
/python/paddle/fluid/tests/unittests/dygraph_group_sharded_stage3_offload.py
|
5f9ec5c6e708e37b208ed07a321428f056f83a77
|
[
"Apache-2.0"
] |
permissive
|
sandyhouse/Paddle
|
2f866bf1993a036564986e5140e69e77674b8ff5
|
86e0b07fe7ee6442ccda0aa234bd690a3be2cffa
|
refs/heads/develop
| 2023-08-16T22:59:28.165742
| 2022-06-03T05:23:39
| 2022-06-03T05:23:39
| 181,423,712
| 0
| 7
|
Apache-2.0
| 2022-08-15T08:46:04
| 2019-04-15T06:15:22
|
C++
|
UTF-8
|
Python
| false
| false
| 6,441
|
py
|
# -*- coding: UTF-8 -*-
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import argparse
import ast
import time
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph.nn import Linear
from paddle.distributed import fleet
from paddle.fluid.dygraph import nn
from paddle.fluid.framework import _test_eager_guard
from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage3 import GroupShardedStage3
from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import GroupShardedScaler
# Training length and RNG seeds for reproducible synthetic data.
epoch = 10
paddle.seed(2022)
np.random.seed(2022)
# NOTE(review): base_lr / momentum_rate / l2_decay are not referenced by the
# visible code in this file — presumably leftovers from a momentum-SGD variant.
base_lr = 0.1
momentum_rate = 0.9
l2_decay = 1e-4
class MLP(fluid.Layer):
    """Simple three-layer perceptron: two hidden layers of *linear_size*
    units followed by a 10-way output layer."""

    def __init__(self, linear_size=1000, param_attr=None, bias_attr=None):
        super(MLP, self).__init__()
        self._linear1 = Linear(linear_size, linear_size)
        self._linear2 = Linear(linear_size, linear_size)
        self._linear3 = Linear(linear_size, 10)

    def forward(self, inputs):
        # Feed the input through the three linear layers in sequence.
        hidden = self._linear1(inputs)
        hidden = self._linear2(hidden)
        return self._linear3(hidden)
def reader_decorator(linear_size=1000):
    """Return a generator factory producing 100 synthetic (image, label) pairs.

    Each sample is a uniform-random float32 vector of length *linear_size*
    together with a constant int64 label of 1.
    """
    def __reader__():
        for _ in range(100):
            sample = np.random.rand(linear_size).astype('float32')
            target = np.ones(1).astype('int64')
            yield sample, target
    return __reader__
def optimizer_setting(model, use_pure_fp16, opt_group=False):
    """Build an AdamW optimizer for *model* with global-norm gradient clipping.

    When *opt_group* is set, parameters are wrapped in a single parameter
    group; *use_pure_fp16* enables multi-precision master weights.
    """
    clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0)
    params = [{"params": model.parameters()}] if opt_group else model.parameters()
    optimizer = paddle.optimizer.AdamW(
        parameters=params,
        learning_rate=0.001,
        weight_decay=0.00001,
        grad_clip=clip,
        multi_precision=use_pure_fp16)
    return optimizer
def train_mlp(model,
              use_pure_fp16=False,
              accumulate_grad=False,
              offload=False,
              batch_size=100,
              convert2cpu=False):
    """Train *model* under GroupSharded stage-3 and return its parameters.

    Args:
        model: the network to train; wrapped in GroupShardedStage3 below.
        use_pure_fp16: decorate with AMP level O2 and use a sharded GradScaler.
        accumulate_grad: scale the loss by 1/5 and defer the optimizer step to
            the end of each epoch instead of stepping every batch.
        offload: enable parameter offloading inside GroupShardedStage3.
        batch_size: batch size for the synthetic reader.
        convert2cpu: gather parameters back to CPU at the end of training.
    """
    group = paddle.distributed.new_group([0, 1])  # two-rank sharding group
    optimizer = optimizer_setting(model=model, use_pure_fp16=use_pure_fp16)
    if use_pure_fp16:
        model = paddle.amp.decorate(
            models=model, level='O2', save_dtype='float32')
        scaler = paddle.amp.GradScaler(init_loss_scaling=32768)
        scaler = GroupShardedScaler(scaler)
    model = GroupShardedStage3(
        model,
        optimizer=optimizer,
        group=group,
        offload=offload,
        segment_size=2**15)
    train_reader = paddle.batch(
        reader_decorator(), batch_size=batch_size, drop_last=True)
    train_loader = paddle.io.DataLoader.from_generator(
        capacity=32,
        use_double_buffer=True,
        iterable=True,
        return_list=True,
        use_multiprocess=True)
    train_loader.set_sample_list_generator(train_reader)
    for eop in range(epoch):
        model.train()
        for batch_id, data in enumerate(train_loader()):
            img, label = data
            label.stop_gradient = True
            img.stop_gradient = True
            with paddle.amp.auto_cast(True, level='O2'):
                out = model(img)
                loss = paddle.nn.functional.cross_entropy(
                    input=out, label=label)
            avg_loss = paddle.mean(x=loss.cast(dtype=paddle.float32))
            if accumulate_grad:
                # Pre-scale the loss so 5 accumulated batches average out.
                avg_loss = avg_loss / 5
            if not use_pure_fp16:
                avg_loss.backward()
            else:
                scaler.scale(avg_loss).backward()
            if not accumulate_grad:
                # Step every batch when not accumulating gradients.
                if not use_pure_fp16:
                    optimizer.step()
                else:
                    scaler.step(optimizer)
                    scaler.update()
                optimizer.clear_grad()
        if accumulate_grad:
            # Accumulation mode: one optimizer step at the end of the epoch.
            if not use_pure_fp16:
                optimizer.step()
            else:
                scaler.step(optimizer)
                scaler.update()
            optimizer.clear_grad()
    if not convert2cpu:
        model.get_all_parameters()
    else:
        model.get_all_parameters(convert2cpu)
    return model.parameters()
def test_stage3_offload():
    """Check that stage-3 training with offload matches training without it.

    Seven identical MLPs share one initial state_dict; pairs are trained with
    and without offload under fp32, fp16 (looser tolerances), and fp32 with
    gradient accumulation, and their final parameters are compared.
    """
    paddle.distributed.init_parallel_env()
    mlp, mlp1, mlp2, mlp3, mlp4, mlp5, mlp6 = MLP(), MLP(), MLP(), MLP(), MLP(
    ), MLP(), MLP()
    state_dict = mlp.state_dict()
    mlp1.set_state_dict(state_dict)
    mlp2.set_state_dict(state_dict)
    mlp3.set_state_dict(state_dict)
    mlp4.set_state_dict(state_dict)
    mlp5.set_state_dict(state_dict)
    mlp6.set_state_dict(state_dict)
    # fp32 offload
    stage3_params = train_mlp(mlp1, use_pure_fp16=False)
    stage3_params_offload = train_mlp(mlp2, use_pure_fp16=False, offload=True)
    for i in range(len(stage3_params)):
        np.testing.assert_allclose(
            stage3_params[i].numpy(),
            stage3_params_offload[i].numpy(),
            rtol=1e-6,
            atol=1e-8)
    # fp16 offload (looser tolerances: half-precision accumulation noise)
    stage3_params = train_mlp(mlp3, use_pure_fp16=True)
    stage3_params_offload = train_mlp(mlp4, use_pure_fp16=True, offload=True)
    for i in range(len(stage3_params)):
        np.testing.assert_allclose(
            stage3_params[i].numpy(),
            stage3_params_offload[i].numpy(),
            rtol=1e-2,
            atol=1e-2)
    # fp32 accumulate grad offload, gathering parameters back to CPU
    stage3_params = train_mlp(
        mlp5, use_pure_fp16=False, batch_size=20, accumulate_grad=True)
    stage3_params_offload = train_mlp(
        mlp6,
        use_pure_fp16=False,
        accumulate_grad=True,
        offload=True,
        batch_size=20,
        convert2cpu=True)
    for i in range(len(stage3_params)):
        np.testing.assert_allclose(
            stage3_params[i].numpy(),
            stage3_params_offload[i].numpy(),
            rtol=1e-6,
            atol=1e-8)
    return

if __name__ == '__main__':
    # Requires eager mode for the group-sharded APIs used above.
    with _test_eager_guard():
        test_stage3_offload()
|
[
"noreply@github.com"
] |
sandyhouse.noreply@github.com
|
edf52ab76db2cfd99a0af88763f296ce469be40c
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/SjShRightSide/YW_GGQQ_QLFSJHA_191.py
|
7b402aecbf55309b2714b38ef26372383e256c92
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,954
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
import json
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/option/service")
from OptMainService import *
from OptQueryStkPriceQty import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from log import *
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/option/mysql")
from Opt_SqlData_Transfer import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from QueryOrderErrorMsg import queryOrderErrorMsg
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from env_restart import *
reload(sys)
sys.setdefaultencoding('utf-8')
class YW_GGQQ_QLFSJHA_191(xtp_test_case):
    # Regression case for stock options: sell-to-close (rights holder closing a
    # position) with a fill-or-kill best-price order, where the funds check is
    # expected to fail and the order to be rejected (error 11010120).
    def setUp(self):
        # Reset this case's fund assets, restart the test environment, then
        # re-login so the trading session sees the fresh state.
        sql_transfer = Opt_SqlData_Transfer()
        sql_transfer.transfer_fund_asset('YW_GGQQ_QLFSJHA_191')
        clear_data_and_restart_sh()
        Api.trade.Logout()
        Api.trade.Login()

    def test_YW_GGQQ_QLFSJHA_191(self):
        title = '卖平(权利方平仓):FOK市价全成或撤销-验资(可用资金不足)(下单金额<费用&&可用资金<(费用-下单金额))'
        # Define the expected values for this test case.
        # Expected status is one of: initial, unfilled, partially filled,
        # fully filled, partial-cancel reported, partially cancelled, reported
        # pending cancel, cancelled, rejected (废单), cancel-rejected,
        # internally cancelled.
        # xtp_ID and cancel_xtpID default to 0 and need not be changed.
        case_goal = {
            '期望状态': '废单',
            'errorID': 11010120,
            'errorMSG': queryOrderErrorMsg(11010120),
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Order parameter definition ------------------------------------------
        # Arguments: ticker, market, security type, security status, trading
        # status, side (B buy / S sell), expected status, Api.
        stkparm = QueryStkPriceQty('10001034', '1', '*', '1', '0', '*', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the case fails.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            logger.error('查询结果为False,错误原因: {0}'.format(
                json.dumps(rs['测试错误原因'], encoding='UTF-8', ensure_ascii=False)))
            self.assertEqual(rs['用例测试结果'], True)
        else:
            wt_reqs = {
                'business_type':Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_OPTION'],
                'order_client_id':1,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'position_effect':Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_CLOSE'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST_OR_CANCEL'],
                'price': stkparm['涨停价'],
                'quantity': 1
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = serviceTest(Api, case_goal, wt_reqs)
            if rs['用例测试结果']:
                logger.warning('执行结果为{0}'.format(str(rs['用例测试结果'])))
            else:
                logger.warning('执行结果为{0},{1},{2}'.format(
                    str(rs['用例测试结果']), str(rs['用例错误源']),
                    json.dumps(rs['用例错误原因'], encoding='UTF-8', ensure_ascii=False)))
            self.assertEqual(rs['用例测试结果'], True) # 4
if __name__ == '__main__':
unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
985a61e35a1166affcae13bcd6cc900782271bde
|
9825db945e7bfe68319b086e9fb7091a63645d5c
|
/transcribe/mommy_recipes.py
|
69965c1794946191d35991093b0493aa8e916c17
|
[] |
no_license
|
lupyanlab/telephone
|
e1ee095d5698dc228deec5ba5878a46b76d43f2d
|
136f27fb2b41263f53fba6bd44711cf57598a1a4
|
refs/heads/master
| 2020-04-12T07:33:05.849287
| 2017-04-04T01:16:14
| 2017-04-04T01:16:14
| 41,316,823
| 1
| 1
| null | 2015-11-17T04:44:40
| 2015-08-24T17:20:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,059
|
py
|
from unipath import Path
from django.conf import settings
from django.core.files import File
from model_mommy.recipe import Recipe, foreign_key, related
import grunt.models as grunt_models
import ratings.models as ratings_models
import transcribe.models as transcribe_models
# Shared model_mommy recipes for the test suite.  Every audio-bearing recipe
# reuses one File handle over a known-good test WAV shipped with the grunt app.
django_file_path = Path(settings.APP_DIR, 'grunt/tests/media/test-audio.wav')
assert django_file_path.exists()
django_file = File(open(django_file_path, 'rb'))

chain = Recipe(grunt_models.Chain,
    name = 'mommy_chain')

# Seed message: first message of a chain, no parent.
seed = Recipe(grunt_models.Message,
    chain = foreign_key(chain),
    audio = django_file)

# Recording: a message whose parent is the seed.
recording = Recipe(grunt_models.Message,
    chain = foreign_key(chain),
    parent = foreign_key(seed),
    audio = django_file)

transcription_survey = Recipe(transcribe_models.TranscriptionSurvey)

# A recording enrolled in a transcription survey.
message_to_transcribe = Recipe(transcribe_models.MessageToTranscribe,
    survey = foreign_key(transcription_survey),
    given = foreign_key(recording),
)

transcription = Recipe(transcribe_models.Transcription,
    message = foreign_key(message_to_transcribe))
|
[
"pierce.edmiston@gmail.com"
] |
pierce.edmiston@gmail.com
|
48983f1ea96897668bf4f661b7f0605254dc330d
|
02d1d89ed3c2a71a4f5a36f3a19f0683a0ae37e5
|
/navigation/sonar/maxsonar_class.py~
|
fbc039ae23b20329bb3f04d51f235e36260bfd38
|
[] |
no_license
|
lforet/robomow
|
49dbb0a1c873f75e11228e24878b1e977073118b
|
eca69d000dc77681a30734b073b2383c97ccc02e
|
refs/heads/master
| 2016-09-06T10:12:14.528565
| 2015-05-19T16:20:24
| 2015-05-19T16:20:24
| 820,388
| 11
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,504
|
#!/usr/bin/env python
import serial
import threading
import time
# need to find best way to search seria ports for find device
class MaxSonar(object):
def __init__(self):
self._isConnected = False
self._ser = self._open_serial_port()
self._should_stop = threading.Event()
self._start_reading()
self._data = 0
#self._port = port
def _open_serial_port(self):
while self._isConnected == False:
print "class MaxSonar: searching serial ports for ultrasonic sensor package..."
for i in range(11):
port = "/dev/ttyUSB"
port = port[0:11] + str(i)
print "class MaxSonar: searching on port:", port
time.sleep(.2)
try:
ser = serial.Serial(port, 9600, timeout=1)
data = ser.readline()
#print "data=", int(data[3:(len(data)-1)])
if data[0:2] == "s1":
#ser.write("a\n") # write a string
print "class MaxSonar: found ultrasonic sensor package on serial port: ", port
self._isConnected = True
#self._port = ser
time.sleep(.3)
break
except:
pass
for i in range(11):
port = "/dev/ttyACM"
port = port[0:11] + str(i)
print "class MaxSonar: searching on port:", port
time.sleep(.2)
try:
ser = serial.Serial(port, 9600, timeout=1)
data = ser.readline()
#print "data=", int(data[3:(len(data)-1)])
if data[0:2] == "s1":
#ser.write("a\n") # write a string
print "class MaxSonar: found ultrasonic sensor package on serial port: ", port
self._isConnected = True
#self._port = ser
time.sleep(.3)
break
except:
pass
if self._isConnected == False:
print "class MaxSonar: ultrasonic sensor package not found!"
time.sleep(1)
#print "returning", ser
return ser
def _start_reading(self):
def read():
#print self._should_stop.isSet()
#print self._ser.isOpen()
while not self._should_stop.isSet():
try:
data = self._ser.readline()
#print "recieved: ", data
#self._data = int(data[5:(len(data)-1)])
self._data = data[0:(len(data)-1)]
except:
try:
print "class MaxSonar:no connection...attempting to reconnect"
self._data = 0
self._isConnected = False
self._ser = self._open_serial_port()
time.sleep(.5)
except:
pass
thr = threading.Thread(target=read)
thr.start()
return thr
def stop(self):
self._should_stop.set()
self._read_thread.wait()
def distances_cm(self):
return self._data
|
[
"laird@isotope11.com"
] |
laird@isotope11.com
|
|
e10f712169e012afe52d661cee1079d73d473cf5
|
a4deea660ea0616f3b5ee0b8bded03373c5bbfa2
|
/executale_binaries/register-variants/vpblendvb_xmm_xmm_xmm_xmm.gen.vex.py
|
24114f1b5661dc25199fcafb0f1753e89bd770d4
|
[] |
no_license
|
Vsevolod-Livinskij/x86-64-instruction-summary
|
4a43472e26f0e4ec130be9a82f7e3f3c1361ccfd
|
c276edab1b19e3929efb3ebe7514489f66087764
|
refs/heads/master
| 2022-02-02T18:11:07.818345
| 2019-01-25T17:19:21
| 2019-01-25T17:19:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
# Load the instruction-variant binary with angr, print basic project metadata,
# then lift the entry-point basic block to VEX IR and pretty-print it.
# NOTE: Python 2 print statements; this script predates Python 3 support.
import angr
proj = angr.Project('vpblendvb_xmm_xmm_xmm_xmm.exe')
print proj.arch
print proj.entry
print proj.filename
irsb = proj.factory.block(proj.entry).vex
irsb.pp()
|
[
"sdasgup3@illinois.edu"
] |
sdasgup3@illinois.edu
|
73f759e34e0bb373fed5832be2d79bf6a4727643
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/pa3/benchmarks/prime-193.py
|
a9022462728d26ffa25a7f481c9f9b2ca1f06f24
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
# Get the n-th prime starting from 2
def get_prime(n:int) -> int:
    candidate:int = 2
    seen:int = 0
    while True:
        if is_prime(candidate):
            seen = seen + 1
            if seen == n:
                return candidate
        candidate = candidate + 1
    return 0 # Unreachable: the loop only exits via the return above
# Trial division: x is composite iff some d in [2, x) divides it.
# Note: by this definition 0 and 1 are reported prime (original behavior).
def is_prime(x:int) -> bool:
    d:int = 2
    while d < x:
        if x % d == 0:
            return False
        d = d + 1
    return True
# Input parameter
n:int = 15
# Run [1, n]
i:int = 1
# Crunch: emit the i-th prime for each i in [1, n].
# NOTE(review): $Exp(i) is a benchmark-template placeholder (this is a
# ChocoPy compiler test case), not plain Python — the harness substitutes a
# concrete expression here.
while i <= n:
    print($Exp(i))
    i = i + 1
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
bceb5d5fd70239529410c669bae6ea96ca0148fd
|
dbaec1262c8966d66512cadd343249786a8c266d
|
/tests/test_scraper.py
|
1d196eeb7ad9cd52a2a34f6599ba263fbe8d42da
|
[] |
no_license
|
andreqi/django-manolo
|
96e50021a843cff1c223692853993c5dbb685acd
|
01552875a47c4da90c89db6f0b9c05a269fa07ca
|
refs/heads/master
| 2020-04-01T18:00:59.213208
| 2014-11-20T21:28:04
| 2014-11-20T21:28:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,654
|
py
|
# -*- coding: utf-8 -*-
"""
test_django-manolo
------------
Tests for `django-manolo` models module.
"""
import datetime
from datetime import timedelta as td
import unittest
import dataset
import sqlalchemy
from django.conf import settings
from manolo.models import Manolo
from manolo.management.commands.scraper import Command
class TestManolo(unittest.TestCase):
    """Tests for the manolo scraper management command.

    setUp builds the sqlite schema columns the scraper expects (via the
    `dataset` library, outside Django migrations) and seeds three rows:
    a NULL date, a fixed historical date, and today's date.
    """

    def setUp(self):
        db = dataset.connect('sqlite:///' + settings.DATABASES['default']['NAME'])
        table = db['manolo_manolo']
        table.create_column('office', sqlalchemy.String(length=250))
        table.create_column('sha512', sqlalchemy.String(length=200))
        table.create_column('visitor', sqlalchemy.String(length=250))
        table.create_column('meeting_place', sqlalchemy.String(length=250))
        table.create_column('host', sqlalchemy.String(length=250))
        table.create_column('entity', sqlalchemy.String(length=250))
        table.create_column('objective', sqlalchemy.String(length=250))
        table.create_column('id_document', sqlalchemy.String(length=250))
        table.create_column('date', sqlalchemy.Date())
        table.create_column('time_start', sqlalchemy.String(length=100))
        table.create_column('time_end', sqlalchemy.String(length=100))
        Manolo.objects.get_or_create(date=None)
        Manolo.objects.get_or_create(date=datetime.date(2011, 7, 28))
        Manolo.objects.get_or_create(date=datetime.date.today())

    def test_get_last_date_in_db(self):
        d1 = Command()
        d1.__init__()
        # NOTE(review): this passes only if get_last_date_in_db() returns
        # today - 3 days, yet setUp seeds a row dated today — presumably the
        # command subtracts a 3-day margin; confirm against its implementation.
        result = d1.get_last_date_in_db() + td(3)
        self.assertEqual(result, datetime.date.today())
|
[
"aniversarioperu1@gmail.com"
] |
aniversarioperu1@gmail.com
|
15eb534642b2dbaf5eb98339e0cd15a18f7b59d6
|
4c228cca5bfdf3bd34dab2bedd7350ff501230b3
|
/tools/ex_network.py
|
daaafecdccaa5dfaf16ca6adbc450de5273af72f
|
[] |
no_license
|
gauenk/xi_uai18
|
3575b4b6db3393f4bc6a640a6b3c607a2d6bca6f
|
c24040f43e3d8779b7c2fff88f8ab787cf22c385
|
refs/heads/master
| 2022-02-24T04:29:14.037754
| 2020-04-04T16:57:17
| 2020-04-04T16:57:17
| 234,967,333
| 0
| 0
| null | 2021-03-29T23:13:54
| 2020-01-19T21:05:51
|
Python
|
UTF-8
|
Python
| false
| false
| 2,877
|
py
|
import plotly.graph_objects as go
import networkx as nx
def plot_network(G):
    """Render a networkx graph *G* as an interactive Plotly figure.

    Nodes must carry a ``pos`` attribute (x, y); node color encodes degree and
    the hover text shows each node's connection count.  Opens the figure via
    ``fig.show()``; returns nothing.
    """
    #
    # plot the edges (each edge contributes x0,x1,None so Plotly breaks lines)
    #
    edge_x = []
    edge_y = []
    for edge in G.edges():
        x0, y0 = G.nodes[edge[0]]['pos']
        x1, y1 = G.nodes[edge[1]]['pos']
        edge_x.append(x0)
        edge_x.append(x1)
        edge_x.append(None)
        edge_y.append(y0)
        edge_y.append(y1)
        edge_y.append(None)
    edge_trace = go.Scatter(
        x=edge_x, y=edge_y,
        line=dict(width=0.5, color='#888'),
        hoverinfo='none',
        mode='lines')
    #
    # plot the nodes; allow for hover
    #
    node_x = []
    node_y = []
    for node in G.nodes():
        x, y = G.nodes[node]['pos']
        node_x.append(x)
        node_y.append(y)
    node_trace = go.Scatter(
        x=node_x, y=node_y,
        mode='markers',
        hoverinfo='text',
        marker=dict(
            showscale=True,
            # colorscale options
            #'Greys' | 'YlGnBu' | 'Greens' | 'YlOrRd' | 'Bluered' | 'RdBu' |
            #'Reds' | 'Blues' | 'Picnic' | 'Rainbow' | 'Portland' | 'Jet' |
            #'Hot' | 'Blackbody' | 'Earth' | 'Electric' | 'Viridis' |
            colorscale='YlGnBu',
            reversescale=True,
            color=[],
            size=10,
            colorbar=dict(
                thickness=15,
                title='Node Connections',
                xanchor='left',
                titleside='right'
            ),
            line_width=2))
    #
    # set the hover text and color: both derived from each node's degree
    #
    node_adjacencies = []
    node_text = []
    for node, adjacencies in enumerate(G.adjacency()):
        node_adjacencies.append(len(adjacencies[1]))
        node_text.append('# of connections: '+str(len(adjacencies[1])))
    node_trace.marker.color = node_adjacencies
    node_trace.text = node_text
    #
    # assemble and show the figure (edges drawn first, nodes on top)
    #
    fig = go.Figure(data=[edge_trace, node_trace],
             layout=go.Layout(
                title='<br>Network graph made with Python',
                titlefont_size=16,
                showlegend=False,
                hovermode='closest',
                margin=dict(b=20,l=5,r=5,t=40),
                annotations=[ dict(
                    text="Python code: <a href='https://plot.ly/ipython-notebooks/network-graphs/'> https://plot.ly/ipython-notebooks/network-graphs/</a>",
                    showarrow=False,
                    xref="paper", yref="paper",
                    x=0.005, y=-0.002 ) ],
                xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
                yaxis=dict(showgrid=False, zeroline=False, showticklabels=False))
                )
    fig.show()

if __name__ == "__main__":
    # Demo: random geometric graph (nodes get 'pos' attributes automatically).
    G = nx.random_geometric_graph(200, 0.125)
    plot_network(G)
|
[
"kent.gauen@gmail.com"
] |
kent.gauen@gmail.com
|
2627bc5bee346deeab204fa6ec44e5c9cc13abfc
|
8b9e9de996cedd31561c14238fe655c202692c39
|
/hackerrank/hackerrank_AntiPalindromic_Strings.py
|
e118373c79db90167cc964fa3d7cbc28599314dd
|
[] |
no_license
|
monkeylyf/interviewjam
|
0049bc1d79e6ae88ca6d746b05d07b9e65bc9983
|
33c623f226981942780751554f0593f2c71cf458
|
refs/heads/master
| 2021-07-20T18:25:37.537856
| 2021-02-19T03:26:16
| 2021-02-19T03:26:16
| 6,741,986
| 59
| 31
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,030
|
py
|
"""hackerrank_AntiPalindromic_Strings.py
https://www.hackerrank.com/contests/101hack19/challenges/antipalindromic-strings
"""
def main():
    """Count antipalindromic strings of length n over an alphabet of size m.

    If n == 1 there are m such strings; if n == 2 there are m * (m - 1);
    for n >= 3 there are m * (m - 1) * (m - 2) ** (n - 2), all modulo 1e9+7
    (the code below uses base m - 2 for the power term).
    All arithmetic uses the modular helpers to keep intermediates small.
    NOTE: Python 2 — raw_input/xrange and print statements.
    """
    t = int(raw_input())  # number of test cases
    for _ in xrange(t):
        n, m = map(int, raw_input().split())
        if n == 1:
            print m
        elif n == 2:
            print multi_mod(m, m - 1)
        else:
            print multi_mod(multi_mod(m, m - 1), pow_mod(m - 2, n - 2))
def multi_mod(a, b, mod=10**9+7):
    """Return (a * b) % mod, reducing each operand first to bound intermediates."""
    lhs = a % mod
    rhs = b % mod
    return (lhs * rhs) % mod
def pow_mod(a, n, mod=10**9+7):
    """Return (a ** n) % mod by binary (square-and-multiply) exponentiation.

    Fix: the original halved the exponent with ``n /= 2``, which under
    Python 3 produces a float and silently corrupts the bit-scan (e.g. the
    2.5 -> 1.25 steps never satisfy ``n % 2 == 1``).  Floor division is
    identical under Python 2 and correct under Python 3.
    """
    if n == 0:
        return 1
    base = a
    ret = 1
    while n:
        if n % 2 == 1:
            ret = (ret * base) % mod
        base = (base * base) % mod
        n //= 2  # floor division keeps n an int on both Python 2 and 3
    return ret
if __name__ == '__main__':
main()
|
[
"laituan1986@gmail.com"
] |
laituan1986@gmail.com
|
fd9051b479df526f24d4937d65fc91e13c2b0021
|
837fcd0d7e40de15f52c73054709bd40264273d2
|
/More_exercise-master/Repeated_element_list.py
|
db01da889fb9c8d678166fab52513a0e563108a2
|
[] |
no_license
|
NEHAISRANI/Python_Programs
|
dee9e05ac174a4fd4dd3ae5e96079e10205e18f9
|
aa108a56a0b357ca43129e59377ac35609919667
|
refs/heads/master
| 2020-11-25T07:20:00.484973
| 2020-03-08T12:17:39
| 2020-03-08T12:17:39
| 228,554,399
| 0
| 1
| null | 2020-10-01T06:41:20
| 2019-12-17T07:04:31
|
Python
|
UTF-8
|
Python
| false
| false
| 477
|
py
|
# Python 2 exercise: collect the elements of list1 that also appear in list2,
# sorted ascending, implemented two ways.
list1 = [1, 342, 75, 23, 98]
list2 = [75, 23, 98, 12, 78, 10, 1]
# Variant 1: membership test via the `in` operator.
index=0
new=[]
while index<len(list1):
    if list1[index] in list2:
        new.append(list1[index])
    index=index+1
new.sort()
print new
#"-------------------"
# Variant 2: same result without the `in` operator (explicit nested scan).
index=0
new=[]
while index<len(list1):
    var1=0
    while var1<len(list2):
        if list1[index]==list2[var1]:
            new.append(list1[index])
        var1=var1+1
    index=index+1
new.sort()
print new
|
[
"nehai18@navgurukul.org"
] |
nehai18@navgurukul.org
|
e6c242f7656466c344365678cdf6869daa23683b
|
8dbb2a3e2286c97b1baa3ee54210189f8470eb4d
|
/kubernetes-stubs/client/models/v2beta2_metric_target.pyi
|
e58a49afa7ff802b89fffd13f6b7a5441e8e92a4
|
[] |
no_license
|
foodpairing/kubernetes-stubs
|
e4b0f687254316e6f2954bacaa69ff898a88bde4
|
f510dc3d350ec998787f543a280dd619449b5445
|
refs/heads/master
| 2023-08-21T21:00:54.485923
| 2021-08-25T03:53:07
| 2021-08-25T04:45:17
| 414,555,568
| 0
| 0
| null | 2021-10-07T10:26:08
| 2021-10-07T10:26:08
| null |
UTF-8
|
Python
| false
| false
| 694
|
pyi
|
import datetime
import typing
import kubernetes.client
class V2beta2MetricTarget:
    # Typed stub for the Kubernetes autoscaling/v2beta2 MetricTarget object.
    # NOTE(review): per the Kubernetes API, `type` selects which of the three
    # optional target fields applies — confirm against the upstream API docs.
    average_utilization: typing.Optional[int]
    average_value: typing.Optional[str]
    type: str
    value: typing.Optional[str]
    def __init__(
        self,
        *,
        average_utilization: typing.Optional[int] = ...,
        average_value: typing.Optional[str] = ...,
        type: str,
        value: typing.Optional[str] = ...
    ) -> None: ...
    # Serialize to the camelCase dict form declared below in this module.
    def to_dict(self) -> V2beta2MetricTargetDict: ...
class V2beta2MetricTargetDict(typing.TypedDict, total=False):
    # camelCase dict mirror of V2beta2MetricTarget; total=False makes every
    # key optional in the dict form.
    averageUtilization: typing.Optional[int]
    averageValue: typing.Optional[str]
    type: str
    value: typing.Optional[str]
|
[
"nikhil.benesch@gmail.com"
] |
nikhil.benesch@gmail.com
|
9f4ade293e4deed7bf08590e33fecbb9a8b287d9
|
35b58dedc97622b1973456d907ede6ab86c0d966
|
/Test/2020年4月29日/001.py
|
c8895768d64c6ebb9f086308a28346ccff33c6e5
|
[] |
no_license
|
GithubLucasSong/PythonProject
|
7bb2bcc8af2de725b2ed9cc5bfedfd64a9a56635
|
e3602b4cb8af9391c6dbeaebb845829ffb7ab15f
|
refs/heads/master
| 2022-11-23T05:32:44.622532
| 2020-07-24T08:27:12
| 2020-07-24T08:27:12
| 282,165,132
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
# Scratch script exercising the pinter/bank test API with `requests`.
# The earlier regex/JSON placeholder-substitution experiment is kept below
# for reference.
# import re
# import json
# sss = '{"testfan-token": "${neeo_001>data>data}$"}'
#
# find = re.findall('\${.*?}\$',sss)
#
# for i in find:
#     find = i
#     print(find)
#
# print(re.sub(find,'1',sss))
import requests
# response = requests.request(method='get', url='http://www.neeo.cc:6002/pinter/bank/api/query2',params={"userName": "admin"},headers={"testfan-token": "c818ced87fb94411a5c1db99672ec3d7"})
# print(response.json())
response = requests.request(method='post', url='http://www.neeo.cc:6002/pinter/bank/api/login',data={"userName": "admin", "password": "1234"})
# Fix: `response.j` is not an attribute of requests.Response and raised
# AttributeError; matching the commented GET example, decode the JSON body.
print(response.json())
|
[
"1433880147@qq.com"
] |
1433880147@qq.com
|
f373f7fb41cef8b368430594ebbdee6e8ea6d030
|
30ab9750e6ca334941934d1727c85ad59e6b9c8a
|
/zentral/contrib/nagios/events/__init__.py
|
32a043fbbf1aebc95d5d5f0da9196c0e025d5d16
|
[
"Apache-2.0"
] |
permissive
|
ankurvaishley/zentral
|
57e7961db65278a0e614975e484927f0391eeadd
|
a54769f18305c3fc71bae678ed823524aaa8bb06
|
refs/heads/main
| 2023-05-31T02:56:40.309854
| 2021-07-01T07:51:31
| 2021-07-01T14:15:34
| 382,346,360
| 1
| 0
|
Apache-2.0
| 2021-07-02T12:55:47
| 2021-07-02T12:55:47
| null |
UTF-8
|
Python
| false
| false
| 1,158
|
py
|
import logging
from zentral.core.events import event_cls_from_type, register_event_type
from zentral.core.events.base import BaseEvent
logger = logging.getLogger('zentral.contrib.nagios.events')
ALL_EVENTS_SEARCH_DICT = {"tag": "nagios"}
class NagiosEvent(BaseEvent):
    # Base class for all nagios-sourced events; the shared tag is what
    # ALL_EVENTS_SEARCH_DICT matches on.
    tags = ["nagios"]

class NagiosHostEvent(NagiosEvent):
    # Host-level nagios notification.
    event_type = "nagios_host_event"

register_event_type(NagiosHostEvent)

class NagiosServiceEvent(NagiosEvent):
    # Service-level nagios notification.
    event_type = "nagios_service_event"

register_event_type(NagiosServiceEvent)
def post_nagios_event(nagios_instance, user_agent, ip, data):
    """Validate and post one nagios event payload.

    Pops ``event_type`` out of *data* (note: mutates the caller's dict),
    rejects missing/unknown types with a warning, annotates the payload with
    the source nagios instance, and posts it through the matching event class.
    """
    event_type = data.pop("event_type", None)
    if not event_type:
        logger.warning("Missing event_type in nagios event payload")
        return
    elif event_type not in ['nagios_host_event', 'nagios_service_event']:
        logger.warning("Wrong event_type %s in nagios event payload", event_type)
        return
    data["nagios_instance"] = {"id": nagios_instance.id,
                               "url": nagios_instance.url}
    event_cls = event_cls_from_type(event_type)
    # serial_number=None: the payload is not tied to a specific machine here.
    event_cls.post_machine_request_payloads(None, user_agent, ip, [data])
|
[
"eric.falconnier@112hz.com"
] |
eric.falconnier@112hz.com
|
a5febbe7a5eedfbbabe6af1b6c0a253823fdc6b5
|
16b389c8dcace7f7d010c1fcf57ae0b3f10f88d3
|
/docs/jnpr_healthbot_swagger/test/test_topic_schema_variable.py
|
d8cf50a23cf30844a79d2a6d4c4d3e87e9c010c1
|
[
"Apache-2.0"
] |
permissive
|
Juniper/healthbot-py-client
|
e4e376b074920d745f68f19e9309ede0a4173064
|
0390dc5d194df19c5845b73cb1d6a54441a263bc
|
refs/heads/master
| 2023-08-22T03:48:10.506847
| 2022-02-16T12:21:04
| 2022-02-16T12:21:04
| 210,760,509
| 10
| 5
|
Apache-2.0
| 2022-05-25T05:48:55
| 2019-09-25T05:12:35
|
Python
|
UTF-8
|
Python
| false
| false
| 955
|
py
|
# coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 3.1.0
Contact: healthbot-feedback@juniper.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.topic_schema_variable import TopicSchemaVariable # noqa: E501
from swagger_client.rest import ApiException
class TestTopicSchemaVariable(unittest.TestCase):
    """TopicSchemaVariable unit test stubs (swagger-codegen generated)."""

    def setUp(self):
        # No fixtures needed for the generated stub.
        pass

    def tearDown(self):
        pass

    def testTopicSchemaVariable(self):
        """Test TopicSchemaVariable"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.topic_schema_variable.TopicSchemaVariable()  # noqa: E501
        pass

if __name__ == '__main__':
    unittest.main()
|
[
"sharanyab@juniper.net"
] |
sharanyab@juniper.net
|
60154971e033303df3ec37c5af4870bd330cbc8c
|
aabcf7b509608af70ce9fa6e7665837f6b6984b0
|
/bincrafters_envy/main.py
|
14ef7546bb5c5216fd5a45f5e21aae151fb2df3d
|
[
"MIT"
] |
permissive
|
bincrafters/bincrafters-envy
|
2573177e83c8ec0687eff9c76cbc0c79b1a4135c
|
584ea39c16927ca3d1ffc68b32ec8d77627c27e0
|
refs/heads/develop
| 2023-06-08T10:55:37.920810
| 2019-07-26T08:35:48
| 2019-07-26T08:35:48
| 113,282,817
| 1
| 0
|
MIT
| 2023-06-01T12:24:40
| 2017-12-06T07:21:00
|
Python
|
UTF-8
|
Python
| false
| false
| 266
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys

# Import the implementation module; the split presumably works around the
# different package/relative-import behavior between Python 3 and Python 2
# — confirm before simplifying.
if sys.version_info.major == 3:
    from bincrafters_envy import bincrafters_envy
else:
    import bincrafters_envy

def run():
    """Console-script entry point: forward CLI args (minus argv[0]) to main()."""
    bincrafters_envy.main(sys.argv[1:])

if __name__ == '__main__':
    run()
|
[
"uilianries@gmail.com"
] |
uilianries@gmail.com
|
c867e8178c3e307027a310c680b5dc60c0a7aeba
|
7395af9906200bb7135201ede8e238c0afb46c65
|
/public_api/api_requests/create_transaction.py
|
6edaac4ade0a40754dce2345e1b754c36cfb54fa
|
[] |
no_license
|
bellomusodiq/public-api
|
6fd21d91f9df4e1ef75d2f43f3d2ad59afc1f30c
|
20b59ecc67ac6c969a9c47991f385e538762c2a6
|
refs/heads/master
| 2023-01-02T05:30:53.797873
| 2020-10-27T20:19:51
| 2020-10-27T20:19:51
| 305,782,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,364
|
py
|
import requests
import json
import random
import string
from .config import base_url
def generate_random_string():
    """Return a 20-character random alphanumeric string (test reference suffix)."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(20))
# POST target for trade creation on the test API.
url = "{}/test/transactions".format(base_url)

def create_transaction(access_token, investor_id, instructions,
    trade_date_limit, trade_action, trade_price_limit, trade_effective_date,
    trade_units, stock_code):
    """POST a test trade to the public API and return the decoded JSON reply.

    Builds an INVESTOR-account payload with a randomized "s-..." transaction
    reference and a bearer-token Authorization header.
    NOTE(review): cscs_number is hard-coded to "67393940" — presumably a
    fixed test account; confirm before reusing outside tests.
    """
    payload = {
        "investor_id":investor_id,
        "transaction_ref":"s-{}".format(generate_random_string()),
        "cscs_number": "67393940",
        "instructions": instructions,
        "trade_date_limit": trade_date_limit,
        "trade_effective_date": trade_effective_date,
        "trade_action": trade_action,
        "trade_price_limit": str(trade_price_limit),
        "trade_units": str(trade_units),
        "stock_code": stock_code,
        "trade_account_type":"INVESTOR"
    }
    headers = {
        'authorization': 'Bearer {}'.format(access_token),
        'content-type': 'application/json'
    }
    response = requests.request("POST", url, headers=headers, data = json.dumps(payload))
    return response.json()

# Sample responses observed from the endpoint:
"""
b'{"status":200,"message":"success","trade_status":"Success","transaction_ref":"s-x12daeadvd"}'
b'{"status":400,"errors":["This Investor does not exist"]}'
"""
|
[
"bmayowa25@gmail.com"
] |
bmayowa25@gmail.com
|
77c35e61da685b86d7d099062b817c4d4650011c
|
aee144770c8f4ec5987777aebe5b064e558fc474
|
/doc/integrations/pytorch/parlai/tasks/mnist_qa/agents.py
|
df1f01e28be9434fde8528ad3cb0ea9b583c46d5
|
[
"CC-BY-SA-3.0",
"MIT",
"Apache-2.0",
"AGPL-3.0-only"
] |
permissive
|
adgang/cortx
|
1d8e6314643baae0e6ee93d4136013840ead9f3b
|
a73e1476833fa3b281124d2cb9231ee0ca89278d
|
refs/heads/main
| 2023-04-22T04:54:43.836690
| 2021-05-11T00:39:34
| 2021-05-11T00:39:34
| 361,394,462
| 1
| 0
|
Apache-2.0
| 2021-04-25T10:12:59
| 2021-04-25T10:12:59
| null |
UTF-8
|
Python
| false
| false
| 2,362
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This is a simple question answering task on the MNIST dataset. In each episode, agents
are presented with a number, which they are asked to identify.
Useful for debugging and checking that one's image model is up and running.
"""
from parlai.core.teachers import DialogTeacher
from parlai.utils.io import PathManager
from .build import build
import json
import os
def _path(opt):
build(opt)
dt = opt['datatype'].split(':')[0]
labels_path = os.path.join(opt['datapath'], 'mnist', dt, 'labels.json')
image_path = os.path.join(opt['datapath'], 'mnist', dt)
return labels_path, image_path
class MnistQATeacher(DialogTeacher):
"""
This version of MNIST inherits from the core Dialog Teacher, which just requires it
to define an iterator over its data `setup_data` in order to inherit basic metrics,
a `act` function, and enables Hogwild training with shared memory with no extra
work.
"""
def __init__(self, opt, shared=None):
self.datatype = opt['datatype'].split(':')[0]
labels_path, self.image_path = _path(opt)
opt['datafile'] = labels_path
self.id = 'mnist_qa'
self.num_strs = [
'zero',
'one',
'two',
'three',
'four',
'five',
'six',
'seven',
'eight',
'nine',
]
super().__init__(opt, shared)
def label_candidates(self):
return [str(x) for x in range(10)] + self.num_strs
def setup_data(self, path):
print('loading: ' + path)
with PathManager.open(path) as labels_file:
self.labels = json.load(labels_file)
self.question = 'Which number is in the image?'
episode_done = True
for i in range(len(self.labels)):
img_path = os.path.join(self.image_path, '%05d.bmp' % i)
label = [self.labels[i], self.num_strs[int(self.labels[i])]]
yield (self.question, label, None, None, img_path), episode_done
class DefaultTeacher(MnistQATeacher):
pass
|
[
"noreply@github.com"
] |
adgang.noreply@github.com
|
0be675f1f85ba5f732fc877fca398ee196184613
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/fibo_20200709155400.py
|
3b16f98c3d18b786e0d8adc98afc754847029ae4
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
# solving the fibonaci sequence using recursion and dynamic programming
# Recursion
# Base case if n = 1 || n == 2 then fibo is 1
def fibo(n):
if n == 1:
result = 1
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
d6f72acbd5a87945e02e30a1fbc7fa53ce292903
|
724317c256e3c57e8573f74334be31f39ba34eb9
|
/scripts/graphquestions/insert_to_db.py
|
08cadc30d76f276e8821af87e44558d87e2df6ee
|
[
"Apache-2.0"
] |
permissive
|
pkumar2618/UDepLambda
|
a36662014fc23465aff587761810b986e4dad6dd
|
08f00b7dc99bb06c6912e9e83f47c32ebdd38eff
|
refs/heads/master
| 2022-11-25T09:22:41.444891
| 2020-08-01T08:56:52
| 2020-08-01T08:56:52
| 282,916,873
| 0
| 0
|
Apache-2.0
| 2020-07-27T14:09:32
| 2020-07-27T14:09:32
| null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
#!/usr/bin/python
import MySQLdb
# connect
db = MySQLdb.connect(host="rudisha.inf.ed.ac.uk", user="root", passwd="ammuma1234", db="gq_german")
cursor = db.cursor()
# execute SQL select statement
cursor.execute("SELECT * FROM sentences")
# commit your changes
db.commit()
# get the number of rows in the resultset
numrows = int(cursor.rowcount)
# get and display one row at a time.
for x in range(0,numrows):
row = cursor.fetchone()
print row[0], "-->", row[1]
|
[
"siva@sivareddy.in"
] |
siva@sivareddy.in
|
e019f16db7ba4fbd11cc190bd1425769fda97daa
|
9d5b0bcc105f7a99e545dd194d776a8f37b08501
|
/tf_quant_finance/math/integration/simpson.py
|
d011b774ef2f030af974719aec196bfd567db508
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
priyalorha/tf-quant-finance
|
ab082a9bd6d22fd3ea9a3adcf67a35dc23460588
|
72ce8231340b27b047279012ffe97aeb79117cdf
|
refs/heads/master
| 2023-02-23T11:08:30.161283
| 2021-02-01T13:57:43
| 2021-02-01T13:58:14
| 334,980,881
| 1
| 0
|
Apache-2.0
| 2021-02-01T14:45:14
| 2021-02-01T14:45:14
| null |
UTF-8
|
Python
| false
| false
| 3,828
|
py
|
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Composite Simpson's algorithm for numeric integration."""
import tensorflow.compat.v2 as tf
def simpson(func, lower, upper, num_points=1001, dtype=None, name=None):
"""Evaluates definite integral using composite Simpson's 1/3 rule.
Integrates `func` using composite Simpson's 1/3 rule [1].
Evaluates function at points of evenly spaced grid of `num_points` points,
then uses obtained values to interpolate `func` with quadratic polynomials
and integrates these polynomials.
#### References
[1] Weisstein, Eric W. "Simpson's Rule." From MathWorld - A Wolfram Web
Resource. http://mathworld.wolfram.com/SimpsonsRule.html
#### Example
```python
f = lambda x: x*x
a = tf.constant(0.0)
b = tf.constant(3.0)
integrate(f, a, b, num_points=1001) # 9.0
```
Args:
func: Python callable representing a function to be integrated. It must be a
callable of a single `Tensor` parameter and return a `Tensor` of the same
shape and dtype as its input. It will be called with a `Tesnor` of shape
`lower.shape + [n]` (where n is integer number of points) and of the same
`dtype` as `lower`.
lower: `Tensor` or Python float representing the lower limits of
integration. `func` will be integrated between each pair of points defined
by `lower` and `upper`.
upper: `Tensor` of the same shape and dtype as `lower` or Python float
representing the upper limits of intergation.
num_points: Scalar int32 `Tensor`. Number of points at which function `func`
will be evaluated. Must be odd and at least 3.
Default value: 1001.
dtype: Optional `tf.Dtype`. If supplied, the dtype for the `lower` and
`upper`. Result will have the same dtype.
Default value: None which maps to dtype of `lower`.
name: Python str. The name to give to the ops created by this function.
Default value: None which maps to 'integrate_simpson_composite'.
Returns:
`Tensor` of shape `func_batch_shape + limits_batch_shape`, containing
value of the definite integral.
"""
with tf.compat.v1.name_scope(
name, default_name='integrate_simpson_composite', values=[lower, upper]):
lower = tf.convert_to_tensor(lower, dtype=dtype, name='lower')
dtype = lower.dtype
upper = tf.convert_to_tensor(upper, dtype=dtype, name='upper')
num_points = tf.convert_to_tensor(
num_points, dtype=tf.int32, name='num_points')
assertions = [
tf.debugging.assert_greater_equal(num_points, 3),
tf.debugging.assert_equal(num_points % 2, 1),
]
with tf.compat.v1.control_dependencies(assertions):
dx = (upper - lower) / (tf.cast(num_points, dtype=dtype) - 1)
dx_expand = tf.expand_dims(dx, -1)
lower_exp = tf.expand_dims(lower, -1)
grid = lower_exp + dx_expand * tf.cast(tf.range(num_points), dtype=dtype)
weights_first = tf.constant([1.0], dtype=dtype)
weights_mid = tf.tile(
tf.constant([4.0, 2.0], dtype=dtype), [(num_points - 3) // 2])
weights_last = tf.constant([4.0, 1.0], dtype=dtype)
weights = tf.concat([weights_first, weights_mid, weights_last], axis=0)
return tf.reduce_sum(func(grid) * weights, axis=-1) * dx / 3
|
[
"tf-quant-finance-robot@google.com"
] |
tf-quant-finance-robot@google.com
|
46914611421331bc8c3b99a2f18da0e2a7b11766
|
eb3c6e228a05e773fad89b42da0f54a1febbd096
|
/plenum/bls/bls_bft_utils.py
|
67d709cfdbd21e7a593db606e4930394d7383904
|
[
"Apache-2.0"
] |
permissive
|
amitkumarj441/indy-plenum
|
1f45e0c095b9aa27e8306e29c896aa1441a20229
|
7cbcdecd5e6e290530fe0d5e02d9ea70ab1c9516
|
refs/heads/master
| 2021-07-21T02:19:24.219993
| 2017-10-27T08:48:03
| 2017-10-27T08:48:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
def create_full_root_hash(root_hash, pool_root_hash):
"""
Utility method for creating full root hash that then can be signed
by multi signature
"""
return root_hash + pool_root_hash
|
[
"alexander.sherbakov@dsr-company.com"
] |
alexander.sherbakov@dsr-company.com
|
5f3ef402e43e381527b710f81ee8970f9ac7c5a1
|
aaa204ad7f134b526593c785eaa739bff9fc4d2a
|
/tests/system/providers/google/marketing_platform/example_analytics.py
|
0d8f94ec38e552ad8bfc9ef564376ae17a7b7d4e
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
cfei18/incubator-airflow
|
913b40efa3d9f1fdfc5e299ce2693492c9a92dd4
|
ffb2078eb5546420864229cdc6ee361f89cab7bd
|
refs/heads/master
| 2022-09-28T14:44:04.250367
| 2022-09-19T16:50:23
| 2022-09-19T16:50:23
| 88,665,367
| 0
| 1
|
Apache-2.0
| 2021-02-05T16:29:42
| 2017-04-18T20:00:03
|
Python
|
UTF-8
|
Python
| false
| false
| 3,724
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that shows how to use Google Analytics 360.
"""
from __future__ import annotations
import os
from datetime import datetime
from airflow import models
from airflow.providers.google.marketing_platform.operators.analytics import (
GoogleAnalyticsDataImportUploadOperator,
GoogleAnalyticsDeletePreviousDataUploadsOperator,
GoogleAnalyticsGetAdsLinkOperator,
GoogleAnalyticsListAccountsOperator,
GoogleAnalyticsModifyFileHeadersDataImportOperator,
GoogleAnalyticsRetrieveAdsLinksListOperator,
)
ENV_ID = os.environ.get("SYSTEM_TESTS_ENV_ID")
DAG_ID = "example_google_analytics"
ACCOUNT_ID = os.environ.get("GA_ACCOUNT_ID", "123456789")
BUCKET = os.environ.get("GMP_ANALYTICS_BUCKET", "test-airflow-analytics-bucket")
BUCKET_FILENAME = "data.csv"
WEB_PROPERTY_ID = os.environ.get("GA_WEB_PROPERTY", "UA-12345678-1")
WEB_PROPERTY_AD_WORDS_LINK_ID = os.environ.get("GA_WEB_PROPERTY_AD_WORDS_LINK_ID", "rQafFTPOQdmkx4U-fxUfhj")
DATA_ID = "kjdDu3_tQa6n8Q1kXFtSmg"
with models.DAG(
DAG_ID,
schedule='@once', # Override to match your needs,
start_date=datetime(2021, 1, 1),
catchup=False,
tags=["example", "analytics"],
) as dag:
# [START howto_marketing_platform_list_accounts_operator]
list_account = GoogleAnalyticsListAccountsOperator(task_id="list_account")
# [END howto_marketing_platform_list_accounts_operator]
# [START howto_marketing_platform_get_ads_link_operator]
get_ad_words_link = GoogleAnalyticsGetAdsLinkOperator(
web_property_ad_words_link_id=WEB_PROPERTY_AD_WORDS_LINK_ID,
web_property_id=WEB_PROPERTY_ID,
account_id=ACCOUNT_ID,
task_id="get_ad_words_link",
)
# [END howto_marketing_platform_get_ads_link_operator]
# [START howto_marketing_platform_retrieve_ads_links_list_operator]
list_ad_words_link = GoogleAnalyticsRetrieveAdsLinksListOperator(
task_id="list_ad_link", account_id=ACCOUNT_ID, web_property_id=WEB_PROPERTY_ID
)
# [END howto_marketing_platform_retrieve_ads_links_list_operator]
upload = GoogleAnalyticsDataImportUploadOperator(
task_id="upload",
storage_bucket=BUCKET,
storage_name_object=BUCKET_FILENAME,
account_id=ACCOUNT_ID,
web_property_id=WEB_PROPERTY_ID,
custom_data_source_id=DATA_ID,
)
delete = GoogleAnalyticsDeletePreviousDataUploadsOperator(
task_id="delete",
account_id=ACCOUNT_ID,
web_property_id=WEB_PROPERTY_ID,
custom_data_source_id=DATA_ID,
)
transform = GoogleAnalyticsModifyFileHeadersDataImportOperator(
task_id="transform",
storage_bucket=BUCKET,
storage_name_object=BUCKET_FILENAME,
)
upload >> [delete, transform]
from tests.system.utils import get_test_run # noqa: E402
# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
|
[
"noreply@github.com"
] |
cfei18.noreply@github.com
|
f706c41962f21c4b764b0b4ccea05a2eed8290b9
|
881ca022fb16096610b4c7cec84910fbd304f52b
|
/libs/scapy/contrib/__init__.py
|
d1ce31ce09076bfc086bfb4b8be14c6235ba16f5
|
[] |
no_license
|
mdsakibur192/esp32_bluetooth_classic_sniffer
|
df54a898c9b4b3e2b5d85b1c00dd597d52844d9f
|
7e8be27455f1d271fb92c074cb5118cc43854561
|
refs/heads/master
| 2023-07-31T14:29:22.989311
| 2021-09-08T11:18:21
| 2021-09-08T11:18:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
py
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
"""
Package of contrib modules that have to be loaded explicitly.
"""
|
[
"mgarbelix@gmail.com"
] |
mgarbelix@gmail.com
|
a693f848a13454a6cfa0984f201bdd2971733ff4
|
a39d0d1f0e257d0fff5de58e3959906dafb45347
|
/PythonTricks/DataStructures/arays.py
|
4dd812657bd50c08d6dda6321c7d0a1f08ba79a0
|
[] |
no_license
|
Twishar/Python
|
998d7b304070b621ca7cdec548156ca7750ef38e
|
1d1afa79df1aae7b48ac690d9b930708767b6d41
|
refs/heads/master
| 2021-09-23T14:18:36.195494
| 2018-09-24T12:33:36
| 2018-09-24T12:33:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 768
|
py
|
import array
arr = ['one', 'two', 'three']
print(arr[0])
print(arr)
# Lists are mutable:
arr[1] = 'hello'
print(arr)
del arr[1]
print(arr)
# Lists can hold arbitrary data types:
arr.append(23)
print(arr)
# Tuple - immutable containers
arr = 'one', 'two', 'three'
print(arr[0])
print(arr)
# arr[1] = 'hello'
# del arr[1]
# Tuples can hold arbitrary data types:
# Adding elements creates a copy of the tuple
print(arr + (23,))
arr = array.array('f', (1.0, 1.5, 2.0, 2.5))
print(arr[1])
arr[1] = 23.9
print(arr)
del arr[1]
arr.append(42.94)
# Arrays are "typed"
# arr[1] = 'hello'
# STR
arr = 'abcd'
# string are immutable
print(list('abcd'))
# bytes - immutable Arrays of Single Bytes
arr_b = bytes((0, 1, 2, 3))
print(arr_b)
# arr[1] = 23
# del arr[1]
|
[
"stognienkovv@gmail.com"
] |
stognienkovv@gmail.com
|
f704cf61cb51810d863d902fd775d9cbbf0da782
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02238/s136410577.py
|
efae5c2b562906dd438512222d9cbfb70501d96a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 744
|
py
|
#coding: utf-8
n = int(input())
color = ["white" for i in range(n)]
d = [[] for i in range(n)]
global t
t = 0
M = [[False for i in range(n)] for j in range(n)]
for i in range(n):
data = list(map(int,input().split()))
u = data[0]
k = data[1]
if i == 0:
start = u
for v in data[2:2+k]:
M[u-1][v-1] = True
def search(u,t):
t += 1
color[u-1] = "gray"
d[u-1].append(t)
for v in range(1,n+1):
if M[u-1][v-1] and color[v-1] == "white":
t = search(v,t)
color[u-1] = "black"
t += 1
d[u-1].append(t)
return t
t = search(start, t)
for i in range(1,n+1):
if color[i-1] == "white":
t = search(i, t)
for i in range(n):
print(i+1, d[i][0], d[i][1])
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
aa4a7a7bec6c8d765c3e813b46ac392fb2f243d9
|
98032c5363d0904ba44e1b5c1b7aa0d31ed1d3f2
|
/Chapter10/ch10/race_with_lock.py
|
5b939cb8db37805feba1ecc83d58524902c0916b
|
[
"MIT"
] |
permissive
|
PacktPublishing/Learn-Python-Programming-Second-Edition
|
7948b309f6e8b146a5eb5e8690b7865cb76136d5
|
54fee44ff1c696df0c7da1e3e84a6c2271a78904
|
refs/heads/master
| 2023-05-12T08:56:52.868686
| 2023-01-30T09:59:05
| 2023-01-30T09:59:05
| 138,018,499
| 65
| 44
|
MIT
| 2023-02-15T20:04:34
| 2018-06-20T10:41:13
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 576
|
py
|
import threading
from time import sleep
from random import random
counter = 0
randsleep = lambda: sleep(0.1 * random())
def incr(n):
global counter
for count in range(n):
with incr_lock:
current = counter
randsleep()
counter = current + 1
randsleep()
n = 5
incr_lock = threading.Lock()
t1 = threading.Thread(target=incr, args=(n, ))
t2 = threading.Thread(target=incr, args=(n, ))
t1.start()
t2.start()
t1.join()
t2.join()
print(f'Counter: {counter}')
"""
$ python race.py
Counter: 10 # every time
"""
|
[
"33118647+romydias@users.noreply.github.com"
] |
33118647+romydias@users.noreply.github.com
|
d448574fb3725a8d6dc5fef6401a51fda2584702
|
70fa6468c768d4ec9b4b14fc94fa785da557f1b5
|
/lib/googlecloudsdk/command_lib/error_reporting/exceptions.py
|
0ff475910503910e5c0b0a043a2e41e2f0aa50de
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
kylewuolle/google-cloud-sdk
|
d43286ef646aec053ecd7eb58566ab2075e04e76
|
75f09ebe779e99fdc3fd13b48621fe12bfaa11aa
|
refs/heads/master
| 2020-04-20T22:10:41.774132
| 2019-01-26T09:29:26
| 2019-01-26T09:29:26
| 169,131,028
| 0
| 0
|
NOASSERTION
| 2019-02-04T19:04:40
| 2019-02-04T18:58:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,036
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions for the error-reporting surface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.core import exceptions
class CannotOpenFileError(exceptions.Error):
"""Cannot open file."""
def __init__(self, f, e):
super(CannotOpenFileError, self).__init__(
'Failed to open file [{f}]: {e}'.format(f=f, e=e))
|
[
"cloudsdk.mirror@gmail.com"
] |
cloudsdk.mirror@gmail.com
|
91b849e900044ed54bf41ef89839d496d7edea56
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_gunslinger.py
|
05089d95d9ae89b34ed5d0ce0aaa83ee93969be4
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
#calss header
class _GUNSLINGER():
def __init__(self,):
self.name = "GUNSLINGER"
self.definitions = [u'especially in the past in North America, someone who is good at shooting guns and is employed for protection or to kill people']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
748ba57cf89dcdd66b898d345938928ec78c11b0
|
523f8f5febbbfeb6d42183f2bbeebc36f98eadb5
|
/32_best.py
|
99e35e658b452b484a3f56877cf8d1ea52efde61
|
[] |
no_license
|
saleed/LeetCode
|
655f82fdfcc3000400f49388e97fc0560f356af0
|
48b43999fb7e2ed82d922e1f64ac76f8fabe4baa
|
refs/heads/master
| 2022-06-15T21:54:56.223204
| 2022-05-09T14:05:50
| 2022-05-09T14:05:50
| 209,430,056
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 594
|
py
|
class Solution(object):
def longestValidParentheses(self, s):
"""
:type s: str
:rtype: int
"""
if len(s) == 0:
return 0
dp = [0] * len(s)
for i in range(len(s)):
if s[i] == "(":
dp[i] = 0
else:
if i - 1 >= 0 and i - 1 - dp[i - 1] >= 0 and s[i - 1 - dp[i - 1]] == "(":
dp[i] = 2 + dp[i - 1]
if i - 1 - dp[i - 1] - 1 >= 0:
dp[i] += dp[i - 1 - dp[i - 1] - 1]
print(dp)
return max(dp)
|
[
"noelsun@mowennaierdeMacBook-Pro.local"
] |
noelsun@mowennaierdeMacBook-Pro.local
|
4888153745ea34d4c15768a4a8e942d57823c159
|
71e324d2e7c9557a9cfec01997a44a66539ac2e6
|
/Chapter_08/object_3_seperate.py
|
bc01ecd017f07f5efe52cb0fb90a2fb4a449db82
|
[] |
no_license
|
ulillilu/Python_Practice
|
2706a72b22243f4d76bf239f552bd7da2615c1ef
|
f2b238176f7e68b3fa0674ce4951aaa4206c15d3
|
refs/heads/master
| 2022-11-29T17:58:52.035022
| 2020-08-13T17:01:49
| 2020-08-13T17:01:49
| 287,334,050
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,047
|
py
|
#객체를 처리하는 함수
def create_student(name, korean, math, english, science):
return{
"name": name,
"korean": korean,
"math": math,
"english": english,
"science": science
}
def student_get_sum(student):
return student["korean"] + student["math"] + student["english"] + student["science"]
def student_get_average(student):
return student_get_sum(student) / 4
def student_to_string(student):
return "{}\t{}\t{}".format(
student["name"],
student_get_sum(student),
student_get_average(student)
)
students = [
create_student("윤인성", 87, 98, 88, 95),
create_student("연하진", 92, 98, 96, 98),
create_student("구지연", 76, 96, 94, 90),
create_student("나선주", 98, 92, 96, 92),
create_student("윤아린", 95, 98, 98, 98),
create_student("윤명월", 94, 88, 92, 92)
]
print ("이름", "총점", "평균", sep="\t")
for student in students:
print(student_to_string(student))
|
[
"noreply@github.com"
] |
ulillilu.noreply@github.com
|
d0fb24f28c2ec27cd9e6e2a7952e61012fa0dc50
|
1fe0b680ce53bb3bb9078356ea2b25e572d9cfdc
|
/venv/lib/python2.7/site-packages/ansible/modules/cloud/azure/azure_rm_routetable.py
|
1dc6180ba8335b5645e02e1f39d13a54a3e496bc
|
[
"MIT"
] |
permissive
|
otus-devops-2019-02/devopscourses_infra
|
1929c4a9eace3fdb0eb118bf216f3385fc0cdb1c
|
e42e5deafce395af869084ede245fc6cff6d0b2c
|
refs/heads/master
| 2020-04-29T02:41:49.985889
| 2019-05-21T06:35:19
| 2019-05-21T06:35:19
| 175,780,457
| 0
| 1
|
MIT
| 2019-05-21T06:35:20
| 2019-03-15T08:35:54
|
HCL
|
UTF-8
|
Python
| false
| false
| 6,015
|
py
|
#!/usr/bin/python
#
# Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_routetable
version_added: "2.7"
short_description: Manage Azure route table resource.
description:
- Create, update or delete a route table.
options:
resource_group:
description:
- name of resource group.
required: true
name:
description:
- name of the route table.
required: true
state:
description:
- Assert the state of the route table. Use C(present) to create or update and
C(absent) to delete.
default: present
choices:
- absent
- present
disable_bgp_route_propagation:
description:
- Specified whether to disable the routes learned by BGP on that route table.
type: bool
default: False
location:
description:
- Region of the resource.
- Derived from C(resource_group) if not specified
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Yuwei Zhou (@yuwzho)"
'''
EXAMPLES = '''
- name: Create a route table
azure_rm_routetable:
resource_group: myResourceGroup
name: myRouteTable
disable_bgp_route_propagation: False
tags:
purpose: testing
- name: Delete a route table
azure_rm_routetable:
resource_group: myResourceGroup
name: myRouteTable
state: absent
'''
RETURN = '''
changed:
description: Whether the resource is changed.
returned: always
type: bool
id:
description: resource id.
returned: success
type: str
'''
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, normalize_location_name
class AzureRMRouteTable(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
location=dict(type='str'),
disable_bgp_route_propagation=dict(type='bool', default=False)
)
self.resource_group = None
self.name = None
self.state = None
self.location = None
self.tags = None
self.disable_bgp_route_propagation = None
self.results = dict(
changed=False
)
super(AzureRMRouteTable, self).__init__(self.module_arg_spec,
supports_check_mode=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
# Set default location
self.location = resource_group.location
self.location = normalize_location_name(self.location)
result = dict()
changed = False
result = self.get_table()
if self.state == 'absent' and result:
changed = True
if not self.check_mode:
self.delete_table()
elif self.state == 'present':
if not result:
changed = True # create new route table
else: # check update
update_tags, self.tags = self.update_tags(result.tags)
if update_tags:
changed = True
if self.disable_bgp_route_propagation != result.disable_bgp_route_propagation:
changed = True
if changed:
result = self.network_models.RouteTable(location=self.location,
tags=self.tags,
disable_bgp_route_propagation=self.disable_bgp_route_propagation)
if not self.check_mode:
result = self.create_or_update_table(result)
self.results['id'] = result.id if result else None
self.results['changed'] = changed
return self.results
def create_or_update_table(self, param):
try:
poller = self.network_client.route_tables.create_or_update(self.resource_group, self.name, param)
return self.get_poller_result(poller)
except Exception as exc:
self.fail("Error creating or updating route table {0} - {1}".format(self.name, str(exc)))
def delete_table(self):
try:
poller = self.network_client.route_tables.delete(self.resource_group, self.name)
result = self.get_poller_result(poller)
return result
except Exception as exc:
self.fail("Error deleting virtual network {0} - {1}".format(self.name, str(exc)))
def get_table(self):
try:
return self.network_client.route_tables.get(self.resource_group, self.name)
except CloudError as cloud_err:
# Return None iff the resource is not found
if cloud_err.status_code == 404:
self.log('{0}'.format(str(cloud_err)))
return None
self.fail('Error: failed to get resource {0} - {1}'.format(self.name, str(cloud_err)))
except Exception as exc:
self.fail('Error: failed to get resource {0} - {1}'.format(self.name, str(exc)))
def main():
AzureRMRouteTable()
if __name__ == '__main__':
main()
|
[
"skydevapp@gmail.com"
] |
skydevapp@gmail.com
|
afc8f2bf08df0703f759b13e99e9b4c3ff9e26a4
|
e2255da9f41a3ca592f5042c96ec8dc1f5ceba21
|
/google/appengine/ext/mapreduce/api/map_job/__init__.py
|
9347a8bdc947888f6886b093371cbbe34aac3d61
|
[
"Apache-2.0"
] |
permissive
|
KronnyEC/cliques
|
2a2b1eb0063017f3dbe7de6a42a98d21a7cffb37
|
2fd66c4c4ea4552ab8ef6d738613f618a1a74fc7
|
refs/heads/master
| 2021-01-24T05:05:49.142434
| 2014-08-18T06:30:32
| 2014-08-18T06:42:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 853
|
py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Map job package."""
from .map_job_config import JobConfig
from .map_job_context import JobContext
from .map_job_context import ShardContext
from .map_job_context import SliceContext
from .map_job_control import Job
from .mapper import Mapper
|
[
"josh@pcsforeducation.com"
] |
josh@pcsforeducation.com
|
8c18a2515beac0972d4760bcf73d68aec9f59e15
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02702/s763305864.py
|
c1f2a064512072f127149a2d43913b8cc8dc8cd7
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
S = input()
dp = [0]*(len(S)+1)
cur = int(S[len(S)-1])
mod_10 = 1
count_num = [0]*2019
count_num[0] += 1
for i in range(len(S)):
dp[len(S)-i-1] = cur
count_num[cur] += 1
mod_10 = (mod_10*10)%2019
if i <= len(S)-2:
cur = (cur+int(S[len(S)-i-2])*(mod_10))%2019
ans = 0
for i in range(2019):
ans += (count_num[i]*(count_num[i]-1))//2
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
e13fe5af3a9acaff486417fedd87270c31830d4c
|
396f93d8e73c419ef82a94174815a2cecbb8334b
|
/.history/tester2_20200321232603.py
|
3011254a2f67dbc34040fee7f27f648bd06eec26
|
[] |
no_license
|
mirfarzam/ArtificialIntelligence-HeuristicAlgorithm-TabuSearch
|
8c73d9448b916009c9431526864a4441fdeb682a
|
90b2dca920c85cddd7c1b3335344ac7b10a9b061
|
refs/heads/master
| 2021-03-26T21:16:42.561068
| 2020-04-17T21:44:26
| 2020-04-17T21:44:26
| 247,750,502
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,912
|
py
|
import os
import subprocess
import re
from datetime import datetime
import time
from statistics import mean

# Benchmark driver: runs the external tabu-search binary repeatedly and
# summarizes how quickly it converges to the known optimum tour (5644 km).
numberOfTests = 10
tabuIteration = '10'
tabuDuration = '0'
numberOfCities = '50'

final_solution = []   # best tour length reported by each run
list_coverage = []    # iteration at which each run converged

print(f"\n\nTest for Tabu Search with this config: \n\tIterations : {tabuIteration} \n\tDuration(Tabu Memory): {tabuDuration} \n\tNumber of Cities: {numberOfCities}")

for i in range(0, numberOfTests):
    # Launch the solver and capture its stdout; the distances file is
    # selected to match the configured city count.
    process = subprocess.Popen(['./algo_tabou.exe', tabuIteration, tabuDuration, numberOfCities, 'distances_entre_villes_{}.txt'.format(numberOfCities)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    result = stdout
    # Flatten all whitespace so the regexes below can scan a single line.
    result = re.sub(r'\s', ' ', str(result))
    # The last "NNNN km" occurrence is the final (best) tour length.
    solution = (re.findall(r'([0-9]{4}) km', result))[-1]
    final_solution.append(int(solution))
    coverage = re.findall(r'On est dans un minimum local a l\'iteration ([0-9]+) ->', result)
    if coverage != []:
        coverage = int(coverage[0]) + 1
    else:
        # No local-minimum message: assume convergence at iteration 5.
        coverage = 5
    number_of_solution_before_coverage = coverage
    list_coverage.append(coverage)
    print('best found solution is {} and found in interation {}, number of solutions before coverage : {}'.format(solution, coverage, number_of_solution_before_coverage))
    time.sleep(1)

print("Summery:")
optimum_result = len(list(filter(lambda x: x == 5644, final_solution)))
print(f'number of optimum solution found is {optimum_result}, so in {numberOfTests} runs of test we faced {(optimum_result/numberOfTests)*100}% coverage')
# BUG FIX: the best case is the *minimum* convergence iteration; the original
# printed max(list_coverage) for both the worst and the best case.
print(f'in average this test shows that we found the global optimum solution in iteration {mean(list_coverage)}\nand in worst we found it in iteration {max(list_coverage)} \nand in best case in iteration {min(list_coverage)}')
print(f'Totally, {sum(list_coverage)} cities visited before finding the global optimum in {numberOfTests} runs of this test\n\n\n')
|
[
"farzam.mirmoeini@gmail.com"
] |
farzam.mirmoeini@gmail.com
|
10d81e12691ea7edbbac63eee7f183d1e0842d8a
|
fc746b644a2f4d07508e84b0d162c0f2ef07076d
|
/build/orocos_kinematics_dynamics/catkin_generated/generate_cached_setup.py
|
174396b513f66f2b974b95d28716ad2d3160a197
|
[] |
no_license
|
andreatitti97/thesis_ws
|
d372146246b8c9b74d25e1310e6e79f9e0270cc4
|
c59d380abe7be47ea2d7812e416dee7298c20db8
|
refs/heads/main
| 2023-03-16T12:26:17.676403
| 2021-03-15T13:22:59
| 2021-03-15T13:22:59
| 340,458,689
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,352
|
py
|
# -*- coding: utf-8 -*-
"""Generate a cached environment setup script for a catkin package.

Auto-generated by catkin: locates catkin's Python package (from the install
space or a workspace underlay), produces ``setup_cached.sh`` for the
orocos_kinematics_dynamics package, and marks the result user-executable.
"""
from __future__ import print_function
import os
import stat
import sys

# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in '/home/andrea/thesis_ws/devel;/opt/ros/kinetic'.split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script

# Build the cached environment (a list of shell lines) from the package env.sh.
code = generate_environment_script('/home/andrea/thesis_ws/devel/.private/orocos_kinematics_dynamics/env.sh')

output_filename = '/home/andrea/thesis_ws/build/orocos_kinematics_dynamics/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    # print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))

# Make the generated setup script executable for the owning user.
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
[
"andrea.tiranti97@gmail.com"
] |
andrea.tiranti97@gmail.com
|
60fb4c4eb6e60fca24c3bb874dd487c384022e84
|
e811662c890217c77b60aa2e1295dd0f5b2d4591
|
/src/problem_145.py
|
da0952c5573f00aae6e48903d71a6426d47dd221
|
[] |
no_license
|
rewonderful/MLC
|
95357f892f8cf76453178875bac99316c7583f84
|
7012572eb192c29327ede821c271ca082316ff2b
|
refs/heads/master
| 2022-05-08T05:24:06.929245
| 2019-09-24T10:35:22
| 2019-09-24T10:35:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,256
|
py
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
from TreeNode import TreeNode
def postorderTraversal(self, root):
    """Iterative postorder (left, right, root) traversal.

    Trick: produce a root-right-left sequence with a plain stack (push left
    child, then right, so the right side is popped first), then reverse the
    collected values — the reversal is exactly left-right-root.
    """
    if root is None:
        return []
    pending = [root]
    reversed_order = []
    while pending:
        node = pending.pop()
        reversed_order.append(node.val)
        for child in (node.left, node.right):
            if child is not None:
                pending.append(child)
    return reversed_order[::-1]
def postorderTraversal2(self, root):
    """Iterative postorder using a one-pass stack plus an 'expanded' marker set.

    :type root: TreeNode
    :rtype: List[int]
    """
    if not root:
        return []
    result = []
    expanded = set()
    pending = [root]
    while pending:
        node = pending[-1]
        if node in expanded:
            # Both subtrees already emitted — emit the node itself.
            result.append(pending.pop().val)
        else:
            # First visit: schedule children (right below left so left pops
            # first) and mark the node as expanded.
            if node.right:
                pending.append(node.right)
            if node.left:
                pending.append(node.left)
            expanded.add(node)
    return result
def postorderTraversal1(self, root):
    """Recursive postorder: left subtree, right subtree, then root.

    BUG FIX: the original body recursed via ``self.postorderTraversal``,
    which fails for these module-level definitions (there is no enclosing
    class instance when called as ``postorderTraversal1(None, t)``).
    It now recurses through the module-level name itself.
    """
    if root is None:
        return []
    return (postorderTraversal1(self, root.left)
            + postorderTraversal1(self, root.right)
            + [root.val])
if __name__ == '__main__':
    # 1,2,3,4,5,6,7
    t1 = TreeNode(1)
    t2 = TreeNode(2)
    t3 = TreeNode(3)
    t4 = TreeNode(4)
    t5 = TreeNode(5)
    t6 = TreeNode(6)
    t7 = TreeNode(7)
    # t1.left = t2
    # t1.right = t3
    # t2.left = t4
    # t2.right = t5
    # t3.left = t6
    # t3.right = t7
    t1.right = t2
    t2.left = t3
    # BUG FIX: postorderTraversal is a module-level function with signature
    # (self, root); the original call passed only t1, which bound it to
    # 'self' and omitted 'root' entirely (TypeError at runtime).
    print(postorderTraversal(None, t1))
|
[
"457261336@qq.com"
] |
457261336@qq.com
|
4a69e480891dda49ef8586ab48e9e957f44d391d
|
327981aeef801fec08305d70270deab6f08bc122
|
/19.网络编程/TCP编程/2.客户端与服务器端的数据交互/client.py
|
659f33d99bf8eb92f58accb100300281589e8207
|
[] |
no_license
|
AWangHe/Python-basis
|
2872db82187b169226271c509778c0798b151f50
|
2e3e9eb6da268f765c7ba04f1aefc644d50c0a29
|
refs/heads/master
| 2020-03-20T12:15:44.491323
| 2018-06-15T08:24:19
| 2018-06-15T08:24:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
# -*- coding: utf-8 -*-
# Simple interactive TCP client: connects to a server, then loops forever
# sending user-typed lines and printing each reply.
import socket

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # IPv4, TCP
client.connect(('192.168.43.240', 8083))  # demo server address, hard-coded

count = 0  # messages sent so far (incremented but otherwise unused)
while True:
    count += 1
    # Prompt (Chinese): "Enter the data to send to the server:"
    data = input("请输入给服务器发送的数据:")
    client.send(data.encode("utf-8"))
    # NOTE(review): assumes each reply fits in a single 1024-byte recv.
    info = client.recv(1024)
    # Prints (Chinese): "The server says: <reply>"
    print("服务器说:", info.decode("utf-8"))
|
[
"huanji2209747841@foxmail.com"
] |
huanji2209747841@foxmail.com
|
95282fad8921847fbce1b8d8cf3e6b80655c0234
|
d2f71636c17dc558e066d150fe496343b9055799
|
/eventi/receipts/urls.py
|
adc4aac61ed9506ad430a00c7e224d076c9b8818
|
[
"MIT"
] |
permissive
|
klebercode/lionsclub
|
9d8d11ad6083d25f6d8d92bfbae9a1bbfa6d2106
|
60db85d44214561d20f85673e8f6c047fab07ee9
|
refs/heads/master
| 2020-06-11T19:45:39.974945
| 2015-04-05T01:11:57
| 2015-04-05T01:11:57
| 33,409,707
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
# coding: utf-8
from django.conf.urls import patterns, url
# URL routes for the receipts app, using Django's legacy string-based
# ``patterns`` helper with the view-module path as the common prefix.
urlpatterns = patterns(
    'eventi.receipts.views',
    # Receipt listing / entry view.
    url(r'^$', 'receipt', name='receipt'),
    # Detail view for one receipt, selected by numeric id captured in group 1.
    url(r'^(\d+)/$', 'detail', name='detail'),
)
|
[
"kleberr@msn.com"
] |
kleberr@msn.com
|
777ff814dd92fd8c87e5d20a934a54207ca894cf
|
d3b7a7a922eb9999f22c99c0cc3908d7289ca27e
|
/tests/multi_processing/multi_process_queue.py
|
907844ddda04cef496883f0bd2b010512fd7341b
|
[
"Apache-2.0"
] |
permissive
|
g3l0o/plaso
|
b668203c2c7cf8799a1c12824ee1bdc8befd3980
|
ae29d853a6bcdd1530ce9320a3af7b3f122941ac
|
refs/heads/master
| 2020-12-25T20:31:08.928709
| 2016-07-22T20:00:33
| 2016-07-22T20:00:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 997
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests the multi-processing queue."""
import unittest
from plaso.multi_processing import multi_process_queue
from tests import test_lib as shared_test_lib
from tests.engine import test_lib as engine_test_lib
class MultiProcessingQueueTest(shared_test_lib.BaseTestCase):
  """Tests the multi-processing queue object."""

  # Sample payload; a frozenset so it cannot be mutated between tests.
  _ITEMS = frozenset([u'item1', u'item2', u'item3', u'item4'])

  def testPushPopItem(self):
    """Tests the PushItem and PopItem functions."""
    # A timeout is used to prevent the multi processing queue to close and
    # stop blocking the current process
    test_queue = multi_process_queue.MultiProcessingQueue(timeout=0.1)

    for item in self._ITEMS:
      test_queue.PushItem(item)

    # Drain the queue and verify every pushed item was consumed.
    test_queue_consumer = engine_test_lib.TestQueueConsumer(test_queue)
    test_queue_consumer.ConsumeItems()

    self.assertEqual(test_queue_consumer.number_of_items, len(self._ITEMS))


if __name__ == '__main__':
  unittest.main()
|
[
"onager@deerpie.com"
] |
onager@deerpie.com
|
bd309d0b656942f65a5f6031b1475317b8f6cf1f
|
79661312d54643ce9dcfe3474058f514b01bfbe6
|
/ScikitLearn/ElasticNet_1f.py
|
9591fda25cf74ca65630946558d5e9573e4ea026
|
[] |
no_license
|
davis-9fv/Project
|
5c4c8ac03f5bf9db28704e63de9b004f56a52f10
|
f2bd22b3ac440b91d1d1defc8da9e2ba2e67265e
|
refs/heads/master
| 2020-03-20T22:24:07.244521
| 2019-02-28T16:58:04
| 2019-02-28T16:58:04
| 137,796,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 965
|
py
|
# http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.ElasticNet.html#sklearn.linear_model.ElasticNet
from sklearn import linear_model
from sklearn.linear_model import ElasticNet
import numpy as np
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from math import sqrt
# Load the shampoo-sales dataset: column 0 is the feature, column 1 the target.
series = read_csv('../tests/shampoo-sales3.csv', header=0)
raw_data = series.values

# Hold out a third of the rows; fixed random_state keeps the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(raw_data[:, 0], raw_data[:, 1], test_size=0.33, random_state=9)
# scikit-learn expects 2-D feature matrices, so reshape the 1-D columns.
X_train = X_train.reshape(X_train.shape[0], 1)
X_test = X_test.reshape(X_test.shape[0], 1)

# Fit ElasticNet with default regularization; report learned parameters.
regr = ElasticNet(random_state=0)
regr.fit(X_train, y_train)
print(regr.coef_)
print(regr.intercept_)

y_predicted = regr.predict(X_test)
print('y_test: ')
print(y_test)
print('y_predicted: ')
print(y_predicted)

# Root-mean-squared error on the held-out split.
rmse = sqrt(mean_squared_error(y_test, y_predicted))
print('Test RMSE: %.7f' % (rmse))
|
[
"francisco.vinueza@alterbios.com"
] |
francisco.vinueza@alterbios.com
|
872970618a04f2ca7f58bc8040f04ba42271524b
|
d8fd7f56537d3c4ad4c99965a0a451c5442b704f
|
/endlesshandsome/wsgi.py
|
29a7d6b605fc1118dfa24a6ccc1d6d200ee9a31b
|
[] |
no_license
|
EndlessHandsome/endless-handsome
|
8febdc5edbaed973922b7c31d903d19d4361dc32
|
59f1c3e52bd43c765177288ced755b081db0c746
|
refs/heads/master
| 2020-04-06T06:57:24.027547
| 2016-08-19T03:51:55
| 2016-08-19T03:51:55
| 65,613,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
"""
WSGI config for endlesshandsome project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Fall back to this project's settings module unless one is already configured.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "endlesshandsome.settings")

# Module-level WSGI callable imported by application servers (gunicorn, uWSGI, ...).
application = get_wsgi_application()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
4a865beed91c9a8e8b4658315602570354bd4770
|
f4b011992dd468290d319d078cbae4c015d18338
|
/Array/counting_element_in_two_array.py
|
f63a8421ab898a5ab3b5dcbed4c726b0b2a93aef
|
[] |
no_license
|
Neeraj-kaushik/Geeksforgeeks
|
deca074ca3b37dcb32c0136b96f67beb049f9592
|
c56de368db5a6613d59d9534de749a70b9530f4c
|
refs/heads/master
| 2023-08-06T05:00:43.469480
| 2021-10-07T13:37:33
| 2021-10-07T13:37:33
| 363,420,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
def counting_element(li, li1):
    """For each value in ``li``, count how many values in ``li1`` are <= it.

    Prints the resulting list of counts (preserving the original behavior)
    and also returns it so callers can use the result programmatically.

    The original nested loops were O(len(li) * len(li1)); sorting ``li1``
    once and binary-searching per element makes this O((n + m) log m).
    """
    import bisect  # local import keeps the module's import surface unchanged

    sorted_li1 = sorted(li1)
    # bisect_right gives the number of elements <= value in the sorted list.
    counts = [bisect.bisect_right(sorted_li1, value) for value in li]
    print(counts)
    return counts
# Read the two array sizes from stdin (n and m are consumed to honor the
# input format but otherwise unused — lengths come from the lines themselves).
n = int(input())
m = int(input())
li = [int(x) for x in input().split()]
li1 = [int(x) for x in input().split()]
counting_element(li, li1)
|
[
"nkthecoder@gmail.com"
] |
nkthecoder@gmail.com
|
8cb9fad16805ff1eef128d7408246c8350a45bdf
|
5b683c7f0cc23b1a2b8927755f5831148f4f7e1c
|
/Python_Study/DataStructureAndAlgorithm/剑指Offer/Solution21py
|
129b991defc62a3357595192e3b5ddc9ee1ac835
|
[] |
no_license
|
Shmilyqjj/Shmily-py
|
970def5a53a77aa33b93404e18c57130f134772a
|
770fc26607ad3e05a4d7774a769bc742582c7b64
|
refs/heads/master
| 2023-09-02T04:43:39.192052
| 2023-08-31T03:28:39
| 2023-08-31T03:28:39
| 199,372,223
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 306
|
#!/usr/bin/env python
# encoding: utf-8
"""
:Description:剑指Offer 21
:Author: 佳境Shmily
:Create Time: 2020/4/29 11:59
:File: Solution21
:Site: shmily-qjj.top
"""
class Solution:
    def jumpFloor(self, number):
        """Number of distinct ways to climb ``number`` steps taking 1 or 2 at a time.

        Classic "Sword Offer" frog-jump problem: f(1) = 1, f(2) = 2 and
        f(n) = f(n-1) + f(n-2) — a shifted Fibonacci sequence, computed
        iteratively in O(number) time and O(1) space.

        Returns 0 for non-positive ``number``. (The original body was an
        unimplemented ``pass`` stub that always returned None.)
        """
        if number <= 0:
            return 0
        ways, next_ways = 1, 2
        for _ in range(number - 1):
            ways, next_ways = next_ways, ways + next_ways
        return ways
if __name__ == '__main__':
    # Instantiate the solver; add jumpFloor calls here for manual runs.
    solver = Solution()
|
[
"710552907@qq.com"
] |
710552907@qq.com
|
|
d7857e8040c986cb578fcb3f8736cbe77f1ee7cb
|
d8cbc94a4207337d709a64447acb9c8fe501c75a
|
/evaluation/code/utils/checkpoint.py
|
0ce4488ec35b22f34e1d615616e0b445ee73a941
|
[
"MIT"
] |
permissive
|
sripathisridhar/acav100m
|
6f672384fa723a637d94accbbe11a9a962f5f87f
|
13b438b6ce46d09ba6f79aebb84ad31dfa3a8e6f
|
refs/heads/master
| 2023-09-06T01:05:21.188822
| 2021-11-18T08:08:08
| 2021-11-18T08:08:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,887
|
py
|
import os
import shutil
from collections import OrderedDict
import torch
import utils.logging as logging
logger = logging.get_logger(__name__)
def load_checkpoint(model, state_dict, data_parallel=False):
    """Load trained weights from a checkpoint into ``model``.

    Args:
        model (model): model to receive the weights.
        state_dict (OrderedDict): checkpoint state.
        data_parallel (bool): if true, ``model`` is wrapped by
            torch.nn.parallel.DistributedDataParallel and the underlying
            module is unwrapped before loading.
    """
    target = model.module if data_parallel else model
    target.load_state_dict(state_dict)
def load_pretrained_checkpoint(model, state_dict, data_parallel=False):
    """Load pretrained conv weights into ``model``, skipping head layers.

    Only keys containing 'visual_conv' or 'audio_conv' (and not 'head') are
    taken from the checkpoint, then further filtered to keys the model
    actually declares, and loaded non-strictly.

    Args:
        model (model): model to receive the weights.
        state_dict (OrderedDict): pretrained checkpoint state.
        data_parallel (bool): if true, ``model`` is wrapped by
            torch.nn.parallel.DistributedDataParallel.
    """
    target = model.module if data_parallel else model
    current = target.state_dict()
    wanted = OrderedDict(
        (key, value) for key, value in state_dict.items()
        if ('visual_conv' in key or 'audio_conv' in key) and 'head' not in key
    )
    filtered = {key: value for key, value in wanted.items() if key in current}
    target.load_state_dict(filtered, strict=False)
def save_checkpoint(state, is_best=False, filename='checkpoint.pyth'):
    """Persist model state, mirroring the best model to ``model_best.pyth``.

    Args:
        state (Dict): model states.
        is_best (bool): whether this model is the best-performing so far.
        filename (str): destination path for the checkpoint.
    """
    torch.save(state, filename)
    if not is_best:
        return
    # Keep a stable copy of the best checkpoint alongside the latest one.
    shutil.copyfile(filename, 'model_best.pyth')
|
[
"sangho.lee@vision.snu.ac.kr"
] |
sangho.lee@vision.snu.ac.kr
|
702e2678b812860fd99d5d5961d919bb4fd981e8
|
6b033e3dddc280417bb97500f72e68d7378c69d6
|
/V. Algorithm/ii. Site/D. BOJ/Dynamic Programming/2193.py
|
4b960384965a4371013ee1182733e2531ed328a8
|
[] |
no_license
|
inyong37/Study
|
e5cb7c23f7b70fbd525066b6e53b92352a5f00bc
|
e36252a89b68a5b05289196c03e91291dc726bc1
|
refs/heads/master
| 2023-08-17T11:35:01.443213
| 2023-08-11T04:02:49
| 2023-08-11T04:02:49
| 128,149,085
| 11
| 0
| null | 2022-10-07T02:03:09
| 2018-04-05T02:17:17
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 267
|
py
|
# Count binary-like n-digit numbers that start with 1 and (per the listed
# examples) contain no "11" — the counts follow a Fibonacci recurrence.
# n = 0: cnt = 0
# n = 1: 1 cnt = 1
# n = 2: 10 cnt = 1
# n = 3: 100, 101 cnt = 2
# n = 4: 1010, 1001, 1000 cnt =3
# n = 5: 10000, 10001, 10010, 10101, 10100 cnt =5
n = int(input())
# dp[i] = count for i digits; seeded so dp[1] = dp[2] = 1.
dp = [0, 1, 1]
for i in range(3, n+1):
    dp.append(dp[i-2] + dp[i-1])
print(dp[n])
|
[
"inyong1020@gmail.com"
] |
inyong1020@gmail.com
|
6d6ea4a6da71a4dd55b9827ed63099c095f2893a
|
70fa6468c768d4ec9b4b14fc94fa785da557f1b5
|
/lib/surface/config/configurations/list.py
|
31105c5866eaeddb88a441350c50f64724d0caa9
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
kylewuolle/google-cloud-sdk
|
d43286ef646aec053ecd7eb58566ab2075e04e76
|
75f09ebe779e99fdc3fd13b48621fe12bfaa11aa
|
refs/heads/master
| 2020-04-20T22:10:41.774132
| 2019-01-26T09:29:26
| 2019-01-26T09:29:26
| 169,131,028
| 0
| 0
|
NOASSERTION
| 2019-02-04T19:04:40
| 2019-02-04T18:58:36
|
Python
|
UTF-8
|
Python
| false
| false
| 2,213
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to list named configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
from googlecloudsdk.core.configurations import named_configs
from googlecloudsdk.core.configurations import properties_file
import six
class List(base.ListCommand):
  """Lists existing named configurations."""

  detailed_help = {
      'DESCRIPTION': """\
          {description}

          Run `$ gcloud topic configurations` for an overview of named
          configurations.
          """,
      'EXAMPLES': """\
          To list all available configurations, run:

            $ {command}
          """,
  }

  @staticmethod
  def Args(parser):
    # Named configurations are few; paging and URI output do not apply.
    base.PAGE_SIZE_FLAG.RemoveFromParser(parser)
    base.URI_FLAG.RemoveFromParser(parser)
    parser.display_info.AddFormat("""table(
        name,
        is_active,
        properties.core.account,
        properties.core.project,
        properties.compute.zone:label=DEFAULT_ZONE,
        properties.compute.region:label=DEFAULT_REGION)
    """)

  def Run(self, args):
    # Yield one row per configuration, sorted by configuration name.
    configs = named_configs.ConfigurationStore.AllConfigs()
    for _, config in sorted(six.iteritems(configs)):
      # Read each configuration's properties file in isolation;
      # list_unset=True keeps unset keys visible in the output table.
      props = properties.VALUES.AllValues(
          list_unset=True,
          properties_file=properties_file.PropertiesFile([config.file_path]),
          only_file_contents=True)
      yield {
          'name': config.name,
          'is_active': config.is_active,
          'properties': props,
      }
|
[
"cloudsdk.mirror@gmail.com"
] |
cloudsdk.mirror@gmail.com
|
b72639fefb186348b900a58c7e765b4f198fea4c
|
f8d0e0358cfc7774e2ade30fb041a7227f72f696
|
/Project/MNIST/Actual_Picture/mnist_generate_dataset.py
|
7fad26b687d9e1f1ecda401e1fd57dbd54d78c55
|
[] |
no_license
|
KimDaeUng/DeepLearningPractice
|
e01c99d868e7a472ca5ec9c863990e0ab4b48529
|
811f26e0859f0f7cb73d9a0ce3529fb8db867442
|
refs/heads/master
| 2023-04-01T02:26:14.670687
| 2021-04-03T14:08:48
| 2021-04-03T14:08:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,470
|
py
|
import tensorflow as tf
import numpy as np
from PIL import Image
import os
# Generate standard tfrecord of training or testing set
# Input jpg directories and their "<filename> <label>" index files.
image_train_path = './mnist_data_jpg/mnist_train_jpg_60000/'
label_train_path = './mnist_data_jpg/mnist_train_jpg_60000.txt'
image_test_path = './mnist_data_jpg/mnist_test_jpg_10000/'
label_test_path = './mnist_data_jpg/mnist_test_jpg_10000.txt'
# Output locations for the serialized tfrecord files.
data_path = './data/'
tfRecord_train = './data/mnist_train.tfrecords'
tfRecord_test = './data/mnist_test.tfrecords'
# MNIST digits are 28x28 pixels.
resize_height = 28; resize_width = 28
# Generate tfRecord file
def write_tfRecord(tfRecordName, image_path, label_path):
    """Serialize (image bytes, one-hot label) pairs into a tfrecord file.

    Args:
        tfRecordName: output .tfrecords path.
        image_path: directory holding the jpg images.
        label_path: text file with one "<filename> <digit>" pair per line.
    """
    writer = tf.python_io.TFRecordWriter(tfRecordName)  # Create a writer
    with open(label_path, 'r') as label_file:
        picfile_label_pair = label_file.readlines()
        for num, content in enumerate(picfile_label_pair):
            # Construct picture path
            picfile, label = content.split()
            pic_path = image_path + picfile
            img = Image.open(pic_path)
            img_raw = img.tobytes()  # Transfer image into bytes
            # One-hot encode: transfer label e.g. 3 -> 0001000000
            labels = [0] * 10
            labels[int(label)] = 1
            # Create an example
            example = tf.train.Example(features=tf.train.Features(feature={
                'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
                'label': tf.train.Feature(int64_list=tf.train.Int64List(value=labels))
            }))  # warp image and label data
            writer.write(example.SerializeToString())  # serialize the example
            #print("finish processing number of picture: ", num + 1)
    writer.close()
    #print("write tfRecord successfully")
def generate_tfRecord():
    """Create the output directory if needed, then write train and test tfrecords."""
    if os.path.exists(data_path):
        print("Directory has already existed")
    else:
        # if the folder doesn't exist then mkdir
        os.makedirs(data_path)
    # Generate training set
    print("Generating training set...")
    write_tfRecord(tfRecord_train, image_train_path, label_train_path)
    # Generate test set
    print("Generating test set...")
    write_tfRecord(tfRecord_test, image_test_path, label_test_path)
def read_tfRecord(tfRecord_path):
    """Build queue-based ops yielding one (image, one-hot label) pair.

    Returns:
        img: float32 tensor of 784 pixel values scaled into [0, 1].
        label: float32 one-hot tensor of length 10.
    """
    filename_queue = tf.train.string_input_producer([tfRecord_path])
    # TFRecordReader has been deprecated
    reader = tf.TFRecordReader()  # Create a reader
    serialized_example = reader.read(filename_queue)[1]  # store samples
    features = tf.parse_single_example(serialized_example, features={
        'label': tf.FixedLenFeature([10], tf.int64),
        'img_raw': tf.FixedLenFeature([], tf.string)
    })
    img = tf.decode_raw(features['img_raw'], tf.uint8)  # Decode img_raw into unsigned int
    img.set_shape([784])  # Reshape image into a row of 784 pixel
    img = tf.cast(img, tf.float32) * (1/255)  # Normalize image into float
    label = tf.cast(features['label'], tf.float32)  # Transfer label into float
    return img, label
# Construct a batcher (generator)
def get_tfRecord(num, getTrain=True):
    """Return shuffled (image, label) batch ops of size ``num``.

    Args:
        num: batch size.
        getTrain: read from the training tfrecord when True, else the test one.
    """
    tfRecord_path = tfRecord_train if getTrain else tfRecord_test
    img, label = read_tfRecord(tfRecord_path)
    # Shuffle the image order
    img_batch, label_batch = tf.train.shuffle_batch(
        [img, label], batch_size=num, num_threads=2,
        capacity=1000, min_after_dequeue=700)
    return img_batch, label_batch
def main():
    # Entry point: build both tfrecord files.
    generate_tfRecord()

if __name__ == '__main__':
    main()
|
[
"daviddwlee84@gmail.com"
] |
daviddwlee84@gmail.com
|
de2a5e9123899c4d2aa008f270bed4e1523f7c76
|
2ba65a65140e818787ab455ca374f99348ade844
|
/hashmap_and_heap/q004_longest_consecutive_sequence.py
|
30fea9fc4eba2b9a20ea220cdbe5bcb49156643d
|
[] |
no_license
|
samyakjain101/DSA
|
9e917f817a1cf69553b5f8ca5b739bc6f0c81307
|
632a605150704ceb5238cb77289785eb5a58201c
|
refs/heads/main
| 2023-05-06T06:31:09.401315
| 2021-06-01T13:37:59
| 2021-06-01T13:37:59
| 340,645,897
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
def longest_consecutive_sequence(arr: list):
    """Return the longest run of consecutive integers in ``arr`` as a list.

    O(n): only numbers whose predecessor is absent can start a run, so each
    element is walked over at most twice. Ties keep the earliest-seen start.
    """
    present = {num: True for num in arr}
    # Demote any number whose predecessor exists — it cannot start a run.
    for num in present:
        if num - 1 in present:
            present[num] = False
    best_start, best_len = 0, 0
    for num, is_start in present.items():
        if not is_start:
            continue
        run = 1
        while num + run in present:
            run += 1
        if run > best_len:
            best_start, best_len = num, run
    return list(range(best_start, best_start + best_len))
if __name__ == "__main__":
array = [
12,
5,
1,
2,
10,
2,
13,
7,
11,
8,
9,
11,
8,
9,
5,
6,
11,
]
print(longest_consecutive_sequence(array))
|
[
"samyakjain101@gmail.com"
] |
samyakjain101@gmail.com
|
a52be340b6e6941cb775fd9f43ea853958806772
|
f0a44b63a385e1c0f1f5a15160b446c2a2ddd6fc
|
/examples/render/show_all_std_line_types.py
|
e9df274fed178676f291164a7d5210d8cc1bb535
|
[
"MIT"
] |
permissive
|
triroakenshield/ezdxf
|
5652326710f2a24652605cdeae9dd6fc58e4f2eb
|
82e964a574bcb86febc677bd63f1626318f51caf
|
refs/heads/master
| 2023-08-17T12:17:02.583094
| 2021-10-09T08:23:36
| 2021-10-09T08:23:36
| 415,426,069
| 1
| 0
|
MIT
| 2021-10-09T21:31:25
| 2021-10-09T21:31:25
| null |
UTF-8
|
Python
| false
| false
| 726
|
py
|
# Copyright (c) 2019-2021, Manfred Moitzi
# License: MIT License
import ezdxf
from ezdxf.math import Vec3
from ezdxf.tools.standards import linetypes
doc = ezdxf.new("R2007", setup=True)
msp = doc.modelspace()
# How to change the global linetype scaling:
doc.header["$LTSCALE"] = 0.5
p1 = Vec3(0, 0)
p2 = Vec3(9, 0)
delta = Vec3(0, -1)
text_offset = Vec3(0, 0.1)
for lt in linetypes():
name = lt[0]
msp.add_line(p1, p2, dxfattribs={"linetype": name, "lineweight": 25})
msp.add_text(
name, dxfattribs={"style": "OpenSansCondensed-Light", "height": 0.25}
).set_pos(p1 + text_offset)
p1 += delta
p2 += delta
doc.set_modelspace_vport(25, center=(5, -10))
doc.saveas("all_std_line_types.dxf")
|
[
"me@mozman.at"
] |
me@mozman.at
|
2765443d9dab3cb470fb4a2a844eff84e6645762
|
3a51e7173c1b5a5088ac57f668ecb531e514e0fe
|
/m11_feature_importances5_diabets.py
|
e18b4753ce0bb3c9ed11c5bde334cc898d0c903c
|
[] |
no_license
|
marattang/ml_basic
|
83a167324317178701ae0ee0e2e2046293eafacc
|
e8e6b8c9ab7d866377eb01e50ac94ff5b1ea7a73
|
refs/heads/main
| 2023-07-05T03:30:24.663574
| 2021-08-22T07:06:58
| 2021-08-22T07:06:58
| 394,574,251
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,744
|
py
|
# 피처 = 컬럼 = 열
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import load_iris, load_boston, load_diabetes
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
# 1. Data
datasets = load_diabetes()
x_train, x_test, y_train, y_test = train_test_split(
    datasets.data, datasets.target, train_size=0.8, random_state=66
)

# 2. Model
# model = DecisionTreeRegressor(max_depth=3)
model = RandomForestRegressor()

# 3. Training
model.fit(x_train, y_train)

# 4. Evaluation / prediction
r2 = model.score(x_test, y_test)
print('r2 : ', r2)
print(model.feature_importances_) # e.g. [0.0125026 0. 0.53835801 0.44913938]
# Tree-based models perform reasonably well on their own and also expose
# feature_importances_: with four columns (e.g. iris) four values are printed.
# A value of 0 means that column contributed nothing to training under this
# model — the importances are relative, not absolute. (For a decision tree,
# a 0 on the second column means it barely helps.)
def plot_feature_importances_datasets(model):
    """Draw a horizontal bar chart of the fitted model's feature importances."""
    feature_count = datasets.data.shape[1]
    positions = np.arange(feature_count)
    plt.barh(positions, model.feature_importances_, align='center')
    plt.yticks(positions, datasets.feature_names)
    plt.xlabel("Feature Importances")
    plt.ylabel("Features")
    plt.ylim(-1, feature_count)
# Render the importance chart for the fitted model.
plot_feature_importances_datasets(model)
plt.show()

# Reference scores observed for this split:
# DecisionTreeRegressor
# r2 :  0.3139678308823193
# # RandomForestRegressor
# r2 :  0.38347482197285554
|
[
"tlawlfp0322@gmail.com"
] |
tlawlfp0322@gmail.com
|
fcbd61892093b56028d5617ccab23d3aca729c0a
|
ed12b604e0626c1393406d3495ef5bbaef136e8a
|
/Iniciante/Python/exercises from 1000 to 1099/exercise_1038.py
|
8acf7316aa83808418ceec78f9e872c26c2019c0
|
[] |
no_license
|
NikolasMatias/urionlinejudge-exercises
|
70200edfd2f9fc3889e024dface2579b7531ba65
|
ca658ee8b2100e2b687c3a081555fa0770b86198
|
refs/heads/main
| 2023-09-01T20:33:53.150414
| 2023-08-21T07:07:32
| 2023-08-21T07:07:32
| 361,160,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
class Lanche:
    """A snack-menu item: numeric code, description, and unit price."""

    def __init__(self, codigo, especificacao, preco):
        self.codigo = codigo
        self.especificacao = especificacao
        self.preco = preco

    def getCodigo(self):
        """Return this item's menu code."""
        return self.codigo

    def totalPorQtde(self, qtde):
        """Return the total price for ``qtde`` units of this item."""
        return qtde * self.preco
# Fixed menu: (code, description, unit price) for each snack.
lanches = [
    Lanche(1, 'Cachorro Quente', 4.00),
    Lanche(2, 'X-Salada', 4.50),
    Lanche(3, 'X-Bacon', 5.00),
    Lanche(4, 'Torrada simples', 2.00),
    Lanche(5, 'Refrigerante', 1.50)
]

# Read "<code> <quantity>" from stdin and print the order total for the
# matching menu item, formatted to two decimal places.
codigo, qtde = [int(x) for x in input().split()]
for lanche in lanches:
    if codigo == lanche.getCodigo():
        print(''.join(['Total: R$ ', "{:.2f}".format(lanche.totalPorQtde(qtde))]))
        break
|
[
"nikolas.matias500@gmail.com"
] |
nikolas.matias500@gmail.com
|
a4c0f7cac515fab2dde43dae019bf1f9f9359d98
|
e000416c89725db514ed5c01d7b9ef8e37c5355f
|
/backend/wallet/migrations/0001_initial.py
|
42f6e380119fb0d9602b441bb339d9aa4929f923
|
[] |
no_license
|
crowdbotics-apps/click-time-28533
|
376f4d34b10ca3050fde3f43df51233e5b612c2d
|
89f4f7e05bf04623b2822899677b6c3606968151
|
refs/heads/master
| 2023-06-19T06:37:55.564153
| 2021-07-07T06:12:26
| 2021-07-07T06:12:26
| 383,692,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,875
|
py
|
# Generated by Django 2.2.20 on 2021-07-07 06:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial wallet-app schema: user/driver wallets and payment methods."""

    initial = True

    dependencies = [
        # Wallets reference profile models, so taxi_profile must migrate first.
        ('taxi_profile', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserWallet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('balance', models.FloatField()),
                ('expiration_date', models.DateTimeField()),
                # One wallet per user profile (one-to-one).
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='userwallet_user', to='taxi_profile.UserProfile')),
            ],
        ),
        migrations.CreateModel(
            name='PaymentMethod',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('account_token', models.CharField(max_length=255)),
                ('payment_account', models.CharField(max_length=10)),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                # A wallet may hold multiple payment methods (many-to-one).
                ('wallet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='paymentmethod_wallet', to='wallet.UserWallet')),
            ],
        ),
        migrations.CreateModel(
            name='DriverWallet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('balance', models.FloatField()),
                ('expiration_date', models.DateTimeField()),
                # One wallet per driver profile (one-to-one).
                ('driver', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='driverwallet_driver', to='taxi_profile.DriverProfile')),
            ],
        ),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
df398393aea5d0bd7445abc5140d3c360b258e60
|
973713f993166b1d0c2063f6e84361f05803886d
|
/Day01-15/02_variableTest_3.py
|
0a1660b15ed013535066fbed5471a61876d4a6c4
|
[
"MIT"
] |
permissive
|
MaoningGuan/Python-100-Days
|
20ad669bcc0876b5adfbf2c09b4d25fd4691061a
|
d36e49d67a134278455438348efc41ffb28b778a
|
refs/heads/master
| 2022-11-17T12:24:45.436100
| 2020-07-18T02:24:42
| 2020-07-18T02:24:42
| 275,157,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Use input() to read keyboard input (strings),
int() to convert those strings to integers, and
print() with placeholders to demonstrate every arithmetic operator.
"""
a = int(input('a = '))
b = int(input('b = '))
print('%d + %d = %d' % (a, b, a + b))    # addition
print('%d - %d = %d' % (a, b, a - b))    # subtraction
print('%d * %d = %d' % (a, b, a * b))    # multiplication
print('%d / %d = %f' % (a, b, a / b))    # true division (float result)
print('%d // %d = %d' % (a, b, a // b))  # floor division (integer quotient)
print('%d %% %d = %d' % (a, b, a % b))   # modulo (%% escapes the % sign)
print('%d ** %d = %d' % (a, b, a ** b))  # a to the power of b
|
[
"1812711281@qq.com"
] |
1812711281@qq.com
|
ed02706c8203b78c812b8159c71208e1e7196960
|
597b888dca4e9add7acdf449f8c3d8d716826ff2
|
/gui/demos/listbox.py
|
105c3d631bc9eb579543eeadf5002fcfc4aee71b
|
[
"MIT"
] |
permissive
|
alsor62/micropython-micro-gui
|
a7cad669d69358599feb84011c23ac5d767adfda
|
5c7d6c96b30e4936a2a4315b09e98a730f14c6db
|
refs/heads/main
| 2023-09-04T09:54:10.946964
| 2021-11-09T11:46:59
| 2021-11-09T11:46:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,401
|
py
|
# listbox.py micro-gui demo of Listbox class
# Released under the MIT License (MIT). See LICENSE.
# Copyright (c) 2021 Peter Hinch
# hardware_setup must be imported before other modules because of RAM use.
from hardware_setup import ssd # Create a display instance
from gui.core.ugui import Screen
from gui.core.writer import CWriter
from gui.core.colors import *
from gui.widgets.listbox import Listbox
from gui.widgets.buttons import CloseButton
import gui.fonts.freesans20 as font
class BaseScreen(Screen):
    """Demo screen: a five-row Listbox of elements plus a close button."""

    def __init__(self):
        # Callback for ordinary entries: receives the Listbox and the bound arg.
        def cb(lb, s):
            print('Gas', s)

        # Callback used for the radioactive elements.
        def cb_radon(lb, s):  # Yeah, Radon is a gas too...
            print('Radioactive', s)

        super().__init__()
        wri = CWriter(ssd, font, GREEN, BLACK, verbose=False)
        # (label, callback, callback-args) triple per list entry.
        els = (('Hydrogen', cb, ('H',)),
               ('Helium', cb, ('He',)),
               ('Neon', cb, ('Ne',)),
               ('Xenon', cb, ('Xe',)),
               ('Radon', cb_radon, ('Ra',)),
               ('Uranium', cb_radon, ('U',)),
               ('Plutonium', cb_radon, ('Pu',)),
               ('Actinium', cb_radon, ('Ac',)),
               )
        # dlines=5 shows five rows at a time; Listbox.ON_LEAVE also fires the
        # callback when focus leaves the widget.
        Listbox(wri, 2, 2,
                elements = els, dlines=5, bdcolor=RED, value=1, also=Listbox.ON_LEAVE)
        #bdcolor = RED, fgcolor=RED, fontcolor = YELLOW, select_color=BLUE, value=1)
        CloseButton(wri)

Screen.change(BaseScreen)
|
[
"peter@hinch.me.uk"
] |
peter@hinch.me.uk
|
0cc430d7621bfaae1f6b6a655566642dd758c4bf
|
1333a965058e926649652ea55154bd73b6f05edd
|
/4_advanced/modules/userinput.py
|
bf7135098c2c79a045d6ce0bbf207f42b97ecacc
|
[
"MIT"
] |
permissive
|
grecoe/teals
|
42ebf114388b9f3f1580a41d5d03da39eb083082
|
ea00bab4e90d3f71e3ec2d202ce596abcf006f37
|
refs/heads/main
| 2021-06-21T20:12:03.108427
| 2021-05-10T19:34:40
| 2021-05-10T19:34:40
| 223,172,099
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,085
|
py
|
'''
This file allows you to hide all of the implementation details of asking
a user for input for your program. It will verify that the correct data is
returned.
Externally, we want to expose the getUserInput().
'''
'''
__parseInt
Parameters
userInput : Input string from user
error : Error to display if not a int
Returns:
Int if non error, None otherwise
'''
def __parseInt(userInput, error):
    """Parse *userInput* as an int.

    Parameters
        userInput : input string from the user
        error     : message printed when the input is not an int
    Returns
        The parsed int, or None (after printing *error*) on failure.
    """
    try:
        return int(userInput)
    except (ValueError, TypeError):
        # Only conversion failures are expected here; anything else
        # would be a programming error and should propagate.
        print(error)
        return None
'''
__parseFloat
Parameters
userInput : Input string from user
error : Error to display if not a float
Returns:
Float if non error, None otherwise
'''
def __parseFloat(userInput, error):
    """Parse *userInput* as a float.

    Parameters
        userInput : input string from the user
        error     : message printed when the input is not a float
    Returns
        The parsed float, or None (after printing *error*) on failure.
    """
    try:
        return float(userInput)
    except (ValueError, TypeError):
        # Narrowed from a bare `except Exception` — only conversion
        # failures are expected.
        print(error)
        return None
'''
getUserInput:
Parameters:
prompt : Prompt to show to the user
error: Error to show to the user if expected type not input.
classInfo: Class info of type to collect
retries: Number of times to allow user to get it right.
Returns:
Expected value type if retries isn't exceeded
'''
def getUserInput(prompt, retries, error, classInfo):
    """Prompt the user until a value of type *classInfo* is entered.

    Parameters:
        prompt    : prompt shown to the user
        retries   : maximum number of attempts allowed
        error     : message shown when the expected type is not entered
        classInfo : class object of the type to collect (int, float, or str)
    Returns:
        The collected value, or None if *retries* is exhausted.
    """
    className = classInfo.__name__
    # Map type names to their parser; anything else is returned verbatim.
    parsers = {'int': __parseInt, 'float': __parseFloat}
    userValue = None
    attempts = 0
    while True:
        attempts += 1
        rawInput = input(prompt)
        parser = parsers.get(className)
        userValue = parser(rawInput, error) if parser else rawInput
        # Got a usable value: stop asking.
        if userValue is not None:
            break
        # Out of attempts: give up.
        if attempts >= retries:
            print("You have exhausted your attempts to enter a ", className)
            break
    return userValue
|
[
"grecoe@microsoft.com"
] |
grecoe@microsoft.com
|
1ebb7382e258c4abf52348f5f8cdcbf3ae69437d
|
1500fe9ea062152becc85a01577cced0465cde52
|
/landacademy/urls.py
|
d6983faca141d85dce6130a0e3056d9ab11c126d
|
[] |
no_license
|
Xednom/gpg-ams
|
afe4da92384f2de1b7b69ce28e13becb103009b3
|
7c87ab639b140873dfc90ac43c7aec349aec6436
|
refs/heads/master
| 2022-11-06T15:22:34.761321
| 2021-05-11T03:48:08
| 2021-05-11T03:48:08
| 239,904,632
| 0
| 1
| null | 2022-11-03T02:21:18
| 2020-02-12T01:50:16
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 465
|
py
|
from django.urls import path
from . import views
# URL namespace for reversing, e.g. 'landacademy:add_landacademy'.
app_name="landacademy"
urlpatterns = [
    # Land Academy inventory add/list views.
    path('add-land-academy-inventory', views.AddLandAcademyView.as_view(), name="add_landacademy"),
    path('view-land-academy-inventory', views.LandAcademyView.as_view(), name="view_landacademy"),
    # O2O smart-pricing add/list views.
    path('view-o2o-smart-pricing', views.SmartPricingView.as_view(), name="view_o2o"),
    path('add-o2o-smart-pricing', views.AddSmartPricingView.as_view(), name="add_o2o"),
]
|
[
"monde.lacanlalay@gmail.com"
] |
monde.lacanlalay@gmail.com
|
27104a744c874c71d5916e37b398277acf2b845a
|
0c427b2b8b7960bf3c1e727984532c6731328066
|
/APP/forms.py
|
1e073b90096a2c7b3a8ab029ca583f02c05bf928
|
[] |
no_license
|
Master-cai/flask_library
|
c7c3910c6f989b7f0f90a3b32eb53d421b7f431d
|
ebc86cbfbac67b199d06a3ec1e789b0d3fec1488
|
refs/heads/master
| 2020-11-24T03:34:42.063453
| 2019-12-20T09:30:01
| 2019-12-20T09:30:01
| 227,948,428
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,996
|
py
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, SelectField, TextAreaField, ValidationError, HiddenField, \
BooleanField, PasswordField, IntegerField, DateTimeField, DateField
from wtforms.validators import DataRequired, Email, Length, Optional, URL, EqualTo
class LoginForm(FlaskForm):
    """Reader login form: library card ID plus password."""
    ReaderID = StringField('ReaderID', validators=[DataRequired(), Length(1, 20)])
    password = PasswordField('Password', validators=[DataRequired(), Length(6, 20)])
    # Optional "remember me" persistent-session checkbox.
    remember = BooleanField('Remember me')
    submit = SubmitField('Log in')
class RegisterForm(FlaskForm):
    """Reader registration form: ID, name, password (with confirmation),
    department and major."""
    RID = StringField('RID', validators=[DataRequired(), Length(1, 20)])
    rName = StringField('readerName', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired(),
                                                     Length(1, 20),
                                                     # FIX: message used a backtick instead of an apostrophe.
                                                     EqualTo('password_confirm', message="password doesn't match")])
    password_confirm = PasswordField('Password_confirm', validators=[DataRequired()])
    department = StringField('Department', validators=[DataRequired(), Length(1, 20)])
    # FIX: label was copy-pasted as 'Department'; this field is the major.
    major = StringField('Major', validators=[DataRequired(), Length(1, 20)])
    submit = SubmitField('Register')
class SearchInfo(FlaskForm):
    """Book search form: pick a search field and enter a query string."""
    # SearchType = StringField('SearchType', validators=[DataRequired(), Length(1, 20)])
    # Searchable book columns.
    typeChoices = ['BID', 'BookName', 'Category', 'Press', 'Author']
    SearchType = SelectField('SearchType', choices=[(t, t) for t in typeChoices])
    SearchInfo = StringField('SearchInfo', validators=[DataRequired(), Length(1, 20)])
    submit = SubmitField('Search')
class newBookForm(FlaskForm):
    """Form for adding a new book record.

    NOTE(review): DataRequired() rejects falsy values, so an IntegerField
    value of 0 for `sum`/`currNum` fails validation; InputRequired() is
    usually intended for numeric fields — confirm before changing.
    """
    BID = StringField('BID', validators=[DataRequired(), Length(1, 20)])
    bName = StringField('bName', validators=[DataRequired()])
    Category = StringField('Category', validators=[DataRequired(), Length(1, 20)])
    ISBN = StringField('ISBN', validators=[DataRequired(), Length(1, 20)])
    author = StringField('author', validators=[DataRequired(), Length(1, 20)])
    publicationDate = DateField('publicationDate', validators=[DataRequired()])
    press = StringField('press', validators=[DataRequired(), Length(1, 40)])
    sum = IntegerField('sum', validators=[DataRequired()])
    currNum = IntegerField('currNum', validators=[DataRequired()])
    submit = SubmitField('submit')
class ReturnBookForm(FlaskForm):
    """Form for returning a borrowed book: reader ID + book ID."""
    RID = StringField('RID', validators=[DataRequired(), Length(1, 20)])
    BID = StringField('BID', validators=[DataRequired(), Length(1, 20)])
    submit = SubmitField('Return')
class ReaderInfoForm(FlaskForm):
    """Reader search form: pick a reader field and enter a query string."""
    # SearchType = StringField('SearchType', validators=[DataRequired(), Length(1, 20)])
    # Searchable reader columns.
    typeChoices = ['RID', 'rName', 'department']
    SearchType = SelectField('SearchType', choices=[(t, t) for t in typeChoices])
    SearchInfo = StringField('SearchInfo', validators=[DataRequired(), Length(1, 20)])
    submit = SubmitField('Search')
|
[
"719591339@qq.com"
] |
719591339@qq.com
|
dc93b89f7e841b512d47ecff109941a4fd9c59cb
|
a7a13ca32072bb27ce2dceb87c414767b3751ec5
|
/src/gthnk/__init__.py
|
f8ee1ab5a2ed24607ff10461411e9f764b139904
|
[] |
no_license
|
SocioProphet/gthnk
|
36e50338d5f3df19f84620ff9a337f3cf8e9e362
|
fc3d21090c2de10cfd74650436536999e5c65d7c
|
refs/heads/master
| 2022-12-13T18:55:00.610073
| 2020-09-14T14:50:21
| 2020-09-14T14:50:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 179
|
py
|
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_bcrypt import Bcrypt
# Unbound Flask extension singletons; presumably an application factory
# calls init_app() on each of them later — verify against the app setup.
db = SQLAlchemy()
login_manager = LoginManager()
bcrypt = Bcrypt()
|
[
"ian@iandennismiller.com"
] |
ian@iandennismiller.com
|
5ad1c174ef11481211eccd3c9a071aaebf6e217e
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_200/1268.py
|
9e45fefd2245fc74e784ff57dba4a16de3dd55a5
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,019
|
py
|
def read_case(line):
    """Parse one test case: a single integer on its own line."""
    value = int(line)
    return value
def read_input(path):
    """Solve every case in *path* and write 'Case #i: <answer>' lines to
    '<path>_res.txt'.  The first input line holds the case count T.
    """
    # 'with' closes both files even if a case raises (the original left
    # them open on error); range() replaces the Python-2-only xrange().
    with open(path, 'r') as f, open(path + '_res.txt', 'w') as g:
        T = int(f.readline())
        for i in range(T):
            line = f.readline()
            g.write('Case #%d: ' % (i + 1))
            n = read_case(line)
            g.write(str(solve(n)))
            g.write('\n')
def first_untidy(n):
    """Return the index (counting from the least-significant digit) of the
    first digit smaller than its left neighbour, or -1 if *n* is tidy
    (digits non-decreasing left to right).
    """
    res = 0
    while n > 0:
        # '//' keeps integer semantics on Python 3; the original used the
        # Python 2 '/' operator, which floors for ints there.
        if n % 10 < (n // 10) % 10:
            return res
        res += 1
        n //= 10
    return -1
def is_units_tidy(n):
    """Return True if no digit of *n* exceeds its units (last) digit."""
    u = n % 10
    while n > 0:
        # '//' keeps integer semantics on Python 3 (was py2 '/').
        n //= 10
        if n % 10 > u:
            return False
    return True
def solve_after_first(n):
    """Recursively return the largest tidy number <= *n*, fixing digits
    from the units position upward once the first untidy digit has been
    handled by solve().
    """
    if n < 10:
        return n
    if not is_units_tidy(n):
        # Drop below the offending units digit (e.g. 100 -> 99).
        n -= n % 10 + 1
    # '//' keeps integer semantics on Python 3 (was py2 '/').
    return solve_after_first(n // 10) * 10 + n % 10
def solve(n):
    """Return the largest tidy number that is <= n."""
    pos = first_untidy(n)
    if pos == -1:
        # Already tidy.
        return n
    # Step just below the first untidy digit, then tidy the rest.
    n -= n % 10 ** (pos + 1) + 1
    return solve_after_first(n)
read_input('B-small-attempt1.in')
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
00a7d4bb8e564e8e4d36756ce801d18ce4fdfecc
|
b87389aa0d6595c8b649ac899e8ade4226309739
|
/manage.py
|
f53d707f83062edaaf89cd56b7fde370fb63af16
|
[] |
no_license
|
bussiere/ImageIp
|
be3797d2fdd57b78d35de2111eb67b269f7df104
|
bc3e7b3d4edabde5353f9fadfc46253b7407ba5a
|
refs/heads/master
| 2020-12-24T15:23:03.255370
| 2013-08-23T10:52:46
| 2013-08-23T10:52:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
#!/usr/bin/env python
# Django's command-line utility (generated by django-admin startproject).
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings before importing management code.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "imageip.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
[
"bussiere@gmail.com"
] |
bussiere@gmail.com
|
e6ed56fd3df5070e61ae811df3ed3d5638f8db41
|
e1312afff90dbe1cdcd500541e29097da19fee97
|
/inference/infer_with_pb_1capture.py
|
83e9ad60c4b8ca47ef4676152e1dd42c7059bd70
|
[] |
no_license
|
andreiqv/github_detector_scale
|
af6115caff6ca6c73c934dc1fa6673ccd69b71fa
|
329d32a4d26b2bfb2f902f876bb9b8eb35bd9e2a
|
refs/heads/master
| 2020-04-08T12:30:36.966069
| 2019-01-30T22:21:34
| 2019-01-30T22:21:34
| 159,350,198
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,509
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Using TF for inference, and TensorRT for compress a graph.
import sys
import os
import argparse
import tensorflow as tf
import numpy as np
from tensorflow.python.platform import gfile
from PIL import Image
import timer
from time import sleep
import io
from picamera import PiCamera
camera = PiCamera()
stream = io.BytesIO()
#import tensorflow.contrib.tensorrt as trt
use_hub_model = False
if True:
FROZEN_FPATH = '/home/pi/work/pb/model_first_3-60-1.000-1.000[0.803].pb'
#FROZEN_FPATH = '/home/pi/work/pb/model_resnet50-97-0.996-0.996[0.833].pb'
ENGINE_FPATH = 'saved_model_full_2.plan'
INPUT_SIZE = [3, 128, 128]
INPUT_NODE = 'input_1'
OUTPUT_NODE = 'dense_1/Sigmoid'
#OUTPUT_NODE = 'dense/Sigmoid'
input_output_placeholders = [INPUT_NODE + ':0', OUTPUT_NODE + ':0']
def get_image_as_array(image_file):
    """Load an image, resize it to the model input size, and scale to [0, 1].

    Returns a float32 numpy array; spatial size comes from the global
    INPUT_SIZE ([C, H, W]), so the result is H x W (x channels).
    """
    # Read the image & get statstics
    image = Image.open(image_file)
    #img.show()
    #shape = [299, 299]
    shape = tuple(INPUT_SIZE[1:])
    #image = tf.image.resize_images(img, shape, method=tf.image.ResizeMethod.BICUBIC)
    image = image.resize(shape, Image.ANTIALIAS)
    image_arr = np.array(image, dtype=np.float32) / 255.0
    return image_arr
def get_labels(labels_file):
    """Return the class labels from *labels_file*, one per line,
    with surrounding whitespace stripped."""
    with open(labels_file) as handle:
        labels = [line.strip() for line in handle.readlines()]
    print(labels)
    return labels
def get_frozen_graph(pb_file):
    """Deserialize a frozen TensorFlow GraphDef from a .pb protobuf file."""
    # Load the protobuf file from disk and parse it to retrieve the
    # unserialized GraphDef.
    with gfile.FastGFile(pb_file,'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        #sess.graph.as_default() #new line
    return graph_def
def compress_graph_with_trt(graph_def, precision_mode):
    """Optionally compress *graph_def* with TensorRT.

    A precision_mode of 0 means "no compression" and returns the graph
    unchanged.  NOTE(review): the `tensorflow.contrib.tensorrt` import at
    the top of this file is commented out, so calling this with a non-zero
    precision_mode would raise NameError on `trt` — confirm before use.
    """
    output_node = input_output_placeholders[1]
    if precision_mode==0:
        return graph_def
    trt_graph = trt.create_inference_graph(
        graph_def,
        [output_node],
        max_batch_size=1,
        max_workspace_size_bytes=2<<20,
        precision_mode=precision_mode)
    return trt_graph
def inference_with_graph(graph_def, image):
    """Run 10 capture-and-predict iterations from the PiCamera.

    NOTE(review): the *image* parameter is never used — it is immediately
    shadowed by frames captured from the global `camera`/`stream` objects;
    confirm whether single-image inference was intended.
    """
    with tf.Graph().as_default() as graph:
        with tf.Session() as sess:
            # Import a graph_def into the current default Graph
            print("import graph")
            input_, predictions = tf.import_graph_def(graph_def, name='',
                return_elements=input_output_placeholders)
            camera.start_preview()
            camera.resolution = (640, 480)
            camera.framerate = 32
            timer.timer('predictions.eval')
            time_res = []
            for i in range(10):
                # Capture one JPEG frame into the shared BytesIO stream,
                # preprocess it like get_image_as_array(), and predict.
                camera.capture(stream, format='jpeg')
                stream.seek(0)
                image = Image.open(stream)
                shape = tuple(INPUT_SIZE[1:])
                image = image.resize(shape, Image.ANTIALIAS)
                image_arr = np.array(image, dtype=np.float32) / 255.0
                pred_val = predictions.eval(feed_dict={input_: [image_arr]})
                print(pred_val)
                timer.timer()
                #time_res.append(0)
                #print('index={0}, label={1}'.format(index, label))
            camera.stop_preview()
            print(camera.resolution)
            #print('mean time = {0}'.format(np.mean(time_res)))
            #return index
def inference_images_with_graph(graph_def, filenames):
    """Process list of files of images.

    NOTE(review): relies on a module-level `labels` list, but the only
    `get_labels(...)` call in __main__ is commented out — calling this
    would raise NameError on `labels`; confirm before use.
    """
    images = [get_image_as_array(filename) for filename in filenames]
    with tf.Graph().as_default() as graph:
        with tf.Session() as sess:
            # Import a graph_def into the current default Graph
            print("import graph")
            input_, predictions = tf.import_graph_def(graph_def, name='',
                return_elements=input_output_placeholders)
            camera.start_preview()
            for i in range(len(filenames)):
                filename = filenames[i]
                image = images[i]
                p_val = predictions.eval(feed_dict={input_: [image]})
                index = np.argmax(p_val)
                label = labels[index]
                print('{0}: prediction={1}'.format(filename, label))
            camera.stop_preview()
def createParser():
    """Build the command-line parser for this script.

    FIX: the help strings were copy-pasted as 'input' for every option;
    each option now describes what it actually controls.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', default=None, type=str,
                        help='single input image file')
    parser.add_argument('-dir', '--dir', default="../images", type=str,
                        help='directory of input images (used when --input is absent)')
    parser.add_argument('-pb', '--pb', default="saved_model.pb", type=str,
                        help='frozen TensorFlow graph (.pb) file')
    parser.add_argument('-o', '--output', default="logs/1/", type=str,
                        help='output directory')
    return parser
if __name__ == '__main__':
    parser = createParser()
    arguments = parser.parse_args(sys.argv[1:])
    pb_file = arguments.pb
    # Build the list of images: a single --input file, or every file in --dir.
    if arguments.input is not None:
        filenames = [arguments.input]
        #image = get_image_as_array(filename)
        #images = [(image]
    else:
        filenames = []
        src_dir = arguments.dir
        listdir = os.listdir(src_dir)
        for f in listdir:
            filenames.append(src_dir + '/' + f)
    assert type(filenames) is list and filenames != []
    #labels = get_labels('labels.txt')
    # NOTE(review): the --pb command-line value is immediately overridden by
    # the hard-coded FROZEN_FPATH below — confirm which one should win.
    pb_file = FROZEN_FPATH
    graph_def = get_frozen_graph(pb_file)
    #modes = ['FP32', 'FP16', 0]
    #precision_mode = modes[2]
    #pb_file_name = 'saved_model.pb' # output_graph.pb
    # no compress
    image_file = '/home/pi/work/images/img_1_0_2018-08-04-09-37-300672_5.jpg'
    image = get_image_as_array(image_file)
    inference_with_graph(graph_def, image)
    #inference_images_with_graph(graph_def, filenames)
    """
    for mode in modes*2:
        print('\nMODE: {0}'.format(mode))
        graph_def = compress_graph_with_trt(graph_def, mode)
        inference_with_graph(graph_def, images, labels)
    """
    """
    0.0701 sec. (total time 1.72) - model_first_3-60-1.000-1.000[0.803].pb
    0.7628 sec. -- model_resnet50-97-0.996-0.996[0.833].pb
    ---
    capture pict from cam:
    1024x768 (def.) - 0.7612 sec.
    """
|
[
"phxv@mail.ru"
] |
phxv@mail.ru
|
f3ad29a421a0cf868288fc6682c1a2f1460652b8
|
61ce57892c172f71286a39c8c863aa8a7b29484b
|
/stampede_results/efficiency.py
|
819f43b7bee082e404a72cda5a0ee62105cfe01a
|
[] |
no_license
|
bd-j/cetus
|
cfbb46aa2d94fdf982a7906bc8bdcbe9375df67c
|
c9f7e0972184a0c2fcc7add6e766733b7a46b149
|
refs/heads/master
| 2021-05-01T00:06:10.518322
| 2015-02-17T16:49:46
| 2015-02-17T16:49:46
| 21,367,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,067
|
py
|
import sys, os, glob
import numpy as np
import matplotlib.pyplot as pl
import bsfh.read_results as bread
def process_run(mcmc_file, model_file):
    """Summarize one bsfh sampling run.

    Returns ([nwalkers, nburn, niter], sampling_duration, free_param_names).
    """
    result, pr, model = bread.read_pickles(mcmc_file, model_file=model_file)
    # Total burn-in iterations (nburn may be a list of stages).
    nburn = np.sum(result['run_params']['nburn'])
    nw, niter, ndim = result['chain'].shape
    time = result['sampling_duration']
    free_params = model.theta_labels()
    return [nw, nburn, niter], time, free_params
mcfiles = glob.glob('*dmock*_mcmc')
speed, ncpu, hasgp = [], [], []
for i, f in enumerate(mcfiles):
    dims, dur, params = process_run(f, f.replace('_mcmc', '_model'))
    print(f+'\n')
    # Likelihood calls per second: nwalkers * (nburn + niter) / duration.
    s = dims[0] * (dims[1] + dims[2]) / dur
    speed += [s]
    nc = dims[0]/2 + 1
    ncpu += [nc]
    print(dims[0], dims[2], dur, s, nc, 'gp_jitter' in params)
    hasgp += ['gp_jitter' in params]
# Blue markers for runs without GP jitter, red for runs with it.
color = ['b','r']
fig, axes = pl.subplots()
clr = np.array(color)[np.array(hasgp).astype(int)]
# BUG FIX: speed and ncpu were plain Python lists, so `speed/ncpu` raised
# TypeError; convert to arrays for elementwise division.
speed = np.asarray(speed)
ncpu = np.asarray(ncpu)
axes.scatter(ncpu, speed / ncpu, marker='o', c=clr)
axes.set_xlabel('cores')
axes.set_ylabel('Likelihood calculations/sec/core')
|
[
"benjamin.duncan.johnson@gmail.com"
] |
benjamin.duncan.johnson@gmail.com
|
ea9cb6af6472d76c58d80685599298f8e4a6f15e
|
439cda44ba6d5d8061a134875736a9efcd4bf22c
|
/trakt_tools/tasks/profile/backup/create/handlers/playback.py
|
746491bfc433fc6a3dc4ee210ef1164e29330dbc
|
[] |
no_license
|
fuzeman/trakt-tools
|
28a0fcb2c2efe88371bba1892777be75236fdc5c
|
8bdcb117b6092733cc50f87d4f943fc23340da90
|
refs/heads/master
| 2023-01-07T06:16:46.716517
| 2022-12-27T22:57:03
| 2022-12-27T22:57:03
| 68,256,616
| 31
| 4
| null | 2022-12-27T22:41:57
| 2016-09-15T01:11:54
|
Python
|
UTF-8
|
Python
| false
| false
| 801
|
py
|
from __future__ import print_function
import logging
log = logging.getLogger(__name__)
class PlaybackHandler(object):
    """Backs up the user's playback-progress items to 'playback.json'."""

    def run(self, backup, profile):
        """Fetch /sync/playback from *profile* and write it via *backup*.

        Returns the value of backup.write() on success, False on any failure.
        """
        print('Playback Progress')

        # Request the playback-progress collection.
        resp = profile.get('/sync/playback')
        if resp.status_code != 200:
            print('Invalid response returned')
            return False

        entries = resp.json()
        print(' - Received %d item(s)' % len(entries))

        # Persist the items; failures are logged, not raised.
        print(' - Writing to "playback.json"...')
        try:
            return backup.write('playback.json', entries)
        except Exception as ex:
            log.error('Unable to write playback progress to disk: %s', ex, exc_info=True)
            return False
|
[
"me@dgardiner.net"
] |
me@dgardiner.net
|
5cdd8442cd814d0483be06189f1baa90c42bb382
|
d2c92cfe95a60a12660f1a10c0b952f0df3f0e8e
|
/adminasto/adminasto/tongji.py
|
698ac53c3172a4fe3beefb1a1fcf538a722beb21
|
[] |
no_license
|
snamper/zzpython
|
71bf70ec3762289bda4bba80525c15a63156a3ae
|
20415249fa930ccf66849abb5edca8ae41c81de6
|
refs/heads/master
| 2021-12-21T16:12:22.190085
| 2017-09-30T06:26:05
| 2017-09-30T06:26:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,040
|
py
|
from sphinxapi import *
import os
import MySQLdb
import datetime
nowpath=os.path.dirname(__file__)
# NOTE(review): execfile() is Python 2 only; conn.py appears to provide the
# module-level `cursor`, `strtodatetime`, `datetostr`, `closeconn` and
# `render_to_response` names used below — verify.
execfile("conn.py")
# Number of products created on a given day
def getproductnum(nowday):
    """Count products created during the 24 hours starting at *nowday*
    (a '%Y-%m-%d' date string).  Returns 0 when the query yields no row.
    """
    format="%Y-%m-%d"
    nowday=strtodatetime(nowday,format)
    oneday=datetime.timedelta(days=1)
    # SECURITY FIX: bind the date bounds as query parameters instead of
    # concatenating them into the SQL string (SQL-injection hardening).
    sql=("select count(0) from products "
         "where gmt_created>%s and gmt_created<=%s")
    cursor.execute(sql,(str(nowday),str(nowday+oneday)))
    offerlist=cursor.fetchone()
    if offerlist:
        return offerlist[0]
    else:
        return 0
# Daily statistics view
def tongji(request):
    """Statistics view: per-day product counts for the 30 days leading up
    to 2013-8-1, rendered with tongji.html.
    """
    fromday="2013-8-1";
    format="%Y-%m-%d";
    fromday=strtodatetime(fromday,format)
    oneday=datetime.timedelta(days=1)
    num=30
    listall=[]
    for i in range(0,num):
        # Walk backwards one day at a time.
        fromday=fromday-oneday
        postnum=0
        postnum=getproductnum(datetostr(fromday))
        # Leave-words counting is currently disabled.
        leavewordsnum=0
        #leavewordsnum=getleavewordsnum(datetostr(fromday))
        list={'date':datetostr(fromday),'postnum':postnum,'leavewordsnum':leavewordsnum}
        listall.append(list)
    return render_to_response('tongji.html',locals())
closeconn()
|
[
"2496256902@qq.com"
] |
2496256902@qq.com
|
2a1ed3b591771dcef576f456adcb8a35894e6e42
|
7a3757a341fb1c5a06482e2e5cb066a967a6eff5
|
/tests/apis/test_htmls.py
|
8a62ed922151fdb0fb4d2da11af953142cdb52d2
|
[
"MIT"
] |
permissive
|
ninoseki/uzen
|
4bff6080b9c0677dcf25abc0f104eca3fb92ed8a
|
2a0065aa57fe3891c46e1174c1dc9aab673e52a8
|
refs/heads/master
| 2023-09-02T01:59:18.893712
| 2022-08-28T09:49:12
| 2022-08-28T09:49:12
| 241,092,872
| 87
| 9
|
MIT
| 2023-06-01T01:08:05
| 2020-02-17T11:37:59
|
Python
|
UTF-8
|
Python
| false
| false
| 568
|
py
|
from typing import List
from fastapi.testclient import TestClient
from app import models
def test_html(client: TestClient, snapshots: List[models.Snapshot]):
    """Both the HTML body and its text rendering are retrievable by sha256."""
    id_ = snapshots[0].id
    response = client.get(f"/api/snapshots/{id_}")
    snapshot = response.json()
    # FIX: the sha256 digest was extracted twice with identical code;
    # compute it once and reuse it for both endpoints.
    sha256 = snapshot.get("html", {}).get("sha256", "")
    response = client.get(f"/api/htmls/{sha256}")
    assert response.status_code == 200
    response = client.get(f"/api/htmls/{sha256}/text")
    assert response.status_code == 200
|
[
"manabu.niseki@gmail.com"
] |
manabu.niseki@gmail.com
|
685279deb51b82c7913268989efa1ce91cda2791
|
ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f
|
/P.O.R.-master/pirates/effects/ShipSinkSplashes.py
|
4efffec5639f34902279a1a506fbbbf5ba52d62c
|
[] |
no_license
|
BrandonAlex/Pirates-Online-Retribution
|
7f881a64ec74e595aaf62e78a39375d2d51f4d2e
|
980b7448f798e255eecfb6bd2ebb67b299b27dd7
|
refs/heads/master
| 2020-04-02T14:22:28.626453
| 2018-10-24T15:33:17
| 2018-10-24T15:33:17
| 154,521,816
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,226
|
py
|
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect
from direct.particles import Particles
from direct.particles import ForceGroup
from EffectController import EffectController
from PooledEffect import PooledEffect
import random
class ShipSinkSplashes(PooledEffect, EffectController):
    """Pooled two-system particle effect (steam + splash) for a sinking ship.

    NOTE(review): only self.p1 is attached via f.addParticles(); self.p0 is
    fully configured but never added to the effect — confirm whether
    f.addParticles(self.p0) was intended.
    """
    # Base sprite scales; multiplied by effectScale in setEffectScale().
    card2Scale = 64.0
    cardScale = 64.0

    def __init__(self, parent = None):
        PooledEffect.__init__(self)
        EffectController.__init__(self)
        self.setDepthWrite(0)
        self.setLightOff()
        self.setBin('fixed', 50)
        self.effectScale = 1.0
        self.f = ParticleEffect.ParticleEffect('ShipSinkSplashes')
        self.f.reparentTo(self)
        # Sprite textures for the two particle systems.
        model = loader.loadModel('models/effects/particleMaps')
        self.card = model.find('**/particleWhiteSteam')
        self.card2 = model.find('**/particleSplash')
        self.p0 = Particles.Particles('particles-1')
        self.p0.setFactory('PointParticleFactory')
        self.p0.setRenderer('SpriteParticleRenderer')
        self.p0.setEmitter('DiscEmitter')
        self.p1 = Particles.Particles('particles-2')
        self.p1.setFactory('PointParticleFactory')
        self.p1.setRenderer('SpriteParticleRenderer')
        self.p1.setEmitter('DiscEmitter')
        self.f.addParticles(self.p1)
        # --- p0: slow white-steam sprites ---
        self.p0.setPoolSize(128)
        self.p0.setBirthRate(0.14999999999999999)
        self.p0.setLitterSize(10)
        self.p0.setLitterSpread(0)
        self.p0.setSystemLifespan(0.0)
        self.p0.setLocalVelocityFlag(1)
        self.p0.setSystemGrowsOlderFlag(0)
        self.p0.factory.setLifespanBase(2.5)
        self.p0.factory.setLifespanSpread(0.5)
        self.p0.factory.setMassBase(1.0)
        self.p0.factory.setMassSpread(0.0)
        self.p0.factory.setTerminalVelocityBase(400.0)
        self.p0.factory.setTerminalVelocitySpread(0.0)
        self.p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAINOUT)
        self.p0.renderer.setUserAlpha(0.5)
        self.p0.renderer.setFromNode(self.card)
        self.p0.renderer.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
        self.p0.renderer.setXScaleFlag(1)
        self.p0.renderer.setYScaleFlag(1)
        self.p0.renderer.setAnimAngleFlag(0)
        self.p0.renderer.setNonanimatedTheta(0.0)
        self.p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
        self.p0.renderer.setAlphaDisable(0)
        self.p0.renderer.getColorInterpolationManager().addLinear(0.25, 1.0, Vec4(1.0, 1.0, 1.0, 1.0), Vec4(1.0, 1.0, 1.0, 0.0), 1)
        self.p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
        self.p0.emitter.setAmplitude(1.0)
        self.p0.emitter.setAmplitudeSpread(0.0)
        self.p0.emitter.setOffsetForce(Vec3(0.0, -2.0, 10.0))
        self.p0.emitter.setExplicitLaunchVector(Vec3(1.0, 0.0, 0.0))
        self.p0.emitter.setRadiateOrigin(Point3(0.0, 0.0, 0.0))
        # --- p1: fast splash sprites, clipped at a floor of z = -50 ---
        self.p1.setPoolSize(128)
        self.p1.setBirthRate(0.01)
        self.p1.setLitterSize(3)
        self.p1.setLitterSpread(1)
        self.p1.setSystemLifespan(0.0)
        self.p1.setLocalVelocityFlag(1)
        self.p1.setSystemGrowsOlderFlag(0)
        self.p1.setFloorZ(-50)
        self.p1.factory.setLifespanBase(0.5)
        self.p1.factory.setLifespanSpread(0.14999999999999999)
        self.p1.factory.setMassBase(1.0)
        self.p1.factory.setMassSpread(0.0)
        self.p1.factory.setTerminalVelocityBase(400.0)
        self.p1.factory.setTerminalVelocitySpread(0.0)
        self.p1.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
        self.p1.renderer.setUserAlpha(0.25)
        self.p1.renderer.setFromNode(self.card2)
        self.p1.renderer.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
        self.p1.renderer.setXScaleFlag(1)
        self.p1.renderer.setYScaleFlag(1)
        self.p1.renderer.setAnimAngleFlag(0)
        self.p1.renderer.setNonanimatedTheta(0.0)
        self.p1.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
        self.p1.renderer.setAlphaDisable(0)
        self.p1.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
        self.p1.emitter.setAmplitude(0.0)
        self.p1.emitter.setAmplitudeSpread(0.5)
        self.p1.emitter.setExplicitLaunchVector(Vec3(1.0, 0.0, 0.0))
        self.p1.emitter.setRadiateOrigin(Point3(0.0, 0.0, 0.0))

    def createTrack(self):
        """Build the start/run/stop interval: emit for 7 s, then shrink over
        2.5 s, effectively stop births, wait 2 s, and clean up."""
        self.setEffectScale(self.effectScale)
        shrink = LerpFunctionInterval(self.resize, 2.5, fromData = 1.0, toData = 0.25)
        self.startEffect = Sequence(Func(self.p0.setBirthRate, 0.10000000000000001), Func(self.p0.clearToInitial), Func(self.p1.setBirthRate, 0.01), Func(self.p1.clearToInitial), Func(self.f.start, self, self.particleDummy), Func(self.f.reparentTo, self))
        self.endEffect = Sequence(shrink, Func(self.p0.setBirthRate, 100.0), Func(self.p1.setBirthRate, 100.0), Wait(2.0), Func(self.cleanUpEffect))
        self.track = Sequence(self.startEffect, Wait(7.0), self.endEffect)

    def setEffectScale(self, scale):
        """Scale sprite sizes, emitter radii and the splash lift force."""
        self.effectScale = scale
        self.p0.renderer.setInitialXScale(0.14999999999999999 * self.cardScale * scale)
        self.p0.renderer.setFinalXScale(0.40000000000000002 * self.cardScale * scale)
        self.p0.renderer.setInitialYScale(0.14999999999999999 * self.cardScale * scale)
        self.p0.renderer.setFinalYScale(0.40000000000000002 * self.cardScale * scale)
        self.p0.emitter.setRadius(20.0 * scale)
        self.p1.renderer.setInitialXScale(0.050000000000000003 * self.card2Scale * scale)
        self.p1.renderer.setFinalXScale(0.20000000000000001 * self.card2Scale * scale)
        self.p1.renderer.setInitialYScale(0.10000000000000001 * self.card2Scale * scale)
        self.p1.renderer.setFinalYScale(0.14999999999999999 * self.card2Scale * scale)
        self.p1.emitter.setOffsetForce(Vec3(0.0, 0.0, 16.0 * scale))
        self.p1.emitter.setRadius(20.0 * scale)

    def resize(self, t):
        # Lerp callback used by createTrack()'s shrink interval.
        self.setEffectScale(self.effectScale * t)

    def cleanUpEffect(self):
        EffectController.cleanUpEffect(self)
        # Return this instance to the effect pool for reuse.
        self.checkInEffect(self)

    def destroy(self):
        EffectController.destroy(self)
        PooledEffect.destroy(self)
|
[
"brandoncarden12345@gmail.com"
] |
brandoncarden12345@gmail.com
|
f29845b4a7e41fd25e8f0aaf3a5e1216fa204a11
|
1db2e2238b4ef9c1b6ca3b99508693ee254d6904
|
/develop/analyse_2D_matrix/analyse_2D_matrix.py
|
f46d66c680efd95275086ab37592a63a94f4224e
|
[] |
no_license
|
pgreisen/pythonscripts
|
8674e08095f76edf08ef2059300349218079724c
|
0aadf8f96d19b306c1bc44a772e766a06fe3408b
|
refs/heads/master
| 2021-07-06T23:54:57.774342
| 2021-06-08T19:36:36
| 2021-06-08T19:36:36
| 22,017,192
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,186
|
py
|
from numpy import mean,sqrt,var
import sys
from collections import defaultdict
from collections import OrderedDict
import os,shutil,sys,argparse
class Analyse2D:
    """Collect Rosetta per-mutation scores from '*scores' files (named like
    'A_D_...scores') and write them out as a 2D amino-acid matrix CSV."""

    def __init__(self):
        # Maps first-position residue letter -> list of (second letter, delta score).
        self.scores = defaultdict(list)
        self.scores_position_two = {}
        self.aa = ['ALA','ARG','ASN','ASP','CYS','GLN','GLU','GLY','HIS','ILE','LEU','LYS','MET','PHE','PRO','SER','THR','TRP','TYR','VAL']
        self.aa_single_letter = ['A','R','N','D','C','Q','E','G','H','I','L','K','M','F','P','S','T','W','Y','V']
        self.aa_values = []
        # times above mean
        self.factor = 1
        # Score column to extract; its index is found in the SCORE header line.
        self.score_term = "total_score"
        self.score_term_position = 0
        # A_D_resfile_scores
        self.baseline_value = -552.660

    def get_sorted_hashtable(self, hashtable):
        """Return *hashtable* as an OrderedDict sorted by value, descending."""
        return OrderedDict(sorted(hashtable.items(), key=lambda x: x[1],reverse=True)) #[0:maxvalue])

    def set_matrix(self,filename):
        """Parse one scores file named '<aa1>_<aa2>_...' and record the
        score-term delta (score minus baseline_value) under aa1/aa2."""
        # tmp_variable: still looking for the SCORE header line.
        tmp_variable = True
        tmp_variable2 = False
        with open(filename,'r') as f:
            for line in f:
                tmp_line = line.split()
                if( line[0:4] == "SEQU" ):
                    continue
                elif ( line[0:4] == "SCOR" and tmp_variable == True):
                    # Header row: locate the column index of self.score_term.
                    for t in range( len(tmp_line) ):
                        if (tmp_line[t] == self.score_term):
                            self.score_term_position = t
                    tmp_variable = False
                elif( tmp_variable == False):
                    aa_tmp = filename.split('_')
                    tmp_value = round(float( tmp_line[self.score_term_position] ) - self.baseline_value,3)
                    self.scores[ str(aa_tmp[0]) ].append( (aa_tmp[1], str( tmp_value ) ) )
                    # works
                    #self.scores[ str(aa_tmp[0]) ].append( (aa_tmp[1], tmp_line[self.score_term_position] - self.baseline_value) )
                    ##print aa_tmp[1] , tmp_line[self.score_term_position]
                    ##self.scores[ str(aa_tmp[0]) ].append( scores_position_two[ aa_tmp[1] ] = tmp_line[self.score_term_position] )
                    #self.scores_position_two = { aa_tmp[1] : tmp_line[self.score_term_position] }
                    #tmp_variable2 = True
                    #elif( tmp_variable2 == True):
                    #self.scores[ str(aa_tmp[0]) ].append( self.scores_position_two )

    def write_matrix_to_file(self):
        """Write the collected scores as a 20x20 CSV named 2Dscan.csv."""
        with open("2Dscan.csv",'w') as f:
            f.write("AA(203/233),")
            for key in self.aa_single_letter:
                f.write(key+",")
            f.write("\n")
            #assert 1 == 0
            # print self.scores
            # NOTE(review): the inner loops reuse the name `key`, shadowing
            # the row letter; the outer loop still iterates correctly, but
            # the shadowing makes this hard to read.
            for key in self.aa_single_letter:
                f.write(key+",")
                ##print key
                ##assert 1 == 0
                tmp_dic = self.scores[ key ]
                for key in self.aa_single_letter:
                    for i in range(len(tmp_dic) ):
                        if(key == tmp_dic[i][0]):
                            # continue
                            # print key, tmp_dic[i]
                            f.write(tmp_dic[i][1]+",")
                f.write("\n")

    def main(self):
        """Parse CLI args, ingest every '*scores' file in ./, and write the CSV."""
        parser = argparse.ArgumentParser(description="Generate 2D matrix from multiple rosetta output files")
        # get the initial rosetta design as input
        parser.add_argument("-s","--score_term", dest="score_term", help="Which score term to analyse (Default total_score )", default="total_score")
        # parser.add_argument("-b", "--bundles", dest="helical_bundle", help="Four chains helical bundle with four chains is set to true", action="store_true", default=False )
        path = "./"
        files = os.listdir( path )
        args_dict = vars( parser.parse_args() )
        # Copy each parsed argument onto self (sets self.score_term).
        for item in args_dict:
            setattr(self, item, args_dict[item])
        for fl in files:
            # assumes that the pachdock file ends with .out
            if( os.path.isfile(fl) and fl.endswith("scores") ):
                self.set_matrix( fl )
        self.write_matrix_to_file()
if __name__ == "__main__":
    # Script entry point.
    run = Analyse2D()
    run.main()
|
[
"pgreisen@gmail.com"
] |
pgreisen@gmail.com
|
127340c6f50c34bb02f1678ec7d9f12c7ce76d64
|
04c41aca1f78ac617fe1573818b10fb0e12fbe40
|
/tests/schemas/users/snapshots/snap_test_queries.py
|
85793ffecaf3ec9d16884144152f71578d6c90f8
|
[] |
no_license
|
telephoneorg/orka-api
|
619489ae17dfd850c74aa2c950e0d4385ba059d9
|
5693aff776b7a7649617915a0a8e0942d1228c24
|
refs/heads/master
| 2022-01-10T15:28:21.277746
| 2019-05-16T19:06:40
| 2019-05-16T19:06:40
| 186,914,929
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,718
|
py
|
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()

# NOTE: this module is auto-generated by snapshottest; regenerate the values
# with the snapshot-update run rather than editing them by hand.

# Recorded GraphQL payload for the `me` query: the user, both of their
# contexts (participant + facilitator incl. licenses) and notification policy.
snapshots['test_get_me_query 1'] = {
    'data': {
        'me': {
            'cellPhone': '+14153334444',
            'contexts': {
                'count': 2,
                'edges': [
                    {
                        'cursor': 'YXJyYXljb25uZWN0aW9uOjA=',
                        'node': {
                            '__typename': 'ParticipantContext',
                            'id': 'UGFydGljaXBhbnRDb250ZXh0OjE=',
                            'profile': {
                                'avatar': 'https://example.com/images/me.jpg',
                                'bio': '''Hey!
What are you looking at?''',
                                'displayName': 'Johnny',
                                'id': 'UHJvZmlsZTox'
                            },
                            'status': 'CREATED'
                        }
                    },
                    {
                        'cursor': 'YXJyYXljb25uZWN0aW9uOjE=',
                        'node': {
                            '__typename': 'FacilitatorContext',
                            'availability': '[{"isoWeekDay": 1, "start": "14:49:31.947740", "end": "22:49:31.947749"}, {"isoWeekDay": 2, "start": "14:49:31.947758", "end": "22:49:31.947760"}, {"isoWeekDay": 3, "start": "14:49:31.947765", "end": "22:49:31.947766"}, {"isoWeekDay": 4, "start": "14:49:31.947770", "end": "22:49:31.947772"}, {"isoWeekDay": 5, "start": "14:49:31.947776", "end": "22:49:31.947777"}, {"isoWeekDay": 6, "start": "14:49:31.947781", "end": "22:49:31.947782"}, {"isoWeekDay": 7, "start": "14:49:31.947786", "end": "22:49:31.947788"}]',
                            'id': 'RmFjaWxpdGF0b3JDb250ZXh0OjI=',
                            'licenses': {
                                'edges': [
                                    {
                                        'cursor': 'YXJyYXljb25uZWN0aW9uOjA=',
                                        'node': {
                                            'expiry': '2022-02-08',
                                            'id': 'RmFjaWxpdGF0b3JMaWNlbnNlOjE=',
                                            'number': 'xpii23420e90',
                                            'type': 'TBD',
                                            'usState': 'NY'
                                        }
                                    },
                                    {
                                        'cursor': 'YXJyYXljb25uZWN0aW9uOjE=',
                                        'node': {
                                            'expiry': None,
                                            'id': 'RmFjaWxpdGF0b3JMaWNlbnNlOjI=',
                                            'number': 'xpn342300309e8',
                                            'type': 'TBD',
                                            'usState': None
                                        }
                                    },
                                    {
                                        'cursor': 'YXJyYXljb25uZWN0aW9uOjI=',
                                        'node': {
                                            'expiry': '2016-02-08',
                                            'id': 'RmFjaWxpdGF0b3JMaWNlbnNlOjM=',
                                            'number': 'expired',
                                            'type': 'TBD',
                                            'usState': 'NY'
                                        }
                                    }
                                ],
                                'pageInfo': {
                                    'endCursor': 'YXJyYXljb25uZWN0aW9uOjI=',
                                    'hasNextPage': False,
                                    'hasPreviousPage': False,
                                    'startCursor': 'YXJyYXljb25uZWN0aW9uOjA='
                                }
                            },
                            'npi': 'nc2394jt98ddeesd',
                            'profile': {
                                'avatar': 'https://dr-smith.com/photos/dr-smith.jpg',
                                'bio': '''I don't know how to put this but I'm kind of a big deal.
People know me. I'm very important.
I have many leather-bound books and my apartment smells of rich mahogany.''',
                                'displayName': 'Dr. Smith',
                                'id': 'UHJvZmlsZToy'
                            },
                            'status': 'CREATED'
                        }
                    }
                ],
                'pageInfo': {
                    'endCursor': 'YXJyYXljb25uZWN0aW9uOjE=',
                    'hasNextPage': False,
                    'hasPreviousPage': False,
                    'startCursor': 'YXJyYXljb25uZWN0aW9uOjA='
                }
            },
            'dob': '1980-01-04',
            'email': 'johnsmith@gmail.com',
            'firstName': 'John',
            'id': 'VXNlcjox',
            'lastName': 'Smith',
            'notificationPolicy': {
                'allowEmail': True,
                'allowMarketing': False,
                'allowSms': True,
                'id': 'Tm90aWZpY2F0aW9uUG9saWN5OjE='
            }
        }
    }
}

# Recorded response for the `profile` query (single profile lookup).
snapshots['test_get_profile_query 1'] = {
    'data': {
        'profile': {
            'avatar': 'https://example.com/images/me.jpg',
            'bio': '''Hey!
What are you looking at?''',
            'displayName': 'Johnny',
            'id': 'UHJvZmlsZTox'
        }
    }
}
|
[
"me@joeblack.nyc"
] |
me@joeblack.nyc
|
ed7b59ea529505c039c84632f0c762b6e7687cd5
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/29/usersdata/149/9337/submittedfiles/atividade.py
|
a129ae25b27b8413697e0322686088d89dc72a89
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,309
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
# FIX: the original line was `print('não sei de nada)` — the string literal
# was never closed, making the whole file a SyntaxError.
print('não sei de nada')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
d83d17593a23cc734d5f3a70eba905a6d4cec639
|
d780df6e068ab8a0f8007acb68bc88554a9d5b50
|
/python/g1/asyncs/kernels/g1/asyncs/kernels/__init__.py
|
14f907bb3747de8bcda48ebf1fa4c851b89d6562
|
[
"MIT"
] |
permissive
|
clchiou/garage
|
ed3d314ceea487b46568c14b51e96b990a50ed6f
|
1d72863d3a5f5d620b170f4dd36f605e6b72054f
|
refs/heads/master
| 2023-08-27T13:57:14.498182
| 2023-08-15T07:09:57
| 2023-08-15T19:53:52
| 32,647,497
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,425
|
py
|
# Public API of the package.
__all__ = [
    'KernelTimeout',
    'call_with_kernel',
    'get_kernel',
    'run',
    'with_kernel',
]

import contextlib
import contextvars
import functools
import logging

from . import contexts
from . import kernels

# Re-export errors.
from .errors import KernelTimeout

# Library convention: emit nothing unless the application configures logging.
logging.getLogger(__name__).addHandler(logging.NullHandler())
def with_kernel(func):
    """Decorator form of :func:`call_with_kernel`.

    The decorated function always executes inside a kernel context.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return call_with_kernel(func, *args, **kwargs)

    return wrapper
def call_with_kernel(func, *args, **kwargs):
    """Call ``func`` within a kernel context.

    The kernel object is closed on return.
    """

    def caller():
        # Do not create nested kernels; this seems to make more sense.
        # In general, I think it is easier to work with when there is
        # always at most one global kernel object per thread.
        if contexts.get_kernel(None) is None:
            kernel = kernels.Kernel()
            contexts.set_kernel(kernel)
            # We created the kernel, so we close it when func returns.
            cm = contextlib.closing(kernel)
        else:
            # Reuse the existing kernel; closing it is its creator's job.
            cm = contextlib.nullcontext()
        with cm:
            return func(*args, **kwargs)

    # Run in a copied contextvars context so the set_kernel() above does not
    # leak into the caller's context.
    return contextvars.copy_context().run(caller)
def run(awaitable=None, timeout=None):
    """Run ``awaitable`` on the current context's kernel, bounded by ``timeout``."""
    kernel = contexts.get_kernel()
    return kernel.run(awaitable, timeout)
def get_kernel():
    """Return the current context's kernel, or ``None`` when none is set."""
    return contexts.get_kernel(None)
|
[
"clchiou@gmail.com"
] |
clchiou@gmail.com
|
750b855466580ca336ef851454d85be2d5325ce7
|
5c099927aedc6fdbc515f40ff543c65b3bf4ec67
|
/algorithms/find-and-replace-pattern/src/Solution2.py
|
3baef0faeceac6fc78a718b61d8201ae9e97cf0a
|
[] |
no_license
|
bingzhong-project/leetcode
|
7a99cb6af1adfbd9bb1996a7f66a65679053c478
|
ba82e7d94840b3fec272e4c5f82e3a2cfe4b0505
|
refs/heads/master
| 2020-04-15T09:27:33.979519
| 2020-03-10T03:43:07
| 2020-03-10T03:43:07
| 164,550,933
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 723
|
py
|
class Solution:
    """Return the words that match ``pattern`` under a one-to-one letter mapping."""

    def findAndReplacePattern(self, words: list, pattern: str) -> list:
        def match(word, pattern):
            # Track both directions of the mapping so it stays bijective.
            p_to_w = {}
            w_to_p = {}
            for i, p_ch in enumerate(pattern):
                w_ch = word[i]
                p_to_w.setdefault(p_ch, w_ch)
                w_to_p.setdefault(w_ch, p_ch)
                if p_to_w[p_ch] != w_ch or w_to_p[w_ch] != p_ch:
                    return False
            return True

        return [word for word in words if match(word, pattern)]
|
[
"zhongyongbin@foxmail.com"
] |
zhongyongbin@foxmail.com
|
f95818b00f5f0d66c4279b5d1bba2ce0634bbc30
|
87209058bd5dd05ff0b3bd3ce2e5b5ed12671410
|
/jiaoyu/apps/organizations/forms.py
|
7fd39c13a1a60aab869309800b2a5a8761931593
|
[] |
no_license
|
keepingoner/django-Projects
|
2a8a245b702a507efc27b4b5c6fb669bcf7d1846
|
8ca94559a31f82951a05dd8749c37d7595a8e298
|
refs/heads/master
| 2022-01-24T09:08:27.384731
| 2019-07-23T02:48:20
| 2019-07-23T02:48:20
| 109,779,078
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
from django import forms
from operations.models import UserAsk
class UserAskForm(forms.ModelForm):
    """ModelForm over the UserAsk model, exposing only name, mobile and coursename."""

    class Meta:
        model = UserAsk
        fields = ['name', 'mobile', 'coursename']
|
[
"keepingoner@163.com"
] |
keepingoner@163.com
|
d247e69e2008ceb52000bf94a9c402b65030dfbb
|
c84cee1abce6372a71314d28ca6a8681a6ad5cb5
|
/chat/forms.py
|
adc3810f9a97a850edc84266b4d97bcd84337a87
|
[] |
no_license
|
SergeyLebidko/ChannelsTraining
|
441477a14c6fe424ea11ba2e05aac0fef90151bc
|
845a9b27f1d0bdb9424407c68e5051ec06d06cc9
|
refs/heads/master
| 2023-06-09T09:32:21.049921
| 2021-06-22T13:42:44
| 2021-06-22T13:42:44
| 376,584,019
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 862
|
py
|
from django import forms
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from django.contrib.auth.password_validation import validate_password
class RegisterForm(forms.Form):
    """Sign-up form: validates password strength and username uniqueness."""

    username = forms.CharField(label='Имя пользователя', required=True)
    password = forms.CharField(label='Пароль', widget=forms.PasswordInput, required=True)

    def clean_password(self):
        # Delegate strength checks to Django's configured password validators.
        password = self.cleaned_data['password']
        validate_password(password)
        return password

    def clean_username(self):
        # Reject usernames that are already taken.
        username = self.cleaned_data['username']
        if User.objects.filter(username=username).exists():
            raise ValidationError('Пользователь с таким именем уже существует')
        return username
|
[
"it.sergeyler@mail.ru"
] |
it.sergeyler@mail.ru
|
91d8dc9cb2b95ee091580cbe4da673fa4fb4185c
|
9c36503027aa6fc2fa2f841d60f70f2697ae60be
|
/pygraphc/similarity/LogTextSimilarity.py
|
59263ee4a8088fff29f3a4c0e97edc18e48e95c9
|
[
"MIT"
] |
permissive
|
studiawan/pygraphc
|
bd5517478a6e1ad04220c13fa9f7aea6546225ac
|
436aca13cfbb97e7543da61d38c8462da64343b5
|
refs/heads/master
| 2021-01-23T21:29:41.151025
| 2018-05-17T07:54:54
| 2018-05-17T07:54:54
| 58,362,965
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,451
|
py
|
from pygraphc.preprocess.PreprocessLog import PreprocessLog
from pygraphc.similarity.StringSimilarity import StringSimilarity
from itertools import combinations
import csv
import multiprocessing
class LogTextSimilarity(object):
    """A class for calculating cosine similarity between a log pair. This class is intended for
    non-graph based clustering method.

    NOTE(review): Python 2 style code (``dict.iteritems`` and csv files opened
    in ``'wb'`` mode); these call sites must change for a Python 3 port.
    """
    def __init__(self, mode, logtype, logs, clusters, cosine_file=''):
        """The constructor of class LogTextSimilarity.

        Parameters
        ----------
        mode : str
            Mode of operation, i.e., text and text-h5
        logtype : str
            Type for event log, e.g., auth, syslog, etc.
        logs : list
            List of every line of original logs.
        clusters : dict
            Dictionary of clusters. Key: cluster_id, value: list of log line id.
        cosine_file : str
            Path prefix for the per-node CSV files written in 'text-csv' mode.
        """
        self.mode = mode
        self.logtype = logtype
        self.logs = logs
        self.clusters = clusters
        # Filled by get_cosine_similarity() after preprocessing.
        self.events = {}
        self.cosine_file = cosine_file

    def __call__(self, node):
        # Instances are handed directly to multiprocessing.Pool.map in
        # get_cosine_similarity, so calling the instance must do the work.
        return self.__write_cosine_csv(node)

    def __write_cosine_csv(self, node):
        # One CSV per node: each row holds the (1 - similarity) distances from
        # `node` to the other members of a cluster, terminated by cluster_id.
        csv_file = self.cosine_file + str(node) + '.csv'
        f = open(csv_file, 'wb')
        writer = csv.writer(f)
        for cluster_id, cluster in self.clusters.iteritems():
            row = []
            for c in cluster:
                if node != c:
                    similarity = StringSimilarity.get_cosine_similarity(self.events[node]['tf-idf'],
                                                                        self.events[c]['tf-idf'],
                                                                        self.events[node]['length'],
                                                                        self.events[c]['length'])
                    if similarity > 0:
                        # Stored as a distance, not a similarity.
                        row.append(1 - similarity)
            if row:
                row.append(cluster_id)
                writer.writerow(row)
        f.close()

    def get_cosine_similarity(self):
        """Get cosine similarity from a pair of log lines in a file.

        Returns
        -------
        cosine_similarity : dict
            Dictionary of cosine similarity in non-graph clustering. Key: (log_id1, log_id2),
            value: cosine similarity distance.
            In 'text-csv' mode nothing is returned; results go to CSV files instead.
        """
        preprocess = PreprocessLog(self.logtype)
        preprocess.preprocess_text(self.logs)
        self.events = preprocess.events_text
        cosines_similarity = {}
        if self.mode == 'text':
            # calculate cosine similarity
            for log_pair in combinations(range(preprocess.loglength), 2):
                cosines_similarity[log_pair] = \
                    StringSimilarity.get_cosine_similarity(self.events[log_pair[0]]['tf-idf'],
                                                           self.events[log_pair[1]]['tf-idf'],
                                                           self.events[log_pair[0]]['length'],
                                                           self.events[log_pair[1]]['length'])
            return cosines_similarity
        elif self.mode == 'text-csv':
            # write cosine similarity to csv files
            nodes = range(preprocess.loglength)
            pool = multiprocessing.Pool(processes=3)
            pool.map(self, nodes)
            pool.close()
            pool.join()
|
[
"studiawan@gmail.com"
] |
studiawan@gmail.com
|
7997978962821fe9981f1e5e1415740d920bddbb
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/ENJTPoWCyEGgnXYjM_7.py
|
6804f1adebe40875f04f9577628db7907b57414a
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
"""
Create a function that calculates what percentage of the box is filled in.
Give your answer as a string percentage rounded to the nearest integer.
### Examples
percent_filled([
"####",
"# #",
"#o #",
"####"
]) ➞ "25%"
# One element out of four spaces.
percent_filled([
"#######",
"#o oo #",
"#######"
]) ➞ "60%"
# Three elements out of five spaces.
percent_filled([
"######",
"#ooo #",
"#oo #",
"# #",
"# #",
"######"
]) ➞ "31%"
# Five elements out of sixteen spaces.
### Notes
* Only "o" will fill the box and also "o" will not be found outside of the box.
* Don't focus on how much physical space an element takes up, pretend that each element occupies one whole unit (which you can judge according to the number of "#" on the sides).
"""
def percent_filled(box):
    """Return the filled fraction of the box interior as a string like "25%".

    Interior cells are the characters inside the "#" border: "o" counts as
    filled, " " as empty.

    FIX: the spec above asks for the percentage "rounded to the nearest
    integer", but the original truncated with int() (e.g. 2/3 gave "66%"
    instead of "67%"). A box with no interior cells now yields "0%" instead
    of raising ZeroDivisionError.
    """
    cells = ''.join(box)
    empty = cells.count(' ')
    filled = cells.count('o')
    total = empty + filled
    if total == 0:
        return '0%'
    return str(round(filled / total * 100)) + '%'
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
b7b79b8c36d18d972a46f2208026d965dae2afbd
|
09cead98874a64d55b9e5c84b369d3523c890442
|
/py210110d_python3a/day10_210314/homework/stem1403a_hw_9_0307_KevinLiu.py
|
b6e041e3e82b381854c864f058043ff9a5dc02eb
|
[] |
no_license
|
edu-athensoft/stem1401python_student
|
f12b404d749286036a090e941c0268381ce558f8
|
baad017d4cef2994855b008a756758d7b5e119ec
|
refs/heads/master
| 2021-08-29T15:01:45.875136
| 2021-08-24T23:03:51
| 2021-08-24T23:03:51
| 210,029,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,496
|
py
|
"""
Date: 2021-03-08
1. Write a GUI program of clock
Requirements:
(Function)
Show current time in the pattern of HH:mm:ss.aaa
i.e.
10:12:45.369
(UI)
Display a title, main area for clock, and footer for the date
Due date: by the end of next Friday
Hint:
import datetime
strftime
"""
"""
score:
perfect
"""
# tkinter module
from tkinter import *
from tkinter.ttk import Separator
import datetime
# function
def run_clock():
    """Refresh the clock label with the current time and re-arm the timer."""
    # strftime yields HH:MM:SS plus microseconds; the first 12 characters
    # are HH:MM:SS.mmm (millisecond precision).
    stamp = datetime.datetime.now().strftime("%H:%M:%S.%f")[:12]
    current_time.configure(text=stamp)
    current_time.after(1, run_clock)
# widget
# widget
root = Tk()
root.title('Python GUI - pack fill')

# Center a 640x450 window on the screen.
w_width = 640
w_height = 450
sw = root.winfo_screenwidth(); sh = root.winfo_screenheight()
top_left_x = int(sw/2 - w_width/2); top_left_y = int(sh/2 - w_height/2)
root.geometry(f"{w_width}x{w_height}+{top_left_x}+{top_left_y}")

# header
header = Label(text="System Time", fg="Black", font=("Helvetica", 28))
header.pack(padx=15, pady=15)

# separator 1
sep = Separator(root, orient=HORIZONTAL)
sep.pack(fill=X)

# current time label -- seeded once here; run_clock() keeps it refreshed.
time = datetime.datetime.now().strftime("%H:%M:%S.%f")[:-3]
current_time = Label(text=time, bg="green", fg="White", font=("Helvetica", 36), width=15, height=5)
current_time.pack(padx=15, pady=15)

# separator 2
sep2 = Separator(root, orient=HORIZONTAL)
sep2.pack(fill=X)

# footer
footer = Label(text="Version 1, Kevin Liu, 12 March 2021", fg="Black", font=("Helvetica", 28))
footer.pack(padx=15, pady=15)

# main program: start the update loop, then hand control to Tk.
run_clock()
root.mainloop()
|
[
"lada314@gmail.com"
] |
lada314@gmail.com
|
0ed0a470b8e20c51b15783399801293e4c38342f
|
8fa1999cb8a973937d6629c553feca60dd3a73d7
|
/Atividade E/fabio01_q06.py
|
2f76792ccd7c7f87b12dc795ab7900fe3e71545e
|
[] |
no_license
|
Isaias301/IFPI-ads2018.1
|
16cddbb278336a3e06738f9dc21d2d11053dcec4
|
026fe5c2ffbc8aed55c478b7544472c46b357e69
|
refs/heads/master
| 2020-03-22T20:15:13.789014
| 2018-08-09T03:14:48
| 2018-08-09T03:14:48
| 140,585,300
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
""" Questão: Lista E 06
Descrição: Leia uma velocidade em km/h, calcule e escreva esta velocidade em m/s.
"""
def main():
# entrada
velocidade_em_km = float(input("Digite uma uma velocidade em Km/h: "))
# calculos, operacoes, processamento
velocidade_em_ms = velocidade_em_km * 3.6
# saida
print('Resultado: %.2f m/s' % velocidade_em_ms)
if __name__ == '__main__':
main()
|
[
"isaiassantana301@gmail.com"
] |
isaiassantana301@gmail.com
|
aa0f59f38a582475a55c56c36a78be79bab75599
|
fe203d5c28e2010cdc78a4b29755e148d58045db
|
/p02/q07_miles_to_kilometres.py
|
1e02355748970bfbdec15c9a409bb1cc807d34ef
|
[] |
no_license
|
sp0002/cp2019
|
d2a9aa5bfe7c82de3ed3f96f281c39be8704d3bd
|
6c48528f948dad01f4d6571e3bb22dbf253c423c
|
refs/heads/master
| 2020-04-24T23:24:21.324069
| 2019-04-13T11:13:01
| 2019-04-13T11:13:01
| 171,574,023
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
# Conversion table: miles 1..10 -> kilometres, and kilometres 20..65
# (step 5) -> miles, rounded to 3 decimals.
print("Miles Kilometers Kilometres Miles")
for row in range(1, 11):
    km = round(row * 1.60934, 3)
    km_txt = "{:.3f}".format(km)
    km2 = (row - 1) * 5 + 20
    miles2_txt = "{:.3f}".format(round(km2 / 1.60934, 3))
    # Column padding mirrors the original widths (6 / 11 / 11).
    line = str(row) + " " * (6 - len(str(row)))
    line += km_txt + " " * (11 - len(str(km)))
    line += str(km2) + " " * (11 - len(str(km2)))
    line += miles2_txt
    print(line)
|
[
"k"
] |
k
|
ca592bb99a1866b3bd5f87d00cf9884fb0e2e036
|
fab39aa4d1317bb43bc11ce39a3bb53295ad92da
|
/nncf/torch/dynamic_graph/operation_address.py
|
f9fe1e55d976d86a8bab71816c910f10257af01d
|
[
"Apache-2.0"
] |
permissive
|
dupeljan/nncf
|
8cdce27f25f01ce8e611f15e1dc3036fb8548d6e
|
0abfd7103ca212888a946ba4d0fbdb9d436fdaff
|
refs/heads/develop
| 2023-06-22T00:10:46.611884
| 2021-07-22T10:32:11
| 2021-07-22T10:32:11
| 388,719,455
| 0
| 0
|
Apache-2.0
| 2021-07-23T07:46:15
| 2021-07-23T07:43:43
| null |
UTF-8
|
Python
| false
| false
| 1,715
|
py
|
"""
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from nncf.torch.dynamic_graph.scope import Scope
class OperationAddress:
    """Address of a single operator call: scope, operator name and call order."""

    def __init__(self, operator_name: str, scope_in_model: Scope, call_order: int):
        self.operator_name = operator_name
        self.scope_in_model = scope_in_model
        self.call_order = call_order

    def _key(self):
        # The three fields that together define equality and hashing.
        return self.operator_name, self.scope_in_model, self.call_order

    def __eq__(self, other: 'OperationAddress'):
        return isinstance(other, OperationAddress) and self._key() == other._key()

    def __str__(self):
        return '{}/{}_{}'.format(str(self.scope_in_model), self.operator_name, self.call_order)

    def __hash__(self):
        return hash(self._key())

    @staticmethod
    def from_str(s: str):
        # Format is "<scope>/<operator_name>_<call_order>"; split from the
        # right so separators inside the scope string do not break parsing.
        scope_and_op, _, call_order_str = s.rpartition('_')
        scope_str, _, op_name = scope_and_op.rpartition('/')
        return OperationAddress(op_name,
                                Scope.from_str(scope_str),
                                int(call_order_str))
|
[
"noreply@github.com"
] |
dupeljan.noreply@github.com
|
cdad51f5c22f7a1acdf954745aa1ca7cd922befa
|
ba3231b25c60b73ca504cd788efa40d92cf9c037
|
/nitro-python-13.0.36/nssrc/com/citrix/netscaler/nitro/resource/config/network/ptp.py
|
27fa36e319f1e78d559dded8881328498c85d2f6
|
[
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
zhuweigh/vpx13
|
f6d559ae85341e56472e3592cbc67062dac34b93
|
b36caa3729d3ca5515fa725f2d91aeaabdb2daa9
|
refs/heads/master
| 2020-07-04T22:15:16.595728
| 2019-09-20T00:19:56
| 2019-09-20T00:19:56
| 202,435,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,505
|
py
|
#
# Copyright (c) 2008-2019 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
# NOTE(review): auto-generated NITRO SDK binding -- presumably regenerated
# from the API schema; avoid hand-edits beyond comments.
class ptp(base_resource) :
	""" Configuration for Precision Time Protocol resource. """
	def __init__(self) :
		# Backing attribute for the `state` property below.
		self._state = None

	@property
	def state(self) :
		r"""Enables or disables Precision Time Protocol (PTP) on the appliance. If you disable PTP, make sure you enable Network Time Protocol (NTP) on the cluster.<br/>Default value: ENABLE<br/>Possible values = DISABLE, ENABLE.
		"""
		try :
			return self._state
		except Exception as e:
			raise e

	@state.setter
	def state(self, state) :
		r"""Enables or disables Precision Time Protocol (PTP) on the appliance. If you disable PTP, make sure you enable Network Time Protocol (NTP) on the cluster.<br/>Default value: ENABLE<br/>Possible values = DISABLE, ENABLE
		"""
		try :
			self._state = state
		except Exception as e:
			raise e

	def _get_nitro_response(self, service, response) :
		r""" converts nitro response into object and returns the object array in case of get request.
		"""
		try :
			result = service.payload_formatter.string_to_resource(ptp_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# Error code 444 means the session is stale; drop it before failing.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.ptp
		except Exception as e :
			raise e

	def _get_object_name(self) :
		r""" Returns the value of object identifier argument
		"""
		try :
			# ptp is a singleton resource -- there is no per-object identifier.
			return 0
		except Exception as e :
			raise e

	@classmethod
	def update(cls, client, resource) :
		r""" Use this API to update ptp.
		"""
		try :
			if type(resource) is not list :
				updateresource = ptp()
				updateresource.state = resource.state
				return updateresource.update_resource(client)
		except Exception as e :
			raise e

	@classmethod
	def get(cls, client, name="", option_="") :
		r""" Use this API to fetch all the ptp resources that are configured on netscaler.
		"""
		try :
			if not name :
				obj = ptp()
				response = obj.get_resources(client, option_)
				return response
		except Exception as e :
			raise e

	class State:
		# Allowed values for the `state` property.
		DISABLE = "DISABLE"
		ENABLE = "ENABLE"
class ptp_response(base_response) :
	"""Response envelope the NITRO deserializer fills with `ptp` resources.

	FIX: removed the dead `self.ptp = []` assignment that was immediately
	overwritten by the pre-allocated list below.
	"""
	def __init__(self, length=1) :
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Pre-allocate `length` empty resource objects for the deserializer.
		self.ptp = [ptp() for _ in range(length)]
|
[
"zhuwei@xsky.com"
] |
zhuwei@xsky.com
|
71de8ab94c91d087323136dda99bddbbcd9ec73f
|
73c01a3f052f8ef63890ec3c2e28403ad41e9a71
|
/service/migrations/0007_ticket_photo.py
|
806039291d087ae7c08ce662cfe1a5f5ce6385fb
|
[] |
no_license
|
Jokey90/aho
|
4c007c65c819efb726a732a8f36067c5a0226100
|
8bcd41e9ef7d40f07499429f385d4fec590636f6
|
refs/heads/master
| 2020-03-21T22:28:36.395996
| 2018-06-29T09:25:05
| 2018-06-29T09:25:05
| 139,128,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 553
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-08-16 12:57
from __future__ import unicode_literals
from django.db import migrations, models
import service.models.ticket
class Migration(migrations.Migration):
    """Auto-generated migration: adds the optional `photo` FileField to Ticket."""

    dependencies = [
        ('service', '0006_auto_20170804_1420'),
    ]

    operations = [
        migrations.AddField(
            model_name='ticket',
            name='photo',
            # upload_to is resolved at runtime by service.models.ticket.file_path.
            field=models.FileField(blank=True, null=True, upload_to=service.models.ticket.file_path, verbose_name='Фото'),
        ),
    ]
|
[
"Kishkurno_AS@dsdf.cds.ru"
] |
Kishkurno_AS@dsdf.cds.ru
|
fb24842332a1d4553a27ced6b2f8e60c9554ad3d
|
c50fb310d8c52284be2c636f951de796eededae9
|
/47.py
|
f181b2c679dcf18046f30a11c88ec47c1b317684
|
[] |
no_license
|
Deepakdk7/Playerset3
|
6f46f638f22d894b9cc93d81b27c221f9dcdaad3
|
636e1feed0f97bbc9e9495a5dbb81a512ed980c5
|
refs/heads/master
| 2020-06-03T07:35:23.203780
| 2019-08-06T08:56:16
| 2019-08-06T08:56:16
| 191,497,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
# Read three angles from one input line; they form a valid triangle iff
# they sum to 180 degrees and every angle is strictly positive.
# FIX: the original only tested `!= 0`, so negative angles (e.g. -10 90 100)
# were wrongly accepted.
ax = list(map(int, input().split()))
if ax[0] + ax[1] + ax[2] == 180 and min(ax[0], ax[1], ax[2]) > 0:
    print('yes')
else:
    print('no')
|
[
"noreply@github.com"
] |
Deepakdk7.noreply@github.com
|
27d1a3c411b12208e8d4fb289eb2af4bf85cb440
|
ca75f7099b93d8083d5b2e9c6db2e8821e63f83b
|
/z2/part3/updated_part2_batch/jm/parser_errors_2/239061968.py
|
6824dca6f32823cb12a5f3a0a45879a6c8761224
|
[
"MIT"
] |
permissive
|
kozakusek/ipp-2020-testy
|
210ed201eaea3c86933266bd57ee284c9fbc1b96
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
refs/heads/master
| 2022-10-04T18:55:37.875713
| 2020-06-09T21:15:37
| 2020-06-09T21:15:37
| 262,290,632
| 0
| 0
|
MIT
| 2020-06-09T21:15:38
| 2020-05-08T10:10:47
|
C
|
UTF-8
|
Python
| false
| false
| 1,180
|
py
|
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 239061968
"""
"""
random actions, total chaos
"""

# Auto-generated regression test for the gamma game engine:
# 2x3 board, 2 players, at most 4 areas per player.
board = gamma_new(2, 3, 2, 4)
assert board is not None

assert gamma_move(board, 1, 0, 1) == 1
assert gamma_move(board, 1, 0, 1) == 0
assert gamma_move(board, 2, 0, 2) == 1
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_free_fields(board, 2) == 3
assert gamma_move(board, 1, 0, 1) == 0
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_free_fields(board, 2) == 2
assert gamma_move(board, 1, 0, 1) == 0
assert gamma_move(board, 1, 0, 1) == 0
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_golden_possible(board, 2) == 1

# Release the board before the script ends.
gamma_delete(board)
|
[
"noreply@github.com"
] |
kozakusek.noreply@github.com
|
7a877964c195ba8b4611fc1c614aab2598a7d346
|
b2301365d220ff0295b8beddbed38b0581f9610d
|
/Django/landscapes/landscapes/urls.py
|
6bb380064f3a5011c960add9367daf6a83339d72
|
[] |
no_license
|
JoA-MoS/Python
|
db246a5ff2201c6ef1dfb9d9b0fd8a37e1d7c46d
|
4547c2667f3eaf0a001532bb2b103aab3c344fbe
|
refs/heads/master
| 2021-08-16T11:18:20.420868
| 2017-07-21T05:52:18
| 2017-07-21T05:52:18
| 96,125,892
| 0
| 0
| null | 2021-06-10T18:40:09
| 2017-07-03T15:34:52
|
Python
|
UTF-8
|
Python
| false
| false
| 786
|
py
|
"""landscapes URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    # Delegate every incoming path to the landscape app's URLconf.
    url(r'^', include('apps.landscape.urls')),
]
|
[
"justin.r.dietz@gmail.com"
] |
justin.r.dietz@gmail.com
|
06610cfadfa7b7f1355f379fc9b4d330bce025b0
|
a1e7457b5d1ef03ea9d891a6886718b3029c2ba4
|
/zoe_scheduler/state/blobs/__init__.py
|
35e47533ccc22fcd06c1ecf2657d097af0742752
|
[
"Apache-2.0"
] |
permissive
|
ddcy/zoe
|
06bd104b0d3b632ed18ff8a8cc5b580b1f140b1f
|
bd1ac8cdefeda3ebd1ccc941243b781cb7c0beb2
|
refs/heads/master
| 2020-12-26T21:46:17.128925
| 2016-02-26T17:52:20
| 2016-02-26T17:52:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
# Copyright (c) 2016, Daniele Venzano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class BaseBlobs:
    """Abstract interface for a blob store keyed by (kind, name).

    Concrete backends override the four storage methods; `init` may stay a
    no-op for backends that need no setup.
    """

    def init(self):
        """Prepare the backing store; the default implementation does nothing."""
        pass

    def store_blob(self, kind, name, data):
        """Persist `data` under (kind, name)."""
        raise NotImplementedError

    def load_blob(self, kind, name):
        """Return the blob stored under (kind, name)."""
        raise NotImplementedError

    def delete_blob(self, kind, name):
        """Remove the blob stored under (kind, name)."""
        raise NotImplementedError

    def list_blobs(self, kind):
        """Enumerate the blobs of the given kind."""
        raise NotImplementedError
|
[
"venza@brownhat.org"
] |
venza@brownhat.org
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.