blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fcb4615ea3d1c2f22d029cad3e4f73a185db48ae
|
3152fd9ec9ccd83b6e0d2ea40aa36a4b145aea2e
|
/temp/test_打印正方形.py
|
69d148e2251622402e2f8c77af081ab51a935b17
|
[] |
no_license
|
derekduan1028/hm_python
|
cf1b6037ac1cde8dcac393453a291c39b5a936c2
|
ae79f817a55d1b3bfdbdf1b50d5147946c8b7401
|
refs/heads/master
| 2023-01-21T00:33:00.927709
| 2020-11-27T00:00:35
| 2020-11-27T00:00:35
| 291,869,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 533
|
py
|
#!/usr/bin/python
# coding:utf-8
"""
@author:derek
@contract:derek_duan@sina.com
@file: test_打印正方形.py
@time: 11/18/20 4:45 PM
"""
def print_lines(str1, str2, wide):
    """Print one row of two side-by-side box outlines.

    Emits ``str1`` (corner/edge char), then ``wide`` copies of ``str2``
    (fill char), twice over, with a closing ``str1`` — every token is
    followed by a single space.  No newline is printed; the caller
    terminates the row.
    """
    tokens = [str1] + [str2] * wide + [str1] + [str2] * wide + [str1]
    print(" ".join(tokens), end=" ")
# Draw two 10-wide squares side by side, stacked twice vertically:
# rows 0, 5 and 10 are borders ("+" corners, "-" fill), the rest are
# interiors ("|" edges, blank fill).  print("\t") ends each row with a
# tab plus the implicit newline.
a = 10
for j in range(11):
    if j % 5:
        print_lines("|", " ", a)
    else:
        print_lines("+", "-", a)
    print("\t")
|
[
"derek@Derek-Mbp"
] |
derek@Derek-Mbp
|
06dca00fa6a330d2a68438a2972b67d9f16a64a1
|
426521e1689f70732222efd5f98675014e361964
|
/youtube_dl/extractor/afreecatv.py
|
518c61f67eb0befa0ce59fb393d10d8ebd4dcc03
|
[
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"Unlicense"
] |
permissive
|
DalavanCloud/youtube-dl
|
8b6f34e8e8dc296df6ee7c12fdf91688092f2df7
|
c8f45f763cac3c0d0e4ca35ba072d8d321957e85
|
refs/heads/master
| 2020-04-13T06:36:38.023940
| 2016-09-27T16:03:00
| 2016-09-27T16:03:00
| 163,026,015
| 1
| 0
|
Unlicense
| 2018-12-24T22:04:42
| 2018-12-24T22:04:42
| null |
UTF-8
|
Python
| false
| false
| 4,991
|
py
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_urlparse,
compat_urlparse,
)
from ..utils import (
ExtractorError,
int_or_none,
xpath_element,
xpath_text,
)
class AfreecaTVIE(InfoExtractor):
    """Extractor for VOD posts on afreecatv.com.

    Matches both the bbs/app CGI URLs and the flash/html player URLs,
    taking the video id from the ``nTitleNo`` query parameter.  A post
    with several <file> entries is returned as a multi_video playlist.
    """

    IE_DESC = 'afreecatv.com'
    # (?x) verbose regex: host may be live./afbbs./www. with an optional
    # port; the id group captures nTitleNo.
    _VALID_URL = r'''(?x)^
        https?://(?:(live|afbbs|www)\.)?afreeca(?:tv)?\.com(?::\d+)?
        (?:
            /app/(?:index|read_ucc_bbs)\.cgi|
            /player/[Pp]layer\.(?:swf|html))
        \?.*?\bnTitleNo=(?P<id>\d+)'''
    _TESTS = [{
        'url': 'http://live.afreecatv.com:8079/app/index.cgi?szType=read_ucc_bbs&szBjId=dailyapril&nStationNo=16711924&nBbsNo=18605867&nTitleNo=36164052&szSkin=',
        'md5': 'f72c89fe7ecc14c1b5ce506c4996046e',
        'info_dict': {
            'id': '36164052',
            'ext': 'mp4',
            'title': '데일리 에이프릴 요정들의 시상식!',
            'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$',
            'uploader': 'dailyapril',
            'uploader_id': 'dailyapril',
            'upload_date': '20160503',
        }
    }, {
        'url': 'http://afbbs.afreecatv.com:8080/app/read_ucc_bbs.cgi?nStationNo=16711924&nTitleNo=36153164&szBjId=dailyapril&nBbsNo=18605867',
        'info_dict': {
            'id': '36153164',
            'title': "BJ유트루와 함께하는 '팅커벨 메이크업!'",
            'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$',
            'uploader': 'dailyapril',
            'uploader_id': 'dailyapril',
        },
        'playlist_count': 2,
        'playlist': [{
            'md5': 'd8b7c174568da61d774ef0203159bf97',
            'info_dict': {
                'id': '36153164_1',
                'ext': 'mp4',
                'title': "BJ유트루와 함께하는 '팅커벨 메이크업!'",
                'upload_date': '20160502',
            },
        }, {
            'md5': '58f2ce7f6044e34439ab2d50612ab02b',
            'info_dict': {
                'id': '36153164_2',
                'ext': 'mp4',
                'title': "BJ유트루와 함께하는 '팅커벨 메이크업!'",
                'upload_date': '20160502',
            },
        }],
    }, {
        'url': 'http://www.afreecatv.com/player/Player.swf?szType=szBjId=djleegoon&nStationNo=11273158&nBbsNo=13161095&nTitleNo=36327652',
        'only_matching': True,
    }]

    @staticmethod
    def parse_video_key(key):
        """Parse a <file key="YYYYMMDD_..._N"> attribute.

        Returns {'upload_date': ..., 'part': ...} when the key matches
        the expected pattern, or an empty dict otherwise.
        """
        video_key = {}
        m = re.match(r'^(?P<upload_date>\d{8})_\w+_(?P<part>\d+)$', key)
        if m:
            video_key['upload_date'] = m.group('upload_date')
            video_key['part'] = m.group('part')
        return video_key

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Rewrite the page URL into the XML info API endpoint, keeping the
        # original query string (which carries nTitleNo and friends).
        parsed_url = compat_urllib_parse_urlparse(url)
        info_url = compat_urlparse.urlunparse(parsed_url._replace(
            netloc='afbbs.afreecatv.com:8080',
            path='/api/video/get_video_info.php'))
        video_xml = self._download_xml(info_url, video_id)
        # No <file> element at all means the video id is unknown/deleted.
        if xpath_element(video_xml, './track/video/file') is None:
            raise ExtractorError('Specified AfreecaTV video does not exist',
                                 expected=True)
        title = xpath_text(video_xml, './track/title', 'title')
        uploader = xpath_text(video_xml, './track/nickname', 'uploader')
        uploader_id = xpath_text(video_xml, './track/bj_id', 'uploader id')
        duration = int_or_none(xpath_text(video_xml, './track/duration',
                                          'duration'))
        thumbnail = xpath_text(video_xml, './track/titleImage', 'thumbnail')
        entries = []
        for i, video_file in enumerate(video_xml.findall('./track/video/file')):
            video_key = self.parse_video_key(video_file.get('key', ''))
            if not video_key:
                # Skip <file> entries whose key we cannot parse.
                continue
            entries.append({
                # Part number from the key; the 1-based position is only a
                # fallback for dict.get.
                'id': '%s_%s' % (video_id, video_key.get('part', i + 1)),
                'title': title,
                'upload_date': video_key.get('upload_date'),
                'duration': int_or_none(video_file.get('duration')),
                'url': video_file.text,
            })
        info = {
            'id': video_id,
            'title': title,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'duration': duration,
            'thumbnail': thumbnail,
        }
        # One file -> plain video result; several -> multi_video playlist.
        if len(entries) > 1:
            info['_type'] = 'multi_video'
            info['entries'] = entries
        elif len(entries) == 1:
            info['url'] = entries[0]['url']
            info['upload_date'] = entries[0].get('upload_date')
        else:
            raise ExtractorError(
                'No files found for the specified AfreecaTV video, either'
                ' the URL is incorrect or the video has been made private.',
                expected=True)
        return info
|
[
"peter@pmrowla.com"
] |
peter@pmrowla.com
|
4fe49b4538b78e6aef54b68010f7bf3670fe30d9
|
8a53b6e78ee6bc66bbf83d78fedef20e44e40809
|
/braceyourselfassignmentsarecoming/sudoku.py
|
00a3b61d94dbf5681019330623f67e0699604bba
|
[] |
no_license
|
chintanbetrabet/ChessAI
|
cc5c6dfa91c0ba5a0b6de1cc705092cf996bcdcb
|
3d9ebd96330623ab48f7f758cc8ad3b61eb79d55
|
refs/heads/master
| 2021-07-02T05:57:58.524808
| 2017-09-22T09:32:50
| 2017-09-22T09:32:50
| 104,456,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,450
|
py
|
import copy
import os
import time
class Square():
    """One cell of a Sudoku grid.

    Attributes:
        show:  the displayed character ('1'-'9', or '.' for empty).
        row, col:  the cell's position in the 9x9 grid.
        legal:  the candidate digits still possible for this cell.
    """

    def __init__(self, show, row, col, legal):
        self.show = show
        self.row = row
        self.col = col
        # Deep-copy so sibling cells never share a candidate list.
        self.legal = copy.deepcopy(legal)

    def update(self):
        """Synchronise ``show`` with ``legal``.

        A single remaining candidate becomes the shown digit; otherwise a
        cell that already shows a digit collapses its candidate list to
        just that digit.
        """
        if len(self.legal) == 1:
            self.show = str(self.legal[0])
        elif self.show != '.':
            # Replace the candidates with a fresh one-element list.
            self.legal = [int(self.show)]
class Sudoku():
    """A 9x9 Sudoku board held as a flat list of 81 Square cells.

    NOTE: this is Python 2 code (print statements, ``/`` integer division
    on positions, ``raw_input`` in the driver).
    NOTE(review): two ``__init__`` methods are defined; the second
    (string-based) definition silently replaces the first (file-based)
    one, so the file-loading constructor is dead code.
    """
    def __init__(self,fil):
        # Dead code: shadowed by the __init__ below.  Reads 9 lines from
        # the file named ``fil``, one row per line.
        self.Puzzle=[]
        pos=0
        univ_legal=[1,2,3,4,5,6,7,8,9]
        f=open(fil,'r')
        for i in range(9):
            line=f.readline()
            for j in range(9):
                if line[j]=='.':
                    legal=copy.deepcopy(univ_legal)
                else:
                    legal=[line[j]]
                add=Square(line[j],i,j,legal)
                self.Puzzle.append(add)
    def __init__(self,string):
        # Build the board from 81 whitespace-separated tokens
        # (assumes the tokens really are whitespace-separated — TODO
        # confirm against the driver's input format).
        self.Puzzle=[]
        pos=0
        univ_legal=[1,2,3,4,5,6,7,8,9]
        line=string.split()
        for i in range(9):
            for j in range(9):
                if line[9*i+j]=='.':
                    # Empty cell: all nine digits are candidates.
                    legal=copy.deepcopy(univ_legal)
                else:
                    # Given cell: the single candidate is stored as a str
                    # here; Square.update() later normalises it.
                    legal=[line[i*9+j]]
                add=Square(line[i*9+j],i,j,legal)
                self.Puzzle.append(add)
    def ret_box(self,i,j):
        # Digits already placed in the 3x3 box containing cell (i, j).
        start_i=i-i%3
        end_i=start_i+2
        start_j=j-j%3
        end_j=start_j+2
        return self.ret_lis(start_i,end_i,start_j,end_j)
    def ret_col(self,i,j):
        # Digits already placed in column j.
        start_i=0
        end_i=8
        start_j=j
        end_j=j
        return self.ret_lis(start_i,end_i,start_j,end_j)
    def ret_row(self,i,j):
        # Digits already placed in row i.
        start_i=i
        end_i=i
        start_j=0
        end_j=8
        return self.ret_lis(start_i,end_i,start_j,end_j)
    def ret_lis(self,i,i1,j,j1):
        # Collect the shown digits (as ints) in the inclusive rectangle
        # [i..i1] x [j..j1]; '.' and '0' both count as blank.
        start_i=i
        start_j=j
        lis=[]
        while i<=i1:
            j=start_j
            while j<=j1:
                if self.Puzzle[9*i+j].show!='.' and self.Puzzle[9*i+j].show!='0' :
                    lis.append(int(self.Puzzle[9*i+j].show))
                j+=1
            i+=1
        return lis
    def upgrade(self,pos):
        # Constraint propagation for one cell: drop candidates that already
        # appear in its column, row or box, then sync show/legal.
        # NOTE: pos/9 relies on Python 2 integer division.
        # NOTE(review): removing from self.Puzzle[pos].legal while
        # iterating over it can skip elements — verify this is intended.
        if self.Puzzle[pos].show=='.' or self.Puzzle[pos].show=='0' :
            lis=self.ret_col(pos/9,pos%9)
            for x in self.Puzzle[pos].legal:
                if x in lis:
                    self.Puzzle[pos].legal.remove(int(x))
            lis=self.ret_row(pos/9,pos%9)
            for x in self.Puzzle[pos].legal:
                if x in lis and len(lis)>1:
                    #x=int(x)
                    self.Puzzle[pos].legal.remove(int(x))
            lis=self.ret_box(pos/9,pos%9)
            for x in self.Puzzle[pos].legal:
                if x in lis and len(lis)>1:
                    self.Puzzle[pos].legal.remove(int(x))
            self.Puzzle[pos].update()
    def do_move(self,pos):
        # For an empty cell, try a "hidden single" placement in its
        # column, row and box.
        if self.Puzzle[pos].show=='.' or self.Puzzle[pos].show=='0':
            self.move_col(pos/9,pos%9)
            self.move_row(pos/9,pos%9)
            self.move_box(pos/9,pos%9)
            self.Puzzle[pos].update()
    def print_legal_on_demand(self,i,i1,j,j1):
        # Debug helper: dump the candidate lists of a rectangle of cells.
        start_j=j
        while i<=i1:
            j=start_j
            while j<=j1:
                print self.Puzzle[9*i+j].legal
                j+=1
            i+=1
    def show_puz(self):
        # Print the board, one row per line, blank line before each row.
        pos=0
        for i in range(9):
            print
            print ' '.join(self.Puzzle[9*i+j].show for j in range(9))
    def show_puz2(self):
        # Print the board in compact form (trailing comma suppresses the
        # newline in Python 2; the bare print then ends the row).
        pos=0
        for i in range(9):
            #print
            print ' '.join(self.Puzzle[9*i+j].show for j in range(9)),
            print
    def move_box(self,i,j):
        # Apply make_move over the 3x3 box containing (i, j).
        start_i=i-i%3
        end_i=start_i+2
        start_j=j-j%3
        end_j=start_j+2
        return self.make_move(start_i,end_i,start_j,end_j)
    def move_col(self,i,j):
        # Apply make_move over column j.
        start_i=0
        end_i=8
        start_j=j
        end_j=j
        return self.make_move(start_i,end_i,start_j,end_j)
    def move_row(self,i,j):
        # Apply make_move over row i.
        start_i=i
        end_i=i
        start_j=0
        end_j=8
        return self.make_move(start_i,end_i,start_j,end_j)
    def make_move(self,i,i1,j,j1):
        # "Hidden single" technique: for each digit, if it is a candidate
        # in exactly one unsolved cell of the region and is not already
        # placed there, write it in and re-propagate the whole board.
        start_i=i
        start_j=j
        special=0  # unused
        for num in range(1,10):
            count=0
            move_pos=-1
            i=start_i
            while i<=i1:
                j=start_j
                while j<=j1:
                    pos=9*i+j
                    # Digit already placed in the region: disqualify it by
                    # driving the counter strongly negative.
                    if len(self.Puzzle[pos].legal)==1 and int(self.Puzzle[pos].show)==num:
                        count=-100000
                    if len(self.Puzzle[pos].legal)!=1:
                        for x in self.Puzzle[pos].legal:
                            if int(x)==num:
                                if count==0:
                                    move_pos=pos
                                count+=1
                    j+=1
                i+=1
            if count==1 and self.Puzzle[move_pos].show=='.':
                self.Puzzle[move_pos].show=str(num)
                # NOTE(review): 'level' looks like a typo for 'legal'; this
                # creates an unused attribute instead of updating the
                # candidate list (update() below compensates via show).
                self.Puzzle[move_pos].level=copy.deepcopy([num])
                self.Puzzle[move_pos].update()
                for p in range(81):
                    self.upgrade(p)
                #self.show_puz();
                #raw_input();
def fil_count(pu):
    """Return how many of the first 81 cells of *pu* show a digit.

    A cell counts as filled when its ``show`` attribute is not '.'.
    """
    return sum(pu.Puzzle[i].show != '.' for i in range(81))
# Driver (Python 2): read the number of test cases from stdin, then one
# puzzle per line with '0' or '.' marking blanks.
t=input()
while(t>0):
    t-=1
    x=raw_input()
    x1=""
    # Normalise blanks: turn every '0' into '.' so the solver sees a
    # single blank character.
    for i in x:
        if i =='0':
            x1+='.'
        else:
            x1+=i
    #print x1
    #t=time.clock()
    #sud=Sudoku("sud.txt")
    sud=Sudoku(x1)
    #sud.show_puz()
    last=fil_count(sud)
    j=0
    # Keep propagating until all 81 cells are filled.
    # NOTE(review): this loop never terminates if the puzzle cannot be
    # completed by these two techniques — verify the expected inputs.
    while last!=81:
        #print "last=%d"%last
        for i in range(81):
            sud.upgrade(i)
        #sud.show_puz()
        # If a full propagation pass made no progress, fall back to
        # hidden-single moves.
        if j>0 and last==fil_count(sud):
            for i in range(81):
                sud.do_move(i)
        last=fil_count(sud)
        j+=1
    #t=time.clock()-t1
    #print "after time %f"%t
    sud.show_puz2()
    #raw_input("donne")
|
[
"chintanbetrabet@gmail.com"
] |
chintanbetrabet@gmail.com
|
62204aa625906842ccced44fdf50596c95ec552b
|
de4d88db6ea32d20020c169f734edd4b95c3092d
|
/aiotdlib/api/functions/add_network_statistics.py
|
b94694668600633ed7ff2c3fce21c7ade2450981
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
thiagosm/aiotdlib
|
5cc790a5645f7e4cc61bbd0791433ed182d69062
|
4528fcfca7c5c69b54a878ce6ce60e934a2dcc73
|
refs/heads/main
| 2023-08-15T05:16:28.436803
| 2021-10-18T20:41:27
| 2021-10-18T20:41:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,068
|
py
|
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from ..base_object import BaseObject
from ..types import NetworkStatisticsEntry
class AddNetworkStatistics(BaseObject):
    """
    Adds the specified data to data usage statistics. Can be called before authorization

    :param entry: The network statistics entry with the data to be added to statistics
    :type entry: :class:`NetworkStatisticsEntry`
    """

    # TDLib request-type discriminator, serialised under the "@type" key.
    ID: str = Field("addNetworkStatistics", alias="@type")
    entry: NetworkStatisticsEntry

    @staticmethod
    def read(q: dict) -> AddNetworkStatistics:
        """Build the request from a raw dict, skipping pydantic validation."""
        return AddNetworkStatistics.construct(**q)
|
[
"pylakey@protonmail.com"
] |
pylakey@protonmail.com
|
cdf83d46c866bb0fd896c3d3359f95a1100fee01
|
4018ede0bb90d621a1002073529304d942ba4322
|
/backend/vehicle/migrations/0001_initial.py
|
41e9f437055310feb82188c190808790ea8ccfd8
|
[] |
no_license
|
crowdbotics-apps/uber-19759
|
82ee6d2cd616c1fa699d426f85a964af40b4cb44
|
17331bfcdfc29a20c2d986e796df2db88a2b5ed1
|
refs/heads/master
| 2022-12-27T05:29:15.759136
| 2020-10-05T01:17:32
| 2020-10-05T01:17:32
| 289,784,322
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,549
|
py
|
# Generated by Django 2.2.15 on 2020-08-23 23:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the vehicle app: VehicleType and Vehicle models."""

    initial = True

    dependencies = [
        # DriverProfile (FK target of Vehicle.driver) lives in taxi_profile.
        ('taxi_profile', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='VehicleType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('icon', models.URLField()),
                ('base_rate', models.FloatField()),
            ],
        ),
        migrations.CreateModel(
            name='Vehicle',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type_description', models.CharField(max_length=255)),
                ('plate_number', models.CharField(max_length=10)),
                ('timestamp_registered', models.DateTimeField(auto_now_add=True)),
                ('is_on_duty', models.BooleanField(blank=True, null=True)),
                # SET_NULL: keep the vehicle row when its driver or type is deleted.
                ('driver', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='vehicle_driver', to='taxi_profile.DriverProfile')),
                ('vehicle_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='vehicle_vehicle_type', to='vehicle.VehicleType')),
            ],
        ),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
9c15d394fd6b99aa3d6bc0d05671cbe053dda4a1
|
4c43fb0220bc0c12e8fa21f8cca2618d64b03425
|
/lab11/OrderRecordServicePS_RS/message_puller.py
|
5518536f8a9d4b8f8d9469421c1eac6611052f57
|
[] |
no_license
|
dayanach/IS
|
fddf0a8b95e6535ca9222ebfd535dc01f581d3bd
|
d5bab2729a5a6fd03280a62cc0132e7f9d72ba37
|
refs/heads/master
| 2022-05-26T09:46:56.543883
| 2020-05-02T16:18:18
| 2020-05-02T16:18:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,597
|
py
|
import json
from threading import Thread
import pika
import requests
def callback(ch, method, props, body):
    """RabbitMQ consumer callback: forward an order message to the local
    REST order service and publish the HTTP response body to the
    requester's reply queue (RPC-over-AMQP pattern).
    """
    print(" [x] Received %r" % body)
    payload = json.loads(body.decode('utf-8'))
    # Forward the order payload to the order service.
    msg = requests.post("http://127.0.0.1:5002/orders/", json=payload)
    # A fresh connection is opened for every message just to publish the
    # reply.  NOTE(review): this is expensive; publishing on the consuming
    # connection would likely suffice — confirm.
    connection = pika.BlockingConnection(pika.ConnectionParameters('104.198.35.199'))
    channel = connection.channel()
    channel.queue_declare(queue=props.reply_to)
    channel.basic_publish(exchange='',
                          routing_key=props.reply_to,
                          properties=pika.BasicProperties(correlation_id= \
                              props.correlation_id),
                          body=msg.content)
    # NOTE(review): this acks on the *reply* channel using a delivery tag
    # from the consuming channel, and the consumer already uses
    # auto_ack=True — this call looks wrong/redundant; verify against the
    # pika basic_ack semantics.
    channel.basic_ack(delivery_tag=method.delivery_tag)
    connection.close()
def pull_message():
    """Bind an exclusive queue to the 'order' topic exchange for
    'order.create.*.*' routing keys and consume messages with `callback`.

    Blocks in start_consuming() until the channel/connection closes.
    """
    connection = pika.BlockingConnection(pika.ConnectionParameters('104.198.35.199'))
    channel = connection.channel()
    channel.exchange_declare(exchange='order', exchange_type='topic')
    # Empty queue name: let the broker generate an exclusive queue for
    # this consumer.
    result = channel.queue_declare('', exclusive=True)
    queue_name = result.method.queue
    channel.queue_bind(exchange='order', queue=queue_name, routing_key="order.create.*.*")
    print(' [*] Waiting for messages. To exit press CTRL+C ' + queue_name)
    channel.basic_consume(
        queue=queue_name, on_message_callback=callback, auto_ack=True)
    channel.start_consuming()
class MessagePuller(Thread):
    """Daemon thread that consumes order messages forever.

    NOTE(review): the thread starts itself from __init__, so constructing
    an instance has immediate side effects.
    """
    def __init__(self):
        Thread.__init__(self)
        # Daemon thread: do not block interpreter shutdown.
        self.daemon = True
        self.start()
    def run(self):
        # Re-enter pull_message if it ever returns (e.g. connection drop).
        while True:
            pull_message()
|
[
"ipkumarawd@yahoo.com"
] |
ipkumarawd@yahoo.com
|
90f371715ee021773b7ca9da6fec0febc3eafcbc
|
36e3d735e06d0642f1e8c26bff57305a01cc627c
|
/apClient/net_data/migrations/0007_auto_20160428_0551.py
|
1ab204fe96c50f574cb4f7620bc8e1d1f6d23978
|
[] |
no_license
|
WilsonWangTHU/ipv6_server
|
5c768cdaeaf22ee508c5fff162b208481a42f95d
|
5088f58ab25061e65127699ed328ddaab24f9aac
|
refs/heads/master
| 2021-01-18T21:18:39.653994
| 2016-05-27T04:22:23
| 2016-05-27T04:22:23
| 55,656,523
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,378
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-28 05:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9.5): removes the long_term_dataset model
    and its configuration fields, adds a heart-beat sampling period, and
    retunes the short-term sampling defaults."""

    dependencies = [
        ('net_data', '0006_auto_20160427_0806'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='long_term_dataset',
            name='data_set',
        ),
        migrations.RemoveField(
            model_name='configuration',
            name='long_term_sample_period',
        ),
        migrations.RemoveField(
            model_name='configuration',
            name='long_term_volumn',
        ),
        migrations.RemoveField(
            model_name='cpu_data',
            name='cpu_kernel',
        ),
        migrations.AddField(
            model_name='configuration',
            name='heart_beat_sample_period',
            # Seconds between heart-beat samples — TODO confirm unit.
            field=models.IntegerField(default=600),
        ),
        migrations.AlterField(
            model_name='configuration',
            name='short_term_sample_period',
            field=models.IntegerField(default=60),
        ),
        migrations.AlterField(
            model_name='configuration',
            name='short_term_volumn',
            field=models.IntegerField(default=200),
        ),
        migrations.DeleteModel(
            name='long_term_dataset',
        ),
    ]
|
[
"wode406@hotmail.com"
] |
wode406@hotmail.com
|
e9b16c80258ae328972b1a66f590751c25508eb0
|
272aff93c6f399cd834835970891696e605a1e31
|
/dsp_ws/build/hector_navigation/hector_costmap/catkin_generated/pkg.installspace.context.pc.py
|
9203dd9dc86536536a7e49eceaa03f0ccbd62473
|
[] |
no_license
|
dingjianfeng/dsp_ding2
|
18c99958a022d2e2fae3aa5888fd07fa279568d6
|
a3327a1db4635865a07390023c5cc2932456b367
|
refs/heads/master
| 2020-05-02T12:41:26.516325
| 2019-03-27T10:10:26
| 2019-03-27T10:10:26
| 177,964,602
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated pkg-config context for the hector_costmap package
# (install-space variant); the quoted values were substituted by CMake at
# configure time — do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
# Semicolon-separated include list; an empty substitution yields [].
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/dsp/dsp_ws/install/include".split(';') if "/home/dsp/dsp_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "hector_costmap"
PROJECT_SPACE_DIR = "/home/dsp/dsp_ws/install"
PROJECT_VERSION = "0.0.0"
|
[
"dingjianfeng"
] |
dingjianfeng
|
f21b83ad722ceb1b046532aaa66c353ff2d81b99
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/sa/profiles/Eltex/DSLAM/get_ifindexes.py
|
1daf655961d379978c1d13de48ccb37a501d5fc5
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,202
|
py
|
# ---------------------------------------------------------------------
# Generic.get_ifindexes
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetifindexes import IGetIfindexes
from noc.core.mib import mib
class Script(BaseScript):
    """Eltex.DSLAM.get_ifindexes: build an {interface name: ifIndex}
    mapping over SNMP, handling the different OID layouts of the MXA24,
    MXA32 and remaining platforms."""

    name = "Eltex.DSLAM.get_ifindexes"
    interface = IGetIfindexes
    cache = True

    def execute_snmp(self):
        r = {}
        if self.is_platform_MXA24:
            # MXA24 uses its own enterprise subtree for both name tables.
            o = "1.3.6.1.4.1.34300.1.6"
            ooid = "%s.15.2.1.2" % o  # second name table — presumably per-port; TODO confirm against the Eltex MIB
            aoid = "%s.10.2.1.2" % o  # first name table walked below
            for oid, name in self.snmp.getnext(aoid, max_retries=8):
                # A trailing ".0" means the index sits one OID component
                # earlier; otherwise it is the last component.
                if oid.endswith(".0"):
                    ifindex = int(oid.split(".")[-2])
                else:
                    ifindex = int(oid.split(".")[-1])
                r[name] = ifindex
            for oid, name in self.snmp.getnext(ooid, max_retries=8):
                if " " in name:
                    # Multi-word names: keep only the third token.
                    name = name.split()[2]
                if name.startswith("p"):
                    # Normalise port names: "pNN" -> "spNN".
                    name = "s%s" % name
                if oid.endswith(".0"):
                    ifindex = int(oid.split(".")[-2])
                else:
                    ifindex = int(oid.split(".")[-1])
                r[name] = ifindex
        else:
            # MXA32 and other platforms share the table layout but differ
            # in the enterprise OID prefix.
            if self.is_platform_MXA32:
                o = "1.3.6.1.4.1.35265.1.28"
            else:
                o = "1.3.6.1.4.1.35265.1.33"
            aoid = "%s.10.2.1.2" % o
            # Port names come from the standard IF-MIB ifDescr table here.
            for oid, name in self.snmp.getnext(mib["IF-MIB::ifDescr"], max_retries=8):
                if name.startswith("p"):
                    name = "s%s" % name
                ifindex = int(oid.split(".")[-1])
                r[name] = ifindex
            for oid, name in self.snmp.getnext(aoid, max_retries=8):
                if oid.endswith(".0"):
                    ifindex = int(oid.split(".")[-2])
                else:
                    ifindex = int(oid.split(".")[-1])
                r[name] = ifindex
        return r
|
[
"sysfar@gmail.com"
] |
sysfar@gmail.com
|
be6efdac7b8c19e02de4aae801e0423401d88808
|
bef93432b7745ba5492f11e709e47a5a372590f0
|
/modules/dxtbx/format/FormatCBFMiniPilatusXXX.py
|
10e642a2bb5e1094c4d7b1d60ad3896600e1a1e7
|
[
"BSD-3-Clause"
] |
permissive
|
BlenderCN-Org/dials-dev20190819
|
939378744d546692e3de33d106a1b5218a584c2a
|
1b719b88a1642c13a5a8d488addbb215d0fa290c
|
refs/heads/master
| 2020-07-19T17:00:06.944870
| 2019-08-19T21:36:25
| 2019-08-19T21:36:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,720
|
py
|
# Copyright (C) 2014 Diamond Light Source, Graeme Winter
#
# This code is distributed under the BSD license, a copy of which is
# included in the root directory of this package.
#
# An implementation of the CBF image reader for Pilatus images, from the Pilatus
# 6M SN 100 currently on Diamond I04.
from __future__ import absolute_import, division, print_function
from dxtbx.format.FormatCBFMiniPilatus import FormatCBFMiniPilatus
class FormatCBFMiniPilatusXXX(FormatCBFMiniPilatus):
    """A class for reading mini CBF format Pilatus images for 6M SN XXX."""

    @staticmethod
    def understand(image_file):
        """Check to see if this looks like an Pilatus mini CBF format image,
        i.e. we can make sense of it.

        Accepts headers whose '# Detector' record names a PILATUS and whose
        header contains the serial placeholder 'S/N XX-XXX'.
        """
        header = FormatCBFMiniPilatus.get_cbf_header(image_file)
        for record in header.split("\n"):
            if (
                "# Detector" in record
                and "PILATUS" in record
                # NOTE(review): this tests the whole header, not the current
                # record — possibly a typo for `in record`; confirm intent.
                and "S/N XX-XXX" in header
            ):
                return True
        return False

    def __init__(self, image_file, **kwargs):
        """Initialise the image structure from the given file, including a
        proper model of the experiment."""
        from dxtbx import IncorrectFormatError

        if not self.understand(image_file):
            raise IncorrectFormatError(self, image_file)
        FormatCBFMiniPilatus.__init__(self, image_file, **kwargs)

    def _goniometer(self):
        """Return a model for a simple single-axis goniometer. This should
        probably be checked against the image header, though for miniCBF
        there are limited options for this."""
        return self._goniometer_factory.single_axis_reverse()
|
[
"jorge7soccer@gmail.com"
] |
jorge7soccer@gmail.com
|
edd9d190611d86c93ad2a0c1bd2e9ba947c8e046
|
52c5b78f3afab4573926dd6d0a49e10ee1a77e26
|
/project_4/app1/migrations/0001_initial.py
|
31865ddbc05d99f2596e900e8cbe2b3c4fa2036f
|
[] |
no_license
|
zime-py/eight
|
d9eefc28a00a8411f3a58b0e931807492bc5bfc2
|
2138b2a8884dea299654ff7c41060c72f183486c
|
refs/heads/master
| 2023-01-11T23:03:53.062441
| 2020-11-14T14:43:04
| 2020-11-14T14:43:04
| 312,831,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
# Generated by Django 3.1.1 on 2020-09-05 10:35
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='cool',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.TextField()),
('roll', models.IntegerField()),
],
),
]
|
[
"mahmudhossain836@gmail.com"
] |
mahmudhossain836@gmail.com
|
3bfe8561268459da6f00fece955b503d03c776ef
|
8ae8c4ab4ec7d33d31b55d4678e5e40d555ee24e
|
/node.py
|
115960529e2c6188bb926873641d0f2588a03382
|
[] |
no_license
|
firefirer1983/python_programing
|
23229b2ae201310752dd919d3757717c96473662
|
dfe49b9cace0639e49a9e67295e3d76110103103
|
refs/heads/master
| 2020-05-30T17:33:59.099549
| 2019-06-04T12:40:44
| 2019-06-04T12:40:44
| 189,877,042
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,915
|
py
|
from collections import defaultdict
class Node:
    """A named graph vertex; the name is fixed at construction time."""

    def __init__(self, name):
        self._name = name

    @property
    def name(self):
        """The node's label (read-only)."""
        return self._name

    def __str__(self):
        return self.name
class Edge:
    """A directed connection from a source node to a destination node."""

    def __init__(self, src, dst):
        self._src, self._dst = src, dst

    @property
    def source(self):
        """Node the edge leaves from."""
        return self._src

    @property
    def destination(self):
        """Node the edge points at."""
        return self._dst

    def __str__(self):
        return f"{self.source} -> {self.destination}"
class WeightEdge(Edge):
    """An Edge that additionally carries a weight."""

    def __init__(self, src, dst, weight):
        super().__init__(src, dst)
        self._weight = weight

    @property
    def weight(self):
        """The weight attached to this edge (read-only)."""
        return self._weight

    def __str__(self):
        return f"{self.source} -> {self.destination} [weight:{self.weight!r}]"
class Digraph:
def __init__(self):
self._nodes = list()
self._edges = defaultdict(lambda: list())
def add_node(self, node):
if node in self._nodes:
print("%s already in list" % node)
return Node
self._nodes.append(node)
def add_edge(self, edge):
if edge.source not in self._nodes and edge.destination not in self._nodes:
print("edge %s is invalid" % edge)
print("+ %s -> %s" % (edge.source, edge.destination))
self._edges[edge.source].append(edge.destination)
def children_of(self, node):
return self._edges[node]
def has_node(self, node):
return node in self._nodes
def __str__(self):
res = ""
for s in self._nodes:
for d in self._edges[s]:
res += "%s -> %s\n" % (s, d)
return res
def __iter__(self):
yield from self._nodes
class Graph(Digraph):
    """An undirected graph: every edge is mirrored in both directions."""

    def add_edge(self, edge):
        # Store the forward edge, then its reverse, so traversal works
        # from either endpoint.
        super().add_edge(edge)
        reverse_edge = Edge(edge.destination, edge.source)
        super().add_edge(reverse_edge)
|
[
"fyman.zhang@gmail.com"
] |
fyman.zhang@gmail.com
|
731c60ac11e13721e6a93743ada9af4811db31aa
|
3c5c4c4fb296d08e9e984c4a60ae4fa147293e9a
|
/ceres/util/block_cache.py
|
54f514b4d7f2add3e596a58e06bcca9279fe65af
|
[
"Apache-2.0"
] |
permissive
|
signingup/ceres-combineharvester
|
a8874ab11145e7ba2223b85483b96dea01054ad0
|
aad918a03a4a522e0e2f3bac104d19d693d6bf79
|
refs/heads/main
| 2023-07-25T04:11:13.765471
| 2021-09-09T14:59:48
| 2021-09-09T14:59:48
| 404,918,382
| 1
| 0
|
Apache-2.0
| 2021-09-10T01:22:20
| 2021-09-10T01:22:20
| null |
UTF-8
|
Python
| false
| false
| 3,732
|
py
|
import logging
from typing import Dict, List, Optional
from ceres.consensus.block_record import BlockRecord
from ceres.consensus.blockchain_interface import BlockchainInterface
from ceres.types.blockchain_format.sized_bytes import bytes32
from ceres.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from ceres.types.header_block import HeaderBlock
from ceres.types.weight_proof import SubEpochChallengeSegment, SubEpochSegments
from ceres.util.ints import uint32
class BlockCache(BlockchainInterface):
    """In-memory BlockchainInterface backed by plain dicts.

    Wraps pre-loaded block records, headers, height->hash and sub-epoch
    summary mappings so code written against the database-backed
    interface can run without one.
    """

    def __init__(
        self,
        blocks: Dict[bytes32, BlockRecord],
        headers: Optional[Dict[bytes32, HeaderBlock]] = None,
        height_to_hash: Optional[Dict[uint32, bytes32]] = None,
        sub_epoch_summaries: Optional[Dict[uint32, SubEpochSummary]] = None,
    ):
        # Default every optional mapping to a fresh empty dict (never use
        # a mutable default argument).
        if sub_epoch_summaries is None:
            sub_epoch_summaries = {}
        if height_to_hash is None:
            height_to_hash = {}
        if headers is None:
            headers = {}
        self._block_records = blocks
        self._headers = headers
        self._height_to_hash = height_to_hash
        self._sub_epoch_summaries = sub_epoch_summaries
        self._sub_epoch_segments: Dict[uint32, SubEpochSegments] = {}
        self.log = logging.getLogger(__name__)

    def block_record(self, header_hash: bytes32) -> BlockRecord:
        """Return the block record for *header_hash* (KeyError if absent)."""
        return self._block_records[header_hash]

    def height_to_block_record(self, height: uint32, check_db: bool = False) -> BlockRecord:
        """Look up the block record at *height*; check_db is ignored here."""
        header_hash = self.height_to_hash(height)
        return self.block_record(header_hash)

    def get_ses_heights(self) -> List[uint32]:
        """Heights that have a sub-epoch summary, in ascending order."""
        return sorted(self._sub_epoch_summaries.keys())

    def get_ses(self, height: uint32) -> SubEpochSummary:
        """Return the sub-epoch summary recorded at *height*."""
        return self._sub_epoch_summaries[height]

    def height_to_hash(self, height: uint32) -> Optional[bytes32]:
        """Map *height* to a header hash, or None (with a warning) if unknown."""
        if height not in self._height_to_hash:
            self.log.warning(f"could not find height in cache {height}")
            return None
        return self._height_to_hash[height]

    def contains_block(self, header_hash: bytes32) -> bool:
        return header_hash in self._block_records

    def contains_height(self, height: uint32) -> bool:
        return height in self._height_to_hash

    async def get_block_records_in_range(self, start: int, stop: int) -> Dict[bytes32, BlockRecord]:
        # The cache ignores the range and returns everything it holds.
        return self._block_records

    async def get_block_records_at(self, heights: List[uint32]) -> List[BlockRecord]:
        """Return the block records at each of *heights*, in order."""
        block_records: List[BlockRecord] = []
        for height in heights:
            block_records.append(self.height_to_block_record(height))
        return block_records

    async def get_block_record_from_db(self, header_hash: bytes32) -> Optional[BlockRecord]:
        # NOTE(review): raises KeyError on a miss instead of returning
        # None as the annotation suggests — confirm callers expect that.
        return self._block_records[header_hash]

    def remove_block_record(self, header_hash: bytes32):
        del self._block_records[header_hash]

    def add_block_record(self, block: BlockRecord):
        self._block_records[block.header_hash] = block

    async def get_header_blocks_in_range(
        self, start: int, stop: int, tx_filter: bool = True
    ) -> Dict[bytes32, HeaderBlock]:
        # Like get_block_records_in_range: range and filter are ignored.
        return self._headers

    async def persist_sub_epoch_challenge_segments(
        self, sub_epoch_summary_height: uint32, segments: List[SubEpochChallengeSegment]
    ):
        """Store the challenge segments for a sub-epoch (in memory only)."""
        self._sub_epoch_segments[sub_epoch_summary_height] = SubEpochSegments(segments)

    async def get_sub_epoch_challenge_segments(
        self,
        sub_epoch_summary_height: uint32,
    ) -> Optional[List[SubEpochChallengeSegment]]:
        """Return previously persisted challenge segments, or None."""
        segments = self._sub_epoch_segments.get(sub_epoch_summary_height)
        if segments is None:
            return None
        return segments.challenge_segments
|
[
"hulatang_eric@163.com"
] |
hulatang_eric@163.com
|
2a26001d443cb6b58b8139b330be87998641c886
|
2dd560dc468af0af4ca44cb4cd37a0b807357063
|
/Leetcode/21. Merge Two Sorted Lists/solution2.py
|
c0c75749ec0249059072e52b316690eeb23bd917
|
[
"MIT"
] |
permissive
|
hi0t/Outtalent
|
460fe4a73788437ba6ce9ef1501291035c8ff1e8
|
8a10b23335d8e9f080e5c39715b38bcc2916ff00
|
refs/heads/master
| 2023-02-26T21:16:56.741589
| 2021-02-05T13:36:50
| 2021-02-05T13:36:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 916
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Merge two ascending linked lists into a new ascending list.

        Builds fresh ListNode instances rather than splicing the inputs;
        returns None when both inputs are empty.
        """
        if not l1:
            return l2
        if not l2:
            return l1
        dummy = ListNode()
        tail = dummy
        # Repeatedly copy the smaller head; ties take from l1 (stable).
        while l1 and l2:
            if l2.val < l1.val:
                tail.next = ListNode(l2.val)
                l2 = l2.next
            else:
                tail.next = ListNode(l1.val)
                l1 = l1.next
            tail = tail.next
        # Copy whichever list still has nodes left.
        rest = l1 or l2
        while rest:
            tail.next = ListNode(rest.val)
            tail = tail.next
            rest = rest.next
        return dummy.next
|
[
"info@crazysquirrel.ru"
] |
info@crazysquirrel.ru
|
0cbca552ac46d5ff810627de47fd32c725cb9f9b
|
7738e950c103fb23b48d5e004eddcf108ea71fa1
|
/Cursoemvideo/Mundo3/exercise106.py
|
ee09831fdf4985b1abe94c9121e775d495431ca2
|
[] |
no_license
|
pedrottoni/Studies-Python
|
c9bdbaf4b4aaa209bf32aa93d6ee4814a0a39c53
|
f195bcb4c6868689ec0cf05c34cd4d5a6c7b3ea1
|
refs/heads/master
| 2021-09-26T05:23:02.398552
| 2020-02-12T04:48:23
| 2020-02-12T04:48:23
| 203,452,221
| 0
| 0
| null | 2021-09-22T18:21:51
| 2019-08-20T20:48:07
|
Python
|
UTF-8
|
Python
| false
| false
| 835
|
py
|
"""
Faça um mini-sistema que utilize o Interactive Help do Python. O usuário vai digitar o comando e o manual vai aparecer. Quando o usuário digitar a palavra 'FIM', o programa se encerrará. Importante: use cores.
"""
colors = (
'\033[m', # Defaut
'\033[0;30;41m', # red
'\033[0;30;42m', # green
'\033[0;30;43m', # yellow
'\033[0;30;44m' # blue
)
def custom_help(py_command):
    """Show the interactive help page for *py_command*, rendered in green."""
    print(colors[2])  # switch to the green colour scheme
    help(py_command)
    print(colors[0])  # reset to the default colour
def title(msg, color):
print(colors[color])
print(f'\n {msg} \n')
def interface():
py_command = input('Digite um comando: ')
title('Sistema de ajuda', 4)
while py_command != 'fim':
custom_help(py_command)
py_command = input('Digite um comando: ').lower()
title('Fim do dia', 1)
print(colors[0])
interface()
|
[
"pedrottoni@outlook.com"
] |
pedrottoni@outlook.com
|
ec06410ce2adc7455ba1077d79d2470120d5230a
|
fb0e82ab4b4d15965cce2396fd9ae31ed2de1080
|
/file2.py
|
7b8e4b69230dec1d4849187abac9ca76554623e8
|
[] |
no_license
|
flerchy/My-1-PyProj
|
477a1ed212d2f4721b1048f43033f1803eda7302
|
e95973dd424d2bb3557d5475501dafdc3fa46317
|
refs/heads/master
| 2020-12-03T04:10:09.120252
| 2017-06-29T22:55:23
| 2017-06-29T22:55:23
| 95,823,795
| 0
| 2
| null | 2017-08-24T11:05:43
| 2017-06-29T22:04:25
|
Python
|
UTF-8
|
Python
| false
| false
| 847
|
py
|
#import math
class vect:
x = 0
y = 0
def Multiply(self, v):
v2 = vect(0, 0)
v2.x = v.x * self.x
v2.y = v.y * self.y
return v2
def __init__(self, x: object, y: object) -> object:
self.x = x
self.y = y
class dot:
x = 0
y = 0
def Add(self, d):
d2 = dot(0, 0)
d2.x = d.x + self.x
d2.y = d.y + self.y
return d2
def __init__(self, x: object, y: object) -> object:
self.x = x
self.y = y
def VectorOperations():
a = vect(2, 4)
b = vect(5, 2)
return a.Multiply(b)
def DotOperations():
a = dot(2, 4)
b = dot(5, 2)
return a.Add(b)
def Main():
res = VectorOperations()
print(res.x, res.y)
res = DotOperations()
print(res.x, res.y)
return 0
if __name__ == "__main__":
Main()
|
[
"flerchy@gmail.com"
] |
flerchy@gmail.com
|
45bca23a6ef9d24fca6aae8566d120036237ddfb
|
e754fd34d40b41cd56adc947309832574094e0b6
|
/jiajun_experiment/cifar10_experiment/cifar10.py
|
0cc9f2f34dea255f2751c7d028bd33e60afa1d25
|
[] |
no_license
|
yaliamit/Python
|
7d071fe76eba14c78540b5008d616080bca78ed9
|
d0e441212a9f86a91723a99f8bfc89d245992a2e
|
refs/heads/master
| 2021-01-11T05:01:07.293444
| 2020-05-28T01:27:20
| 2020-05-28T01:27:20
| 71,490,136
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,688
|
py
|
import lasagne
import numpy as np
import theano.tensor as T
import theano
import os, sys, gzip
from six.moves import urllib
import tarfile
import pickle
import cifar10_input
import lasagne
from lasagne.layers import LocalResponseNormalization2DLayer, DenseLayer, Conv2DLayer, MaxPool2DLayer, InputLayer, DimshuffleLayer, BatchNormLayer
from lasagne.regularization import regularize_layer_params_weighted, l2
#from lasagne.layers.dnn import Conv2DDNNLayer as Conv2DLayer
#from lasagne.layers.dnn import MaxPool2DDNNLayer as MaxPool2DLayer
# Basic model parameters.
#tf.app.flags.DEFINE_integer('batch_size', 128,
# """Number of images to process in a batch.""")
#tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
# """Path to the CIFAR-10 data directory.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def build_cnn(input_var=None):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
input_layer = InputLayer((None, 3, IMAGE_SIZE, IMAGE_SIZE), input_var=input_var)
norm0 = BatchNormLayer(input_layer)
# conv1
conv1 = Conv2DLayer(norm0, num_filters=64, filter_size=(3,3),
nonlinearity=lasagne.nonlinearities.rectify,
pad='same', W=lasagne.init.GlorotUniform(),
b=lasagne.init.Constant(0.0),
name="conv1")
conv1a = Conv2DLayer(conv1, num_filters=64, filter_size=(3,3),
nonlinearity=lasagne.nonlinearities.rectify,
pad='same', W=lasagne.init.GlorotUniform(),
b=lasagne.init.Constant(0.0),
name="conv1a")
pool1 = MaxPool2DLayer(conv1a, pool_size=(2, 2), stride=(2, 2), pad=0)
# norm1 = LocalResponseNormalization2DLayer(pool1, alpha=0.001 / 9.0,
# beta=0.75, k=1.0, n=9)
norm1 = BatchNormLayer(pool1)
# pool1
# conv2
conv2 = Conv2DLayer(lasagne.layers.dropout(norm1, p = 0.5),
num_filters=128, filter_size=(3,3),
nonlinearity=lasagne.nonlinearities.rectify,
pad='same', W=lasagne.init.GlorotUniform(),
b=lasagne.init.Constant(0.1),
name='conv2')
conv2a = Conv2DLayer(conv2,
num_filters=128, filter_size=(3,3),
nonlinearity=lasagne.nonlinearities.rectify,
pad='same', W=lasagne.init.GlorotUniform(),
b=lasagne.init.Constant(0.1),
name='conv2a')
pool2 = MaxPool2DLayer(conv2a, pool_size=(2, 2), stride=(2, 2), pad=0)
# norm2 = LocalResponseNormalization2DLayer(pool2, alpha=0.001 / 9.0,
# beta=0.75, k=1.0, n=9)
norm2 = BatchNormLayer(pool2)
# pool2
conv3 = Conv2DLayer(lasagne.layers.dropout(norm2, p = 0.5),
num_filters=256, filter_size=(3,3),
nonlinearity=lasagne.nonlinearities.rectify,
pad='same', W=lasagne.init.GlorotUniform(),
b=lasagne.init.Constant(0.1),
name='conv3')
pool3 = MaxPool2DLayer(conv3, pool_size=(2, 2), stride=(2, 2), pad=0)
# norm3 = LocalResponseNormalization2DLayer(pool3, alpha=0.001 / 9.0,
# beta=0.75, k=1.0, n=9)
norm3 = BatchNormLayer(pool3)
# fc1
fc1 = DenseLayer(lasagne.layers.dropout(norm3, p = 0.5),
num_units=256,
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.1),
name="fc1")
# fc3
softmax_layer = DenseLayer(lasagne.layers.dropout(fc1, p = 0.5),
num_units=10,
nonlinearity=lasagne.nonlinearities.softmax,
W=lasagne.init.GlorotUniform(),
b=lasagne.init.Constant(0.0),
name="softmax")
# Weight Decay
weight_decay_layers = {fc1: 0.0}
l2_penalty = regularize_layer_params_weighted(weight_decay_layers, l2)
return softmax_layer, l2_penalty
|
[
"shenjiajun90@gmail.com"
] |
shenjiajun90@gmail.com
|
850d09c0f348c17d5cf4d4f63e9c595609e66659
|
8364e4d23191ee535c163debffafa8418d705843
|
/test/test_v1beta1_cron_job_status.py
|
0240f917290f60dc3c62a8ed58c7e06548496912
|
[
"Apache-2.0"
] |
permissive
|
olitheolix/aiokubernetes
|
2bb6499030e2e6e9b7ca0db63c4441293d70a09b
|
266718b210dff2a9b2212183261ea89adf89115e
|
refs/heads/master
| 2020-03-21T23:02:30.484410
| 2018-10-20T19:33:01
| 2018-10-22T05:52:42
| 139,162,905
| 28
| 3
|
Apache-2.0
| 2018-10-22T05:52:51
| 2018-06-29T15:02:59
|
Python
|
UTF-8
|
Python
| false
| false
| 982
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import aiokubernetes
from aiokubernetes.models.v1beta1_cron_job_status import V1beta1CronJobStatus # noqa: E501
from aiokubernetes.rest import ApiException
class TestV1beta1CronJobStatus(unittest.TestCase):
"""V1beta1CronJobStatus unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1CronJobStatus(self):
"""Test V1beta1CronJobStatus"""
# FIXME: construct object with mandatory attributes with example values
# model = aiokubernetes.models.v1beta1_cron_job_status.V1beta1CronJobStatus() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"olitheolix@gmail.com"
] |
olitheolix@gmail.com
|
140c36e514ac1e06b410d5e548d03c864a0c432c
|
24d8cf871b092b2d60fc85d5320e1bc761a7cbe2
|
/wicd/rev519-537/wicdMerge/wicd/backend.py
|
2cd969a4f2227e3f956a033686a6a1581ac2fd22
|
[] |
no_license
|
joliebig/featurehouse_fstmerge_examples
|
af1b963537839d13e834f829cf51f8ad5e6ffe76
|
1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad
|
refs/heads/master
| 2016-09-05T10:24:50.974902
| 2013-03-28T16:28:47
| 2013-03-28T16:28:47
| 9,080,611
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,990
|
py
|
""" Backend manager for wicd.
Manages and loads the pluggable backends for wicd.
"""
import sys
import os
import wicd.wpath as wpath
from baseinterface import BaseInterface
class BackendManager (object) :
def __init__(self):
""" Initialize the backend manager. """
self.backend_dir = "backends"
self.__loaded_backends = {}
def _valid_backend(self, be_dir):
""" Make sure the backend is valid. """
access = os.access(be_dir, os.F_OK)
isdir = os.path.isdir(be_dir)
starts_with_be = os.path.basename(be_dir).startswith('be-')
return access and isdir and starts_with_be
def get_loaded_backends(self):
if self.__loaded_backends and not self.__loaded_backends is None:
return self.__loaded_backends
else:
return None
def get_backend_by_type(self, type):
return self.__loaded_backends[type]
def get_available_backend_modules(self):
""" Returns a list of all valid backends in the backend directory. """
be_list = []
for f in os.listdir(self.backend_dir):
if self._valid_backend(os.path.join(self.backend_dir, f)):
be_list.append(f[3:])
return be_list
def load_all_available_backends(self):
for backend in self.get_available_backend_modules():
print 'loading backend',backend
self.load_backend(backend)
def load_backend(self, backend_name):
""" Load and return a backend module.
Given a backend name be-foo, attempt to load a python module
in the backends directory called be-foo. The module must
include a certain set of classes and variables to be considered
valid.
"""
def fail(backend_name, reason):
print "failed to load backend %s: %s" % (backend_name, reason)
print 'trying to load backend %s' % backend_name
backend_path = os.path.join(self.backend_dir, 'be-' + backend_name)
if self._valid_backend(backend_path):
sys.path.insert(0, self.backend_dir)
backend = __import__('be-' + backend_name)
else:
fail(backend_name, 'invalid backend file.')
return None
new_backends = [ i for i in dir(backend.interface) if i.startswith('Backend') ]
for backend_class_name in new_backends:
backend_class = getattr(backend.interface, backend_class_name)
if issubclass(backend_class, BaseInterface):
self.__loaded_backends[backend_class.get_type()] = backend_class
print 'successfully loaded backend %s' % backend_class.__name__
else:
fail(backend_class, 'does not subclass BaseInterface')
if __name__ == "__main__":
print "main"
be = BackendManager()
print be.get_available_backend_modules()
be.load_all_available_backends()
print be.get_loaded_backends()
|
[
"joliebig@fim.uni-passau.de"
] |
joliebig@fim.uni-passau.de
|
fde44d5d006e5ec5248ff47a658973924c676d68
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/mcl1_input/L36/36-35_MD_NVT_rerun/set_1ns_equi_1.py
|
0ea1c77d5e2d79791410687f4e64ca8b29dd7c03
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 925
|
py
|
import os
dir = '/mnt/scratch/songlin3/run/mcl1/L36/MD_NVT_rerun/ti_one-step/36_35/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi_1.in'
temp_pbs = filesdir + 'temp_1ns_equi_1.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
workdir = dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi_1.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#PBS
pbs = workdir + "%6.5f_1ns_equi_1.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#top
os.system("cp ../36-35_merged.prmtop .")
os.system("cp ../0.5_equi_0.rst .")
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
b0c36703950d6c12151ca5149dff452c3190ec04
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/138/usersdata/201/53117/submittedfiles/volumeTV.py
|
8edb280ea7ea90acf2183f107078000ec0dd653e
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
-*- coding: utf-8 -*-
V=int(input('Volume inicial:'))
T=int(input('Variação do volume:'))
soma=V
for i in range(1,T+1,1):
n=int(input('Muudança de volume:'))
soma=soma+1
if soma>100:
soma=100
elif soma<0:
soma=0
print(soma)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
4e7f60a0275bdcfe3703e3ac63914ff611e793df
|
1c9abb4b27517d99d579a4284035f39e092033e5
|
/0x11-python-network_1/2-post_email.py
|
d0dc27e1da547b11f12ea8805e2802503a9fc511
|
[] |
no_license
|
95ktsmith/holbertonschool-higher_level_programming
|
15160e6e76f7f6f7e4ddfd2266cf9bf60fddbcb5
|
c845402a9b4c7ad9d1c1b1a983f9fb7a4727209d
|
refs/heads/master
| 2022-12-19T05:56:00.288537
| 2020-09-24T23:32:20
| 2020-09-24T23:32:20
| 259,328,593
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
#!/usr/bin/python3
""" POST an email """
if __name__ == "__main__":
from urllib import request
from urllib import parse
from sys import argv
data = parse.urlencode({'email': argv[2]}).encode('ascii')
req = request.Request(argv[1], data)
with request.urlopen(req) as response:
print(response.read().decode('utf-8'))
|
[
"95ktsmith@gmail.com"
] |
95ktsmith@gmail.com
|
c65f1abb5a53c4cd127b36179a9397dbb9797578
|
93f200a88e6084be9dad4422195f5e7af6eecb68
|
/src/pymor/analyticalproblems/text.py
|
d1bb4d4fe6c8f6bfa6f6d941c81d83651fca99de
|
[
"BSD-2-Clause"
] |
permissive
|
mahgadalla/pymor
|
dfc163b396c15dec05ea519ee0e9b3277ba5c84f
|
ee2806b4c93748e716294c42454d611415da7b5e
|
refs/heads/master
| 2020-03-21T13:08:00.819939
| 2018-06-15T12:19:00
| 2018-06-18T08:08:07
| 138,589,646
| 1
| 0
| null | 2018-06-25T12:05:39
| 2018-06-25T12:05:39
| null |
UTF-8
|
Python
| false
| false
| 3,275
|
py
|
# -*- coding: utf-8 -*-
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2017 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from pymor.analyticalproblems.elliptic import StationaryProblem
from pymor.core.defaults import defaults
from pymor.domaindescriptions.basic import RectDomain
from pymor.functions.basic import ConstantFunction, LincombFunction
from pymor.functions.bitmap import BitmapFunction
from pymor.parameters.functionals import ProjectionParameterFunctional
from pymor.parameters.spaces import CubicParameterSpace
@defaults('font_name')
def text_problem(text='pyMOR', font_name=None):
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from tempfile import NamedTemporaryFile
font_list = [font_name] if font_name else ['DejaVuSansMono.ttf', 'VeraMono.ttf', 'UbuntuMono-R.ttf', 'Arial.ttf']
font = None
for filename in font_list:
try:
font = ImageFont.truetype(filename, 64) # load some font from file of given size
except (OSError, IOError):
pass
if font is None:
raise ValueError('Could not load TrueType font')
size = font.getsize(text) # compute width and height of rendered text
size = (size[0] + 20, size[1] + 20) # add a border of 10 pixels around the text
def make_bitmap_function(char_num): # we need to genereate a BitmapFunction for each character
img = Image.new('L', size) # create new Image object of given dimensions
d = ImageDraw.Draw(img) # create ImageDraw object for the given Image
# in order to position the character correctly, we first draw all characters from the first
# up to the wanted character
d.text((10, 10), text[:char_num + 1], font=font, fill=255)
# next we erase all previous character by drawing a black rectangle
if char_num > 0:
d.rectangle(((0, 0), (font.getsize(text[:char_num])[0] + 10, size[1])), fill=0, outline=0)
# open a new temporary file
with NamedTemporaryFile(suffix='.png') as f: # after leaving this 'with' block, the temporary
# file is automatically deleted
img.save(f, format='png')
return BitmapFunction(f.name, bounding_box=[(0, 0), size], range=[0., 1.])
# create BitmapFunctions for each character
dfs = [make_bitmap_function(n) for n in range(len(text))]
# create an indicator function for the background
background = ConstantFunction(1., 2) - LincombFunction(dfs, np.ones(len(dfs)))
# form the linear combination
dfs = [background] + dfs
coefficients = [1] + [ProjectionParameterFunctional('diffusion', (len(text),), (i,)) for i in range(len(text))]
diffusion = LincombFunction(dfs, coefficients)
return StationaryProblem(
domain=RectDomain(dfs[1].bounding_box, bottom='neumann'),
neumann_data=ConstantFunction(-1., 2),
diffusion=diffusion,
parameter_space=CubicParameterSpace(diffusion.parameter_type, 0.1, 1.)
)
|
[
"stephanrave@uni-muenster.de"
] |
stephanrave@uni-muenster.de
|
48bd9280efa0de89650a0336d76d194c09989518
|
dd949f215d968f2ee69bf85571fd63e4f085a869
|
/systems/css-2011-teams/blue/subarchitectures/planner.sa/src/base_planners/downward/plan.py
|
b98bbe87d41a3acc5beeb19f1f35a0306dfed8db
|
[] |
no_license
|
marc-hanheide/cogx
|
a3fd395805f1b0ad7d713a05b9256312757b37a9
|
cb9a9c9cdfeba02afac6a83d03b7c6bb778edb95
|
refs/heads/master
| 2022-03-16T23:36:21.951317
| 2013-12-10T23:49:07
| 2013-12-10T23:49:07
| 219,460,352
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,210
|
py
|
#! /usr/bin/env python2.5
import os
import subprocess
import sys
import shutil
path = os.path.abspath(os.path.dirname(__file__)) # where this file resides
def main():
def run(*args, **kwargs):
input = kwargs.pop("input", None)
output = kwargs.pop("output", None)
assert not kwargs
redirections = {}
if input:
redirections["stdin"] = open(input)
if output:
redirections["stdout"] = open(output, "w")
# print args, redirections
subprocess.check_call(args, **redirections)
if len(sys.argv) == 3:
domain, problem = sys.argv[1:]
# run translator
run(os.path.join(path, "translate/translate.py"), domain, problem)
else:
domain, problem, mutex = sys.argv[1:]
# run translator
run(os.path.join(path, "translate/translate.py"), domain, problem, "-m", mutex)
# run preprocessing
run(os.path.join(path, "preprocess/preprocess"), input="output.sas")
# run search
run(os.path.join(path, "search/search"), "yY", input="output")
# epsilonize plan
# shutil.move("%s.1" % result_name, result_name)
# run("search/epsilonize_plan.py", input=result_name, output="%s_eps" % result_name)
# shutil.move("%s_eps" % result_name, result_name)
if __name__ == "__main__":
main()
|
[
"marc@hanheide.net"
] |
marc@hanheide.net
|
4ea429fb0979407907d6336d069e7fbbe0fd2e87
|
8cde806e824208949fd9e34806445d05114860cc
|
/detools/compression/heatshrink.py
|
c7f535d58b3d9ff41dbd3c60cd647beae73eb8f7
|
[
"BSD-2-Clause",
"MIT"
] |
permissive
|
tips367/detools
|
80003facc744147c39f339cfe20b2d3eb8dccd70
|
21092202cdefc3358f450801be0e1855ea06a18d
|
refs/heads/master
| 2022-12-29T14:33:08.882316
| 2020-10-16T15:20:08
| 2020-10-16T15:20:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,170
|
py
|
"""Heatshrink wrapper.
"""
import bitstruct
from heatshrink2.core import Writer
from heatshrink2.core import Reader
from heatshrink2.core import Encoder
def pack_header(window_sz2, lookahead_sz2):
return bitstruct.pack('u4u4', window_sz2 - 4, lookahead_sz2 - 3)
def unpack_header(data):
window_sz2, lookahead_sz2 = bitstruct.unpack('u4u4', data)
return window_sz2 + 4, lookahead_sz2 + 3
class HeatshrinkCompressor(object):
def __init__(self):
window_sz2 = 8
lookahead_sz2 = 7
self._data = pack_header(window_sz2, lookahead_sz2)
self._encoder = Encoder(Writer(window_sz2=window_sz2,
lookahead_sz2=lookahead_sz2))
def compress(self, data):
compressed = self._encoder.fill(data)
if self._data:
compressed = self._data + compressed
self._data = b''
return compressed
def flush(self):
return self._data + self._encoder.finish()
class HeatshrinkDecompressor(object):
def __init__(self, number_of_bytes):
self._number_of_bytes_left = number_of_bytes
self._data = b''
self._encoder = None
def decompress(self, data, size):
if self._encoder is None:
if not data:
return b''
window_sz2, lookahead_sz2 = unpack_header(data[:1])
self._encoder = Encoder(Reader(window_sz2=window_sz2,
lookahead_sz2=lookahead_sz2))
data = data[1:]
self._number_of_bytes_left -= 1
if self._number_of_bytes_left > 0:
self._data += self._encoder.fill(data)
self._number_of_bytes_left -= len(data)
if self._number_of_bytes_left == 0:
self._data += self._encoder.finish()
self._number_of_bytes_left = -1
decompressed = self._data[:size]
self._data = self._data[size:]
return decompressed
@property
def needs_input(self):
return self._data == b'' and not self.eof
@property
def eof(self):
return self._number_of_bytes_left == -1 and self._data == b''
|
[
"erik.moqvist@gmail.com"
] |
erik.moqvist@gmail.com
|
fa1b8dd2c5c5d52eb24c24a8d293b35c86edfe04
|
13c111d2c405fef3b074fd7f7ed7cd06cc05084a
|
/graphql_start/migrations/0003_auto_20181101_1259.py
|
516bc9625590708dfdd411fe9a0b44ec7caf73ea
|
[] |
no_license
|
Dimas4/GraphQL-Django
|
7da5f73cb0427f4e7bbf5a48e1c4bd45fc35cfb6
|
3fb6919cd2c30848e08e251279e0445dab6f8247
|
refs/heads/master
| 2020-04-04T09:48:38.987618
| 2018-11-09T09:33:15
| 2018-11-09T09:33:15
| 155,831,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 778
|
py
|
# Generated by Django 2.1.2 on 2018-11-01 12:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('graphql_start', '0002_auto_20181101_1259'),
]
operations = [
migrations.AddField(
model_name='article',
name='category',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='graphql_start.Category'),
preserve_default=False,
),
migrations.AlterField(
model_name='article',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"ivanshatukho@yandex.ru"
] |
ivanshatukho@yandex.ru
|
77aa658e02bb132300c8e65d1c3916d6b8025cbd
|
0956319ecf55da86b05237e2a26a0ebae41fe884
|
/scrape-circuit-patch-share.py
|
d8b01153c1d2393e0658f7d14b0b738e04634d4a
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
SpotlightKid/personal-scripts
|
65492ea919ec3634daa40eac6803067e390ffa79
|
8e93005e740987adc5a8403ab80e0049998bfbbe
|
refs/heads/master
| 2023-08-20T16:27:25.717092
| 2023-08-19T11:48:42
| 2023-08-19T12:37:26
| 167,387,617
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,080
|
py
|
#!/usr/bin/env python2
"""Scrape Novation Circuit SysEx patch data from HTML saved from Circuit Patch Share site."""
import argparse
import logging
import os
import re
import sys
from os.path import exists, join
from base64 import b64decode
log = logging.getLogger('scrape-circuit-patch-share')
def safe_name(name):
return "".join(c if re.match(r'\w', c) else '_' for c in name)
def unescape(match):
return chr(int(match.group(1)))
def scrape_patches(html):
html = re.sub('&#(\d+);', unescape, html)
patches = re.findall(r"sendPatchToCircuit\('(.*?)',\s*atob\('(.*?)'\),\s*(\d+)\)", html)
result = {}
for name, patch, synth in patches:
name = name.strip()
if name in result:
continue
result[(name, int(synth))] = bytearray([int(c) for c in b64decode(patch).split(b',')])
return result
def main(args=None):
ap = argparse.ArgumentParser(description=__doc__.splitlines()[0])
ap.add_argument('-v', '--verbose', action="store_true",
help="Be verbose")
ap.add_argument('-o', '--output-dir', metavar='DIR', default=os.getcwd(),
help="Output directory (default: current directory)")
ap.add_argument('html', help="HTML input file")
args = ap.parse_args(args)
logging.basicConfig(format="%(levelname)s: %(message)s",
level=logging.DEBUG if args.verbose else logging.INFO)
with open(args.html) as fp:
html = fp.read()
patches = scrape_patches(html)
log.info("Found %i patches.", len(patches))
for i, ((name, synth), data) in enumerate(sorted(patches.items())):
outdir = join(args.output_dir, "Synth %i" % (synth + 1,))
if not exists(outdir):
os.makedirs(outdir)
outfn = join(outdir, "%s.syx" % safe_name(name))
log.info("Writing patch '%s' to '%s'...", name, outfn)
data[7] = synth
with open(outfn, 'wb') as fp:
fp.write(patches[(name, synth)])
log.info("%i patch files written.", i + 1)
return 0
if __name__ == '__main__':
sys.exit(main() or 0)
|
[
"chris@chrisarndt.de"
] |
chris@chrisarndt.de
|
5226b1f41f04ac77a049d211af76f2e57c43105c
|
673f9b85708affe260b892a4eb3b1f6a0bd39d44
|
/Botnets/App/App Web/PDG-env/lib/python3.6/site-packages/pandas/tests/dtypes/test_concat.py
|
02daa185b1cdb687219a6be79e0400731acdfc72
|
[
"MIT"
] |
permissive
|
i2tResearch/Ciberseguridad_web
|
feee3fe299029bef96b158d173ce2d28ef1418e4
|
e6cccba69335816442c515d65d9aedea9e7dc58b
|
refs/heads/master
| 2023-07-06T00:43:51.126684
| 2023-06-26T00:53:53
| 2023-06-26T00:53:53
| 94,152,032
| 14
| 0
|
MIT
| 2023-09-04T02:53:29
| 2017-06-13T00:21:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,435
|
py
|
import pytest
import pandas.core.dtypes.concat as _concat
from pandas import DatetimeIndex, Period, PeriodIndex, Series, TimedeltaIndex
@pytest.mark.parametrize(
"to_concat, expected",
[
# int/float/str
([["a"], [1, 2]], ["i", "object"]),
([[3, 4], [1, 2]], ["i"]),
([[3, 4], [1, 2.1]], ["i", "f"]),
# datetimelike
([DatetimeIndex(["2011-01-01"]), DatetimeIndex(["2011-01-02"])], ["datetime"]),
([TimedeltaIndex(["1 days"]), TimedeltaIndex(["2 days"])], ["timedelta"]),
# datetimelike object
(
[
DatetimeIndex(["2011-01-01"]),
DatetimeIndex(["2011-01-02"], tz="US/Eastern"),
],
["datetime", "datetime64[ns, US/Eastern]"],
),
(
[
DatetimeIndex(["2011-01-01"], tz="Asia/Tokyo"),
DatetimeIndex(["2011-01-02"], tz="US/Eastern"),
],
["datetime64[ns, Asia/Tokyo]", "datetime64[ns, US/Eastern]"],
),
([TimedeltaIndex(["1 days"]), TimedeltaIndex(["2 hours"])], ["timedelta"]),
(
[
DatetimeIndex(["2011-01-01"], tz="Asia/Tokyo"),
TimedeltaIndex(["1 days"]),
],
["datetime64[ns, Asia/Tokyo]", "timedelta"],
),
],
)
def test_get_dtype_kinds(index_or_series, to_concat, expected):
to_concat_klass = [index_or_series(c) for c in to_concat]
result = _concat.get_dtype_kinds(to_concat_klass)
assert result == set(expected)
@pytest.mark.parametrize(
"to_concat, expected",
[
(
[PeriodIndex(["2011-01"], freq="M"), PeriodIndex(["2011-01"], freq="M")],
["period[M]"],
),
(
[
Series([Period("2011-01", freq="M")]),
Series([Period("2011-02", freq="M")]),
],
["period[M]"],
),
(
[PeriodIndex(["2011-01"], freq="M"), PeriodIndex(["2011-01"], freq="D")],
["period[M]", "period[D]"],
),
(
[
Series([Period("2011-01", freq="M")]),
Series([Period("2011-02", freq="D")]),
],
["period[M]", "period[D]"],
),
],
)
def test_get_dtype_kinds_period(to_concat, expected):
result = _concat.get_dtype_kinds(to_concat)
assert result == set(expected)
|
[
"ulcamilo@gmail.com"
] |
ulcamilo@gmail.com
|
986576fea2470a7c79037f9f2e1ec6d1f08251f2
|
b83a23fa50e8f1ca6ce1fb3b550e6ceb1b513261
|
/aliyun-python-sdk-live/aliyunsdklive/request/v20161101/AddCasterComponentRequest.py
|
c939357ca0b2390c92cbbcb114802eddb670e807
|
[
"Apache-2.0"
] |
permissive
|
sunfuze/aliyun-openapi-python-sdk
|
c9f8143cf1ceac1bdd09f36d5f4493a510f48a0b
|
09910c57081f207da294d6d2fe981f7f913bc501
|
refs/heads/master
| 2021-03-24T12:01:12.107284
| 2018-01-18T06:32:06
| 2018-01-18T06:32:06
| 118,209,677
| 1
| 0
| null | 2018-01-20T04:41:58
| 2018-01-20T04:41:58
| null |
UTF-8
|
Python
| false
| false
| 2,951
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class AddCasterComponentRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'live', '2016-11-01', 'AddCasterComponent')
def get_ImageLayerContent(self):
return self.get_query_params().get('ImageLayerContent')
def set_ImageLayerContent(self,ImageLayerContent):
self.add_query_param('ImageLayerContent',ImageLayerContent)
def get_CasterId(self):
return self.get_query_params().get('CasterId')
def set_CasterId(self,CasterId):
self.add_query_param('CasterId',CasterId)
def get_ComponentLayer(self):
return self.get_query_params().get('ComponentLayer')
def set_ComponentLayer(self,ComponentLayer):
self.add_query_param('ComponentLayer',ComponentLayer)
def get_ComponentName(self):
return self.get_query_params().get('ComponentName')
def set_ComponentName(self,ComponentName):
self.add_query_param('ComponentName',ComponentName)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_Version(self):
return self.get_query_params().get('Version')
def set_Version(self,Version):
self.add_query_param('Version',Version)
def get_ComponentType(self):
return self.get_query_params().get('ComponentType')
def set_ComponentType(self,ComponentType):
self.add_query_param('ComponentType',ComponentType)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_LocationId(self):
return self.get_query_params().get('LocationId')
def set_LocationId(self,LocationId):
self.add_query_param('LocationId',LocationId)
def get_Effect(self):
return self.get_query_params().get('Effect')
def set_Effect(self,Effect):
self.add_query_param('Effect',Effect)
def get_TextLayerContent(self):
return self.get_query_params().get('TextLayerContent')
def set_TextLayerContent(self,TextLayerContent):
self.add_query_param('TextLayerContent',TextLayerContent)
|
[
"haowei.yao@alibaba-inc.com"
] |
haowei.yao@alibaba-inc.com
|
872f46b1d5265e8af1c408c23647f1e3647142a3
|
29e91d422f0fcad92f0e25b3dbb9efd39dc01162
|
/electronic-station/weak-point.py
|
63745fe8a2295a4af4e98785b732a6fed26e2473
|
[] |
no_license
|
cielavenir/checkio
|
c206410b7d8d368e80ad0f66f6314097bd900bcd
|
e2dfcdef75cd68ca3cced159225b5433570bd85b
|
refs/heads/master
| 2021-01-22T20:34:29.899146
| 2018-02-22T15:16:21
| 2018-02-22T15:16:21
| 85,328,995
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 675
|
py
|
w=lambda m:min(list(range(len(m))),key=lambda i:sum(m[i][j]for j in range(len(m))))
weak_point=lambda m:[w(m),w(list(zip(*m)))]
if __name__ == '__main__':
assert isinstance(weak_point([[1]]), (list, tuple)), "The result should be a list or a tuple"
assert list(weak_point([[7, 2, 7, 2, 8],
[2, 9, 4, 1, 7],
[3, 8, 6, 2, 4],
[2, 5, 2, 9, 1],
[6, 6, 5, 4, 5]])) == [3, 3], "Example"
assert list(weak_point([[7, 2, 4, 2, 8],
[2, 8, 1, 1, 7],
[3, 8, 6, 2, 4],
[2, 5, 2, 9, 1],
[6, 6, 5, 4, 5]])) == [1, 2], "Two weak point"
assert list(weak_point([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])) == [0, 0], "Top left"
|
[
"cielartisan@gmail.com"
] |
cielartisan@gmail.com
|
b548121d1bdde5836f7a4a846c7841648d2592bc
|
e29f8c29a993156b7de7b0451d63ad8cca51c9a6
|
/zajecia10/czas_godzina1.py
|
cd07774d76e7b37bd5bcebda52c33b00869cbffd
|
[] |
no_license
|
remekwilk/python_basic
|
d898ad26aba809eb14ebed9d94bd93db69154ffa
|
af145a9711dabca232dc5f5be8fe4c407a5fda54
|
refs/heads/master
| 2020-05-03T14:42:19.523070
| 2020-04-11T20:42:03
| 2020-04-11T20:42:03
| 176,701,000
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
from datetime import time

# Noon, built from just the hour component.
samo_poludnie = time(hour=12)
print(samo_poludnie)
# Quarter past eight.
kwadrans_po_osmej = time(hour=8, minute=15)
print(kwadrans_po_osmej)
# Usain Bolt's 100 m record; the last positional argument is microseconds.
usain_bolt_na_100m = time(0, 0, 9, 580000)
usain_bolt_na_100m = time(second=9, microsecond=580000)  # same value, keyword form
print(usain_bolt_na_100m)
# NOTE: a time object does not provide the current wall-clock time.
|
[
"remekwilk@gmail.com"
] |
remekwilk@gmail.com
|
f2355712075a733730ccd7d93e61854573372a8e
|
769f6d88fd777459eb60eb1bbb0fba17cb20d963
|
/Chapter05/05_01_Robots.py
|
cbb36dfa52b7f3c567c4f06bfda9bf6e6ebc3cb9
|
[
"MIT"
] |
permissive
|
PacktPublishing/Python-Web-Scraping-Cookbook
|
141379d09abe2c7d8f408858a2eb44ff0fe3ef26
|
030eb974ba1437b2590b59d38f19fb697bbf9d4c
|
refs/heads/master
| 2023-02-16T04:29:49.942243
| 2023-01-30T04:19:03
| 2023-01-30T04:19:03
| 120,744,571
| 115
| 105
|
MIT
| 2019-10-03T17:38:37
| 2018-02-08T10:08:48
|
HTML
|
UTF-8
|
Python
| false
| false
| 305
|
py
|
from reppy.robots import Robots

base_url = "http://www.amazon.com"
# Fetch and parse the site's robots.txt once.
robots = Robots.fetch(base_url + "/robots.txt")

paths = [
    '/',
    '/gp/dmusic/',
    '/gp/dmusic/promotions/PrimeMusic/',
    '/gp/registry/wishlist/'
]
# Report whether a generic crawler ('*') may fetch each path.
for p in paths:
    print("{0}: {1}".format(robots.allowed(p, '*'), base_url + p))
|
[
"packt.danishs@gmail.com"
] |
packt.danishs@gmail.com
|
83d8e7ca934db2de8ed827172cff2f8794ca29de
|
685f4474699d769dae88537c69f5517ac13a8431
|
/EL258.py
|
b7ef048ddcb0d6128037a7c929726637dd19012c
|
[] |
no_license
|
Pumafied/Project-Euler
|
7466f48e449b7314598c106398c0be0424ae72d5
|
0c3e80a956893ce1881a9694131d52b156b9d3d8
|
refs/heads/master
| 2016-09-05T22:45:09.733696
| 2013-04-20T04:46:48
| 2013-04-20T04:46:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
# A sequence is defined as:
#   g_k = 1, for 0 <= k <= 1999
#   g_k = g_(k-2000) + g_(k-1999), for k >= 2000.
# Find g_k mod 20092010 for k = 10**18.
|
[
"pumafied@gmail.com"
] |
pumafied@gmail.com
|
e029887d12b7ae495e426e2301094b410b3f5302
|
46f1e7c1d81f271b2a3357b2e133049893725d82
|
/Solution/动态规划/一维/70. 爬楼梯/动态规划1.py
|
5f06161b6ab8f5ae526b7d050d229ba88cb1390b
|
[] |
no_license
|
WeiS49/leetcode
|
203fed67773592a45186c99fd6a2f16dff426c3e
|
76ddcec959c18164ae7efb564f2287981f5ab5ca
|
refs/heads/master
| 2023-08-13T16:06:45.152951
| 2021-09-26T15:36:06
| 2021-09-26T15:36:06
| 319,033,963
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
#
# @lc app=leetcode.cn id=70 lang=python3
#
# [70] 爬楼梯
# 动态规划, 没有用实际操作去运算f(n)=f(n-1)+f(n-2)
# 而是在找到逻辑后, 使用加法完成方法数量的计算
# 时间复杂度: 单层循环, O(n)
# 空间复杂度: 占用空间随楼梯数n线性变化, O(n)
# @lc code=start
class Solution:
    def climbStairs(self, n: int) -> int:
        """Number of distinct ways to climb *n* stairs taking 1 or 2 steps.

        The count follows the Fibonacci recurrence f(n) = f(n-1) + f(n-2).
        Computed iteratively with two rolling values: O(n) time, O(1) space.
        (The original dp-list version was O(n) space and raised IndexError
        for n == 0; this returns 1 for n == 0.)
        """
        prev, curr = 1, 1  # f(0) = f(1) = 1
        for _ in range(2, n + 1):
            prev, curr = curr, prev + curr  # slide the window forward
        return curr
# @lc code=end
|
[
"swh_1C3@outlook.com"
] |
swh_1C3@outlook.com
|
92e4f8e251b201a9642253fbf8807dba64c8fb89
|
f3b99fdd48bc38dbe5d972c07dcbce204e5cff2b
|
/Projetos_Django/project/products/views.py
|
97b0c6e78e026d3b71de54c1569823a558248cc6
|
[] |
no_license
|
Aleleonel/Python_codes
|
5b66251d45bbf3678451b6380ca4d5a81f416e25
|
fd535da3f2f4c510b4e85f9ec1dee59c9d07ffcb
|
refs/heads/master
| 2020-04-03T16:44:24.923059
| 2018-12-06T00:16:21
| 2018-12-06T00:16:21
| 155,416,987
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,040
|
py
|
from django.shortcuts import render, redirect
from .models import Product
from .forms import ProductForm
def list_products(request):
    """Render the listing page with every product."""
    context = {'products': Product.objects.all()}
    return render(request, 'products.html', context)
def create_product(request):
    """Show the product form; persist a new product on a valid POST."""
    form = ProductForm(request.POST or None)
    if not form.is_valid():
        # GET, or POST with validation errors: re-render the form.
        return render(request, 'products-form.html', {'form': form})
    form.save()
    return redirect('list_products')
def update_product(request, id):
    """Edit an existing product; save and redirect on a valid POST."""
    product = Product.objects.get(id=id)
    form = ProductForm(request.POST or None, instance=product)
    if not form.is_valid():
        # GET, or POST with validation errors: re-render the bound form.
        return render(request, 'products-form.html', {'form': form, 'product': product})
    form.save()
    return redirect('list_products')
def delete_product(request, id):
    """Ask for confirmation on GET; delete the product and redirect on POST."""
    product = Product.objects.get(id=id)
    if request.method != 'POST':
        return render(request, 'prod-delete-confirm.html', {'product': product})
    product.delete()
    return redirect('list_products')
|
[
"aleleonel@gmail.com"
] |
aleleonel@gmail.com
|
9aa8968084570663211a42b8fd02fb7b0d5d36e1
|
0b9802d039ffee38fd666659719034cf7e42c04b
|
/faker/factory.py
|
bdf466c29ae071633f3150b19b9ff187d06e7c07
|
[
"MIT"
] |
permissive
|
SysGrove/faker
|
e1f633f3231ee94fdb82a15518ec8ecf899c5385
|
61c1aa3eeece341c0984e95cd3128bcdf3797a78
|
refs/heads/master
| 2021-01-16T18:42:56.837210
| 2013-07-23T14:46:09
| 2013-07-23T14:46:09
| 11,422,041
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,912
|
py
|
import sys
from faker import DEFAULT_LOCALE, DEFAULT_PROVIDERS, AVAILABLE_LOCALES
from faker import Generator
from faker import providers
class Factory(object):
    """Builds Generator instances wired with locale-specific providers."""

    @classmethod
    def create(cls, locale=None, providers=None ):
        """Return a Generator carrying every requested provider for *locale*."""
        # Normalize "xx-yy" style tags to package names like "xx_YY".
        locale = locale.replace('-','_') if locale else DEFAULT_LOCALE
        if '_' in locale:
            locale = locale[:2] + locale[2:].upper()
        if locale not in AVAILABLE_LOCALES:
            raise AttributeError('Invalid configuration for faker locale "%s"' % locale)

        generator = Generator()
        for provider in (providers or DEFAULT_PROVIDERS):
            provider_class = cls._getProviderClass(provider, locale)
            generator.addProvider(provider_class(generator))
        return generator

    @classmethod
    def _getProviderClass(cls, provider, locale=''):
        """Resolve *provider*: try the locale, the default locale, then none."""
        candidates = [locale]
        if locale and locale != DEFAULT_LOCALE:
            candidates.append(DEFAULT_LOCALE)
        candidates.append('')
        for candidate in candidates:
            provider_class = cls._findProviderClass(provider, candidate)
            if provider_class:
                return provider_class
        raise ValueError('Unable to find provider "%s" with locale "%s"' % (provider, locale))

    @classmethod
    def _findProviderClass(cls, provider, locale=''):
        """Import the provider module for *locale*; return its Provider or None."""
        path = "{providers}{lang}.{provider}".format(
            providers=providers.__package__,
            lang='.' + locale if locale else '',
            provider=provider
        )
        try:
            __import__(path)
        except ImportError:
            return None
        return sys.modules[path].Provider
|
[
"joke2k@gmail.com"
] |
joke2k@gmail.com
|
c02481c7ac213d1465183d1cc02ade2e36da39ae
|
076d4b8a007fd01e41b357342aad100c87367562
|
/venv/bin/rst2xetex.py
|
93ea0bba24a5121b9d01203e0a5e6f59a88857ed
|
[] |
no_license
|
AlexanderMcNulty/publicpolls
|
b755c0922949018125d5eb18ac8b07fa087f97b7
|
56f340f5d9d044af65262f4099f146d6e22af754
|
refs/heads/master
| 2020-04-15T04:19:59.931533
| 2019-01-07T04:31:03
| 2019-01-07T04:31:03
| 164,378,945
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 902
|
py
|
#!/home/ammc/pyramid/publicpolls/venv/bin/python3
# $Id: rst2xetex.py 7847 2015-03-17 17:30:47Z milde $
# Author: Guenter Milde
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Lua/XeLaTeX code.
"""
# Docutils boilerplate: apply the user's locale for text processing.
# NOTE(review): the bare `except` is deliberate best-effort from the upstream
# docutils script -- a missing/invalid locale must not stop the converter.
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass
from docutils.core import publish_cmdline
description = ('Generates LaTeX documents from standalone reStructuredText '
               'sources for compilation with the Unicode-aware TeX variants '
               'XeLaTeX or LuaLaTeX. '
               'Reads from <source> (default is stdin) and writes to '
               '<destination> (default is stdout). See '
               '<http://docutils.sourceforge.net/docs/user/latex.html> for '
               'the full reference.')
# Runs the full command-line front end at import time (reads argv/stdin).
publish_cmdline(writer_name='xetex', description=description)
|
[
"alexander.mcnulty92@gmail.com"
] |
alexander.mcnulty92@gmail.com
|
fb4850984909d60534cbe43c3bce4336a65383b1
|
76dd8343cb5d04fec631c1711a5642e6f83d8ae2
|
/python/oneflow/test/modules/test_roll.py
|
6f378f8cf56fe1fa6d4ba64262039cbc0eb7ccdc
|
[
"Apache-2.0"
] |
permissive
|
weinapianyun/oneflow
|
56c580ca2d6019f7d3e184a476ee9cb0699eea3e
|
748501a5383f50bf9f3a5d3b3da81d4f31b425de
|
refs/heads/master
| 2023-09-03T05:40:03.313826
| 2021-11-22T08:44:34
| 2021-11-22T08:44:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,875
|
py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow as flow
from test_util import GenArgList
import torch
def _test_roll(test_case, device):
    """Check flow.roll forward and backward against torch.roll when rolling
    four dims of a 5-D tensor at once with random shifts."""
    torch_x = torch.rand(
        (2, 3, 5, 10, 20), device=device, dtype=torch.float32, requires_grad=True
    )
    torch_grad = torch.rand_like(torch_x, device=device)
    # Shifts may exceed the dim sizes; roll wraps them around.
    shifts = (
        np.random.randint(-100, 100),
        np.random.randint(-100, 100),
        np.random.randint(-100, 100),
        np.random.randint(-100, 100),
    )
    dims = (0, 2, 3, 4)
    torch_y = torch.roll(torch_x, shifts, dims)
    torch_y.backward(torch_grad)
    # Rebuild the same input as a oneflow tensor and repeat the computation.
    of_x = flow.tensor(
        torch_x.detach().cpu().numpy(),
        device=device,
        dtype=flow.float32,
        requires_grad=True,
    )
    of_y = flow.roll(of_x, shifts, dims)
    of_grad = flow.tensor(torch_grad.cpu().numpy(), device=device, dtype=flow.float32)
    of_y.backward(of_grad)
    # Outputs and input gradients must match torch exactly.
    test_case.assertTrue(np.array_equal(of_y.numpy(), torch_y.detach().cpu().numpy()))
    test_case.assertTrue(np.array_equal(of_x.grad.numpy(), torch_x.grad.cpu().numpy()))
def _test_roll_single_dims(test_case, device):
    """Same comparison as _test_roll, but with a scalar shift on one dim."""
    torch_x = torch.rand(
        (2, 3, 5, 10, 20), device=device, dtype=torch.float32, requires_grad=True
    )
    torch_grad = torch.rand_like(torch_x, device=device)
    shifts = np.random.randint(-100, 100)  # scalar shift, wraps around
    dims = np.random.randint(0, 4)  # a single random dim
    torch_y = torch.roll(torch_x, shifts, dims)
    torch_y.backward(torch_grad)
    of_x = flow.tensor(
        torch_x.detach().cpu().numpy(),
        device=device,
        dtype=flow.float32,
        requires_grad=True,
    )
    of_y = flow.roll(of_x, shifts, dims)
    of_grad = flow.tensor(torch_grad.cpu().numpy(), device=device, dtype=flow.float32)
    of_y.backward(of_grad)
    test_case.assertTrue(np.array_equal(of_y.numpy(), torch_y.detach().cpu().numpy()))
    test_case.assertTrue(np.array_equal(of_x.grad.numpy(), torch_x.grad.cpu().numpy()))
def _test_roll_none_dims(test_case, device):
    """Same comparison as _test_roll, with dims=None (roll the flattened tensor)."""
    torch_x = torch.rand(
        (2, 3, 5, 10, 20), device=device, dtype=torch.float32, requires_grad=True
    )
    torch_grad = torch.rand_like(torch_x, device=device)
    shifts = np.random.randint(-100, 100)
    dims = None  # torch rolls the flattened tensor and restores the shape
    torch_y = torch.roll(torch_x, shifts, dims)
    torch_y.backward(torch_grad)
    of_x = flow.tensor(
        torch_x.detach().cpu().numpy(),
        device=device,
        dtype=flow.float32,
        requires_grad=True,
    )
    of_y = flow.roll(of_x, shifts, dims)
    of_grad = flow.tensor(torch_grad.cpu().numpy(), device=device, dtype=flow.float32)
    of_y.backward(of_grad)
    test_case.assertTrue(np.array_equal(of_y.numpy(), torch_y.detach().cpu().numpy()))
    test_case.assertTrue(np.array_equal(of_x.grad.numpy(), torch_x.grad.cpu().numpy()))
@flow.unittest.skip_unless_1n1d()
class TestRoll(flow.unittest.TestCase):
    """Runs every roll comparison variant on both cpu and cuda devices."""
    # NOTE: oneflow's test convention names the test-case argument
    # `test_case` instead of the usual `self`.
    def test_expand_compare_with_torch(test_case):
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [
            _test_roll,
            _test_roll_single_dims,
            _test_roll_none_dims,
        ]
        arg_dict["device"] = ["cpu", "cuda"]
        # Cartesian product: each test function on each device.
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])
if __name__ == "__main__":
    unittest.main()
|
[
"noreply@github.com"
] |
weinapianyun.noreply@github.com
|
8d7c1c02839e814bee3d5d5ab60f6f5d06442c78
|
90174b56d08ca79d30d3b1bcace14fa72e228532
|
/tests/experiments/test_prior.py
|
da07cd05bcdd928f94e39071db8bbe4863671d1b
|
[] |
no_license
|
miclaraia/swap-tools
|
559cebfb112649dcaa61f52c278fdfbf86ee5ece
|
fb3468a6fef254cf43e46373c940d0a867c4445d
|
refs/heads/master
| 2021-01-15T20:08:46.928830
| 2017-10-03T19:53:54
| 2017-10-03T19:53:54
| 99,840,718
| 0
| 0
| null | 2017-10-03T19:53:55
| 2017-08-09T18:30:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,115
|
py
|
from swaptools.experiments.prior import Prior
import swaptools.experiments.config as config
import swaptools.experiments.db.experiment as edb
import swaptools.experiments.db.trials as tdb
from swaptools.experiments.iterators import ValueIterator as VI
from unittest.mock import patch, MagicMock
import pytest
@pytest.fixture(scope='module')
def override():
    """Point the experiment config at a throwaway database name."""
    config.experiments.name = 'testexperiments'
    # NOTE(review): these patchers are created but never started (no
    # .start() or `with`), so they currently have no effect -- confirm
    # whether resetting next_id was intended here.
    patch.object(tdb.Trials, 'next_id', 0)
    patch.object(edb.Experiments, 'next_id', 0)
def generate():
    """Build a fresh Prior experiment: prior in {.2,.4,.6,.8}, 100 golds,
    series 1..3, with a mocked gold-label source of 200 subjects."""
    prior = VI.range(.2, .8, .2)
    golds = VI.single(100)
    series = VI.range(1, 3, 1)
    kwargs = {'name': None, 'description': None}
    e = Prior.new(prior, golds, series, **kwargs)
    # Replace the gold getter with a mock exposing 200 dummy golds.
    gg = MagicMock()
    gg.golds = {i: i for i in range(200)}
    e.gg = gg
    return e
# pylint: disable=W0613,W0621,R0201
class TestPrior:
    """Exercises Prior experiment iteration: trial setup, value rollover,
    termination, and total trial count."""
    def test_setup_first(self, override):
        # First _setup_next yields trial 0 with each iterator's initial value.
        e = generate()
        e._setup_next()
        assert e.trial_info == {
            'n': 0,
            'golds': 100,
            'prior': .2,
            'series': 1
        }
    def test_setup_next(self, override):
        # A second call advances the innermost iterator (prior .2 -> .4).
        e = generate()
        e._setup_next()
        e._setup_next()
        assert e.trial_info == {
            'n': 1,
            'golds': 100,
            'prior': .4,
            'series': 1
        }
    def test_rollover(self, override):
        # When prior is exhausted (.8) it wraps to .2 and series increments.
        e = generate()
        e.n = 4
        e.values['prior'].current = .8
        e._setup_next()
        assert e.trial_info == {
            'n': 5,
            'golds': 100,
            'prior': .2,
            'series': 2
        }
    def test_has_next_true(self, override):
        # Series 2 of 3 with prior at its last value: one more pass remains.
        e = generate()
        e.n = 4
        e.values['prior'].current = .8
        e.values['series'].current = 2
        assert e.has_next() is True
    def test_has_next_false(self, override):
        # Every iterator at its final value: the experiment is finished.
        e = generate()
        e.n = 4
        e.values['prior'].current = .8
        e.values['series'].current = 3
        assert e.has_next() is False
    def test_count(self, override):
        # 4 prior values x 1 gold setting x 3 series = 12 trials.
        e = generate()
        assert e.count() == 12
|
[
"micheal.laraia@gmail.com"
] |
micheal.laraia@gmail.com
|
f2d0b8ecf990b3bd64d4ed8f7ac429a1f7618569
|
e11dff811ca981f428644fd70d10a7369c671bcb
|
/src/tools/ecos/cvxpy/examples/advanced/circuits.py
|
f13a6e85c11de273d99651ed51274c17c59334ff
|
[
"GPL-3.0-only",
"GPL-3.0-or-later",
"MIT"
] |
permissive
|
riadnassiffe/Simulator
|
3c4a036b5635534929fdb04b0e9c96d64c0da71f
|
7d9ff09f26367d3714e3d10be3dd4a9817b8ed6b
|
refs/heads/master
| 2021-06-20T09:31:36.033427
| 2021-04-17T00:03:17
| 2021-04-17T00:03:17
| 16,033,879
| 0
| 0
|
MIT
| 2021-03-22T23:20:34
| 2014-01-18T20:58:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,057
|
py
|
# An object oriented model of a circuit.
from cvxpy import *
import abc
class Node(object):
    """ A node connecting devices. """
    def __init__(self):
        self.voltage = Variable()
        self.current_flows = []

    def constraints(self):
        # Kirchhoff's current law: net current into the node is zero.
        total = sum(self.current_flows)
        return [total == 0]
class Ground(Node):
    """ A node pinned at 0 volts. """
    def constraints(self):
        base = super(Ground, self).constraints()
        return [self.voltage == 0] + base
class Device(object):
    __metaclass__ = abc.ABCMeta  # Python-2 style abstract base class
    """ A device on a circuit. """
    def __init__(self, pos_node, neg_node):
        # Register this device's current with both terminals: it leaves the
        # positive node (negative sign) and enters the negative node.
        self.pos_node = pos_node
        self.pos_node.current_flows.append(-self.current())
        self.neg_node = neg_node
        self.neg_node.current_flows.append(self.current())
    # The voltage drop on the device.
    @abc.abstractmethod
    def voltage(self):
        return NotImplemented
    # The current through the device.
    @abc.abstractmethod
    def current(self):
        return NotImplemented
    # Every path between two nodes has the same voltage drop.
    def constraints(self):
        return [self.pos_node.voltage - self.voltage() == self.neg_node.voltage]
class Resistor(Device):
    """ A resistor obeying Ohm's law, V = R*I. """
    def __init__(self, pos_node, neg_node, resistance):
        self._current = Variable()
        self.resistance = resistance
        super(Resistor, self).__init__(pos_node, neg_node)

    def current(self):
        return self._current

    def voltage(self):
        # Sign convention: the voltage drops along the current direction.
        return -self.resistance * self.current()
class VoltageSource(Device):
    """ A constant source of voltage; its current is free. """
    def __init__(self, pos_node, neg_node, voltage):
        self._current = Variable()
        self._voltage = voltage
        super(VoltageSource, self).__init__(pos_node, neg_node)

    def current(self):
        return self._current

    def voltage(self):
        return self._voltage
class CurrentSource(Device):
    """ A constant source of current; its voltage is free. """
    def __init__(self, pos_node, neg_node, current):
        self._current = current
        self._voltage = Variable()
        super(CurrentSource, self).__init__(pos_node, neg_node)

    def current(self):
        return self._current

    def voltage(self):
        return self._voltage
# Create a simple circuit and find the current and voltage.
nodes = [Ground(), Node(), Node()]
# A 10 V battery (fix: the old comment said "5 V" but the value is 10).
devices = [VoltageSource(nodes[0], nodes[2], 10)]
# A series of pairs of parallel resistors.
# 1/4 Ohm resistor and a 1 Ohm resistor in parallel.
devices.append(Resistor(nodes[0], nodes[1], 0.25))
devices.append(Resistor(nodes[0], nodes[1], 1))
# 4 Ohm resistor and a 1 Ohm resistor in parallel.
devices.append(Resistor(nodes[1], nodes[2], 4))
devices.append(Resistor(nodes[1], nodes[2], 1))
# Create the problem: no objective, just satisfy every KCL/KVL constraint.
constraints = []
for obj in nodes + devices:
    constraints += obj.constraints()
Problem(Minimize(0), constraints).solve()
for node in nodes:
    # print() is valid under both Python 2 and 3 (the old bare print
    # statement was Python-2 only).
    print(node.voltage.value)
|
[
"riad.nassiffe@gmail.com"
] |
riad.nassiffe@gmail.com
|
cc7f87e4bc994ca90cb52cbb925e54d73bd1bb7c
|
6684f88abb4dde0e1295fd65e1d82b5d5a3a0414
|
/mysite/models.py
|
3483dc080b012728c9554e723193a1abec2fd9e4
|
[] |
no_license
|
mostafaitalian/mostafaprofile
|
f6242bcdb60af3c679530a9dc4f6dfb3aee6bfd9
|
ac7fa2e2e73cc9dca08e3127dd2a1859e7bbdd28
|
refs/heads/main
| 2023-06-12T12:01:22.766499
| 2021-07-05T19:53:43
| 2021-07-05T19:53:43
| 382,725,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
from django.db import models
from myprofile.models import Profile
# Create your models here.
class MySite(models.Model):
    """A portfolio project shown on a user's profile page."""
    project_name = models.CharField(max_length=100)
    link = models.URLField()  # external URL of the project
    description = models.TextField()
    images = models.ImageField(upload_to='image/')
    # Deleting the profile deletes its projects (CASCADE); reverse access
    # via profile.sites.
    myprofile = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name="sites")
    def __str__(self):
        return self.project_name
|
[
"eng_mustafa_yossef@hotmail.com"
] |
eng_mustafa_yossef@hotmail.com
|
5d858f6dd2ad50fdc1f51166b1f4bc8ece460ce7
|
38a5a87d04e16cc7af2de659516f534853302ed2
|
/scrapy/core/downloader/__init__.py
|
62f48ec5a69a12cfe6d962000b07511880e9cc6e
|
[
"BSD-3-Clause"
] |
permissive
|
zhangcheng/scrapy
|
d623232b946779c386eb7ca56bcfb6d5706a0ccb
|
88e33ad0ad95d5f9049d8d8b1359819f4fbbf704
|
refs/heads/master
| 2021-01-18T12:07:01.174623
| 2011-06-09T03:15:53
| 2011-06-09T03:15:53
| 1,871,569
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,985
|
py
|
"""
Download web pages using asynchronous IO
"""
import random
from time import time
from collections import deque
from twisted.internet import reactor, defer
from twisted.python.failure import Failure
from scrapy.exceptions import IgnoreRequest
from scrapy.conf import settings
from scrapy.utils.python import setattr_default
from scrapy.utils.defer import mustbe_deferred
from scrapy.utils.signal import send_catch_log
from scrapy import signals
from scrapy import log
from .middleware import DownloaderMiddlewareManager
from .handlers import DownloadHandlers
class SpiderInfo(object):
"""Simple class to keep information and state for each open spider"""
def __init__(self, spider):
setattr_default(spider, 'download_delay', spider.settings.getfloat('DOWNLOAD_DELAY'))
setattr_default(spider, 'randomize_download_delay', spider.settings.getbool('RANDOMIZE_DOWNLOAD_DELAY'))
setattr_default(spider, 'max_concurrent_requests', spider.settings.getint('CONCURRENT_REQUESTS_PER_SPIDER'))
if spider.download_delay > 0 and spider.max_concurrent_requests > 1:
spider.max_concurrent_requests = 1
msg = "Setting max_concurrent_requests=1 because of download_delay=%s" % spider.download_delay
log.msg(msg, spider=spider)
self.spider = spider
self.active = set()
self.queue = deque()
self.transferring = set()
self.closing = False
self.lastseen = 0
self.next_request_calls = set()
def free_transfer_slots(self):
return self.spider.max_concurrent_requests - len(self.transferring)
def needs_backout(self):
# use self.active to include requests in the downloader middleware
return len(self.active) > 2 * self.spider.max_concurrent_requests
def download_delay(self):
delay = self.spider.download_delay
if self.spider.randomize_download_delay:
delay = random.uniform(0.5*delay, 1.5*delay)
return delay
def cancel_request_calls(self):
for call in self.next_request_calls:
call.cancel()
self.next_request_calls.clear()
class Downloader(object):
"""Mantain many concurrent downloads and provide an HTTP abstraction.
It supports a limited number of connections per spider and many spiders in
parallel.
"""
def __init__(self):
self.sites = {}
self.handlers = DownloadHandlers()
self.middleware = DownloaderMiddlewareManager.from_settings(settings)
self.concurrent_spiders = settings.getint('CONCURRENT_SPIDERS')
def fetch(self, request, spider):
"""Main method to use to request a download
This method includes middleware mangling. Middleware can returns a
Response object, then request never reach downloader queue, and it will
not be downloaded from site.
"""
site = self.sites[spider]
if site.closing:
raise IgnoreRequest('Cannot fetch on a closing spider')
site.active.add(request)
def _deactivate(response):
site.active.remove(request)
self._close_if_idle(spider)
return response
dfd = self.middleware.download(self.enqueue, request, spider)
return dfd.addBoth(_deactivate)
def enqueue(self, request, spider):
"""Enqueue a Request for a effective download from site"""
site = self.sites[spider]
if site.closing:
raise IgnoreRequest
def _downloaded(response):
send_catch_log(signal=signals.response_downloaded, \
response=response, request=request, spider=spider)
return response
deferred = defer.Deferred().addCallback(_downloaded)
site.queue.append((request, deferred))
self._process_queue(spider)
return deferred
def _process_queue(self, spider):
"""Effective download requests from site queue"""
site = self.sites.get(spider)
if not site:
return
# Delay queue processing if a download_delay is configured
now = time()
delay = site.download_delay()
if delay:
penalty = delay - now + site.lastseen
if penalty > 0 and site.free_transfer_slots():
d = defer.Deferred()
d.addCallback(self._process_queue)
call = reactor.callLater(penalty, d.callback, spider)
site.next_request_calls.add(call)
d.addBoth(lambda x: site.next_request_calls.remove(call))
return
site.lastseen = now
# Process enqueued requests if there are free slots to transfer for this site
while site.queue and site.free_transfer_slots() > 0:
request, deferred = site.queue.popleft()
if site.closing:
dfd = defer.fail(Failure(IgnoreRequest()))
else:
dfd = self._download(site, request, spider)
dfd.chainDeferred(deferred)
self._close_if_idle(spider)
def _close_if_idle(self, spider):
site = self.sites.get(spider)
if site and site.closing and not site.active:
del self.sites[spider]
site.closing.callback(None)
def _download(self, site, request, spider):
# The order is very important for the following deferreds. Do not change!
# 1. Create the download deferred
dfd = mustbe_deferred(self.handlers.download_request, request, spider)
# 2. After response arrives, remove the request from transferring
# state to free up the transferring slot so it can be used by the
# following requests (perhaps those which came from the downloader
# middleware itself)
site.transferring.add(request)
def finish_transferring(_):
site.transferring.remove(request)
self._process_queue(spider)
# avoid partially downloaded responses from propagating to the
# downloader middleware, to speed-up the closing process
if site.closing:
log.msg("Crawled while closing spider: %s" % request, \
level=log.DEBUG, spider=spider)
raise IgnoreRequest
return _
return dfd.addBoth(finish_transferring)
def open_spider(self, spider):
"""Allocate resources to begin processing a spider"""
assert spider not in self.sites, "Spider already opened: %s" % spider
self.sites[spider] = SpiderInfo(spider)
def close_spider(self, spider):
"""Free any resources associated with the given spider"""
assert spider in self.sites, "Spider not opened: %s" % spider
site = self.sites.get(spider)
site.closing = defer.Deferred()
site.cancel_request_calls()
self._process_queue(spider)
return site.closing
def is_idle(self):
return not self.sites
|
[
"pablo@pablohoffman.com"
] |
pablo@pablohoffman.com
|
2424d2a90d0aaef68de0d6f758b31fc067c00c5d
|
de64b143a346585f51590bd674e8d13bbc672386
|
/algorithm/2022/1219_952_Largest_Component_Size_by_Common_Factor/Juwan.py
|
b646a287a0b1aa11aadaf34bd77f8cb879296a9b
|
[] |
no_license
|
ai-kmu/etc
|
304ec20f59e4026025abdcbcae21863c80630dcb
|
9c29941e19b7dd2a2037b110dd6e16690e9a0cc2
|
refs/heads/master
| 2023-08-21T16:30:31.149956
| 2023-08-21T16:26:19
| 2023-08-21T16:26:19
| 199,843,899
| 3
| 24
| null | 2023-05-31T09:56:59
| 2019-07-31T11:36:16
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 881
|
py
|
class Solution:
    def largestComponentSize(self, nums: List[int]) -> int:
        """Size of the largest group of numbers linked by a common factor > 1.

        Union-find over values: each number is unioned with every factor
        pair (j, i // j) for j up to sqrt(i), so numbers sharing any factor
        end up in one component.  The component sizes are then counted per
        root.
        """
        import math  # local import keeps the snippet self-contained

        parent = [-1] * 100001  # parent[x] == -1 means x is its own root

        def find(x):
            # Path-compressing find.
            if parent[x] == -1:
                return x
            parent[x] = find(parent[x])
            return parent[x]

        def union(a, b):
            ra, rb = find(a), find(b)
            if ra != rb:
                parent[rb] = ra

        for i in nums:
            # math.isqrt is exact; int(sqrt(i)) could miss a divisor for
            # large i due to float rounding.
            for j in range(2, math.isqrt(i) + 1):
                if i % j == 0:
                    union(j, i)
                    union(i, i // j)

        best = 0
        counts = {}
        for i in nums:
            root = find(i)
            counts[root] = counts.get(root, 0) + 1
            best = max(best, counts[root])
        return best
|
[
"noreply@github.com"
] |
ai-kmu.noreply@github.com
|
1a5a0fecba5719fbafefe8d2a0202fd233083119
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/rdbms/azure-mgmt-rdbms/generated_samples/postgresql/configuration_create_or_update.py
|
fd2ce019539a6fd4667037e606ec4d697827004b
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,743
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-rdbms
# USAGE
python configuration_create_or_update.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Create/update the `array_nulls` configuration on a sample PostgreSQL
    server and print the resulting resource."""
    client = PostgreSQLManagementClient(
        credential=DefaultAzureCredential(),  # reads AZURE_* env vars
        subscription_id="ffffffff-ffff-ffff-ffff-ffffffffffff",
    )
    response = client.configurations.begin_create_or_update(
        resource_group_name="TestGroup",
        server_name="testserver",
        configuration_name="array_nulls",
        parameters={"properties": {"source": "user-override", "value": "off"}},
    ).result()  # block until the long-running operation completes
    print(response)
# x-ms-original-file: specification/postgresql/resource-manager/Microsoft.DBforPostgreSQL/stable/2017-12-01/examples/ConfigurationCreateOrUpdate.json
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
b19da845110781324e7547090ada1cf6297e4fed
|
6044266e775c87afed99397c8bb88366fbbca0e7
|
/scrapy_file/csrf_token_extract_with_re.py
|
054ec197c5e44ddfab392e7960aca50fad972aa6
|
[] |
no_license
|
ranafge/all-documnent-projects
|
e4434b821354076f486639419598fd54039fb5bd
|
c9d65ddea291c53b8e101357547ac63a36406ed9
|
refs/heads/main
| 2023-05-08T20:01:20.343856
| 2021-05-30T10:44:28
| 2021-05-30T10:44:28
| 372,186,355
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
from bs4 import BeautifulSoup
import requests
import json

# One stats page per season index.  (Fix: the list and the loop variable
# previously shared the name `url`, shadowing the list inside the loop.)
urls = ["https://www.premierleague.com/stats/top/clubs/wins?se={}".format(x) for x in range(1, 100)]
print(urls)

for page_url in urls:
    # Fix: `data` was reused for both the raw HTML and the parsed rows.
    html = requests.get(page_url).text
    soup = BeautifulSoup(html, "html.parser")
    table = soup.find_all('table')[0]
    # Collect the text of every cell, collapsing embedded newlines.
    rows = [td.text.replace('\n', ' ').strip() for td in table.find_all("td")]
    print(rows)
|
[
"ranafge@gmail.com"
] |
ranafge@gmail.com
|
2af1aad03697a4881cf62d2aba159672b8dd4e77
|
5ec7b086aed5341bdb6356e4f013d92f4eef961f
|
/app_local/client.py
|
1138dc2454d825a3fbb8a9345ec65334357d47eb
|
[] |
no_license
|
Mizterbox/MizterboxLogs
|
4de843587ce38909de893eb98e50c2ccb2027654
|
b1c7c2e7e5eacdf8528e6c13ec71564faa7ef82a
|
refs/heads/master
| 2020-04-25T19:46:38.818903
| 2019-03-03T09:08:44
| 2019-03-03T09:08:44
| 173,032,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
import requests, numpy as np, time
from time import gmtime, strftime, localtime

# Number of log entries to post before stopping.
MAX_COUNT = 5000000
# Only these sprinkler ids exist on the server.
actual_sprinkler_ids = [1, 2, 4, 0]
# (Fix: `status` was assigned a random array and then immediately
# overwritten by this list; the unused `sprinklerid`/`address` arrays
# are removed as well.)
status_choices = ['Running Active', 'Restarting', 'Connecting to Wifi', 'Connecting to Internet']

counter = 0
while counter < MAX_COUNT:
    # Post a randomly generated log entry every 2 seconds.
    res = requests.post('http://mizterboxlogs.herokuapp.com/sprinklerlogs/', json={
        "id": int(np.random.choice(actual_sprinkler_ids, size=1)[0]),
        "status": np.random.choice(status_choices, size=1)[0],
    })
    if res.ok:
        print(res.json())
    time.sleep(2)
    counter += 1
|
[
"rakshithvbharani@gmail.com"
] |
rakshithvbharani@gmail.com
|
9785e0140ac83818493992d4910ac3f403e90e9f
|
42e5fd024ca7522c990d9627863302aa1f792804
|
/DeepWNCS/Inverted_Pendulum_sihoon/Common/initialize.py
|
e5e97772145b48d36cec12f9d46d7bee7bd2f66e
|
[] |
no_license
|
msh0576/RL_WCPS
|
1f36de09ab6e4664b56ff929c69fab7de7314988
|
498a54f9777c5a849b0af491d9e76fcc470aa083
|
refs/heads/master
| 2023-06-08T19:13:33.676970
| 2020-11-30T05:30:02
| 2020-11-30T05:30:02
| 317,114,515
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 625
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 8 12:41:23 2020
@author: Sihoon
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
def linear_weights_init(m):
    """Uniformly initialise an nn.Linear module in +/- 1/sqrt(fan_in).

    Modules of any other type are left untouched, so this can be passed
    to Module.apply().
    """
    if not isinstance(m, nn.Linear):
        return
    bound = 1. / math.sqrt(m.weight.size(1))  # fan_in = input features
    m.weight.data.uniform_(-bound, bound)
    if m.bias is not None:
        m.bias.data.uniform_(-bound, bound)
def conv_weights_init(m):
    """Xavier-uniform init for nn.Conv2d weights; zero the bias.

    Modules of any other type are left untouched, so this can be passed
    to Module.apply().
    """
    if not isinstance(m, nn.Conv2d):
        return
    torch.nn.init.xavier_uniform_(m.weight.data)
    if m.bias is not None:
        torch.nn.init.zeros_(m.bias)
|
[
"msh0576@dgist.ac.kr"
] |
msh0576@dgist.ac.kr
|
5165947fc94888a1cc057b8dae59e599ae2c2e82
|
6a8eac5877ea4f782c094ad7b974d03e1dc86401
|
/src/brouwers/albums/tests/factory_models.py
|
85253b626987baa65d422813ba585327d3347edd
|
[] |
permissive
|
modelbrouwers/modelbrouwers
|
cb2bbea34e70f4a1d9a7361dfe7131a20ea26b02
|
7713e78eeb31809e04b0b316ec8f8deed0808fc9
|
refs/heads/main
| 2023-08-06T10:49:33.804123
| 2023-07-30T20:28:34
| 2023-07-30T20:28:34
| 13,872,961
| 7
| 3
|
MIT
| 2023-05-29T15:33:06
| 2013-10-25T21:51:20
|
Python
|
UTF-8
|
Python
| false
| false
| 179
|
py
|
import warnings

from .factories import *

# Deprecation shim: this module only re-exports albums.tests.factories.
warnings.warn(
    "Import from albums.tests.factories, the factory_models module will be removed",
    PendingDeprecationWarning,
)
|
[
"sergeimaertens@gmail.com"
] |
sergeimaertens@gmail.com
|
6a3fea967bb843876033c7044091961fc7cfb259
|
f77d97840915ff2318c8f3841096019337c58689
|
/_admin/admin_service/digestmonkey/models.py
|
1ae642e4624a3ed2f41e871fac4e851e8d1d1b6a
|
[] |
no_license
|
rrader/events-service
|
f35d7e237e0ef5e3598b90878713539960153895
|
5933a6ba83aacb63832dd6efa806409bb37812aa
|
refs/heads/master
| 2021-01-10T04:25:45.875103
| 2015-11-20T16:21:32
| 2015-11-20T16:21:32
| 44,528,882
| 4
| 1
| null | 2015-11-01T19:28:47
| 2015-10-19T11:02:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
from sqlalchemy.dialects.postgresql import ARRAY, JSON
from admin_service.extensions import db
from sqlalchemy.orm import backref, relationship
class DigestMonkeyConfig(db.Model):
    """Per-team credentials for the digest-monkey integration.

    Holds a MailChimp API key, the templates location and a GitHub key.
    ``uselist=False`` on the backref makes it a one-to-one with Team.
    """
    # NOTE(review): legacy table name predates the class rename.
    __tablename__ = 'mailchimpkeys'
    id = db.Column(db.Integer, primary_key=True)
    mailchimp_key = db.Column(db.String(100))
    templates_uri = db.Column(db.String(100))
    github_key = db.Column(db.String(100))
    team_id = db.Column(db.Integer, db.ForeignKey('teams.id'))
    team = relationship("Team", backref=backref("digestmonkey_config", uselist=False))
class PublishedDigest(db.Model):
    """A digest that was published as a MailChimp campaign.

    Stores both the raw event payload (``events_data``, JSON) and the
    event ids (Postgres integer array), the rendered preview, sender and
    list details, and the MailChimp campaign/web identifiers.
    """
    __tablename__ = 'digests'
    id = db.Column(db.Integer, primary_key=True)
    events_data = db.Column(JSON)
    events_ids = db.Column(ARRAY(db.Integer))
    template = db.Column(db.String(100))
    preview = db.Column(db.Text)
    # ``s_list`` is the MailChimp list id; ``s_list_name`` its display name.
    s_list = db.Column(db.String(20))
    s_list_name = db.Column(db.String(100))
    from_name = db.Column(db.String(100))
    from_email = db.Column(db.String(100))
    subject = db.Column(db.String(200))
    campaign_id = db.Column(db.String(20))
    web_id = db.Column(db.String(20))
    team_id = db.Column(db.Integer, db.ForeignKey('teams.id'))
    team = relationship("Team", backref=backref("digests"))
|
[
"roman.rader@gmail.com"
] |
roman.rader@gmail.com
|
ca9938e289f72a91088f1d3ffb1dd9dbee75ce3b
|
d5cc71ec7bbf2d6be0916e2c0a019501692979e6
|
/main.py
|
8024cd3359a72a1524430f87022d70076c750f9c
|
[] |
no_license
|
JellyWX/BattleTanks
|
208216df0bc0dc15a553d1624938060307690408
|
fc92ac40b126325b932e43721a803bef45f34a90
|
refs/heads/master
| 2020-12-30T23:36:59.030501
| 2018-02-19T13:20:09
| 2018-02-19T13:20:09
| 86,604,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,396
|
py
|
from gui import GUI
from tank import Tank
from bullet import Bullet
from tile import Tile, Flower, Crate, MiniCrate, WeaponCrate
from grid import Grid
from BaseClass import BaseClass
from imageLoader import imageLoader
from random import random
import math
import os
import pygame
import sys
# Optional CLI flag turns on hitbox debugging in the GUI.
for arg in sys.argv:
    if arg == '-dH' or arg == '--debug-hitboxes':
        GUI.debug = True
# Load sprites and build the window and the 16x16 tile grid.
imageloader = imageLoader('assets/images/')
gui = GUI(400,400,'Battle Tanks')
grid = Grid(16,16)
# Wire the shared singletons onto BaseClass so every entity can reach them.
BaseClass.grid = grid
BaseClass.gui = gui
BaseClass.images = imageloader
done = False
process_stage = 0  # 0 = gameplay stage; -1 signals quit (see stage()).
player = Tank(40,40)
player_sequence = [player]
render_sequence = [grid]
grid.Draw('park2')
grid.sortRenderingComponents()
def stage(n):
    """Run one tick of game stage *n* and return the next stage id.

    Returns -1 to signal quit (window closed, or unknown stage), else 0
    to stay in the gameplay stage. Reads the module-level ``player`` and
    ``player_sequence`` globals.
    """
    global gui
    # Window events: quit and live resize.
    for e in gui.event():
        if e.type == pygame.QUIT:
            return -1
        if e.type == pygame.VIDEORESIZE:
            gui.resize(e.dict['size'][0],e.dict['size'][1])
    if n == 0:
        # Vector from the player to the mouse cursor.
        dx = pygame.mouse.get_pos()[0] - player.x
        dy = pygame.mouse.get_pos()[1] - player.y
        rad_angle_turret = math.atan2(dx,dy)
        final_rotation_turret = rad_angle_turret*180/math.pi
        if gui.mouseAction(0):
            # Left button held: drive toward the cursor, unless it is
            # within the 8px dead zone around the tank.
            if not (-8 < dx < 8 and -8 < dy < 8):
                rad_angle = math.atan2(dy,dx)
                hyp_tank = math.sqrt(dx*dx + dy*dy)
                hyp_dis_x = dx / hyp_tank
                hyp_dis_y = dy / hyp_tank
                final_vec = (hyp_dis_x,hyp_dis_y)
                final_rotation = math.atan2(final_vec[0],final_vec[1])*180/math.pi
                player.move_cursor(final_vec,final_rotation+180)
        elif gui.keyAction(pygame.K_UP) or gui.keyAction(pygame.K_w):
            player.move_keys(1)
        elif gui.keyAction(pygame.K_DOWN) or gui.keyAction(pygame.K_s):
            player.move_keys(0)
        if gui.mouseAction(2):
            # Right button: fire a bullet along the unit vector to the cursor.
            hyp_bullet = math.sqrt(dx*dx + dy*dy)
            hyp_dis_x_bullet = dx / hyp_bullet
            hyp_dis_y_bullet = dy / hyp_bullet
            bullet_vec = (hyp_dis_x_bullet,hyp_dis_y_bullet)
            player.attack(bullet_vec)
        ## Rotate turret ##
        player.rotate_turret(final_rotation_turret+180)
        # Advance every in-flight bullet.
        for p in player_sequence:
            for b in p.bullets:
                b.move()
        return 0
    elif n == 1:
        return -1
# Main loop: step the current stage, then redraw everything at 64 FPS.
while not done:
    if process_stage == -1:
        done = True
    process_stage = stage(process_stage)
    gui.page.fill((0,0,0))
    for i in render_sequence:
        # Tanks render their bullets before the tank sprite itself.
        if isinstance(i,Tank):
            for b in i.bullets:
                b.render()
        i.render()
    gui.flip(64)
|
[
"judewrs@gmail.com"
] |
judewrs@gmail.com
|
eeff373e45e52f34ff7290461c61af68eb909dfb
|
a6e4a6f0a73d24a6ba957277899adbd9b84bd594
|
/sdk/python/pulumi_azure_native/databoxedge/v20190801/get_device_extended_information.py
|
820de0f3738b39d62317888cba5931c33eb4e51a
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
MisinformedDNA/pulumi-azure-native
|
9cbd75306e9c8f92abc25be3f73c113cb93865e9
|
de974fd984f7e98649951dbe80b4fc0603d03356
|
refs/heads/master
| 2023-03-24T22:02:03.842935
| 2021-03-08T21:16:19
| 2021-03-08T21:16:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,774
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
# Public API of this generated module.
__all__ = [
    'GetDeviceExtendedInformationResult',
    'AwaitableGetDeviceExtendedInformationResult',
    'get_device_extended_information',
]
@pulumi.output_type
class GetDeviceExtendedInformationResult:
    """
    The extended Info of the Data Box Edge/Gateway device.

    Generated Pulumi output type: each constructor argument is validated
    to be a string and stored via ``pulumi.set``; the ``@property``
    getters read the values back with ``pulumi.get``.
    """
    def __init__(__self__, encryption_key=None, encryption_key_thumbprint=None, id=None, name=None, resource_key=None, type=None):
        if encryption_key and not isinstance(encryption_key, str):
            raise TypeError("Expected argument 'encryption_key' to be a str")
        pulumi.set(__self__, "encryption_key", encryption_key)
        if encryption_key_thumbprint and not isinstance(encryption_key_thumbprint, str):
            raise TypeError("Expected argument 'encryption_key_thumbprint' to be a str")
        pulumi.set(__self__, "encryption_key_thumbprint", encryption_key_thumbprint)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if resource_key and not isinstance(resource_key, str):
            raise TypeError("Expected argument 'resource_key' to be a str")
        pulumi.set(__self__, "resource_key", resource_key)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="encryptionKey")
    def encryption_key(self) -> Optional[str]:
        """
        The public part of the encryption certificate. Client uses this to encrypt any secret.
        """
        return pulumi.get(self, "encryption_key")
    @property
    @pulumi.getter(name="encryptionKeyThumbprint")
    def encryption_key_thumbprint(self) -> Optional[str]:
        """
        The digital signature of encrypted certificate.
        """
        return pulumi.get(self, "encryption_key_thumbprint")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The path ID that uniquely identifies the object.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The object name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def resource_key(self) -> str:
        """
        The Resource ID of the Resource.
        """
        return pulumi.get(self, "resource_key")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The hierarchical type of the object.
        """
        return pulumi.get(self, "type")
class AwaitableGetDeviceExtendedInformationResult(GetDeviceExtendedInformationResult):
    # Awaitable wrapper generated by Pulumi: the unreachable ``yield``
    # turns __await__ into a generator that never suspends and simply
    # returns a plain result object.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetDeviceExtendedInformationResult(
            encryption_key=self.encryption_key,
            encryption_key_thumbprint=self.encryption_key_thumbprint,
            id=self.id,
            name=self.name,
            resource_key=self.resource_key,
            type=self.type)
def get_device_extended_information(device_name: Optional[str] = None,
                                    resource_group_name: Optional[str] = None,
                                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDeviceExtendedInformationResult:
    """
    The extended Info of the Data Box Edge/Gateway device.

    Invokes the ``databoxedge/v20190801:getDeviceExtendedInformation``
    provider function and wraps the result in an awaitable.

    :param str device_name: The device name.
    :param str resource_group_name: The resource group name.
    """
    __args__ = dict()
    __args__['deviceName'] = device_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:databoxedge/v20190801:getDeviceExtendedInformation', __args__, opts=opts, typ=GetDeviceExtendedInformationResult).value
    return AwaitableGetDeviceExtendedInformationResult(
        encryption_key=__ret__.encryption_key,
        encryption_key_thumbprint=__ret__.encryption_key_thumbprint,
        id=__ret__.id,
        name=__ret__.name,
        resource_key=__ret__.resource_key,
        type=__ret__.type)
|
[
"noreply@github.com"
] |
MisinformedDNA.noreply@github.com
|
2c1371f7dcb0284f2757359fe2e367bc9542b2f5
|
fcdfe976c9ed60b18def889692a17dc18a8dd6d7
|
/python/basic/arg_expr.py
|
e2978cda02c519982301970fb696eff2cc1c5580
|
[] |
no_license
|
akihikoy/ay_test
|
4907470889c9bda11cdc84e8231ef3156fda8bd7
|
a24dfb720960bfedb94be3b4d147e37616e7f39a
|
refs/heads/master
| 2023-09-02T19:24:47.832392
| 2023-08-27T06:45:20
| 2023-08-27T06:45:20
| 181,903,332
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
#!/usr/bin/python
#\file arg_expr.py
#\brief certain python script
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date Mar.14, 2016
# NOTE(review): Python 2 script (print statements); builds a one-argument
# lambda from the first command-line argument and evaluates it at 10
# random points in [0, 1).
import sys, random, math
if __name__=='__main__':
  s_expr= sys.argv[1]
  print 'arg[1]=',s_expr
  # SECURITY: eval() executes arbitrary code from the command line —
  # run only with trusted input.
  expr= eval('lambda x:'+s_expr)
  print 'expr=',expr
  for i in range(10):
    x= random.random()
    print 'expr({x})= {value}'.format(x=x, value=expr(x))
|
[
"info@akihikoy.net"
] |
info@akihikoy.net
|
7622573ee5a322ca35255c62269e353ba2ad8f81
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02418/s050810012.py
|
813576c56b0f65866e959ed464e23f400f903cdc
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
def check(s, p):
    """Return True if pattern *p* occurs in the ring (cyclic) text *s*.

    Every rotation start position is tried; indices wrap around with a
    modulo, so *p* may even be longer than *s* and still match by
    circling the ring more than once.
    """
    n = len(s)
    for start in range(n):
        if all(s[(start + off) % n] == ch for off, ch in enumerate(p)):
            return True
    return False
# Read the ring text and pattern from stdin and report whether the
# pattern occurs.  NOTE(review): raw_input() is Python 2; use input()
# on Python 3.
s = raw_input()
p = raw_input()
flag = check(s, p)
if flag:
    print("Yes")
else:
    print("No")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
82d4f8a638d84f91ab3f4cf61ad517ef8eeec04a
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/agc011/A/4811622.py
|
80e3e9613e7fb93e920fc7e005481f593391fd61
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
# Greedy grouping: after sorting the n values, walk left to right and
# start a new group whenever the current value is more than k away from
# the group's first element, or the group already holds c items.
# Prints the number of groups.  NOTE(review): assumes n >= 1 (result
# starts at 1 even for empty input).
n, c, k = map(int, input().split())
t = [int(input()) for i in range(n)]
t.sort()
result = 1
count = 0
f = 0  # index of the first element of the current group
for i in range(n):
    if (t[i] - t[f]) > k or c == count:
        result += 1
        count = 1
        f = i
    else:
        count += 1
print(result)
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
86fcf585e7121caa6968a3c5a0bfd281544770c3
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02410/s418924252.py
|
63557771bea67c49bf2723c66e25a07d7c4cec3a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 185
|
py
|
# Matrix-vector product: read an n x m integer matrix followed by an
# m-vector from stdin, then print each component of the product on its
# own line.
n,m=list(map(int,input().split()))
mA=[list(map(int,input().split())) for i in range(n)]
mB=[int(input()) for j in range(m)]
for ma in mA:
    print(sum([a*b for a, b in zip(ma,mB)]))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
975e35f4d46d96b077ccec902d83a50f0befc10b
|
6725eff72a6cf04c9cf62cb6f7f9df6373d5ceb5
|
/backend/eplan_18788/settings.py
|
fb957dea65dc264ef337508f1c3991c3f454f306
|
[] |
no_license
|
crowdbotics-apps/eplan-18788
|
ae064dc4fc95bb3f2eb2218402cf7375fd6b7273
|
652b38c70d36fe84eebd335dffed30b8e3ade581
|
refs/heads/master
| 2022-11-16T06:07:53.552260
| 2020-07-10T09:38:39
| 2020-07-10T09:38:39
| 278,595,440
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,785
|
py
|
"""
Django settings for eplan_18788 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'eplan_18788.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'eplan_18788.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
54a2ac39d5f82bb0261de10d4bd2935a611a6881
|
e3beccff804b034047dc50e0247c28b1606c7fdb
|
/bogo_sort.py
|
061ff6d79acf27c1b15d6d8a18c97d2dd614c9bf
|
[] |
no_license
|
Rutrle/algorithms
|
0434249a3d9616cc478697c78327f643166db3e7
|
bfd5237c6420b84b3e43d321530dc4778fdd79ca
|
refs/heads/master
| 2023-09-01T09:10:20.618467
| 2021-11-02T21:49:37
| 2021-11-02T21:49:37
| 357,696,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
import random
import sys
# Sample unsorted input for the demo run at the bottom of the file.
numbers = [1, 5, 8, 44, 6, 45, 468]
def is_sorted(values):
    """Return True when *values* is in non-decreasing order.

    Empty and single-element sequences count as sorted.
    """
    return all(a <= b for a, b in zip(values, values[1:]))
def bogo_sort(values):
    """Shuffle *values* in place until it happens to be sorted.

    Every failed attempt prints the shuffled list, and the total number
    of shuffles is printed before the (now sorted) list is returned.
    Expected running time is O(n * n!) — demonstration purposes only.
    """
    shuffles = 0
    while True:
        if is_sorted(values):
            break
        random.shuffle(values)
        print(values)
        shuffles += 1
    print(shuffles)
    return values
# Demo: sorts the 7-element sample list by random shuffling (slow!).
print(bogo_sort(numbers))
|
[
"rutrle@seznam.cz"
] |
rutrle@seznam.cz
|
32199bf70c40c64d9497a96e4c056000b9c9a54f
|
18508cea9458b2879017b44e6f18520cd8cf4f6c
|
/UCMDBPython/src/plugin_ntcmd_file_version.py
|
be371b707921cbbacd41e47fc3580ff9493e7e73
|
[] |
no_license
|
kvt11/dd-git
|
7d4935962e06d835ad0023c4abb185876a5a9e77
|
49aafa7081b861c5f6d0e1753b425e78948116d0
|
refs/heads/master
| 2022-11-23T19:03:19.763423
| 2016-04-04T14:54:18
| 2016-04-04T14:54:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 991
|
py
|
#coding=utf-8
import file_ver_lib
from plugins import Plugin
class FileVersionInformationPluginByNTCMD(Plugin):
    """UCMDB discovery plugin: resolve an application's version from the
    version resource of its process executable over NTCMD (Windows)."""
    def __init__(self):
        Plugin.__init__(self)
    def isApplicable(self, context):
        # Only applicable when the remote host runs Windows.
        client = context.client
        if client.isWinOs():
            return 1
        else:
            return 0
    def process(self, context):
        """Set ``application_version_number`` from the first process whose
        executable yields a file version (WMIC first, shell fallback)."""
        client = context.client
        applicationOsh = context.application.getOsh()
        processes = context.application.getProcesses()
        for process in processes:
            fullFileName = process.executablePath
            if fullFileName:
                fileVer = file_ver_lib.getWindowsWMICFileVer(client, fullFileName)
                if not fileVer:
                    fileVer = file_ver_lib.getWindowsShellFileVer(client, fullFileName)
                if fileVer:
                    applicationOsh.setAttribute("application_version_number", fileVer)
                    break
|
[
"bluesteelkc@gmail.com"
] |
bluesteelkc@gmail.com
|
7a8a85dc5a3a16c2d803052a8d902c7eb41278e1
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/rasbt_mlxtend/mlxtend-master/mlxtend/_base/_classifier.py
|
c28a3668f82e55c395cafb7d08c381bc0104d08e
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,243
|
py
|
# Sebastian Raschka 2014-2017
# mlxtend Machine Learning Library Extensions
#
# Base Clusteer (Clutering Parent Class)
# Author: Sebastian Raschka <sebastianraschka.com>
#
# License: BSD 3 clause
import numpy as np
class _Classifier(object):
def __init__(self):
pass
def _check_target_array(self, y, allowed=None):
if not np.issubdtype(y[0], int):
raise AttributeError('y must be an integer array.\nFound %s'
% y.dtype)
found_labels = np.unique(y)
if (found_labels < 0).any():
raise AttributeError('y array must not contain negative labels.'
'\nFound %s' % found_labels)
if allowed is not None:
found_labels = tuple(found_labels)
if found_labels not in allowed:
raise AttributeError('Labels not in %s.\nFound %s'
% (allowed, found_labels))
def score(self, X, y):
""" Compute the prediction accuracy
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (true class labels).
Returns
---------
acc : float
The prediction accuracy as a float
between 0.0 and 1.0 (perfect score).
"""
y_pred = self.predict(X)
acc = np.sum(y == y_pred, axis=0) / float(X.shape[0])
return acc
def fit(self, X, y, init_params=True):
"""Learn model from training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
init_params : bool (default: True)
Re-initializes model parameters prior to fitting.
Set False to continue training with weights from
a previous model fitting.
Returns
-------
self : object
"""
self._is_fitted = False
self._check_arrays(X=X, y=y)
self._check_target_array(y)
if hasattr(self, 'self.random_seed') and self.random_seed:
self._rgen = np.random.RandomState(self.random_seed)
self._fit(X=X, y=y, init_params=init_params)
self._is_fitted = True
return self
def predict(self, X):
"""Predict targets from X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
target_values : array-like, shape = [n_samples]
Predicted target values.
"""
self._check_arrays(X=X)
if not self._is_fitted:
raise AttributeError('Model is not fitted, yet.')
return self._predict(X)
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
c0a9380b65fa405b9ecd5afca937d85fc43dff4d
|
9be57e13dae005f7138879871cf4deb50bb32d3a
|
/tests/test_module.py
|
6925827ce0e2a141c347c564e0ce65f595c52349
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
shnizzedy/progressivis
|
28321e3187b49b9fe034bb1786729a4b15b4a519
|
d3e67925253ff3dc34dc72282ac82bb2a9571354
|
refs/heads/master
| 2021-05-30T21:50:41.702094
| 2016-05-13T07:45:39
| 2016-05-13T07:45:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
import unittest
from progressivis import *
class TestProgressiveModule(unittest.TestCase):
    """Smoke tests for Scheduler and Module registration."""
    def setUp(self):
        # Fresh scheduler per test so module registrations don't leak.
        self.scheduler = Scheduler()
    def test_scheduler(self):
        # A new scheduler starts with no registered modules.
        self.assertEqual(len(self.scheduler), 0)
    def test_module(self):
        # Creating a module registers it with the scheduler under its id.
        module = Module(id='a', scheduler=self.scheduler)
        self.assertEqual(module.id, 'a')
        self.assertEqual(self.scheduler.exists('a'), True)
if __name__ == '__main__':
    unittest.main()
|
[
"Jean-Daniel.Fekete@inria.fr"
] |
Jean-Daniel.Fekete@inria.fr
|
4e808b55d68fc959b93093eef571a1166e03efdd
|
d6b99ab3cc7108f4f0cc0be899641ac990e30db9
|
/split_string/split_string.py
|
2cf9b6dc6a0f81d094a6f334823d09ab0204003e
|
[] |
no_license
|
AsemAntar/codewars_problems
|
ef97e8a8058551276cdb943a07474cbeb9353c4d
|
c0ae0a769e16211c2b8e325d1116a6cebd3be016
|
refs/heads/master
| 2020-08-10T02:01:12.411030
| 2019-12-15T22:45:20
| 2019-12-15T22:45:20
| 214,229,082
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 925
|
py
|
# Author : Asem Antar Abdesamee
# Problem Description:
"""
Complete the solution so that it splits the string into pairs of two characters.
If the string contains an odd number of characters
then it should replace the missing second character of the final pair with an underscore ('_').
Examples:
solution('abc') # should return ['ab', 'c_']
solution('abcdef') # should return ['ab', 'cd', 'ef']
"""
"""
====================================
My Solution
====================================
"""
import re
def solution(s):
    """Split *s* into two-character chunks.

    An odd-length string is padded with a trailing underscore so the
    final pair is always complete; the empty string yields [].
    """
    if len(s) % 2:
        s += '_'
    return [s[i:i + 2] for i in range(0, len(s), 2)]
"""
====================================
Better Solution
====================================
"""
def solution2(s):
    """Regex variant of ``solution``: append a '_' pad and collect every
    two characters; for even-length input re.findall simply drops the
    lone trailing pad."""
    return re.findall(r"..", s + "_")
# Demo: odd-length input gets padded -> ['ab', 'cd', 'e_']
print(solution2('abcde'))
|
[
"asemantar@gmail.com"
] |
asemantar@gmail.com
|
56e10d0655c2f6f7366ce2a46d971413d855fa76
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/checkov/common/util/suppression.py
|
1240bb00dd13c5e937571f07d45e8b1364fc12da
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,199
|
py
|
from __future__ import annotations
import re
from collections.abc import Iterable
from checkov.common.bridgecrew.integration_features.features.policy_metadata_integration import (
integration as metadata_integration,
)
from checkov.common.comment.enum import COMMENT_REGEX
from checkov.common.models.enums import CheckResult
from checkov.common.typing import _CheckResult, _SkippedCheck
def collect_suppressions_for_report(code_lines: list[tuple[int, str]]) -> dict[str, _CheckResult]:
    """Searches for suppressions in a config block to be used in a report.

    Scans each (line_number, text) pair for a ``checkov:skip`` style
    comment and maps the suppressed check id to a SKIPPED result
    carrying the suppression comment (or a placeholder when none was
    written). A later suppression for the same id overrides an earlier
    one.
    """
    suppressions: dict[str, _CheckResult] = {}
    for _, text in code_lines:
        match = re.search(COMMENT_REGEX, text)
        if not match:
            continue
        comment = match.group(3)
        result: _CheckResult = {
            "result": CheckResult.SKIPPED,
            "suppress_comment": comment[1:] if comment else "No comment provided",
        }
        suppressions[match.group(2)] = result
    return suppressions
def collect_suppressions_for_context(code_lines: Iterable[tuple[int, int | str]]) -> list[_SkippedCheck]:
    """Searches for suppressions in a config block to be used in a context"""
    skipped_checks = []
    # Mapping from Bridgecrew (BC) check ids to checkov (CKV) ids; empty
    # when no platform metadata is available.
    bc_id_mapping = metadata_integration.bc_to_ckv_id_mapping
    for line_number, line_text in code_lines:
        skip_search = re.search(COMMENT_REGEX, str(line_text))
        if skip_search:
            skipped_check: _SkippedCheck = {
                "id": skip_search.group(2),
                # group(3) includes a leading separator character; strip it.
                "suppress_comment": skip_search.group(3)[1:] if skip_search.group(3) else "No comment provided",
                "line_number": line_number
            }
            # No matter which ID was used to skip, save the pair of IDs in the appropriate fields
            if bc_id_mapping and skipped_check["id"] in bc_id_mapping:
                # The author skipped by BC id: translate it to the CKV id.
                skipped_check["bc_id"] = skipped_check["id"]
                skipped_check["id"] = bc_id_mapping[skipped_check["id"]]
            elif metadata_integration.check_metadata:
                # The author skipped by CKV id: look up the matching BC id.
                skipped_check["bc_id"] = metadata_integration.get_bc_id(skipped_check["id"])
            skipped_checks.append(skipped_check)
    return skipped_checks
|
[
"noreply@github.com"
] |
bridgecrewio.noreply@github.com
|
5c8b9bbaeea57e3892988d55ad34cbcfa836aba7
|
9d5ae8cc5f53f5aee7247be69142d9118769d395
|
/419. Battleships in a Board.py
|
0dd34567666bfd5777b15f6d650c4bcb404e31ef
|
[] |
no_license
|
BITMystery/leetcode-journey
|
d4c93319bb555a7e47e62b8b974a2f77578bc760
|
616939d1599b5a135747b0c4dd1f989974835f40
|
refs/heads/master
| 2020-05-24T08:15:30.207996
| 2017-10-21T06:33:17
| 2017-10-21T06:33:17
| 84,839,304
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,799
|
py
|
class Solution(object):
    def countBattleships(self, board):
        """
        :type board: List[List[str]]
        :rtype: int
        """
        # Endpoint-counting approach: a single-cell ship contributes 2
        # (it is both head and tail), a head or tail cell contributes 1
        # (exactly one 'X' neighbour) and an inner cell contributes 0
        # (two 'X' neighbours).  Every ship therefore contributes exactly
        # 2 to the sum, so halving it yields the fleet size.
        rows = len(board)
        cols = len(board[0])
        endpoints = 0
        for i in range(rows):
            for j in range(cols):
                if board[i][j] != 'X':
                    continue
                adjacent = 0
                if i > 0 and board[i - 1][j] == 'X':
                    adjacent += 1
                if i < rows - 1 and board[i + 1][j] == 'X':
                    adjacent += 1
                if j > 0 and board[i][j - 1] == 'X':
                    adjacent += 1
                if j < cols - 1 and board[i][j + 1] == 'X':
                    adjacent += 1
                if adjacent == 0:
                    endpoints += 2
                elif adjacent == 1:
                    endpoints += 1
        return endpoints / 2
class Solution_2(object):
    def countBattleships(self, board):
        """
        :type board: List[List[str]]
        :rtype: int
        """
        # Single-scan approach: a cell starts a ship exactly when it is an
        # 'X' whose upper and left neighbours are off-board or water, so
        # counting such cells counts the ships.
        ships = 0
        height = len(board)
        width = len(board[0])
        for i in range(height):
            for j in range(width):
                if board[i][j] != 'X':
                    continue
                if i > 0 and board[i - 1][j] != '.':
                    continue
                if j > 0 and board[i][j - 1] != '.':
                    continue
                ships += 1
        return ships
# Demo run (expected output: 2).
# NOTE(review): Python 2 print statement; use print(...) under Python 3.
s = Solution()
print s.countBattleships(['X..X', '...X', '...X'])
|
[
"noreply@github.com"
] |
BITMystery.noreply@github.com
|
846b82d516567cd220e94d37409b58410512e50e
|
2846e9aae639966796395a92bfe8ac06315f22b0
|
/leetcode/ino/prob_400/prob_400_nth_digit(2).py
|
7f74b0e0dc8cd07d8cb1a46c3033b1ceede09a87
|
[] |
no_license
|
sglim/inno-study
|
6b8b454da4977be5ffb53d6862f3e8f2177bb077
|
456a3dd62b429037587cd23ed847ac316aa723dd
|
refs/heads/master
| 2020-05-24T07:36:42.717567
| 2017-06-27T02:49:25
| 2017-06-27T02:49:25
| 84,835,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,026
|
py
|
class Solution(object):
    def findNthDigit(self, n):
        """
        :type n: int
        :rtype: int

        Returns the nth digit (1-indexed) of the infinite sequence
        1 2 3 4 5 6 7 8 9 10 11 ... written without separators.
        """
        # Digits 1..9 map directly to themselves.
        if n < 10:
            return n
        # Find the digit-length block containing position n: there are
        # 9 * 10**(d-1) numbers with d digits, contributing
        # d * 9 * 10**(d-1) digits each.  After the loop, k is the
        # offset of n within its block and order_idx its digit length.
        order_idx = 1
        temp = n
        k = 0
        while temp > 0:
            k = temp
            temp -= order_idx * 9 * 10**(order_idx - 1)
            order_idx += 1
        order_idx -= 1
        # now nth digit is located kth position of order_idx's range
        n_include_num = 10**(order_idx - 1) + (k//order_idx) - 1
        if k % order_idx != 0:
            n_include_num += 1
        #target digit is located 'order_idx + 1 - k%order_idx'th from back of n_include_num
        back = 0
        if k % order_idx == 0:
            back = 1
        else:
            back = (order_idx + 1) - (k % order_idx)
        # Peel `back` digits off the right end of the containing number.
        result = 0
        for i in range(back):
            result = n_include_num % 10
            n_include_num //= 10
        return result
# Demo: evaluates the largest 32-bit signed position.
n = 2147483647
# n = 17
obj = Solution()
print(obj.findNthDigit(n))
# print(2**31-1)
|
[
"inoh.jung@gmail.com"
] |
inoh.jung@gmail.com
|
4da63feb93d7c27541efed91f449f67ea88a4fc2
|
e906fe8237e5b55b7bef1f7a87884c5924ccd8b1
|
/contactmps/context_processors.py
|
98c239bbb478844afc111d5142b05c0474145d13
|
[
"MIT"
] |
permissive
|
OpenUpSA/contact-mps
|
ac9a88ef166769d6305e213f3d77191f385c962a
|
63d7f86e1b6c9319a4d0344a6125cd22770f34c7
|
refs/heads/master
| 2022-12-11T07:22:20.942567
| 2020-01-15T13:11:59
| 2020-01-15T13:11:59
| 93,042,651
| 0
| 2
|
MIT
| 2022-12-08T02:08:08
| 2017-06-01T09:52:56
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 750
|
py
|
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
def general(request):
    """Expose site-wide settings to templates.

    Returns a context dict with the absolute base URL of the current site
    and, when DEBUG is off and an ID is configured, the Google Analytics ID.
    """
    context = {
        'BASE_URL': "https://%s" % get_current_site(request).domain,
    }
    tracking_id = getattr(settings, 'GOOGLE_ANALYTICS_ID', False)
    if tracking_id and not settings.DEBUG:
        context['GOOGLE_ANALYTICS_ID'] = tracking_id
    return context
def is_mobile(request):
    """Template context flag: 'is_mobile' is True when the request's
    User-Agent header contains a known mobile device keyword."""
    ua = request.META.get('HTTP_USER_AGENT', '').lower()
    keywords = ('ipad', 'ipod', 'iphone', 'android', 'blackberry')
    return {
        'is_mobile': any(keyword in ua for keyword in keywords),
    }
|
[
"jbothma@gmail.com"
] |
jbothma@gmail.com
|
198758c611b2f754df74f3b1587d1c8ef5e8c7fd
|
ec1f8cdbf52bcc5516a833e02ac99301a1664ed9
|
/wordclasses/wctool.py
|
41aa852ee84d3a9b2979e3e11015d5b6c7da6751
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
senarvi/theanolm
|
8fe85dcf07358a331807b9002a56b6089d5f0ff3
|
9904faec19ad5718470f21927229aad2656e5686
|
refs/heads/master
| 2023-06-24T10:39:21.985241
| 2023-06-12T06:55:26
| 2023-06-12T06:55:26
| 42,454,187
| 95
| 37
|
Apache-2.0
| 2020-11-05T11:22:31
| 2015-09-14T14:35:54
|
Python
|
UTF-8
|
Python
| false
| false
| 6,849
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import argparse
import logging
from time import time
from theanolm.backend import TextFileType
from wordclasses import TheanoBigramOptimizer, NumpyBigramOptimizer
from theanolm.vocabulary import Vocabulary
from theanolm.vocabulary import compute_word_counts, BigramStatistics
from wordclasses.functions import is_scheduled
def save(optimizer, output_file, output_format):
    """Writes the current classes to a file.

    If the output file is seekable, first rewinds and truncates the file.

    :type optimizer: BigramOptimizer
    :param optimizer: save the current state of this optimizer

    :type output_file: file object
    :param output_file: a file or stream where to save the classes

    :type output_format: str
    :param output_format: either "classes" or "srilm-classes" - selects the
                          output file format

    :raises ValueError: if ``output_format`` is not a recognized format
    """

    # Fail fast on an unknown format instead of silently producing an
    # empty/truncated file (the loop below would match neither branch).
    if output_format not in ('classes', 'srilm-classes'):
        raise ValueError("Invalid output format requested: " + output_format)

    if output_file.seekable():
        output_file.seek(0)
        output_file.truncate()

    for word, class_id, prob in optimizer.words():
        if output_format == 'classes':
            output_file.write('{} {}\n'.format(word, class_id))
        elif output_format == 'srilm-classes':
            output_file.write('CLASS-{:05d} {} {}\n'.format(class_id, prob, word))
def main():
    """Command-line entry point: learn word classes from a training corpus.

    Parses command-line arguments, builds or loads a vocabulary, collects
    bigram statistics, then repeatedly moves each word to its best class
    until a full pass makes no moves, saving the classes along the way.
    """

    parser = argparse.ArgumentParser(prog='wctool')

    argument_group = parser.add_argument_group("files")
    argument_group.add_argument(
        '--training-set', metavar='FILE', type=TextFileType('r'),
        nargs='+', required=True,
        help='text or .gz files containing training data (one sentence per '
             'line)')
    argument_group.add_argument(
        '--vocabulary', metavar='FILE', type=TextFileType('r'), default=None,
        help='text or .gz file containing a list of words to include in class '
             'forming, and possibly their initial classes')
    argument_group.add_argument(
        '--vocabulary-format', metavar='FORMAT', type=str, default='words',
        help='vocabulary format, one of "words" (one word per line, default), '
             '"classes" (word and class ID per line), "srilm-classes" (class '
             'name, membership probability, and word per line)')
    argument_group.add_argument(
        '--output-file', metavar='FILE', type=TextFileType('w'), default='-',
        help='where to write the word classes (default stdout)')
    argument_group.add_argument(
        '--output-format', metavar='FORMAT', type=str, default='srilm-classes',
        help='format of the output file, one of "classes" (word and class ID '
             'per line), "srilm-classes" (default; class name, membership '
             'probability, and word per line)')
    # Fixed: the default was the string '1' for an int-typed option; use a
    # proper integer default.
    argument_group.add_argument(
        '--output-frequency', metavar='N', type=int, default=1,
        help='save classes N times per optimization iteration (default 1)')

    argument_group = parser.add_argument_group("optimization")
    argument_group.add_argument(
        '--num-classes', metavar='N', type=int, default=2000,
        help='number of classes to form, if vocabulary is not specified '
             '(default 2000)')
    argument_group.add_argument(
        '--method', metavar='NAME', type=str, default='bigram-theano',
        help='method for creating word classes, one of "bigram-theano", '
             '"bigram-numpy" (default "bigram-theano")')

    argument_group = parser.add_argument_group("logging and debugging")
    argument_group.add_argument(
        '--log-file', metavar='FILE', type=str, default='-',
        help='path where to write log file (default is standard output)')
    argument_group.add_argument(
        '--log-level', metavar='LEVEL', type=str, default='info',
        help='minimum level of events to log, one of "debug", "info", "warn" '
             '(default "info")')
    argument_group.add_argument(
        '--log-interval', metavar='N', type=int, default=1000,
        help='print statistics after every Nth word; quiet if less than one '
             '(default 1000)')

    args = parser.parse_args()

    log_file = args.log_file
    # Map the textual level name to the numeric logging constant.
    log_level = getattr(logging, args.log_level.upper(), None)
    if not isinstance(log_level, int):
        raise ValueError("Invalid logging level requested: " + args.log_level)
    log_format = '%(asctime)s %(funcName)s: %(message)s'
    if args.log_file == '-':
        logging.basicConfig(stream=sys.stdout, format=log_format,
                            level=log_level)
    else:
        logging.basicConfig(filename=log_file, format=log_format,
                            level=log_level)

    if args.vocabulary is None:
        # No vocabulary given: derive one from corpus word counts, then
        # rewind the training files since counting consumed them.
        word_counts = compute_word_counts(args.training_set)
        vocabulary = Vocabulary.from_word_counts(word_counts,
                                                 args.num_classes)
        for subset_file in args.training_set:
            subset_file.seek(0)
    else:
        vocabulary = Vocabulary.from_file(args.vocabulary,
                                          args.vocabulary_format)

    print("Number of words in vocabulary:", vocabulary.num_shortlist_words())
    print("Number of word classes:", vocabulary.num_classes())
    print("Number of normal word classes:", vocabulary.num_normal_classes)

    logging.info("Reading word unigram and bigram statistics.")
    statistics = BigramStatistics(args.training_set, vocabulary)

    if args.method == 'bigram-theano':
        optimizer = TheanoBigramOptimizer(statistics, vocabulary)
    elif args.method == 'bigram-numpy':
        optimizer = NumpyBigramOptimizer(statistics, vocabulary)
    else:
        raise ValueError("Invalid method requested: " + args.method)

    # Repeat full passes over the vocabulary until no word moves.
    iteration = 1
    while True:
        logging.info("Starting iteration %d.", iteration)
        num_words = 0
        num_moves = 0
        for word in vocabulary.words():
            start_time = time()
            num_words += 1
            if optimizer.move_to_best_class(word):
                num_moves += 1
            duration = time() - start_time
            if (args.log_interval >= 1) and \
               (num_words % args.log_interval == 0):
                logging.info("[%d] (%.1f %%) of iteration %d -- moves = %d, cost = %.2f, duration = %.1f ms",
                             num_words,
                             num_words / vocabulary.num_shortlist_words() * 100,
                             iteration,
                             num_moves,
                             optimizer.log_likelihood(),
                             # Bug fix: seconds -> milliseconds is *1000;
                             # the previous *100 under-reported by 10x.
                             duration * 1000)
            if is_scheduled(num_words,
                            args.output_frequency,
                            vocabulary.num_shortlist_words()):
                save(optimizer, args.output_file, args.output_format)

        if num_moves == 0:
            break
        iteration += 1

    logging.info("Optimization finished.")
    save(optimizer, args.output_file, args.output_format)
|
[
"seppo.git@marjaniemi.com"
] |
seppo.git@marjaniemi.com
|
2038dd0e6d0049b70a0b0d8ef36745acc98d4064
|
fe826833d207ced7b01d8aef4922da58614846ca
|
/demo/NavierStokesDrivenCavity.py
|
7202e6a95247bd4863a6e921199545fc994f8544
|
[
"BSD-2-Clause"
] |
permissive
|
snytav/shenfun
|
ce56d912a38beef3f4df3072708a1f9aa5370e75
|
67844cb75e21488d7ab43bf0caa21dfbdc057395
|
refs/heads/master
| 2022-12-14T21:21:44.895648
| 2020-09-16T13:40:54
| 2020-09-16T13:40:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,818
|
py
|
r"""Solve Navier-Stokes equations for the lid driven cavity using a coupled
formulation
The equations are in strong form
.. math::
\nu\nabla^2 u - \nabla p &= (u \cdot \nabla) u) \\
\nabla \cdot u &= 0 \\
i\bs{u}(x, y=1) = (1, 0) \, &\text{ or }\, \bs{u}(x, y=1) = ((1-x)^2(1+x)^2, 0) \\
u(x, y=-1) &= 0 \\
u(x=\pm 1, y) &= 0
In addition we require :math:`\int p d\ = 0`, which is achieved by
fixing the coefficient :math:`\hat{p}_{0, 0} = 0`.
We use a tensorproductspace with a composite Legendre for the Dirichlet space
and a regular Legendre for the pressure space.
To remove all nullspaces we use a P_{N} x P_{N-2} basis, with P_{N-2} for the
pressure.
"""
import os
import sys
import time
import numpy as np
from scipy.sparse.linalg import splu
import sympy
from shenfun import *
assert comm.Get_size() == 1, "Two non-periodic directions only have solver implemented for serial"
Re = 10.
nu = 2./Re
alfa = 0.2 # underrelaxation factor
N = (46, 46)
family = 'Chebyshev'
#family = 'Legendre'
quad = 'GC'
x = sympy.symbols('x', real='True')
D0X = FunctionSpace(N[0], family, quad=quad, bc=(0, 0))
#D1Y = FunctionSpace(N[1], family, quad=quad, bc=(0, 1))
D1Y = FunctionSpace(N[1], family, quad=quad, bc=(0, (1-x)**2*(1+x)**2))
D0Y = FunctionSpace(N[1], family, quad=quad, bc=(0, 0))
PX = FunctionSpace(N[0], family, quad=quad)
PY = FunctionSpace(N[1], family, quad=quad)
# Create tensor product spaces with different combination of bases
V1 = TensorProductSpace(comm, (D0X, D1Y))
V0 = TensorProductSpace(comm, (D0X, D0Y))
P = TensorProductSpace(comm, (PX, PY), modify_spaces_inplace=True)
# To get a P_N x P_{N-2} space, just pick the first N-2 items of the pressure basis
# Note that this effectively sets P_N and P_{N-1} to zero, but still the basis uses
# the same quadrature points as the Dirichlet basis, which is required for the inner
# products.
PX.slice = lambda: slice(0, PX.N-2)
PY.slice = lambda: slice(0, PY.N-2)
# Create vector space for velocity
W1 = VectorSpace([V1, V0])
# Create mixed space for total solution
VQ = CompositeSpace([W1, P]) # for velocity and pressure
# Create padded spaces for nonlinearity
V1p = V1.get_dealiased((1.5, 1.5))
V0p = V0.get_dealiased((1.5, 1.5))
#V1p = V1.get_dealiased(dealias_direct=True)
#V0p = V0.get_dealiased(dealias_direct=True)
#V1p = V1 # Or do not use dealiasing at all. Makes very little difference here
#V0p = V0
W1p = VectorSpace([V1p, V0p])
W0p = VectorSpace([V0p, V0p])
QTp = TensorSpace([W1p, W0p]) # for uiuj
up = TrialFunction(VQ)
vq = TestFunction(VQ)
u, p = up
v, q = vq
# Assemble blocks of the complete block matrix
if family.lower() == 'legendre':
A00 = inner(grad(v), -nu*grad(u))
A01 = inner(div(v), p)
else:
A00 = inner(v, nu*div(grad(u)))
A01 = inner(v, -grad(p))
A10 = inner(q, div(u))
# Extract the boundary matrices
bc_mats = extract_bc_matrices([A00, A01, A10])
# Create Block matrix
M = BlockMatrix(A00+A01+A10)
# Create Function to hold solution
uh_hat = Function(VQ).set_boundary_dofs()
ui_hat = uh_hat[0]
# New solution (iterative)
uh_new = Function(VQ).set_boundary_dofs()
ui_new = uh_new[0]
# Compute the constant contribution to rhs due to nonhomogeneous boundary conditions
bh_hat0 = Function(VQ)
BM = BlockMatrix(bc_mats)
bh_hat0 = BM.matvec(-uh_hat, bh_hat0)
bi_hat0 = bh_hat0[0]
# Create regular work arrays for right hand side. (Note that bc part will not be used so we can use Q)
bh_hat = Function(VQ)
# Create arrays to hold velocity vector solution
ui = Array(W1)
uip = Array(W1p)
# Create work arrays for nonlinear part
uiuj = Array(QTp)
uiuj_hat = Function(QTp)
def compute_rhs(ui_hat, bh_hat):
    """Assemble the right-hand side for the coupled velocity/pressure solve.

    Evaluates the nonlinear convection term as inner(v, div(u_i u_j)) in
    spectral space and adds the constant boundary-condition contribution
    ``bh_hat0``. Mutates and returns ``bh_hat``; also refreshes the
    module-level work arrays ``uip``, ``uiuj`` and ``uiuj_hat``.
    """
    global uip, uiuj, uiuj_hat, W1p
    bh_hat.fill(0)
    # Transform velocity to physical space on the (possibly padded) grid.
    uip = W1p.backward(ui_hat, uip)
    # Outer product u_i u_j in physical space, then back to spectral space.
    uiuj = outer(uip, uip, uiuj)
    uiuj_hat = uiuj.forward(uiuj_hat)
    bi_hat = bh_hat[0]
    bi_hat = inner(v, div(uiuj_hat), output_array=bi_hat)
    #bi_hat = inner(grad(v), -uiuj_hat, output_array=bi_hat)
    # Constant contribution from the nonhomogeneous boundary conditions.
    bh_hat += bh_hat0
    return bh_hat
uh_hat, Ai = M.solve(bh_hat0, u=uh_hat, constraints=((2, 0, 0),), return_system=True) # Constraint for component 2 of mixed space
Alu = splu(Ai)
uh_new[:] = uh_hat
converged = False
count = 0
max_count = 1000
if 'pytest' in os.environ:
max_count = 1
t0 = time.time()
while not converged:
count += 1
bh_hat = compute_rhs(ui_hat, bh_hat)
uh_new = M.solve(bh_hat, u=uh_new, constraints=((2, 0, 0),), Alu=Alu) # Constraint for component 2 of mixed space
error = np.linalg.norm(ui_hat-ui_new)
uh_hat[:] = alfa*uh_new + (1-alfa)*uh_hat
converged = abs(error) < 1e-11 or count >= max_count
if count % 1 == 0:
print('Iteration %d Error %2.4e' %(count, error))
print('Time ', time.time()-t0)
# Move solution to regular Function
up = Array(VQ)
up = uh_hat.backward(up)
u_, p_ = up
if 'pytest' in os.environ: sys.exit(0)
# Postprocessing
# Solve streamfunction
r = TestFunction(V0)
s = TrialFunction(V0)
S = inner(r, div(grad(s)))
h = inner(r, -curl(ui_hat))
H = la.SolverGeneric2ND(S)
phi_h = H(h)
phi = phi_h.backward()
# Compute vorticity
PX.slice = lambda: slice(0, PX.N)
PY.slice = lambda: slice(0, PY.N)
w_h = Function(P)
w_h = project(curl(ui_hat), P, output_array=w_h)
#p0 = np.array([[0.], [0.]])
#print(w_h.eval(p0)*2)
# Find minimal streamfunction value and position
# by gradually zooming in on mesh
W = 101
converged = False
xmid, ymid = 0, 0
dx = 1
psi_old = 0
count = 0
y, x = np.meshgrid(np.linspace(ymid-dx, ymid+dx, W), np.linspace(xmid-dx, xmid+dx, W))
points = np.vstack((x.flatten(), y.flatten()))
pp = phi_h.eval(points).reshape((W, W))
while not converged:
yr, xr = np.meshgrid(np.linspace(ymid-dx, ymid+dx, W), np.linspace(xmid-dx, xmid+dx, W))
points = np.vstack((xr.flatten(), yr.flatten()))
pr = phi_h.eval(points).reshape((W, W))
xi, yi = pr.argmin()//W, pr.argmin()%W
psi_min, xmid, ymid = pr.min()/2, xr[xi, yi], yr[xi, yi]
err = abs(psi_min-psi_old)
converged = err < 1e-12 or count > 10
psi_old = psi_min
dx = dx/4.
print("%d %d " %(xi, yi) +("%+2.7e "*4) %(xmid, ymid, psi_min, err))
count += 1
import matplotlib.pyplot as plt
#f = open('plot_u_y_Ghia{}.csv'.format(int(Re)))
#g = np.loadtxt(f, skiprows=1, delimiter=',')
#plt.figure()
#y = 2*(g[:, 0]-0.5)
#plt.plot(y, g[:, 1], 'r+')
X = V0.local_mesh(True)
#x = np.vstack([np.zeros(N[0]), X[1][0]])
#res = ui_hat[0].eval(x)
#plt.plot(x[1], res)
#res2 = ui_hat[0].eval(np.vstack([np.zeros(len(y)), y]))
#plt.plot(y, res2, 'bs', mfc='None')
plt.figure()
plt.contourf(X[0], X[1], p_, 100)
plt.figure()
plt.quiver(X[0], X[1], u_[0], u_[1])
plt.figure()
plt.spy(M.diags())
plt.figure()
plt.contourf(X[0], X[1], u_[0], 100)
plt.figure()
plt.contourf(X[0], X[1], u_[1], 100)
#plt.figure()
#plt.contour(x, y, pp, 100)
#plt.title('Streamfunction')
plt.show()
|
[
"mikaem@math.uio.no"
] |
mikaem@math.uio.no
|
dd6415024aa3cbcb67c72fdd6d5982cb3f90a182
|
5234bc430c83d616a8214d7f77c2c081543b6b26
|
/src/Python/801-900/824.GoatLatin.py
|
3a2bc8c0e7db0df5b73e1507777865a28111ca71
|
[
"Apache-2.0"
] |
permissive
|
AveryHuo/PeefyLeetCode
|
3e749b962cadfdf10d7f7b1ed21c5fafc4342950
|
92156e4b48ba19e3f02e4286b9f733e9769a1dee
|
refs/heads/master
| 2022-04-26T06:01:18.547761
| 2020-04-25T09:55:46
| 2020-04-25T09:55:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 684
|
py
|
class Solution:
    def toGoatLatin(self, S):
        """
        Convert a sentence to "Goat Latin".

        Words starting with a vowel get "ma" appended; otherwise the first
        letter moves to the end before "ma". The k-th word (1-based) also
        gets k trailing 'a' characters.

        :type S: str
        :rtype: str
        """
        vowels = set('aeiouAEIOU')
        converted = []
        for position, word in enumerate(S.split(' '), start=1):
            if word[0] not in vowels:
                word = word[1:] + word[0]
            converted.append(word + "ma" + "a" * position)
        return ' '.join(converted)
if __name__ == '__main__':
    # Smoke test with the two LeetCode example sentences.
    solution = Solution()
    print(solution.toGoatLatin("I speak Goat Latin"))
    print(solution.toGoatLatin("The quick brown fox jumped over the lazy dog"))
else:
    pass
|
[
"xpf6677@163.com"
] |
xpf6677@163.com
|
e33a1aba6d98fe98d0dc48d2d88c120aa113de68
|
3f6c16ea158a8fb4318b8f069156f1c8d5cff576
|
/.PyCharm2019.1/system/python_stubs/-1850396913/pyexpat.py
|
d8a525a88b3a33d69b9d4060d8306489c2477a02
|
[] |
no_license
|
sarthak-patidar/dotfiles
|
08494170d2c0fedc0bbe719cc7c60263ce6fd095
|
b62cd46f3491fd3f50c704f0255730af682d1f80
|
refs/heads/master
| 2020-06-28T23:42:17.236273
| 2019-10-01T13:56:27
| 2019-10-01T13:56:27
| 200,369,900
| 0
| 0
| null | 2019-08-03T12:56:33
| 2019-08-03T11:53:29
|
Shell
|
UTF-8
|
Python
| false
| false
| 7,337
|
py
|
# encoding: utf-8
# module pyexpat
# from (built-in)
# by generator 1.147
""" Python wrapper for Expat parser. """
# imports
import pyexpat.errors as errors # <module 'pyexpat.errors'>
import pyexpat.model as model # <module 'pyexpat.model'>
# Variables with simple values
EXPAT_VERSION = 'expat_2.2.5'
native_encoding = 'UTF-8'
XML_PARAM_ENTITY_PARSING_ALWAYS = 2
XML_PARAM_ENTITY_PARSING_NEVER = 0
XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE = 1
# functions
def ErrorString(*args, **kwargs): # real signature unknown
""" Returns string error for given number. """
pass
def ParserCreate(*args, **kwargs): # real signature unknown
""" Return a new XML parser object. """
pass
# classes
class ExpatError(Exception):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
error = ExpatError
class XMLParserType(object):
""" XML parser """
def ExternalEntityParserCreate(self, *args, **kwargs): # real signature unknown
""" Create a parser for parsing an external entity based on the information passed to the ExternalEntityRefHandler. """
pass
def GetBase(self, *args, **kwargs): # real signature unknown
""" Return base URL string for the parser. """
pass
def GetInputContext(self, *args, **kwargs): # real signature unknown
"""
Return the untranslated text of the input that caused the current event.
If the event was generated by a large amount of text (such as a start tag
for an element with many attributes), not all of the text may be available.
"""
pass
def Parse(self, *args, **kwargs): # real signature unknown
"""
Parse XML data.
`isfinal' should be true at end of input.
"""
pass
def ParseFile(self, *args, **kwargs): # real signature unknown
""" Parse XML data from file-like object. """
pass
def SetBase(self, *args, **kwargs): # real signature unknown
""" Set the base URL for the parser. """
pass
def SetParamEntityParsing(self, *args, **kwargs): # real signature unknown
"""
Controls parsing of parameter entities (including the external DTD subset).
Possible flag values are XML_PARAM_ENTITY_PARSING_NEVER,
XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE and
XML_PARAM_ENTITY_PARSING_ALWAYS. Returns true if setting the flag
was successful.
"""
pass
def UseForeignDTD(self, *args, **kwargs): # real signature unknown
"""
Allows the application to provide an artificial external subset if one is not specified as part of the document instance.
This readily allows the use of a 'default' document type controlled by the
application, while still getting the advantage of providing document type
information to the parser. 'flag' defaults to True if not provided.
"""
pass
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
class __loader__(object):
"""
Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def create_module(cls, *args, **kwargs): # real signature unknown
""" Create a built-in module """
pass
@classmethod
def exec_module(cls, *args, **kwargs): # real signature unknown
""" Exec a built-in module """
pass
@classmethod
def find_module(cls, *args, **kwargs): # real signature unknown
"""
Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
pass
@classmethod
def find_spec(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def get_code(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have code objects. """
pass
@classmethod
def get_source(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have source code. """
pass
@classmethod
def is_package(cls, *args, **kwargs): # real signature unknown
""" Return False as built-in modules are never packages. """
pass
@classmethod
def load_module(cls, *args, **kwargs): # real signature unknown
"""
Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module instead.
"""
pass
def module_repr(module): # reliably restored by inspect
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is "mappingproxy({'__module__': '_frozen_importlib', '__doc__': 'Meta path import for built-in modules.\\n\\n All methods are either class or static methods to avoid the need to\\n instantiate the class.\\n\\n ', 'module_repr': <staticmethod object at 0x7ffb5f337048>, 'find_spec': <classmethod object at 0x7ffb5f337080>, 'find_module': <classmethod object at 0x7ffb5f3370b8>, 'create_module': <classmethod object at 0x7ffb5f3370f0>, 'exec_module': <classmethod object at 0x7ffb5f337128>, 'get_code': <classmethod object at 0x7ffb5f337198>, 'get_source': <classmethod object at 0x7ffb5f337208>, 'is_package': <classmethod object at 0x7ffb5f337278>, 'load_module': <classmethod object at 0x7ffb5f3372b0>, '__dict__': <attribute '__dict__' of 'BuiltinImporter' objects>, '__weakref__': <attribute '__weakref__' of 'BuiltinImporter' objects>})"
# variables with complex values
expat_CAPI = None # (!) real value is '<capsule object "pyexpat.expat_CAPI" at 0x7ffb5c5df630>'
features = [
(
'sizeof(XML_Char)',
1,
),
(
'sizeof(XML_LChar)',
1,
),
(
'XML_DTD',
0,
),
(
'XML_CONTEXT_BYTES',
1024,
),
(
'XML_NS',
0,
),
]
version_info = (
2,
2,
5,
)
__spec__ = None # (!) real value is "ModuleSpec(name='pyexpat', loader=<class '_frozen_importlib.BuiltinImporter'>, origin='built-in')"
|
[
"sarthakpatidar15@gmail.com"
] |
sarthakpatidar15@gmail.com
|
1e4bb90e0f5856138c8ab6c6996f0479a227feb7
|
28541d61368a14a0d5003db4cc07fed21b40c41f
|
/Chapter-4/depth_search2.py
|
e0dc1ae6ff91822c949a2e637ca6c0eb3ca7512a
|
[] |
no_license
|
eizin6389/python_algorithm
|
390861f9342ce907f2cda0b45b84d364bcba7541
|
abf3588ed97a343b6559eb5d69156708d42bc243
|
refs/heads/master
| 2022-12-06T20:48:49.470312
| 2020-08-14T13:29:26
| 2020-08-14T13:29:26
| 282,905,077
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
# Adjacency list of a complete binary tree: tree[i] lists node i's children.
tree = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
        [], [], [], [], [], [], [], []]


def search(pos):
    """Depth-first post-order traversal: visit all children, then print pos."""
    for child in tree[pos]:
        search(child)
    print(pos, end=' ')


search(0)
|
[
"hide@matsumotohideto-no-MacBook-Pro.local"
] |
hide@matsumotohideto-no-MacBook-Pro.local
|
82e54a6fd231e57a350d7ae8277a496efb65f8b6
|
0aa7255bf5df6b11ad929ec313019d734e67afb8
|
/LMS/blog/models/blog_tag.py
|
a62cc44a40ac7f850e5d91dc6384f1bac7a19239
|
[] |
no_license
|
arafat08007/Learning-management-system-by-jaki
|
c4e3f34061b527a09cdbc86d5ec8547074774189
|
928bdf4b8a0f408a17fa0c3d9b8bb6d77ef285e8
|
refs/heads/master
| 2022-11-16T06:04:22.016667
| 2020-06-19T09:23:01
| 2020-06-19T09:23:01
| 273,452,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
from django.db import models
# # Create your models here.
class Tag(models.Model):
    """Model storing a unique tag name."""

    # Enforced unique at the database level.
    tag_name = models.CharField(max_length=50, unique=True)

    def __str__(self):
        # Human-readable representation (shown e.g. in the Django admin).
        return self.tag_name
|
[
"me.jaki@outlook.com"
] |
me.jaki@outlook.com
|
883b04e8faa0d0655b54ddd6386e4fedc823eb87
|
1734fd26a9adf7d2580f8bd981babda861944ebd
|
/snippets/plot.py
|
33a0a5553e81ca7a1279684804877763af55723c
|
[] |
no_license
|
tangzhuochen/Python_ML_Code
|
420f4d80552a901b41e368e4e66a06f51ea1b29f
|
b418fd6a431a77838447ab4736bdf24019276309
|
refs/heads/master
| 2020-03-28T11:44:50.853941
| 2018-02-08T06:59:31
| 2018-02-08T06:59:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,110
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 19 08:57:13 2015

@author: shifeng

Plot per-fold and mean ROC curves for a linear SVM on a binarized,
noise-augmented iris data set, using 6-fold stratified cross-validation.
"""
print(__doc__)

import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold

###############################################################################
# Data IO and generation: load the iris data set and prepare it.
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]  # drop label 2: ROC analysis needs a binary task
n_samples, n_features = X.shape

# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]

###############################################################################
# Classification and ROC analysis.
# Run classifier with cross-validation and plot ROC curves
# (6-fold stratified cross-validation, one curve per fold).
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
                     random_state=random_state)  # NOTE: probability=True is required here, otherwise predict_proba() fails; an RBF kernel would work somewhat better

mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []

for i, (train, test) in enumerate(cv):
    # Fit a linear-kernel SVM on the training fold and score the test fold.
    probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
#    print set(y[train])                  # set([0, 1]): the labels form two classes
#    print len(X[train]),len(X[test])     # 84 training samples, 16 test samples
#    print "++",probas_                   # predict_proba() returns each test sample's
#                                         # confidence per class; the higher one wins
    # Compute ROC curve and area the curve
    # roc_curve() yields the false/true positive rates and the thresholds.
    fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
    mean_tpr += interp(mean_fpr, fpr, tpr)  # interpolate tpr onto the shared mean_fpr grid via scipy's interp()
    mean_tpr[0] = 0.0  # force the curve to start at the origin
    roc_auc = auc(fpr, tpr)
    # Plot this fold's curve; roc_auc only records the area computed by auc().
    plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))

# Plot the chance diagonal.
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')

mean_tpr /= len(cv)  # average the interpolated tpr over the folds at each of the 100 mean_fpr points
mean_tpr[-1] = 1.0  # force the last point to (1, 1)
mean_auc = auc(mean_fpr, mean_tpr)  # area under the averaged curve
# Plot the mean ROC curve.
#print mean_fpr,len(mean_fpr)
#print mean_tpr
plt.plot(mean_fpr, mean_tpr, 'k--',
         label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)

plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
|
[
"1002937942@qq.com"
] |
1002937942@qq.com
|
6e8a2025251aa202713cc2049aa715e891ee297c
|
2c50ed6522f5c58f7be0416e702ec0d73127246f
|
/tests/test_model_e2e.py
|
982be9523b521d2ab07a423c8bd28eb6dfb5cfa2
|
[
"Apache-2.0"
] |
permissive
|
wzy810103882/detectron2
|
5a5ca7f3c88e7972ddc379ab81e315075a7a2c0d
|
ca38df54206a78742d02a8bd572390cebcc91c86
|
refs/heads/master
| 2020-09-22T13:49:45.656257
| 2019-12-01T22:52:24
| 2019-12-01T22:52:24
| 225,226,643
| 0
| 0
|
Apache-2.0
| 2019-12-01T20:32:49
| 2019-12-01T20:32:49
| null |
UTF-8
|
Python
| false
| false
| 2,643
|
py
|
# -*- coding: utf-8 -*-
import unittest
import torch
import detectron2.model_zoo as model_zoo
from detectron2.modeling import build_model
from detectron2.structures import BitMasks, Boxes, Instances
from detectron2.utils.events import EventStorage
from detectron2.config import get_cfg
def get_model_zoo(config_path):
    """
    Like model_zoo.get, but do not load any weights (even pretrained)
    """
    config = get_cfg()
    config.merge_from_file(model_zoo.get_config_file(config_path))
    return build_model(config)
def create_model_input(img, inst=None):
    """Pack an image (and optional instances) into the dict format
    detectron2 models consume."""
    entry = {"image": img}
    if inst is not None:
        entry["instances"] = inst
    return entry
def get_empty_instance(h, w):
    """Return an Instances object for an (h, w) image with zero ground-truth objects."""
    inst = Instances((h, w))
    # 0-row tensors: valid annotation containers that hold no objects.
    inst.gt_boxes = Boxes(torch.rand(0, 4))
    inst.gt_classes = torch.tensor([]).to(dtype=torch.int64)
    inst.gt_masks = BitMasks(torch.rand(0, h, w))
    return inst
class MaskRCNNE2ETest(unittest.TestCase):
    def setUp(self):
        # Build the model from its config only; no weights are loaded.
        self.model = get_model_zoo("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml")

    def test_empty_data(self):
        """Forward/backward should work on images with zero GT instances."""
        inst = [get_empty_instance(200, 250), get_empty_instance(200, 249)]

        # eval
        self.model.eval()
        self.model(
            [
                create_model_input(torch.rand(3, 200, 250)),
                create_model_input(torch.rand(3, 200, 249)),
            ]
        )

        # training
        self.model.train()
        # EventStorage is required for training-mode forward passes.
        with EventStorage():
            losses = self.model(
                [
                    create_model_input(torch.rand(3, 200, 250), inst[0]),
                    create_model_input(torch.rand(3, 200, 249), inst[1]),
                ]
            )
            sum(losses.values()).backward()
            del losses
class RetinaNetE2ETest(unittest.TestCase):
    def setUp(self):
        # Build the model from its config only; no weights are loaded.
        self.model = get_model_zoo("COCO-Detection/retinanet_R_50_FPN_1x.yaml")

    def test_empty_data(self):
        """Forward/backward should work on images with zero GT instances."""
        inst = [get_empty_instance(200, 250), get_empty_instance(200, 249)]

        # eval
        self.model.eval()
        self.model(
            [
                create_model_input(torch.rand(3, 200, 250)),
                create_model_input(torch.rand(3, 200, 249)),
            ]
        )

        # training
        self.model.train()
        # EventStorage is required for training-mode forward passes.
        with EventStorage():
            losses = self.model(
                [
                    create_model_input(torch.rand(3, 200, 250), inst[0]),
                    create_model_input(torch.rand(3, 200, 249), inst[1]),
                ]
            )
            sum(losses.values()).backward()
            del losses
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
33454d8a473d9356ae6386b4a2337be5edca2700
|
b35f80114ad96928ccce44d40840177e0b5158aa
|
/dfvfs/encoding/decoder.py
|
1ec6014e57a2ce8159dd18307c1cfe296256ff2c
|
[
"Apache-2.0"
] |
permissive
|
ryanmjones/dfvfs
|
7b62bab127cb201e679331fa808ec79e8ef03bd9
|
29ae5baddbf285260a596a67a199d0f5077214c1
|
refs/heads/master
| 2020-03-29T23:47:49.363000
| 2018-09-03T11:43:03
| 2018-09-03T11:43:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
# -*- coding: utf-8 -*-
"""The decoder interface."""
from __future__ import unicode_literals
import abc
class Decoder(object):
  """Decoder interface.

  Subclasses must implement Decode(); this base class only defines the
  contract.
  """

  @abc.abstractmethod
  def Decode(self, encoded_data):
    """Decodes the encoded data.

    Decoding may be partial: any input that could not be consumed yet is
    handed back as the second element of the returned tuple.

    Args:
      encoded_data (byte): encoded data.

    Returns:
      tuple(bytes, bytes): decoded data and remaining encoded data.
    """
|
[
"joachim.metz@gmail.com"
] |
joachim.metz@gmail.com
|
bd9d719ca44cc85dcaf2e828f9cfb1f1854d7bc8
|
aa44a2a7dec257687eb67ed109ca7727ac09d343
|
/polls/migrations/0001_initial.py
|
22cb82fe892f4aa5ffdda8e008b1f0fd8e12ee2e
|
[] |
no_license
|
mitshel/mbrc_poll
|
3f63bc2e0aa18d14eefc4e4583a84613de7d9e81
|
694cc8b084394feca178bdc2bd9ecfc1f58d9906
|
refs/heads/master
| 2020-04-05T23:17:08.152738
| 2015-09-16T17:32:14
| 2015-09-16T17:32:14
| 40,611,735
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,328
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema for the polls app.

    Creates the polls, polls_Answers, polls_AnswersResults, polls_Questions
    and polls_Results tables, then wires up the foreign keys that reference
    models created later in the same migration.
    """

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='polls',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
                ('name', models.CharField(max_length=64)),
            ],
        ),
        migrations.CreateModel(
            name='polls_Answers',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
                ('answer', models.CharField(max_length=512)),
            ],
        ),
        migrations.CreateModel(
            name='polls_AnswersResults',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
                ('open_answer', models.CharField(max_length=1024)),
                ('closed_answer', models.CharField(max_length=256)),
            ],
        ),
        migrations.CreateModel(
            name='polls_Questions',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
                ('question', models.CharField(max_length=1024)),
                ('poll', models.ForeignKey(to='polls.polls')),
            ],
        ),
        migrations.CreateModel(
            name='polls_Results',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
                ('poll', models.ForeignKey(to='polls.polls')),
            ],
        ),
        # FK fields added after CreateModel because their targets are
        # declared above in this same migration.
        migrations.AddField(
            model_name='polls_answersresults',
            name='poll_result',
            field=models.ForeignKey(to='polls.polls_Results'),
        ),
        migrations.AddField(
            model_name='polls_answersresults',
            name='question',
            field=models.ForeignKey(to='polls.polls_Questions'),
        ),
        migrations.AddField(
            model_name='polls_answers',
            name='question',
            field=models.ForeignKey(to='polls.polls_Questions'),
        ),
    ]
|
[
"mitshel@mail.ru"
] |
mitshel@mail.ru
|
93d81bbe417a0112b72a86bf91c615abb9e27a37
|
47bd686ab04d8f6daba2097875dfefdba967d598
|
/01_baekjoon/83_problem_3052.py
|
af9e73d95822c7b61f0dd7150058852dbd9f71bf
|
[] |
no_license
|
EmjayAhn/DailyAlgorithm
|
9633638c7cb7064baf26126cbabafd658fec3ca8
|
acda1917fa1a290fe740e1bccb237d83b00d1ea4
|
refs/heads/master
| 2023-02-16T17:04:35.245512
| 2023-02-08T16:29:51
| 2023-02-08T16:29:51
| 165,942,743
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
import sys

# Baekjoon 3052: read ten integers and print how many distinct remainders
# they leave when divided by 42.
inputs = []
for _ in range(10):
    input_number = int(sys.stdin.readline())
    inputs.append(input_number % 42)
# A set already holds only unique values; the original wrapped it in a
# redundant list() before taking len().
print(len(set(inputs)))
|
[
"emjay.data@gmail.com"
] |
emjay.data@gmail.com
|
6f80c3f5e025ec85ced638609306fc2465839e96
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/dms_write_1/replication-task-assessment-run_cancel.py
|
d153b39c7a14410bbcb8fb410169d26887be325c
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,422
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Interactive wrapper that prompts for the single required parameter of
`aws dms cancel-replication-task-assessment-run` and executes it via the
shared helper in `common.execute_command`."""
import os
import sys
# Make the repository root importable so the `common` package resolves
# when this file is run directly as a script.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dms/cancel-replication-task-assessment-run.html
if __name__ == '__main__':
    """
    delete-replication-task-assessment-run : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dms/delete-replication-task-assessment-run.html
    describe-replication-task-assessment-runs : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dms/describe-replication-task-assessment-runs.html
    start-replication-task-assessment-run : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dms/start-replication-task-assessment-run.html
    """
    # Help text shown to the user describing the one parameter being collected.
    parameter_display_string = """
    # replication-task-assessment-run-arn : Amazon Resource Name (ARN) of the premigration assessment run to be canceled.
    """
    add_option_dict = {}
    #######################################################################
    # parameter display string
    add_option_dict["parameter_display_string"] = parameter_display_string
    # ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
    # Delegate prompting and command execution to the shared helper.
    write_one_parameter("dms", "cancel-replication-task-assessment-run", "replication-task-assessment-run-arn", add_option_dict)
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
b9d8ab974796272d2f381fa35d1a618abf7f072d
|
758f1ad9c287c74e57fa7a4f8d03aba8d9f776ab
|
/host/knobui/list.py
|
3936fdb4caea0387b3222bc6750f7be8a61a6517
|
[] |
no_license
|
cnvogelg/knobterm
|
6ef50bc479a64d5ff1729265f447b40c5d5dfd00
|
a731d1a0f1f85a0ed17f3b0df5175e151f82608f
|
refs/heads/master
| 2021-01-21T22:26:53.052986
| 2013-01-01T16:38:40
| 2013-01-01T16:38:40
| 5,993,835
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 937
|
py
|
from label import Label
from consts import *
class List:
    """A bordered list widget: a centred title Label above one Label per entry.

    Geometry: (x, y) is the top-left of the border box, w its width; the
    inner labels are inset two cells horizontally.
    """

    def __init__(self, x, y, w, title, entries=None):
        # Outer box geometry. Height = title row + entry rows + 2 border rows.
        self.x = x
        self.y = y
        self.w = w
        # Bug fix: len(entries) raised TypeError when the default entries=None
        # was used; treat None as "no entries yet".
        self.h = (len(entries) if entries is not None else 0) + 2
        x += 2
        w -= 4
        self.tlabel = Label(x, y, w, title, align=Label.ALIGN_CENTER, fg=COLOR_LIGHT_GREY)
        self.labels = []
        y += 1
        # Layout cursor: where the next entry label will be placed.
        self.lx = x
        self.ly = y
        self.lw = w
        if entries is not None:
            self.add_entries(entries)

    def add_entries(self, entries):
        """Append one Label per entry, advancing the layout cursor."""
        y = self.ly
        for e in entries:
            l = Label(self.lx, y, self.lw, e)
            self.labels.append(l)
            y += 1
        self.ly = y

    def add_entry(self, entry):
        """Append a single entry Label.

        Bug fix: the created Label was previously discarded instead of being
        stored in self.labels, so it was never drawn.
        NOTE(review): self.h is not updated here (pre-existing behavior kept).
        """
        l = Label(self.lx, self.ly, self.lw, entry)
        self.labels.append(l)
        self.ly += 1

    def draw(self, gc):
        """Draw the border, the title, and every entry label."""
        gc.set_color_fg(COLOR_LIGHT_GREY)
        gc.draw_border(1, self.x, self.y, self.w - 2, self.h - 2)
        self.tlabel.draw(gc)
        for l in self.labels:
            l.draw(gc)

    def get_label(self, i):
        """Return the i-th entry Label."""
        return self.labels[i]
|
[
"C.Vogelgsang@web.de"
] |
C.Vogelgsang@web.de
|
b5a4113c696eea49ed3ad204ff79189f7aa46c03
|
210e1cffcd8a705c2a8a1485ed5532b9169f5d10
|
/whoville/cloudbreak/models/custom_container_request.py
|
dbe93467c0216ad39351277fd6aa353b5d0e87eb
|
[
"Apache-2.0"
] |
permissive
|
mikchaos/whoville
|
2a45bc6636d448733d8d2368ac88a980cf6954ea
|
6eabaea4b74ac0b632c03db8252590131c6ce63b
|
refs/heads/master
| 2020-04-19T08:53:04.430990
| 2019-01-29T05:01:57
| 2019-01-29T05:01:57
| 168,092,002
| 0
| 0
|
Apache-2.0
| 2019-01-29T05:00:06
| 2019-01-29T05:00:06
| null |
UTF-8
|
Python
| false
| false
| 4,034
|
py
|
# coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.7.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class CustomContainerRequest(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'definitions': 'dict(str, str)'
    }

    attribute_map = {
        'definitions': 'definitions'
    }

    def __init__(self, definitions=None):
        """
        CustomContainerRequest - a model defined in Swagger
        """
        self._definitions = None
        if definitions is not None:
            self.definitions = definitions

    @property
    def definitions(self):
        """
        Gets the definitions of this CustomContainerRequest.

        :return: The definitions of this CustomContainerRequest.
        :rtype: dict(str, str)
        """
        return self._definitions

    @definitions.setter
    def definitions(self, definitions):
        """
        Sets the definitions of this CustomContainerRequest.

        :param definitions: The definitions of this CustomContainerRequest.
        :type: dict(str, str)
        """
        self._definitions = definitions

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # iteritems (from six) keeps this loop Python-2/3 compatible.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model elements inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model values inside dicts.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, CustomContainerRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
[
"chaffelson@gmail.com"
] |
chaffelson@gmail.com
|
600d6005c001b142a95537db39711a46551329a9
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_3_neat/16_0_3_stormshadow1896_test.py
|
3f76200143e9cb10b787cfb85cc80e4209134e9f
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,174
|
py
|
def check(num):
    """Return the smallest divisor of num found in range(2, int(num**.5) + 2),
    or -1 when none divides num (i.e. num is prime for num > 3).

    For very small num the probed range can include num itself, so e.g.
    check(2) == 2; callers here only pass large jamcoin values.
    """
    for i in range(2, int(num ** .5) + 2):
        if num % i == 0:
            return i
            # (the original had an unreachable `break` after this return)
    return -1
def create_num(num, base):
    """Interpret the decimal digits of num as digits of a base-`base` number.

    E.g. create_num(101, 2) == 5 (binary 101). Digits are consumed from the
    least significant end; the place value `i` grows by `base` each step.
    """
    i = 1
    temp = 0
    while num > 0:
        temp = temp + (num % 10) * i
        i = i * base
        # Floor division: explicit on Python 2 and correct on Python 3,
        # where the original `/` would have produced a float and looped badly.
        num = num // 10
    return temp
def valid(num):
    """True when num is written using only the digits 0 and 1 and ends in 1."""
    text = str(num)
    for banned in "23456789":
        if banned in text:
            return False
    return num % 10 == 1
# Search loop (Python 2): appears to be Code Jam 2016 "Coin Jam" — find 50
# 16-digit strings of 0s and 1s, starting and ending with 1, such that the
# string is composite when read in every base from 2 to 10; print each
# string followed by one nontrivial divisor per base.
found = 0
a = 1000000000000001
# a = 100001
# print len(str(a))
print "Case #1:"
while found < 50:#
    factors = []
    if valid(a) :
        # Candidate uses only 0/1 digits and ends in 1; test all bases.
        for bases in range(2, 11):
            num = create_num(a, bases)
            # print num,
            # if valid(a):
            #     print "yes"
            # print len(set(str(num))) == 2
            # print num
            res = check(num)
            # print res
            if res == -1:
                # Prime in this base: candidate rejected, stop probing bases.
                break
            else:
                factors.append(res)
    # 9 factors collected means composite in all bases 2..10: emit the jamcoin.
    if len(factors) == 9:
        print a,
        for e in factors:print e,
        # print factors
        found = found + 1
        print
    # print
    a = a + 1
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
81ca69ae2548272b52a98916125018505f90c491
|
d755c825cacbb60e9a23234fbaaf93de48c6a058
|
/others/permutations.py
|
a183388454c2dd1f1a4ad70a22c8ae4c383a2f96
|
[] |
no_license
|
Subhash3/CodeChef
|
d82e4df751e6dd1e07205871b9e2c0b1202d0506
|
5301b0e4555aac55a72f175dfba8f786b4ea7bbd
|
refs/heads/master
| 2020-08-04T20:17:18.339602
| 2020-01-21T16:10:12
| 2020-01-21T16:10:12
| 212,267,130
| 2
| 3
| null | 2019-10-15T12:07:44
| 2019-10-02T06:07:41
|
Python
|
UTF-8
|
Python
| false
| false
| 269
|
py
|
#!/usr/bin/env python3
import math

# For each test case: read n and k, then the list of integers, and print
# the factorial of how many zeros the list contains.
for _ in range(int(input())):
    header = input().split()
    n = int(header[0])
    k = int(header[1])
    values = [int(token) for token in input().split()]
    zero_count = values.count(0)
    print(math.factorial(zero_count))
|
[
"subhashsarangi123@gmail.com"
] |
subhashsarangi123@gmail.com
|
c6888ea8295a4174a130e4b3ac4e16938370ffc9
|
f4dd7ae9af786a396c42c3cc4a2126ab7d7e9cb8
|
/tests/test_transformer/test_refcnt/test_refcnt_optimizer.py
|
3fb998e7c54a00dad269e0741fcfd4b50370de05
|
[
"Apache-2.0"
] |
permissive
|
hpplinux/utensor_cgen
|
1e9b85b8a457117763313b8924d9696e0b99a120
|
d892b728d24321bc751552667b9722633d17c574
|
refs/heads/master
| 2020-04-19T16:13:31.891523
| 2019-02-19T02:27:44
| 2019-02-19T02:27:44
| 168,297,950
| 0
| 0
| null | 2019-01-30T07:20:53
| 2019-01-30T07:20:52
| null |
UTF-8
|
Python
| false
| false
| 591
|
py
|
from utensor_cgen.ir import uTensorGraph
from utensor_cgen.transformer import RefCntOptimizer
def test_refcnt_optimizer(refgraph_tuple):
  """Pytest case: RefCntOptimizer must annotate ops with the expected
  reference counts.

  `refgraph_tuple` is a fixture yielding (graph_def, expected_refcounts,
  output_nodes).
  """
  (graph_def, refcnt_ans, output_nodes)= refgraph_tuple
  ugraph = uTensorGraph(graph_def, output_nodes)
  transformer = RefCntOptimizer()
  ugraph = transformer.transform(ugraph)
  for node_name in ugraph.topo_order:
    if node_name in refcnt_ans:
      op_info = ugraph.ops_info[node_name]
      # The transformer stores counts under "<namescope>__ref_counts" in op_attr.
      refcnts = op_info.op_attr["%s__ref_counts" % transformer.KWARGS_NAMESCOPE]
      assert refcnts == refcnt_ans[node_name]
|
[
"qmalliao@gmail.com"
] |
qmalliao@gmail.com
|
7bc4aca3786342718ac19d3cf036d249e8b025a1
|
e6d862a9df10dccfa88856cf16951de8e0eeff2b
|
/VMS/core/python-aiohttp/api_server/models/person.py
|
47ed159f8bd6e87bf16eaad232b627a25976f722
|
[] |
no_license
|
AllocateSoftware/API-Stubs
|
c3de123626f831b2bd37aba25050c01746f5e560
|
f19d153f8e9a37c7fb1474a63c92f67fc6c8bdf0
|
refs/heads/master
| 2022-06-01T07:26:53.264948
| 2020-01-09T13:44:41
| 2020-01-09T13:44:41
| 232,816,845
| 0
| 0
| null | 2022-05-20T21:23:09
| 2020-01-09T13:34:35
|
C#
|
UTF-8
|
Python
| false
| false
| 5,846
|
py
|
# coding: utf-8
from datetime import date, datetime
from typing import List, Dict, Type
from api_server.models.base_model_ import Model
from api_server.models.link import Link
from api_server import util
class Person(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """
    # NOTE(review): generated model — behavioral changes belong in the
    # generator template, not here.

    def __init__(self, id: str=None, universal_id: str=None, first_name: str=None, surname: str=None, email: str=None, telephone_number: str=None, links: List[Link]=None):
        """Person - a model defined in OpenAPI

        :param id: The id of this Person.
        :param universal_id: The universal_id of this Person.
        :param first_name: The first_name of this Person.
        :param surname: The surname of this Person.
        :param email: The email of this Person.
        :param telephone_number: The telephone_number of this Person.
        :param links: The links of this Person.
        """
        # Python attribute name -> Python type, used by the deserializer.
        self.openapi_types = {
            'id': str,
            'universal_id': str,
            'first_name': str,
            'surname': str,
            'email': str,
            'telephone_number': str,
            'links': List[Link]
        }

        # Python attribute name -> JSON key in the API payload.
        self.attribute_map = {
            'id': 'id',
            'universal_id': 'universalId',
            'first_name': 'firstName',
            'surname': 'surname',
            'email': 'email',
            'telephone_number': 'telephoneNumber',
            'links': 'links'
        }

        self._id = id
        self._universal_id = universal_id
        self._first_name = first_name
        self._surname = surname
        self._email = email
        self._telephone_number = telephone_number
        self._links = links

    @classmethod
    def from_dict(cls, dikt: dict) -> 'Person':
        """Returns the dict as a model

        :param dikt: A dict.
        :return: The Person of this Person.
        """
        return util.deserialize_model(dikt, cls)

    @property
    def id(self):
        """Gets the id of this Person.

        identifier of the person (worker) within the VMS

        :return: The id of this Person.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this Person.

        identifier of the person (worker) within the VMS

        :param id: The id of this Person.
        :type id: str
        """
        self._id = id

    @property
    def universal_id(self):
        """Gets the universal_id of this Person.

        Global identifer, such as National Insurance number (where known), or other identifier or composite identifier that may be used for matching purposes.

        :return: The universal_id of this Person.
        :rtype: str
        """
        return self._universal_id

    @universal_id.setter
    def universal_id(self, universal_id):
        """Sets the universal_id of this Person.

        Global identifer, such as National Insurance number (where known), or other identifier or composite identifier that may be used for matching purposes.

        :param universal_id: The universal_id of this Person.
        :type universal_id: str
        """
        self._universal_id = universal_id

    @property
    def first_name(self):
        """Gets the first_name of this Person.

        :return: The first_name of this Person.
        :rtype: str
        """
        return self._first_name

    @first_name.setter
    def first_name(self, first_name):
        """Sets the first_name of this Person.

        :param first_name: The first_name of this Person.
        :type first_name: str
        """
        self._first_name = first_name

    @property
    def surname(self):
        """Gets the surname of this Person.

        :return: The surname of this Person.
        :rtype: str
        """
        return self._surname

    @surname.setter
    def surname(self, surname):
        """Sets the surname of this Person.

        :param surname: The surname of this Person.
        :type surname: str
        """
        self._surname = surname

    @property
    def email(self):
        """Gets the email of this Person.

        :return: The email of this Person.
        :rtype: str
        """
        return self._email

    @email.setter
    def email(self, email):
        """Sets the email of this Person.

        :param email: The email of this Person.
        :type email: str
        """
        self._email = email

    @property
    def telephone_number(self):
        """Gets the telephone_number of this Person.

        :return: The telephone_number of this Person.
        :rtype: str
        """
        return self._telephone_number

    @telephone_number.setter
    def telephone_number(self, telephone_number):
        """Sets the telephone_number of this Person.

        :param telephone_number: The telephone_number of this Person.
        :type telephone_number: str
        """
        self._telephone_number = telephone_number

    @property
    def links(self):
        """Gets the links of this Person.

        Array of HATEOAS-style references that may be followed by the client. This may include a 'worker.profile' URL, which will return an HTML page representing the worker profile within the VMS.

        :return: The links of this Person.
        :rtype: List[Link]
        """
        return self._links

    @links.setter
    def links(self, links):
        """Sets the links of this Person.

        Array of HATEOAS-style references that may be followed by the client. This may include a 'worker.profile' URL, which will return an HTML page representing the worker profile within the VMS.

        :param links: The links of this Person.
        :type links: List[Link]
        """
        self._links = links
|
[
"nigel.magnay@gmail.com"
] |
nigel.magnay@gmail.com
|
d302aaee20a1f34b79c85492e6c660c1b7a60229
|
f443a7ab85f6eb99cc2466f147843faed0c2efd8
|
/fivelayerssoftmax.py
|
0488df1ccb7e1f73182d270249eb104271bca015
|
[] |
no_license
|
chaichai1997/deeplearning-tensorflow
|
40d09a3c13518e0634ffebc1260add7b6fab80b9
|
1d75b759868ad0775ab8432d4828dc2448fcb882
|
refs/heads/master
| 2020-06-30T07:24:17.398378
| 2019-08-04T02:33:13
| 2019-08-04T02:33:13
| 200,765,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,961
|
py
|
# -*- coding: utf-8 -*-
"""Five-layer sigmoid MLP on MNIST (TensorFlow 1.x), with TensorBoard logging."""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import math

logs_path = 'log_simple_stats_5_layers_sigmoid'
batch_size = 100
training_epochs = 10

mnist = input_data.read_data_sets("/tmp/data", one_hot=True)

# ---------------------------------------------------------------------------
# Build the graph.
# ---------------------------------------------------------------------------
# X: flattened 28*28 images; Y_: one-hot labels over 10 classes.
X = tf.placeholder(tf.float32, [None, 784], name="input")
Y_ = tf.placeholder(tf.float32, [None, 10])

L = 200  # neurons in hidden layer 1
M = 100  # hidden layer 2
N = 60   # hidden layer 3
O = 30   # hidden layer 4

# Weights start as small truncated normals; biases start at zero.
W1 = tf.Variable(tf.truncated_normal([784, L], stddev=0.1))
B1 = tf.Variable(tf.zeros([L]))
W2 = tf.Variable(tf.truncated_normal([L, M], stddev=0.1))
B2 = tf.Variable(tf.zeros([M]))
W3 = tf.Variable(tf.truncated_normal([M, N], stddev=0.1))
B3 = tf.Variable(tf.zeros([N]))
W4 = tf.Variable(tf.truncated_normal([N, O], stddev=0.1))
B4 = tf.Variable(tf.zeros([O]))
W5 = tf.Variable(tf.truncated_normal([O, 10], stddev=0.1))
B5 = tf.Variable(tf.zeros([10]))

dropout_radio = tf.placeholder(tf.float32)  # kept for experiments; unused below
XX = tf.reshape(X, [-1, 784])
Y1 = tf.nn.sigmoid(tf.matmul(XX, W1) + B1)
Y2 = tf.nn.sigmoid(tf.matmul(Y1, W2) + B2)
Y3 = tf.nn.sigmoid(tf.matmul(Y2, W3) + B3)
Y4 = tf.nn.sigmoid(tf.matmul(Y3, W4) + B4)
Ylogits = tf.matmul(Y4, W5) + B5
Y = tf.nn.softmax(Ylogits)

# Cross-entropy computed on the raw logits (numerically stable); the *100
# scaling matches the original script's TensorBoard curves.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
cross_entropy = tf.reduce_mean(cross_entropy)*100

correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Fix: the original set learning_rate = 0.5 at the top and shadowed it here
# before any use; only this value ever takes effect.
learning_rate = 0.003
train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)

tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)
summary_op = tf.summary.merge_all()

# Fix: the original also opened a throwaway tf.Session() here (never closed,
# never used for training) and ran the initializer twice; one managed
# session is sufficient and releases its resources on exit.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
    for epoch in range(training_epochs):
        batch_count = int(mnist.train.num_examples/batch_size)
        for i in range(batch_count):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            _, summary = sess.run([train_step, summary_op],
                                  feed_dict={X: batch_x,
                                             Y_: batch_y})
            writer.add_summary(summary,
                               epoch * batch_count + i)
        print("Epoch: ", epoch)
    print("Accuracy: ", accuracy.eval(feed_dict={X: mnist.test.images, Y_: mnist.test.labels}))
    print("done")
|
[
"1224816105@qq.com"
] |
1224816105@qq.com
|
b6c1a3f1e0ce006d9453defe233f1062e31be1a6
|
4b6ab4d9d8a3e35def45633149cab03a1430ecfb
|
/my_pytest/my_pytest/urls.py
|
ee611711be63735dd44762c4a628969b678fef31
|
[
"MIT"
] |
permissive
|
Anych/pytest_project
|
d8d7b2f45a05f1042db5195f5d01586d0866510f
|
6ed90b3688212d0b5f035b0d9761f2cf5f99a82f
|
refs/heads/main
| 2023-05-20T13:56:56.103966
| 2021-06-13T20:33:29
| 2021-06-13T20:33:29
| 375,334,155
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 309
|
py
|
from django.contrib import admin
from django.urls import path, include
from companies.urls import companies_router
from companies.views import send_company_email
# Project URL routes: Django admin, the companies DRF router, and a
# standalone email-sending endpoint.
urlpatterns = [
    path("admin/", admin.site.urls),
    path("", include(companies_router.urls)),
    path("send-email", send_company_email),
]
|
[
"anuar123@mail.ru"
] |
anuar123@mail.ru
|
b6742b1202739ec60419f8f23681a7bb827e3aa0
|
8fbcb5eb7a527b700486ec161c09225dfdd30bbb
|
/Actividades/AC20/14632152_11633905.py
|
47b2a52023588612eeeb6b43f2633acc9a7ee741
|
[] |
no_license
|
rjherrera/IIC2233
|
388eae95ed9a32e2de7239f4c99277b2c01f5223
|
6637ff92ee225092b7a598d765012153c39713ee
|
refs/heads/master
| 2021-01-21T14:38:50.939337
| 2015-12-10T22:21:32
| 2015-12-10T22:21:32
| 95,313,145
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,535
|
py
|
from PyQt4 import QtGui, uic
from calc_financiero import calcular_jub
# Load the Qt Designer layout; loadUiType returns a (form_class, base_class)
# pair, which MainWindow below uses as its bases (form[0], form[1]).
form = uic.loadUiType("hexa.ui")
class MainWindow(form[0], form[1]):
    """Retirement-savings calculator window.

    Every relevant input widget is wired to `calcular`, which recomputes the
    derived fields live and reports invalid input through the status bar.
    """

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        # Static branding images and label placement.
        pixmap = QtGui.QPixmap('logo_argentum.png')
        self.label_15.setPixmap(pixmap)
        self.label_15.resize(self.label_15.sizeHint())
        self.label_15.move(90, 0)
        self.label_16.setScaledContents(True)
        self.label_16.setPixmap(QtGui.QPixmap('logo_hexa.png'))
        self.label_16.resize(100, 30)
        self.label_14.move(105, 68)
        # Monthly contribution inputs (income, percentage).
        self.lineEdit.textChanged.connect(self.calcular)
        self.lineEdit_2.textChanged.connect(self.calcular)
        # Age inputs (current age, retirement age, life expectancy).
        self.lineEdit_4.textChanged.connect(self.calcular)
        self.lineEdit_5.textChanged.connect(self.calcular)
        self.lineEdit_6.textChanged.connect(self.calcular)
        # Investment-profile combo box.
        self.comboBox.currentIndexChanged.connect(self.calcular)

    def calcular(self):
        """Recompute contribution, pension years and the final projection
        from the current widget contents, in real time."""
        # --- Monthly contribution: income * percentage / 100 ---
        ingreso = self.lineEdit
        porcentaje = self.lineEdit_2
        error = False
        if ingreso.text() and porcentaje.text():
            try:
                porcentaje_valor = float(porcentaje.text())
                res = int(ingreso.text()) * porcentaje_valor / 100
                if not 0 <= porcentaje_valor <= 100:
                    raise ValueError
                self.label_2.setText('$%f' % res)
                self.statusbar.showMessage('')
                error = False
            # Fix: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; only conversion/range failures are expected.
            except ValueError:
                if not error:
                    self.statusbar.showMessage(
                        'Ingreso y/o porcentaje inválidos.')
                    error = True
        # --- Years of pension: life expectancy - retirement age ---
        edad_jubilacion = self.lineEdit_5
        esperanza = self.lineEdit_6
        if edad_jubilacion.text() and esperanza.text():
            try:
                res = int(esperanza.text()) - int(edad_jubilacion.text())
                self.label_5.setText('%d' % res)
                self.statusbar.showMessage('')
                error = False
            except ValueError:  # fix: was bare `except:`
                if not error:
                    self.statusbar.showMessage('Edades inválidas.')
                    error = True
        # --- Final projection via calc_financiero.calcular_jub ---
        edad_actual = self.lineEdit_4
        seleccion = self.comboBox.itemText(self.comboBox.currentIndex())
        if (ingreso.text() and porcentaje.text() and edad_actual.text() and
                edad_jubilacion.text() and esperanza.text() and seleccion):
            try:
                res = calcular_jub(int(ingreso.text()),
                                   float(porcentaje.text()),
                                   int(edad_actual.text()),
                                   int(edad_jubilacion.text()),
                                   int(esperanza.text()),
                                   seleccion)
                self.label_13.setText(res)
                self.statusbar.showMessage('')
                error = False
            # calcular_jub is external and may raise more than ValueError;
            # Exception (not a bare except) still lets Ctrl-C through.
            except Exception:
                if not error:
                    self.statusbar.showMessage(
                        'Error en los datos ingresados.')
                    error = True
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the window,
    # and enter the event loop. Note: this rebinds the module-level `form`
    # (the loadUiType tuple) to the window instance — harmless, since the
    # class bases were already resolved at class-definition time.
    app = QtGui.QApplication([])
    form = MainWindow()
    form.show()
    app.exec_()
|
[
"rjherrera@uc.cl"
] |
rjherrera@uc.cl
|
65e0ac96fa03bc4c5d8b73113cbf7696a1137df2
|
958685165bfeb4122cc3473659a6d0c89c5cae95
|
/crea8s_warehouse/stock.py
|
784d425ff8259ded6339e857f54c984518363ce3
|
[] |
no_license
|
tringuyen17588/OpenERP-7.0
|
44efee7735af65d960c5adb4b03a1a329f5c4a57
|
2486261e4d351d4f444ec31e74c6b0e36ed2fb82
|
refs/heads/master
| 2021-01-10T02:45:24.320726
| 2016-02-19T06:05:21
| 2016-02-19T06:05:21
| 52,064,852
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,556
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
import time
import datetime
from datetime import timedelta
class product_product(osv.osv):
    # OpenERP 7 model extension (Python 2). Overrides stock availability to
    # also include the stock locations of every warehouse visible to the
    # current user (or the user's company) — see "compute again" below.
    _inherit = "product.product"
    def get_product_available(self, cr, uid, ids, context=None):
        """ Finds whether product is available or not in particular warehouse.
        @return: Dictionary of values

        Context keys used: shop, warehouse, location, states, what
        ('in'/'out'), from_date, to_date, prodlot_id, compute_child, uom.
        Returns {product_id: qty} where qty = incoming - outgoing, converted
        to the requested (or each product's default) unit of measure.
        """
        if context is None:
            context = {}
        location_obj = self.pool.get('stock.location')
        warehouse_obj = self.pool.get('stock.warehouse')
        shop_obj = self.pool.get('sale.shop')
        states = context.get('states',[])
        what = context.get('what',())
        if not ids:
            ids = self.search(cr, uid, [])
        res = {}.fromkeys(ids, 0.0)
        if not ids:
            return res
        # Resolve shop -> warehouse -> stock location, in that priority order.
        if context.get('shop', False):
            warehouse_id = shop_obj.read(cr, uid, int(context['shop']), ['warehouse_id'])['warehouse_id'][0]
            if warehouse_id:
                context['warehouse'] = warehouse_id
        if context.get('warehouse', False):
            lot_id = warehouse_obj.read(cr, uid, int(context['warehouse']), ['lot_stock_id'])['lot_stock_id'][0]
            if lot_id:
                context['location'] = lot_id
        # 'location' may be an id, a name fragment, or a list of ids.
        if context.get('location', False):
            if type(context['location']) == type(1):
                location_ids = [context['location']]
            elif type(context['location']) in (type(''), type(u'')):
                location_ids = location_obj.search(cr, uid, [('name','ilike',context['location'])], context=context)
            else:
                location_ids = context['location']
        else:
            location_ids = []
        # compute again quantity depend on warehouse
        # Customization: always add the stock locations of the warehouses
        # assigned to the user (falling back to all of the company's).
        users_br = self.pool.get('res.users').browse(cr, uid, uid)
        wids = [x.id for x in users_br.warehouse_ids]
        if not wids:
            company_id = users_br.company_id.id
            wids = warehouse_obj.search(cr, uid, [('company_id', '=', company_id)], context=context)
            if not wids:
                return res
        for w in warehouse_obj.browse(cr, uid, wids, context=context):
            location_ids.append(w.lot_stock_id.id)
        # build the list of ids of children of the location given by id
        if context.get('compute_child',True):
            child_location_ids = location_obj.search(cr, uid, [('location_id', 'child_of', location_ids)])
            location_ids = child_location_ids or location_ids
        # this will be a dictionary of the product UoM by product id
        product2uom = {}
        uom_ids = []
        for product in self.read(cr, uid, ids, ['uom_id'], context=context):
            product2uom[product['id']] = product['uom_id'][0]
            uom_ids.append(product['uom_id'][0])
        # this will be a dictionary of the UoM resources we need for conversion purposes, by UoM id
        uoms_o = {}
        for uom in self.pool.get('product.uom').browse(cr, uid, uom_ids, context=context):
            uoms_o[uom.id] = uom
        results = []
        results2 = []
        from_date = context.get('from_date',False)
        to_date = context.get('to_date',False)
        date_str = False
        date_values = False
        # Parameter order must match the %s placeholders in the SQL below:
        # (location set twice, product ids, states[, dates][, prodlot]).
        where = [tuple(location_ids),tuple(location_ids),tuple(ids),tuple(states)]
        if from_date and to_date:
            date_str = "date>=%s and date<=%s"
            where.append(tuple([from_date]))
            where.append(tuple([to_date]))
        elif from_date:
            date_str = "date>=%s"
            date_values = [from_date]
        elif to_date:
            date_str = "date<=%s"
            date_values = [to_date]
        if date_values:
            where.append(tuple(date_values))
        prodlot_id = context.get('prodlot_id', False)
        prodlot_clause = ''
        if prodlot_id:
            prodlot_clause = ' and prodlot_id = %s '
            where += [prodlot_id]
        # TODO: perhaps merge in one query.
        if 'in' in what:
            # all moves from a location out of the set to a location in the set
            cr.execute(
                'select sum(product_qty), product_id, product_uom '\
                'from stock_move '\
                'where location_id NOT IN %s '\
                'and location_dest_id IN %s '\
                'and product_id IN %s '\
                'and state IN %s ' + (date_str and 'and '+date_str+' ' or '') +' '\
                + prodlot_clause +
                'group by product_id,product_uom',tuple(where))
            results = cr.fetchall()
        if 'out' in what:
            # all moves from a location in the set to a location out of the set
            cr.execute(
                'select sum(product_qty), product_id, product_uom '\
                'from stock_move '\
                'where location_id IN %s '\
                'and location_dest_id NOT IN %s '\
                'and product_id IN %s '\
                'and state in %s ' + (date_str and 'and '+date_str+' ' or '') + ' '\
                + prodlot_clause +
                'group by product_id,product_uom',tuple(where))
            results2 = cr.fetchall()
        # Get the missing UoM resources
        # NOTE: Python 2 — map()/filter() return lists here, so `+` and `+=`
        # concatenate; this code would need list() wrappers on Python 3.
        uom_obj = self.pool.get('product.uom')
        uoms = map(lambda x: x[2], results) + map(lambda x: x[2], results2)
        if context.get('uom', False):
            uoms += [context['uom']]
        uoms = filter(lambda x: x not in uoms_o.keys(), uoms)
        if uoms:
            uoms = uom_obj.browse(cr, uid, list(set(uoms)), context=context)
            for o in uoms:
                uoms_o[o.id] = o
        #TOCHECK: before change uom of product, stock move line are in old uom.
        context.update({'raise-exception': False})
        # Count the incoming quantities
        for amount, prod_id, prod_uom in results:
            amount = uom_obj._compute_qty_obj(cr, uid, uoms_o[prod_uom], amount,
                     uoms_o[context.get('uom', False) or product2uom[prod_id]], context=context)
            res[prod_id] += amount
        # Count the outgoing quantities
        for amount, prod_id, prod_uom in results2:
            amount = uom_obj._compute_qty_obj(cr, uid, uoms_o[prod_uom], amount,
                     uoms_o[context.get('uom', False) or product2uom[prod_id]], context=context)
            res[prod_id] -= amount
        return res
# Old OpenERP convention: instantiate the model class to register it.
product_product()
|
[
"tri@crea8s.com"
] |
tri@crea8s.com
|
9132da0e6d4babe5a123bb4b0ccc7a8bb1cb97e0
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_117/505.py
|
1738dd39c2a21f537d1057b58316f37a1bfb3b13
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 705
|
py
|
#!/usr/bin/python3
from common import *
def testcase(x):
    """Solve one grid case and print 'Case #x: YES/NO'.

    The pattern is achievable iff every cell equals the maximum of its row
    or the maximum of its column.
    """
    n, m = readintegers()
    grid = [readintegers() for _ in range(n)]
    # Track maxima starting from 0, matching the original's initialisation.
    row_maxes = [0] * n
    col_maxes = [0] * m
    for i in range(n):
        for j in range(m):
            cell = grid[i][j]
            row_maxes[i] = max(row_maxes[i], cell)
            col_maxes[j] = max(col_maxes[j], cell)
    feasible = all(
        grid[i][j] >= row_maxes[i] or grid[i][j] >= col_maxes[j]
        for i in range(n)
        for j in range(m)
    )
    if feasible:
        writeline("Case #%d: YES" % x)
    else:
        writeline("Case #%d: NO" % x)
# Drive all cases through the shared Code Jam harness from `common`.
run_tests(testcase)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
6801cdfd546e4d6267fe06cd6e03cbe17529a639
|
bde607d5c75179861cd1bae62fa40861b984ee4b
|
/datalive/datalive_cust_veh/migrations/0038_insurance_insurance_accident_phone.py
|
17b494fe3e8628616fe39944ae984d06b99775da
|
[] |
no_license
|
simba999/Geofence-project
|
1658f1473b1b2a554607596872448928c1ccac77
|
7c01b55ff0ff3537fd63ea10182b12c5e1f107fa
|
refs/heads/master
| 2021-03-31T00:51:57.811563
| 2018-03-08T19:22:12
| 2018-03-08T19:22:12
| 124,434,299
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-12-12 21:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add the optional CharField
    `insurance_accident_phone` (max_length=14) to the `insurance` model."""
    dependencies = [
        ('datalive_cust_veh', '0037_insurance_address'),
    ]
    operations = [
        migrations.AddField(
            model_name='insurance',
            name='insurance_accident_phone',
            # blank=True/null=True: the accident phone number is optional.
            field=models.CharField(blank=True, max_length=14, null=True),
        ),
    ]
|
[
"oliverking8985@yahoo.com"
] |
oliverking8985@yahoo.com
|
718d72ed56398bf36ff79851a900d5fc1dc3117c
|
1864af9eda58307024acbf7fe5d5f2f39f435e44
|
/module_1/python/reverse_list.py
|
c8c318922bfd95015188f65af585098c10945cc5
|
[] |
no_license
|
vprusso/6-Weeks-to-Interview-Ready
|
c393bbfe071d97cba12f0f0668e53a25fb25986f
|
8105e1b20bf450a03a9bb910f344fc140e5ba703
|
refs/heads/master
| 2021-08-11T04:48:34.252178
| 2020-08-09T22:54:55
| 2020-08-09T22:54:55
| 210,997,768
| 6
| 2
| null | 2019-09-26T04:12:44
| 2019-09-26T04:12:44
| null |
UTF-8
|
Python
| false
| false
| 1,578
|
py
|
"""
Title: Reverse linked list.
Problem: Reverse a singly linked list.
Execution: python reverse_list.py
"""
import unittest
class ListNode:
    """A single node of a singly linked list."""

    def __init__(self, x):
        # Store the payload; the link stays unset until the caller wires it.
        self.val, self.next = x, None
def reverse_list_iterative(head: ListNode) -> ListNode:
    """Reverse a singly linked list in place and return the new head."""
    new_head = None
    while head:
        following = head.next   # remember the rest of the original list
        head.next = new_head    # point the current node backwards
        new_head = head         # the reversed prefix now starts here
        head = following        # step forward in the original list
    return new_head
def reverse_list_recursive(head: ListNode) -> ListNode:
    """Reverse a singly linked list recursively; returns the reversed head."""
    # An empty list or a single node is already its own reversal.
    if head is None or head.next is None:
        return head
    new_head = reverse_list_recursive(head.next)
    # head.next is now the tail of the reversed remainder; append head to it.
    head.next.next = head
    head.next = None
    return new_head
def print_list(head: ListNode) -> list:
    """Collect the values of a linked list into a plain Python list."""
    values = []
    node = head
    while node is not None:
        values.append(node.val)
        node = node.next
    return values
class TestReverseList(unittest.TestCase):
    """Unit test for reverse list."""

    def test_1(self):
        """Reversing 1->2->3->4->5 yields 5->4->3->2->1."""
        head = ListNode(1)
        tail = head
        for value in (2, 3, 4, 5):
            tail.next = ListNode(value)
            tail = tail.next
        self.assertEqual(print_list(reverse_list_iterative(head)), [5, 4, 3, 2, 1])
if __name__ == '__main__':
    # Run the unit tests when executed as a script.
    unittest.main()
|
[
"vincentrusso1@gmail.com"
] |
vincentrusso1@gmail.com
|
831fa7afce258eaefe2d09b668885866549fd4c9
|
99aa9b2be5199bf1b2f670bc9bb1a5bc7cec1c89
|
/BFS_topological/L207_Course_schedule.py
|
5fdbab7f22cbf7d4e144ae1cf2db55a5f348e331
|
[] |
no_license
|
SimonFans/LeetCode
|
5196e85dec886b18cb2350419a4a2ae3c751966c
|
0a34a19bb0979d58b511822782098f62cd86b25e
|
refs/heads/master
| 2023-02-08T00:49:30.916655
| 2023-01-31T06:32:32
| 2023-01-31T06:32:32
| 145,938,196
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,585
|
py
|
There are a total of n courses you have to take, labeled from 0 to n-1.
Some courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]
Given the total number of courses and a list of prerequisite pairs, is it possible for you to finish all courses?
Example 1:
Input: 2, [[1,0]]
Output: true
Explanation: There are a total of 2 courses to take.
To take course 1 you should have finished course 0. So it is possible.
Example 2:
Input: 2, [[1,0],[0,1]]
Output: false
Explanation: There are a total of 2 courses to take.
To take course 1 you should have finished course 0, and to take course 0 you should
also have finished course 1. So it is impossible.
Note:
The input prerequisites is a graph represented by a list of edges, not adjacency matrices. Read more about how a graph is represented.
You may assume that there are no duplicate edges in the input prerequisites.
class Solution(object):
    def canFinish(self, numCourses, prerequisites):
        """
        Decide whether every course can be completed given prerequisite pairs.

        Uses Kahn's algorithm (BFS topological sort): repeatedly take courses
        whose remaining in-degree is zero and release their dependents. All
        courses are finishable iff no cycle blocks the topological order.

        :type numCourses: int
        :type prerequisites: List[List[int]]  # [course, prerequisite] pairs
        :rtype: bool
        """
        # Local import keeps the method self-contained (the original relied on
        # a `collections` import that is not visible in this module).
        import collections

        # in_degree[c] = number of prerequisites still unmet for course c.
        in_degree = [0] * numCourses
        # dependents[p] = courses that become closer to available once p is taken.
        dependents = collections.defaultdict(list)
        for course, prereq in prerequisites:
            in_degree[course] += 1
            dependents[prereq].append(course)

        # Seed the queue with courses that need no prerequisites at all.
        queue = collections.deque(c for c in range(numCourses) if in_degree[c] == 0)

        taken = 0
        while queue:
            course = queue.popleft()
            taken += 1
            for nxt in dependents[course]:
                in_degree[nxt] -= 1
                if in_degree[nxt] == 0:
                    queue.append(nxt)

        # Bug fix: the original returned True/False after inspecting only
        # in_degree[0], so cycles not touching course 0 were missed. The
        # correct check is that the topological order covered every course.
        return taken == numCourses
|
[
"noreply@github.com"
] |
SimonFans.noreply@github.com
|
44deee25bd721ba3b1e5f31587692afa7f3bce16
|
3c73609eea12d6784ffc0be5acc6994cda19dc57
|
/Codeforces Difficulty 500-700/749ABachgoldProb.py
|
4cefb4124f4f1af645594d719bbdb2a464837732
|
[] |
no_license
|
TanveshT/Competitive-Programming
|
0cf7a8ebc20a74cb6fd8505e67fbfec5bac6b8c2
|
47acc0a2af2711c86bb0da06e961677a8ec1e7d3
|
refs/heads/master
| 2022-12-19T01:44:46.033633
| 2020-09-25T06:57:23
| 2020-09-25T06:57:23
| 258,095,824
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
def max_prime_summands(n):
    """Split n (n >= 2) into the maximum possible number of prime summands.

    Using as many 2s as possible maximizes the count; for odd n one summand
    must be a 3 instead. Returns the list of summands (len == n // 2).
    """
    if n % 2 == 0:
        return [2] * (n // 2)
    # Odd n: (n - 3) // 2 twos plus a single 3 keeps the count at n // 2.
    return [2] * ((n - 3) // 2) + [3]


if __name__ == "__main__":
    summands = max_prime_summands(int(input()))
    print(len(summands))
    # Bug fix: the original concatenated ' 3' onto the joined twos, which
    # printed a leading space for n == 3 (output " 3" instead of "3").
    print(' '.join(map(str, summands)))
|
[
"tanveshtakawale26@gmail.com"
] |
tanveshtakawale26@gmail.com
|
7c68a69b658aa325154cbd035c1f844f20806ee6
|
48a7b266737b62da330170ca4fe4ac4bf1d8b663
|
/molsysmt/form/pytraj_Trajectory/to_molsysmt_Topology.py
|
296fd312de31fa18840f59e7be11dc3efb032788
|
[
"MIT"
] |
permissive
|
uibcdf/MolSysMT
|
ddab5a89b8ec2377f383884c5169d147cab01322
|
c3d713ba63db24eb8a2426115cf8d9cb3665d225
|
refs/heads/main
| 2023-08-08T15:04:16.217967
| 2023-08-04T05:49:56
| 2023-08-04T05:49:56
| 137,937,243
| 15
| 3
|
MIT
| 2023-06-04T20:27:06
| 2018-06-19T19:38:44
|
Python
|
UTF-8
|
Python
| false
| false
| 379
|
py
|
from molsysmt._private.digestion import digest
@digest(form='pytraj.Trajectory')
def to_molsysmt_Topology(item, atom_indices='all'):
    """Convert a pytraj.Trajectory into a molsysmt Topology.

    The trajectory is first reduced to a pytraj.Topology, which is then
    converted by the sibling ``pytraj_Topology`` converter.
    """

    from . import to_pytraj_Topology
    # Bug fix: the sibling converter shares this function's name, and the
    # original imported it unaliased but then called the (undefined) name
    # `pytraj_Topology_to_molsysmt_Topology`, raising NameError. Import it
    # under that alias so the call resolves.
    from ..pytraj_Topology import to_molsysmt_Topology as pytraj_Topology_to_molsysmt_Topology

    tmp_item = to_pytraj_Topology(item)
    tmp_item = pytraj_Topology_to_molsysmt_Topology(tmp_item, atom_indices=atom_indices)

    return tmp_item
|
[
"prada.gracia@gmail.com"
] |
prada.gracia@gmail.com
|
ad3bccd5f5c0938d4ef0c702293960cec7e513ff
|
45bd50e1e63346f77a46c2a75d9bda8c19fe97de
|
/Basic Concept Zoning World Prelim 2 2019/Test Scripts/Zoning/ZoningShow.py
|
fad8d27b7d260c9f54284db7440e4057d7a3785f
|
[] |
no_license
|
Hackin7/Space-Through-CoSpace
|
40a5d8fad18c392dc3f2ae6f3f83f8ae290166a8
|
d93541e8f0cd8b331ca8a179617ca4204ab43e3b
|
refs/heads/master
| 2022-11-13T01:35:49.052934
| 2020-06-29T08:54:36
| 2020-06-29T08:54:36
| 222,223,987
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,671
|
py
|
from tkinter import *
from tkinter.filedialog import askopenfilename
from PIL import Image
from PIL import ImageTk
from PIL import ImageGrab
from ctypes import windll #Makes app DPI Aware
user32 = windll.user32
user32.SetProcessDPIAware()
###Drawing Path
text_ids=[]
import sys,os
from sys import stdin, stdout
sys.path.append('../Python_Libraries/')
from ImageCalculation import *
#from PreprocessedMap import data
from hppToMap import readMapData
data = readMapData("../../World2/w2map.hpp")
def imageObj(im, pixels):
    """Render the robot path (taken from the global ``stack``) onto a map image.

    NOTE(review): ``stack`` is not defined in this module — presumably it is
    exported by the ``ImageCalculation`` star-import; confirm before reuse.
    ``im``/``pixels`` are currently unused because the preprocessed ``data``
    map is drawn instead (see the commented-out alternative below).
    """
    print(stack)
    # The first four stack entries feed cppInput, producing the raw path lines.
    lines = cppInput(stack[0],stack[1],stack[2],stack[3])
    coordinates = inputToCoordinates(lines)
    newMap = data#switchYValues(mapData(im.size, pixels))
    newImg, newPixels = convertBack(newMap)
    # Overlay the path coordinates in dark yellow (RGB 100, 100, 0).
    addCoordinates(coordinates, newPixels,(100,100,0))
    return newImg
if __name__ == "__main__":
    root = Tk()
    # Rebuild the displayable map image from the preprocessed map data.
    im, pixels = convertBack(data)
    widths, heights = im.size
    img=ImageTk.PhotoImage(im)

    #setting up a tkinter canvas with scrollbars
    frame = Frame(root, bd=2, relief=SUNKEN,height=heights,width=widths)
    frame.grid_rowconfigure(0, weight=1)
    frame.grid_columnconfigure(0, weight=1)
    canvas = Canvas(frame, bd=0)
    canvas.grid(row=0, column=0, sticky=N+S+E+W)
    frame.pack(fill=BOTH,expand=1)

    #adding the image
    canvasImage = canvas.create_image(0,0,image=img,anchor="nw")
    # canvas.config(scrollregion=canvas.bbox(ALL))

    # zoneMapping maps a (x, y) world coordinate to its zone number; zones
    # records each distinct zone id in order of first appearance.
    zoneMapping = {}
    zones = []
    with open("zone.txt") as f:
        for line in f:
            if " " in line:
                x,y,node=map(int,line.split())
                zoneMapping[x,y] = node
                # Scale world coordinates (360 x 270 grid) to canvas pixels;
                # y is flipped because the image origin is the top-left corner.
                x = x/(360/widths)
                y = (heights-y)/(270/heights)
                if node not in zones:
                    zones.append(node)
                    # Label each zone on the canvas the first time it is seen.
                    # NOTE(review): nesting reconstructed — confirm the label
                    # is meant to be drawn once per zone, not once per cell.
                    text_id = canvas.create_text(x-15,y-10,text=str(node),font = "Times 20 bold")
    print(zones)

    # Emit the full 360x270 zone lookup table as a C++ header for the simulator.
    with open("../../World2/zones.hpp", "w") as f:
        w, h = 360,270
        zoneData = [[0 for j in range(h)] for i in range(w)]
        for i in range(w):
            for j in range(h):
                try:
                    zoneData[i][j] = zoneMapping[i,j]
                except: pass#print("failed:",i,j)
        print(len(zoneData), len(zoneData[0]))
        # Translate Python list syntax into a C++ brace initializer.
        f.write(f"int zones[width][height] = {str(zoneData).replace('[','{').replace(']','}')};")

    #mouseclick event
    def printZone(event):
        # Convert the clicked canvas pixel back into world coordinates and
        # report the zone it belongs to.
        x = round(event.x/(widths/360))
        y = round((heights-event.y)/(heights/270))
        print("Zone No:",zoneMapping[x,y])
        pass
    canvas.bind("<Button 1>",printZone)

    root.mainloop()
|
[
"zunmun@gmail.com"
] |
zunmun@gmail.com
|
188553b42af704e15e28d627a6b689ddccdb9a8b
|
b391498124fdcaef989bf3ebafffb0df43e3e07f
|
/pygccxml-0.8.2/unittests/decl_string_tester.py
|
2e539a882efae0042f2ee800be1a0a8f58ad8294
|
[
"BSL-1.0"
] |
permissive
|
glehmann/WrapITK-unstable
|
9a0dd9d387ecd59c9439465dcc32cca552e14576
|
402fc668f1f3c3dd57d0751a61efa3b1625d238b
|
refs/heads/master
| 2021-01-10T22:02:04.715926
| 2008-05-25T16:53:07
| 2008-05-25T16:53:07
| 3,272,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,138
|
py
|
# Copyright 2004 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import unittest
import autoconfig
import parser_test_case
import pygccxml
from pygccxml.utils import *
from pygccxml.parser import *
from pygccxml.declarations import *
class tester_t( parser_test_case.parser_test_case_t ):
    """Verify that the decl_string pygccxml generates for calldefs is valid C++.

    Each test locates a declaration in ``declarations_calldef.hpp``, embeds
    its ``decl_string`` into a small C++ snippet, and re-parses that snippet
    with GCC-XML; a parse failure means the generated string was malformed.
    """
    COMPILATION_MODE = COMPILATION_MODE.ALL_AT_ONCE

    def __init__(self, *args ):
        parser_test_case.parser_test_case_t.__init__( self, *args )
        # Header providing the declarations under test.
        self.header = os.path.join( autoconfig.data_directory, 'declarations_calldef.hpp' )
        # %s is replaced by a decl_string; the result must parse cleanly.
        self.template = """
        //test generated declaration string using gcc(xml) compiler
        #include "declarations_calldef.hpp"
        void test_generated_decl_string( %s );
        """

    def test_member_function(self):
        """decl_string of a member function must re-parse without errors."""
        declarations = parse( [self.header], self.config )
        member_inline_call = find_declaration( declarations, type=member_function_t, name='member_inline_call' )
        self.failUnless( member_inline_call, "unable to find 'member_inline_call' function" )
        decls = parse_string( self.template % member_inline_call.decl_string, self.config )
        self.failUnless( decls, "Created decl_string for member function containes mistake" )

    def test_free_function(self):
        """decl_string of a free function (with default args) must re-parse."""
        declarations = parse( [self.header], self.config )
        return_default_args = find_declaration( declarations, type=free_function_t, name='return_default_args' )
        self.failUnless( return_default_args, "unable to find 'return_default_args' function" )
        decls = parse_string( self.template % return_default_args.decl_string, self.config )
        self.failUnless( decls, "Created decl_string for global function containes mistake" )
def create_suite():
    """Build the unittest suite holding every decl_string test."""
    return unittest.TestSuite([unittest.makeSuite(tester_t)])
def run_suite():
    """Execute the full suite with a verbose text runner."""
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(create_suite())
if __name__ == "__main__":
    # Run the suite when the module is executed directly.
    run_suite()
|
[
"gaetan.lehmann@jouy.inra.fr"
] |
gaetan.lehmann@jouy.inra.fr
|
761792f87be2af9725838fd2e45005f8cbb7e3b7
|
c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce
|
/flask/flaskenv/Lib/site-packages/tensorflow/python/keras/api/_v2/keras/mixed_precision/experimental/__init__.py
|
557964bda8534e5d71a39df895b2a0954a2758d7
|
[] |
no_license
|
AhsonAslam/webapi
|
54cf7466aac4685da1105f9fb84c686e38f92121
|
1b2bfa4614e7afdc57c9210b0674506ea70b20b5
|
refs/heads/master
| 2020-07-27T06:05:36.057953
| 2019-09-17T06:35:33
| 2019-09-17T06:35:33
| 208,895,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:a99d880d11e16bc170b4e38a2ed534d30a03cdcd4c9dc919e44db72fb592db6d
size 562
|
[
"github@cuba12345"
] |
github@cuba12345
|
2b6a3dd2306c6267bd3b4723905ad8e96699bc58
|
6a8d047b4502507c67120a0a32640c6a3e60d8a5
|
/apps/profiles/migrations/0015_auto_20171128_2324.py
|
318ad8f57eac31c224e44335024c2a8c50376817
|
[] |
no_license
|
dwebdevcore/BoardDirector_dashboard
|
320f110d7581c065920b7607ef06a457851c4bb4
|
7cd2b2abe1c660531a805d84930c8a6183b863b6
|
refs/heads/master
| 2020-05-26T05:32:37.501642
| 2019-05-22T22:33:25
| 2019-05-22T22:33:25
| 188,122,429
| 10
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 562
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-11-28 20:24
from __future__ import unicode_literals
import common.utils
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``Membership.custom_role_name`` to a blank-able CharField(max_length=50)."""

    dependencies = [
        ('profiles', '0014_auto_20170915_0653'),
    ]

    operations = [
        migrations.AlterField(
            model_name='membership',
            name='custom_role_name',
            field=models.CharField(blank=True, max_length=50, verbose_name='custom role name'),
        ),
    ]
|
[
"dwebdevcore@gmail.com"
] |
dwebdevcore@gmail.com
|
eb7743840173ebacd9715a3594e8cbe849d2e20c
|
eb7047d5a8c00d4370a55c2806a2f051287b452d
|
/tests/pytests/problems/TestSingleObserver.py
|
b8d8b92a1d2272e25463babe394d08632e7fed5a
|
[
"MIT"
] |
permissive
|
mousumiroy-unm/pylith
|
8361a1c0fbcde99657fd3c4e88678a8b5fc8398b
|
9a7b6b4ee8e1b89bc441bcedc5ed28a3318e2468
|
refs/heads/main
| 2023-05-27T18:40:57.145323
| 2021-06-09T19:32:19
| 2021-06-09T19:32:19
| 373,931,160
| 0
| 0
|
MIT
| 2021-06-04T18:40:09
| 2021-06-04T18:40:09
| null |
UTF-8
|
Python
| false
| false
| 1,384
|
py
|
#!/usr/bin/env nemesis
#
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ======================================================================
#
# @file tests/pytests/problems/TestSingleObserver.py
#
# @brief Unit testing of Python SingleObserver object.
import unittest
from pylith.testing.UnitTestApp import TestAbstractComponent
from pylith.problems.SingleObserver import (SingleSolnObserver, SinglePhysicsObserver)
class TestSingleSolnObserver(TestAbstractComponent):
    """Unit testing of SingleSolnObserver object.
    """
    # Component class exercised by the generic TestAbstractComponent checks.
    _class = SingleSolnObserver
class TestSinglePhysicsObserver(TestAbstractComponent):
    """Unit testing of SinglePhysicsObserver object.
    """
    # Component class exercised by the generic TestAbstractComponent checks.
    _class = SinglePhysicsObserver
if __name__ == "__main__":
    # Gather every test case defined above into a single suite and run it
    # with a verbose text runner.
    suite = unittest.TestSuite()
    for cls in (TestSingleSolnObserver, TestSinglePhysicsObserver):
        suite.addTest(unittest.makeSuite(cls))
    unittest.TextTestRunner(verbosity=2).run(suite)
# End of file
|
[
"baagaard@usgs.gov"
] |
baagaard@usgs.gov
|
794a613a9d0f5d34d09ddbb7cad78d49e670c820
|
4e45d134e09af47025deae667805f0eb79d4f516
|
/neuroConstruct/pythonScripts/RunTestsL5PC.py
|
f275b53a4451da444342a7f07e133d5e957a5ed2
|
[
"MIT"
] |
permissive
|
OpenSourceBrain/L5bPyrCellHayEtAl2011
|
46c5721e21eed58cd23e96ced0cdafae58315473
|
75da6c136254b50b96dd9156b27244c083313156
|
refs/heads/master
| 2023-08-16T10:36:36.618573
| 2023-03-30T12:31:13
| 2023-03-30T12:31:13
| 8,250,486
| 5
| 1
|
NOASSERTION
| 2023-09-06T12:42:46
| 2013-02-17T12:48:49
|
AGS Script
|
UTF-8
|
Python
| false
| false
| 2,678
|
py
|
#
#
# File to test current configuration of project
#
# Author: Padraig Gleeson
#
# This file has been developed as part of the neuroConstruct project
# This work has been funded by the Wellcome Trust
#
#
import sys
import os
try:
from java.io import File
except ImportError:
print "Note: this file should be run using nC.bat -python XXX.py' or 'nC.sh -python XXX.py'"
print "See http://www.neuroconstruct.org/docs/python.html for more details"
quit()
sys.path.append(os.environ["NC_HOME"]+"/pythonNeuroML/nCUtils")
import ncutils as nc # Many useful functions such as SimManager.runMultipleSims found here
projFile = File("../L5bPyrCellHayEtAl2011.ncx")
############## Main settings ##################
# Simulation configurations within the neuroConstruct project to run.
simConfigs = []
simConfigs.append("Default Simulation Configuration")

simDt = 0.001  # integration timestep — presumably ms (neuroConstruct default); confirm

simulators = ["NEURON"]

# Use NEURON's variable-timestep integration with the tolerance below.
varTimestepNeuron = True
varTimestepTolerance = 0.00001

plotSims = True
plotVoltageOnly = True
runInBackground = True
analyseSims = True
verbose = True

#############################################
def testAll(argv=None):
    """Run the configured simulations and compare spike times to reference values.

    Returns the report produced by SimulationManager.checkSims. (Python 2 /
    Jython script — run via nC.sh/nC.bat, see module header.)
    """
    if argv is None:
        argv = sys.argv

    print "Loading project from "+ projFile.getCanonicalPath()

    simManager = nc.SimulationManager(projFile,
                                      verbose = verbose)

    # Launch one simulation per (simConfig, simulator) combination using the
    # module-level settings defined above.
    simManager.runMultipleSims(simConfigs = simConfigs,
                               simDt = simDt,
                               simulators = simulators,
                               runInBackground = runInBackground,
                               varTimestepNeuron = varTimestepNeuron,
                               varTimestepTolerance = varTimestepTolerance)

    # Reload the finished simulations for plotting/analysis.
    simManager.reloadSims(plotVoltageOnly = plotVoltageOnly,
                          analyseSims = analyseSims)

    # Reference spike times for cell CellGroup_1_0 — presumably in ms; these
    # were recorded from a known-good run of this configuration.
    times = [711.84126, 720.02236, 730.20796, 745.43043, 827.08991, 929.49095, 1027.7015, \
             1122.267, 1214.1016, 1303.3449, 1390.8119, 1476.6185, 1561.1513, 1644.7155, \
             1727.3263, 1809.1266, 1890.1703, 1971.0897, 2050.8026, 2130.5578, 2210.0852, \
             2289.2517, 2368.1543, 2446.7049, 2524.948, 2603.1542, 2681.297]

    spikeTimesToCheck = {'CellGroup_1_0': times}
    # Allowed deviation per spike when comparing against the reference times.
    spikeTimeAccuracy = 0.6

    report = simManager.checkSims(spikeTimesToCheck = spikeTimesToCheck,
                                  spikeTimeAccuracy = spikeTimeAccuracy)

    print report

    return report
if __name__ == "__main__":
    # Run the whole regression check when executed as a script.
    testAll()
|
[
"p.gleeson@gmail.com"
] |
p.gleeson@gmail.com
|
e4e2d58658dd6c7e4adf95dd2a78fe739427c58f
|
63f917864d85f0f9e810cbb4e6163f48611a8b3d
|
/home_content/admin.py
|
dcbef84ea0014ed258e07963b93519ce4277c9fd
|
[] |
no_license
|
davidraywilson/suit_materialized
|
37aa521d52f8dd746b55b121262501147dffb95c
|
035405defedd5ee8257b42aac82749794080af4f
|
refs/heads/master
| 2021-01-18T14:05:01.797452
| 2015-06-03T02:03:55
| 2015-06-03T02:03:55
| 32,526,877
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,052
|
py
|
from django.contrib import admin
from home_content.models import HomeSection, Billboard, MiniBillboard
class SectionAdmin(admin.ModelAdmin):
    """Admin configuration for HomeSection entries."""
    model = HomeSection
    # Changelist columns; order and publish state are editable inline.
    list_display = ('name', 'order', 'is_published')
    list_editable = ('order', 'is_published')
    fieldsets = (
        (None, {
            'fields': ('template', 'name', 'order', 'is_published')
        }),
        (None, {
            'fields': ('image_background',)
        }),
    )

    class Media:
        # Section-specific admin behaviour plus the TinyMCE rich-text editor.
        js = [
            '/static/admin_js/home_section.js',
            '/static/admin_js/tinymce/tinymce.min.js',
            '/static/admin_js/tinymce_init.js'
        ]
class BillboardAdmin(admin.ModelAdmin):
    """Admin configuration for Billboard entries."""
    model = Billboard
    # Changelist columns, filters, and the inline-editable fields.
    list_display = ('name', 'order', 'publish_date', 'is_published')
    list_filter = ('publish_date', 'is_published')
    list_editable = ('order', 'is_published',)
    fieldsets = (
        (None, {
            'classes': (),
            'fields': ('name', 'order', 'image', 'header', 'sub_header', 'publish_date', 'expire_date', 'is_published')
        }),
    )

    class Media:
        # TinyMCE rich-text editor assets for the change form.
        js = [
            '/static/admin_js/tinymce/tinymce.min.js',
            '/static/admin_js/tinymce_init.js'
        ]
class MiniBillboardAdmin(admin.ModelAdmin):
    """Admin configuration for MiniBillboard entries."""
    model = MiniBillboard
    # Changelist columns, filters, and the inline-editable fields.
    list_display = ('name', 'order', 'publish_date', 'is_published')
    list_filter = ('publish_date', 'is_published')
    list_editable = ('order', 'is_published',)
    fieldsets = (
        (None, {
            'classes': (),
            'fields': ('name', 'order', 'publish_date', 'expire_date', 'is_published')
        }),
        (None, {
            'classes': (),
            # Content fields are grouped separately from scheduling fields.
            'fields': ('size', 'image', 'video', 'link')
        }),
    )

    class Media:
        # TinyMCE rich-text editor assets for the change form.
        js = [
            '/static/admin_js/tinymce/tinymce.min.js',
            '/static/admin_js/tinymce_init.js'
        ]
# Attach each admin configuration to its model.
admin.site.register(HomeSection, SectionAdmin)
admin.site.register(Billboard, BillboardAdmin)
admin.site.register(MiniBillboard, MiniBillboardAdmin)
|
[
"davidraywilson@live.com"
] |
davidraywilson@live.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.