blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6f05788dedfb6545482ba4bd9b8acffa93ddfb1e | 29c476c037a05170ff2ddef8edd07014d3751614 | /0x03-python-data_structures/10-divisible_by_2.py | 787cefb10383f36ee67ca2a984d31d20a27f6684 | [] | no_license | hacheG/holbertonschool-higher_level_programming | a0aaddb30665833bd260766dac972b7f21dda8ea | 535b1ca229d7cf61124a128bb5725e5200c27fbc | refs/heads/master | 2020-07-22T23:09:27.486886 | 2020-02-13T19:41:34 | 2020-02-13T19:41:34 | 207,360,462 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | #!/usr/bin/python3
def divisible_by_2(my_list=[]):
    """Return a new list of booleans, one per element of ``my_list``.

    Element ``i`` of the result is True when ``my_list[i]`` is evenly
    divisible by 2, False otherwise.

    Note: the mutable ``[]`` default is kept for signature compatibility;
    it is safe here because the argument is only iterated, never mutated.
    """
    return [i % 2 == 0 for i in my_list]
| [
"943@holbertonschool.com"
] | 943@holbertonschool.com |
a452e26672a218523c0d1f3f356856adc98f25b9 | b6553d9371a3612c992cfe0dba678cbc16c6812b | /a-concurrent/http_request.py | 36e4bf8acdab802ce4473dd7627fe7ccf2c97506 | [] | no_license | drgarcia1986/bev-py-concurrency | 071ef6f899c7c892eeb446c024b67bfa56d6a83c | 4159b9acb82ade59c9b7d1b5ae49d764fddf5430 | refs/heads/master | 2021-01-09T05:50:37.331265 | 2017-02-03T15:30:24 | 2017-02-03T15:30:24 | 80,840,883 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | import asyncio
import aiohttp
async def make_get(delay):
    """Issue a GET to httpbin's /delay/<delay> endpoint.

    Returns a ``(delay, ok)`` tuple where ``ok`` is True when the server
    answered with HTTP 200.

    NOTE(review): the module-level ``aiohttp.get`` helper only exists in
    pre-3.0 aiohttp releases (removed in favour of ``ClientSession``) --
    confirm the pinned aiohttp version before reusing this code.
    """
    response = await aiohttp.get('https://httpbin.org/delay/{}'.format(delay))
    response.close()
    return delay, response.status == 200
async def make_requests(*delays):
    """Run one ``make_get`` coroutine per delay, concurrently.

    Results come back in the same order as the supplied delays.
    """
    return await asyncio.gather(*(make_get(d) for d in delays))
if __name__ == "__main__":
    # Demo driver: fetch three delayed responses concurrently and print
    # each (delay, succeeded) pair, then a completion marker.
    loop = asyncio.get_event_loop()
    responses = loop.run_until_complete(make_requests(5, 2, 3))
    for response in responses:
        print(response)
    print('Done')
| [
"drgarcia1986@gmail.com"
] | drgarcia1986@gmail.com |
907b45c3d962efdc0f0ec839c4a198f7fa84df2e | 7ac82627034f262d110112112bd3f7e430f3fd90 | /CodeEdgeDetection.py | 13accf650198daa9d37272e2f4a0fbf31bbe2ac1 | [] | no_license | saguileran/Codigos | bca7172e19aefb5ed9ec0720991cafff078278cc | f49a72f57de3769d06ff4a09df07e9d25d4dde29 | refs/heads/master | 2020-06-01T16:53:27.446944 | 2019-06-27T19:26:57 | 2019-06-27T19:26:57 | 190,854,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | import cv2
import numpy as np
from matplotlib import pyplot as plt
# Edge-detection demo: compare Laplacian and Sobel gradients on one image
# in a 2x2 matplotlib grid.
# NOTE(review): cv2.imread returns None when 'Camera.jpg' is missing, which
# would make cvtColor below raise -- confirm the image ships with the script.
# loading image
#img0 = cv2.imread('SanFrancisco.jpg',)
img0 = cv2.imread('Camera.jpg',)
# converting to gray scale
gray = cv2.cvtColor(img0, cv2.COLOR_BGR2GRAY)
# remove noise (3x3 Gaussian blur before differentiation)
img = cv2.GaussianBlur(gray,(3,3),0)
# convolute with proper kernels (64-bit float output keeps negative edges)
laplacian = cv2.Laplacian(img,cv2.CV_64F)
sobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5) # x
sobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=5) # y
# 2x2 grid: original, Laplacian, Sobel-X, Sobel-Y (axes ticks hidden)
plt.subplot(2,2,1),plt.imshow(img,cmap = 'gray')
plt.title('Original'), plt.xticks([]), plt.yticks([])
plt.subplot(2,2,2),plt.imshow(laplacian,cmap = 'gray')
plt.title('Laplacian'), plt.xticks([]), plt.yticks([])
plt.subplot(2,2,3),plt.imshow(sobelx,cmap = 'gray')
plt.title('Sobel X'), plt.xticks([]), plt.yticks([])
plt.subplot(2,2,4),plt.imshow(sobely,cmap = 'gray')
plt.title('Sobel Y'), plt.xticks([]), plt.yticks([])
plt.show()
# Taken from: https://www.bogotobogo.com/python/OpenCV_Python/python_opencv3_Image_Gradient_Sobel_Laplacian_Derivatives_Edge_Detection.php
| [
"root@beaglebone.localdomain"
] | root@beaglebone.localdomain |
266c3267d5b874ecb5dda55196cfc42fc8c3ef76 | 29a78032c3b2fdd4722f6c054ab20a5a8cea627c | /studtpy/string2.py | ec6493272f2407e3dd9850cfac2e0d2a611c519b | [] | no_license | jungting20/pythonpro | 838ea188f846b6e1a90f1a7c429f02464b1b0927 | 455dd23132023cb472bab5e8d9ba4a881331db54 | refs/heads/master | 2021-06-27T16:20:54.768172 | 2017-09-16T08:38:19 | 2017-09-16T08:38:19 | 103,737,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | n = 42
f = 7.03
s = 'string cheese'
aa = {'n':42,'f':7.03,'s':'string cheese'}
bb = {'n':40,'f':6.03,'s':'cheese'}
# In the format string below, the indices 0 and 1 name the positional
# arguments passed to .format() (aa and bb), and {0[n]} etc. index into
# those dictionaries by key; ``:<10d`` left-aligns in a 10-char field.
# NOTE: this rebinds the name ``bb`` from a dict to the formatted string.
bb = '{0[n]:<10d} {0[f]:<10f} {0[s]:<10s} {1[n]} {1[f]} {1[s]}'.format(aa,bb)
# Pad the number 1 with leading zeros to width 2 -> '01'.
cc = '{0:0>2d}'.format(1)
print(cc)
| [
"jungting20@gmail.com"
] | jungting20@gmail.com |
6b30f92d6c8692c9de33540170070de26905643f | 4577d8169613b1620d70e3c2f50b6f36e6c46993 | /students/1815745/homework02/program02.py | 5dd1b627308412ffb7923fffd477fb45a221ef02 | [] | no_license | Fondamenti18/fondamenti-di-programmazione | cbaf31810a17b5bd2afaa430c4bf85d05b597bf0 | 031ec9761acb1a425fcc4a18b07884b45154516b | refs/heads/master | 2020-03-24T03:25:58.222060 | 2018-08-01T17:52:06 | 2018-08-01T17:52:06 | 142,419,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,805 | py | '''
Un file di compiti contiene informazioni su un insieme di compiti da eseguire.
Esistono due tipologie di compiti:
- compiti che possono essere eseguiti indipendentemente dagli altri.
- compiti da svolgere solo al termine di un compito preliminare.
I compiti del primo tipo sono codificati nel file mediante una linea che contiene
in sequenza le due sottostringhe "comp" ed "N" (senza virgolette) eventualmente inframmezzate,
precedute e/o seguite da spazi. "N" e' l'ID del compito (un numero positivo).
Compiti del secondo tipo sono codificati nel file mediante due linee di codice.
-- la prima linea, contiene in sequenza le due sottostringhe "comp" ed "N"
(senza virgolette) eventualmente inframmezzate,
precedute e/o seguite da spazi. "N" e' l'ID del compito (un numero positivo).
-- la seconda linea (immediatamente successiva nel file) contiene
in sequenza le due sottostringhe "sub" ed "M" (senza virgolette) eventualmente inframmezzate,
precedute e/o seguite da spazi. "M" e' l'ID del compito preliminare.
il seguente file di compiti contiene informazioni su 4 compiti (con identificativi 1,3,7 e 9).
I compiti con identificativi 1 e 9 possono essere svolti indipendentemente dagli altri mentre i compiti
con identificativo 3 e 7 hanno entrambi un compito preliminare.
comp 3
sub 9
comp1
comp 9
comp 7
sub3
Scrivere la funzione pianifica(fcompiti,insi,fout) che prende in input:
- il percorso di un file (fcompiti)
- un insieme di ID di compiti da cercare (insi)
- ed il percorso di un file (fout)
e che salva in formato JSON nel file fout un dizionario (risultato).
Il dizionario (risultato) dovra' contenere come chiavi gli identificativi (ID) dei compiti
presenti in fcompiti e richiesti nell'insieme insi.
Associata ad ogni ID x del dizionario deve esserci una lista contenente gli identificativi (ID) dei compiti
che bisogna eseguire prima di poter eseguire il compito x richiesto
(ovviamente la lista di un ID di un compito che non richie un compito preliminare risultera' vuota ).
Gli (ID) devono comparire nella lista nell'ordine di esecuzione corretto, dal primo fino a quello precedente a quello richiesto
(ovviamente il primo ID di una lista non vuota corripondera' sempre ad un compito che non richiede un compito preliminare).
Si puo' assumere che:
- se l' ID di un compito che richieda un compito preliminare e' presente in fcompiti
allora anche l'ID di quest'ultimo e' presente in fcompiti
- la sequenza da associare al compito ID del dizionario esiste sempre
- non esistono cicli (compiti che richiedono se' stessi anche indirettamente)
Ad esempio per il file di compiti fcompiti contenente:
comp 3
sub 9
comp1
comp 9
comp 7
sub3
al termine dell'esecuzione di pianifica(fcompiti,{'7','1','5'}, 'a.json')
il file 'a.json' deve contenere il seguente dizionario
{'7':['9','3'],'1':[]}
Per altri esempi vedere il file grade02.txt
AVVERTENZE:
non usare caratteri non ASCII, come le lettere accentate;
non usare moduli che non sono nella libreria standard.
NOTA: l'encoding del file e' 'utf-8'
ATTENZIONE: Se un test del grader non termina entro 10 secondi il punteggio di quel test e' zero.
'''
import json
def pianifica(fcompiti, insi, fout):
    """Write to ``fout`` (JSON) the prerequisite chains of requested tasks.

    ``fcompiti`` is a task file where each "comp N" line declares task N and
    an immediately following "sub M" line (if any) names N's prerequisite M.
    ``insi`` is a set of task IDs (strings) to look up.  The output dict maps
    each requested ID found in the file to the list of task IDs that must be
    executed before it, in execution order (first task first).

    Fixes two defects of the previous version: prerequisites are now
    followed transitively (not just one level), and the chain keeps the
    true execution order instead of being sorted lexicographically.
    """
    # Map each task ID to the ID of its prerequisite task (None if none).
    prereq = {}
    last = None  # ID read on the most recent "comp" line, awaiting a "sub"
    with open(fcompiti, encoding='utf-8', mode='rt') as f:
        for line in f:
            if 'comp' in line:
                # The ID is whatever remains once "comp" and spaces go.
                task_id = line.replace('comp', '').strip()
                prereq[task_id] = None
                last = task_id
            elif 'sub' in line and last is not None:
                # A "sub" line refers to the "comp" line directly above it.
                prereq[last] = line.replace('sub', '').strip()
                last = None
    result = {}
    for task_id in insi:
        if task_id not in prereq:
            continue  # requested IDs absent from the file are ignored
        # Walk the prerequisite chain up to an independent task, then
        # reverse so the first task to execute comes first.
        chain = []
        dep = prereq[task_id]
        while dep is not None:
            chain.append(dep)
            dep = prereq[dep]
        chain.reverse()
        result[task_id] = chain
    with open(fout, mode='w') as f:
        json.dump(result, f)
| [
"a.sterbini@gmail.com"
] | a.sterbini@gmail.com |
bc5d19bd2accbef984fe12bf55bf5efc38843cdd | 0e5291f09c5117504447cc8df683ca1506b70560 | /netbox_client/models/virtual_chassis.py | 55a342402aba80391af4f170111c0cc764deb924 | [
"MIT"
] | permissive | nrfta/python-netbox-client | abd0192b79aab912325485bf4e17777a21953c9b | 68ba6dd4d7306513dc1ad38f3ac59122ba4f70a8 | refs/heads/master | 2022-11-13T16:29:02.264187 | 2020-07-05T18:06:42 | 2020-07-05T18:06:42 | 277,121,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,791 | py | # coding: utf-8
"""
NetBox API
API to access NetBox # noqa: E501
OpenAPI spec version: 2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class VirtualChassis(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
      and the value is attribute type.
      attribute_map (dict): The key is attribute name
      and the value is json key in definition.
    """
    # Declared model fields mapped to their swagger type names.
    swagger_types = {
        'id': 'int',
        'master': 'NestedDevice',
        'domain': 'str',
        'tags': 'list[str]',
        'member_count': 'int'
    }
    # Python attribute name mapped to the JSON key used on the wire.
    attribute_map = {
        'id': 'id',
        'master': 'master',
        'domain': 'domain',
        'tags': 'tags',
        'member_count': 'member_count'
    }

    def __init__(self, id=None, master=None, domain=None, tags=None, member_count=None):  # noqa: E501
        """VirtualChassis - a model defined in Swagger"""
        self._id = None
        self._master = None
        self._domain = None
        self._tags = None
        self._member_count = None
        self.discriminator = None
        if id is not None:
            self.id = id
        # ``master`` is required: assigning through the property raises
        # ValueError when it is None.
        self.master = master
        if domain is not None:
            self.domain = domain
        if tags is not None:
            self.tags = tags
        if member_count is not None:
            self.member_count = member_count

    @property
    def id(self):
        """Gets the id of this VirtualChassis.  # noqa: E501

        :return: The id of this VirtualChassis.  # noqa: E501
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this VirtualChassis.

        :param id: The id of this VirtualChassis.  # noqa: E501
        :type: int
        """
        self._id = id

    @property
    def master(self):
        """Gets the master of this VirtualChassis.  # noqa: E501

        :return: The master of this VirtualChassis.  # noqa: E501
        :rtype: NestedDevice
        """
        return self._master

    @master.setter
    def master(self, master):
        """Sets the master of this VirtualChassis.

        :param master: The master of this VirtualChassis.  # noqa: E501
        :type: NestedDevice
        """
        # Required field: reject None explicitly.
        if master is None:
            raise ValueError("Invalid value for `master`, must not be `None`")  # noqa: E501
        self._master = master

    @property
    def domain(self):
        """Gets the domain of this VirtualChassis.  # noqa: E501

        :return: The domain of this VirtualChassis.  # noqa: E501
        :rtype: str
        """
        return self._domain

    @domain.setter
    def domain(self, domain):
        """Sets the domain of this VirtualChassis.

        :param domain: The domain of this VirtualChassis.  # noqa: E501
        :type: str
        """
        # Enforce the schema's 30-character maximum length.
        if domain is not None and len(domain) > 30:
            raise ValueError("Invalid value for `domain`, length must be less than or equal to `30`")  # noqa: E501
        self._domain = domain

    @property
    def tags(self):
        """Gets the tags of this VirtualChassis.  # noqa: E501

        :return: The tags of this VirtualChassis.  # noqa: E501
        :rtype: list[str]
        """
        return self._tags

    @tags.setter
    def tags(self, tags):
        """Sets the tags of this VirtualChassis.

        :param tags: The tags of this VirtualChassis.  # noqa: E501
        :type: list[str]
        """
        self._tags = tags

    @property
    def member_count(self):
        """Gets the member_count of this VirtualChassis.  # noqa: E501

        :return: The member_count of this VirtualChassis.  # noqa: E501
        :rtype: int
        """
        return self._member_count

    @member_count.setter
    def member_count(self, member_count):
        """Sets the member_count of this VirtualChassis.

        :param member_count: The member_count of this VirtualChassis.  # noqa: E501
        :type: int
        """
        self._member_count = member_count

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated escape hatch for models that subclass dict (not the
        # case here, but kept by the generator template).
        if issubclass(VirtualChassis, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, VirtualChassis):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"67791576+underline-bot@users.noreply.github.com"
] | 67791576+underline-bot@users.noreply.github.com |
2064f456c5a1818c85b08b9b443632e186ae9c5d | d8010e5d6abc2dff0abb4e695e74fb23b4f7d558 | /publishing/books/views.py | 5a144558fd6f3fac6f3640f5283f5929042b46fd | [
"MIT"
] | permissive | okoppe8/django-nested-inline-formsets-example | d17f4e1181925d132625e51453cb8c50107ade1c | c0b1abb32f6d09a7732d48e40ea61e21b258e959 | refs/heads/master | 2020-03-09T21:48:51.804513 | 2018-03-04T11:13:55 | 2018-03-04T11:13:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,938 | py | from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.views.generic import DetailView, FormView, ListView, TemplateView
from django.views.generic.detail import SingleObjectMixin
from .forms import PublisherBooksWithImagesFormset
from .models import Publisher, Book, BookImage
class HomeView(TemplateView):
    """Static landing page."""
    template_name = 'books/home.html'
class PublisherListView(ListView):
    """List every Publisher."""
    model = Publisher
    template_name = 'books/publisher_list.html'
class PublisherDetailView(DetailView):
    """Show a single Publisher."""
    model = Publisher
    template_name = 'books/publisher_detail.html'
class PublisherUpdateView(SingleObjectMixin, FormView):
    """Edit a Publisher's books (and their images) via a nested formset.

    SingleObjectMixin supplies the Publisher lookup; FormView drives the
    formset-of-formsets returned by get_form().
    """
    model = Publisher
    template_name = 'books/publisher_update.html'

    def get(self, request, *args, **kwargs):
        # The Publisher we're editing:
        self.object = self.get_object(queryset=Publisher.objects.all())
        return super().get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        # The Publisher we're uploading for:
        self.object = self.get_object(queryset=Publisher.objects.all())
        return super().post(request, *args, **kwargs)

    def get_form(self, form_class=None):
        """
        Use our big formset of formsets, and pass in the Publisher object.
        """
        # ``form_class`` is ignored deliberately: the nested formset is the
        # only form this view ever renders.
        return PublisherBooksWithImagesFormset(
            **self.get_form_kwargs(), instance=self.object)

    def form_valid(self, form):
        """
        If the form is valid, redirect to the supplied URL.
        """
        form.save()
        messages.add_message(
            self.request,
            messages.SUCCESS,
            'Changes were saved.'
        )
        return HttpResponseRedirect(self.get_success_url())

    def get_success_url(self):
        # Back to the detail page of the Publisher that was just edited.
        return reverse('books:publisher_detail', kwargs={'pk': self.object.pk})
| [
"phil@gyford.com"
] | phil@gyford.com |
b7caeef897518daa65994562807bac7471c5cbf2 | 202180e6b7109e9058cce442054d6532c44c796d | /crm/migrations/0010_auto_20161127_1530.py | 1ffb531b34b3e42c78cc8c52836e43f171d5a024 | [
"Apache-2.0"
] | permissive | pkimber/old-crm-migrated-to-gitlab | 230d4eec0cfa794c90fff8c75154d98699820093 | 835e8ff3161404316b7da35cf61e3851763b37b9 | refs/heads/master | 2021-06-15T22:07:08.207855 | 2017-04-27T21:05:53 | 2017-04-27T21:05:53 | 12,544,468 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-27 15:30
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds soft-delete bookkeeping fields
    # (date_deleted, deleted, user_deleted) to the crm.Ticket model.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('crm', '0009_auto_20160215_0844'),
    ]
    operations = [
        migrations.AddField(
            model_name='ticket',
            name='date_deleted',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='ticket',
            name='deleted',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='ticket',
            name='user_deleted',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"code@pkimber.net"
] | code@pkimber.net |
50dba4a8dc1c11bcde73b1533d03a5da993821bf | 2449715d42e89f51bd352b3f3a311ef41f3f04f2 | /exercise/news_test/muitlprocess_queue.py | 97d0eb41eb5347ae036aa4552a0c54a362fa11e7 | [] | no_license | SmallPuddingComing/PycharmProjects | d0d900c94691efe98d7d0e147f2968c28546f61c | 28965e9e2524cb26449a30a237665f404c5aab70 | refs/heads/master | 2021-01-10T15:47:21.968541 | 2016-05-03T11:23:27 | 2016-05-03T11:23:27 | 54,554,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,887 | py | #coding:utf8
'''
Created on 2016-4-1
@author : yr
website from : http://www.jb51.net/article/80115.htm
'''
import multiprocessing
import requests
from multiprocessing.process import Process
#IO密集型任务
#多个进程同时下载多个网页
#利用Queue+多进程
#由于是IO密集型,所以同样可以用threading
'''
1、初始化tasks,里面放着一系列的dest_url
2、同时开启4个进程向tasks中获取任务进行执行
3、处理结果贮存在一个result
'''
def main():
    """Wire up the queues, start the workers, enqueue the URLs and then
    wait for / print the downloaded results."""
    tasks = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    cpu_count = multiprocessing.cpu_count()  # number of workers == number of CPU cores
    create_process(tasks, results, cpu_count)
    add_tasks(tasks)
    parse(tasks, results)
def create_process(tasks, results, cpu_count):
    """Spawn ``cpu_count`` daemon worker processes consuming from ``tasks``."""
    for _ in range(cpu_count):
        p = multiprocessing.Process(target=_worker, args=(tasks, results))
        p.daemon = True  # daemon: workers are terminated when the main process exits
        p.start()
def _download(task):
    '''Download the web page at URL ``task``.

    Returns the response body on HTTP 200; implicitly returns None on any
    other status code or on a connection error (which is printed).
    '''
    try:
        request = requests.get(task)
        if request.status_code == 200:
            return request.text
    except Exception as e:
        print ("connect the url is fail ,{0}".format(str(e)))
def _worker(tasks, results):
    """Worker loop: take a URL from ``tasks``, download it, push the body
    (or None) onto ``results`` and mark the task done.

    NOTE(review): ``task_done()`` is in a ``finally`` block, so it would
    also run if ``tasks.get()`` itself raised -- JoinableQueue then raises
    ValueError for the unmatched task_done; verify that path cannot occur.
    """
    while True:
        try:
            task = tasks.get()
            result = _download(task)
            results.put(result)
        finally:
            tasks.task_done()
def get_urls():
    """Return the list of URLs to download: the same endpoint, ten times."""
    target = "http://httpbin.org/get"
    return [target for _ in range(10)]
def add_tasks(tasks):
    """Feed every URL produced by get_urls() into the shared task queue."""
    for dest_url in get_urls():
        tasks.put(dest_url)
def _parse(results):
print results
def parse(tasks, results):
    """Block until all queued tasks are processed, then print each result.

    A KeyboardInterrupt during the wait is caught and reported instead of
    propagating, so whatever results exist are still drained below.
    """
    try:
        tasks.join()
    except KeyboardInterrupt as e:
        print ("tasks has been stopped ,{0}".format(str(e)))
    # BUG FIX: the original passed the queue object itself to _parse inside
    # ``while not results.empty()`` -- nothing was ever dequeued, so the
    # loop never terminated and printed the Queue repr forever.  Pop each
    # item before handing it to _parse().
    while not results.empty():
        _parse(results.get())
if __name__ == '__main__':
    # Script entry point: run the multi-process download pipeline.
    main()
| [
"1076643147@qq.com"
] | 1076643147@qq.com |
89418b88b36775cd5558bcb8e547933c0d213a39 | 8da91c26d423bacbeee1163ac7e969904c7e4338 | /pyvisdk/do/v_mware_dvs_config_spec.py | afb05925855d68f024da0b88a95e372d9f3d0e83 | [] | no_license | pexip/os-python-infi-pyvisdk | 5d8f3a3858cdd61fb76485574e74ae525cdc7e25 | 1aadea0afbc306d09f6ecb9af0e683dbbf961d20 | refs/heads/master | 2023-08-28T02:40:28.789786 | 2020-07-16T04:00:53 | 2020-07-16T04:00:53 | 10,032,240 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,374 | py |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
# Module-level logger (unused in this factory, kept by the generator).
log = logging.getLogger(__name__)
def VMwareDVSConfigSpec(vim, *args, **kwargs):
    '''This class defines the VMware specific configuration for
    DistributedVirtualSwitch.'''
    # Factory wrapper: create the SOAP object, then apply positional args
    # (in required+optional declaration order) and keyword args as attributes.
    obj = vim.client.factory.create('{urn:vim25}VMwareDVSConfigSpec')
    # do some validation checking...
    # NOTE(review): this guard can never fire (a length sum is always >= 0)
    # and its message disagrees with ``required`` being empty -- generated
    # code artifact; left as-is.
    if (len(args) + len(kwargs)) < 0:
        raise IndexError('Expected at least 1 arguments got: %d' % len(args))
    required = [ ]
    optional = [ 'ipfixConfig', 'linkDiscoveryProtocolConfig', 'maxMtu', 'pvlanConfigSpec',
        'vspanConfigSpec', 'configVersion', 'contact', 'defaultPortConfig',
        'description', 'extensionKey', 'host', 'maxPorts', 'name',
        'numStandalonePorts', 'policy', 'switchIpAddress', 'uplinkPortgroup',
        'uplinkPortPolicy', 'vendorSpecificConfig', 'dynamicProperty', 'dynamicType' ]
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            # Reject attribute names the schema does not declare.
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
    return obj
| [
"jmb@pexip.com"
] | jmb@pexip.com |
b57adcc39b9c6b4abe4965488f9b60417cd6389c | 5c8139f1e57e06c7eaf603bd8fe74d9f22620513 | /PartA/Py反转字符串内容2.py | b6b9af014070855a9b3a79797b620fdd1f3d974e | [] | no_license | madeibao/PythonAlgorithm | c8a11d298617d1abb12a72461665583c6a44f9d2 | b4c8a75e724a674812b8a38c0202485776445d89 | refs/heads/master | 2023-04-03T07:18:49.842063 | 2021-04-11T12:02:40 | 2021-04-11T12:02:40 | 325,269,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py |
class Solution():
    def reverseString(self, s):
        """Reverse the list of characters ``s`` in place and return it.

        The previous implementation recursed once per swap, so inputs
        longer than roughly twice Python's recursion limit raised
        RecursionError; the iterative two-pointer swap below handles any
        length in O(n) time and O(1) extra space.
        """
        left, right = 0, len(s) - 1
        while left < right:
            s[left], s[right] = s[right], s[left]
            left += 1
            right -= 1
        return s
if __name__ == "__main__":
    # Demo: reverses the character list in place and prints
    # ['o', 'l', 'l', 'e', 'h'].
    s = Solution()
    print(s.reverseString(["h","e","l","l","o"]))
| [
"2901429479@qq.com"
] | 2901429479@qq.com |
d6b58de7a4483d2044cdc0624c57e6f6d3792fbf | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/335/usersdata/297/99568/submittedfiles/matriz1.py | 25872a5f7dbc94f97ca7250169563977b88328f5 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,475 | py | # -*- coding: utf-8 -*-
# Interactively read an m x n matrix of ints, then scan it for the bounding
# box of the cells equal to 1 (top/bottom row, left/right column).
# NOTE(review): the four bounds are computed but never used to slice the
# matrix -- the crop step appears unfinished (see the commented-out drafts
# kept in string literals below in the original file).
matriz=[]
m=int(input('digite o numero de linhas dejado: '))
n=int(input('digite o numero de colunas dejado: '))
for i in range(m):
    linha= []
    for j in range(n):
        linha.append(int(input('digite o valor do indice da linha%d e da coluna %d : ' %((i+1),(j+1)))))
    matriz.append(linha)
# Sentinels: "min" bounds start past the end, "max" bounds start at 0, so
# any 1 found will tighten them.
linhas_superiores=m
linhas_inferiores=0
colunas_da_esquerda=n
colunas_da_direita=0
for i in range(m):
    for j in range(n):
        if matriz[i][j]==1:
            # Track the smallest/largest row and column containing a 1.
            if i<linhas_superiores :
                linhas_superiores=i
            if i+1>linhas_inferiores:
                linhas_inferiores=i+1
            if j<colunas_da_esquerda :
                colunas_da_esquerda=j
            if j+1>colunas_da_direita:
                colunas_da_direita=j+1
print(matriz)
'''
#professor se o sr ver esse codigo abaixo testa ele pfvr e ve onde esta o erro de logica dele,pq na parte q ele testa a corte direita,eu coloco pra ele apagar o termo matriz[i][j],com for i in range,mas ele n apga da posição indicada ele apaga do ultimo j
import numpy as np
matriz=[]
m=int(input('digite o numero de linhas da matriz que voceh deseja recortar: '))
n=int(input('digite o numero de colunas da matriz que voceh deseja recortar: '))
for i in range(0,m,1):
linha=[]
for j in range(0,n,1):
linha.append(int(input('digite o valor do elemento da linha%d e da coluna%d desejada: '%((i+1),(j+1)))))
matriz.append(linha)
linhaszeradas=0
linhaszeradas2=0
colunaszeradas=0
colunaszeradas2=0
#corte superior
for i in range(0,m-1,1) :
y=sum(matriz[i])
if y > 0 :
break
else :
linhaszeradas=linhaszeradas+1
if linhaszeradas>0 :
for i in range(0,linhaszeradas,1):
del matriz[i]
#corte inferior
for i in range(m-linhaszeradas-1,0,-1) :
r=int(sum(matriz[i]))
if r > 0 :
break
else :
linhaszeradas2=linhaszeradas2+1
if linhaszeradas2>0:
for i in range(m-1,m-linhaszeradas2-1,-1):
del matriz[i]
t=0
#corte direito
for i in range(0,m-linhaszeradas-linhaszeradas2,1):
for j in range(0,n,1) :
if i+1<m-linhaszeradas-linhaszeradas2 :
t=t+matriz[i][j]+matriz[i+1][j]
if t > 0 :
break
else :
colunaszeradas=colunaszeradas+1
if colunaszeradas > 0:
for i in range(0,m-linhaszeradas-linhaszeradas2,1):
for j in range(colunaszeradas-1,0,-1):
del matriz[i][j]
#corte esquerdo
f=0
for i in range(0,m-linhaszeradas-linhaszeradas2,1):
for j in range(n-colunaszeradas-1,0,-1) :
f=f+matriz[i][j]
if f > 0 :
break
else :
colunaszeradas2=colunaszeradas2+1
if colunaszeradas2>0 :
for i in range(0,m-linhaszeradas-linhaszeradas2,1):
for j in range(n-colunaszeradas-colunaszeradas2,n-colunaszeradas-colunaszeradas2-1,-1):
del matriz[i][j]
#saida
print(matriz)'''
'''matriz=[]
m=int(input('digite o numero de linhas da matriz que voceh deseja recortar: '))
n=int(input('digite o numero de colunas da matriz que voceh deseja recortar: '))
for i in range(0,m,1):
linha=[]
for j in range(0,n,1):
linha.append(int(input('digite o valor do elemento da linha %d desejada: '%(j+1))))
matriz.append(linha)
indice_superior=m-1
indice_inferior=0
indice_superior=0
indice_superior=n-1
for i in range(0,m,1):
encontrou_na_linha = False
for j in range(0,n,1):
if matriz[i][j]==1 :'''
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
b869b5aabce6c786603cfd965af2eccc2d2311c2 | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_170/run_cfg.py | 72c397a816bf725adf80827a259310277ce5fb80 | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1763.root',
'/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1764.root',
'/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1765.root',
'/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1766.root',
'/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1767.root')
)
| [
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
f9def055c1faf6d80a0eb67ecc555853b788a02f | 77d834eb125fdc56c96af31cf74db5b741c8e94e | /api_v5/urls.py | 99a4f29e1b51566b40e869de61c5b80f161f01cd | [] | no_license | zhouf00/learn_rest_framework | 7c17124fcb08ce48f54f94201f2da29e41e9d867 | a292e38ee9ff475e43ce4612fbb6c074b4073f84 | refs/heads/master | 2022-10-12T12:05:07.618651 | 2020-06-11T02:40:45 | 2020-06-11T02:40:45 | 268,827,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | from django.conf.urls import url,include
from rest_framework import routers
from . import views
router = routers.DefaultRouter()
# NOTE(review): the router is created but its URLs are never included in
# ``urlpatterns`` (and ``views`` is imported unused) -- presumably this
# module is a work in progress; confirm before wiring it into the project.
urlpatterns = [
]
"49618748+zhouf00@users.noreply.github.com"
] | 49618748+zhouf00@users.noreply.github.com |
05e9150c6a508e13e2e38e2590747d16dad070dd | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-mpc/huaweicloudsdkmpc/v1/model/create_watermark_template_request.py | fe26b61a406957be1a4292565fe85e4762ba11f4 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,991 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateWatermarkTemplateRequest:
    """
    Attributes:
      openapi_types (dict): The key is attribute name
      and the value is attribute type.
      attribute_map (dict): The key is attribute name
      and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []
    # Declared model fields mapped to their OpenAPI type names.
    openapi_types = {
        'body': 'WatermarkTemplate'
    }
    # Python attribute name mapped to the JSON key used on the wire.
    attribute_map = {
        'body': 'body'
    }

    def __init__(self, body=None):
        """CreateWatermarkTemplateRequest - a model defined in huaweicloud sdk"""
        self._body = None
        self.discriminator = None
        if body is not None:
            self.body = body

    @property
    def body(self):
        """Gets the body of this CreateWatermarkTemplateRequest.

        :return: The body of this CreateWatermarkTemplateRequest.
        :rtype: WatermarkTemplate
        """
        return self._body

    @body.setter
    def body(self, body):
        """Sets the body of this CreateWatermarkTemplateRequest.

        :param body: The body of this CreateWatermarkTemplateRequest.
        :type: WatermarkTemplate
        """
        self._body = body

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts of models;
        # sensitive attributes are masked instead of exported.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        # NOTE(review): Python-2-only compatibility shim kept by the code
        # generator; ``reload``/``setdefaultencoding`` do not exist on
        # Python 3 and this branch is skipped there.
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CreateWatermarkTemplateRequest):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
3dface85d6a966e144d8e74a1ed487c73e9b9c72 | d23dab09b21553353ad85246ebafaea790f2afbd | /src/python/pants/backend/scala/lint/scalafmt/rules.py | 82f6198e1339413f45ec690c0b395020ca4addcc | [
"Apache-2.0"
] | permissive | asherf/pants | 00e8c64b7831f814bac3c4fa8c342d2237fef17d | c94d9e08f65e9baf3793dff0ec2c571d682f6b90 | refs/heads/master | 2023-05-28T14:45:35.325999 | 2023-01-18T15:16:07 | 2023-01-18T15:16:07 | 185,082,662 | 0 | 0 | Apache-2.0 | 2023-01-18T15:15:46 | 2019-05-05T21:09:43 | Python | UTF-8 | Python | false | false | 7,856 | py | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os.path
from collections import defaultdict
from dataclasses import dataclass
from typing import cast
from pants.backend.scala.lint.scalafmt.skip_field import SkipScalafmtField
from pants.backend.scala.lint.scalafmt.subsystem import ScalafmtSubsystem
from pants.backend.scala.target_types import ScalaSourceField
from pants.core.goals.fmt import FmtResult, FmtTargetsRequest, Partitions
from pants.core.goals.generate_lockfiles import GenerateToolLockfileSentinel
from pants.core.goals.tailor import group_by_dir
from pants.core.util_rules.partitions import Partition
from pants.engine.fs import Digest, DigestSubset, MergeDigests, PathGlobs, Snapshot
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.process import ProcessResult
from pants.engine.rules import collect_rules, rule
from pants.engine.target import FieldSet, Target
from pants.engine.unions import UnionRule
from pants.jvm.goals import lockfile
from pants.jvm.jdk_rules import InternalJdk, JvmProcess
from pants.jvm.resolve.coursier_fetch import ToolClasspath, ToolClasspathRequest
from pants.jvm.resolve.jvm_tool import GenerateJvmLockfileFromTool, GenerateJvmToolLockfileSentinel
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.strutil import pluralize
_SCALAFMT_CONF_FILENAME = ".scalafmt.conf"
@dataclass(frozen=True)
class ScalafmtFieldSet(FieldSet):
required_fields = (ScalaSourceField,)
source: ScalaSourceField
@classmethod
def opt_out(cls, tgt: Target) -> bool:
return tgt.get(SkipScalafmtField).value
class ScalafmtRequest(FmtTargetsRequest):
field_set_type = ScalafmtFieldSet
tool_subsystem = ScalafmtSubsystem
class ScalafmtToolLockfileSentinel(GenerateJvmToolLockfileSentinel):
resolve_name = ScalafmtSubsystem.options_scope
@dataclass(frozen=True)
class GatherScalafmtConfigFilesRequest:
filepaths: tuple[str, ...]
@dataclass(frozen=True)
class ScalafmtConfigFiles:
snapshot: Snapshot
source_dir_to_config_file: FrozenDict[str, str]
@dataclass(frozen=True)
class PartitionInfo:
classpath_entries: tuple[str, ...]
config_snapshot: Snapshot
extra_immutable_input_digests: FrozenDict[str, Digest]
@property
def description(self) -> str:
return self.config_snapshot.files[0]
def find_nearest_ancestor_file(files: set[str], dir: str, config_file: str) -> str | None:
while True:
candidate_config_file_path = os.path.join(dir, config_file)
if candidate_config_file_path in files:
return candidate_config_file_path
if dir == "":
return None
dir = os.path.dirname(dir)
@rule
async def gather_scalafmt_config_files(
request: GatherScalafmtConfigFilesRequest,
) -> ScalafmtConfigFiles:
"""Gather scalafmt config files and identify which config files to use for each source
directory."""
source_dirs = frozenset(os.path.dirname(path) for path in request.filepaths)
source_dirs_with_ancestors = {"", *source_dirs}
for source_dir in source_dirs:
source_dir_parts = source_dir.split(os.path.sep)
source_dir_parts.pop()
while source_dir_parts:
source_dirs_with_ancestors.add(os.path.sep.join(source_dir_parts))
source_dir_parts.pop()
config_file_globs = [
os.path.join(dir, _SCALAFMT_CONF_FILENAME) for dir in source_dirs_with_ancestors
]
config_files_snapshot = await Get(Snapshot, PathGlobs(config_file_globs))
config_files_set = set(config_files_snapshot.files)
source_dir_to_config_file: dict[str, str] = {}
for source_dir in source_dirs:
config_file = find_nearest_ancestor_file(
config_files_set, source_dir, _SCALAFMT_CONF_FILENAME
)
if not config_file:
raise ValueError(
f"No scalafmt config file (`{_SCALAFMT_CONF_FILENAME}`) found for "
f"source directory '{source_dir}'"
)
source_dir_to_config_file[source_dir] = config_file
return ScalafmtConfigFiles(config_files_snapshot, FrozenDict(source_dir_to_config_file))
@rule
async def partition_scalafmt(
request: ScalafmtRequest.PartitionRequest, tool: ScalafmtSubsystem
) -> Partitions[PartitionInfo]:
if tool.skip:
return Partitions()
toolcp_relpath = "__toolcp"
filepaths = tuple(field_set.source.file_path for field_set in request.field_sets)
lockfile_request = await Get(GenerateJvmLockfileFromTool, ScalafmtToolLockfileSentinel())
tool_classpath, config_files = await MultiGet(
Get(ToolClasspath, ToolClasspathRequest(lockfile=lockfile_request)),
Get(
ScalafmtConfigFiles,
GatherScalafmtConfigFilesRequest(filepaths),
),
)
extra_immutable_input_digests = {
toolcp_relpath: tool_classpath.digest,
}
# Partition the work by which source files share the same config file (regardless of directory).
source_files_by_config_file: dict[str, set[str]] = defaultdict(set)
for source_dir, files_in_source_dir in group_by_dir(filepaths).items():
config_file = config_files.source_dir_to_config_file[source_dir]
source_files_by_config_file[config_file].update(
os.path.join(source_dir, name) for name in files_in_source_dir
)
config_file_snapshots = await MultiGet(
Get(Snapshot, DigestSubset(config_files.snapshot.digest, PathGlobs([config_file])))
for config_file in source_files_by_config_file
)
return Partitions(
Partition(
tuple(files),
PartitionInfo(
classpath_entries=tuple(tool_classpath.classpath_entries(toolcp_relpath)),
config_snapshot=config_snapshot,
extra_immutable_input_digests=FrozenDict(extra_immutable_input_digests),
),
)
for files, config_snapshot in zip(
source_files_by_config_file.values(), config_file_snapshots
)
)
@rule(desc="Format with scalafmt", level=LogLevel.DEBUG)
async def scalafmt_fmt(
request: ScalafmtRequest.Batch, jdk: InternalJdk, tool: ScalafmtSubsystem
) -> FmtResult:
partition_info = cast(PartitionInfo, request.partition_metadata)
merged_digest = await Get(
Digest,
MergeDigests([partition_info.config_snapshot.digest, request.snapshot.digest]),
)
result = await Get(
ProcessResult,
JvmProcess(
jdk=jdk,
argv=[
"org.scalafmt.cli.Cli",
f"--config={partition_info.config_snapshot.files[0]}",
"--non-interactive",
*request.files,
],
classpath_entries=partition_info.classpath_entries,
input_digest=merged_digest,
output_files=request.files,
extra_jvm_options=tool.jvm_options,
extra_immutable_input_digests=partition_info.extra_immutable_input_digests,
# extra_nailgun_keys=request.extra_immutable_input_digests,
use_nailgun=False,
description=f"Run `scalafmt` on {pluralize(len(request.files), 'file')}.",
level=LogLevel.DEBUG,
),
)
return await FmtResult.create(request, result)
@rule
def generate_scalafmt_lockfile_request(
_: ScalafmtToolLockfileSentinel, tool: ScalafmtSubsystem
) -> GenerateJvmLockfileFromTool:
return GenerateJvmLockfileFromTool.create(tool)
def rules():
return [
*collect_rules(),
*lockfile.rules(),
*ScalafmtRequest.rules(),
UnionRule(GenerateToolLockfileSentinel, ScalafmtToolLockfileSentinel),
]
| [
"noreply@github.com"
] | asherf.noreply@github.com |
b89e16585be9d5a208e0711271a22f5d6e201515 | 886397f22f566025c268c7591ce1f91aa1413a86 | /Stack_and_Queue/002_geeksforgeeks_Next_Larger_Element/Solution.py | 8ffe729762e7b3fa8dec35455a1e065d75178d3c | [] | no_license | Keshav1506/competitive_programming | cd4323355c96a1368135bdfb6b24511bb0b11477 | f2621cd76822a922c49b60f32931f26cce1c571d | refs/heads/master | 2023-02-04T16:48:16.723296 | 2020-12-24T04:13:45 | 2020-12-24T04:13:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,468 | py | #
# Time : O(N); Space: O(1)
# @tag : Stack and Queue
# @by : Shaikat Majumdar
# @date: Aug 27, 2020
# **************************************************************************
# Description:
#
# Given an array A of size N having distinct elements, the task is to find the next greater element for each element of the array in order of their appearance in the array. If no such element exists, output -1
#
# Input:
# The first line of input contains a single integer T denoting the number of test cases.Then T test cases follow. Each test case consists of two lines. The first line contains an integer N denoting the size of the array. The Second line of each test case contains N space separated positive integers denoting the values/elements in the array A.
#
# Output:
# For each test case, print in a new line, the next greater element for each array element separated by space in order.
#
# Constraints:
# 1 <= T <= 100
# 1 <= N <= 107
# 1 <= Ai <= 1018
# Example:
# Input
# 2
# 4
# 1 3 2 4
# 4
# 4 3 2 1
# Output
# 3 4 4 -1
# -1 -1 -1 -1
#
# Explanation:
# Testcase1: In the array, the next larger element to 1 is 3 , 3 is 4 , 2 is 4
# and for 4 ? since it doesn't exist hence -1.
#
# **************************************************************************
# Source: https://practice.geeksforgeeks.org/problems/next-larger-element/0 (GeeksForGeeks - Next Larger Element)
#
#
from typing import List
import unittest
class Solution:
def nextGreaterElements(self, nums: List[int]) -> List[int]:
stack = []
result = [-1] * len(nums)
for i in range(len(nums)):
while stack and nums[stack[-1]] < nums[i]:
result[stack.pop()] = nums[i]
stack.append(i)
return result
class Test(unittest.TestCase):
def setUp(self) -> None:
pass
def tearDown(self) -> None:
pass
def test_nextGreaterElements(self) -> None:
s = Solution()
for nums, solution in (
[[1, 3, 2, 4], [3, 4, 4, -1]],
[[4, 3, 2, 1], [-1, -1, -1, -1]],
[[1, 3, 3, 4], [3, 4, 4, -1]],
[[1, 3, 3, 3], [3, -1, -1, -1]],
):
self.assertEqual(
solution,
s.nextGreaterElements(nums),
"Should return the next greater element for each element of the array in order of their appearance in the array otherwise -1",
)
if __name__ == "__main__":
unittest.main()
| [
"sm2774us@gmail.com"
] | sm2774us@gmail.com |
997edcc27f6dff73d8aad74d24578f3cf20b226d | 33feacc4ef80da09e6843c6b97469ad99b2215cb | /Dictionary/src/deldict.py | 430e54047f93a5a797e4bbc2eea892edc3b91ce0 | [] | no_license | DheerajJoshi/Python-tribble | cfbecbd4e33da38f514c5eee3d61ddd74408e071 | eb82eb04e299ceec39ad19b9fc07873e5479ab74 | refs/heads/master | 2021-05-30T07:59:17.362407 | 2015-10-25T13:33:19 | 2015-10-25T13:33:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | #!/usr/bin/python
dict1 = {'Name': 'Zara', 'Age': 7, 'Class': 'First'};
del dict1['Name']; # remove entry with key 'Name'
dict1.clear();
# remove all entries in dict1
del dict1 ;
# delete entire dictionary
print ("dict1['Age']: ", dict1['Age']);
print ("dict1['School']: ", dict1['School']);
#This will produce the following result. Note an exception raised, this is because after del dict dictionary does not
#exist any more: | [
"joshidj.12@gmail.com"
] | joshidj.12@gmail.com |
da311bd0dc542b41b6afccbf71b942ad15aa3c0a | cc5a3fa80d2ae90afc2626e4a82b9a927726dfa0 | /huaweicloud-sdk-waf/huaweicloudsdkwaf/v1/model/delete_certificate_request.py | 49280d442ebc8df1c3ca56dc98901b35b2864376 | [
"Apache-2.0"
] | permissive | Logan118/huaweicloud-sdk-python-v3 | eca15e9b08bdccef7122e40735d444ddc958efa8 | bb230c03bd00225b9f5780a56adce596e9456420 | refs/heads/master | 2023-07-17T14:57:50.799564 | 2021-08-25T10:40:43 | 2021-08-25T10:40:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,098 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DeleteCertificateRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'certificate_id': 'str'
}
attribute_map = {
'certificate_id': 'certificate_id'
}
def __init__(self, certificate_id=None):
"""DeleteCertificateRequest - a model defined in huaweicloud sdk"""
self._certificate_id = None
self.discriminator = None
self.certificate_id = certificate_id
@property
def certificate_id(self):
"""Gets the certificate_id of this DeleteCertificateRequest.
证书ID
:return: The certificate_id of this DeleteCertificateRequest.
:rtype: str
"""
return self._certificate_id
@certificate_id.setter
def certificate_id(self, certificate_id):
"""Sets the certificate_id of this DeleteCertificateRequest.
证书ID
:param certificate_id: The certificate_id of this DeleteCertificateRequest.
:type: str
"""
self._certificate_id = certificate_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeleteCertificateRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
3ebe149e847b53c61177f7563d3477880c98187a | 88be3911c7e73d4bf71b0482ee6d15f49030463a | /Func_Decorator/demo8_decorator.py | 0957bf20ac97a758b0acbd5f52d072b9f13b2289 | [] | no_license | skyaiolos/Python_KE | 85f879d1cb637debd2e3a0239d7c8d7bfb30c827 | 8cc42c8f4d1245de4b79af429f72a9ed2508bc1a | refs/heads/master | 2021-01-22T08:47:47.761982 | 2017-05-28T14:57:02 | 2017-05-28T14:57:02 | 92,634,507 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,463 | py | """
# Script Description:
http://www.cnblogs.com/rhcad/archive/2011/12/21/2295507.html
Python装饰器学习(九步入门)
"""
__author__ = "爱的彼岸(QQ:3124724)"
__copyright__ = "Copyright 2017,3124724@qq.com"
print("第八步:让装饰器带 类 参数")
print("------示例8: 装饰器带类参数")
'''示例8: 装饰器带类参数'''
class locker:
def __init__(self):
print("locker.__init__() should be not called.")
@staticmethod
def acquire():
print("locker.acquire() called. (这里是静态方法)")
@staticmethod
def release():
print(" locker.release() called. (不需要对象实例化)")
def deco(cls):
'''cls 必须实现acquire和release静态方法'''
def _deco(func):
def __deco():
print("before %s called [%s]." % (func.__name__, cls))
cls.acquire()
try:
return func()
finally:
cls.release()
return __deco
return _deco
@deco(locker)
def my_func():
print("my_func() called.")
my_func()
my_func()
# before my_func called [<class '__main__.locker'>].
# locker.acquire() called. (这里是静态方法)
# my_func() called.
# locker.release() called. (不需要对象实例化)
# before my_func called [<class '__main__.locker'>].
# locker.acquire() called. (这里是静态方法)
# my_func() called.
# locker.release() called. (不需要对象实例化)
| [
"skyaiolos@aliyun.com"
] | skyaiolos@aliyun.com |
d32415e83f4447be4139a778226ca0f0b28ff00f | 314245750f897949bc7867883d22b8ff1465fbe1 | /boostcamp/ex/dfs_bfs/1_solved.py | 8564949da94e717d888d487afad6e537ae1696c4 | [] | no_license | dongho108/CodingTestByPython | e608d70235cc6c6a27c71eea86ee28d1271d4d1d | 475b3665377a8f74944d7698e894ad3eafc49ad4 | refs/heads/master | 2023-05-24T15:01:56.563359 | 2021-07-01T14:23:20 | 2021-07-01T14:23:20 | 330,833,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | answer = 0
def dfs(n, sum, numbers, target):
global answer
if n == len(numbers):
if sum == target:
answer += 1
return
dfs(n+1, sum+numbers[n], numbers, target)
dfs(n+1, sum-numbers[n], numbers, target)
def solution(numbers, target):
global answer
dfs(1, numbers[0], numbers, target)
dfs(1, -numbers[0], numbers, target)
return answer | [
"dongho108@naver.com"
] | dongho108@naver.com |
f3d7325e4106686dfd04fb4b95d0df987c6a83c6 | de213b73f703fb8f285bc8cf15e388cc2f98898f | /venv/bin/IveBeenEverywhere.py | 32dad69578024fb806c25252463ac155bd2da901 | [] | no_license | adampehrson/Kattis | 18de025a6a569a46c54cc85c996eec0b55c9f74b | a04922caa356f8113fe30a523f3a148d458a6132 | refs/heads/main | 2023-07-10T02:53:29.782854 | 2021-08-14T10:44:30 | 2021-08-14T10:44:30 | 395,948,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py |
i = 0
x = int(input())
while i<x:
e = 0
cities = list()
total = 0
y = int(input())
while e <y:
newcity = input()
if cities.count(newcity) < 1:
total = total +1
cities.append(newcity)
e +=1
print(total)
i+=1 | [
"85373641+adampehrson@users.noreply.github.com"
] | 85373641+adampehrson@users.noreply.github.com |
1162591a036f543f84cd75cc9f65138a01a11000 | 8723f56398a7f969877709192922c053b0e20d56 | /Kayit/migrations/0002_auto_20191230_2010.py | 67c88c715ff83bd43a9649c5b886ab1c30655f68 | [] | no_license | vektorelpython/Python17Web | f215efd9be96062886f67e456c4d0735602b5f00 | f571a4157d2575d441f091f2450d5e24f4f3645d | refs/heads/master | 2022-04-04T20:03:02.941949 | 2020-01-13T16:16:15 | 2020-01-13T16:16:15 | 230,123,576 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | # Generated by Django 2.2.3 on 2019-12-30 18:10
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Kayit', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='kayitmodel',
name='kayit_eden',
field=models.ForeignKey(default=1, on_delete='CASCADE', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='kayitmodel',
name='adi',
field=models.CharField(max_length=200, verbose_name='Adı'),
),
]
| [
"ibrahim.ediz@gazi.edu.tr"
] | ibrahim.ediz@gazi.edu.tr |
2fcd4d8f4ade86e244c888b374d3a52a7905389e | 5b3c90d0426dd2adbe756e094c99f066925cda79 | /todoist_tracker/cli/base.py | eee2baa5c52b59f692be8f17b0816a697bee1379 | [
"MIT"
] | permissive | deanmalmgren/todoist-tracker | 5bd85e3903b0f5ca10b05c406535da36a92d7ab5 | f2576a6bf5a80873bc825b3d64a1dc6aed0a145b | refs/heads/master | 2021-01-12T05:02:53.769394 | 2017-01-14T20:08:57 | 2017-01-14T20:08:57 | 77,838,338 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,914 | py | import argparse
import json
import os
import shutil
from todoist import TodoistAPI
import gspread
from gspread.exceptions import WorksheetNotFound
from oauth2client.service_account import ServiceAccountCredentials
class BaseCommand(object):
help_text = ''
def __init__(self, subcommand_creator):
# keep a local copy of the config file which is useful during
# autocompletion
self.config = None
# set up the subcommand options
self.subcommand_creator = subcommand_creator
self.option_parser = self.subcommand_creator.add_parser(
self.get_command_name(),
help=self.help_text,
description=self.help_text,
)
self.add_command_line_options()
def get_command_name(self):
"""The command name defaults to the name of the module."""
return self.__module__.rsplit('.', 1)[1]
def add_command_line_options(self):
self.option_parser.add_argument(
'--todoist',
type=argparse.FileType('r'),
metavar='JSONFILE',
default='todoist.json',
help='todoist credentials file in json format',
)
self.option_parser.add_argument(
'--google',
type=argparse.FileType('r'),
metavar='JSONFILE',
default='google.json',
help='google credentials file in json format',
)
self.option_parser.add_argument(
'--debug',
action='store_true',
help='log output on command line, NOT google spreadsheet'
)
def execute(self, todoist=None, google=None, debug=None, **kwargs):
"""Common execution workflows are handled here"""
# create an authenticated instance of the TodoistAPI. be sure to store
# the cached data locally and to nuke the existing sync prior to
# running. otherwise the number of outdated tasks grows
credentials = json.load(todoist)
credentials['cache'] = os.path.join(
os.getcwd(), '.todoist-tracker-sync/'
)
if os.path.exists(credentials['cache']):
shutil.rmtree(credentials['cache'])
self.todoist_api = TodoistAPI(**credentials)
# authenticate to google
google_keys = json.load(google)
credentials = ServiceAccountCredentials.from_json_keyfile_name(
google.name,
['https://spreadsheets.google.com/feeds'],
)
gdrive = gspread.authorize(credentials)
self.gdrive_workbook = gdrive.open_by_url(google_keys['workbook_url'])
def get_or_create_worksheet(self, title, header):
try:
worksheet = self.gdrive_workbook.worksheet(title)
except WorksheetNotFound:
worksheet = self.gdrive_workbook.add_worksheet(title, 1, 26)
worksheet.insert_row(header)
return worksheet
| [
"dean.malmgren@datascopeanalytics.com"
] | dean.malmgren@datascopeanalytics.com |
226804966ea7030faa28c955e70c9aaf1b2c505e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/131/usersdata/231/45412/submittedfiles/al10.py | a573e8bb660a39123122360930bff18c017929eb | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | # -*- coding: utf-8 -*-
n=int(input('digite o número de termos:'))
numerador=2
denominador=1
produto=1
i=0
while (i)<=n:
produto=(produto*numerador)/denominador
if i%2==1:
numerador=numerador+2
else:
denominador=denomidor+2
i=i+1
print(produto)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
93de3c327880536cb8e6fadfa3bb218dd1f988d7 | 4adc1d1b8f9badefcd8c25c6e0e87c6545ccde2c | /OrcApi/Run/RunDefMod.py | 2f8dc08045cf79a3369be3dfb48b8252fd92eca1 | [] | no_license | orange21cn/OrcTestToolsKit | eb7b67e87a608fb52d7bdcb2b859fa588263c136 | 69b6a3c382a7043872db1282df4be9e413d297d6 | refs/heads/master | 2020-04-15T07:30:35.485214 | 2017-09-30T06:16:17 | 2017-09-30T06:16:17 | 68,078,991 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,612 | py | # coding=utf-8
import os
import re
from OrcLib.LibCommon import OrcString
from OrcLib import get_config
from OrcLib.LibLog import OrcLog
from OrcLib.LibNet import OrcResource
from OrcLib.LibNet import ResourceCheck
from RunData import RunData
class RunDefMod:
"""
运行列表管理,操作目录,目录名为 [类型]_[id],目录内含有 result.res 的属于执行过的, result.res 是一个xml文件
"""
def __init__(self):
self.__config = get_config()
self.__logger = OrcLog("resource.run.run_def.model")
self.__resource_batch_def = OrcResource("BatchDef")
self.__resource_case_def = OrcResource("CaseDef")
self.__data = RunData()
self.__home = self.__config.get_option("RUN", "home")
if not os.path.exists(self.__home):
os.mkdir(self.__home)
def usr_search(self, p_cond=None):
"""
查询列表
:param p_cond: 条件 [id="", run_def_type=""]
:return:
"""
run_list = os.listdir(self.__home)
rtn = list()
# 条件匹配
for _item in run_list:
_status = True
_type, _id = _item.split("_")
# 查找 flag, batch 为 batch_no, case 为 case path
if "BATCH" == _type:
_batch = self.__resource_batch_def.get(path=_id)
# 检查结果
ResourceCheck.result_status(_batch, u"查询计划信息", self.__logger)
_flag = _id if not _batch.data else _batch.data["batch_no"]
else:
_case = self.__resource_case_def.get(path=_id)
# 检查结果
ResourceCheck.result_status(_case, u"查询计划信息", self.__logger)
_flag = _id if not _case.data else _case.data["case_path"]
# 有条件时进行查找,无条件使使用全部数据
if p_cond is not None:
# 匹配 flag
if "run_flag" in p_cond and not re.search(p_cond["run_flag"], _flag):
_status = False
# 匹配 type
if "run_def_type" in p_cond and _type != p_cond["run_def_type"]:
_status = False
if _status:
# 加入当前目录
rtn.append(dict(id=_id, pid=None, run_def_type=_type, run_flag=_flag))
# 加入目录下测试项
_test_list = os.listdir(os.path.join(self.__home, _item))
rtn.extend(list(
dict(id="%s:%s" % (_id, test), pid=_id, run_def_type="TEST", run_flag=test)
for test in _test_list))
return rtn
def usr_add(self, p_data):
"""
增加执行目录 p_test=false, 为 true 时生成结果文件
:param p_data: {id, run_def_type, result}
:return:
:rtype: bool
"""
_type = p_data["run_def_type"]
_id = p_data["id"]
_result = p_data["result"] if "result" in p_data else False
# 生成目录名称
folder_root = os.path.join(self.__home, "%s_%s" % (_type, _id))
# 建目录
if not os.path.exists(folder_root):
os.mkdir(folder_root)
# 建执行结果文件
if _result:
for _index in range(100):
_flag = _index + 1
if 10 > _flag:
_flag = "%s%s" % (0, _flag)
res_folder = os.path.join(folder_root, "%s%s" % (OrcString.get_data_str(), _flag))
res_file = os.path.join(res_folder, "default.res")
if os.path.exists(res_folder):
continue
os.mkdir(res_folder)
self.__data.save_list(_type, _id, res_file)
break
return _id
def usr_delete(self, p_list):
"""
删除
:param p_list:
:type p_list: list
:return:
"""
delete_list = list()
folder_info = {_name.split('_')[1]: _name for _name in os.listdir(self.__home)}
for _item in p_list:
_path = _item.split(':')
if _path[0] in folder_info:
_path[0] = folder_info[_path[0]]
del_folder = self.__home
for _folder in _path:
del_folder = os.path.join(del_folder, _folder)
delete_list.append(del_folder)
for _item in delete_list:
if os.path.exists(_item):
import shutil
shutil.rmtree(_item)
return True
| [
"orange21cn@126.com"
] | orange21cn@126.com |
3d54b8a8ff113386e8decf0364c938387f21328f | 45b4ff6a4e4804ff84847d56400e10cdb0d96186 | /python/test/test_facility_api.py | 45f0beb3033e2eea55022dd52157db63d76832e0 | [] | no_license | pranav/mbta-libraries | fabbc9305569a344e25fa1b281cba290f0fa3f13 | e793696addd94750f722f3132aadc8dfe00adef5 | refs/heads/master | 2021-08-22T11:15:26.014862 | 2017-11-30T03:12:52 | 2017-11-30T03:12:52 | 112,558,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | # coding: utf-8
"""
MBTA
MBTA service API. https://www.mbta.com
OpenAPI spec version: 3.0
Contact: developer@mbta.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.apis.facility_api import FacilityApi
class TestFacilityApi(unittest.TestCase):
""" FacilityApi unit test stubs """
def setUp(self):
self.api = swagger_client.apis.facility_api.FacilityApi()
def tearDown(self):
pass
def test_api_facility_controller_index(self):
"""
Test case for api_facility_controller_index
"""
pass
def test_api_facility_controller_show(self):
"""
Test case for api_facility_controller_show
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"pgandhi@hubspot.com"
] | pgandhi@hubspot.com |
ed03d817b2745fe31e30d8bef403cb40adbead8c | 02bcd98063c2088e9ab6a266c7f7c57d0c06cd33 | /install.py | 9ec2a4b8a4f81785a93ba6c43892aee798a445c6 | [
"MIT"
] | permissive | kevinsegal/BabySploit | e99d2c89a041de9f0f2e6e3d4f0ce5bb015011f0 | 66bafc25e04e7512e8b87b161bd3b7201bb57b63 | refs/heads/master | 2020-04-07T04:27:18.870230 | 2018-11-18T03:34:38 | 2018-11-18T03:34:38 | 158,056,891 | 1 | 0 | MIT | 2018-11-18T06:35:20 | 2018-11-18T06:35:20 | null | UTF-8 | Python | false | false | 1,407 | py | import os, time, subprocess, sys
from sys import stdout
def Command_exe(msg,cmd):
i = "[STATUS] Processing"
stdout.write(" " + msg + " %s" % i)
stdout.flush()
if subprocess.call(cmd +' >/dev/null 2>&1', shell=True)==0:
i = "Complete [WARNING] "
else:
i = "Error [WARNING] "
stdout.write("\r" + msg +"[STATUS] %s" % i)
def start():
if os.getuid() != 0:
print("[ERROR] Install must be run as root.")
print("Login as root (sudo) or try sudo python3 install.py")
exit()
print(" == BabySploit Installation ==")
input("Press ENTER To Start Installation")
with open("/etc/apt/sources.list", "r") as myfile:
data = myfile.read().replace('\n', "")
if "http://http.kali.org/kali" not in data:
print(Command_exe("["+time.strftime('%H:%M:%S')+"] Adding Repo To Sources... ",'apt-add-repository "deb http://http.kali.org/kali kali-rolling main non-free contrib"'))
else:
pass
print(Command_exe("["+time.strftime('%H:%M:%S')+"] Installing Required Dependencies... ",'apt-get install exploitdb netcat nmap php7.0 perl'))
print(Command_exe("["+time.strftime('%H:%M:%S')+"] Installing Virtual Environment... ",'pip3 install virtualenv'))
print("Complete!")
print("Please start virtualenv and run pip3 install -r requirements.txt!")
start()
| [
"maxlikescs@gmail.com"
] | maxlikescs@gmail.com |
29e7ca9976f3be06036fd6349285796e881773cd | 45df508e4c99f453ca114053a92deb65939f18c9 | /tfx/examples/custom_components/slack/example/taxi_pipeline_slack.py | 3dbce075cb89247de90ea067d0bc447f94df9942 | [
"Apache-2.0"
] | permissive | VonRosenchild/tfx | 604eaf9a3de3a45d4084b36a478011d9b7441fc1 | 1c670e92143c7856f67a866f721b8a9368ede385 | refs/heads/master | 2020-08-09T13:45:07.067267 | 2019-10-10T03:07:20 | 2019-10-10T03:07:48 | 214,100,022 | 1 | 0 | Apache-2.0 | 2019-10-10T06:06:11 | 2019-10-10T06:06:09 | null | UTF-8 | Python | false | false | 7,087 | py | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example pipeline to demonstrate custom TFX component.
This example consists of standard TFX components as well as a custom TFX
component requesting for manual review through Slack.
This example along with the custom `SlackComponent` will only serve as an
example and will not be supported by TFX team.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from slack_component.component import SlackComponent
from tfx.components.evaluator.component import Evaluator
from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen
from tfx.components.example_validator.component import ExampleValidator
from tfx.components.model_validator.component import ModelValidator
from tfx.components.pusher.component import Pusher
from tfx.components.schema_gen.component import SchemaGen
from tfx.components.statistics_gen.component import StatisticsGen
from tfx.components.trainer.component import Trainer
from tfx.components.transform.component import Transform
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_runner import BeamRunner
from tfx.proto import evaluator_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.utils.dsl_utils import csv_input
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data/simple')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_taxi_module_file = os.path.join(_taxi_root, 'taxi_utils_slack.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model/taxi_slack')
# Slack channel to push the model notifications to.
_slack_channel_id = 'my-channel-id'
# Slack token to set up connection.
_slack_token = os.environ['SLACK_BOT_TOKEN']
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_name = 'chicago_taxi_slack'
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
_metadata_db_root = os.path.join(_tfx_root, 'metadata', _pipeline_name)
_log_root = os.path.join(_tfx_root, 'logs')
# Airflow-specific configs; these will be passed directly to airflow
_airflow_config = {
'schedule_interval': None,
'start_date': datetime.datetime(2019, 1, 1),
}
def _create_pipeline():
    """Implements the chicago taxi pipeline with TFX.

    Wires the standard TFX components together with the custom
    SlackComponent (a human review gate) and returns the assembled
    pipeline.Pipeline object for a runner to execute.
    """
    examples = csv_input(_data_root)

    # Brings data into the pipeline or otherwise joins/converts training data.
    example_gen = CsvExampleGen(input=examples)

    # Computes statistics over data for visualization and example validation.
    statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])

    # Generates schema based on statistics files.
    infer_schema = SchemaGen(statistics=statistics_gen.outputs['statistics'])

    # Performs anomaly detection based on statistics and data schema.
    validate_stats = ExampleValidator(
        statistics=statistics_gen.outputs['statistics'],
        schema=infer_schema.outputs['schema'])

    # Performs transformations and feature engineering in training and serving.
    transform = Transform(
        examples=example_gen.outputs['examples'],
        schema=infer_schema.outputs['schema'],
        module_file=_taxi_module_file)

    # Uses user-provided Python function that implements a model using TF-Learn.
    trainer = Trainer(
        module_file=_taxi_module_file,
        examples=transform.outputs['transformed_examples'],
        schema=infer_schema.outputs['schema'],
        transform_graph=transform.outputs['transform_graph'],
        train_args=trainer_pb2.TrainArgs(num_steps=10000),
        eval_args=trainer_pb2.EvalArgs(num_steps=5000))

    # Uses TFMA to compute a evaluation statistics over features of a model,
    # sliced by trip_start_hour.
    model_analyzer = Evaluator(
        examples=example_gen.outputs['examples'],
        model_exports=trainer.outputs['model'],
        feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[
            evaluator_pb2.SingleSlicingSpec(
                column_for_slicing=['trip_start_hour'])
        ]))

    # Performs quality validation of a candidate model (compared to a baseline).
    model_validator = ModelValidator(
        examples=example_gen.outputs['examples'], model=trainer.outputs['model'])

    # This custom component serves as a bridge between pipeline and human model
    # reviewers to enable review-and-push workflow in model development cycle. It
    # utilizes Slack API to send message to user-defined Slack channel with model
    # URI info and wait for go / no-go decision from the same Slack channel:
    #   * To approve the model, users need to reply the thread sent out by the bot
    #     started by SlackComponent with 'lgtm' or 'approve'.
    #   * To reject the model, users need to reply the thread sent out by the bot
    #     started by SlackComponent with 'decline' or 'reject'.
    slack_validator = SlackComponent(
        model=trainer.outputs['model'],
        model_blessing=model_validator.outputs['blessing'],
        slack_token=_slack_token,
        slack_channel_id=_slack_channel_id,
        timeout_sec=3600,  # reviewers get up to one hour to respond
    )

    # Checks whether the model passed the validation steps and pushes the model
    # to a file destination if check passed.  Note that the blessing fed to the
    # Pusher comes from the Slack review, not directly from ModelValidator.
    pusher = Pusher(
        model=trainer.outputs['model'],
        model_blessing=slack_validator.outputs['slack_blessing'],
        push_destination=pusher_pb2.PushDestination(
            filesystem=pusher_pb2.PushDestination.Filesystem(
                base_directory=_serving_model_dir)))

    return pipeline.Pipeline(
        pipeline_name=_pipeline_name,
        pipeline_root=_pipeline_root,
        components=[
            example_gen, statistics_gen, infer_schema, validate_stats, transform,
            trainer, model_analyzer, model_validator, slack_validator, pusher
        ],
        enable_cache=True,
        metadata_connection_config=metadata.sqlite_metadata_connection_config(
            _metadata_db_root),
    )
if __name__ == '__main__':
    # Build the pipeline and execute it locally with the Beam runner.
    # NOTE(review): _airflow_config is defined above but unused by this entry
    # point, which uses BeamRunner — confirm which orchestrator is intended.
    BeamRunner().run(_create_pipeline())
| [
"tensorflow-extended-team@google.com"
] | tensorflow-extended-team@google.com |
4f28e36150a4c5c9a4bf75957a1d0d02781ce721 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/contourcarpet/_db.py | bb09cf5f377e552d7947bdae4b474578ed21a179 | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 463 | py | import _plotly_utils.basevalidators
class DbValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the contourcarpet trace's `db` property."""

    def __init__(self, plotly_name="db", parent_name="contourcarpet", **kwargs):
        # Fall back to the standard defaults unless the caller overrides them.
        edit_type = kwargs.pop("edit_type", "calc")
        implied_edits = kwargs.pop("implied_edits", {"ytype": "scaled"})
        super(DbValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            **kwargs
        )
| [
"noreply@github.com"
] | hugovk.noreply@github.com |
a98d186aebdc18f7d4377a743b524e38f60cc783 | 1a59a9076c1e9f1eb98e24ff41a4c1c95e2b353e | /xcp2k/classes/_xc_functional4.py | ad5a17e378f40fa030a908f352016d1de833a6ac | [] | no_license | Roolthasiva/xcp2k | 66b2f30ebeae1a946b81f71d22f97ea4076e11dc | fc3b5885503c6f6dc549efeb4f89f61c8b6b8242 | refs/heads/master | 2022-12-23T06:03:14.033521 | 2020-10-07T08:01:48 | 2020-10-07T08:01:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,913 | py | from xcp2k.inputsection import InputSection
from xcp2k.classes._becke884 import _becke884
from xcp2k.classes._lyp_adiabatic4 import _lyp_adiabatic4
from xcp2k.classes._becke88_lr_adiabatic4 import _becke88_lr_adiabatic4
from xcp2k.classes._becke88_lr4 import _becke88_lr4
from xcp2k.classes._lyp4 import _lyp4
from xcp2k.classes._pade4 import _pade4
from xcp2k.classes._hcth4 import _hcth4
from xcp2k.classes._optx4 import _optx4
from xcp2k.classes._libxc4 import _libxc4
from xcp2k.classes._ke_libxc4 import _ke_libxc4
from xcp2k.classes._cs14 import _cs14
from xcp2k.classes._xgga4 import _xgga4
from xcp2k.classes._ke_gga4 import _ke_gga4
from xcp2k.classes._p86c4 import _p86c4
from xcp2k.classes._pw924 import _pw924
from xcp2k.classes._pz814 import _pz814
from xcp2k.classes._tfw4 import _tfw4
from xcp2k.classes._tf4 import _tf4
from xcp2k.classes._vwn4 import _vwn4
from xcp2k.classes._xalpha4 import _xalpha4
from xcp2k.classes._tpss4 import _tpss4
from xcp2k.classes._pbe4 import _pbe4
from xcp2k.classes._xwpbe4 import _xwpbe4
from xcp2k.classes._becke974 import _becke974
from xcp2k.classes._becke_roussel4 import _becke_roussel4
from xcp2k.classes._lda_hole_t_c_lr4 import _lda_hole_t_c_lr4
from xcp2k.classes._pbe_hole_t_c_lr4 import _pbe_hole_t_c_lr4
from xcp2k.classes._gv094 import _gv094
from xcp2k.classes._beef4 import _beef4
class _xc_functional4(InputSection):
    """Input section for a CP2K XC_FUNCTIONAL block.

    One attribute per supported functional subsection, plus lists for the
    repeatable LIBXC / KE_LIBXC subsections which are appended to via
    LIBXC_add() / KE_LIBXC_add().
    """

    def __init__(self):
        InputSection.__init__(self)
        self.Section_parameters = None
        self.BECKE88 = _becke884()
        self.LYP_ADIABATIC = _lyp_adiabatic4()
        self.BECKE88_LR_ADIABATIC = _becke88_lr_adiabatic4()
        self.BECKE88_LR = _becke88_lr4()
        self.LYP = _lyp4()
        self.PADE = _pade4()
        self.HCTH = _hcth4()
        self.OPTX = _optx4()
        self.LIBXC_list = []
        self.KE_LIBXC_list = []
        self.CS1 = _cs14()
        self.XGGA = _xgga4()
        self.KE_GGA = _ke_gga4()
        self.P86C = _p86c4()
        self.PW92 = _pw924()
        self.PZ81 = _pz814()
        self.TFW = _tfw4()
        self.TF = _tf4()
        self.VWN = _vwn4()
        self.XALPHA = _xalpha4()
        self.TPSS = _tpss4()
        self.PBE = _pbe4()
        self.XWPBE = _xwpbe4()
        self.BECKE97 = _becke974()
        self.BECKE_ROUSSEL = _becke_roussel4()
        self.LDA_HOLE_T_C_LR = _lda_hole_t_c_lr4()
        self.PBE_HOLE_T_C_LR = _pbe_hole_t_c_lr4()
        self.GV09 = _gv094()
        self.BEEF = _beef4()
        self._name = "XC_FUNCTIONAL"
        # Maps CP2K section keywords to the attribute names defined above.
        self._subsections = {'BECKE88': 'BECKE88', 'LYP_ADIABATIC': 'LYP_ADIABATIC', 'BECKE88_LR_ADIABATIC': 'BECKE88_LR_ADIABATIC', 'BECKE88_LR': 'BECKE88_LR', 'LYP': 'LYP', 'PADE': 'PADE', 'HCTH': 'HCTH', 'OPTX': 'OPTX', 'CS1': 'CS1', 'XGGA': 'XGGA', 'KE_GGA': 'KE_GGA', 'P86C': 'P86C', 'PW92': 'PW92', 'PZ81': 'PZ81', 'TFW': 'TFW', 'TF': 'TF', 'VWN': 'VWN', 'XALPHA': 'XALPHA', 'TPSS': 'TPSS', 'PBE': 'PBE', 'XWPBE': 'XWPBE', 'BECKE97': 'BECKE97', 'BECKE_ROUSSEL': 'BECKE_ROUSSEL', 'LDA_HOLE_T_C_LR': 'LDA_HOLE_T_C_LR', 'PBE_HOLE_T_C_LR': 'PBE_HOLE_T_C_LR', 'GV09': 'GV09', 'BEEF': 'BEEF'}
        self._repeated_subsections = {'LIBXC': '_libxc4', 'KE_LIBXC': '_ke_libxc4'}
        self._attributes = ['Section_parameters', 'LIBXC_list', 'KE_LIBXC_list']

    def _add_repeated_section(self, new_section, section_parameters, target_list):
        """Shared logic for the *_add methods: optionally set the section's
        Section_parameters, register it in *target_list* and return it."""
        if section_parameters is not None:
            if hasattr(new_section, 'Section_parameters'):
                new_section.Section_parameters = section_parameters
        target_list.append(new_section)
        return new_section

    def LIBXC_add(self, section_parameters=None):
        """Append a new LIBXC subsection (optionally parameterized) and return it."""
        return self._add_repeated_section(_libxc4(), section_parameters,
                                          self.LIBXC_list)

    def KE_LIBXC_add(self, section_parameters=None):
        """Append a new KE_LIBXC subsection (optionally parameterized) and return it."""
        return self._add_repeated_section(_ke_libxc4(), section_parameters,
                                          self.KE_LIBXC_list)
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
a108251d6955ab18aefd892dba107082ec3cb923 | 612e5a48a75121b741650d345d58a682c0a81285 | /graph/graphic_connection.py | 19540bbd9258914c1ac4f6bf67f3fe13646fdac0 | [] | no_license | BelowzeroA/stochastic-learning | e89f9f459219279eb97c53401295ec5202e11b0b | 62242fd7ca4a63cd7c908032e97368985b1b97c5 | refs/heads/master | 2021-01-25T14:45:27.057441 | 2018-03-05T19:35:21 | 2018-03-05T19:35:21 | 123,724,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,441 | py | from graphics import Line, Point
from math import sqrt
from brain.connection import Connection
from brain.neuron import Neuron
class GraphicConnection(Connection):
    """A Connection that renders itself on the brain's graphics window,
    redrawing whenever its pulsing state toggles."""

    def __init__(self, brain, source, target: Neuron):
        super(GraphicConnection, self).__init__(brain, source, target)
        # Remember the last-rendered pulsing state so we only redraw on change.
        self.prev_pulsing = False

    def update(self, draw=True):
        super(GraphicConnection, self).update()
        state_changed = self.prev_pulsing != self.pulsing
        if state_changed and draw:
            self.draw()
        self.prev_pulsing = self.pulsing

    def draw(self):
        color = 'red' if self.pulsing else 'black'
        src = self.source
        dst = self.target

        # Thin body line spanning the whole connection.
        body = Line(src.location, dst.location)
        body.setWidth(1)
        body.setFill(color)
        body.setOutline(color)
        body.draw(self.brain.win)

        # Thick stub of fixed length near the target, indicating direction.
        dx = dst.location.x - src.location.x
        dy = dst.location.y - src.location.y
        slope = abs(dy / dx) if dx != 0 else abs(dy)
        stub_len = 20
        sign_dx = -1 if dx < 0 else 1
        sign_dy = -1 if dy < 0 else 1
        # Offset from the target back along the line, length stub_len.
        off_x = -sign_dx * stub_len / sqrt(slope ** 2 + 1)
        off_y = -sign_dy * slope * stub_len / sqrt(slope ** 2 + 1)
        # sleep(1)
        stub_start = Point(dst.location.x + off_x, dst.location.y + off_y)
        stub = Line(stub_start, dst.location)
        stub.setWidth(3)
        stub.setFill(color)
        stub.draw(self.brain.win)
| [
"striver8"
] | striver8 |
20bf8a60d72585f0a48a322755aa4788d0275de3 | 16ac9158781d2616141433df9be4820e6d998e03 | /src/eavatar.ava/ava/runtime/config.py | a5ce582b9c006054037c6cd74a3020d2f5fcad62 | [] | no_license | pombredanne/ava-srv | 0a357fb39d0179db0c0d545eb23d707d25b0e446 | 8acef33502d4bc3089f610f0b4ee33e7a5e779ae | refs/heads/master | 2020-12-31T05:56:07.741625 | 2015-03-06T06:29:56 | 2015-03-06T06:29:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,882 | py | # -*- coding: utf-8 -*-
"""
Configuration file reading/writing.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import logging.config
import os.path
from ConfigParser import SafeConfigParser
from ava.runtime import environ
AGENT_CONF = os.path.join(environ.conf_dir(), b'agent.ini')
LOGGING_CONF = os.path.join(environ.conf_dir(), b'logging.ini')
PACKAGES_CONF = os.path.join(environ.conf_dir(), b'packages.ini')
# The default configuration file is located at the base directory.
_defaults = dict(base_dir=environ.base_dir(),
conf_dir=environ.conf_dir(),
data_dir=environ.data_dir(),
pkgs_dir=environ.pkgs_dir(),
logs_dir=environ.logs_dir())
class ConfigFile(SafeConfigParser):
    """A SafeConfigParser bound to a single .ini file on disk.

    NOTE(review): Python 2 style — SafeConfigParser comes from the legacy
    ConfigParser module and is an old-style class, hence the direct
    base-class __init__ call instead of super().
    """

    def __init__(self, filename, defaults=_defaults):
        # The defaults (base_dir, conf_dir, data_dir, ...) are available to
        # .ini values via %(name)s interpolation.
        SafeConfigParser.__init__(self, defaults)
        self.filename = os.path.abspath(filename)

    def load(self):
        # Re-read the backing file; ConfigParser.read() skips missing files.
        self.read(self.filename)

    def save(self):
        # 'wb' matches Python 2's str-based ConfigParser.write().
        with open(self.filename, 'wb') as fp:
            self.write(fp)
_agent = None
def agent(file=AGENT_CONF):
    """Return the process-wide agent configuration, creating it on first use."""
    global _agent
    if not _agent:
        _agent = ConfigFile(file)
        # Sections must exist before options can be set on them.
        for section in ('agent', 'webfront', 'data', 'extension'):
            _agent.add_section(section)
        # Built-in defaults for the web front-end.
        _agent.set('webfront', 'listen_port', '5000')
        _agent.set('webfront', 'listen_addr', '127.0.0.1')
        # Values from the file on disk override the defaults above.
        _agent.load()
    return _agent
_packages = None
def packages(file=PACKAGES_CONF):
    """Return the process-wide packages configuration, creating it on first use."""
    global _packages
    if _packages:
        return _packages
    _packages = ConfigFile(file)
    _packages.load()
    return _packages
# configure logging
logging.config.fileConfig(LOGGING_CONF, defaults=_defaults)
| [
"sam@eavatar.com"
] | sam@eavatar.com |
afae17498d04492c414161c081d29bd04a00c86e | 32233aeda342ff6e107496caaf3c9be322ab80b2 | /06 Brute-Force/6.1 sum + recursiveSum.py | a17932df866565d47efd0e67da2fd83ce2a96581 | [] | no_license | kwr0113/Algo_Python | 1258ed3b71f7826ec55f2ff9e46a6485df6039bf | b17ad19ccdb1bfe979618af8c98f04c67f38495f | refs/heads/master | 2023-08-19T03:52:16.385548 | 2021-09-23T12:22:05 | 2021-09-23T12:22:05 | 407,837,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | # 1부터 n까지의 합을 계산하는 반복 함수와 재귀 함수
def ssum(n):
    """Return the sum 1 + 2 + ... + n (0 when n < 1).

    Uses the built-in sum() over a range instead of a manual accumulator
    loop; behavior is identical for every integer n.
    """
    return sum(range(1, n + 1))
def recursiveSum(n):
    """Return the sum 1 + 2 + ... + n recursively (0 when n < 1).

    The base case covers n <= 0 (empty sum) so the recursion always
    terminates; the previous version only stopped at n == 1 and recursed
    forever for n < 1.
    """
    if n <= 0:
        return 0
    return n + recursiveSum(n - 1)
print(ssum(10))
print(recursiveSum(10))
| [
"kwr0113@gmail.com"
] | kwr0113@gmail.com |
563f2484acad0c35b453f0173217d3702400dd48 | dcb8f9c5739b00838ffa6bb2f9850e0e6f80312c | /hw1/minh_tran_hw1/minh_tran_task1.py | dbf8c8e8cb6dfd452cc68795d420470fbd89d7b1 | [] | no_license | trademark152/Data_Mining_USC | d56c6b51b523c0a8548e94b638d3155fe189dd4e | 124c17b9c55c3880b4c24815bcf369339f198e2b | refs/heads/master | 2020-09-26T09:31:50.250840 | 2019-12-06T02:30:27 | 2019-12-06T02:30:27 | 226,227,472 | 0 | 6 | null | null | null | null | UTF-8 | Python | false | false | 4,845 | py | """
Task1: Data Exploration (3 points)
You will explore the dataset, user.json, containing review information for this task, and you need to write
a program to automatically answer the following questions:
"""
## LIBRARIES
import json
from pyspark import SparkContext
import sys
## TO RUN CODE
""""
spark-submit hw1/minh_tran_task1.py yelp_dataset/testUser.json outputTask1.txt
"""
## (A) Find the total number of users (0.5 point)
def taskA(taskInput):
    """(A) Total number of users: one JSON record per user."""
    records = taskInput.map(lambda line: json.loads(line))
    # Key each record by user_id (IDs are unique, so no grouping is needed).
    keyed = records.map(lambda record: (record["user_id"], 1))
    return [("total_users", keyed.count())]
## (B) Find the average number of written reviews of all users (0.5 point)
def taskB(taskInput):
    """(B) Average number of written reviews across all users."""
    # (user_id, review_count) pairs, one per user.
    counts = taskInput.map(lambda line: json.loads(line)) \
                      .map(lambda record: (record["user_id"], record["review_count"]))
    total_reviews = counts.map(lambda pair: pair[1]).sum()
    total_users = counts.map(lambda pair: pair[0]).count()
    return [("avg_reviews", total_reviews / total_users)]
## (C) Find the number of distinct user names (0.5 point)
def taskC(taskInput):
    """(C) Number of distinct user names."""
    names = taskInput.map(lambda line: json.loads(line)) \
                     .map(lambda record: record["name"])
    return [("distinct_usernames", names.distinct().count())]
## (D) Find the number of users that joined yelp in the year 2011 (0.5 point)
def taskD(taskInput):
    """(D) Number of users that joined yelp in the year 2011."""
    since = taskInput.map(lambda line: json.loads(line)) \
                     .map(lambda record: (record["user_id"], record["yelping_since"]))
    # yelping_since is an ISO-style date string, so the first 4 chars are the year.
    joined_2011 = since.filter(lambda pair: pair[1][:4] == "2011")
    return [("num_users", joined_2011.count())]
## (E) Find Top 10 popular names and the number of times they appear (user names that appear the most number of times) (0.5 point)
def taskE(taskInput):
    """(E) Top 10 most common user names with their occurrence counts."""
    name_counts = taskInput.map(lambda line: json.loads(line)) \
                           .map(lambda record: (record["name"], 1)) \
                           .reduceByKey(lambda a, b: a + b)
    # Sort by descending count, breaking ties by name ascending.
    ranked = name_counts.sortBy(lambda pair: (-pair[1], pair[0]), ascending=True)
    return ranked.take(10)
## (F) Find Top 10 user ids who have written the most number of reviews (0.5 point)
def taskF(taskInput):
    """(F) Top 10 user ids by number of written reviews."""
    per_user = taskInput.map(lambda line: json.loads(line)) \
                        .map(lambda record: (record["user_id"],
                                             (record["review_count"], record["name"])))
    # Sort by descending review count, breaking ties by name ascending,
    # then keep only (user_id, review_count).
    ranked = per_user.sortBy(lambda row: (-row[1][0], row[1][1]), ascending=True)
    return ranked.map(lambda row: (row[0], row[1][0])).take(10)
if __name__ == "__main__":
    # ensure number of inputs is 3: py file, input file, output file
    if len(sys.argv) != 3:
        print("This script requires 2 input arguments to run inputFile outputFile")
        # break it
        sys.exit(1)

    # import input and output file path from shell
    inputFile = sys.argv[1]
    outputFile = sys.argv[2]

    # create a spark context object using all available cores
    # conf = SparkConf().setAppName("INF553_HW1_MT").setMaster("local*]")
    sc = SparkContext("local[*]")
    # to simplify output
    sc.setLogLevel("ERROR")

    # get input file and import into the SparkContext object; persist() caches
    # the RDD because it is re-read by each of the six tasks below.
    task1Input = sc.textFile(inputFile).persist()

    # answering task 1
    tA = taskA(task1Input)
    tB = taskB(task1Input)
    tC = taskC(task1Input)
    tD = taskD(task1Input)
    tE = taskE(task1Input)
    tF = taskF(task1Input)

    # output results based on given ordering
    # initiate output
    task1Output = {}
    task1Output[tA[0][0]] = tA[0][1]  # rhs indexing because answer is [('total_users', 4)]
    task1Output[tB[0][0]] = tB[0][1]
    task1Output[tC[0][0]] = tC[0][1]
    task1Output[tD[0][0]] = tD[0][1]
    task1Output["top10_popular_names"] = tE
    task1Output["top10_most_reviews"] = tF

    # write out json files
    jsonOutputFile = json.dumps(task1Output)
    with open(outputFile, "w") as fileOut:
        fileOut.write(jsonOutputFile)
"trademark152@gmail.com"
] | trademark152@gmail.com |
f805b12d7737a2384464edefbddf64219b9fd22a | 926fe08bf24a8335f9cec827b651a7c75dc9c000 | /extract_from_node_results.py | b3a61703210b440e7db9725207b38bb96f2dbb59 | [] | no_license | andycasey/ges-idr4-abundances | 71d9cfa38f5909b21c98626fa75cdda8e100cd97 | c0a74f90b5e45a20ef4b3337692014a2fbbcbfa1 | refs/heads/master | 2021-01-22T09:42:53.145128 | 2015-10-14T22:39:00 | 2015-10-14T22:39:00 | 42,256,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,036 | py |
""" Extract OACT results for Li and place them in the line_abundances table """
__author__ = 'Andy Casey <arc@ast.cam.ac.uk>'
import logging
import numpy as np
import release
logger = logging.getLogger("ges")
# We may need to do this many times...
database, remove_existing = ("arc", True)
element, ion, wavelength = ("Li", 1, 6707.8)
node, measurement_type, code = ("OACT", "S", "Unknown")
ges = release.DataRelease(database)
def safe_float(s):
    """Convert *s* to float, returning NaN if the value is missing/unparseable.

    Used for node_results columns that may contain None or non-numeric text.
    """
    try:
        return float(s)
    except (TypeError, ValueError):
        return np.nan
def safe_int(s):
    """Convert *s* to int, returning 0 if the value is missing/unparseable.

    Used for flag columns (e.g. upper limits) that may contain None or text.
    """
    try:
        return int(s)
    except (TypeError, ValueError):
        return 0
# Remove any existing rows in line_abundances for this element from this node?
if remove_existing:
    logger.info("Deleting existing {0} {1} line abundances from {2}".format(
        element, ion, node))
    ges.execute("""DELETE FROM line_abundances WHERE TRIM(node) = %s
AND TRIM(element) = %s AND ion = %s""", (node, element, ion))
    ges.commit()

# Get all the details from node results.
node_results = ges.retrieve_table("""SELECT * FROM node_results
WHERE TRIM(node) = %s""", (node, ))
N = len(node_results)

for i, node_result_row in enumerate(node_results):
    # Create the spectrum_filename_stub: keep only the character positions
    # that are identical across every filename in this row, then drop the
    # 5-character extension (".fits").
    filenames = node_result_row["filename"].strip().split("|")
    spectrum_filename_stub = ("".join([filenames[0][j] \
        for j in range(max(map(len, filenames))) \
        if len(set([item[j] for item in filenames])) == 1]))[:-5]

    # Create the new row of data.
    # li1 | upper_combined_li1 | e_li1 | nn_li1 | enn_li1 | nl_li1
    # Some tables name the upper-limit flag upper_<el><ion>, others
    # upper_combined_<el><ion>; fall back to the latter when needed.
    upper_column = "upper_{0}{1}".format(element.lower(), ion)
    if upper_column not in node_result_row.dtype.names:
        upper_column = "upper_combined_{0}{1}".format(element.lower(), ion)

    line_abundance_row = {
        "abundance_filename": "GES_iDR4_WG11_{0}.fits".format(node),
        "spectrum_filename_stub": spectrum_filename_stub,
        "node": node,
        "cname": node_result_row["cname"],
        "code": code,
        "object": node_result_row["object"],
        "element": element,
        "ion": ion,
        "wavelength": wavelength,
        # No per-line equivalent widths are available in node_results.
        "ew": np.nan,
        "e_ew": np.nan,
        "upper_ew": 0,
        "abundance": safe_float(node_result_row["{0}{1}".format(element.lower(), ion)]),
        "e_abundance": safe_float(node_result_row["e_{0}{1}".format(element.lower(), ion)]),
        "upper_abundance": safe_int(node_result_row[upper_column]),
        "measurement_type": measurement_type,
    }
    line_abundance_row["scaled_abundance"] = line_abundance_row["abundance"]
    logger.debug("Inserting row {0}/{1} {2}".format(i + 1, N,
        line_abundance_row.items()))
    ges.execute("""INSERT INTO line_abundances({0}) VALUES ({1})""".format(
        ", ".join(line_abundance_row.keys()),
        ", ".join(["%({})s".format(_) for _ in line_abundance_row.keys()])),
        line_abundance_row)

# NOTE(review): indentation was lost in this copy of the file; the commit is
# placed once after the loop (batch commit) — confirm against the original if
# per-row commits were intended.
ges.commit()
logger.info("Done")
"andycasey@gmail.com"
] | andycasey@gmail.com |
7573a486fe8a2675af898eda3e9751590a91a632 | a9652251346d469d4e6da48ca4e44438f3b6b65d | /neural_decoding/kalman_neural_decoding.py | 190d0d6f668cf144cd31e2548f1dd6c172878bb6 | [] | no_license | weihhh/python3-practice | 201023e214881fb0c08b8dd10e86c47a621b23e5 | 7e2b40b7152ef5bf6fe68264c8b1c0bbbb16c5eb | refs/heads/master | 2021-01-11T02:40:05.087986 | 2018-02-05T15:24:35 | 2018-02-05T15:24:35 | 70,913,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,799 | py | #Import standard packages
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from scipy import io
from scipy import stats
import pickle
#Import metrics
from metrics import get_R2
from metrics import get_rho
#Import decoder functions
from decoders import KalmanFilterDecoder
# Load the raw data
data_folder = ''  # path where the data files are stored
with open(data_folder + 'example_data_s1.pickle', 'rb') as f:
    neural_data, vels_binned = pickle.load(f)

# Preprocess the raw data
lag = 0  # What time bin of spikes should be used relative to the output
# (lag=-1 means use the spikes 1 bin before the output); i.e. the latency
# between neural activity and movement can be tuned manually.

X_kf = neural_data

# Format the output data.
'''
For the Kalman filter we use position, velocity and acceleration as outputs.
Ultimately we only care about the accuracy of the velocity fit (for this
dataset), but including all of them as covariates improves performance.
'''

# Derive position
pos_binned = np.zeros(vels_binned.shape)  # Initialize
pos_binned[0, :] = 0  # Assume starting position is at [0,0]
# Integrate velocity over each time bin to obtain position (velocity * bin length)
for i in range(pos_binned.shape[0] - 1):
    pos_binned[i + 1, 0] = pos_binned[i, 0] + vels_binned[i, 0] * .05  # Note that .05 is the length of the time bin
    pos_binned[i + 1, 1] = pos_binned[i, 1] + vels_binned[i, 1] * .05

# Derive acceleration by differencing velocity
temp = np.diff(vels_binned, axis=0)  # axis 0: time bins; axis 1: x,y velocity components
# Assume the last bin's acceleration equals the second-to-last's: duplicate
# the final row and append it so the bin count matches.
acc_binned = np.concatenate((temp, temp[-1:, :]), axis=0)

# Final output covariates: concatenate the features -> (#bins) x 6,
# i.e. position (2) + velocity (2) + acceleration (2).
y_kf = np.concatenate((pos_binned, vels_binned, acc_binned), axis=1)

num_examples = X_kf.shape[0]  # number of time bins

# Re-align data to take the manually set lag into account
if lag < 0:
    y_kf = y_kf[-lag:, :]
    X_kf = X_kf[0:num_examples + lag, :]
if lag > 0:
    y_kf = y_kf[0:num_examples - lag, :]
    X_kf = X_kf[lag:num_examples, :]

# Fractions of the data assigned to training/testing/validation sets
training_range = [0, 0.7]
testing_range = [0.7, 0.85]
valid_range = [0.85, 1]

# Number of time bins after accounting for the lag
num_examples_kf = X_kf.shape[0]

# Convert the fractional ranges into actual bin index ranges.
# Note that each range has a buffer of 1 bin at the beginning and end
# This makes it so that the different sets don't include overlapping data
# NOTE(review): np.int is deprecated/removed in recent NumPy releases — this
# only runs on older NumPy versions.
training_set = np.arange(np.int(np.round(training_range[0] * num_examples_kf)) + 1, np.int(np.round(training_range[1] * num_examples_kf)) - 1)
testing_set = np.arange(np.int(np.round(testing_range[0] * num_examples_kf)) + 1, np.int(np.round(testing_range[1] * num_examples_kf)) - 1)
valid_set = np.arange(np.int(np.round(valid_range[0] * num_examples_kf)) + 1, np.int(np.round(valid_range[1] * num_examples_kf)) - 1)

# Get training data
X_kf_train = X_kf[training_set, :]
y_kf_train = y_kf[training_set, :]

# Get testing data
X_kf_test = X_kf[testing_set, :]
y_kf_test = y_kf[testing_set, :]

# Get validation data
X_kf_valid = X_kf[valid_set, :]
y_kf_valid = y_kf[valid_set, :]

# Normalization: statistics come from the training set only, then are applied
# to all three splits.
# Z-score inputs
X_kf_train_mean = np.nanmean(X_kf_train, axis=0)
X_kf_train_std = np.nanstd(X_kf_train, axis=0)
X_kf_train = (X_kf_train - X_kf_train_mean) / X_kf_train_std
X_kf_test = (X_kf_test - X_kf_train_mean) / X_kf_train_std
X_kf_valid = (X_kf_valid - X_kf_train_mean) / X_kf_train_std

# Zero-center outputs
y_kf_train_mean = np.mean(y_kf_train, axis=0)
y_kf_train = y_kf_train - y_kf_train_mean
y_kf_test = y_kf_test - y_kf_train_mean
y_kf_valid = y_kf_valid - y_kf_train_mean

# Declare model
model_kf = KalmanFilterDecoder(C=1)  # There is one optional parameter that is set to the default in this example (see ReadMe)

# Fit model
model_kf.fit(X_kf_train, y_kf_train)

# Get predictions
y_valid_predicted_kf = model_kf.predict(X_kf_valid, y_kf_valid)

# Get metrics of fit (see read me for more details on the differences between metrics)
# First I'll get the R^2
R2_kf = get_R2(y_kf_valid, y_valid_predicted_kf)
print('R2:', R2_kf[2:4])  # I'm just printing the R^2's of the 3rd and 4th entries that correspond to the velocities
# Next I'll get the rho^2 (the pearson correlation squared)
rho_kf = get_rho(y_kf_valid, y_valid_predicted_kf)
print('rho2:', rho_kf[2:4]**2)  # I'm just printing the rho^2's of the 3rd and 4th entries that correspond to the velocities

# As an example, I plot an example 1000 values of the x velocity (column index 2), both true and predicted with the Kalman filter
# Note that I add back in the mean value, so that both true and predicted values are in the original coordinates
fig_x_kf = plt.figure()
plt.plot(y_kf_valid[1000:2000, 2] + y_kf_train_mean[2], 'b')
plt.plot(y_valid_predicted_kf[1000:2000, 2] + y_kf_train_mean[2], 'r')
plt.show()

# Save figure
# fig_x_kf.savefig('x_velocity_decoding.eps')
"wz591757596@163.com"
] | wz591757596@163.com |
b51213646b02a5741d23ff6c94d22ab6f7e52add | 6550dceb5b2d17dfedf94c4049f63e770c8d7712 | /ffprobe3/ffprobe.py | f9e29250daba998461d685c77f44409a45a53291 | [] | no_license | open-speech-org/openspeechcorpus.com | 03baed28e54f15ece8b8050c501e8df6e641ab44 | e2e612cacab2e0458a44f3729738c5816f57dc8f | refs/heads/master | 2022-12-23T12:32:33.501336 | 2020-11-25T03:27:52 | 2020-11-25T03:27:52 | 171,372,750 | 0 | 1 | null | 2022-12-08T03:28:10 | 2019-02-18T23:42:25 | Python | UTF-8 | Python | false | false | 7,783 | py | """
Python wrapper for ffprobe command line tool. ffprobe must exist in the path.
"""
import os
import pipes
import platform
import re
import shlex
import subprocess
import sys

from ffprobe3.exceptions import FFProbeError
class FFProbe:
    """
    FFProbe wraps the ffprobe command and pulls the data into an object form::

        metadata = FFProbe('multimedia-file.mov')

    Raises:
        IOError: if the ffprobe executable is not on the PATH, or if
            ``video_file`` does not exist.
    """

    def __init__(self, video_file):
        self.video_file = video_file

        # Fail fast if ffprobe is not available on the PATH.
        # (Narrowed from a bare except; also removed leftover debug code that
        # appended a hard-coded personal directory to sys.path.)
        try:
            with open(os.devnull, 'w') as tempf:
                subprocess.check_call(["ffprobe", "-h"], stdout=tempf, stderr=tempf)
        except (OSError, subprocess.SubprocessError):
            raise IOError('ffprobe not found.')

        if not os.path.isfile(video_file):
            raise IOError('No such media file ' + video_file)

        if str(platform.system()) == 'Windows':
            cmd = ["ffprobe", "-show_streams", self.video_file]
        else:
            # shlex.quote replaces the deprecated pipes.quote (same behavior).
            cmd = ["ffprobe -show_streams " + shlex.quote(self.video_file)]
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)

        # Aggregate (format-level) properties picked out of the stream sections.
        self.format = None
        self.created = None
        self.duration = None
        self.start = None
        self.bit_rate = None
        self.sample_rate = None
        self.bits_per_sample = None
        self.channels = None
        self.streams = []
        self.video = []
        self.audio = []

        self._parse_output(p)
        p.stdout.close()
        p.stderr.close()

        # Bucket the parsed streams by type.
        for stream in self.streams:
            if stream.is_audio():
                self.audio.append(stream)
            if stream.is_video():
                self.video.append(stream)

    def _parse_output(self, p):
        """Collect the [STREAM]...[/STREAM] sections emitted by ffprobe into
        FFStream objects, and pick the aggregate fields out of stdout."""
        data_lines = []
        for raw in iter(p.stdout.readline, b''):
            line = raw.decode('UTF-8')
            if re.match(r'\[STREAM\]', line):
                data_lines = []
            elif re.match(r'\[/STREAM\]', line):
                self.streams.append(FFStream(data_lines))
                data_lines = []
            else:
                key_value = line.strip().split('=')
                if len(key_value) > 1:
                    self._set_format_field(key_value[0], key_value[1])
                data_lines.append(line)
        # Some ffprobe builds write the sections to stderr instead, so scan it
        # for stream sections as well (aggregate fields are only read from stdout).
        for raw in iter(p.stderr.readline, b''):
            line = raw.decode('UTF-8')
            if re.match(r'\[STREAM\]', line):
                data_lines = []
            elif re.match(r'\[/STREAM\]', line):
                self.streams.append(FFStream(data_lines))
                data_lines = []
            else:
                data_lines.append(line)

    def _set_format_field(self, key, value):
        """Store a recognized top-level key=value pair as an attribute."""
        if key == "codec_name":
            self.format = value
        elif key == "created":
            self.created = value
        elif key == "duration":
            self.duration = float(value)
        elif key == "bit_rate":
            self.bit_rate = int(value)
        elif key == "sample_rate":
            self.sample_rate = int(value)
        elif key == "bits_per_sample":
            self.bits_per_sample = int(value)
        elif key == "channels":
            self.channels = int(value)
class FFStream:
"""
An object representation of an individual stream in a multimedia file.
"""
def __init__(self, data_lines):
for a in data_lines:
kvPair = a.strip().split('=')
if len(kvPair) > 1 :
self.__dict__[kvPair[0]] = kvPair[1]
def is_audio(self):
"""
Is this stream labelled as an audio stream?
"""
val = False
if self.__dict__['codec_type']:
if str(self.__dict__['codec_type']) == 'audio':
val = True
return val
def is_video(self):
"""
Is the stream labelled as a video stream.
"""
val = False
if self.__dict__['codec_type']:
if self.__dict__['codec_type'] == 'video':
val = True
return val
def is_subtitle(self):
"""
Is the stream labelled as a subtitle stream.
"""
val = False
if self.__dict__['codec_type']:
if self.__dict__['codec_type'] == 'subtitle':
val = True
return val
def frame_size(self):
"""
Returns the pixel frame size as an integer tuple (width,height) if the stream is a video stream.
Returns None if it is not a video stream.
"""
size = None
if self.is_video():
width = self.__dict__['width']
height = self.__dict__['height']
if width and height:
try:
size = (int(width), int(height))
except ValueError:
raise FFProbeError("None integer size %s:%s" % (width, height))
return size
def pixel_format(self):
"""
Returns a string representing the pixel format of the video stream. e.g. yuv420p.
Returns none is it is not a video stream.
"""
f = None
if self.is_video():
if self.__dict__['pix_fmt']:
f = self.__dict__['pix_fmt']
return f
def frames(self):
"""
Returns the length of a video stream in frames. Returns 0 if not a video stream.
"""
frame_count = 0
if self.is_video() or self.is_audio():
if self.__dict__['nb_frames']:
try:
frame_count = int(self.__dict__['nb_frames'])
except ValueError:
raise FFProbeError('None integer frame count')
return frame_count
def duration_seconds(self):
"""
Returns the runtime duration of the video stream as a floating point number of seconds.
Returns 0.0 if not a video stream.
"""
duration = 0.0
if self.is_video() or self.is_audio():
if self.__dict__['duration']:
try:
duration = float(self.__dict__['duration'])
except ValueError:
raise FFProbeError('None numeric duration')
return duration
def language(self):
"""
Returns language tag of stream. e.g. eng
"""
lang = None
if self.__dict__['TAG:language']:
lang = self.__dict__['TAG:language']
return lang
def codec(self):
"""
Returns a string representation of the stream codec.
"""
codec_name = None
if self.__dict__['codec_name']:
codec_name = self.__dict__['codec_name']
return codec_name
def codec_description(self):
"""
Returns a long representation of the stream codec.
"""
codec_d = None
if self.__dict__['codec_long_name']:
codec_d = self.__dict__['codec_long_name']
return codec_d
def codec_tag(self):
"""
Returns a short representative tag of the stream codec.
"""
codec_t = None
if self.__dict__['codec_tag_string']:
codec_t = self.__dict__['codec_tag_string']
return codec_t
def bit_rate(self):
"""
Returns bit_rate as an integer in bps
"""
b = 0
if self.__dict__['bit_rate']:
try:
b = int(self.__dict__['bit_rate'])
except ValueError:
raise FFProbeError('None integer bit_rate')
return b
| [
"ma0@contraslash.com"
] | ma0@contraslash.com |
3fc014bbb8096316c01d9b5024ecbfb3f9fd4c45 | 5f1e12cf84d02bbc4220ed11758752fed7cfd6c7 | /samples/basic/executor/models/cisco-ios-xr/Cisco-IOS-XR-snmp-test-trap-act/nc-execute-xr-snmp-test-trap-act-416-ydk.py | 71a0f1f9882b8624945da7f63b058d9a6bf42c89 | [
"Apache-2.0"
] | permissive | eliwilliams/ydk-py-samples | 6c3b8063848c8718910c7255256f7d3aee456974 | 40aa500e7d7ad05e960fb1552c73dab3adbc08c7 | refs/heads/master | 2021-07-25T01:21:26.442018 | 2017-11-06T17:23:05 | 2017-11-06T17:23:05 | 109,724,992 | 0 | 0 | null | 2017-11-06T17:07:40 | 2017-11-06T17:07:39 | null | UTF-8 | Python | false | false | 2,677 | py | #!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Execute RPC for model Cisco-IOS-XR-snmp-test-trap-act.
usage: nc-execute-xr-snmp-test-trap-act-416-ydk.py [-h] [-v] device
positional arguments:
device NETCONF device (ssh://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import ExecutorService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_snmp_test_trap_act \
as xr_snmp_test_trap_act
import logging
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("device",
help="NETCONF device (ssh://user:password@host:port)")
args = parser.parse_args()
device = urlparse(args.device)
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create NETCONF provider
provider = NetconfServiceProvider(address=device.hostname,
port=device.port,
username=device.username,
password=device.password,
protocol=device.scheme)
# create executor service
executor = ExecutorService()
entity_fru_fan_tray_oper_status_up_rpc = xr_snmp_test_trap_act.EntityFruFanTrayOperStatusUpRpc() # create object
# execute RPC on NETCONF device
executor.execute_rpc(provider, entity_fru_fan_tray_oper_status_up_rpc)
exit()
# End of script
| [
"saalvare@cisco.com"
] | saalvare@cisco.com |
3349e2469816e9fe64b77eb914dc2f2d778d5f7f | b3ab2979dd8638b244abdb2dcf8da26d45d7b730 | /test/test_pagination_response_permission_set_response_model.py | ce8a5fe159bc67f84afe67dff0d2b4773ad93151 | [] | no_license | CU-CommunityApps/ct-cloudcheckr-cmx-client | 4b3d9b82c5dfdaf24f8f443526868e971d8d1b15 | 18ac9fd4d6c4ae799c0d21745eaecd783da68c0c | refs/heads/main | 2023-03-03T19:53:57.685925 | 2021-02-09T13:05:07 | 2021-02-09T13:05:07 | 329,308,757 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,159 | py | # coding: utf-8
"""
CloudCheckr API
CloudCheckr API # noqa: E501
OpenAPI spec version: v1
Contact: support@cloudcheckr.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cloudcheckr_cmx_client
from cloudcheckr_cmx_client.models.pagination_response_permission_set_response_model import PaginationResponsePermissionSetResponseModel # noqa: E501
from cloudcheckr_cmx_client.rest import ApiException
class TestPaginationResponsePermissionSetResponseModel(unittest.TestCase):
"""PaginationResponsePermissionSetResponseModel unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPaginationResponsePermissionSetResponseModel(self):
"""Test PaginationResponsePermissionSetResponseModel"""
# FIXME: construct object with mandatory attributes with example values
# model = cloudcheckr_cmx_client.models.pagination_response_permission_set_response_model.PaginationResponsePermissionSetResponseModel() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"pea1@cornell.edu"
] | pea1@cornell.edu |
8b28f3977b55bf2426d6d91b6eebfd7d27177db7 | 9b32771b7d1513ee37bc62dd347675abcfc1bfc9 | /example_snippets/multimenus_snippets/NewSnippets/NumPy/Pretty printing/Formatting functions for specific dtypes/Set formatter for `int` type.py | 43cc777513adf98a89e3dab51b28036320290cf7 | [
"BSD-3-Clause"
] | permissive | listar0810/jupyterlab-snippets-multimenus | 44087ef1aeb030a3074862a337508b57d50072c6 | 477f51cfdbad7409eab45abe53cf774cd70f380c | refs/heads/master | 2022-12-12T18:19:25.221083 | 2020-09-08T01:11:01 | 2020-09-08T01:11:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | def format_int(x):
return 'int({0})'.format(x)
with printoptions(formatter={'int': format_int}):
print(np.random.randint(-3, 4, 10)) | [
"kptan86@gmail.com"
] | kptan86@gmail.com |
b7c426a31e6b5ddbc89e14288c66694f2ec8c368 | a60e81b51935fb53c0900fecdadba55d86110afe | /python/note/改善python程序的91个建议.py | 96231cef18a927a9f58d3e1b9e81c3ecd2cbf6bd | [] | no_license | FrankieZhen/Lookoop | fab6855f5660467f70dc5024d9aa38213ecf48a7 | 212f8b83d6ac22db1a777f980075d9e12ce521d2 | refs/heads/master | 2020-07-27T08:12:45.887814 | 2019-09-16T11:48:20 | 2019-09-16T11:48:20 | 209,021,915 | 1 | 0 | null | 2019-09-17T10:10:46 | 2019-09-17T10:10:46 | null | UTF-8 | Python | false | false | 1,535 | py | # coding=utf-8
# 2019-1-28
# 改善python程序的91个建议
# %占位符
value = {'name':'yauno', 'sex':'man'}
print('name %(name)s , sex %(sex)s' % value)
# str.format
# ' 与 "的区别
print('"test"')
print("\"test\"")
# 常量的管理
# 12: 不推荐使用type来进行检查
# isinstance(object, classoinfo)
print(isinstance('string', str))
# 13. 涉及除法运算时,尽量先将操作数转换为浮点类型再做运算
# 14. 警惕使用eval()的安全漏洞
# 17. unicode
# 在2.6之后可以使用 import_unicode_literals自动将定义的普通字符识别为Uicode字符串, 这样字符串的行为将保持和pythoh3一致
# 19. import
# (1) 命名空间的冲突
# (2) 循环嵌套导入问题: 不使用 from .. import ... 直接使用 import ...
# 21. ++i 与 i += 1
# 23. else
# (1)
def print_prime(n):
for i in range(n):
for j in range(2, i):
if i % j == 0:
break # 这里终止后不执行后面打印操作
else:
print("%s is prime." % i) # 内嵌for 循环正常执行完后执行打印操作
print_prime(10)
# (2)
try:
pass
except:
pass
else:
pass
finally:
pass
# 25. finally
def finally_test(a):
try:
print("\ntesting...")
if a <= 0:
raise ValueError("data can not be negative.")
else:
return a
except ValueError as e:
print("%s" % e)
finally:
print("end")
return -1
for i in range(-1, 2):
ret = finally_test(i) # 最后返回永远都是-1, 因为返回a之前要执行finall, 而finally直接就返回了-1
print("return value: %s" % ret) | [
"33798487+YangXiaoo@users.noreply.github.com"
] | 33798487+YangXiaoo@users.noreply.github.com |
aca8dd05d90354b8c7b9f7084c40e115a9c3fb42 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_jinn.py | 5f214987ef38459b4a3fc545f79b4395f6eb6f08 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py |
#calss header
class _JINN():
def __init__(self,):
self.name = "JINN"
self.definitions = [u'in Arab and Muslim traditional stories, a magical spirit who may appear in the form of a human or an animal and can take control of a person ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
f34128088a213795dfa4a912f86cdfc5140eff13 | 929a816fc299959d0f8eb0dd51d064be2abd6b78 | /LintCode/ladder 08 memorized search/必修/683. Word Break III/solution.py | 11811522a165c1860914fd748163d4584a24de2f | [
"MIT"
] | permissive | vincent507cpu/Comprehensive-Algorithm-Solution | 27940da7bc0343921930a2eafbd649da93a5395d | 04e01e49622457f09af2e1133954f043c0c92cb9 | refs/heads/master | 2023-07-20T07:12:15.590313 | 2021-08-23T23:42:17 | 2021-08-23T23:42:17 | 258,644,691 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | class Solution:
"""
@param: : A string
@param: : A set of word
@return: the number of possible sentences.
"""
def wordBreak3(self, s, dict):
# Write your code here
if not s or not dict:
return 0
lower_dict = set()
for piece in dict:
lower_dict.add(piece.lower())
max_len = max([len(piece) for piece in dict])
return self.memo_search(s.lower(), lower_dict, 0, max_len, {})
def memo_search(self, s, dict, index, max_len, memo):
if index == len(s):
return 1
if index in memo:
return memo[index]
memo[index] = 0
for i in range(index, len(s)):
if i + 1 - index > max_len:
break
word = s[index:i + 1]
if word not in dict:
continue
memo[index] += self.memo_search(s, dict, i + 1, max_len, memo)
return memo[index] | [
"vincent507cpu@gmail.com"
] | vincent507cpu@gmail.com |
7911dfbc4c035eaa8f13d0d5a1931adb62c0bb1f | 72357e298521452cfa3d9ca960235e6ddf1dfe46 | /imsize.py | de19438293a2d32fb248522d0e2440e21c16b107 | [] | no_license | pydemo/project-and-sketch | 25b8fbdc1e85773b9aa150d8f63a0e7ced4c1a13 | 10397f3d40d117c15143ce3deb9bc8bf530c2269 | refs/heads/master | 2020-09-01T16:52:08.478915 | 2019-11-29T18:19:11 | 2019-11-29T18:19:11 | 219,009,137 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,819 | py | # -*- coding: utf-8 -*-
import struct
import imghdr
def test_jpeg(h, f):
# SOI APP2 + ICC_PROFILE
if h[0:4] == '\xff\xd8\xff\xe2' and h[6:17] == b'ICC_PROFILE':
print "A"
return 'jpeg'
# SOI APP14 + Adobe
if h[0:4] == '\xff\xd8\xff\xee' and h[6:11] == b'Adobe':
return 'jpeg'
# SOI DQT
if h[0:4] == '\xff\xd8\xff\xdb':
return 'jpeg'
imghdr.tests.append(test_jpeg)
def get_image_size(fname):
'''Determine the image type of fhandle and return its size.
from draco'''
with open(fname, 'rb', ) as fhandle:
head = fhandle.read(24)
if len(head) != 24:
return
what = imghdr.what(None, head)
if what == 'png':
check = struct.unpack('>i', head[4:8])[0]
if check != 0x0d0a1a0a:
return
width, height = struct.unpack('>ii', head[16:24])
elif what == 'gif':
width, height = struct.unpack('<HH', head[6:10])
elif what == 'jpeg':
try:
fhandle.seek(0) # Read 0xff next
size = 2
ftype = 0
while not 0xc0 <= ftype <= 0xcf or ftype in (0xc4, 0xc8, 0xcc):
fhandle.seek(size, 1)
byte = fhandle.read(1)
while ord(byte) == 0xff:
byte = fhandle.read(1)
ftype = ord(byte)
size = struct.unpack('>H', fhandle.read(2))[0] - 2
# We are at a SOFn block
fhandle.seek(1, 1) # Skip `precision' byte.
height, width = struct.unpack('>HH', fhandle.read(4))
except Exception: #IGNORE:W0703
return
else:
return
return width, height
get_image_size('test.JPG') | [
"olek.buzu@gmail.com"
] | olek.buzu@gmail.com |
d5c0f577677ecd87a61b9a3767430c83a25b4e9c | 64cdb9e8fdcde8a71a16ce17cd822441d9533936 | /_baekjoon/1507_궁금한 민호(플로이드워샬).py | 2413850d526ea506a6078831c86eb5db2427d6ec | [] | no_license | heecheol1508/algorithm-problem | fa42769f0f2f2300e4e463c5731e0246d7b7643c | 6849b355e15f8a538c9a071b0783d1789316d29d | refs/heads/main | 2023-07-20T23:46:07.037975 | 2021-08-31T12:47:33 | 2021-08-31T12:47:33 | 302,830,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 867 | py | import sys
sys.stdin = open('input.txt', 'r')
N = int(input())
board = [list(map(int, input().split())) for _ in range(N)]
visit = [[True] * N for _ in range(N)]
flag = True
for k in range(N):
for i in range(N):
if i != k:
for j in range(N):
if j != k and i != j:
if board[i][j] > board[i][k] + board[k][j]:
flag = False
break
elif board[i][j] == board[i][k] + board[k][j] and visit[i][j] is True:
visit[i][j] = False
if flag is False:
break
if flag is False:
break
if flag is False:
print(-1)
else:
result = 0
for i in range(N - 1):
for j in range(i + 1, N):
if visit[i][j] is True:
result += board[i][j]
print(result)
| [
"heecheol1508@gmail.com"
] | heecheol1508@gmail.com |
5f90fc208121793359d9af378f9e0dbd53d87fea | 297d045a587f354b96cf493dff9a2e719739715d | /pysimplehttp/scripts/ps_to_sq.py | 3f77d7130a785960f715c81420767267909b9502 | [
"MIT"
] | permissive | liaojack8/simplehttp | 4805aef2f72dae9e2ce7eeb3f801818b0c66af43 | 1dbdea11276bc21915fc133fd9893a738654c240 | refs/heads/master | 2022-11-29T06:03:33.027824 | 2020-07-18T14:58:04 | 2020-07-18T14:58:04 | 280,678,089 | 0 | 0 | MIT | 2020-07-18T14:55:08 | 2020-07-18T14:55:07 | null | UTF-8 | Python | false | false | 3,792 | py | #!/usr/bin/env python
"""
generic pubsub to simplequeue daemon that takes command line arguments:
--pubsub-url=<http://127.0.0.1:8090/sub>
(multiple) --simplequeue-url=<http://127.0.0.1:6000>
when multiple destination simplequeue arguments are specified, the daemon will
randomly choose one endpoint to write a message to
"""
import logging
import tornado.httpclient
import tornado.options
import sys
import urllib
import random
try:
import ujson as json
except ImportError:
import json
from pysimplehttp.pubsub_reader import PubsubReader
class PubsubToSimplequeue(PubsubReader):
def __init__(self, simplequeue_urls, filter_require, filter_exclude, **kwargs):
assert isinstance(simplequeue_urls, (list, tuple))
self.simplequeue_urls = simplequeue_urls
self.filter_require = dict([data.split('=', 1) for data in filter_require])
for key, value in self.filter_require.items():
logging.info("requiring json key=%s value=%s" % (key, value) )
self.filter_exclude = dict([data.split('=', 1) for data in filter_exclude])
for key, value in self.filter_exclude.items():
logging.info("excluding json key=%s value=%s" % (key, value) )
self.http = tornado.httpclient.AsyncHTTPClient()
super(PubsubToSimplequeue, self).__init__(**kwargs)
def http_fetch(self, url, params, callback, headers={}):
url += '?' + urllib.urlencode(params)
req = tornado.httpclient.HTTPRequest(url=url,
method='GET',
follow_redirects=False,
headers=headers,
user_agent='ps_to_sq')
self.http.fetch(req, callback=callback)
def _finish(self, response):
if response.code != 200:
logging.info(response)
def callback(self, data):
"""
handle a single pubsub message
"""
if not data or len(data) == 1:
return
assert isinstance(data, str)
if self.filter_require or self.filter_exclude:
try:
msg = json.loads(data)
except Exception:
logging.error('failed json.loads(%r)' % data)
return
for key, value in self.filter_require.items():
if msg.get(key) != value:
return
for key, value in self.filter_exclude.items():
if msg.get(key) == value:
return
endpoint = random.choice(self.simplequeue_urls) + '/put'
self.http_fetch(endpoint, dict(data=data), callback=self._finish)
if __name__ == "__main__":
tornado.options.define('pubsub_url', type=str, default="http://127.0.0.1:8080/sub?multipart=0", help="url for pubsub to read from")
tornado.options.define('simplequeue_url', type=str, multiple=True, help="(multiple) url(s) for simplequeue to write to")
tornado.options.define('filter_require', type=str, multiple=True, help="filter json message to require for key=value")
tornado.options.define('filter_exclude', type=str, multiple=True, help="filter json message to exclude for key=value")
tornado.options.parse_command_line()
if not tornado.options.options.pubsub_url:
sys.stderr.write('--pubsub-url requrired\n')
sys.exit(1)
if not tornado.options.options.simplequeue_url:
sys.stderr.write('--simplequeue-url requrired\n')
sys.exit(1)
reader = PubsubToSimplequeue(
simplequeue_urls=tornado.options.options.simplequeue_url,
filter_require=tornado.options.options.filter_require,
filter_exclude=tornado.options.options.filter_exclude,
pubsub_url=tornado.options.options.pubsub_url
)
reader.start()
| [
"jehiah@gmail.com"
] | jehiah@gmail.com |
b5c10e61e40776f6dadc1d4661b2e717404b230c | 2f6817fc8f6ddb48f5f88c913d8e40b672fc3dbf | /Mining/lec3-4.py | 6d0fc9279955b2a3c15ab6c0cd0159695c4fda73 | [] | no_license | cutz-j/TodayILearned | 320b5774de68a0f4f68fda28a6a8b980097d6ada | 429b24e063283a0d752ccdfbff455abd30ba3859 | refs/heads/master | 2020-03-23T17:34:51.389065 | 2018-11-24T08:49:41 | 2018-11-24T08:49:41 | 141,865,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,080 | py | from tkinter import *
from tkinter.simpledialog import *
from tkinter.filedialog import *
# 함수선언
def editFile(num):
if num == 1:
value = askinteger('제목', '설명-->', minvalue=1, maxvalue=255)
label1.configure(text=str(value))
def openFile():
fileName = askopenfilename(parent=window, filetypes=(("GIF 파일", "*.gif"), ("모든 파일", "*.*")))
label1.configure(text=fileName)
photo = PhotoImage(file = fileName)
pLabel.configure(image=photo)
pLabel.image = photo
# 변수선언
window = None
# main
window = Tk()
window.title("Memo")
window.geometry("700x700")
mainMenu = Menu(window)
window.config(menu=mainMenu)
label1 = Label(window)
label1.pack()
fileMenu = Menu(mainMenu)
mainMenu.add_cascade(label="파일(F)", menu=fileMenu)
fileMenu.add_command(label="새로만들기(N)")
fileMenu.add_command(label="열기(O)...", command=lambda : openFile())
fileMenu.add_command(label="저장(S)")
fileMenu.add_command(label="다른이름으로 저장(A)...")
fileMenu.add_separator()
fileMenu.add_command(label="페이지 설정(U)...")
fileMenu.add_command(label="인쇄(P)")
fileMenu.add_separator()
fileMenu.add_command(label="끝내기(X)")
fileMenu2 = Menu(mainMenu)
mainMenu.add_cascade(label="편집(E)", menu=fileMenu2)
fileMenu2.add_command(label="실행취소(U)")
fileMenu2.add_separator()
fileMenu2.add_command(label="잘라내기(T)")
fileMenu2.add_command(label="복사(C)", command=lambda : editFile(1))
fileMenu2.add_command(label="붙여넣기(P)", command=lambda : editFile(2))
fileMenu2.add_command(label="삭제(L)", command=lambda : editFile(3))
fileMenu2.add_separator()
fileMenu2.add_command(label="찾기(F)")
fileMenu2.add_command(label="다음 찾기(N)")
fileMenu2.add_command(label="바꾸기(R)")
fileMenu2.add_command(label="이동(G)")
fileMenu2.add_separator()
fileMenu2.add_command(label="모두 선택(A)")
fileMenu2.add_command(label="시간/날짜(D)")
# 빈 사진 준
photo = PhotoImage()
pLabel = Label(window, image=photo)
pLabel.pack(expand = 3, anchor = CENTER)
window.mainloop() | [
"cutz309@gmail.com"
] | cutz309@gmail.com |
dcf2ca8eb4d1386d15d7d71c29f0837616c7b8a3 | dc1d341789a19b0dd8b905538b080149e6cd13ed | /iwmiproject/migrations/0158_yieldplantlevel_crop.py | 252f49c5a654262d3b72ffd24869389f1cbd8651 | [] | no_license | pngimbwa/Data-Collection-Tool | 2629d31742edc311501bd25a2f9728ce7ac97d06 | 6081500d2a1dc2e30af908168cf83a46a6078a0f | refs/heads/master | 2021-01-20T00:01:45.770837 | 2017-04-22T12:31:07 | 2017-04-22T12:31:07 | 89,068,181 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('iwmiproject', '0157_yieldfarmlevel_crop'),
]
operations = [
migrations.AddField(
model_name='yieldplantlevel',
name='Crop',
field=models.ForeignKey(blank=True, null=True, verbose_name='Crop', to='iwmiproject.Crop'),
),
]
| [
"pngimbwa6@gmail.com"
] | pngimbwa6@gmail.com |
d5ecabadea34f17640fec743414710537061740d | 8f615c636420f969afaf54fc3bf318028ab5819e | /python_data_wrangling/sast_data_convert.py | d4eda0cff9d49bf38610981f7dd9d61993fbf6ac | [] | no_license | sheltowt/application_security_data_visualizations | a20eff2c8c93b7e03720b6f287b7a1a1f424b351 | f6950d2115d09e9f0e79313d802b9ac873944050 | refs/heads/master | 2022-10-30T04:14:38.495658 | 2020-06-19T16:35:10 | 2020-06-19T16:35:10 | 266,594,130 | 18 | 0 | null | 2020-06-15T18:55:04 | 2020-05-24T17:47:23 | HTML | UTF-8 | Python | false | false | 1,167 | py | import json
with open('../raw_data/appsecco_dvna.json') as json_file:
data = json.load(json_file)
modified_object = {}
modified_object["name"] = "DVNA sast scan"
modified_object["children"] = []
for result in data["runs"][0]["results"]:
new_result = {}
new_result["name"] = result["message"]["text"]
new_result["children"] = []
modified_object["children"].append(new_result)
for result in data["runs"][0]["results"]:
for mod_obj in modified_object["children"]:
if result["message"]["text"] == mod_obj["name"]:
new_child = {}
new_child["name"] = result["locations"][0]["physicalLocation"]["artifactLocation"]["uri"]
new_child["startLine"] = result["locations"][0]["physicalLocation"]["region"]["startLine"]
new_child["size"] = 1
mod_obj["children"].append(new_child)
unique_child_name = []
unique_children = []
for index, child in enumerate(modified_object["children"]):
if child["name"] in unique_child_name:
pass
else:
unique_child_name.append(child["name"])
unique_children.append(child)
modified_object["children"] = unique_children
with open('../public/dvna_sast.json', 'w') as outfile:
json.dump(modified_object, outfile) | [
"sheltowt@gmail.com"
] | sheltowt@gmail.com |
52c32b83c0116f75bd3a04d268912c811b5a0e60 | eeec2adfe1ca4e8cf5e7a0be9eaab2497df25861 | /erudit_catalog/checks.py | 0f45121f5d689aa777705d5ced3a2650f458c953 | [
"BSD-3-Clause"
] | permissive | fabiobatalha/erudit-ps-packtools-plugin | 3ecddab7835a25df44cbc00b228f241fae231155 | 29fabd087b8d8406b96d0b7296386f78da34aaeb | refs/heads/master | 2020-03-15T01:28:04.287126 | 2018-11-21T19:28:07 | 2018-11-21T19:28:07 | 131,894,325 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,849 | py | #coding: utf-8
from __future__ import unicode_literals
import logging
import itertools
import json
import plumber
from packtools.style_errors import StyleError
from packtools.catalogs import catalog
LOGGER = logging.getLogger(__name__)
with open(catalog.ISO3166_CODES) as f:
ISO3166_CODES_SET = set(json.load(f))
# --------------------------------
# Basic functionality
# --------------------------------
@plumber.filter
def setup(message):
"""Prepare the message to traverse the pipeline.
The input `message` is an `etree` instance. The pipeline will inspect
this etree and append the errors on an errors list. This errors list
is instantiated at this setup pipe.
"""
return message, []
@plumber.filter
def teardown(message):
"""Finalize the processing pipeline and return the errors list.
"""
_, err_list = message
return err_list
def StyleCheckingPipeline():
"""Factory for style checking pipelines.
"""
return plumber.Pipeline(setup, doctype, country_code, teardown)
@plumber.filter
def doctype(message):
"""Make sure the DOCTYPE declaration is present.
"""
et, err_list = message
if not et.docinfo.doctype:
err = StyleError()
err.message = "Missing DOCTYPE declaration."
err_list.append(err)
return message
@plumber.filter
def country_code(message):
"""Check country codes against iso3166 alpha-2 list.
"""
et, err_list = message
elements = et.findall('//*[@country]')
for elem in elements:
value = elem.attrib['country']
if value not in ISO3166_CODES_SET:
err = StyleError()
err.line = elem.sourceline
err.message = "Element '%s', attribute country: Invalid country code \"%s\"." % (elem.tag, value)
err_list.append(err)
return message
| [
"fabiobatalha@gmail.com"
] | fabiobatalha@gmail.com |
a3ce2a229b18dafa49e9ae81174f429d22c71cc6 | a1b7c1357181320b272ef4c72b70d22600a407c1 | /examples/test_get_locale_code.py | 978cee5e42bcf637ac20361309a79c5c225e4623 | [
"MIT"
] | permissive | BarryYBL/SeleniumBase | 5c96e21eaebd45e2f6ac26d5bd563b3ba300e6f6 | e3cb810331183fa003cea8af81057e4136dfd660 | refs/heads/master | 2022-12-04T11:34:20.134294 | 2020-08-28T05:45:24 | 2020-08-28T05:45:24 | 290,998,663 | 1 | 0 | MIT | 2020-08-28T08:52:44 | 2020-08-28T08:52:44 | null | UTF-8 | Python | false | false | 387 | py | from seleniumbase import BaseCase
class LocaleTestClass(BaseCase):
def test_get_locale_code(self):
self.open("data:,")
locale_code = self.get_locale_code()
message = '\nLocale Code = "%s"' % locale_code
print(message)
self.set_messenger_theme(
theme="flat", location="top_center")
self.post_message(message, duration=4)
| [
"mdmintz@gmail.com"
] | mdmintz@gmail.com |
204f1f4fafb9264a0cf66934ec779f4e94f7674c | a09740e643d6277ada23c82d8e87853a1cd1a9e5 | /oProto/omsql/wipdev/bycols_inupd.py | 68d0417689a3a5d61efb19227176666e68519131 | [
"Apache-2.0"
] | permissive | FuckBrains/omEngin | c5fb011887c8b272f9951df3880a879456f202e8 | b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195 | refs/heads/main | 2023-03-20T18:27:53.409976 | 2021-03-14T15:50:11 | 2021-03-14T15:50:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,740 | py | import pandas as pd
import numpy as np
import os
def drop_cols(df, col2drop = []):
if len(col2drop) > 0:
cols = df.columns.to_list()
ncols = []
for i in range(len(cols)):
match = 0
for j in range(len(col2drop)):
if cols[i] == col2drop[j]:
match = 1
if match == 0:
ncols.append(cols[i])
ndf = df[ncols]
return ndf
else:
return df
def qrybuilt(tbl, ndf, bycol, oncols = False):
dfx = drop_cols(ndf, bycol)
ncols = dfx.columns.to_list()
lsqry = []
for i in range(len(ndf)):
x = ''
y = ''
for j in range(len(bycol)):
x1 = str(bycol[j]) + "='" + str(ndf.loc[i, bycol[j]]) + "'"
if x == '':
x = x1
else:
x = x + " and " + x1
for n in range(len(ncols)):
if oncols == False:
a1 = str(ncols[n])
a2 = "'" + str(ndf.loc[i, ncols[n]]) + "'"
if y == '':
y = a1 + '=' + a2
else:
y = y + "," + a1 + '=' + a2
else:
a1 = str(ncols[n])
mat = 0
for j in range(len(oncols)):
if oncols[j] == a1:
mat = 1
break
if mat == 1:
a2 = "'" + str(ndf.loc[i, ncols[n]]) + "'"
if y == '':
y = a1 + '=' + a2
else:
y = y + "," + a1 + '=' + a2
qry = "update " + tbl + ' set ' + y + ' Where ' + x
lsqry.append(qry)
return lsqry
def CheckExist(conn , tbl, colname, values):
qry = "select * from " + tbl + " where " + colname + "='" + values + "'"
dfx = pd.read_sql(qry, conn)
rw = dfx.shape[0]
return rw
def get_key(my_dict, val):
for value, key in my_dict.items():
if value == val:
return key
def modstr(strval):
if isinstance(strval, str):
s1 = strval.replace("'","\\'")
s2 = s1.replace(":","\\:")
return s2
def insert_into_sql(tbl, tbl_property, lscol, lsval):
col = ''
val = ''
dic = tbl_property
if isinstance(lscol, list) and isinstance(lsval, list) and len(lscol) == len(lsval):
for i in range(len(lscol)):
valmod = ''
try:
if lsval[i] != '' and lsval[i] is not None:
dtype = get_key(dic,lscol[i])
if dtype == 'text' or dtype == 'varchar':
valmod = modstr(lsval[i])
else:
valmod = str(lsval[i])
if val == '':
col = lscol[i]
val = "'" + valmod + "'"
else:
col = col + ',' + lscol[i]
val = val + ',' + "'" + valmod + "'"
else:
pass
except:
pass
qry = "insert into " + tbl + " (" + col + ") values (" + val + ")"
return qry
else:
return ""
def prep_update(lscol,lsval):
hp = ''
stval = ''
if isinstance(lscol, list) and isinstance(lsval, list):
if len(lscol) == len(lsval):
for i in range(len(lscol)):
if lsval[i] is not None:
if isinstance(lsval[i],str):
xxx1 = lsval[i].replace("'","\\'")
stval = xxx1.replace(":","\\:")
else:
stval = str(lsval[i])
x = str(lscol[i]) + "='" + stval + "'"
if hp == '' and len(stval) > 0 :
hp = x
else:
if len(stval) > 0:
hp = hp + ',' + x
else:
pass
else:
pass
else:
print('num of col and value are not same')
return hp
elif isinstance(lscol, str) and isinstance(lsval, str):
hp = ""
comma = lsval.count(',')
invertcomma = lsval.count("'")
if invertcomma == (comma+1)*2:
x1 = lscol.split(',')
x2 = lsval.split(',')
print(x1,x2)
for i in range(len(x1)):
x = x1[i] + "=" + x2[i]
if hp == '':
hp = x
else:
hp = hp + ',' + x
if invertcomma <= 2:
x1 = lscol.split(',')
x2 = lsval.split(',')
for i in range(len(x1)):
x = str(x1[i]) + "='" + str(x2[i]) + "'"
if hp == '':
hp = x
else:
hp = hp + ',' + x
return hp
def UPIN(df, tbl, conn, bycols, oncols = False, operation = "and"):
cr = conn.cursor()
if isinstance(bycols, list):
xdf = None
bydf = df[bycols]
ndf = drop_cols(df, bycols)
if oncols:
xdf = ndf[oncols]
else:
xdf = ndf
fcols = xdf.columns.to_list()
fcols_pbycol = xdf.columns.to_list()
for n in range(len(bycols)):
fcols_pbycol.append(bycols[n])
dfup = df[fcols_pbycol]
x = ''
#print(fcols, fcols_pbycol, len(fcols), len(fcols_pbycol))
lsqry = []
for i in range(len(df)):
x = ''
for j in range(len(bycols)):
lss = bycols[j]
lsv = df.loc[i,lss]
st = str(lss) + "='" + str(lsv) + "'"
if x == '':
x = st
else:
x = x + " " + operation + " " + st
qr = "select * from " + tbl + " where " + x
dfx = pd.read_sql(qr, conn)
rw = dfx.shape[0]
ls = []
if rw != 0:
for n in range(len(fcols)):
ls.append(df.loc[i, fcols[n]])
qry = "update " + tbl + ' set ' + prep_update(fcols,ls) + ' where ' + x
else:
for n in range(len(fcols_pbycol)):
ax = df.loc[i, fcols_pbycol[n]]
ls.append(ax)
qry = "insert into " + tbl + ' ' + insert_into_sql(fcols_pbycol,ls)
cr.execute(qry)
lsqry.append(qry)
conn.commit()
print('update done for ', len(lsqry), ' rows ')
return lsqry
elif isinstance(bycols, str):
xdf = None
byc = df[bycols].values.tolist()
ndf = drop_cols(df, [bycols])
if oncols:
xdf = ndf[oncols]
else:
xdf = ndf
fcols = xdf.columns.to_list()
fcols_pbycol = xdf.columns.to_list()
fcols_pbycol.append(bycols)
lsqry = []
for i in range(len(byc)):
condval = byc[i]
rs = CheckExist(conn, tbl, bycols, condval)
ls = []
if rs != 0:
for c1 in xdf:
ls.append(xdf.loc[i,c1])
qry = "update " + tbl + ' set ' + prep_update(fcols,ls) + ' where ' + bycols + "='" + condval + "'"
else:
for c1 in ndf:
ls.append(ndf.loc[i,c1])
ls.append(condval)
qry = "insert into " + tbl + ' ' + insert_into_sql(fcols_pbycol,ls)
print(qry)
cr.execute(qry)
lsqry.append(qry)
conn.commit()
print('update done for ', len(lsqry), ' rows ')
return lsqry | [
"omi.kabirr@gmail.com"
] | omi.kabirr@gmail.com |
65ee2d6385a0bdfe37108f1dcac07c4caeedc45c | bf92a619b9b850678bb691915e45c39cd740fa63 | /examples/work/run_main.py | 1a7dae80a015feb1de2c673cd31a55474f62081c | [] | no_license | jrecuero/jc2cli | a045f1efa431f53351dfac968852fd82e8c963b6 | c97615828880021b3965756aed939e39bac949b6 | refs/heads/master | 2021-05-10T10:16:34.698398 | 2018-11-06T17:43:53 | 2018-11-06T17:43:53 | 118,377,662 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | from jc2cli.namespace import Handler
# MAIN = __import__('examples.work.main')
# __import__('examples.work.config')
# __import__('examples.work.execute')
class RunCli(object):
    """Load the example command module and run its interactive CLI."""
    def __init__(self):
        # Importing the module is done for its side effects (presumably the
        # import registers the namespace's commands -- TODO confirm against
        # examples.work.main).
        __import__('examples.work.main')
        # __import__('examples.work.config')
        # __import__('examples.work.execute')
        handler = Handler()
        handler.create_namespace('examples.work.main')
        # Blocks in the CLI loop until the user exits; '<RUN>' is shown as the
        # right-hand prompt.
        handler.switch_and_run_cli_for_namespace('examples.work.main', rprompt='<RUN>')
if __name__ == '__main__':
    # Constructing RunCli immediately starts the CLI (all work is in __init__).
    RunCli()
| [
"jose.recuero@gmail.com"
] | jose.recuero@gmail.com |
453e33e779ceba90beb4a31868d07efe7f5fd23e | aff88e0922ae5c75f18b624cb1c81c263d12f2af | /layout/Calc.py | 5adecf98c6435ee96b79364527e4a428e621c5fd | [] | no_license | TianJin85/Qtwindow | 44f42c8972382bcdbde7bc26a4a7f5121736e0aa | 3af712d8528d825cb3cecd6bc21c8f836232e775 | refs/heads/master | 2020-10-01T23:05:30.271773 | 2020-02-09T14:39:12 | 2020-02-09T14:39:12 | 227,642,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | # -*- encoding: utf-8 -*-
"""
@File : Calc.py
@Time : 2020/1/5 15:59
@Author : Tianjin
@Email : tianjincn@163.com
@Software: PyCharm
"""
'''
栅格布局,实现计算器UI
'''
import sys
from PyQt5.QtWidgets import *
class Calc(QWidget):
    """Calculator-style demo window: 20 buttons arranged on a QGridLayout."""

    def __init__(self):
        # Bug fix: both the method name and the super() call were misspelled
        # "__int__", so this constructor never ran, the window title was never
        # set, and calling it would have raised AttributeError on the super call.
        super(Calc, self).__init__()
        self.setWindowTitle('栅格布局')

    def initUI(self):
        """Create the grid layout and populate it with the button labels."""
        grid = QGridLayout()
        self.setLayout(grid)
        names = ['Cls', 'Back', '', 'Close',
                 '7', '8', '9', '/',
                 '4', '5', '6', '*',
                 '1', '2', '3', '-',
                 '0', '.', '=', '+']
        # Row-major (row, column) coordinates of a 5x4 grid, zipped with the
        # labels above.
        positions = [(i, j) for i in range(5) for j in range(4)]
        for position, name in zip(positions, names):
            # An empty label leaves that grid cell blank.
            if name == '':
                continue
            grid.addWidget(QPushButton(name), *position)
if __name__ == '__main__':
    # A QApplication must exist before any widgets are created.
    app = QApplication(sys.argv)
    main = Calc()
    # initUI builds the widgets; it is invoked explicitly rather than from the
    # constructor.
    main.initUI()
    main.show()
    # Enter the Qt event loop; exit the process with its return code.
    sys.exit(app.exec_())
"307440205@qq.com"
] | 307440205@qq.com |
ea7fe2a8309980eadbd5238e810707e3d19f9d55 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/intersectingDiscs_20200804190524.py | 450fd58ee08624f00420da84936bbe498750bb5c | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | def discs(A):
newArr = []
opendiscs = 0
intersections = 0
for i in range(len(A)):
newArr.append((i-A[i]))
newArr.sort()
i = 0
j = 0
while i < len(newArr) and j < len(A):
if i == len(newArr)- 1:
break
if newArr[i]<=A[j]:
opendiscs +=1
if opendiscs == 2:
intersections +=1
if
i+=1
elif newArr[i] > A[j]:
opendiscs -=1
j+=1
print('intersections',intersections)
discs([1,5,2,1,4,0]) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
6dd8c29a98d12f0bea1db2e80ccf811aded94176 | c92398a728817578850ecf508ec4197afe91a88f | /DemoYield Fun.py | b2e001740a146c35ba1011e91c7382282fbeecef | [] | no_license | HitanshuSoni/Python_practice | 4d0ec0378124da85e364a15a7b94ddbbfe2fc929 | 7a3d0977b218ef76f91517d88518b1c0b68b9528 | refs/heads/main | 2023-04-18T21:55:12.709161 | 2021-05-08T15:39:08 | 2021-05-08T15:39:08 | 365,550,407 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | def checkyield():
    """Yield the integers 1, 2 and 3 in order (generator demo)."""
    yield 1
    yield 2
    yield 3
    # Dead code kept from the demo: the string below shows the "return value"
    # form that was deliberately commented out of the generator.
    '''return 1
    return 2'''
# Drive the generator: prints 1, 2 and 3 on separate lines.
for value in checkyield():
    print(value)
| [
"hitanshusoni10@gmail.com"
] | hitanshusoni10@gmail.com |
f31f2e6b0b3bd7e6c5b2de59aad83d0d08c29089 | 077f29021738c3b577c7c3d9ef5851d76e93cbed | /demo/funs/passing_funs.py | a955b2a06551fd66872ba07965f05428360f0019 | [] | no_license | srikanthpragada/PYTHON_10_JULY_2020 | fb410d87260eb290ebcc5ac6a88b6d6b01ee15b5 | b7a586cbcd49934d36facb4dd748c54038838334 | refs/heads/master | 2022-12-05T09:05:33.192365 | 2020-08-26T14:27:09 | 2020-08-26T14:27:09 | 279,319,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | # func,value,value
def math_op(oper, n1, n2):
    """Dispatch: apply the two-argument callable ``oper`` to ``n1`` and ``n2``."""
    outcome = oper(n1, n2)
    return outcome
def multiply(n1, n2):
    """Return the product of ``n1`` and ``n2``."""
    product = n1 * n2
    return product
def power(n1, n2):
    """Return ``n1`` raised to the ``n2``-th power."""
    return pow(n1, n2)
# Demo: math_op receives another function as its first argument.
print(math_op(multiply, 10, 20))  # 200
print(math_op(power, 10, 20))  # 10 to the 20th power
| [
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
4f765a9facddb729a6c6639b7da5a76717970f85 | e6713c7e72d6950c2e35c836ac88588bc673c19e | /auth_api/api.py | 2367c4cb93b42931b648e5b2e9820f2db88543c8 | [] | no_license | tanjibpa/scrumboard-with-drf | 905175069d065b7174f3485832e6c9e8bcb453da | 3d54b33f91a1719c4373677fe9efc7352b6ce53f | refs/heads/master | 2020-06-25T03:59:06.358201 | 2017-06-13T16:43:55 | 2017-06-13T16:43:55 | 94,235,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | from django.contrib.auth import authenticate, login, logout
from django.views.decorators.csrf import csrf_protect
from django.utils.decorators import method_decorator
from rest_framework import views, status
from rest_framework.response import Response
from .serializers import UserSerializer
class LoginView(views.APIView):
    """Authenticate posted credentials and open a session for the user."""

    @method_decorator(csrf_protect)
    def post(self, request):
        credentials = request.data
        user = authenticate(
            request,
            username=credentials.get('username'),
            password=credentials.get('password'),
        )
        # Success path first: open the session and echo the serialized user.
        if user is not None and user.is_active:
            login(request, user)
            return Response(UserSerializer(user).data)
        failure_body = {
            'status': 'Unauthorized',
            'message': 'Username or password is incorrect',
        }
        return Response(failure_body, status=status.HTTP_401_UNAUTHORIZED)
class LogoutView(views.APIView):
    """End the current session and reply with an empty 204 response."""

    def get(self, request):
        logout(request)
        empty_body = {}
        return Response(empty_body, status=status.HTTP_204_NO_CONTENT)
"ikram.tanjib@gmail.com"
] | ikram.tanjib@gmail.com |
194e61356c8666faa3168ed6093752c9ba74d3fc | ebd9c249d446d809abc9a0f3e4593f34922a1b93 | /leetcode/235_lowest_common_ancestor_of_a_binary_search_tree.py | b20699956509fd6d35339669565185cc0d94e6c0 | [] | no_license | jaychsu/algorithm | ac7a9dc7366f58c635a68bc46bf1640d2f5ff16d | 91892fd64281d96b8a9d5c0d57b938c314ae71be | refs/heads/master | 2023-05-11T00:40:39.237813 | 2022-09-14T07:43:12 | 2022-09-14T07:43:12 | 106,277,156 | 143 | 39 | null | 2022-09-14T07:43:13 | 2017-10-09T11:51:48 | Python | UTF-8 | Python | false | false | 617 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """Lowest common ancestor in a binary search tree, found iteratively."""

    def lowestCommonAncestor(self, root, p, q):
        """
        :type root: TreeNode
        :type p: TreeNode
        :type q: TreeNode
        :rtype: TreeNode
        """
        node = root
        # Walk down from the root: the first node that does not have both
        # targets strictly on the same side splits p and q, and is their LCA.
        while node is not None:
            if p.val < node.val and q.val < node.val:
                node = node.left
            elif p.val > node.val and q.val > node.val:
                node = node.right
            else:
                return node
| [
"hi@jaych.su"
] | hi@jaych.su |
bc6fdbc733f95c52d979d6784b735214a3e8dbc3 | 344e2956b4e2a30a8ef7532d951f96d995d1dd1e | /18_mmaction/lib/mmcv/tests/test_image/test_photometric.py | f2e86d450da174e0e5bc8d0b5e362df36a5b7ca6 | [
"Apache-2.0",
"LGPL-3.0-only",
"MIT",
"LicenseRef-scancode-proprietary-license",
"BSD-3-Clause",
"GPL-3.0-only"
] | permissive | karndeepsingh/Monk_Object_Detection | e64199705326e4cd65e4b29946cae210a4ef9649 | 425fa50a3236cb9097389646275da06bf9185f6b | refs/heads/master | 2022-12-22T18:26:53.933397 | 2020-09-28T12:49:50 | 2020-09-28T12:49:50 | 299,307,843 | 1 | 1 | Apache-2.0 | 2020-09-28T12:52:18 | 2020-09-28T12:52:17 | null | UTF-8 | Python | false | false | 3,235 | py | # Copyright (c) Open-MMLab. All rights reserved.
import os.path as osp
import cv2
import numpy as np
from numpy.testing import assert_array_equal
import mmcv
class TestPhotometric:
    # Pixel-level checks for mmcv's photometric transforms
    # (normalize / denormalize / invert / solarize / posterize).
    @classmethod
    def setup_class(cls):
        # the test img resolution is 400x300
        cls.img_path = osp.join(osp.dirname(__file__), '../data/color.jpg')
        cls.img = cv2.imread(cls.img_path)
        # Per-channel means / stds used as normalization parameters.
        cls.mean = np.array([123.675, 116.28, 103.53], dtype=np.float32)
        cls.std = np.array([58.395, 57.12, 57.375], dtype=np.float32)
    def test_imnormalize(self):
        # The baseline is (RGB image - mean) / std; the asserts show that
        # imnormalize flips BGR->RGB by default (to_rgb=False skips the flip)
        # and always returns a new array, never the input.
        rgb_img = self.img[:, :, ::-1]
        baseline = (rgb_img - self.mean) / self.std
        img = mmcv.imnormalize(self.img, self.mean, self.std)
        assert np.allclose(img, baseline)
        assert id(img) != id(self.img)
        img = mmcv.imnormalize(rgb_img, self.mean, self.std, to_rgb=False)
        assert np.allclose(img, baseline)
        assert id(img) != id(rgb_img)
    def test_imnormalize_(self):
        # In-place variant: the returned object must be the same array that
        # was passed in, mutated to the normalized values.
        img_for_normalize = np.float32(self.img)
        rgb_img_for_normalize = np.float32(self.img[:, :, ::-1])
        baseline = (rgb_img_for_normalize - self.mean) / self.std
        img = mmcv.imnormalize_(img_for_normalize, self.mean, self.std)
        assert np.allclose(img_for_normalize, baseline)
        assert id(img) == id(img_for_normalize)
        img = mmcv.imnormalize_(
            rgb_img_for_normalize, self.mean, self.std, to_rgb=False)
        assert np.allclose(img, baseline)
        assert id(img) == id(rgb_img_for_normalize)
    def test_imdenormalize(self):
        # Inverse of normalization: img * std + mean, with an optional
        # RGB->BGR flip (the default, per the to_bgr=False counter-case).
        norm_img = (self.img[:, :, ::-1] - self.mean) / self.std
        rgb_baseline = (norm_img * self.std + self.mean)
        bgr_baseline = rgb_baseline[:, :, ::-1]
        img = mmcv.imdenormalize(norm_img, self.mean, self.std)
        assert np.allclose(img, bgr_baseline)
        img = mmcv.imdenormalize(norm_img, self.mean, self.std, to_bgr=False)
        assert np.allclose(img, rgb_baseline)
    def test_iminvert(self):
        # Inversion of uint8 pixels: expected output is 255 - value.
        img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
                       dtype=np.uint8)
        img_r = np.array([[255, 127, 0], [254, 128, 1], [253, 126, 2]],
                         dtype=np.uint8)
        assert_array_equal(mmcv.iminvert(img), img_r)
    def test_solarize(self):
        # Solarize inverts pixels at/above a threshold (default checked first,
        # then an explicit threshold of 100).
        img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
                       dtype=np.uint8)
        img_r = np.array([[0, 127, 0], [1, 127, 1], [2, 126, 2]],
                         dtype=np.uint8)
        assert_array_equal(mmcv.solarize(img), img_r)
        img_r = np.array([[0, 127, 0], [1, 128, 1], [2, 126, 2]],
                         dtype=np.uint8)
        assert_array_equal(mmcv.solarize(img, 100), img_r)
    def test_posterize(self):
        # Posterize keeps only the top N bits of each pixel (N=1, then N=3).
        img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
                       dtype=np.uint8)
        img_r = np.array([[0, 128, 128], [0, 0, 128], [0, 128, 128]],
                         dtype=np.uint8)
        assert_array_equal(mmcv.posterize(img, 1), img_r)
        img_r = np.array([[0, 128, 224], [0, 96, 224], [0, 128, 224]],
                         dtype=np.uint8)
        assert_array_equal(mmcv.posterize(img, 3), img_r)
| [
"abhishek4273@gmail.com"
] | abhishek4273@gmail.com |
22ed3addbe4bfa6066226c177ab15e87da4ccc4c | 4a4579254118db40fb008439d18ad8c573e8fc1a | /devel/lib/python2.7/dist-packages/jsk_gui_msgs/msg/_TouchEvent.py | e602a239330814d3e80962f75199c5cbbf48a8d4 | [] | no_license | amilearning/AD_mpc_ws | 86ff6ef9e61c6cc5aae6e12f20c2c875b1930d41 | 1fc2d385f281e00c16aff688948f7296e02cbd3a | refs/heads/master | 2023-06-24T13:54:59.759921 | 2021-07-16T01:08:52 | 2021-07-16T01:08:52 | 386,465,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,280 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from jsk_gui_msgs/TouchEvent.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class TouchEvent(genpy.Message):
  # Auto-generated by genpy from jsk_gui_msgs/TouchEvent.msg ("Do not edit"
  # per the module header) -- regenerate instead of hand-editing.
  _md5sum = "f074642ed1ad51ea5afc186cab8aaca1"
  _type = "jsk_gui_msgs/TouchEvent"
  _has_header = False # flag to mark the presence of a Header object
  _full_text = """byte DOWN=0
byte UP=1
byte MOVE=2
byte state
float32 x
float32 y
float32 w
float32 h"""
  # Pseudo-constants
  DOWN = 0
  UP = 1
  MOVE = 2
  __slots__ = ['state','x','y','w','h']
  _slot_types = ['byte','float32','float32','float32','float32']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes.  You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       state,x,y,w,h
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(TouchEvent, self).__init__(*args, **kwds)
      # message fields cannot be None, assign default values for those that are
      if self.state is None:
        self.state = 0
      if self.x is None:
        self.x = 0.
      if self.y is None:
        self.y = 0.
      if self.w is None:
        self.w = 0.
      if self.h is None:
        self.h = 0.
    else:
      self.state = 0
      self.x = 0.
      self.y = 0.
      self.w = 0.
      self.h = 0.
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  # Wire format: struct "<b4f" = one signed byte (state) followed by four
  # little-endian 32-bit floats (x, y, w, h); 17 bytes total.
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self
      buff.write(_get_struct_b4f().pack(_x.state, _x.x, _x.y, _x.w, _x.h))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    if python3:
      codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      end = 0
      _x = self
      start = end
      end += 17
      (_x.state, _x.x, _x.y, _x.w, _x.h,) = _get_struct_b4f().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self
      buff.write(_get_struct_b4f().pack(_x.state, _x.x, _x.y, _x.w, _x.h))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    if python3:
      codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      end = 0
      _x = self
      start = end
      end += 17
      (_x.state, _x.x, _x.y, _x.w, _x.h,) = _get_struct_b4f().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
    global _struct_I
    return _struct_I
# Lazily-built Struct for "<b4f": one signed byte plus four little-endian
# 32-bit floats -- the 17-byte wire layout of TouchEvent.
_struct_b4f = None
def _get_struct_b4f():
    global _struct_b4f
    if _struct_b4f is None:
        _struct_b4f = struct.Struct("<b4f")
    return _struct_b4f
| [
"hojin.projects@gmail.com"
] | hojin.projects@gmail.com |
c9a1e1f387c7a2cb2ba7b89988bfb22731bdb725 | 68e5e2c9a7e9372f536edf3d99847067eb734e75 | /05-奠定项目基础-Model/typeidea/typeidea/comment/migrations/0001_initial.py | b025fc6f66017f5dd054dcd8948eed32546a206b | [] | no_license | gy0109/Django-enterprise-development-logs--huyang | f04d21df6d45f5d2f226760d35e38042f74a7ea8 | ab4505f8cdaf0c1f9e3635591cd74645a374a73f | refs/heads/master | 2020-05-17T05:24:51.602859 | 2019-05-08T03:42:13 | 2019-05-08T03:42:13 | 183,534,431 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,285 | py | # Generated by Django 2.1.7 on 2019-04-27 15:16
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.1.7: creates the Comment table, with a
    # foreign key to blog.Post (hence the dependency below).
    initial = True
    dependencies = [
        ('blog', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nickname', models.CharField(max_length=50, verbose_name='昵称')),
                ('email', models.EmailField(max_length=254, verbose_name='邮箱')),
                ('website', models.URLField(verbose_name='网站')),
                ('content', models.CharField(max_length=50, verbose_name='内容')),
                # NOTE(review): string-valued choices on a PositiveIntegerField
                # with an integer default -- the model probably defines integer
                # constants; verify against the Comment model.
                ('status', models.PositiveIntegerField(choices=[('STATUS_NORMAL', '正常'), ('STATUS_DELETE', '删除')], default=1, verbose_name='状态')),
                ('created_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                # NOTE(review): on_delete=True is not a deletion handler
                # (models.CASCADE etc. are expected callables) -- confirm the
                # model and regenerate rather than editing this migration.
                ('target', models.ForeignKey(on_delete=True, to='blog.Post', verbose_name='评论目标')),
            ],
            options={
                'verbose_name': '评论',
                'verbose_name_plural': '评论',
            },
        ),
    ]
| [
"1974326896@qq.com"
] | 1974326896@qq.com |
15a8de32a49edae93fb23dd1983e2c341bfda6a0 | 531a5c09ed774dca6f85f3c96827ff4d9f8fc3be | /AutotestWebD/apps/webportal/scripts/emailcopy.py | c6e55aa3e0f74b03e545142c9e259a3103d5155d | [
"MIT"
] | permissive | xiaochaom/sosotest | a98db41088d9411aa7d2723894f5bdc60bfbbd52 | a3a5ce67c3dc302cf4bca906496ec6ee26b42c33 | refs/heads/master | 2020-07-06T09:31:39.598616 | 2020-06-23T07:51:00 | 2020-06-23T07:51:00 | 202,971,957 | 0 | 0 | MIT | 2019-08-18T07:12:52 | 2019-08-18T07:12:51 | null | UTF-8 | Python | false | false | 935 | py | import django
import sys,os
# Derive the project root: the directory of this file up to (but excluding)
# the "/apps" segment, with Windows backslashes normalized.
rootpath = os.path.dirname(os.path.realpath(__file__)).replace("\\","/")
rootpath = rootpath.split("/apps")[0]
# print(rootpath)
syspath=sys.path
sys.path=[]
sys.path.append(rootpath) # put the project root first on the search path
# Add the first-level directories under the project root to the search path.
# NOTE(review): rootpath has no trailing "/" so rootpath+i concatenates the
# names without a separator -- presumably harmless dead entries, but verify.
sys.path.extend([rootpath+i for i in os.listdir(rootpath) if i[0]!="."])
sys.path.extend(syspath)
from apps.common.func.WebFunc import *
from apps.webportal.services.webPortalService import WebPortalService
from all_models.models import *
from apps.task.services.HTTP_taskService import HTTP_taskService
if __name__ == "__main__":
    # One-off data copy: for every version task of v1807, copy the email list
    # from the matching TbTask row.
    versionTask = TbVersionTask.objects.filter(versionName="v1807")
    task = TbTask.objects.filter()
    for taskIndex in versionTask:
        # NOTE(review): [0] raises IndexError when no TbTask matches, and this
        # re-queries per row (N+1) -- acceptable for a one-off script.
        taskIndex.emailList = task.filter(taskId=taskIndex.taskId)[0].emailList
        taskIndex.save()
        # tmp = taskIndex.taskId
    # print(te)
| [
"wangjilianglong@163.com"
] | wangjilianglong@163.com |
ac170695f08d24863f873bcc35ea080070054620 | 1b1b074cca3a8c9f5a6d0630cd40d56a1d8b7468 | /motorista/migrations/0006_auto_20170526_1645.py | 7814af59c9ad678721f1bcc2b9b4edb020d7c863 | [] | no_license | DiegoDigo/buScool | e392f62e3f43a0a892f14608447329d2b2d8b50a | fca579c0951dfedfabce79b52031804c9b6373ed | refs/heads/master | 2021-01-21T07:00:39.043740 | 2017-05-26T20:19:24 | 2017-05-26T20:19:24 | 91,592,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-26 19:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.11): adds accessibility-related fields to the
    # Motorista ("accepts disability") and Veiculo ("disabled capacity") models.
    dependencies = [
        ('motorista', '0005_motorista_descricao'),
    ]
    operations = [
        migrations.AddField(
            model_name='motorista',
            name='deficiente',
            field=models.BooleanField(default=False, verbose_name='Aceita Deficiência'),
        ),
        migrations.AddField(
            model_name='veiculo',
            name='capacidadeDeficiente',
            field=models.PositiveIntegerField(blank=True, default=1, null=True),
        ),
    ]
| [
"di3g0d0ming05@gmail.com"
] | di3g0d0ming05@gmail.com |
29c3ad28b41ef7b1b7689b86a33f01448b53bf57 | ef42fa903820055b9b0a8b4ebb1863a16d386171 | /config/urls.py | 788b8ccbf55a2bf64fe274419f089478f6da357b | [] | no_license | sinjorjob/django-simple-capture-inquery-form | 2537c8e03bc2c0118f772b69a59866ffb34d7cac | 8bd2900a6bdf97b97ddca7b7240b42f478e14884 | refs/heads/master | 2023-07-02T14:40:43.840669 | 2021-08-10T21:24:24 | 2021-08-10T21:24:24 | 394,784,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | from django.contrib import admin
from django.urls import path, include
# URL routing table: admin site, django-simple-captcha endpoints, and the
# contact-form app mounted at the site root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('captcha/', include('captcha.urls')),
    path('', include('contact.urls')),
]
| [
"sinforjob@gmail.com"
] | sinforjob@gmail.com |
60d3de32df2546f2b7d1a59f47cd31ade136afe5 | 29a580900743a35c0d870c75b02decf3bfd24513 | /src/windows_sniffer_example.py | 6ef3b8a9447679386fb83b1fc2311d7238477496 | [] | no_license | rduvalwa5/PythonNetworking | 6695da9552beb62c3af0711a14c68e52fd412b12 | 52340292e4fbe0f628727838cabdf647c0a62e07 | refs/heads/master | 2021-01-19T17:47:31.589447 | 2019-07-11T06:02:25 | 2019-07-11T06:02:25 | 31,050,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,113 | py | '''
Created on Nov 26, 2018
https://docs.python.org/3.0/library/ssl.html
@author: rduvalwa2
OSXAir:src rduvalwa2$ ls
Asyncio echo_client.py simpleServer.py
PyNet echo_server.py windows_sniffer_example.py
PyProgramming_Chap5 simpleClient.py
OSXAir:src rduvalwa2$ sudo python windows_sniffer_example.py
Password:
Traceback (most recent call last):
File "windows_sniffer_example.py", line 23, in <module>
s.ioctl(socket.SIO_RCVALL,socket.RCVALL_ON)
AttributeError: '_socketobject' object has no attribute 'ioctl'
OSXAir:src rduvalwa2$
'''
import socket
# the public network interface
HOST = socket.gethostbyname(socket.gethostname())
# create a raw socket and bind it to the public interface
# NOTE(review): raw sockets require administrator privileges.
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP)
s.bind((HOST, 0))
# Include IP headers
s.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
# receive all packages
# NOTE(review): socket.ioctl / SIO_RCVALL are Windows-only -- the module
# docstring above records the AttributeError this raises on macOS.
s.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)
# receive a package
# NOTE(review): 65565 looks like a typo for 65535 -- confirm intent.
print(s.recvfrom(65565))
# disabled promiscuous mode
s.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)
"rduvalwa5@hotmail.com"
] | rduvalwa5@hotmail.com |
efac4b3cf729f3ef140268b15ed0ff26865674c9 | f71deab2aabb43128d42d6a9e7d8ccd74740c7dd | /binance/handlers/handlers.py | a39b12896cc0df084fd6dc9cb4df788b42a1e9b1 | [] | no_license | kp-forks/python-binance-sdk | a1d3740d39f6b7b03bf7dc2ba81170de71967020 | 7e1962fe28226c69a5789c2a6f9eba9552f7b051 | refs/heads/master | 2023-03-26T22:43:04.951187 | 2021-03-25T07:33:46 | 2021-03-25T07:33:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,609 | py | import traceback
from datetime import datetime
import sys
from typing import TextIO
from binance.common.constants import (
STREAM_TYPE_MAP,
STREAM_OHLC_MAP
)
from binance.common.types import (
DictPayload,
ListPayload
)
from .base import Handler
class HandlerExceptionHandlerBase(Handler):
    # Logging-only exception handler; the instance parameter is deliberately
    # named "_" because it is unused.
    def receive(
        _,
        e: Exception,
        file: TextIO = sys.stderr
    ):
        """
        Print current datetime and error call stacks
        Args:
            e (Exception): the error
            file (:obj:`TextIO`, optional): output target of the printer, defaults to `sys.stderr`
        Returns:
            Exception: the error itself
        """
        # NOTE(review): the default for `file` binds sys.stderr at import
        # time; a later reassignment of sys.stderr is not picked up.
        print(f'[{datetime.now()}] ', end='', file=file)
        traceback.print_exc(file=file)
        return e
# Maps Binance's single-letter stream field codes to descriptive column
# names; shared by the trade-style payloads below.
BASE_TRADE_COLUMNS_MAP = {
    **STREAM_TYPE_MAP,
    'E': 'event_time',
    's': 'symbol',
    'p': 'price',
    'q': 'quantity',
    'T': 'trade_time',
    'm': 'is_maker'
}
# Raw trade stream: base fields plus the concrete trade/order identifiers.
TRADE_COLUMNS_MAP = {
    **BASE_TRADE_COLUMNS_MAP,
    't': 'trade_id',
    'b': 'buyer_order_id',
    'a': 'seller_order_id'
}
TRADE_COLUMNS = TRADE_COLUMNS_MAP.keys()
class TradeHandlerBase(Handler):
    # Column wiring only; payload processing is inherited from Handler.
    COLUMNS_MAP = TRADE_COLUMNS_MAP
    COLUMNS = TRADE_COLUMNS
# Aggregate-trade stream: base trade fields plus the aggregation identifiers.
AGG_TRADE_COLUMNS_MAP = {
    **BASE_TRADE_COLUMNS_MAP,
    'a': 'agg_trade_id',
    'f': 'first_trade_id',
    'l': 'last_trade_id',
}
# Consistency fix: every sibling constant (TRADE_COLUMNS, KLINE_COLUMNS,
# MINI_TICKER_COLUMNS, TICKER_COLUMNS) exposes the map's keys; this one
# previously aliased the whole mapping.  Iteration and membership tests
# behave identically, so the change is backward compatible.
AGG_TRADE_COLUMNS = AGG_TRADE_COLUMNS_MAP.keys()
class AggTradeHandlerBase(Handler):
    # Column wiring only; payload processing is inherited from Handler.
    COLUMNS_MAP = AGG_TRADE_COLUMNS_MAP
    COLUMNS = AGG_TRADE_COLUMNS
# Kline/candlestick stream fields (times, trade-id range, OHLC, volumes).
KLINE_COLUMNS_MAP = {
    **STREAM_TYPE_MAP,
    'E': 'event_time',
    't': 'open_time',
    'T': 'close_time',
    's': 'symbol',
    'i': 'interval',
    'f': 'first_trade_id',
    'L': 'last_trade_id',
    **STREAM_OHLC_MAP,
    'x': 'is_closed',
    'v': 'volume',
    'q': 'quote_volume',
    'V': 'taker_volume',
    'Q': 'taker_quote_volume',
    'n': 'total_trades'
}
KLINE_COLUMNS = KLINE_COLUMNS_MAP.keys()
class KlineHandlerBase(Handler):
    COLUMNS_MAP = KLINE_COLUMNS_MAP
    COLUMNS = KLINE_COLUMNS
    def _receive(self, payload: DictPayload):
        """The payload of kline has unnecessary hierarchy,
        so just flatten it.
        """
        k = payload['k']
        # NOTE(review): copies the event time into the caller's nested dict
        # (in-place mutation); confirm nothing else reuses payload['k'].
        k['E'] = payload['E']
        return super()._receive(k)
# Mini-ticker stream: symbol, OHLC and volumes only.
MINI_TICKER_COLUMNS_MAP = {
    **STREAM_TYPE_MAP,
    'E': 'event_time',
    's': 'symbol',
    **STREAM_OHLC_MAP,
    'v': 'volume',
    'q': 'quote_volume',
}
MINI_TICKER_COLUMNS = MINI_TICKER_COLUMNS_MAP.keys()
class MiniTickerHandlerBase(Handler):
    COLUMNS_MAP = MINI_TICKER_COLUMNS_MAP
    COLUMNS = MINI_TICKER_COLUMNS
# Full 24h ticker stream: mini-ticker fields plus price statistics,
# best-bid data and the rolling-window bookkeeping fields.
TICKER_COLUMNS_MAP = {
    **MINI_TICKER_COLUMNS_MAP,
    'p': 'price',
    'P': 'percent',
    'w': 'weighted_average_price',
    'x': 'first_trade_price',
    'Q': 'last_quantity',
    'b': 'best_bid_price',
    'B': 'best_bid_quantity',
    'O': 'stat_open_time',
    'C': 'stat_close_time',
    'F': 'first_trade_id',
    'L': 'last_trade_id',
    'n': 'total_trades'
}
TICKER_COLUMNS = TICKER_COLUMNS_MAP.keys()
class TickerHandlerBase(Handler):
    COLUMNS_MAP = TICKER_COLUMNS_MAP
    COLUMNS = TICKER_COLUMNS
# The "!miniTicker@arr" / "!ticker@arr" streams deliver a list of per-symbol
# payloads; these handlers forward the list with None as the second argument
# (presumably "no index column" -- confirm against Handler._receive).
class AllMarketMiniTickersHandlerBase(Handler):
    COLUMNS_MAP = MINI_TICKER_COLUMNS_MAP
    COLUMNS = MINI_TICKER_COLUMNS
    def _receive(self, payload: ListPayload):
        return super()._receive(
            payload, None)
class AllMarketTickersHandlerBase(Handler):
    COLUMNS_MAP = TICKER_COLUMNS_MAP
    COLUMNS = TICKER_COLUMNS
    def _receive(self, payload: ListPayload):
        return super()._receive(
            payload, None)
| [
"i+github@kael.me"
] | i+github@kael.me |
bca560451b0408d76f387dc12b62152b768ac6ba | 3db40bfa5c9e686293aa7f0540aa392be2e99a3b | /__init__.py | 7e361a5cb12d48e18e7f290b9678075d4a2cd44a | [] | no_license | OpenVoiceOS/tskill-ocp-cps | 0926a21f653dfc151ecd1e87f34dfaa95d3157f2 | 216fd7096f090e20ff1dc30846f61de66d8e616f | refs/heads/master | 2023-09-03T15:06:04.577257 | 2021-10-21T16:53:30 | 2021-10-21T16:53:30 | 419,805,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,151 | py | from mycroft import intent_file_handler
from mycroft.skills.common_play_skill import CommonPlaySkill, CPSMatchLevel
import random
from mycroft.util.parse import match_one
# Track title -> stream URL (C64 remixes hosted on remix.kwed.org); the keys
# double as the match targets for CPS_match_query_phrase below.
track_dict = {
    'bomb jack': 'http://remix.kwed.org/files/RKOfiles/Chronblom%20-%20Bomb%20Jack%20subtune%206%20(violin%20version).mp3',
    'druid': 'http://remix.kwed.org/files/RKOfiles/Revel%20Craft%20-%20Druid.mp3',
    'crazy comets': 'http://remix.kwed.org/files/RKOfiles/Makke%20-%20Crazy%20Comets%20(Komet%20Non-Stop).mp3',
    'boulder dash': 'http://remix.kwed.org/files/RKOfiles/Mahoney%20-%20BoulderDash%20(Commodore%2069%20mix).mp3',
    'garfield': 'http://remix.kwed.org/files/RKOfiles/Reyn%20Ouwehand%20-%20Garfield.mp3'
}
class Test(CommonPlaySkill):
    """
    say "test audio service play/pause/resume/queue/stop"
    -> confirm direct usage of audio service is routed to OCP
    say "play crazy comets"
    -> verify a track from this skill can be played if selected directly
    (if needed remove other ocp skills)
    -> verify the track from this skill is in search results and can be
    played (select it from playlist, if needed install other ocp skills)
    """
    # Each handler below drives the audio service directly, exercising one
    # transport control per intent file.
    @intent_file_handler("play.intent")
    def handle_play_intent(self, message):
        # Pick a random track from the catalog and start playback.
        uri = track_dict[random.choice(list(track_dict.keys()))]
        self.audioservice.play(uri)
    @intent_file_handler("queue.intent")
    def handle_queue_intent(self, message):
        # Queue every known track URL.
        self.audioservice.queue(list(track_dict.values()))
    @intent_file_handler("stop.intent")
    def handle_stop_intent(self, message):
        self.audioservice.stop()
    @intent_file_handler("pause.intent")
    def handle_pause_intent(self, message):
        self.audioservice.pause()
    @intent_file_handler("resume.intent")
    def handle_resume_intent(self, message):
        self.audioservice.resume()
    @intent_file_handler("prev.intent")
    def handle_prev_intent(self, message):
        self.audioservice.prev()
    @intent_file_handler("next.intent")
    def handle_next_intent(self, message):
        self.audioservice.next()
    def CPS_match_query_phrase(self, phrase):
        """ This method responds whether the skill can play the input phrase.
            The method is invoked by the PlayBackControlSkill.
            Returns: tuple (matched phrase(str),
                            match level(CPSMatchLevel),
                            optional data(dict))
                     or None if no match was found.
        """
        # Get match and confidence
        # (fuzzy-matching the phrase against the track titles; match_one
        # returns the mapped value, i.e. the track URL, plus a confidence).
        match, confidence = match_one(phrase, track_dict)
        # If the confidence is high enough return a match
        if confidence > 0.5:
            return (match, CPSMatchLevel.TITLE, {"track": match})
        # Otherwise return None
        else:
            return None
    def CPS_start(self, phrase, data):
        """ Starts playback.
            Called by the playback control skill to start playback if the
            skill is selected (has the best match level)
        """
        # `data` is the dict returned from CPS_match_query_phrase above.
        url = data['track']
        self.audioservice.play(url)
def create_skill():
    # Mycroft skill-loader entry point: must return a skill instance.
    return Test()
| [
"jarbasai@mailfence.com"
] | jarbasai@mailfence.com |
8a626e8d7d80f256f5efeb0b52ebc5927bc653a7 | c6d389f085c683f33cc0d0ab6497b3f042f7c905 | /vector.py | 3ee0da0ab4351f22b2d671407b21f41f284307f2 | [] | no_license | irhadSaric/computer-geometry | 0d23fbafbedb18b22df30cc8071f4103237eef2d | 25a73c756472896c316d685ca6792c8c94f31361 | refs/heads/master | 2020-04-04T08:01:38.501815 | 2019-02-26T20:05:08 | 2019-02-26T20:05:08 | 155,768,457 | 0 | 0 | null | 2019-02-26T20:10:33 | 2018-11-01T19:56:17 | Python | UTF-8 | Python | false | false | 3,160 | py | from Point import *
class Vector:
    # Directed segment from `head` to `tail`, with a mutable position used
    # while sweeping (presumably a sweep-line y coordinate -- TODO confirm
    # against the caller).
    def __init__(self, head: 'Point', tail: 'Point'):
        self.head = head
        self.tail = tail
        self.currentPosition = head.y
    def changeCurrentPosition(self, value):
        self.currentPosition = value
    def __lt__(self, other: 'Vector'):
        # Primary order: head by (y, then x); segments sharing an identical
        # head fall back to the *reverse* order of their tails.
        if self.head.x == other.head.x and self.head.y == other.head.y:
            return self.tail.y > other.tail.y or (self.tail.y == other.tail.y and self.tail.x > other.tail.x)
        return self.head.y < other.head.y or (self.head.y == other.head.y and self.head.x < other.head.x)
    def __le__(self, other):
        # NOTE(review): identical to __lt__, so v <= v is False; a correct
        # __le__ would also accept equality -- verify the intended semantics.
        if self.head.x == other.head.x and self.head.y == other.head.y:
            return self.tail.y > other.tail.y or (self.tail.y == other.tail.y and self.tail.x > other.tail.x)
        return self.head.y < other.head.y or (self.head.y == other.head.y and self.head.x < other.head.x)
    def __gt__(self, other: 'Vector'):
        # NOTE(review): not the exact negation of __lt__ (no tail tie-break).
        return self.head.y > other.head.y or (self.head.y == other.head.y and self.head.x > other.head.x)
    def __repr__(self):
        return '({}, {})'.format(self.head, self.tail)
    def magnitude(self):
        # Euclidean length of the segment (delegates to Point).
        return self.head.euclidean_distance(self.tail)
    def dot(self, v: 'Vector') -> float:
        # Dot product of the two direction vectors (tail - head).
        prod_x = (self.tail.x - self.head.x) * (v.tail.x - v.head.x)
        prod_y = (self.tail.y - self.head.y) * (v.tail.y - v.head.y)
        return prod_x + prod_y
    @staticmethod
    def do_intersect(s_1: 'Vector', s_2: 'Vector') -> bool:
        # Classic orientation test: the segments cross when each straddles
        # the other's supporting line; collinear overlap is handled apart.
        # orientation of the (self.tail, self.head, s_2.tail) triangle
        s_1_orientation_tail = Point.orientation(s_1.tail, s_1.head, s_2.tail)
        # orientation of the (self.tail, self.head, s_2.head) triangle
        s_1_orientation_head = Point.orientation(s_1.tail, s_1.head, s_2.head)
        # orientation of the (s_2.tail, s_2.head, self.tail) triangle
        s_2_orientation_tail = Point.orientation(s_2.tail, s_2.head, s_1.tail)
        # orientation of the (s_2.tail, s_2.head, self.head) triangle
        s_2_orientation_head = Point.orientation(s_2.tail, s_2.head, s_1.head)
        # general case
        if s_1_orientation_tail != s_1_orientation_head and s_2_orientation_tail != s_2_orientation_head:
            return True
        # collinear case
        if s_1_orientation_tail == 0 and s_1_orientation_head == 0 and s_2_orientation_tail == 0 and s_2_orientation_head == 0:
            if s_1.tail.between(s_2.head, s_2.tail) or s_1.head.between(s_2.head, s_2.tail) \
                    or s_2.tail.between(s_1.head, s_1.tail) or s_2.head.between(s_1.head, s_1.tail):
                return True
        return False
    @staticmethod
    def point_of_intersection(s_1: 'Vector', s_2: 'Vector') -> Point:
        # Line-line intersection via determinants (cross products of the
        # homogeneous line coefficients).
        # NOTE(review): c == 0 for parallel/collinear segments, which raises
        # ZeroDivisionError -- callers should gate on do_intersect first.
        x12 = s_1.head.x - s_1.tail.x
        x34 = s_2.head.x - s_2.tail.x
        y12 = s_1.head.y - s_1.tail.y
        y34 = s_2.head.y - s_2.tail.y
        c = x12 * y34 - y12 * x34
        a = s_1.head.x * s_1.tail.y - s_1.head.y * s_1.tail.x
        b = s_2.head.x * s_2.tail.y - s_2.head.y * s_2.tail.x
        x = (a * x34 - b * x12) / c
        y = (a * y34 - b * y12) / c
        return Point(x, y)
| [
"irhad.saric@hotmail.com"
] | irhad.saric@hotmail.com |
f9815346cd1953430a86b298cf50c513fed4f963 | 5330918e825f8d373d3907962ba28215182389c3 | /RecoTracker/RingESSource/python/RingESSourceTIFTIBTOB_cff.py | 007f2f6354f449c09d356f06e54b30988dd8d96e | [] | no_license | perrozzi/cmg-cmssw | 31103a7179222c7aa94f65e83d090a5cf2748e27 | 1f4cfd936da3a6ca78f25959a41620925c4907ca | refs/heads/CMG_PAT_V5_18_from-CMSSW_5_3_22 | 2021-01-16T23:15:58.556441 | 2017-05-11T22:43:15 | 2017-05-11T22:43:15 | 13,272,641 | 1 | 0 | null | 2017-05-11T22:43:16 | 2013-10-02T14:05:21 | C++ | UTF-8 | Python | false | false | 349 | py | import FWCore.ParameterSet.Config as cms
# geometry
# tracker geometry
# tracker numbering
import copy
from RecoTracker.RingESSource.RingESSource_cfi import *
# rings esproducer
# Clone the default `rings` ESSource (presumably provided by the cfi
# star-import above) and repoint it at the TIF TIB+TOB ring map;
# ComponentName is the label other modules use to request this source.
ringsTIFTIBTOB = copy.deepcopy(rings)
ringsTIFTIBTOB.InputFileName = 'RecoTracker/RingESSource/data/rings_tiftibtob-0004.dat'
ringsTIFTIBTOB.ComponentName = 'TIFTIBTOB'
| [
"sha1-197b93d87bf2e1eb4349df76c6ec25fd8f1f348e@cern.ch"
] | sha1-197b93d87bf2e1eb4349df76c6ec25fd8f1f348e@cern.ch |
41134729528c1d14ae99ed8d555dec4c20966af9 | cfb6923223bd5b2cad56ece404f74fbb6889837b | /TAPI_RI/funcs_TapiNotification/context_NotifsubscriptionUuid_NotificationNotification_UuidAdditionalinfoImpl.py | a0dbbf54c5e02c9551ac997190cb2f5fc644be56 | [
"Apache-2.0"
] | permissive | XingZhao-CATR/Snowmass-ONFOpenTransport | 27206bd84ff8d9ea2ec7b8ee25a9085b9c96af6d | c5807944bb1333a8ed83d6beea3e55922d006495 | refs/heads/develop | 2021-01-13T16:59:27.016238 | 2016-12-21T13:19:17 | 2016-12-21T13:19:17 | 77,099,371 | 1 | 0 | null | 2016-12-22T01:29:16 | 2016-12-22T01:29:16 | null | UTF-8 | Python | false | false | 690 | py | import os.path, sys
sys.path.append(os.path.join('/'.join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])))
import backend.backend as be
class Context_NotifsubscriptionUuid_NotificationNotification_UuidAdditionalinfoImpl:
@classmethod
def get(cls, uuid, notification_uuid):
print 'handling get'
if uuid in be.Context._notifSubscription:
if notification_uuid in be.Context._notifSubscription[uuid]._notification:
return be.Context._notifSubscription[uuid]._notification[notification_uuid].additionalInfo
else:
raise KeyError('notification_uuid')
else:
raise KeyError('uuid')
| [
"ricard.vilalta@cttc.es"
] | ricard.vilalta@cttc.es |
b2dc3ba20396023b2dfbc243d1019fe2d64a8aed | e987cd566edc75997f9b02377514d4f3a0dba12c | /sys/src/Python/glue/GlueDoc.py | d232ba9d1930437ecefe845450be4cea57ffe4bc | [] | no_license | 7u83/maxdb-buildtools | f942adff2cd55d0a046b6ef3e18f6645b011a26e | ce9a56943f6195d6755e983035aa96cbe95e6cb2 | refs/heads/master | 2020-05-04T18:23:30.849371 | 2015-02-15T19:25:49 | 2015-02-15T19:25:49 | 30,428,297 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,400 | py |
#
# ========== licence begin LGPL
# Copyright (C) 2002 SAP AG
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ========== licence end
#
class GlueDoc:
isMethod = None
def __init__ (self, name, doc, language = None):
self.pyname = name
self.doc = doc
self.language = language
def writeGlue (self, glue):
pass
def methods (self):
return []
def isRealConstructor (self):
return None
def supportFor (self, key, value):
if key != 'language':
return 1
if self.language == None:
return 1
return self.language == value
def isDocumented (self):
return 1
class GlueExample (GlueDoc):
language = None
def __init__ (self, name, doc, code = []):
self.pyname = name
self.doc = doc
self.code = code
def getCode (self):
return code
class GlueImportExample (GlueExample):
def __init__ (self, name, comment, fname):
GlueExample.__init__ (self, name, comment)
self.fname = fname
def getCode (self):
try:
data = open (self.fname, "r").read ()
except IOException:
data = "Access to '%s' has been denied" % self.fname
return data
class GlueExternExample (GlueExample):
def __init__ (self, name, comment, fname):
GlueExample.__init__ (self, name, comment)
self.fname = fname
def getCode (self):
return None
class GlueDirectory (GlueDoc):
def __init__ (self, name, language, items):
GlueDoc.__init__ (self, name, '', language)
self.items = items
def methods (self):
return []
| [
"7u83@mail.ru"
] | 7u83@mail.ru |
461e3e4738dde29dad72d1244aac00aa59a41a84 | 4ba29d0e50d0af604231834b099faa35f2cb369f | /task.py | 8134f928af0feb5ec687a03e666609b86200749f | [] | no_license | boxabhi/task | 8a10b90c429c3e3bdd1d86a5a7e8bfb97653b1ec | 242b7c325821941a95962bdfce384bb4519861fe | refs/heads/main | 2023-04-14T11:13:13.381423 | 2021-04-27T05:48:41 | 2021-04-27T05:48:41 | 361,995,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py |
def checkout_time_for_customers(customers,cashregisters:int):
if cashregisters == 1:
return sum(customers)
elif len(customers) <= cashregisters:
return max(customers)
registers = {}
for i in range(cashregisters):
registers[i] = customers.pop(0)
total_time_taken = 0
while any(registers.values()):
for r in registers.copy():
registers[r] -= 1
if registers[r] <= 0:
try:
registers[r] = customers.pop(0)
except IndexError:
registers[r] = 0
total_time_taken += 1
return total_time_taken
print( checkout_time_for_customers([5, 1, 3], 1))
def check_string(string_list):
str1 , str2 = string_list
set_str1 = set()
for i in str1:
set_str1.add(i.lower())
for i in str2:
if i.lower() not in set_str1:
return False
return True
print(check_string(["hello", "Hello"]))
print(check_string(["hello", "hey"]))
print(check_string(["Alien", "line"]))
| [
"abhijeetg40@gmail.com"
] | abhijeetg40@gmail.com |
e5006a48e87f2df9244a3b3122b5a6df5ffdd88a | 8c96b3a657cfb1cd360b469dac564af58a946199 | /repo/migrations/0001_initial.py | 9a2ccef5ac67f632900fb51ac16ffd866142fa31 | [] | no_license | nc415/ub40 | db3de90533c2adcb997f1ffa57a3099b1905a331 | a3642aad555d355dc8bfd29a3a34bfd6e7507a43 | refs/heads/master | 2021-05-10T18:49:52.746856 | 2018-01-19T14:28:38 | 2018-01-19T14:28:38 | 118,134,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2018-01-15 07:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='BU',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('BU_Name', models.CharField(max_length=128)),
],
),
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Company_Name', models.CharField(max_length=128)),
('Company_Region', models.CharField(blank=True, max_length=128)),
('pageid', models.SlugField(blank=True)),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
],
),
]
| [
"njcollins@live.co.uk"
] | njcollins@live.co.uk |
d20980431d2ee44f070b5a9c1c96fb17d2e08daa | 7c16a9f999f966060c064ae5bd4bddaf8f4e1dd0 | /factorialkabaap.py | b7d7d4c960d665df5099436437cc7c666ee6b215 | [] | no_license | sbd2309/Adv.Python | fd5ed698b14c75484903006da7753a155cf11b47 | f7ef906cd78114643ffaaaaca6d4cb0ccfb34f62 | refs/heads/master | 2021-10-25T01:48:29.420102 | 2021-10-17T06:20:11 | 2021-10-17T06:20:11 | 232,631,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | def factorialbaap(f,p):
n=1
for i in range (f,0,-1):
n=n*i
#print(n)
x=p
flag=0
ans=0
while 1==1:
for i in range (1,1000,1):
x=p**i
if n%x==0:
ans=i
elif x>n:
flag=1
break
break
if flag==1 and ans!=0:
print(ans)
else:
print(0)
n=int(input())
for i in range(0,n,1):
x,y =[int(i) for i in input().strip().split(' ')]
factorialbaap(x,y)
| [
"noreply@github.com"
] | sbd2309.noreply@github.com |
c264903b770885106ba842f139ebd7276582f48c | c3082eb2adc43b311dd3c9ff16fd3ed9df85f266 | /python/examples/iterators/iterator.py | af8ec00738a77944aec8db10c66ae4989a13a74a | [] | no_license | szabgab/slides | 78818c7138331b3ba9e221c81da3678a46efe9b3 | 63bba06678554db737602f2fbcd6510c36037e8a | refs/heads/main | 2023-08-31T07:13:51.536711 | 2023-08-29T13:17:59 | 2023-08-29T13:17:59 | 122,212,527 | 87 | 69 | null | 2023-05-19T06:55:11 | 2018-02-20T14:57:03 | Python | UTF-8 | Python | false | false | 237 | py | class Counter():
def __init__(self):
self.count = 0
def __iter__(self):
return self
def __next__(self):
self.count += 1
return self.count
for c in Counter():
print(c)
if c > 10:
break
| [
"gabor@szabgab.com"
] | gabor@szabgab.com |
3cf933e63768f3782c4288670a9dbd91e6322762 | e267c91f23055397201c3d9c23d7583b269d51b8 | /backend/pugorugh/tests/test_models.py | 8dc2afd1f43216ec105938d91567b1737188eab6 | [] | no_license | mcintoshsg/pug_or_ugh_v1 | 8678213b4b4ea09a70f369aa08002ff4a8194a29 | 3e735cd840ffc5a85497eab48518800f0757d9f3 | refs/heads/master | 2020-03-19T15:26:41.152968 | 2018-06-14T01:30:49 | 2018-06-14T01:30:49 | 136,670,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,032 | py | from django.contrib.auth.models import User
from django.test import TestCase
from pugorugh.models import Dog, UserDog, UserPref
# 2. Test get all dogs
# 3. Test get single dog
# 4. Test delete single dog
# 5. Test update single dog
# 6. Test create user preferences
# 7. Test get user preferences
# 8. Test update user preferences
# 9. Test update new user prefernces - updates all dogs that match with U
# 10. Test validiators - bad entries
# 11. Test get all liked dogs
# 12. Test get all unliked dogs
# 13. Test get all undecided dogs
# 14. Test iterate through next like or disliked or undecided
# 15. Test new user creation - token creates
# 16. Test the URLS
# create a base modeltest case the models
class BaseTestCase(TestCase):
def setUp(self):
''' setup up dummy data for the Dog model '''
dog_1 = {
'name': 'dog_1',
'image_filename': '1.jpg',
'breed': 'mutt',
'age': 12,
'gender': 'm',
'size': 'm'
}
dog_2 = {
'name': 'dog_2',
'image_filename': '2.jpg',
'breed': 'mutt',
'age': 48,
'gender': 'f',
'size': 'l'
}
self.dog_1 = Dog.objects.create(**dog_1)
self.dog_2 = Dog.objects.create(**dog_2)
def tearDown(self):
pass
class UserModelTestCase(BaseTestCase):
''' test cases for the user model '''
@staticmethod
def create_test_users(count=2):
''' this test creates 2 users in the database
'''
for i in range(count):
User.objects.create(
username='user_{}'.format(i),
email='test_{}@example.com'.format(i),
password='password'
)
def test_create_user(self):
''' test the creation of the user '''
self.create_test_users()
self.assertEqual(User.objects.count(), 2)
self.assertEqual(User.objects.get(id=1).password,'password')
class DogModelTests(BaseTestCase):
''' testing of the Dog model '''
def test_dog_creation(self):
''' test out the creation of our model '''
balto = Dog.objects.get(name="dog_1")
self.assertEqual(balto, self.dog_1)
alfie = Dog.objects.get(name="dog_2")
self.assertEqual(alfie, self.dog_2)
class UserDogModelTests(BaseTestCase):
''' testing of the UserDog model '''
def create_user_dogs(self):
UserModelTestCase.create_test_users(2)
self.user_1 = User.objects.get(id=1)
self.user_2 = User.objects.get(id=2)
UserDog.objects.create(user=self.user_1, dog=self.dog_1, status='u')
UserDog.objects.create(user=self.user_1, dog=self.dog_2, status='u')
UserDog.objects.create(user=self.user_2, dog=self.dog_1, status='u')
UserDog.objects.create(user=self.user_2, dog=self.dog_2, status='u')
def test_user_dog_creation(self):
''' test the creation of userdogs '''
self.create_user_dogs()
self.assertEqual(UserDog.objects.count(), 4)
self.assertEqual(UserDog.objects.get(id=1).user, self.user_1)
self.assertEqual(UserDog.objects.get(id=1).status, 'u')
class UserPrefModelTests(BaseTestCase):
''' testing of the UserDog model '''
def create_user_prefs(self):
UserModelTestCase.create_test_users(1)
self.user_1 = User.objects.get(id=1)
UserPref.objects.create(user=self.user_1,
age='b,y',
gender='m,f',
size='l,xl'
)
def test_user_dog_creation(self):
''' test the creation of userdogs '''
self.create_user_prefs()
self.assertEqual(UserPref.objects.count(), 1)
self.assertEqual(UserPref.objects.get(id=1).user, self.user_1)
self.assertEqual(UserPref.objects.get(id=1).gender, 'm,f')
| [
"s.g.mcintosh@gmail.com"
] | s.g.mcintosh@gmail.com |
edd7051f3b24b7ae5b7bbd28ade6fb8b9621ccf9 | e5eec1428da1d24d3e9b86f5723c51cd2ca636cd | /DFS-BFS/백준/골드/신기한 소수_dfs.py | 118ff0683648aeac2cd16c566aebdc946eedd153 | [] | no_license | jamwomsoo/Algorithm_prac | 3c36c381f59277721517d331a8f1640399d80c1d | 8393f3cc2f950214c47f3cf0b2c1271791f115d0 | refs/heads/master | 2023-06-09T06:49:14.739255 | 2021-06-18T06:41:01 | 2021-06-18T06:41:01 | 325,227,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py |
n = int(input())
prime = [2,3,5,7]
def isprime(num):
if num<2: return False
for i in range(2,num):
if num%i==0: return False
return True
def dfs(first,num):
if num == 0: print(first)
for i in range(1,10,2):
tmp = first*10 + i
if isprime(tmp): dfs(tmp,num-1)
for i in range(4):
dfs(prime[i],n-1)
| [
"41579282+jamwomsoo@users.noreply.github.com"
] | 41579282+jamwomsoo@users.noreply.github.com |
fda5619a7e5ab87fb558f09dcbc1753b0164f43d | 1f177b5e7bdaca49076c6ff806f5e2be9a86e834 | /database/orm/models.py | d75da518cbf97101ca5fe3648930f63c92200bf2 | [] | no_license | silverlyjoo/TIL | 9e19ba407a9dc82c231e66e352f1c7783e767782 | 98a139770a6d19598d787674bcf20d2fe744ced0 | refs/heads/master | 2021-08-17T02:10:35.101212 | 2019-08-26T08:21:32 | 2019-08-26T08:21:32 | 162,099,046 | 6 | 1 | null | 2021-06-10T21:20:36 | 2018-12-17T08:32:39 | Jupyter Notebook | UTF-8 | Python | false | false | 393 | py | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
# Table 만들기
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
def __repr__(self):
return f"<user '{self.username}'>"
| [
"silverlyjoo@gmail.com"
] | silverlyjoo@gmail.com |
d54e532e6ca44bc53831b5abdcf003a2c8825d08 | 73db66a771cbef43abf1fefc7e0d210001ec2b4a | /example/example/spiders/book_spider.py | 49c774a8c963ad55285e8c1f814908dc49c7ee0e | [] | no_license | tianrking/Scrapy_Demo | 52008c094d4858383a61c2fd03ba3aa0dddcb3b9 | 9c621d2e1175aac5cfff0f42fc7667be6f46c9c1 | refs/heads/master | 2020-12-14T21:18:33.356797 | 2020-02-03T08:24:48 | 2020-02-03T08:24:48 | 234,871,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | import scrapy
class BookSpider(scrapy.Spider):
name="books"
start_urls=["https://ncov.dxy.cn/ncovh5/view/pneumonia"]
#start_urls=["http://books.toscrape.com/"]
def parse(self,response):
#for book in response.css('article.product_pod'):
#for book in response.xpath('//article[@class="product_pod"]'):
for a in response.css('div.areaBlock2___27vn7'):
#name=book.xpath('./h3/a/@title').extract_first()
#price=book.css('p.price_color::text').extract_first()
name = a.xpath('./p[@class="subBlock*]/text()')
yield{
'name': name,
#'price': price,
}
# next_url=response.css('ul.pager li.next a::attr(href)').extract_first()
# if next_url:
# next_url= response.urljoin(next_url)
# yield scrapy.Request(next_url,callback=self.parse) | [
"root@localhost.localdomain"
] | root@localhost.localdomain |
c786245e6f92c1f9c62b1acb26e39a9ac9f11ac1 | fafa39d9eda46f5ee0d3ac7bf199237e7a748931 | /API/course/urls.py | 1c02cf1d5389151843b1392e818592f06517b2a3 | [
"MIT"
] | permissive | kasimbozdag/SWE_573 | bfb137b6db94619d76082ea3884036d64cfe934d | 4bce24f98fe6980b1f2c83196b8454b56118186b | refs/heads/master | 2022-02-18T11:42:04.363376 | 2019-05-29T09:56:19 | 2019-05-29T09:56:19 | 171,244,989 | 0 | 0 | MIT | 2022-02-10T10:36:38 | 2019-02-18T08:35:15 | JavaScript | UTF-8 | Python | false | false | 1,259 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^create_course', views.CourseCreateAPIView.as_view(), name="create-course"),
url(r'^list', views.CourseListAPIView.as_view(), name="courses"),
url(r'^my', views.TeacherCoursesAPIView.as_view(), name="my-courses"),
url(r'^(?P<pk>[0-9]+)/inactivate', views.CourseInactivateAPIView.as_view(), name="inactivate"),
url(r'^(?P<pk>[0-9]+)/activate', views.CourseActivateAPIView.as_view(), name="activate"),
url(r'^enrolled', views.EnrolledCourseAPIView.as_view(), name="enroll-list"),
url(r'^(?P<pk>[0-9]+)/enroll', views.EnrollCourseAPIView.as_view(), name="enroll"),
url(r'^(?P<pk>[0-9]+)/drop', views.EnrollmentAPIView.as_view(), name="drop"),
url(r'^(?P<obj_model>[0-9]+)/(?P<obj_pk>[0-9]+)/(?P<p_model>[0-9]+)/(?P<p_pk>[0-9]+)', views.PrerequisiteCreateAPIView.as_view(), name="pre"),
url(r'^prerequisite/(?P<pk>[0-9]+)', views.PrerequisiteAPIView.as_view(), name="pre-delete"),
url(r'^(?P<obj_model>[A-Za-z]+)/(?P<obj_pk>[0-9]+)', views.FullFilledCreateAPIView.as_view(), name="full"),
url(r'^(?P<pk>[0-9]+)', views.CourseAPIView.as_view(), name="course"),
url(r'^models', views.ContentTypeListAPIView.as_view(), name="models"),
]
| [
"bozdag80@yahoo.com"
] | bozdag80@yahoo.com |
401fce8967d656ccb95f7f42b57e3d4814b4d9c3 | aae551baa369fda031f363c2afbdf1984467f16d | /Machine_Learning/Contest/Code/gaussian_process.py | 879a09d88af6d3a713ec7d9b52d8ab5d61a59578 | [] | no_license | ameet-1997/Course_Assignments | 37f7d4115baec383ccf029772efcf9c33beb2a23 | 629e9d5cfc6fa6cf37a96c5fcc33bc669cbdc59d | refs/heads/master | 2021-05-16T16:23:32.731296 | 2018-02-03T05:57:01 | 2018-02-03T05:57:01 | 119,939,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,748 | py | import pandas as pd
import numpy as np
from sklearn.preprocessing import Imputer
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from sklearn.utils import shuffle
import time
from sklearn.svm import SVC
from sklearn.ensemble import BaggingClassifier
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.gaussian_process import GaussianProcessClassifier
# Load and impute the data using the mean
train_data = pd.read_csv("train.csv")
# Get the train labels and subset the data
train_labels = train_data.iloc[:,-1]
train_data = train_data.iloc[:,1:-1]
# Impute it
i = Imputer(strategy='median')
train_data = i.fit_transform(train_data)
test_data = pd.read_csv("test.csv")
test_data = test_data.iloc[:,1:]
test_data = i.transform(test_data)
# Get validation data
train_data, validation_data, train_labels, validation_labels = train_test_split(train_data, train_labels, test_size=1000, stratify=np.array(train_labels))
# Dimensionality Reduction
pca = PCA(n_components=200)
pca.fit(train_data, train_labels)
train_data = pca.transform(train_data)
validation_data = pca.transform(validation_data)
test_data = pca.transform(test_data)
# Gaussian Kernel
start_time = time.time()
lin = GaussianProcessClassifier(n_jobs=-1, max_iter_predict=10, warm_start=True)
lin.fit(train_data, train_labels)
predicted_labels = lin.predict(validation_data)
print("Validation Score: "+str(f1_score(validation_labels, predicted_labels, average='macro')))
print("Total time: "+str(time.time()-start_time))
# test_labels = pd.DataFrame(lin.predict(test_data))
# test_labels.to_csv("gaussian_process1.csv", index=True, index_label=['id','label']) | [
"ameetsd97@gmail.com"
] | ameetsd97@gmail.com |
12631fa3eb7b47872dab382bbdbf156c15689b08 | 1215102b7853653e241e6dfcfc88a0a260aaf3dc | /hyperhyper/pmi.py | f0a385d3a7982cbe0f5086463485244ff486d582 | [
"BSD-2-Clause"
] | permissive | jfilter/hyperhyper | 81cf09763f1b1bebe8b581d4e60a53295babcd77 | 30983a82b1db037408de56bdddde9a5a9508c656 | refs/heads/master | 2023-01-02T00:24:59.259407 | 2020-10-25T21:46:14 | 2020-10-25T21:46:14 | 189,021,107 | 14 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,145 | py | """
implements PMI matrix (Pointwise mutual information)
See: https://en.wikipedia.org/wiki/Pointwise_mutual_information
"""
import heapq
import numpy as np
from gensim import matutils
from scipy.sparse import csr_matrix, dok_matrix
def calc_pmi(counts, cds):
"""
Calculates e^PMI; PMI without the log().
"""
sum_w = np.array(counts.sum(axis=1))[:, 0]
sum_c = np.array(counts.sum(axis=0))[0, :]
if cds != 1:
sum_c = sum_c ** cds
sum_total = sum_c.sum()
sum_w = np.reciprocal(sum_w)
sum_c = np.reciprocal(sum_c)
pmi = csr_matrix(counts)
pmi = multiply_by_rows(pmi, sum_w)
pmi = multiply_by_columns(pmi, sum_c)
pmi = pmi * sum_total
return pmi
def multiply_by_rows(matrix, row_coefs):
normalizer = dok_matrix((len(row_coefs), len(row_coefs)))
normalizer.setdiag(row_coefs)
return normalizer.tocsr().dot(matrix)
def multiply_by_columns(matrix, col_coefs):
normalizer = dok_matrix((len(col_coefs), len(col_coefs)))
normalizer.setdiag(col_coefs)
return matrix.dot(normalizer.tocsr())
class PPMIEmbedding:
"""
Base class for explicit representations. Assumes that the serialized input is e^PMI.
Positive PMI (PPMI) with negative sampling (neg).
Negative samples shift the PMI matrix before truncation.
"""
def __init__(self, matrix, normalize=True, neg=1):
self.m = matrix
self.m.data = np.log(self.m.data)
# not needed?
# # self.normal = normalize
if neg is not None:
self.m.data -= np.log(neg)
self.m.data[self.m.data < 0] = 0
self.m.eliminate_zeros()
if normalize:
self.normalize()
def normalize(self):
m2 = self.m.copy()
m2.data **= 2
norm = np.reciprocal(np.sqrt(np.array(m2.sum(axis=1))[:, 0]))
normalizer = dok_matrix((len(norm), len(norm)))
normalizer.setdiag(norm)
self.m = normalizer.tocsr().dot(self.m)
def represent(self, w_idx):
return self.m[w_idx, :]
def similarity(self, w1, w2):
"""
Assumes the vectors have been normalized.
"""
return self.represent(w1).dot(self.represent(w2).T)[0, 0]
def most_similar(self, w, n=10):
"""
Assumes the vectors have been normalized.
"""
scores = self.m.dot(self.represent(w).T).T.tocsr()
return heapq.nlargest(n, zip(scores.data, scores.indices))
# TODO: working?
def most_similar_vectors(self, positives, negatives, topn=10):
"""
Some parts taken from gensim.
https://github.com/RaRe-Technologies/gensim/blob/ea87470e4c065676d3d33df15b8db4192b30ebc1/gensim/models/keyedvectors.py#L690
"""
mean = [np.squeeze(self.represent(x).toarray()) for x in positives] + [-1 * np.squeeze(self.represent(x).toarray()) for x in negatives]
mean = matutils.unitvec(np.array(mean).mean(axis=0)).astype(np.float32)
dists = self.m.dot(mean)
best = matutils.argsort(dists, topn=topn, reverse=True)
return [(best_idx, float(dists[best_idx])) for best_idx in best]
| [
"hi@jfilter.de"
] | hi@jfilter.de |
4086e3b1a38b44ca64c3c26ab771c4058a470927 | c2bdcd5aec95d5c4ac4322f166c2ef9b2b8992f9 | /kurstag_8/loesungen_8/Kreditkarte_Loesung2.py | 68fa57d69d26630f26eb934afd6fa936dafcb8d2 | [] | no_license | softborg/Python_HWZ_Start | 4437c5d8676301db8f4c42b75c98f0cc91320012 | 6361647113365df66e3ad84a0d1d1b563137ebbd | refs/heads/master | 2022-07-21T16:27:30.333598 | 2022-07-12T12:08:37 | 2022-07-12T12:08:37 | 252,724,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | # coding=utf8
# Aufgabe 2 - Kreditkarte
# 1. erstellen sie folgende Klassen 'Kreditkarte', 'Visa' und 'Mastercard
# 2. beim Instanziieren soll die Kartennummer mitgegeben werden, die Kartennummer ist public
# 3. Die Klassen 'Visa' und 'Mastercard' erben von 'Kreditkarte' und haben jeweils einen eignen Initalisierung
# 4. Bei der Initalisierung der Visa Kartennummer wird die Endung "-1944" angefügt
# 5. Bei der Initalisierung der Mastercard Kartennummer wird die Endung "-1234" angefügt
# 6. Instanziieren sie jeweils eine Visa- und ein Mastercard und eine Kreditkarte !
# 7. Geben sie jeweils die Kartennummer aus
class Kreditkarte:
def __init__(self, kartennr):
self.kartennr = kartennr
class Visa(Kreditkarte):
def __init__(self, kartennr):
self.kartennr = kartennr + "-1944"
class Mastercard(Kreditkarte):
def __init__(self, kartennr):
self.kartennr = kartennr + "-1234"
visa = Visa("412340998")
print(visa.kartennr)
mastercard = Mastercard("77770999")
print(mastercard.kartennr)
kreditkarte = Kreditkarte("1239")
print(kreditkarte.kartennr)
| [
"stefan.berger@softborg.com"
] | stefan.berger@softborg.com |
31fa03847837f428a42b58c029ab3b2371f78651 | 02c6b39399c1cfb434ad718c90bed3d8e6310ed0 | /training/ppo/tune/tune_train_PPO_car.py | dc4022a6f9ce60ae09adfcb96cc19810c33bb75c | [] | no_license | phate09/SafeDRL | 09b8924fa91aa43cf543ea5727ebe4cc8e13c0a5 | 3d4278eaaabb046a90fc1cebd1b5862d63dc5894 | refs/heads/master | 2022-09-17T05:12:28.529329 | 2022-08-29T08:21:32 | 2022-08-29T08:21:32 | 204,663,981 | 8 | 3 | null | 2021-12-02T14:13:46 | 2019-08-27T09:07:04 | Python | UTF-8 | Python | false | false | 3,401 | py | import random
from datetime import datetime
import numpy as np
import ray
from gym.vector.utils import spaces
from ray import tune
from ray.rllib.models import ModelCatalog
from ray.rllib.models.torch.fcnet import FullyConnectedNetwork as TorchFC
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.framework import try_import_torch
from environment.stopping_car import StoppingCar
torch, nn = try_import_torch()
custom_input_space = spaces.Box(low=-np.inf, high=np.inf, shape=(2,), dtype=np.float32)
class TorchCustomModel(TorchModelV2, nn.Module):
"""Example of a PyTorch custom model that just delegates to a fc-net."""
def __init__(self, obs_space, action_space, num_outputs, model_config, name):
TorchModelV2.__init__(self, custom_input_space, action_space, num_outputs, model_config, name)
nn.Module.__init__(self)
self.torch_sub_model = TorchFC(custom_input_space, action_space, num_outputs, model_config, name)
def forward(self, input_dict, state, seq_lens):
input_dict["obs"] = input_dict["obs"].float()[:, -2:]
fc_out, _ = self.torch_sub_model(input_dict, state, seq_lens)
return fc_out, []
def value_function(self):
return torch.reshape(self.torch_sub_model.value_function(), [-1])
def get_PPO_config(seed, use_gpu=1):
ModelCatalog.register_custom_model("my_model", TorchCustomModel)
config = {"env": StoppingCar, #
"model": {"custom_model": "my_model", "fcnet_hiddens": [64, 64], "fcnet_activation": "relu"}, # model config," "custom_model": "my_model"
"vf_share_layers": False,
"lr": 5e-4,
"num_gpus": use_gpu,
"vf_clip_param": 100000,
"grad_clip": 2500,
"clip_rewards": 5,
"num_workers": 3, # parallelism
"num_envs_per_worker": 10,
"batch_mode": "complete_episodes",
"evaluation_interval": 10,
"evaluation_num_episodes": 20,
"use_gae": True, #
"lambda": 0.95, # gae lambda param
"num_sgd_iter": 10,
"train_batch_size": 4000,
"sgd_minibatch_size": 1024,
"rollout_fragment_length": 1000,
"framework": "torch",
"horizon": 1000,
"seed": seed,
"evaluation_config": {
# Example: overriding env_config, exploration, etc:
# "env_config": {...},
"explore": False
},
"env_config": {"cost_fn": tune.grid_search([0]),
"epsilon_input": tune.grid_search([0])} #
}
return config
if __name__ == "__main__":
seed = 1234
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
ray.init(local_mode=True, include_dashboard=True, log_to_driver=False)
config = get_PPO_config(use_gpu=0.5, seed=seed)
datetime_str = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
tune.run(
"PPO",
stop={"info/num_steps_trained": 2e8, "episode_reward_mean": -2e1},
config=config,
name=f"tune_PPO_stopping_car",
checkpoint_freq=10,
checkpoint_at_end=True,
log_to_file=True,
# resume="PROMPT",
verbose=1,
num_samples=10
)
ray.shutdown()
| [
"phate09@hotmail.it"
] | phate09@hotmail.it |
5ca47a8f2ebd168cadb146b40760ae74bf5b65dd | a913684fe348945c2b79786115fd392945cfcf72 | /user/urls.py | 314e6f4677e2c1433fef3c06f975cb168ecb7b44 | [] | no_license | LukaszMalucha/docker-django | 0332c4153d50add049db36479079ace2c664bea2 | 4b34f835b7ea3f8f9baa9956943b4ba9111f39fa | refs/heads/master | 2023-02-03T05:56:10.059009 | 2020-12-19T16:55:06 | 2020-12-19T16:55:06 | 322,545,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | from django.urls import path
from user import views
app_name = 'user'
urlpatterns = [
path("create/", views.CreateUserView.as_view(), name="create"),
path("authenticate/", views.CreateTokenView.as_view(), name="authenticate"),
path("my-account/", views.ManageUserView.as_view(), name="my-account"),
path("current-user/", views.CurrentUserApiView.as_view(), name="current-user"),
]
| [
"lucasmalucha@gmail.com"
] | lucasmalucha@gmail.com |
bd52bfd00c8d7b8e6a83dda80689159e389c7d39 | 3a57805be67d568bc516cc821deb3d912dbf87ad | /diffscuss/walker.py | feabedbadb58dae353f35514507b42580832274f | [
"MIT"
] | permissive | tomheon/diffscuss | 8e0ee331cc37dd7a3607e2a434fb59ea9ca69d3f | 53f2d001bd3a5cb80c6ada16b4e570afd1989a09 | refs/heads/master | 2023-08-15T23:05:59.277846 | 2021-06-17T21:19:39 | 2021-06-17T21:19:39 | 8,643,720 | 39 | 3 | MIT | 2018-03-21T16:33:17 | 2013-03-08T05:18:43 | Python | UTF-8 | Python | false | false | 4,486 | py | from collections import namedtuple
import re
class BadNestingException(Exception):
pass
class MissingAuthorException(Exception):
pass
class EmptyCommentException(Exception):
pass
class CommentInHeaderException(Exception):
pass
DIFF_HEADER = 'DIFF_HEADER'
DIFF = 'DIFF'
COMMENT_HEADER = 'COMMENT_HEADER'
COMMENT_BODY = 'COMMENT_BODY'
def walk(fil):
"""
Walk a Diffscuss file, yielding either:
(DIFF, line)
For each line that is part of a diff,
(DIFF_HEADER, line)
For each diff header line (e.g. Index lines, range lines),
(COMMENT_HEADER, line)
for each diffscuss comment header line, or
(COMMENT_BODY, line)
for each diffscuss body line.
@fil: a file-like object containing Diffscuss.
The default error handler raises the following exceptions:
MissingAuthorException: if there's no author header at the start
of a comment.
BadNestingException: if a comment is improperly nested.
EmptyCommentException: if a comment has no body.
CommentInHeaderException: if a comment appears in a diff header.
"""
line = fil.readline()
in_header = False
# allow the normal magic header lines (such as encoding), but
# don't consider them part of the diffscuss file.
while line.startswith("#") and not _is_diffscuss_line(line):
line = fil.readline()
while True:
if not line:
break
if _is_diffscuss_line(line):
if in_header:
raise CommentInHeaderException()
tagged_comment_lines, line = _read_comment(line, fil)
for tag, comment_line in tagged_comment_lines:
yield (tag, comment_line)
# continue so we don't read another line at the bottom
continue
elif in_header or _is_not_diff_line(line):
# check for non-diff line has to come second, since the
# --- and +++ in the header will read as diff lines
# otherwise
yield (DIFF_HEADER, line)
in_header = not _is_range_line(line)
else:
yield (DIFF, line)
line = fil.readline()
def _read_comment(line, fil):
header_lines, line = _read_header(line, fil)
_check_header(header_lines)
body_lines, line = _read_body(line, fil)
_check_body(body_lines)
return ([(COMMENT_HEADER, header_line)
for header_line
in header_lines] +
[(COMMENT_BODY, body_line)
for body_line
in body_lines],
line)
def _check_body(body_lines):
if not body_lines:
raise EmptyCommentException()
def _check_header(header_lines):
for line in header_lines:
if _is_author_line(line):
return
if not _is_empty_header(line):
raise MissingAuthorException()
raise MissingAuthorException()
def _level(line):
header_match = _is_header(line)
if header_match:
return len(header_match.group(1)) - 1
body_match = _is_body(line)
if body_match:
return len(body_match.group(1)) - 1
return None
def _read_header(line, fil):
return _read_comment_part(line, fil, _is_header)
def _read_body(line, fil):
return _read_comment_part(line, fil, _is_body)
def _read_comment_part(line, fil, pred):
part_lines = []
level = _level(line)
while True:
if not pred(line):
break
if _level(line) != level:
raise BadNestingException()
part_lines.append(line)
line = fil.readline()
return part_lines, line
HEADER_RE = re.compile(r'^(#[*]+)( |$)')
EMPTY_HEADER_RE = re.compile(r'^(#[*]+)\s*$')
def _is_header(line):
return HEADER_RE.match(line)
def _is_empty_header(line):
return EMPTY_HEADER_RE.match(line)
AUTHOR_RE = re.compile(r'^(#[*]+) author: ')
def _is_author_line(line):
return AUTHOR_RE.match(line)
BODY_RE = re.compile(r'^(#[-]+)( |$)')
def _is_body(line):
return BODY_RE.match(line)
def _is_range_line(line):
return line.startswith('@@')
def _is_diffscuss_line(line):
return line.startswith('#*') or line.startswith('#-')
# legal starts to a unified diff line inside a hunk
DIFF_CHARS = (' ', '+', '-', '\\')
def _is_not_diff_line(line):
"""
Treat a totally blank line as a diff line to be flexible, since emacs
can strip trailing spaces.
"""
return line.strip() and not line.startswith(DIFF_CHARS)
| [
"tomheon@gmail.com"
] | tomheon@gmail.com |
031d3b14a2ac7dac2dfe0897acea866b23cce203 | 78d7d7aeb78a8cea6d0e10b89fc4aa6c46c95227 | /1910.py | bb86d16343515a3463d889f65c184f6a9f3e47a0 | [] | no_license | GenryEden/kpolyakovName | 97db13ef93061a8c2afc6cc5acd91337f79063f1 | c5d7f631ae7ec8770e56170574b82ea2b7d8a4d9 | refs/heads/master | 2023-05-23T21:22:51.983756 | 2021-06-21T08:56:49 | 2021-06-21T08:56:49 | 350,466,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | def perebor(alphabet, length):
if length == 0:
yield ''
else:
for letter in alphabet:
for word in perebor(alphabet, length-1):
yield letter+word
def check(word):
toCount = 'РСТМ'
cnt = 0
for s in word:
if s in toCount:
cnt += 1
return cnt >= 3
ans = set()
for word in perebor('РУСТАМ', 6):
if check(word):
ans.add(word)
print(len(ans)) | [
"a926788@gmail.com"
] | a926788@gmail.com |
e99da711b9b45b9235b594bcc9117c07bc1d1f4a | e2e08d7c97398a42e6554f913ee27340226994d9 | /pyautoTest-master(ICF-7.5.0)/test_case/scg/scg_OSPF/test_c140802.py | 1986465490ec5bcd8e0a0201dc5ae7f3c2d1a163 | [] | no_license | lizhuoya1111/Automated_testing_practice | 88e7be512e831d279324ad710946232377fb4c01 | b3a532d33ddeb8d01fff315bcd59b451befdef23 | refs/heads/master | 2022-12-04T08:19:29.806445 | 2020-08-14T03:51:20 | 2020-08-14T03:51:20 | 287,426,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py |
import pytest
import time
import sys
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from page_obj.scg.scg_def_ospf import *
from page_obj.scg.scg_def_vlan_interface import *
from page_obj.scg.scg_def_bridge import *
from page_obj.common.rail import *
from page_obj.scg.scg_def_physical_interface import *
from page_obj.common.ssh import *
from page_obj.scg.scg_def_dhcp import *
from page_obj.scg.scg_dev import *
from page_obj.scg.scg_def_ifname_OEM import *
test_id = 140802
def test_c140802(browser):
try:
login_web(browser, url=dev1)
start_ospf_jyl(browser)
time.sleep(0.5)
edit_ospf_interface_jyl(browser, ospf_interface="br_0", auth_type="简单密码", text_key="123456", save="yes")
loginfo1 = get_log_info(browser, 管理日志)
time.sleep(0.5)
# print(loginfo1)
edit_ospf_interface_jyl(browser, ospf_interface="br_0", priority="1", hello_interval="10", dead_interval="40",
auth_type="无", save="yes")
stop_ospf_jyl(browser)
try:
assert "成功修改" in loginfo1
rail_pass(test_run_id, test_id)
except:
rail_fail(test_run_id, test_id)
assert "成功修改" in loginfo1
except Exception as err:
# 如果上面的步骤有报错,重新设备,恢复配置
print(err)
reload(hostip=dev1)
rail_fail(test_run_id, test_id)
assert False
if __name__ == '__main__':
pytest.main(["-v", "-s", "test_c" + str(test_id) + ".py"])
| [
"15501866985@163.com"
] | 15501866985@163.com |
8942552852c2ba4597d1c70ce5a8adf7e957cec7 | 801510e45d9aebe5c5b8b09a3ce4453a3a11a3ca | /django/oneTable/appOneTable/models.py | 13b38b271e63473033021ebe53d6d188f7645474 | [] | no_license | michelleshan/coding_dojo_python_course | 5581ebca0a645ba7231a2da2d2d64d6c3735bfc4 | e20e8195950004ef0aa09e6b0f84e7f05bd355e8 | refs/heads/master | 2022-11-21T01:34:54.309175 | 2020-07-16T03:29:45 | 2020-07-16T03:29:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | from django.db import models
# ONE Dungeon has MANY prisoners
# ONE Prisoner has ONE Dungeon
# ONE to MANY
# ONE Dungeon has MANY dislikes
# ONE Prisoner has MANY dislikes
# MANY to MANY
class Dungeon(models.Model):
name = models.TextField()
num_people_inside = models.IntegerField()
location = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Prisoner(models.Model):
name = models.TextField()
dungeon_inside = models.ForeignKey(Dungeon,related_name="all_prisoners",on_delete=models.CASCADE)
dungeons_disliked = models.ManyToManyField(Dungeon,related_name='prisoners_that_dislike')
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True) | [
"michellehan@Michelles-Air.attlocal.net"
] | michellehan@Michelles-Air.attlocal.net |
70bd5e42cf5abc0bac19ba712cb49c33a704467a | 0420ce2fc8799d5fbd6e96313e6716f5e2ef825b | /bagogold/bagogold/migrations/0002_auto_20150626_2230.py | f994aba4d8be4dc46dd336f174e53b2b592b387d | [] | no_license | nizbel/bag-of-gold | 1da10acef4d73b8426ca3329b37a28c5f9587af4 | a3fd89eb47d33d546bd91947f033d71218c8700f | refs/heads/master | 2022-11-13T01:07:26.934813 | 2020-01-14T16:00:16 | 2020-01-14T16:00:16 | 275,689,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,554 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('bagogold', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Operacao',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('day_trade', models.NullBooleanField(default=False, verbose_name=b'\xc3\x89 day trade?')),
],
),
migrations.CreateModel(
name='OperacaoAcao',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('preco_unitario', models.DecimalField(verbose_name=b'Pre\xc3\xa7o unit\xc3\xa1rio', max_digits=11, decimal_places=2)),
('data', models.DateField(verbose_name=b'Data')),
('corretagem', models.DecimalField(verbose_name=b'Corretagem', max_digits=11, decimal_places=2)),
('emolumentos', models.DecimalField(verbose_name=b'Emolumentos', max_digits=11, decimal_places=2)),
('tipo_operacao', models.CharField(max_length=1, verbose_name=b'Tipo de opera\xc3\xa7\xc3\xa3o')),
('consolidada', models.NullBooleanField(verbose_name=b'Consolidada?')),
('acao', models.ForeignKey(to='bagogold.Acao')),
],
),
migrations.CreateModel(
name='Provento',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('valor_unitario', models.DecimalField(verbose_name=b'Valor unit\xc3\xa1rio', max_digits=11, decimal_places=7)),
('tipo_provento', models.CharField(max_length=1, verbose_name=b'Tipo de provento')),
('data_ex', models.DateField(verbose_name=b'Data EX')),
('data_pagamento', models.DateField(verbose_name=b'Data do pagamento')),
('acao', models.ForeignKey(to='bagogold.Acao')),
],
),
migrations.AddField(
model_name='operacao',
name='compra',
field=models.ForeignKey(related_name='compra', to='bagogold.OperacaoAcao'),
),
migrations.AddField(
model_name='operacao',
name='venda',
field=models.ForeignKey(related_name='venda', to='bagogold.OperacaoAcao'),
),
]
| [
"kingbowserii@gmail.com"
] | kingbowserii@gmail.com |
869a4c69c3206641fbf875e3c5dda79d6c2c898b | fde8c89b352076f95cc16e589b1baf18f7befb51 | /gabbi/json_parser.py | 430a64a64e3cfb16f9e4c2f2260c5414d4e57408 | [] | no_license | 571451370/devstack_mitaka | b11145256deab817bcdf60a01a67bb6b2f9ddb52 | 1bdd3f2598f91c1446b85c5b6def7784a2f6ab02 | refs/heads/master | 2020-08-26T12:53:07.482514 | 2017-04-12T01:32:55 | 2017-04-12T01:32:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,760 | py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Extend jsonpath_rw to add a len command."""
import jsonpath_rw
PARSER = None
class Len(jsonpath_rw.JSONPath):
"""The JSONPath referring to the len of the current object.
Concrete syntax is '`len`'.
"""
def find(self, datum):
datum = jsonpath_rw.DatumInContext.wrap(datum)
try:
value = len(datum.value)
except TypeError:
return []
else:
return [jsonpath_rw.DatumInContext(value,
context=None,
path=Len())]
def __eq__(self, other):
return isinstance(other, Len)
def __str__(self):
return '`len`'
def __repr__(self):
return 'Len()'
class GabbiJsonPathParser(jsonpath_rw.parser.JsonPathParser):
"""Custom gabbi LALR-parser for JsonPath"""
def p_jsonpath_named_operator(self, p):
"jsonpath : NAMED_OPERATOR"
if p[1] == 'len':
p[0] = Len()
else:
super(GabbiJsonPathParser, self).p_jsonpath_named_operator(p)
def parse(path):
global PARSER
if not PARSER:
PARSER = GabbiJsonPathParser()
return PARSER.parse(path)
| [
"tony.pig@gmail.com"
] | tony.pig@gmail.com |
0c32e339b8ce62f268067fe422d2a0647eb1a8f6 | 160f08e768d7271f9522ad2597ac4ee79c04477a | /src/c3nav/editor/migrations/0007_auto_20170629_1327.py | b6eef0ff11322c72847b632eda0ad004cbc53e55 | [
"Apache-2.0"
] | permissive | c3nav/c3nav | 6254724dfc8589ee03c6028577befd7c65b05857 | 1a4ef5caa06ddacc8d9370b5adcee248fd4f55f7 | refs/heads/main | 2023-08-04T08:36:18.431458 | 2023-07-24T09:57:18 | 2023-07-24T09:57:18 | 56,852,994 | 140 | 47 | Apache-2.0 | 2023-07-05T22:55:27 | 2016-04-22T12:13:51 | Python | UTF-8 | Python | false | false | 686 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-29 13:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('editor', '0006_auto_20170629_1222'),
]
operations = [
migrations.AddField(
model_name='changeset',
name='description',
field=models.TextField(default='', max_length=1000, verbose_name='Description'),
),
migrations.AddField(
model_name='changeset',
name='title',
field=models.CharField(default='', max_length=100, verbose_name='Title'),
),
]
| [
"laura@codingcatgirl.de"
] | laura@codingcatgirl.de |
f7d836cffddca933e0110c1cf6abb4867b2437a0 | a140b45f9f16b74353d15ed573ea765b3fef046d | /algorithms/leet.0703.src.1.py | b36a1c7529c8b932d297432e523a94f045ad3ef2 | [] | no_license | fish-ball/leetcode | 258d4b37f05560d914bcd29f7c54820deeadb33f | 3dfd8f73c65d43cc2766c20700a619141acb927b | refs/heads/master | 2023-05-28T18:32:43.638675 | 2023-05-20T04:25:23 | 2023-05-20T04:25:23 | 31,968,994 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | import heapq
class KthLargest:
def __init__(self, k: int, nums: List[int]):
self.k = k
heapq.heapify(nums)
self.nums = nums
def add(self, val: int) -> int:
heapq.heappush(self.nums, val)
while len(self.nums) > self.k:
heapq.heappop(self.nums)
return self.nums[0]
# Your KthLargest object will be instantiated and called as such:
# obj = KthLargest(k, nums)
# param_1 = obj.add(val)
| [
"noreply@github.com"
] | fish-ball.noreply@github.com |
a7b2ab6cad42ec68d2b70750712b01acfc831215 | 11dbcc94972a370d92b190cc071826d90ae3ff84 | /conjugation/migrations/0014_auto_20180412_1343.py | 3813e57c26684e4265033d31da78c4628acbf6c0 | [
"Apache-2.0"
] | permissive | 5CORNERS/www.le-francais.ru | ef99b401c24eb7a2b84c04bdf638fc7460e05d81 | ab1a77f99a53b4b66a1c4961c335a288ae38b40d | refs/heads/master | 2023-09-01T15:59:46.534050 | 2023-03-14T15:18:45 | 2023-03-14T15:18:45 | 10,008,050 | 5 | 2 | Apache-2.0 | 2023-08-19T19:17:19 | 2013-05-12T02:06:15 | Python | UTF-8 | Python | false | false | 438 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-04-12 10:43
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('conjugation', '0013_auto_20180412_1321'),
]
operations = [
migrations.RenameField(
model_name='verb',
old_name='no_female',
new_name='masculin_only',
),
]
| [
"anton.dumov@gmail.com"
] | anton.dumov@gmail.com |
4a4ea8d341833f55cde5f4b145d6add741371c2b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02788/s685748566.py | 41028a13b217903ab98ffe4b1a1224e7edb5ce04 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | import math
from bisect import bisect_right
n, d, a = map(int, input().split())
x_list = []
max_x = 0
for _ in range(n):
x, h = map(int, input().split())
x -= 1
x_list.append([x, h])
max_x = max(max_x, x)
x_list.sort()
xx = [x[0] for x in x_list]
hh = [x[1] for x in x_list]
ans = 0
accum = [0 for _ in range(n)]
for index, [x, h] in enumerate(x_list):
if index != 0:
accum[index] += accum[index - 1]
cnt = max(math.ceil((hh[index] - accum[index]) / a), 0)
ans += cnt
index_right = bisect_right(xx, xx[index] + (2 * d))
accum[index] += cnt * a
if index_right < n:
accum[index_right] -= cnt * a
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
7c5ab14f132e8abc3055cf0b989fb0c5a14bba46 | 926b4949f31b99e68e07fdc5c181becf90870e26 | /BioCrowd/apps/login/forms.py | 04860331c93eb7eb4fb2c786d72126591d52af87 | [] | no_license | bxm156/BioCrowd | a563728212d712bc4bfd2cd4b0204789a0a8cc7b | de407fc1640cccbc5354de0dfeb3586fec792899 | refs/heads/master | 2021-01-13T02:14:14.844716 | 2013-05-15T01:15:04 | 2013-05-15T01:15:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,226 | py | from django.contrib.auth.forms import AuthenticationForm
from django import forms
from crispy_forms.layout import Submit, Layout, Fieldset, Field
from crispy_forms.bootstrap import FormActions
from crispy_forms.helper import FormHelper
class CrispyAuthenticationForm(AuthenticationForm):
username = forms.CharField(max_length=254, label="")
password = forms.CharField(label="", widget=forms.PasswordInput)
remember_me = forms.BooleanField(required=False)
def __init__(self, *args, **kwargs):
super(CrispyAuthenticationForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_id = 'id-login'
self.helper.form_method = 'post'
self.helper.form_action = '/login/'
self.helper.form_tag = False
self.helper.layout = Layout(
Field('username', label='', placeholder="Email address", css_class="input-block-level"),
Field('password', label='', placeholder="Password", css_class="input-block-level"),
'remember_me',
Submit('submit', 'Sign in', css_class="btn-large")
)
#self.helper.filter(basestring, greedy=True).wrap(Field, css_class="input-xlarge") | [
"bxm156@case.edu"
] | bxm156@case.edu |
c383e7a60082d7a8dadc8d9296c4db641dfa7a47 | 307d3837d31f9e3728af2b62ca51ebf63fe6ec6b | /hall_of_fame/kimdonghun/[BOJ]2775_IWillBeAWomenPresident.py | e10e3178d5f2338e9e890aeb876d3a082e2d1843 | [] | no_license | ellynhan/challenge100-codingtest-study | 905043497d154b8a7333ca536e536d013f6e7454 | bcdc6d04f13b12ba80b42e066f9d244d7c2cc698 | refs/heads/master | 2023-09-01T14:10:13.481013 | 2023-08-27T14:38:52 | 2023-08-27T14:38:52 | 401,561,230 | 162 | 176 | null | 2023-09-09T14:56:25 | 2021-08-31T03:30:36 | C++ | UTF-8 | Python | false | false | 413 | py | import sys
import math
T = int(sys.stdin.readline())
for i in range(T) :
K = int(sys.stdin.readline())
N = int(sys.stdin.readline())
m_list = [0] * (N+1)
for l in range(N+1) :
m_list[l] = l
for j in range(K) :
for l in range(1, N+1) :
m_list[l] = m_list[l] + m_list[l-1]
#print(m_list)
print(m_list[N])
| [
"wown252@naver.com"
] | wown252@naver.com |
672b5ae11c94cbd93c53a45adbed6015e142ce3e | 3f6088cf1aaaddc18ca1c6f2d5bfc69590941d60 | /Xianyang_dwt/projects/gbrt_multi_step_one_month.py | 1a6086934a4a27dec07cefec4a9b627a52b417ca | [
"MIT"
] | permissive | YX577/MonthlyRunoffForecastByAutoReg | 80038b1b0401d0dbe9b4b67cf531298090815cf7 | 2d66c628141f001e4ffb3dc3b7520a0f0f0ff239 | refs/heads/master | 2022-03-30T10:48:30.165288 | 2020-01-17T02:36:47 | 2020-01-17T02:36:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | import matplotlib.pyplot as plt
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
from variables import multi_step_lags
import sys
sys.path.append(root_path)
from models import multi_step_gbrt
if __name__ == '__main__':
multi_step_gbrt(
root_path=root_path,
station='Xianyang',
decomposer='dwt',
predict_pattern='forecast',
llags_dict = variables['lags_dict'],
model_id=1
)
plt.show()
| [
"zuojianyi@outlook.com"
] | zuojianyi@outlook.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.