blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a52af025f64100a1354ba9ff12d253bd3a3be007 | 4741174571b160b62ce5f3dc5f505b7e06819f69 | /linked list/add 1 to number.py | cbec023fd4bc92b92f77b17833938ac13aac062b | [] | no_license | AprajitaChhawi/geeksforgeeks | 754554f86e66ab3ca861966b02e36d246e0efc21 | 06fb42dfaaaa85c1a5bb9ca3a4b13a050fb782bf | refs/heads/master | 2022-12-27T07:35:19.516512 | 2020-10-15T15:48:40 | 2020-10-15T15:48:40 | 291,464,370 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,537 | py | #User function Template for python3
'''
class Node:
def __init__(self, data): # data -> value stored in node
self.data = data
self.next = None
'''
def addOne(head):
    """Add 1 to the decimal number stored in the list and return its head.

    The list stores one digit per node, most-significant digit first.
    Digits are rewritten in place; a new leading node (of the same node
    class) is prepended only when the result gains a digit, e.g. 999 -> 1000.

    The original body referenced an uninitialized accumulator ``a`` (an
    UnboundLocalError), skipped the last node, and never modified the list.
    """
    if head is None:
        return head
    # Read the number off the list and count the nodes.
    value = 0
    length = 0
    node = head
    while node:
        value = value * 10 + node.data
        length += 1
        node = node.next
    digits = [int(ch) for ch in str(value + 1)]
    # The result grew by one digit: prepend a node for the new leading digit.
    if len(digits) > length:
        new_head = type(head)(digits[0])
        new_head.next = head
        head = new_head
    # Write the digits back into the (possibly extended) chain of nodes.
    node = head
    for digit in digits:
        node.data = digit
        node = node.next
    return head
#Returns new head of linked List.
#{
# Driver Code Starts
#Initial Template for Python 3
# Node Class
class Node:
    """One element of a singly linked list, holding a single value."""

    def __init__(self, data):
        self.data = data   # payload supplied by the caller
        self.next = None   # successor node; linked up later by the list
# Linked List Class
class LinkedList:
    """Minimal singly linked list that keeps track of both ends."""

    def __init__(self):
        self.head = None
        self.tail = None

    def insert(self, value):
        """Create a node holding *value* and append it at the tail."""
        node = Node(value)
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.tail.next = node
            self.tail = node
def PrintList(head):
    """Print every node's data on a single line with no separators."""
    node = head
    while node is not None:
        print(node.data, end='')
        node = node.next
# Driver: reads t test cases; each case is a number given as a digit string,
# loaded into a linked list one digit per node, incremented via addOne(),
# then printed back out.
if __name__ == '__main__':
    t=int(input())
    for _ in range(t):
        num = input()
        ll = LinkedList() # create a new linked list 'll1'.
        for digit in num:
            ll.insert(int(digit)) # add to the end of the list
        resHead = addOne(ll.head)
        PrintList(resHead)
        print()  # newline after each result
# } Driver Code Ends
| [
"noreply@github.com"
] | noreply@github.com |
cbba4063f542e0bf519d84589276fccc8b5d45c3 | e7e5ac71c941e3daf82781249ae6d32d8614f78e | /2021/day-01/solve.py | 7cf5aad6252eabebb08679fdc50aa41991bf2cad | [
"MIT"
] | permissive | amochtar/adventofcode | 7f952ebee6b41aa5147cc788710fb054579742e7 | 292e7f00a1e19d2149d00246b0a77fedfcd3bd08 | refs/heads/master | 2022-07-14T22:46:21.175533 | 2021-12-15T08:14:17 | 2021-12-15T08:28:43 | 222,647,709 | 1 | 0 | MIT | 2022-06-22T04:45:13 | 2019-11-19T08:36:02 | Python | UTF-8 | Python | false | false | 575 | py | #!/usr/bin/env python
import aoc
@aoc.timing
def part1(inp: str):
    """Count measurements that are larger than the one before them."""
    readings = aoc.ints(inp)
    increases = 0
    for previous, current in aoc.sliding_window(readings):
        if current > previous:
            increases += 1
    return increases
@aoc.timing
def part2(inp: str):
    """Count increases between sums of three-measurement sliding windows."""
    window_sums = (sum(window) for window in aoc.sliding_window(aoc.ints(inp), 3))
    return sum(1 for previous, current in aoc.sliding_window(window_sums) if current > previous)
# with open('test.txt', 'r') as f:
# inp = f.read()
# print("Part 1:", part1(inp))
# print("Part 2:", part2(inp))
# Run both puzzle parts against the real input file.
with open('input.txt', 'r') as f:
    inp = f.read()
print("Part 1:", part1(inp))
print("Part 2:", part2(inp))
| [
"ade@instruqt.com"
] | ade@instruqt.com |
688a69eeefdd18fc59f72c8a0c55e7ada6bac042 | 00ccdc877771cb0cf493526d1e201e0f625bf5e7 | /dohq_teamcity/models/vcs_root_entry.py | 71eac569239c8a77d6638dac74da2b020c9df602 | [
"MIT"
] | permissive | expobrain/teamcity | a52928045166bb5d34f4a0396cb840bfee8f43d5 | 9f04c0692a2c5b277a608c2f11cc1fb48e0c87e2 | refs/heads/master | 2020-04-13T13:11:07.270515 | 2018-10-18T01:40:06 | 2018-10-18T01:40:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,526 | py | # coding: utf-8
from dohq_teamcity.custom.base_model import TeamCityObject
# from dohq_teamcity.models.vcs_root import VcsRoot # noqa: F401,E501
class VcsRootEntry(TeamCityObject):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared type, used by the (de)serializer.
    swagger_types = {
        'checkout_rules': 'str',
        'id': 'str',
        'inherited': 'bool',
        'vcs_root': 'VcsRoot'
    }
    # Python attribute name -> JSON key in the TeamCity REST payload.
    attribute_map = {
        'checkout_rules': 'checkout-rules',
        'id': 'id',
        'inherited': 'inherited',
        'vcs_root': 'vcs-root'
    }
    def __init__(self, checkout_rules=None, id=None, inherited=False, vcs_root=None, teamcity=None):  # noqa: E501
        """VcsRootEntry - a model defined in Swagger"""  # noqa: E501
        # Backing fields for the properties below; only overwritten when a
        # non-None value was supplied, so unset fields stay None.
        self._checkout_rules = None
        self._id = None
        self._inherited = None
        self._vcs_root = None
        self.discriminator = None
        if checkout_rules is not None:
            self.checkout_rules = checkout_rules
        if id is not None:
            self.id = id
        if inherited is not None:
            self.inherited = inherited
        if vcs_root is not None:
            self.vcs_root = vcs_root
        super(VcsRootEntry, self).__init__(teamcity=teamcity)
    # --- generated accessors; note the setters perform no validation ---
    @property
    def checkout_rules(self):
        """Gets the checkout_rules of this VcsRootEntry.  # noqa: E501

        :return: The checkout_rules of this VcsRootEntry.  # noqa: E501
        :rtype: str
        """
        return self._checkout_rules
    @checkout_rules.setter
    def checkout_rules(self, checkout_rules):
        """Sets the checkout_rules of this VcsRootEntry.

        :param checkout_rules: The checkout_rules of this VcsRootEntry.  # noqa: E501
        :type: str
        """
        self._checkout_rules = checkout_rules
    @property
    def id(self):
        """Gets the id of this VcsRootEntry.  # noqa: E501

        :return: The id of this VcsRootEntry.  # noqa: E501
        :rtype: str
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this VcsRootEntry.

        :param id: The id of this VcsRootEntry.  # noqa: E501
        :type: str
        """
        self._id = id
    @property
    def inherited(self):
        """Gets the inherited of this VcsRootEntry.  # noqa: E501

        :return: The inherited of this VcsRootEntry.  # noqa: E501
        :rtype: bool
        """
        return self._inherited
    @inherited.setter
    def inherited(self, inherited):
        """Sets the inherited of this VcsRootEntry.

        :param inherited: The inherited of this VcsRootEntry.  # noqa: E501
        :type: bool
        """
        self._inherited = inherited
    @property
    def vcs_root(self):
        """Gets the vcs_root of this VcsRootEntry.  # noqa: E501

        :return: The vcs_root of this VcsRootEntry.  # noqa: E501
        :rtype: VcsRoot
        """
        return self._vcs_root
    @vcs_root.setter
    def vcs_root(self, vcs_root):
        """Sets the vcs_root of this VcsRootEntry.

        :param vcs_root: The vcs_root of this VcsRootEntry.  # noqa: E501
        :type: VcsRoot
        """
        self._vcs_root = vcs_root
| [
"allburov@gmail.com"
] | allburov@gmail.com |
87d2010ca3b5538ef7cbec7886af70655cd31c84 | cf587df9bebf15a88582b3d16d0f680478c636fb | /regenerator/__init__.py | e391fc753813be469f56a27210452ba3c510fc82 | [
"BSD-2-Clause"
] | permissive | ghas-results/regenerator | 69383b7467474d7a4a2a47af947e34435ac07c4b | 933e690b122df86b1d4b8404a47ea227eb478f4c | refs/heads/main | 2023-08-09T22:56:16.705355 | 2021-03-11T23:54:38 | 2021-03-11T23:54:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | '''Reentrant generator (regenerator) stream.'''
from .stream import *
| [
"Elliott.Forney@ibm.com"
] | Elliott.Forney@ibm.com |
2dc9330666f5fbcb6526ba3adaba2c90af3de318 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03032/s021397547.py | d664139ae2e131d0ee24b03e0e38925747d285b4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | from bisect import bisect_left
# AtCoder "jewels in a dequeue" style problem: take up to K jewels from
# either end of V, then optionally put some back, maximizing the kept sum.
N,K = map(int,input().split())
V = list(map(int,input().split()))
vmax = -10**9
# Brute force: n jewels taken from the left end, m from the right end
# (n + m <= min(N, K), so the two slices never overlap).
for n in range(min(N,K)+1):
    A = V[:n]
    for m in range(min(N,K)-n+1):
        B = V[N-m:]
        B = B+A
        B = sorted(B)
        # Discard the most negative jewels, limited by the K-n-m
        # operations that remain after taking n+m jewels.
        ind = bisect_left(B,0)
        k = min(ind,K-n-m)
        v = sum(B[k:])
        vmax = max(vmax,v)
print(vmax)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
1e4cd8ed5c9c2fd78cfccda92d0753fde46d8b3d | 1c9d9a4fdcc4f8d5ae420dd876fef054c38a89aa | /raypath_plot.py | a258f27ef90f109e9bb977bf023c5126d705b549 | [] | no_license | tianyining/arabian-anisotropy | 751a673a3a3bc48ede6cf9485b8220548eb0bcfd | afc826c6554a502000495a86cfcbbf365c1e75cd | refs/heads/master | 2023-03-25T07:57:04.797570 | 2021-03-18T14:19:27 | 2021-03-18T14:19:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,359 | py | """
for plotting raypath coverage plots. Draws lines for each station pair. Can be run for all periods or a single period
"""
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from formatter import read_raw_file
import matplotlib.cm as cm
from matplotlib.colors import Normalize
def plot(period, ax):
    """Draw one raypath-coverage panel for *period* seconds onto *ax*.

    Plots a straight line for every station pair in the raw file, colored
    by the pair's velocity via the module-level colormap.

    NOTE(review): relies on the module-level globals ``boundingbox`` and
    ``m`` that are only defined inside the ``__main__`` guard -- importing
    this module and calling plot() directly would raise NameError.
    """
    df = read_raw_file(period)
    # boundingbox = [25, 71, 8, 50] # (x0, x1, y0, y1)
    # proj = ccrs.LambertConformal(central_longitude=(boundingbox[0] + (boundingbox[1] - boundingbox[0]) / 2),
    #                              central_latitude=(boundingbox[2] + (boundingbox[3] - boundingbox[2]) / 2),
    #                              standard_parallels=(15, 40))
    ax.set_extent(boundingbox, crs=ccrs.PlateCarree())
    # ax.scatter(df.lon_sta1, df.lat_sta1, transform=ccrs.PlateCarree())
    # ax.scatter(df.lon_sta2, df.lat_sta2, transform=ccrs.PlateCarree())
    # Very low alpha so dense ray coverage shows up as darker regions.
    for row in df.itertuples():
        xs = [row[4], row[2]] # lon2, lon1
        ys = [row[3], row[1]] # lat2, lat1
        ax.plot(xs, ys, c=m.to_rgba(row[5]), alpha=0.01, transform=ccrs.PlateCarree())
    ax.coastlines()
    ax.gridlines(draw_labels=True)
    ax.set_title(str(period) + 's', fontsize=30)
    # plt.savefig(f'raypath_coverage/{period}.png')
    return ax
def main():
    """Build the 3x3 grid of raypath-coverage panels for all periods.

    Two unused axes (slots 6 and 8) are deleted so the seven periods fill
    the remaining panels, with 70 s remapped into slot 7.
    """
    proj = ccrs.LambertConformal(central_longitude=(boundingbox[0] + (boundingbox[1] - boundingbox[0]) / 2),
                                 central_latitude=(boundingbox[2] + (boundingbox[3] - boundingbox[2]) / 2),
                                 standard_parallels=(15, 40))
    fig, axs = plt.subplots(nrows=3, ncols=3, sharex=True, sharey=True, subplot_kw={'projection': proj},
                            figsize=(35, 26), squeeze=False)
    deraveled = axs.ravel()
    fig.delaxes(deraveled[8])
    fig.delaxes(deraveled[6])
    # Map each period to a subplot slot; 70 s skips the deleted slot 6.
    pairings = {i[1]: i[0] for i in enumerate([10, 18, 25, 36, 46, 55, 70])}
    pairings[70] = 7
    for period in [10, 18, 25, 36, 46, 55, 70]:
        ax = deraveled[pairings[period]]
        plot(period, ax)
    fig.tight_layout()
    fig.suptitle('Raypath Coverage', fontsize=50)
    fig.colorbar(m)
    plt.subplots_adjust(top=0.92)
    plt.show()
if __name__ == '__main__':
    # Map extent (lon_min, lon_max, lat_min, lat_max) and the shared
    # velocity colour scale.  These become module-level globals that
    # plot() and main() read directly.
    boundingbox = [25, 71, 8, 50]
    norm = Normalize(vmin=1.9, vmax=4.4)
    cmap = cm.plasma
    m = cm.ScalarMappable(norm=norm, cmap=cmap)
    main()
| [
"jbr43@cam.ac.uk"
] | jbr43@cam.ac.uk |
515ae7d62a3dd2af4a836e14d6f535ba3807c999 | d0cb60e92e6196f5621421301b5ba5ef778caa1c | /mysite/misiproba/urls.py~ | c84095cf797ee25dbc4b364d38964dede43e584f | [] | no_license | har11/djangotest | 9be5bf17fff2baef10879d2f7478ea5bffca6805 | 42d52bd69f445522bdd7650684e88b9321e2fda8 | refs/heads/master | 2020-05-18T08:03:46.440734 | 2013-10-23T20:23:20 | 2013-10-23T20:23:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | from django.conf.urls import patterns, url
from misiproba import views
# URL routes for the 'misiproba' app (old Django <1.8 patterns() style).
urlpatterns = patterns('',
        url(r'^$', views.IndexView.as_view(), name='index'),
        # NOTE(review): this pattern has no trailing '$' or '/', so it also
        # matches prefixes such as 'newuserfoo' -- confirm that is intended.
        url(r'^newuser', views.newuser, name='newuser'),
        url(r'^(?P<user_id>\d+)/edituser/$', views.edituser, name='edituser'),
        url(r'^(?P<user_id>\d+)/deleteuser/$', views.deleteuser, name='deleteuser'),
        #url(r'^edituser/$', views.edituser, name='edituser')
        )
| [
"har11&freemail.hu"
] | har11&freemail.hu | |
ae5d7ca9a3ec119ef5e31e62cd5e3e54613f467f | d8e7b9e1436266e7178fb37b74301d958c4b4507 | /counsel/log.py | 79706795b227b4745dee1e21e6e597dd0c03733c | [] | no_license | actionml/counsel | a6a3e4e33c05cda5164bf6ea9533a932184df30d | e7c3c369304e6a17201977fe25915a2c3afeb635 | refs/heads/master | 2020-03-10T06:53:54.893153 | 2018-04-12T12:36:09 | 2018-04-12T12:36:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | import logging
class OutputFormatter(logging.Formatter):
    """Render log records as ``[LEVEL] message``."""

    def format(self, record):
        # record.getMessage() merges record.args into the message only when
        # args were actually supplied, so messages containing a literal '%'
        # (e.g. "100% done") no longer raise.  The original implementation
        # unconditionally applied 'fmt % record.args', which broke on such
        # messages and double-processed the already-substituted string.
        return '[%s] %s' % (record.levelname, record.getMessage())
## Use root logger. Since a child handler is created with a level
# (different from NOTSET) it will effectively process message with
# with eaqual or higher priorities
#
# To silience logging set console.setLevel(60) (which is more than critical)
#
log = logging.getLogger()              # root logger, so child loggers inherit this handler
console = logging.StreamHandler()      # writes to stderr by default
console.setLevel(logging.WARN)         # default verbosity; raise above CRITICAL to silence
console.setFormatter(OutputFormatter())
log.addHandler(console)
| [
"dennybaa@gmail.com"
] | dennybaa@gmail.com |
8d8b0ab231964b949fd0e44dbc04b96e34e71f6c | e2eba55bfcd30ec6109243ad83efa3be4f9d086a | /platzigram/settings/prod.py | 234d11bac9a6f0e7a4131af55e3ef991691d9270 | [] | no_license | peralson/instagram-copy | 7c40cdea9efaa09ee1428ee757498e85f28bd77f | 129689da28c0477f08a3d0af4b9cabcca5af7954 | refs/heads/master | 2022-04-28T18:05:27.384470 | 2020-04-13T23:01:12 | 2020-04-13T23:01:12 | 255,396,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | from platzigram.settings.base import *
# Production overrides of the base Django settings.
DEBUG = False  # never enable DEBUG on the production host
ALLOWED_HOSTS = ['platzigram.clapfy.es']
STATIC_ROOT = 'staticfiles'
# NOTE(review): serving media from the site root ('/') is unusual -- confirm.
MEDIA_URL = '/'
"pabloperaltapalacios@gmail.com"
] | pabloperaltapalacios@gmail.com |
e6af2017b432e17d227eb8a96646038566ecc7c5 | 40c1db9961c99487cf718b7d95da7998a41e6bb8 | /itens/models.py | 21785a367d3d62a1b2b02ac668fab6e80f988a71 | [] | no_license | FSaeta/Projeto-PPADS | 4c9c3e28da34d2fa50ef0592585f5db7a0c4f722 | e8ca98e58b97c87fb4db893efbad3c99a73e60ac | refs/heads/master | 2023-07-21T05:46:04.286122 | 2023-07-06T15:43:50 | 2023-07-06T15:43:50 | 374,760,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,415 | py | from django.db import models
from django.urls.base import reverse
from datetime import datetime
from usuarios.models import Usuario
class Categoria(models.Model):
    """Lookup table of item categories shared by books, films and series."""
    nome = models.CharField("Categoria", max_length=255)
    def __str__(self):
        return self.nome
class Itens(models.Model):
    """Abstract base model with the fields and moderation helpers shared
    by Livro, Filme and Serie."""
    data_criacao = models.DateField("Data de Criação", auto_now_add=True)
    data_atualizacao = models.DateField("Data de Atualização", auto_now=True)
    # Owner is nullable so deleting a user preserves their items.
    user_id = models.ForeignKey(Usuario, on_delete=models.SET_NULL, verbose_name="usuario", null=True)
    titulo = models.CharField("Título", max_length=100, unique=True)
    pais = models.CharField("Pais", max_length=100)
    # NOTE(review): datetime.now().year is evaluated once at import time,
    # not per save; pass a callable if a fresh default per row is intended.
    ano_lancamento = models.IntegerField("Ano de Lançamento", default=datetime.now().year)
    categoria = models.ForeignKey(Categoria, verbose_name="Categoria", on_delete=models.SET_NULL, null=True)
    tipo = models.CharField("Tipo", max_length=100)
    # New items start inactive until approved by a superuser.
    ativo = models.BooleanField("Ativo", default=False)
    class Meta:
        abstract = True
    def aprovar_cadastro(self, user):
        """Mark the item active; only superusers may approve (no-op otherwise)."""
        if user.is_superuser:
            self.ativo = True
            self.save()
    def excluir_cadastro(self, user):
        """Delete the item; only superusers may do so (no-op otherwise)."""
        if user.is_superuser:
            self.delete()
    def get_media_avaliacoes(self):
        """Return the rounded mean of related ratings, or a placeholder string."""
        soma = 0
        qtd = 0
        for avaliacao in self.avaliacao_set.all():
            soma += avaliacao.valor
            qtd += 1
        if qtd > 0:
            media = round(soma/qtd)
            return media
        # NOTE(review): returns an int above but a str here -- callers must
        # handle both types.
        return 'Sem Avaliações'
class Livro(Itens):
    """Book item: adds volume/author/publisher to the shared Itens fields."""
    volume = models.PositiveSmallIntegerField("Volume")
    autor = models.CharField("Autor", max_length=100)
    editora = models.CharField("Editora", max_length=100)
    def get_absolute_url(self):
        # Detail page for this book.
        return reverse("itens:livro", kwargs={'pk': self.pk})
class Filme(Itens):
    """Film item: adds optional volume, director and cast fields."""
    volume = models.PositiveSmallIntegerField("Volume", null=True)
    diretor = models.CharField("Diretor", max_length=100)
    elenco = models.TextField("Elenco")
    def get_absolute_url(self):
        # Detail page for this film.
        return reverse("itens:filme", kwargs={'pk': self.pk})
class Serie(Itens):
    """TV-series item: adds season count, director and cast fields."""
    qtd_temporadas = models.PositiveSmallIntegerField("Número de temporadas")
    diretor = models.CharField("Diretor", max_length=100)
    elenco = models.TextField("Elenco")
    def get_absolute_url(self):
        # Detail page for this series.
        return reverse("itens:serie", kwargs={'pk': self.pk})
"fek2009@bol.com.br"
] | fek2009@bol.com.br |
7cd796d4633b40da9af7960b2669410ebd453ada | d3cbf5af9aa3f9945c12831a1a0d30f8ee6265da | /count.py | 3e84ef67571b2863d2f4bde036105909ac55b0c5 | [] | no_license | LongNKCoder/algorithm_learning | d5faf846feec75700e5cad723ea9d566a96bac9b | 5f89873ded68b662059adf036d8651f5b73c417d | refs/heads/main | 2023-04-20T18:37:04.290832 | 2021-05-12T10:48:21 | 2021-05-12T10:48:21 | 349,301,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | def countConsecutive(number):
# constraint on values of L gives us the
# time Complexity as O(N^0.5)
count = 0
L = 1
while( L * (L + 1) < 2 * number):
a = (1.0 * number - (L * (L + 1) ) / 2) / (L + 1)
if (a - int(a) == 0.0):
count += 1
L += 1
return count
# Example from the original exercise: a very large input.
number = 15000000000000
print(countConsecutive(number))
| [
"lk.nguyen@linkbynet.com"
] | lk.nguyen@linkbynet.com |
55107bfc0b70cbf0ce0416d7d702e61475dc14dd | f26521284741a1f730e2d52de7426807247e08b6 | /Python/Topics/Class/Who is who/main.py | 2bcfcf86bdc076784c5ae9755f976ba6ac78e8bc | [
"MIT"
] | permissive | drtierney/hyperskill-problems | 0e6fe8ca418d1af700a5a1b1b2eed1f1f07b8e9e | b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0 | refs/heads/main | 2021-10-25T07:02:50.838216 | 2021-10-16T19:08:49 | 2021-10-16T19:08:49 | 253,045,232 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | class Angel:
color = "white"
feature = "wings"
home = "Heaven"
class Demon:
    """Class-attribute demo: the infernal counterpart of Angel."""
    home = "Hell"
    feature = "horns"
    color = "red"
# Demo: direct attribute access vs. dynamic lookup via getattr().
the_angel = Angel()
print(the_angel.color)
print(the_angel.feature)
print(the_angel.home)
the_demon = Demon()
for attribute in ['color', 'feature', 'home']:
    print(getattr(the_demon, attribute))
| [
"d.r.tierney@hotmail.co.uk"
] | d.r.tierney@hotmail.co.uk |
a30102c8069f2a64288c65f21399ddc2787cc43d | a2dcc2f7dcdb3a91383965dbb03858af7c765faa | /hashtags.py | b297e984c6ec4b4402203f534e1ed687df2873ee | [] | no_license | namitamhatre411/Sunny-with-a-chance-of-hashtags | b71de30bd1253889ef4a25594a815128b1adbe24 | f06353c985fc907f43f9230c53d864bb6e4b1bb2 | refs/heads/master | 2020-12-31T00:00:12.905472 | 2017-01-31T21:09:43 | 2017-01-31T21:09:43 | 80,562,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | import pandas as p
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import MultinomialNB
import scipy
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.svm import LinearSVC
from sklearn import svm
import nltk
from nltk.tokenize import wordpunct_tokenize
# Readable numpy output; load the labelled training tweets and the test set.
np.set_printoptions(suppress =True, precision=3)
train = p.read_csv('train.csv')
test = p.read_csv('test.csv')
class SnowballTokenizer(object):
    """Callable tokenizer: splits text on word/punctuation boundaries and
    Snowball-stems every token (plugged into TfidfVectorizer below)."""

    def __init__(self):
        self.wnl = nltk.stem.SnowballStemmer("english")

    def __call__(self, doc):
        stem = self.wnl.stem
        tokens = wordpunct_tokenize(doc)
        return [stem(token) for token in tokens]
# TF-IDF bag-of-words (stemmed, English stop words removed) feeding a
# linear SVM.
# NOTE(review): scikit-learn ignores token_pattern when a custom tokenizer
# is supplied, so r'\w{3,}' here has no effect -- confirm intended.
classify = Pipeline([('VEC',TfidfVectorizer(max_features=85000, strip_accents='unicode',
                     analyzer='word',token_pattern=r'\w{3,}',sublinear_tf=1,
                     ngram_range=(1, 1),stop_words = 'english',tokenizer = SnowballTokenizer())),
                     ('clf',svm.LinearSVC())])
# Labels: every column from the 5th onward (multi-label indicator matrix).
# NOTE(review): DataFrame.ix has long been removed; .iloc is the modern form.
y = np.array(train.ix[:,4:])
classify = classify.fit(train['tweet'],y)
| [
"namitamhatre@Namitas-MacBook-Pro.local"
] | namitamhatre@Namitas-MacBook-Pro.local |
704c62b00673b310a92ce453c982ad8a31dd5b8b | 13844d7554ecb6349fb62bca925b85eb37142771 | /TrueValueDetection.py | 194237dd33ef0c7e26d226e6b76673c548a4a66e | [] | no_license | YEJINLONGxy/shiyanlou-code | 35138656cb141b22beb0d039de8ca39434815dc8 | 6dd71f17dabc2fbfeb38ea488e514dc281ac16f4 | refs/heads/master | 2020-06-26T13:17:19.618178 | 2019-08-22T08:53:05 | 2019-08-22T08:53:05 | 199,642,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | #!/usr/bin/env python3
# The elegant way to test truthiness is:
# NOTE(review): 'x' is never defined, so this snippet is illustrative only
# and raises NameError if the file is actually executed.
if x:
    pass
# Do not do it like this:
if x == True:
    pass
| [
"1067530182@qq.com"
] | 1067530182@qq.com |
cb742459992f786fe4dc3efed83b660e36559b65 | 502ef945542800f9dd33ef93ba8222aa7e9fcd1b | /microblog/config.py | eae5f8416ece81a1b1c21f91c41a3468f11634b4 | [] | no_license | csbarker/lets_play_python | cd12e254998ed460712ab07ab2b2df4a3548a8aa | 309a70fe700d36ff0e8063bba90cb64679269bc0 | refs/heads/master | 2021-01-19T14:33:07.223991 | 2014-05-24T14:37:48 | 2014-05-24T14:37:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | import os
basedir = os.path.abspath(os.path.dirname(__file__))
# Database
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
# Search
WHOOSH_BASE = os.path.join(basedir, 'search.db')
## Settings
CSRF_ENABLED = True
SECRET_KEY = 'you-will-never-guess'
POSTS_PER_PAGE = 3
MAX_SEARCH_RESULTS = 50
## Common
OPENID_PROVIDERS = [
{ 'name': 'Google', 'url': 'https://www.google.com/accounts/o8/id' },
{ 'name': 'Yahoo', 'url': 'https://me.yahoo.com' },
{ 'name': 'AOL', 'url': 'http://openid.aol.com/<username>' },
{ 'name': 'Flickr', 'url': 'http://www.flickr.com/<username>' },
{ 'name': 'MyOpenID', 'url': 'https://www.myopenid.com' }
] | [
"callum.scott.barker@gmail.com"
] | callum.scott.barker@gmail.com |
8e404c16fc03ee455cc24ff6bbe18e6b0d3b45f9 | 1d632e96cb738fe219bd9033a2a92560934fb575 | /project-euler/euler108.py | 598e5975d49a0d5d6371e0f4b9ae222ab9f6452c | [] | no_license | mathyomama/scripts | 8d02025e0f5654210ec38f5ada11f7cd84013a24 | 50afd1b15b1cbfa54b5193cb4b6016a7700f531f | refs/heads/master | 2020-09-22T11:26:43.336236 | 2015-05-29T15:12:57 | 2015-05-29T15:12:57 | 6,764,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/env python
import sys
# incase you forget, the way to solve this problem is by realizing that
# f(n) = floor(number_of_factors(n^2)/2) + 1
def main():
    """Brute-force helper for Project Euler 108: find the first n = x*y/(x+y)
    produced by more than `limit` distinct (x, y) pairs, i.e. with more than
    `limit` solutions to 1/x + 1/y = 1/n.

    NOTE(review): Python 2 source (`print` statement, integer `/`); run
    under python2 only.
    """
    limit = int(sys.argv[1])
    n_dict = dict()  # n -> number of (x, y) pairs seen so far
    x = 3
    running = True
    while running:
        for y in range(2, x + 1):
            # 1/x + 1/y = 1/n has an integer n exactly when (x+y) divides x*y.
            if x*y%(x + y) == 0:
                test = x*y/(x + y)
                try:
                    n_dict[test] += 1
                    if n_dict[test] > limit:
                        print test, x, y
                        running = False
                        break
                except KeyError:
                    n_dict[test] = 1
        x += 1
n_dict[test] = 1
x += 1
if __name__ == "__main__":
sys.exit(main())
| [
"mathyomama@gmail.com"
] | mathyomama@gmail.com |
053ad976e6336633dc0f6004015d55286c7bdfaf | 35edabd50da3f037b12b9f1f4954d04663173fa2 | /basic_ref/009.collection_list_comprehension_array_filter.py | a61d758f1b686bae2608a8fed90f6db6baf5a56b | [] | no_license | udayshi/ev3 | eede6309bdb3732a2500709273eab36b7ca0a614 | 48d4f4c6c917f64db9148856419cfb429e1f225a | refs/heads/master | 2020-04-01T17:57:26.426379 | 2018-11-01T16:00:21 | 2018-11-01T16:00:21 | 153,461,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | mylist = [1, 4, -5, 10, -7, 2, 3, -1]
pos = [n for n in mylist if n > 0]
| [
"uday.shiwakoti@mullenlowegroup.com"
] | uday.shiwakoti@mullenlowegroup.com |
fafd1bed26be13432e33d8cefeb83118b95437c0 | 61b610cb0a2344786de832e0a0a2fea9f4fc539e | /2.py | 985113c9535284b0736bd3ab7b66a105803b5af4 | [] | no_license | JorgeVazquez19/PythonFirstSteps | 7cfe1cb5ca9fb396189b7d07bd46d7373102c7b0 | 66518c307bc449ffa987cd99e164b83cc199287b | refs/heads/master | 2020-04-20T06:01:50.322635 | 2019-02-01T09:28:15 | 2019-02-01T09:28:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | import re
from pip._vendor.distlib.compat import raw_input
password = raw_input("enter password: ")
reMayus = r'[A-Z]+'
reMinus = r'[a-z]+'
reNum = r'[0-9]+'
reExt = r'[\W]+'
if len(password) == 12 and re.match(reMayus, password) and re.match(reMinus, password) and re.match(reNum, password) and re.match(reExt, password):
print ("valid")
else:
print ("not valid")
| [
"jorgitoovazquez@hotmail.com"
] | jorgitoovazquez@hotmail.com |
c71cb016fd30053e434a2b42e23a96a22cca55b8 | 8bada7f2a894b1cc8d7e12b56c7df7d9cbb236e2 | /asynccsv.py | be7d2112ac76df01415362aea15f6c89c08f4049 | [
"MIT"
] | permissive | mattmiller87/RLScrape | 34ac573fe1c44eb0066b41040f1f930f872f1d2c | 6848d68d5997eb816b15663dbfe36066d36621ac | refs/heads/master | 2021-07-11T02:31:33.331638 | 2020-12-17T23:47:20 | 2020-12-17T23:47:20 | 225,527,126 | 3 | 4 | MIT | 2020-12-17T20:38:49 | 2019-12-03T03:59:49 | Python | UTF-8 | Python | false | false | 9,343 | py | #!/usr/bin/python3
import requests
import csv
import datetime
import argparse
import os
import re
from tqdm import tqdm as pbar
import asyncio
from aioify import aioify
from setup_logging import logger
from rlscrape import Webscrape
readibletime = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") # used for csvWrite
sem = asyncio.Semaphore(50) # control how many urls are being retrieved at a time
class csvIO:
    '''I/O for CSV: reads player links from the input CSV, fetches each
    player's stats asynchronously, and writes the combined rows out.

    Depends on the module-level globals ``results`` (argparse namespace)
    and ``sem`` (asyncio semaphore) set up elsewhere in this module.
    '''
    def __init__(self):
        checkFolders()
        self.csvinput = results.input
        self.csvoutput = results.output
        # self.seasons = results.seasons
        self.playlists = results.playlists
        self.latestseason = '16' #need a better way to update this, perhaps dynamically?
        self.header = []
        # tiertf: whether any tier column was requested on the command line.
        tierchoices = ['1T','2T','TournamentT','3T','All']
        tiermatch = [item for item in tierchoices if item in self.playlists]
        if len(tiermatch) > 0:
            self.tiertf = True
        else:
            self.tiertf = False
    def areadCSVLinks(self):
        '''read input CSV file. File MUST be structured either: preferred = *kwargs,Name,Link || optional = *kwargs,Link'''
        # Returns {row_index: {gamertag: {kwarg_index: value, 'platform': ..,
        # 'name': .., 'link': ..}}}.
        # NOTE(review): the first row both initializes the header AND is
        # parsed as a data row -- confirm the input file has no header line.
        with open(self.csvinput, 'r', newline='', encoding='latin-1') as csvread:
            reader = csv.reader(csvread)
            playerdict = {} # define a basic dict to pass csv information into
            i = 0
            for row in reader:
                playerdict[i] = {}
                if i < 1: # define headers
                    self.header = [str(i+1) for i in range(len(row))] # handle kwargs as header - assign number
                    self.header[-2] = "Name"
                    self.header[-1] = "Link"
                name,link = row[-2:] # select last two items
                if "overview" in link:
                    link = link.replace("/overview","")
                try:
                    gamertag = link.split('/')[-1] # last item in link is gamertag
                    platform = link.split('/')[-2] # item before gamertag is platform
                except IndexError:
                    logger.error("Gamertag:%(name)s Link:%(link)s is not formatted properly" % locals())
                else:
                    playerdict[i][gamertag] = {} # define dict for each gamertag and values for that gamertag
                    a = 0
                    for item in row: # handle kwargs
                        if len(row) - a > 2:
                            playerdict[i][gamertag][a] = item
                        a += 1
                    # Normalize platform aliases to tracker-network names.
                    if "ps4" == platform or "ps" == platform:
                        platform = "psn"
                    if "xbox" == platform:
                        platform = "xbl"
                    playerdict[i][gamertag]['platform'] = platform
                    playerdict[i][gamertag]['name'] = name
                    playerdict[i][gamertag]['link'] = link
                i += 1
        return playerdict
    async def aRetrieveData(self,gamertag,gamerdict):
        """Fetch one player's stats and build their CSV row:
        kwargs..., Name, Link, then the playlist columns from _dictToList."""
        platform = gamerdict['platform']
        name = gamerdict['name']
        link = gamerdict['link']
        scrape = Webscrape()
        newrow = []
        # Wrap the blocking scraper call so it runs in a thread.
        aioretrieve = aioify(obj=scrape.retrieveDataRLTracker, name='aioretrieve')
        data = await aioretrieve(gamertag=gamertag,platform=platform)
        newrow = self._dictToList(data)
        a = 0
        for k,v in gamerdict.items(): # handle kwargs
            if a == k:
                newrow.insert(a,v)
            a += 1
        newrow.insert(a,name)
        newrow.insert(a+1,link)
        return newrow
    def awriteCSV(self,newrows):
        '''write list of data to outputCSV file'''
        season = self.latestseason
        # Column labels per playlist key, e.g. '1' -> 'S16_1s_MMR'.
        # NOTE(review): _dictToList emits a 'TournamentT' key but this map
        # only has '3ST' (Solo3s) -- requesting TournamentT would misalign
        # the header with the data columns; confirm which key is correct.
        header_dict = {
            '1': "S%s_1s_MMR" % (season), '1GP': "S%s_1s_GamesPlayed" % (season), '1T': "S%s_1s_Tier" % (season),
            '2': "S%s_2s_MMR" % (season), '2GP': "S%s_2s_GamesPlayed" % (season), '2T': "S%s_2s_Tier" % (season),
            'Tournament': "S%s_Tournament_MMR" % (season), 'TournamentGP': "S%s_Tournament_GamesPlayed" % (season), '3ST': "S%s_Solo3s_Tier" % (season),
            '3': "S%s_3s_MMR" % (season), '3GP': "S%s_3s_GamesPlayed" % (season), '3T': "S%s_3s_Tier" % (season),
        }
        if "All" in self.playlists:
            self.header.extend(header_dict[k] for k in header_dict)
        else:
            self.header.extend(header_dict[k] for k in header_dict if k in self.playlists)
        with open(self.csvoutput, 'w',newline='', encoding='latin-1') as csvwrite:
            w = csv.writer(csvwrite, delimiter=',')
            w.writerow(self.header)
            for newrow in newrows:
                w.writerow(newrow)
    def _dictToList(self,dictdata):
        '''Take json formatted dictionary of playerdata and create a list which is better formatted for csv
        this is specifically designed for RSC'''
        tiertf = self.tiertf
        newdict = {}
        for gamertag,gdata in dictdata.items():
            for season,sdata in gdata.items():
                # One fixed-shape record per season; missing playlists stay None.
                newdict[season] = {
                    '1': None, '1GP': None, '1T' : None,
                    '2': None, '2GP': None, '2T' : None,
                    'Tournament': None, 'TournamentGP': None, 'TournamentT' : None,
                    '3': None, '3GP': None, '3T' : None
                }
                # NOTE(review): `playlist in 'Ranked Duel 1v1'` is a SUBSTRING
                # test, not equality -- e.g. 'Duel' or '1v1' also match.
                # Confirm the scraper's playlist names before tightening to ==.
                for playlist,pdata in sdata.items():
                    if playlist in 'Ranked Duel 1v1' and pdata is not None and pdata.items():
                        newdict[season]['1'] = pdata['MMR']
                        newdict[season]['1GP'] = pdata['Games Played']
                        if tiertf:
                            newdict[season]['1T'] = pdata['Tier Number']
                    if playlist in 'Ranked Doubles 2v2' and pdata is not None and pdata.items():
                        newdict[season]['2'] = pdata['MMR']
                        newdict[season]['2GP'] = pdata['Games Played']
                        if tiertf:
                            newdict[season]['2T'] = pdata['Tier Number']
                    if playlist in 'Tournament' and pdata is not None and pdata.items():
                        newdict[season]['Tournament'] = pdata['MMR']
                        newdict[season]['TournamentGP'] = pdata['Games Played']
                        if tiertf:
                            newdict[season]['TournamentT'] = pdata['Tier Number']
                    if playlist in 'Ranked Standard 3v3' and pdata is not None and pdata.items():
                        newdict[season]['3'] = pdata['MMR']
                        newdict[season]['3GP'] = pdata['Games Played']
                        if tiertf:
                            newdict[season]['3T'] = pdata['Tier Number']
        newlist = []
        for dictseason,v in newdict.items():
            if "All" in self.playlists:
                newlist.extend([v[k] for k in v])
            else:
                newlist.extend([v[k] for k in v if k in self.playlists])
        return newlist
    async def _safe_download(self,gamertag,platform):
        """Throttled wrapper around aRetrieveData (bounded by the module sem)."""
        async with sem: # only allow so many retrieve requests at a time - helps with progress bar too
            return await self.aRetrieveData(gamertag,platform)
def checkFolders():
    """Ensure the 'Scrapes' output directory exists, creating it on first run."""
    if os.path.exists("Scrapes"):
        return
    logger.info("Creating Scrapes folder...")
    os.makedirs("Scrapes")
async def singleRun():
    """Read the input CSV, fetch every player's stats concurrently (with a
    progress bar), and write the combined rows to the output CSV.

    NOTE(review): uses the module-level ``loop`` and ``results`` globals
    that are only bound inside the ``__main__`` guard.
    """
    logger.info("Start for csv input:%s" % (results.input))
    inputoutput = csvIO() # initialize class
    datadict = inputoutput.areadCSVLinks() # read the csv file
    tasks = []
    for i,idict in datadict.items():
        for k,v in idict.items():
            task = loop.create_task(inputoutput._safe_download(k,v)) # start the retrieve process
            tasks.append(task)
    responses = []
    # as_completed yields tasks as they finish, so rows are collected (and
    # later written) in completion order, not input order.
    for task in pbar(asyncio.as_completed(tasks),desc='retrieve',total=len(tasks)):
        responses.append(await task)
    inputoutput.awriteCSV(responses)
    logger.info("Finish for csv output:%s" % (results.output))
if __name__ == "__main__":
    '''Run locally to this script'''
    #Use comandline arguments for input
    #edit the default parameter to change options manually without commandline options
    # NOTE: `results` and `loop` become module-level globals consumed by
    # csvIO, checkFolders and singleRun above.
    parser = argparse.ArgumentParser(description='Scrape Commandline Options', add_help=True)
    parser.add_argument('-i', action='store', dest='input', help='Input CSV to use', default='example.csv')
    parser.add_argument('-o', action='store', dest='output', help='Output CSV to use', default='Scrapes/%s_RLTN.csv' % (readibletime)) #RLTN = RocketLeague Tracker Network
    ###
    # no longer can search for multiple seasons - this may be revisited at some point
    #parser.add_argument('-s', action='store', dest='seasons', help='retrieve for season(s) defined. Example: 8 9 11', nargs='+', default=['14']) #need a better way to update this, perhaps dynamically?
    ##
    parser.add_argument('-p', action='store', dest='playlists', help='playlist options. Example: 1 2 3S 3', choices=("1","2","Tournament","3","1GP","2GP","TournamentGP","3GP","1T","2T","TournamentT","3T","All"), nargs='+', default="['1','1GP','2','2GP','Tournament','TournamentGP','3','3GP']")
    results = parser.parse_args()
    loop = asyncio.get_event_loop()
    loop.run_until_complete(singleRun())
    loop.close()
"mattmiller87@gmail.com"
] | mattmiller87@gmail.com |
c60612d9601f0566000fea65a50c4c4af60a5282 | a1da8f289df9c088f13528feb8bcac339c005539 | /consensus_and_profile.py | 20e6b8294e23e1319be8abc6c72b2bb294794686 | [] | no_license | ajduberstein/rosalind_problems | 2b4a0d6efb9ae0b556d4c211617a013b42909d23 | f780b1b1a0ddbc5e75ca9ae6cc71a317cd219dab | refs/heads/master | 2021-01-13T01:40:55.709112 | 2014-05-22T19:12:36 | 2014-05-22T19:12:36 | 19,955,710 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,244 | py | #In progress
def prune(input_dset):
    """Parse a raw FASTA-style blob and return the sequence matrix transposed.

    Strips CRLFs, drops the leading '>', splits the records on '>', removes
    the 13-character label (e.g. 'Rosalind_0001') from the head of each
    record, and transposes so that each output row holds the characters at
    one position across all sequences.

    Assumes all sequences have equal length -- TODO confirm upstream input.
    """
    records = input_dset.replace('\r\n', '')[1:].split('>')
    # x[13:] skips the fixed-width 'Rosalind_XXXX' label on each record.
    sequences = [list(x[13:]) for x in records]
    # BUG FIX: the original hand-rolled transpose indexed both dimensions
    # with len(f), which only works for square matrices. zip(*rows) is the
    # general transpose for equal-length rows.
    return [list(column) for column in zip(*sequences)]
def consensus_and_profile(dna_strings):
    """Print the consensus string and return the profile matrix as text.

    ``dna_strings`` is expected to be the *transposed* sequence matrix (as
    produced by ``prune``): each element holds the characters found at one
    position across all sequences.

    Prints the consensus string (ties broken toward the later base in
    A, T, C, G order, matching the original ``>=`` comparison) and returns
    the profile as one line per base, e.g. ``"A: 2 0\\nT: 1 0\\n..."``.

    BUG FIX: the original built the return value by string-mangling
    ``str(defaultdict)``, whose key order and repr differ across Python
    versions -- the output was effectively non-deterministic. The profile
    string is now built directly in a fixed base order.
    """
    building_blocks = ('A', 'T', 'C', 'G')
    profile_matrix = {base: [] for base in building_blocks}
    # Count each base at each position.
    for position in dna_strings:
        for base in building_blocks:
            profile_matrix[base].append(position.count(base))
    # Consensus: most frequent base per position (>= keeps later ties,
    # exactly as the original loop did).
    consensus_string = ''
    for idx in range(len(dna_strings)):
        best_base, best_count = '', 0
        for base in building_blocks:
            if profile_matrix[base][idx] >= best_count:
                best_base, best_count = base, profile_matrix[base][idx]
        consensus_string += best_base
    print(consensus_string)
    return '\n'.join(
        '%s: %s' % (base, ' '.join(str(c) for c in profile_matrix[base]))
        for base in building_blocks)
| [
"ajduberstein@gmail.com"
] | ajduberstein@gmail.com |
fe76135fa9eccd572a16fd5da8714993539e245e | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/operations/_route_filters_operations.py | 0a30eb3cd481c39e0c762bf92fbb47cf91856be2 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 25,449 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RouteFiltersOperations(object):
    """RouteFiltersOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_03_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE: this class is emitted by the AutoRest code generator (see the file
    # header); regenerate rather than hand-editing request construction.
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    # Raw DELETE request; long-running-operation polling is layered on top by
    # begin_delete (polls via the 'location' header).
    def _delete_initial(
        self,
        resource_group_name,  # type: str
        route_filter_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'}  # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        route_filter_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller
        """Deletes the specified route filter.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_filter_name: The name of the route filter.
        :type route_filter_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                route_filter_name=route_filter_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'}  # type: ignore
    def get(
        self,
        resource_group_name,  # type: str
        route_filter_name,  # type: str
        expand=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.RouteFilter"
        """Gets the specified route filter.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_filter_name: The name of the route filter.
        :type route_filter_name: str
        :param expand: Expands referenced express route bgp peering resources.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RouteFilter, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_03_01.models.RouteFilter
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.RouteFilter"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('RouteFilter', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'}  # type: ignore
    # Raw PUT request; polling is layered on top by begin_create_or_update
    # (polls via 'azure-async-operation').
    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        route_filter_name,  # type: str
        route_filter_parameters,  # type: "models.RouteFilter"
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.RouteFilter"
        cls = kwargs.pop('cls', None)  # type: ClsType["models.RouteFilter"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        content_type = kwargs.pop("content_type", "application/json")
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'
        # Construct and send request
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(route_filter_parameters, 'RouteFilter')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('RouteFilter', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('RouteFilter', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'}  # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        route_filter_name,  # type: str
        route_filter_parameters,  # type: "models.RouteFilter"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller
        """Creates or updates a route filter in a specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_filter_name: The name of the route filter.
        :type route_filter_name: str
        :param route_filter_parameters: Parameters supplied to the create or update route filter
         operation.
        :type route_filter_parameters: ~azure.mgmt.network.v2020_03_01.models.RouteFilter
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either RouteFilter or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.RouteFilter]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["models.RouteFilter"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                route_filter_name=route_filter_name,
                route_filter_parameters=route_filter_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('RouteFilter', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'}  # type: ignore
    def update_tags(
        self,
        resource_group_name,  # type: str
        route_filter_name,  # type: str
        parameters,  # type: "models.TagsObject"
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.RouteFilter"
        """Updates tags of a route filter.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_filter_name: The name of the route filter.
        :type route_filter_name: str
        :param parameters: Parameters supplied to update route filter tags.
        :type parameters: ~azure.mgmt.network.v2020_03_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RouteFilter, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_03_01.models.RouteFilter
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.RouteFilter"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        content_type = kwargs.pop("content_type", "application/json")
        # Construct URL
        url = self.update_tags.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'
        # Construct and send request
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('RouteFilter', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'}  # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["models.RouteFilterListResult"]
        """Gets all route filters in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either RouteFilterListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.RouteFilterListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.RouteFilterListResult"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        def prepare_request(next_link=None):
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = 'application/json'
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            deserialized = self._deserialize('RouteFilterListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters'}  # type: ignore
    def list(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["models.RouteFilterListResult"]
        """Gets all route filters in a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either RouteFilterListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.RouteFilterListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.RouteFilterListResult"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        def prepare_request(next_link=None):
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = 'application/json'
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            deserialized = self._deserialize('RouteFilterListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeFilters'}  # type: ignore
| [
"noreply@github.com"
] | noreply@github.com |
9aff4776e7a2ddf4284e16a6e8a0267f27c5ea27 | 1cb7aeb570630c9743a5b0dc7a254197d26016de | /py/testdir_ec2_only/test_parse_syn_s3n_thru_hdfs.py | 877490a8fb08c2a833f6b77182c24885c8f4cc5c | [
"Apache-2.0"
] | permissive | devinshields/h2o | 576dbebc663265190cfca3fe8341b10d2243213c | 9d8b782e5cb7f38f3cb0086fef15ecec7d9282d5 | refs/heads/master | 2021-01-18T04:57:34.703132 | 2013-08-27T00:57:51 | 2013-08-27T00:57:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,071 | py | import unittest, time, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts
import h2o_browse as h2b
import h2o_import as h2i
class Basic(unittest.TestCase):
    """Integration test: parse synthetic datasets from s3n through HDFS into H2O.

    Requires a live H2O cluster (local or via h2o_hosts); not a unit test.
    Python 2 era code (print statements).
    """
    def tearDown(self):
        # Fail the test if H2O logged errors during the run.
        h2o.check_sandbox_for_errors()
    @classmethod
    def setUpClass(cls):
        # assume we're at 0xdata with it's hdfs namenode
        global localhost
        localhost = h2o.decide_if_localhost()
        if (localhost):
            h2o.build_cloud(1)
        else:
            # all hdfs info is done thru the hdfs_config michal's ec2 config sets up?
            h2o_hosts.build_cloud_with_hosts(1,
                # this is for our amazon ec hdfs
                # see https://github.com/0xdata/h2o/wiki/H2O-and-s3n
                hdfs_name_node='10.78.14.235:9000',
                hdfs_version='0.20.2')
    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()
    def test_parse_syn_s3n_thru_hdfs(self):
        # I put these file copies on s3 with unique suffixes
        # under this s3n "path"
        csvFilename = "*_10000x200*"
        trialMax = 1
        timeoutSecs = 500
        URI = "s3n://home-0xdiag-datasets/syn_datasets"
        s3nKey = URI + "/" + csvFilename
        for trial in range(trialMax):
            # since we delete the key, we have to re-import every iteration
            # s3n URI thru HDFS is not typical.
            importHDFSResult = h2o.nodes[0].import_hdfs(URI)
            s3nFullList = importHDFSResult['succeeded']
            ### print "s3nFullList:", h2o.dump_json(s3nFullList)
            self.assertGreater(len(s3nFullList),1,"Didn't see more than 1 files in s3n?")
            key2 = "syn_datasets_" + str(trial) + ".hex"
            print "Loading s3n key: ", s3nKey, 'thru HDFS'
            start = time.time()
            parseKey = h2o.nodes[0].parse(s3nKey, key2,
                timeoutSecs=500, retryDelaySecs=10, pollTimeoutSecs=60)
            elapsed = time.time() - start
            print s3nKey, 'parse time:', parseKey['response']['time']
            print "parse result:", parseKey['destination_key']
            print "Trial #", trial, "completed in", elapsed, "seconds.", \
                "%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
            # Sanity-check the parsed frame's dimensions via Inspect.
            inspect = h2o_cmd.runInspect(None, parseKey['destination_key'])
            print "\n" + key2 + \
                "    num_rows:", "{:,}".format(inspect['num_rows']), \
                "    num_cols:", "{:,}".format(inspect['num_cols'])
            print "Deleting key in H2O so we get it from s3n (if ec2) or nfs again.", \
                "Otherwise it would just parse the cached key."
            storeView = h2o.nodes[0].store_view()
            ### print "storeView:", h2o.dump_json(storeView)
            print "BROKE: we can't delete keys with a pattern match yet..this fails"
            print "So we only do 1 trial and don't delete"
            # print "Removing", s3nKey
            # removeKeyResult = h2o.nodes[0].remove_key(key=s3nKey)
if __name__ == '__main__':
    # Hand off to h2o's test-runner entry point.
    h2o.unit_main()
| [
"kevin@0xdata.com"
] | kevin@0xdata.com |
a485431bb651f6d2ee5e57cfeb93176fd651c79e | 3e7476abe9667208eb3aa1788237d783cc116665 | /lotte_project/asgi.py | 08729e2af9cd464c3ec54c2bfecae8fa7c73ec3f | [] | no_license | Beans9605/heroku_lotte | dbf190221730d75a608f14658d94843390279a54 | dcaeacac81da50d78dc72bae05ab276512ed6538 | refs/heads/master | 2023-01-16T03:33:01.052234 | 2020-10-25T16:47:49 | 2020-10-25T16:47:49 | 307,112,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
ASGI config for lotte_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings (only if not already set) before
# building the ASGI callable that servers like uvicorn/daphne will import.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lotte_project.settings')
application = get_asgi_application()
| [
"alkad1234@likelion.org"
] | alkad1234@likelion.org |
6d85b979f8691ff29a9f9be5767f2375a74ff9fc | e4b2ef5734175d0ca5bf109b18e0f43943686903 | /bookManagePj/gui/calc2.py | ef59972d22563f3084e6f4a2549cfeb98c93a782 | [] | no_license | jungaSeo/python_pj1 | a23659eb42ac7391ab3d9c22eec79a07b5b19fce | 704b2ab91bcb2d39c7feb62ba4a37f41041f0320 | refs/heads/master | 2020-05-04T20:37:09.806985 | 2019-04-13T10:00:22 | 2019-04-13T10:00:22 | 179,444,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,468 | py | import sys
from tkinter import *
def clear():
    """Erase everything currently shown in the calculator display."""
    txtDisplay.delete(0, END)
# Build the calculator window: a display entry on top, then four rows of
# buttons laid out left-to-right in per-row frames.
root = Tk()
frame = Frame(root)
frame.pack()
root.title('Calculator')
num1 = StringVar()
topframe = Frame(root)
topframe.pack(side=TOP)
# NOTE(review): the Tk entry widget has no 'text' option -- this was
# presumably meant to be textvariable=num1; confirm before relying on num1.
txtDisplay = Entry(frame, text = num1, bd=20, insertwidth=1, font=30)
# Entry creates an input field for receiving or displaying text.
# bd (borderwidth): thickness of the entry's border
# insertwidth: width of the entry's insertion cursor
txtDisplay.pack(side=TOP)
# NOTE(review): button1..button4 are rebound for every row, and only the "C"
# button has a command wired -- the digit/operator handlers look unfinished.
button1 = Button(topframe,padx=16, pady=16, bd=8, text="1", fg="black")
# padx: horizontal padding between the button border and its content
# pady: vertical padding between the button border and its content
button1.pack(side = LEFT)
button2 = Button(topframe,padx=16, pady=16, bd=8, text="2", fg="black")
button2.pack(side = LEFT)
button3 = Button(topframe,padx=16, pady=16, bd=8, text="3", fg="black")
button3.pack(side = LEFT)
button4 = Button(topframe,padx=16, pady=16, bd=8, text="4", fg="black")
button4.pack(side = LEFT)
frame1 = Frame(root)
frame1.pack(side = TOP)
button1 = Button(frame1,padx=16, pady=16, bd=8, text="5", fg="black")
button1.pack(side = LEFT)
button2 = Button(frame1,padx=16, pady=16, bd=8, text="6", fg="black")
button2.pack(side = LEFT)
button3 = Button(frame1,padx=16, pady=16, bd=8, text="7", fg="black")
button3.pack(side = LEFT)
button4 = Button(frame1,padx=16, pady=16, bd=8, text="8", fg="black")
button4.pack(side = LEFT)
frame2 = Frame(root)
frame2.pack(side = TOP)
button1 = Button(frame2,padx=16, pady=16, bd=8, text="9", fg="black")
button1.pack(side = LEFT)
button2 = Button(frame2,padx=16, pady=16, bd=8, text="0", fg="black")
button2.pack(side = LEFT)
button3 = Button(frame2,padx=16, pady=16, bd=8, text="C", fg="black", command = clear)
button3.pack(side = LEFT)
button4 = Button(frame2,padx=16, pady=16, bd=8, text="-", fg="black")
button4.pack(side = LEFT)
frame3 = Frame(root)
frame3.pack(side = TOP)
button1 = Button(frame3,padx=16, pady=16, bd=8, text="*", fg="black")
button1.pack(side = LEFT)
button2 = Button(frame3,padx=16, pady=16, bd=8, text="/", fg="black")
button2.pack(side = LEFT)
# NOTE(review): "-" appears both here and in the previous row; one of them
# was probably meant to be "=" -- confirm the intended layout.
button3 = Button(frame3,padx=16, pady=16, bd=8, text="-", fg="black")
button3.pack(side = LEFT)
button4 = Button(frame3,padx=16, pady=16, bd=8, text="+", fg="black")
button4.pack(side = LEFT)
root.mainloop()
"zzang@DESKTOP-8JUBML4"
] | zzang@DESKTOP-8JUBML4 |
acb9e1b2cbf7f2394c9d8d1e97788526cd9e0a63 | 7d275c13fedf0e44f128f3f55edf077f11898183 | /airlines.py | 6d96904d732e20e22d43a9f7c26f2758f602ac2f | [] | no_license | ShubhamDS-P/Clustering | 30a29f5335ca117de7cbccbdd5373c706fda0f06 | ee7cff405d41ae831fd7e44853f6f36cf44317f9 | refs/heads/main | 2023-02-14T04:24:44.822595 | 2021-01-10T11:34:23 | 2021-01-10T11:34:23 | 328,367,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,317 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 10 12:21:48 2021
@author: Shubham
"""
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
from scipy.cluster.hierarchy import linkage
import scipy.cluster.hierarchy as sch
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import KMeans
from scipy.spatial.distance import cdist
# Load the EastWestAirlines workbook ('data' sheet) from a local path.
# NOTE(review): hard-coded Windows path -- runs only on the author's machine.
xls= pd.ExcelFile('D:\Data Science study\Documents\Assignments\Clustering\EastWestAirlines.xlsx')
airlines = pd.read_excel(xls,'data')
airlines
# Lets create normalizaion function
def norm_func(i):
    """Z-score standardize: subtract the mean, divide by the sample std."""
    centered = i - i.mean()
    return centered / i.std()
# Normalize the airlines dataframe (drop the ID# column first -- it carries
# no clustering information).
airlinesdata = airlines.iloc[:,1:]
airlinesdata_norm = norm_func(airlinesdata)
airlinesdata_norm.head()       # peek at the top five rows
airlinesdata_norm.describe()   # summary statistics of the normalized data
# Measure pairwise distances / build the hierarchical-clustering linkage.
z = linkage(airlinesdata_norm, method = 'complete', metric = 'euclidean')
plt.figure(figsize = (15,5));plt.title("Heirarchical Clustering Dendrogram");plt.xlabel("Index");plt.ylabel("Distance")
sch.dendrogram(
    z,
    leaf_rotation = 0.,
    leaf_font_size = 8.,
)
plt.show()
# The dendrogram is hard to read for a dataset this large, but it still lets
# us pick a fixed cluster count; 9 clusters looks reasonable here.
# Agglomerative (hierarchical) clustering.
# BUG FIX: scikit-learn metric names are lowercase and case-sensitive --
# affinity = 'Euclidean' raises a ValueError at fit time.
h_complete = AgglomerativeClustering(n_clusters = 9,linkage = 'complete', affinity = 'euclidean').fit(airlinesdata_norm)
h_complete.labels_  # cluster number per record
airlines_labels = pd.Series(h_complete.labels_)
# New dataframe with a 'clust' column holding the hierarchical labels.
h_airlines = airlines.copy()
h_airlines['clust'] = airlines_labels
# BUG FIX: the reordered frame was computed but never assigned, so 'clust'
# never actually moved to the front. Keep the result.
h_airlines = h_airlines.iloc[:,[12,0,1,2,3,4,5,6,7,8,9,10,11]]
# Cluster-wise mean of every column.
result = h_airlines.groupby(h_airlines.clust).mean()
result
# Write the hierarchical result to disk.
import os
os.getcwd()
os.chdir("D:\\Data Science study\\assignment\\Sent\\7")
# NOTE(review): filename has no .csv extension -- kept as-is to preserve the
# original output name; confirm whether "h_airlines.csv" was intended.
h_airlines.to_csv("h_airlines", index = False)
# ---- K-means (non-hierarchical) clustering for comparison ----
# Build the scree / elbow curve to choose k.
k =list(range(2,15))
k
TWSS = []  # Total Within Sum of Squares per candidate k
for i in k:
    kmeans = KMeans(n_clusters = i)
    kmeans.fit(airlinesdata_norm)
    WSS = []  # within-cluster sum of squares for each cluster
    for j in range (i):
        WSS.append(sum(cdist(airlinesdata_norm.iloc[kmeans.labels_==j,:],kmeans.cluster_centers_[j].reshape(1,airlinesdata_norm.shape[1]),"euclidean")))
    TWSS.append(sum(WSS))
# Scree plot: look for the elbow.
plt.plot(k,TWSS,'ro-');plt.xlabel('No. of Clusters');plt.ylabel('Total within SS');plt.xticks(k)
# From the curve the most plausible k is 10.
model = KMeans(n_clusters = 10)
model.fit(airlinesdata_norm)
model.labels_  # cluster labels
md = pd.Series(model.labels_)
k_airlines = airlines.copy()
k_airlines['clust'] = md
k_airlines.head(10)
# BUG FIX: assign the reordered frame (originally computed and discarded).
k_airlines = k_airlines.iloc[:,[12,0,1,2,3,4,5,6,7,8,9,10,11]]
k_airlines
# Cluster-wise means of the feature columns. After the reorder above,
# positions 2:13 are the original columns 1..11 (ID# and clust excluded),
# matching what the pre-reorder slice 1:12 selected.
k_airlines.iloc[:,2:13].groupby(k_airlines.clust).mean()
os.getcwd()
# Write the k-means result to disk.
k_airlines.to_csv("K_airlines.csv", index = False)
| [
"noreply@github.com"
] | noreply@github.com |
4370cb702c00e24272c7131dbaca565ca7cca73e | 3567d260437aabb73fe7ace92a76afba658444a0 | /main.py | 56de48d02027237feaabfb4d8c9b2f0d76fe0809 | [] | no_license | tiamesfa12/checkers-python | 99fbf45b135521829c122ed1b38790ca0399ee66 | 86110dd9d6d960c832025e4919c96f3cdaa19c18 | refs/heads/main | 2023-02-27T04:16:45.416961 | 2021-02-01T18:15:51 | 2021-02-01T18:15:51 | 335,039,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 839 | py | import pygame
from checkers2.constants import WIDTH, HEIGHT, SQUARE_SIZE, RED
from checkers2.game import Game
from minimax.algorithm import minimax
FPS = 60
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Checkers")
def get_row_col_from_mouse(pos):
    """Translate a pixel position ``(x, y)`` into board ``(row, col)`` indices."""
    x, y = pos
    return y // SQUARE_SIZE, x // SQUARE_SIZE
def main():  # we use this to run the game
    """Run the checkers game loop: poll events, apply moves, redraw.

    Exits when the window is closed or ``game.winner()`` reports a winner.
    """
    run = True
    clock = pygame.time.Clock()
    game = Game(WIN)
    while run:
        clock.tick(FPS)  # cap the loop at FPS frames per second
        winner = game.winner()  # fetch once instead of calling twice
        if winner is not None:  # idiom fix: compare to None with `is`
            print(winner)
            run = False
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
            if event.type == pygame.MOUSEBUTTONDOWN:
                # Map the click to a board square and forward it to the game.
                pos = pygame.mouse.get_pos()
                row, col = get_row_col_from_mouse(pos)
                game.select(row, col)
        game.update()
    pygame.quit()

main()
"noreply@github.com"
] | noreply@github.com |
9524be5e345203545d7e2e5af8b43f43ec21f9fe | 1141157442bda8d53840ca3bf2e304f779078134 | /competitiveSTDP/ReadDataFunction.py | d90ca688e477cf0707ff2b6322520e36693909f3 | [] | no_license | MyWhiteCastle/ECE523 | 2ea03b9f02bb980dbaf18a1df93568c8f5cc1cc4 | bb2e61b7cbbf9d875c82be6179fc0c6ace78799a | refs/heads/master | 2020-04-05T06:10:22.885834 | 2017-12-09T23:27:00 | 2017-12-09T23:27:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,637 | py | from skimage import filter as filt
import pickle
import numpy as np
def readData():
    """Load the per-digit train/test pickle files from the working directory.

    Expects files ``train0.pkl`` .. ``train9.pkl`` and ``test0.pkl`` ..
    ``test9.pkl`` to exist. Returns a 20-tuple in the same order as before:
    (train0, ..., train9, test0, ..., test9).
    """
    def _load(prefix, digit):
        # Unpickle one '<prefix><digit>.pkl' file.
        with open('%s%d.pkl' % (prefix, digit), 'rb') as input:
            return pickle.load(input)
    # Replaces 20 copy-pasted with-blocks with two comprehensions.
    trains = [_load('train', d) for d in range(10)]
    tests = [_load('test', d) for d in range(10)]
    return tuple(trains + tests)
def readCollectedSpikes():
    """Load the previously collected spike tensors for train and test sets."""
    with open('trainSpikesTensor2.pkl', 'rb') as fh:
        train_tensor = pickle.load(fh)
    with open('testSpikesTensor2.pkl', 'rb') as fh:
        test_tensor = pickle.load(fh)
    return train_tensor, test_tensor
| [
"zhengzhongliang93@gmail.com"
] | zhengzhongliang93@gmail.com |
ece8c2bdbe9075b3980b6da4c1de9129bd71031b | 495f28047fcc69ee4b579f90b421a9d87aa38f93 | /archconvnets/convnet2/python_util/data.py | a5a01dd5207b4816ae7c9cca0c8405e49f682eca | [
"Apache-2.0"
] | permissive | shyamalschandra/archconvnets | 10dfffa5f29f8b35e2a7d095c934d2112bcc1f45 | 147d8eab7cd21c53b6689a8364cdb613bd602aa6 | refs/heads/master | 2021-01-22T16:38:46.513143 | 2015-01-21T21:14:50 | 2015-01-21T21:14:50 | 29,734,254 | 1 | 0 | null | 2015-01-23T13:47:35 | 2015-01-23T13:47:35 | null | UTF-8 | Python | false | false | 39,138 | py | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as n
from numpy.random import randn, rand, random_integers
import os
from threading import Thread
from collections import OrderedDict
from util import *
import time as systime
import math
import importlib
import hashlib
from skdata import larray
BATCH_META_FILE = "batches.meta"
class DataLoaderThread(Thread):
    """Background thread that loads one batch file and appends it to ``tgt``.

    ``mode`` selects the on-disk format: 'pickle' (read via the project's
    ``unpickle``) or 'numpy' (a ``.npy`` file holding a single object,
    unwrapped with ``reshape((1,))[0]``).
    """
    def __init__(self, path, tgt, mode='pickle'):
        Thread.__init__(self)
        self.path = path
        if mode == 'numpy':
            # numpy batches are stored with an .npy suffix
            self.path = self.path + '.npy'
        self.tgt = tgt
        self.mode = mode
    def run(self):
        # BUG FIX: the original referenced the undefined global `mode`
        # instead of `self.mode`, raising NameError on every run().
        if self.mode == 'pickle':
            self.tgt += [unpickle(self.path)]
        elif self.mode == 'numpy':
            self.tgt += [n.load(self.path).reshape((1, ))[0]]
class DataProvider:
    """Base class for serving training/test batches to the trainer.

    Batches live on disk in ``data_dir`` as files (or directories of
    sub-batch files) named ``data_batch_<n>``, alongside a ``batches.meta``
    pickle describing the dataset. Instances track the current epoch and
    batch index and cycle through ``batch_range``.
    """
    BATCH_REGEX = re.compile('^data_batch_(\d+)(\.\d+)?$')
    def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params={}, test=False):
        """Set up batch bookkeeping; defaults to all batches found in data_dir.

        NOTE(review): the mutable default ``dp_params={}`` is shared across
        calls; callers should always pass their own dict.
        """
        if batch_range == None:
            batch_range = DataProvider.get_batch_nums(data_dir)
        if init_batchnum is None or init_batchnum not in batch_range:
            init_batchnum = batch_range[0]
        self.data_dir = data_dir
        self.batch_range = batch_range
        self.curr_epoch = init_epoch
        self.curr_batchnum = init_batchnum
        self.dp_params = dp_params
        self.batch_meta = self.get_batch_meta(data_dir)
        self.data_dic = None
        self.test = test
        self.batch_idx = batch_range.index(init_batchnum)
    def get_next_batch(self):
        """Return (epoch, batchnum, batch-data) and advance the pointer.

        With a single batch in range, the loaded dict is cached and reused.
        """
        if self.data_dic is None or len(self.batch_range) > 1:
            self.data_dic = self.get_batch(self.curr_batchnum)
        epoch, batchnum = self.curr_epoch, self.curr_batchnum
        self.advance_batch()
        return epoch, batchnum, self.data_dic
    def get_batch(self, batch_num, mode='pickle'):
        """Load batch ``batch_num`` from disk.

        ``mode`` is 'pickle' or 'numpy'. If the batch path is a directory,
        each sub-batch file is loaded concurrently by a DataLoaderThread and
        a list of the loaded objects is returned.
        """
        fname = self.get_data_file_name(batch_num)
        if mode == 'numpy':
            fname += '.npy'
        if os.path.isdir(fname): # batch in sub-batches
            sub_batches = sorted(os.listdir(fname), key=alphanum_key)
            #print sub_batches
            num_sub_batches = len(sub_batches)
            tgts = [[] for i in xrange(num_sub_batches)]
            # one loader thread per sub-batch; each appends into its own tgt list
            threads = [DataLoaderThread(os.path.join(fname, s), tgt, mode=mode) for (s, tgt) in zip(sub_batches, tgts)]
            for thread in threads:
                thread.start()
            for thread in threads:
                thread.join()
            return [t[0] for t in tgts]
        if mode == 'pickle':
            return unpickle(fname)
        elif mode == 'numpy':
            # .npy files store a single pickled object wrapped in a 0-d array
            return n.load(fname).reshape((1, ))[0]
    def get_data_dims(self,idx=0):
        """Dimensionality of data stream ``idx`` (labels streams report 1)."""
        return self.batch_meta['num_vis'] if idx == 0 else 1
    def advance_batch(self):
        """Step to the next batch, bumping the epoch when the range wraps."""
        self.batch_idx = self.get_next_batch_idx()
        self.curr_batchnum = self.batch_range[self.batch_idx]
        if self.batch_idx == 0: # we wrapped
            self.curr_epoch += 1
    def get_next_batch_idx(self):
        """Index (into batch_range) of the batch after the current one."""
        return (self.batch_idx + 1) % len(self.batch_range)
    def get_next_batch_num(self):
        """Batch number of the batch after the current one."""
        return self.batch_range[self.get_next_batch_idx()]
    # get filename of current batch
    def get_data_file_name(self, batchnum=None):
        """Path of the file for ``batchnum`` (defaults to the current batch)."""
        if batchnum is None:
            batchnum = self.curr_batchnum
        return os.path.join(self.data_dir, 'data_batch_%d' % batchnum)
    @classmethod
    def get_instance(cls, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, type="default", dp_params={}, test=False):
        """Factory: instantiate the registered provider class named ``type``.

        'dummy-<n>' types construct a dummy provider with n dimensions.
        """
        # why the fuck can't i reference DataProvider in the original definition?
        #cls.dp_classes['default'] = DataProvider
        type = type or DataProvider.get_batch_meta(data_dir)['dp_type'] # allow data to decide data provider
        if type.startswith("dummy-"):
            name = "-".join(type.split('-')[:-1]) + "-n"
            if name not in dp_types:
                raise DataProviderException("No such data provider: %s" % type)
            _class = dp_classes[name]
            dims = int(type.split('-')[-1])
            return _class(dims)
        elif type in dp_types:
            _class = dp_classes[type]
            return _class(data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
        raise DataProviderException("No such data provider: %s" % type)
    @classmethod
    def register_data_provider(cls, name, desc, _class):
        """Register ``_class`` under ``name`` in the module-level registries."""
        if name in dp_types:
            raise DataProviderException("Data provider %s already registered" % name)
        dp_types[name] = desc
        dp_classes[name] = _class
    @staticmethod
    def get_batch_meta(data_dir):
        """Unpickle and return ``batches.meta`` from ``data_dir``."""
        return unpickle(os.path.join(data_dir, BATCH_META_FILE))
    @staticmethod
    def get_batch_filenames(srcdir):
        """Sorted list of batch file names present in ``srcdir``."""
        return sorted([f for f in os.listdir(srcdir) if DataProvider.BATCH_REGEX.match(f)], key=alphanum_key)
    @staticmethod
    def get_batch_nums(srcdir):
        """Sorted list of distinct batch numbers present in ``srcdir``."""
        names = DataProvider.get_batch_filenames(srcdir)
        return sorted(list(set(int(DataProvider.BATCH_REGEX.match(n).group(1)) for n in names)))
    @staticmethod
    def get_num_batches(srcdir):
        """Count of distinct batches present in ``srcdir``."""
        return len(DataProvider.get_batch_nums(srcdir))
class DummyDataProvider(DataProvider):
    """Provider that fabricates random batches of a fixed dimensionality."""
    def __init__(self, data_dim):
        # Minimal bookkeeping only; deliberately skips DataProvider.__init__
        # (there is no on-disk data to index).
        self.batch_range = [1]
        self.batch_meta = dict(num_vis=data_dim, data_in_rows=True)
        self.curr_epoch = 1
        self.curr_batchnum = 1
        self.batch_idx = 0

    def get_next_batch(self):
        """Return (epoch, batchnum, {'data': random matrix}) and advance."""
        _epoch, _batchnum = self.curr_epoch, self.curr_batchnum
        self.advance_batch()
        batch = {'data': rand(512, self.get_data_dims()).astype(n.single)}
        return self.curr_epoch, self.curr_batchnum, batch
class LabeledDataProvider(DataProvider):
    """DataProvider whose batches carry class labels."""

    def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params={}, test=False):
        DataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)

    def get_num_classes(self):
        """Number of distinct labels, taken from the batch metadata."""
        return len(self.batch_meta['label_names'])
class LabeledDataProviderTrans(LabeledDataProvider):
    """Labeled provider that transposes batch data to (features, cases).

    ``data_dir`` may be a '|'-separated list of directories; in that case one
    child provider is built per directory and their data rows are stacked
    (labels are taken from the first child).
    """
    def __init__(self, data_dir,
                 img_size, num_colors,
                 batch_range=None,
                 init_epoch=1, init_batchnum=None, dp_params=None, test=False):
        data_dir = data_dir.split('|')
        if len(data_dir) == 1:
            data_dir = data_dir[0]
        if isinstance(data_dir, list):
            # multi-source: recurse into one provider per directory
            self._dps = [LabeledDataProviderTrans(d, img_size, num_colors, batch_range=batch_range,
                                          init_epoch=init_epoch, init_batchnum=init_batchnum,
                                          dp_params=dp_params, test=test) for d in data_dir]
        else:
            self._dps = None
            LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
        self.num_colors = num_colors
        self.img_size = img_size

    @staticmethod
    def get_batch_meta(data_dir):
        """Batch meta for one directory, or a merged meta for a list of them.

        For a list, 'num_vis' is summed and 'data_mean' concatenated across
        sources; all other keys come from the first source.
        """
        if isinstance(data_dir, list):
            bm = [DataProvider.get_batch_meta(d) for d in data_dir]
            keys = bm[0].keys()
            mdict = {}
            for k in keys:
                if k not in ['data_mean', 'num_vis']:
                    mdict[k] = bm[0][k]
            mdict['num_vis'] = sum([b['num_vis'] for b in bm])
            if 'data_mean' in bm[0]:
                mdict['data_mean'] = n.concatenate([b['data_mean'] for b in bm])
            return mdict
        else:
            return DataProvider.get_batch_meta(data_dir)

    def get_out_img_size( self ):
        """Spatial size (pixels per side) of the output images."""
        return self.img_size

    def get_out_img_depth( self ):
        """Channel depth of output images (stacked across sources if multi)."""
        if isinstance(self.data_dir, list):
            return self.num_colors * len(self._dps)
        else:
            return self.num_colors

    def get_next_batch(self):
        """Return (epoch, batchnum, [data, labels]) with data transposed.

        Multi-source: children's data matrices are row-stacked; epoch/batch
        number and labels come from the first child.
        """
        if isinstance(self.data_dir, list):
            bs = [d.get_next_batch() for d in self._dps]
            epoch = bs[0][0]
            batch_num = bs[0][1]
            labels = bs[0][2][1]
            data = n.row_stack([b[2][0] for b in bs])
            self.advance_batch()
            return epoch, batch_num, [data, labels]
        else:
            epoch, batchnum, d = LabeledDataProvider.get_next_batch(self)
            # transpose to (features, cases) and force C-contiguous float32
            d['data'] = n.require(d['data'], dtype=n.single, requirements='C')
            d['data'] = d['data'].T
            d['data'] = n.require(d['data'], requirements='C')
            d['labels'] = n.c_[n.require(d['labels'], dtype=n.single, requirements='C')]
            return epoch, batchnum, [d['data'], d['labels']]

    @staticmethod
    def get_batch_nums(srcdir):
        """Batch numbers for one directory (first directory when a list)."""
        if isinstance(srcdir, list):
            return DataProvider.get_batch_nums(srcdir[0])
        else:
            return DataProvider.get_batch_nums(srcdir)
class LabeledDummyDataProvider(DummyDataProvider):
    """Dummy provider that also fabricates random class labels.

    The first call to get_next_batch generates a fixed random batch which is
    then cached and re-served on every subsequent call.
    """
    def __init__(self, data_dim, num_classes=10, num_cases=7):
        self.batch_range = [1]
        self.batch_meta = {'num_vis': data_dim,
                           'label_names': [str(x) for x in range(num_classes)],
                           'data_in_rows': True}
        self.num_cases = num_cases
        self.num_classes = num_classes
        self.curr_epoch = 1
        self.curr_batchnum = 1
        self.batch_idx = 0
        self.data = None  # lazily generated, then cached

    def get_num_classes(self):
        """Number of fabricated classes."""
        return self.num_classes

    def get_next_batch(self):
        """Return (epoch, batchnum, [data.T, labels.T]); batch is cached."""
        _epoch, _batchnum = self.curr_epoch, self.curr_batchnum
        self.advance_batch()
        if self.data is not None:
            data, labels = self.data, self.labels
        else:
            data = rand(self.num_cases, self.get_data_dims()).astype(n.single) # <--changed to rand
            labels = n.require(n.c_[random_integers(0,self.num_classes-1,self.num_cases)], requirements='C', dtype=n.single)
            self.data, self.labels = data, labels
        return self.curr_epoch, self.curr_batchnum, [data.T, labels.T]
def dldata_to_convnet_reformatting(stims, lbls):
    """Reformat a stimulus batch (and optional labels) into convnet layout.

    stims: array of shape (batch, feat), (batch, sz, sz) for grayscale, or
        (batch, sz, sz, channels) for color images.
    lbls: None, a 1-d array of length batch, or a dict-like mapping of
        label-attribute name -> 1-d array of length batch.

    Returns a dict with 'data' of shape (features, batch) and, when lbls is
    given, 'labels' of shape (1, batch) (or an OrderedDict of such rows).
    """
    if stims.ndim > 2:
        img_sz = stims.shape[1]
        batch_size = stims.shape[0]
        if stims.ndim == 3:
            # grayscale: (batch, sz, sz) -> (sz*sz, batch)
            stims = stims.reshape((batch_size, img_sz ** 2)).T
        else:
            assert stims.ndim == 4
            # color: (batch, sz, sz, nc) -> (nc*sz*sz, batch), channel-major
            nc = stims.shape[3]
            new_s = (nc * (img_sz ** 2), batch_size)
            # (removed a stray debug print of stims.shape)
            stims = stims.transpose([3, 1, 2, 0]).reshape(new_s)
    else:
        stims = stims.T
    if lbls is None:
        return {'data': stims}
    if hasattr(lbls, 'keys'):
        # dict-like labels: one (1, batch) row per label attribute
        labels = OrderedDict([])
        for k in lbls:
            lblk = lbls[k]
            assert lblk.ndim == 1
            labels[k] = lblk.reshape((1, lblk.shape[0]))
    else:
        assert lbls.ndim == 1
        labels = lbls.reshape((1, lbls.shape[0]))
    return {'data': stims, 'labels': labels}
class DLDataProvider(LabeledDataProvider):
    """LabeledDataProvider backed by a dldata/skdata-style dataset object.

    On construction, any batch files in ``batch_range`` missing from
    ``data_dir`` are generated from the dataset (preprocessed stimuli plus a
    label column from the dataset meta), while a running image mean is
    accumulated and stored in ``batches.meta``.
    """
    def __init__(self, data_dir, batch_range, init_epoch=1,
                 init_batchnum=None, dp_params=None, test=False):
        #load dataset and meta
        self.replace_label = dp_params.get('replace_label', False)
        # dataset_name is (module, attribute); import and instantiate it
        modulename, attrname = dp_params['dataset_name']
        module = importlib.import_module(modulename)
        dataset_obj = getattr(module, attrname)
        dataset_data = dp_params.get('dataset_data', None)
        if dataset_data is not None:
            dset = dataset_obj(data=dataset_data)
        else:
            dset = dataset_obj()
        meta = self.meta = dset.meta
        mlen = len(meta)
        self.dp_params = dp_params
        #default data location, keyed on preproc settings + batch size
        if data_dir == '':
            pstring = hashlib.sha1(repr(dp_params['preproc'])).hexdigest() + '_%d' % dp_params['batch_size']
            data_dir = dset.home('convnet_batches', pstring)
        #compute number of batches
        mlen = len(meta)
        batch_size = dp_params['batch_size']
        num_batches = self.num_batches = int(math.ceil(mlen / float(batch_size)))
        batch_regex = re.compile('data_batch_([\d]+)')
        imgs_mean = None
        existing_batches = []
        isf = 0  # running count used to weight the incremental mean
        if os.path.exists(data_dir):
            # scan for batches already written by a previous run
            _L = os.listdir(data_dir)
            existing_batches = [int(batch_regex.match(_l).groups()[0]) for _l in _L if batch_regex.match(_l)]
            existing_batches.sort()
            metafile = os.path.join(data_dir, 'batches.meta')
            if existing_batches:
                assert os.path.exists(metafile), 'Batches found but no metafile %s' % metafile
            if os.path.exists(metafile):
                bmeta = cPickle.load(open(metafile))
                ebatches = bmeta['existing_batches']
                imgs_mean = bmeta['data_mean']
                isf = bmeta['images_so_far']
                #assertions checking that the things that need to be the same
                #for these batches to make sense are in fact the same
                assert dp_params['batch_size'] == bmeta['num_cases_per_batch'], (dp_params['batch_size'], bmeta['num_cases_per_batch'])
                if 'dataset_name' in bmeta:
                    assert dp_params['dataset_name'] == bmeta['dataset_name'], (dp_params['dataset_name'], bmeta['dataset_name'])
                if 'preproc' in bmeta:
                    #assert dp_params['preproc'] == bmeta['preproc'], (dp_params['preproc'], bmeta['preproc'])
                    pass
                if 'dataset_data' in bmeta:
                    assert dataset_data == bmeta['dataset_data'], (dataset_data, bmeta['dataset_data'])
            else:
                ebatches = []
            #assert existing_batches == ebatches, ('Expected batches', ebatches, 'found batches', existing_batches)
            needed_batches = [_b for _b in batch_range if _b not in existing_batches]
            if existing_batches:
                print('Found batches: ', existing_batches)
                print('Batches needed: ', needed_batches)
        else:
            print('data_dir %s does not exist, creating' % data_dir)
            needed_batches = batch_range[:]
            os.makedirs(data_dir)
        if needed_batches or self.replace_label:
            indset = self.indset = self.get_indset()
            metacol = self.metacol = self.get_metacol()
        if needed_batches:
            #get stimarray (may be lazyarray)
            #something about appearing to require uint8??
            #dp_params['preproc']['dtype'] = 'uint8' #or assertion?
            stimarray = dset.get_images(preproc=dp_params['preproc'])
            #actually write out batches, while tallying img mean
            for bnum, inds in enumerate(indset):
                if bnum not in needed_batches:
                    continue
                print('Creating batch %d' % bnum)
                #get stimuli and put in the required format
                stims = n.asarray(stimarray[inds])
                if 'float' in repr(stims.dtype):
                    # float images assumed in [0, 1]; quantize to uint8
                    stims = n.uint8(n.round(255 * stims))
                lbls = metacol[inds]
                d = dldata_to_convnet_reformatting(stims, lbls)
                d['ids'] = meta[inds]['id']
                #add to the mean incrementally, weighted by counts so far
                if imgs_mean is None:
                    imgs_mean = n.zeros((d['data'].shape[0],))
                # NOTE(review): dlen is the feature count (shape[0]), not the
                # number of images in the batch (shape[1]) -- the
                # 'images_so_far' weighting looks suspect; confirm intended.
                dlen = d['data'].shape[0]
                fr = isf / (isf + float(dlen))
                imgs_mean *= fr
                imgs_mean += (1 - fr) * d['data'].mean(axis=1)
                isf += dlen
                #write out batch
                outdict = {'batch_label': 'batch_%d' % bnum,
                           'labels': d['labels'],
                           'data': d['data'],
                           'ids': d['ids']
                           }
                outpath = os.path.join(data_dir, 'data_batch_%d' % bnum)
                n.save(outpath, outdict)
            #write out batches.meta
            existing_batches += needed_batches
            existing_batches.sort()
            outdict = {'num_cases_per_batch': batch_size,
                       'label_names': self.labels_unique,
                       'num_vis': d['data'].shape[0],
                       'data_mean': imgs_mean,
                       'existing_batches': existing_batches,
                       'images_so_far': isf,
                       'dataset_name': dp_params['dataset_name'],
                       'dataset_data': dataset_data,
                       'preproc': dp_params['preproc']}
            with open(os.path.join(data_dir, 'batches.meta'), 'w') as _f:
                cPickle.dump(outdict, _f)
        LabeledDataProvider.__init__(self, data_dir, batch_range,
                                     init_epoch, init_batchnum, dp_params, test)
        if self.replace_label:
            self.batch_meta['label_names'] = self.labels_unique
        else:
            self.labels_unique = self.batch_meta['label_names']
    def get_num_classes(self, name=None):
        """Number of classes, optionally for one named label attribute."""
        if name is None or not hasattr(self.labels_unique, 'keys'):
            return len(self.labels_unique)
        else:
            return len(self.labels_unique[name])
    def get_next_batch(self):
        """Return (epoch, batchnum, dict) with C-contiguous float32 labels.

        Prints coarse timing for the load / layout / label-conversion steps.
        """
        t0 = systime.time()
        epoch, batchnum, d = LabeledDataProvider.get_next_batch(self)
        t1 = systime.time()
        #d['data'] = n.require(d['data'].copy(order='A'), requirements='C')
        d['data'] = n.require(d['data'], requirements='C')
        t2 = systime.time()
        if hasattr(d['labels'], 'keys'):
            for k in d['labels']:
                d['labels'][k] = n.c_[n.require(d['labels'][k], dtype=n.single)]
        else:
            d['labels'] = n.c_[n.require(d['labels'], dtype=n.single)]
        t3 = systime.time()
        print('timing: nextbatch %.4f order %.4f labels %.4f' % (t1 - t0, t2 - t1, t3 - t2))
        return epoch, batchnum, d
    def get_batch(self, batch_num):
        """Load one numpy-format batch; optionally re-derive labels from meta."""
        dic = LabeledDataProvider.get_batch(self, batch_num, mode='numpy')
        if self.replace_label:
            # ignore labels stored in the batch file; recompute from metacol
            metacol = self.metacol
            indset = self.indset
            lbls = metacol[indset[batch_num]]
            assert lbls.ndim == 1
            labels = lbls.reshape((1, lbls.shape[0]))
            dic['labels'] = labels
        return dic
    def get_metacol(self):
        """Integer label column(s) for dp_params['meta_attribute'].

        A list of attributes yields an OrderedDict of columns and fills
        ``self.labels_unique`` per attribute; a single attribute yields one
        column.
        """
        meta_attr = self.dp_params['meta_attribute']
        if isinstance(meta_attr, list):
            meta_attr = map(str, meta_attr)
            metacol = OrderedDict([])
            self.labels_unique = OrderedDict([])
            for ma in meta_attr:
                mcol, lu = self.get_metacol_base(ma)
                metacol[ma] = mcol
                self.labels_unique[ma] = lu
        else:
            meta_attr = str(meta_attr)
            metacol, labels_unique = self.get_metacol_base(meta_attr)
            self.labels_unique = labels_unique
        return metacol
    def get_metacol_base(self, ma):
        """Return (int label column, unique values) for meta attribute ``ma``.

        Numeric columns pass through unchanged (labels_unique is None);
        non-numeric columns are mapped to indices into their sorted uniques.
        """
        assert isinstance(ma, str), ma
        metacol = self.meta[ma][:]
        mlen = len(metacol)
        try:
            # cheap numeric-ness probe: addition works only for numbers
            metacol + 1
            labels_unique = None
        except TypeError:
            labels_unique = n.unique(metacol)
            labels = n.zeros((mlen, ), dtype='int')
            for label in range(len(labels_unique)):
                labels[metacol == labels_unique[label]] = label
            metacol = labels
        return metacol, labels_unique
    def get_indset(self):
        """Per-batch index sets into the dataset meta.

        perm_type None -> contiguous slices; 'random' -> seeded shuffle;
        'ordered_random' -> shuffle then stable sort by perm_order fields;
        'query_random' -> records matching perm_query first, rest after,
        each group shuffled.
        """
        dp_params = self.dp_params
        perm_type = dp_params.get('perm_type')
        num_batches = self.num_batches
        batch_size = dp_params['batch_size']
        meta = self.meta
        if perm_type is not None:
            mlen = len(self.meta)
            if perm_type == 'random':
                perm_seed = dp_params.get('perm_seed', 0)
                rng = n.random.RandomState(seed=perm_seed)
                perm = rng.permutation(mlen)
                indset = [perm[batch_size * bidx: batch_size * (bidx + 1)] for bidx in range(num_batches)]
            elif perm_type == 'ordered_random':
                perm_seed = dp_params.get('perm_seed', 0)
                rng = n.random.RandomState(seed=perm_seed)
                perm = rng.permutation(mlen)
                submeta = meta[dp_params['perm_order']].copy()
                submeta = submeta[perm]
                s = submeta.argsort(order=dp_params['perm_order'])
                new_perm = perm[s]
                indset = [new_perm[batch_size * bidx: batch_size * (bidx + 1)] for bidx in range(num_batches)]
            elif perm_type == 'query_random':
                perm_seed = dp_params.get('perm_seed', 0)
                rng = n.random.RandomState(seed=perm_seed)
                query = dp_params['perm_query']
                qf = get_lambda_from_query_config(query)
                inds = n.array(map(qf, meta))
                indsf = n.invert(inds).nonzero()[0]
                indst = inds.nonzero()[0]
                inds1 = indst[rng.permutation(len(indst))]
                inds2 = indsf[rng.permutation(len(indsf))]
                inds = n.concatenate([inds1, inds2])
                indset = [inds[batch_size * bidx: batch_size * (bidx + 1)] for bidx in range(num_batches)]
            else:
                raise ValueError, 'Unknown permutation type.'
        else:
            indset = [slice(batch_size * bidx, batch_size * (bidx + 1))
                      for bidx in range(num_batches)]
        return indset
    def get_perm(self):
        """Return (permutation of the meta, permutation id string).

        Only perm_type 'random' is supported here.
        """
        dp_params = self.dp_params
        perm_type = dp_params.get('perm_type')
        meta = self.meta
        mlen = len(self.meta)
        if perm_type == 'random':
            perm_seed = dp_params.get('perm_seed', 0)
            rng = n.random.RandomState(seed=perm_seed)
            return rng.permutation(mlen), perm_type + '_' + str(perm_seed)
        else:
            raise ValueError, 'Unknown permutation type.'
class DLDataProvider2(DLDataProvider):
    """DLDataProvider variant that serves batches from a larray disk cache
    (memmap or hdf5) instead of writing per-batch files.

    The permuted, convnet-formatted image array is cached once; batches are
    sliced out of it on demand. ``batches.meta`` holds a data mean computed
    over the first ``num_batches_for_mean`` batches.
    """
    def __init__(self, data_dir, batch_range, init_epoch=1,
                 init_batchnum=None, dp_params=None, test=False,
                 read_mode='r', cache_type='memmap'):
        #load dataset and meta
        modulename, attrname = dp_params['dataset_name']
        module = importlib.import_module(modulename)
        dataset_obj = getattr(module, attrname)
        dataset_data = dp_params.get('dataset_data', None)
        if dataset_data is not None:
            dset = dataset_obj(data=dataset_data)
        else:
            dset = dataset_obj()
        meta = self.meta = dset.meta
        mlen = len(meta)
        self.dp_params = dp_params
        #compute number of batches
        mlen = len(meta)
        batch_size = self.batch_size = dp_params['batch_size']
        num_batches = self.num_batches = int(math.ceil(mlen / float(batch_size)))
        num_batches_for_meta = self.num_batches_for_meta = dp_params['num_batches_for_mean']
        perm_type = dp_params.get('perm_type')
        images = dset.get_images(preproc=dp_params['preproc'])
        # place the cache next to the dataset's own image cache if it has one
        if hasattr(images, 'dirname'):
            base_dir, orig_name = os.path.split(images.dirname)
        else:
            base_dir = dset.home('cache')
            orig_name = 'images_cache_' + get_id(dp_params['preproc'])
        perm, perm_id = self.get_perm()
        # lazily map the permuted images through Reorder (convnet layout)
        reorder = Reorder(images)
        lmap = larray.lmap(reorder, perm, f_map = reorder)
        if cache_type == 'hdf5':
            new_name = orig_name + '_' + perm_id + '_hdf5'
            print('Getting stimuli from cache hdf5 at %s/%s ' % (base_dir, new_name))
            self.stimarray = larray.cache_hdf5(lmap,
                                  name=new_name,
                                  basedir=base_dir,
                                  mode=read_mode)
        elif cache_type == 'memmap':
            new_name = orig_name + '_' + perm_id + '_memmap'
            print('Getting stimuli from cache memmap at %s/%s ' % (base_dir, new_name))
            self.stimarray = larray.cache_memmap(lmap,
                                  name=new_name,
                                  basedir=base_dir)
        # permute the label column(s) to match the permuted stimuli
        metacol = self.get_metacol()
        if hasattr(metacol, 'keys'):
            for k in metacol:
                metacol[k] = metacol[k][perm]
            self.metacol = metacol
        else:
            self.metacol = metacol[perm]
        #default data location, keyed on preproc settings + batch size
        if data_dir == '':
            pstring = hashlib.sha1(repr(dp_params['preproc'])).hexdigest() + '_%d' % dp_params['batch_size']
            data_dir = dset.home('convnet_batches', pstring)
        if not os.path.exists(data_dir):
            print('data_dir %s does not exist, creating' % data_dir)
            os.makedirs(data_dir)
        metafile = os.path.join(data_dir, 'batches.meta')
        if os.path.exists(metafile):
            print('Meta file at %s exists, loading' % metafile)
            bmeta = cPickle.load(open(metafile))
            #assertions checking that the things that need to be the same
            #for these batches to make sense are in fact the same
            assert dp_params['batch_size'] == bmeta['num_cases_per_batch'], (dp_params['batch_size'], bmeta['num_cases_per_batch'])
            if 'dataset_name' in bmeta:
                assert dp_params['dataset_name'] == bmeta['dataset_name'], (dp_params['dataset_name'], bmeta['dataset_name'])
            if 'preproc' in bmeta:
                assert dp_params['preproc'] == bmeta['preproc'], (dp_params['preproc'], bmeta['preproc'])
                #pass
            if 'dataset_data' in bmeta:
                assert dataset_data == bmeta['dataset_data'], (dataset_data, bmeta['dataset_data'])
        else:
            # compute the data mean over the first few batches and persist it
            print('Making batches.meta at %s ...' % metafile)
            imgs_mean = None
            isf = 0
            for bn in range(num_batches_for_meta):
                print('Meta batch %d' % bn)
                #get stimuli and put in the required format
                print(self.stimarray.shape, batch_size)
                stims = self.stimarray[bn * batch_size: (bn + 1) * batch_size]
                print("Shape", stims.shape)
                stims = n.asarray(stims)
                print('Got stims', stims.shape, stims.nbytes)
                if 'float' in repr(stims.dtype):
                    stims = n.uint8(n.round(255 * stims))
                    print('Converted to uint8', stims.nbytes)
                d = dldata_to_convnet_reformatting(stims, None)
                #add to the mean
                if imgs_mean is None:
                    imgs_mean = n.zeros((d['data'].shape[0],))
                # NOTE(review): as in DLDataProvider, dlen is the feature
                # count, not the image count -- weighting looks suspect.
                dlen = d['data'].shape[0]
                fr = isf / (isf + float(dlen))
                imgs_mean *= fr
                imgs_mean += (1 - fr) * d['data'].mean(axis=1)
                isf += dlen
            #write out batches.meta
            outdict = {'num_cases_per_batch': batch_size,
                       'label_names': self.labels_unique,
                       'num_vis': d['data'].shape[0],
                       'data_mean': imgs_mean,
                       'dataset_name': dp_params['dataset_name'],
                       'dataset_data': dataset_data,
                       'preproc': dp_params['preproc']}
            with open(metafile, 'wb') as _f:
                cPickle.dump(outdict, _f)
        self.batch_meta = cPickle.load(open(metafile, 'rb'))
        LabeledDataProvider.__init__(self, data_dir, batch_range,
                                     init_epoch, init_batchnum, dp_params, test)
    def get_batch(self, batch_num):
        """Slice batch ``batch_num`` out of the cached stimulus array.

        Returns the dict produced by dldata_to_convnet_reformatting (data
        plus labels for the corresponding metacol slice).
        """
        print('bn', batch_num)
        batch_size = self.batch_size
        inds = slice(batch_num * batch_size, (batch_num + 1) * batch_size)
        print('got slice')
        stims = n.asarray(self.stimarray[inds])
        print('got stims')
        if 'float' in repr(stims.dtype):
            stims = n.uint8(n.round(255 * stims))
            print('to uint8')
        if hasattr(self.metacol, 'keys'):
            lbls = OrderedDict([(k, self.metacol[k][inds]) for k in self.metacol])
        else:
            lbls = self.metacol[inds]
        print('got meta')
        d = dldata_to_convnet_reformatting(stims, lbls)
        print('done')
        return d
class Reorder(object):
    """Wraps an array-like ``X``; calling with indices yields rows of the
    selected images flattened into convnet layout (uint8).

    Also exposes ``rval_getattr`` so larray can query the mapped output's
    shape and dtype without evaluating it.
    """
    def __init__(self, X):
        self.X = X

    def __call__(self, inds):
        sel = self.X[inds]
        if 'float' in repr(sel.dtype):
            sel = n.uint8(n.round(255 * sel))
        if sel.ndim < self.X.ndim:
            # a scalar index dropped the leading axis; restore a batch dim of 1
            assert sel.ndim == self.X.ndim - 1, (sel.ndim, self.X.ndim)
            assert sel.shape == self.X.shape[1:], (sel.shape, self.X.shape)
            sel = sel.reshape((1, ) + sel.shape)
        return dldata_to_convnet_reformatting(sel, None)['data'].T

    def rval_getattr(self, attr, objs=None):
        """Shape/dtype of the mapped output; everything else defers to X."""
        if attr == 'shape':
            return (n.prod(self.X.shape[1:]), )
        if attr == 'dtype':
            return 'uint8'
        return getattr(self.X, attr)
#########MapProvider
class DLDataMapProvider(DLDataProvider):
"""
Same interace as DLDataProvider2 but allows an arbitrary number of
image-shaped maps. This is specified by:
* dp_params["map_methods"], a list of names of methods for getting maps
from dataset object. This assumes that each of the map-getting
methods take an argument "preproc", just like the standard get_images.
* dp_params["map_preprocs"] = list of preprocs to apply in getting the maps.
"""
def __init__(self, data_dir, batch_range, init_epoch=1,
init_batchnum=None, dp_params=None, test=False,
read_mode='r', cache_type='memmap'):
if batch_range == None:
batch_range = DataProvider.get_batch_nums(data_dir)
if init_batchnum is None or init_batchnum not in batch_range:
init_batchnum = batch_range[0]
self.data_dir = data_dir
self.batch_range = batch_range
self.curr_epoch = init_epoch
self.curr_batchnum = init_batchnum
self.dp_params = dp_params
self.data_dic = None
self.test = test
self.batch_idx = batch_range.index(init_batchnum)
#load dataset and meta
modulename, attrname = dp_params['dataset_name']
module = importlib.import_module(modulename)
dataset_obj = getattr(module, attrname)
dataset_data = dp_params.get('dataset_data', None)
if dataset_data is not None:
dset = self.dset = dataset_obj(data=dataset_data)
else:
dset = self.dset = dataset_obj()
meta = self.meta = dset.meta
mlen = len(meta)
self.dp_params = dp_params
#compute number of batches
mlen = len(meta)
batch_size = self.batch_size = dp_params['batch_size']
self.num_batches = int(math.ceil(mlen / float(batch_size)))
self.num_batches_for_meta = dp_params['num_batches_for_mean']
perm, perm_id = self.get_perm()
self.metacol = self.get_metacol()[perm]
map_methods = self.map_methods = dp_params['map_methods']
map_preprocs = self.map_preprocs = dp_params['map_preprocs']
assert hasattr(map_methods, '__iter__')
assert hasattr(map_preprocs, '__iter__')
assert len(map_methods) == len(map_preprocs), (len(map_methods) , len(map_preprocs))
map_list = [getattr(dset, mname)(preproc=pp)
for mname, pp in zip(map_methods, map_preprocs)]
self.map_shapes = [m.shape for m in map_list]
mnames = self.mnames = [mn + '_' + get_id(pp) for mn, pp in zip(map_methods, map_preprocs)]
assert data_dir != ''
self.data_dir = data_dir
if not os.path.exists(data_dir):
print('data_dir %s does not exist, creating' % data_dir)
os.makedirs(data_dir)
self.stimarraylist = []
basedir = self.dset.home('cache')
self.batch_meta_dict = {}
for map, mname, pp in zip(map_list, mnames, map_preprocs):
self.stimarraylist.append(get_stimarray(map, mname, perm, perm_id, cache_type, basedir))
self.make_batch_meta(mname, self.stimarraylist[-1], pp)
def get_num_classes(self, dataIdx=None):
    """Return the number of label classes.

    With no ``dataIdx`` (or when ``labels_unique`` is a flat sequence)
    the total count is returned; otherwise the count for the
    ``dataIdx``-th named label set.
    """
    if dataIdx is None or not hasattr(self.labels_unique, 'keys'):
        return len(self.labels_unique)
    else:
        # list() makes the positional key lookup work on Python 3 too,
        # where dict.keys() is a non-subscriptable view (Python 2
        # returned a list here).
        name = list(self.labels_unique.keys())[dataIdx]
        return len(self.labels_unique[name])
def get_next_batch(self):
    """Fetch the next batch from the parent provider, then normalize it:
    every map array is made C-contiguous and the labels become a
    single-precision column vector."""
    epoch, batchnum, batch = LabeledDataProvider.get_next_batch(self)
    for key in self.mnames:
        batch[key] = n.require(batch[key], requirements='C')
    batch['labels'] = n.c_[n.require(batch['labels'], dtype=n.single)]
    return epoch, batchnum, batch
def get_batch(self, batch_num):
    """Assemble batch ``batch_num``: reformatted labels plus one
    transposed (features x cases) array per named map."""
    size = self.batch_size
    window = slice(batch_num * size, (batch_num + 1) * size)
    out = {'labels': self.label_reformatting(self.metacol[window])}
    for name, stimarray in zip(self.mnames, self.stimarraylist):
        out[name] = n.asarray(stimarray[window]).T
    return out
def make_batch_meta(self, mname, marray, pp):
    """Create (or validate) the on-disk '<mname>.meta' file for one map.

    If the meta file exists, assert that its batch size, dataset name,
    preprocessing spec and dataset data agree with the current
    dp_params.  Otherwise, compute a running per-feature mean over the
    first ``num_batches_for_meta`` batches of ``marray`` and pickle the
    metadata out.  Either way the loaded meta dict is cached in
    ``self.batch_meta_dict[mname]``.
    """
    batch_size = self.batch_size
    metafile = os.path.join(self.data_dir, mname + '.meta')
    dp_params = self.dp_params
    dataset_data = dp_params.get('dataset_data', None)
    if os.path.exists(metafile):
        print('Meta file at %s exists, loading' % metafile)
        # NOTE: cPickle / text-mode open -- this code path is Python 2.
        bmeta = cPickle.load(open(metafile))
        #assertions checking that the things that need to be the same
        #for these batches to make sense are in fact the same
        assert dp_params['batch_size'] == bmeta['num_cases_per_batch'], (dp_params['batch_size'], bmeta['num_cases_per_batch'])
        if 'dataset_name' in bmeta:
            assert dp_params['dataset_name'] == bmeta['dataset_name'], (dp_params['dataset_name'], bmeta['dataset_name'])
        if 'preproc' in bmeta:
            assert pp == bmeta['preproc'], (pp, bmeta['preproc'])
            #pass
        if 'dataset_data' in bmeta:
            assert dataset_data == bmeta['dataset_data'], (dataset_data, bmeta['dataset_data'])
        assert bmeta['mname'] == mname, (bmeta['mname'], mname)
    else:
        print('Making %s meta at %s ...' % (mname, metafile))
        imgs_mean = None
        isf = 0
        for bn in range(self.num_batches_for_meta):
            print('Meta batch %d' % bn)
            # Batch of stimuli, transposed to (features, cases).
            stims = marray[bn * batch_size: (bn + 1) * batch_size]
            stims = n.asarray(stims).T
            #add to the mean
            if imgs_mean is None:
                imgs_mean = n.zeros((stims.shape[0],))
            # Incremental mean: previous mean weighted by fr, this
            # batch's mean by (1 - fr).
            # NOTE(review): dlen is the feature count (stims.shape[0]),
            # not the number of cases in the batch -- confirm intended.
            dlen = stims.shape[0]
            fr = isf / (isf + float(dlen))
            imgs_mean *= fr
            imgs_mean += (1 - fr) * stims.mean(axis=1)
            isf += dlen
        #write out batches.meta
        outdict = {'num_cases_per_batch': batch_size,
                   'mname': mname,
                   'num_vis': stims.shape[0],
                   'data_mean': imgs_mean,
                   'dataset_name': dp_params['dataset_name'],
                   'dataset_data': dataset_data,
                   'preproc': pp}
        with open(metafile, 'wb') as _f:
            cPickle.dump(outdict, _f)
    # Cache the freshly written / validated meta for later lookups.
    self.batch_meta_dict[mname] = cPickle.load(open(metafile, 'rb'))
def label_reformatting(self, lbls):
    """Reshape a 1-D label vector into a 1 x N row matrix."""
    assert lbls.ndim == 1
    return lbls.reshape((1, lbls.shape[0]))
def map_reformatting(stims):
    """Flatten a batch of stimuli into a (features, cases) matrix.

    ``stims`` is either (batch, sz, sz) single-channel data or
    (batch, sz, sz, nc) multi-channel data; the result has one column
    per batch element.  Fix: removed a leftover debug
    ``print(stims.shape)`` in the 4-D branch.
    """
    img_sz = stims.shape[1]
    batch_size = stims.shape[0]
    if stims.ndim == 3:
        new_s = (batch_size, img_sz**2)
        stims = stims.reshape(new_s).T
    else:
        assert stims.ndim == 4
        nc = stims.shape[3]
        new_s = (nc * (img_sz**2), batch_size)
        # Channel-major layout: (nc, sz, sz, batch) flattened per column.
        stims = stims.transpose([3, 1, 2, 0]).reshape(new_s)
    return stims
class Reorder2(object):
    """Index-and-flatten view over an array ``X`` for lazy-map pipelines.

    Calling the instance with ``inds`` selects those rows of X,
    converts float data to uint8, and flattens each selected item into
    a row via ``map_reformatting``.  ``rval_getattr`` advertises the
    shape/dtype of one flattened element to the lazy-array machinery.
    """
    def __init__(self, X):
        self.X = X
    def __call__(self, inds):
        mat = self.X[inds]
        # A scalar index drops the leading axis; restore it so a batch
        # dimension is always present.
        if mat.ndim < self.X.ndim:
            assert mat.ndim == self.X.ndim - 1, (mat.ndim, self.X.ndim)
            assert mat.shape == self.X.shape[1:], (mat.shape, self.X.shape)
            mat = mat.reshape((1, ) + mat.shape)
        # Float data is rescaled by 255 before rounding -- presumably it
        # lies in [0, 1]; TODO confirm against the datasets used.
        if 'float' in repr(mat.dtype):
            mat = n.uint8(n.round(255 * mat))
        return map_reformatting(mat).T
    def rval_getattr(self, attr, objs=None):
        if attr == 'shape':
            xs = self.X.shape
            # One flattened element: all non-batch axes multiplied out.
            return (n.prod(xs[1:]), )
        elif attr == 'dtype':
            return 'uint8'
        else:
            return getattr(self.X, attr)
def get_stimarray(marray, mname, perm, perm_id, cache_type, base_dir):
    """Wrap ``marray`` in a permuted lazy view and cache it on disk.

    ``cache_type`` selects the larray backend ('hdf5' or 'memmap').
    Fix: an unrecognized cache_type previously fell off the end and
    returned None, producing a confusing failure later; now it raises.
    """
    reorder = Reorder2(marray)
    lmap = larray.lmap(reorder, perm, f_map = reorder)
    if cache_type == 'hdf5':
        new_name = mname + '_' + perm_id + '_hdf5'
        print('Getting stimuli from cache hdf5 at %s/%s ' % (base_dir, new_name))
        # NOTE(review): `read_mode` looks like a module-level global --
        # confirm it is defined wherever this runs.
        return larray.cache_hdf5(lmap,
                                 name=new_name,
                                 basedir=base_dir,
                                 mode=read_mode)
    elif cache_type == 'memmap':
        new_name = mname + '_' + perm_id + '_memmap'
        print('Getting stimuli from cache memmap at %s/%s ' % (base_dir, new_name))
        return larray.cache_memmap(lmap,
                                   name=new_name,
                                   basedir=base_dir)
    else:
        raise ValueError('Unknown cache_type: %r' % (cache_type,))
####GENERAL Stuff
# Registry of built-in data-provider types: human-readable descriptions
# and the classes that implement them.
dp_types = {"dummy-n": "Dummy data provider for n-dimensional data",
            "dummy-labeled-n": "Labeled dummy data provider for n-dimensional data"}
dp_classes = {"dummy-n": DummyDataProvider,
              "dummy-labeled-n": LabeledDummyDataProvider}
def get_lambda_from_query_config(q):
    """Turn a dictionary specifying a mongo query (basically)
    into a lambda for subsetting a data table.

    - callable ``q``: returned unchanged
    - ``None``: a predicate accepting every record
    - dict ``{field: allowed_values}``: a predicate that is True when
      every field's value is contained in its allowed collection
    """
    if hasattr(q, '__call__'):
        return q
    elif q is None:
        # `is None` instead of `== None`: identity check cannot be
        # fooled by objects with a custom __eq__.
        return lambda x: True
    else:
        return lambda x: all(x[k] in v for k, v in q.items())
class DataProviderException(Exception):
    """Base exception for data-provider errors."""
    pass
def get_id(l):
    """Return a stable hex id for any repr()-able object: sha1 of its repr.

    Fix: ``.encode('utf-8')`` makes this work on Python 3, where sha1
    requires bytes; on Python 2 it is a no-op for the ASCII text that
    repr() produces.
    """
    return hashlib.sha1(repr(l).encode('utf-8')).hexdigest()
| [
"dyamins@gmail.com"
] | dyamins@gmail.com |
afbc0ea56e7cb155afec46f10c5e11b4625c3058 | abad82a1f487c5ff2fb6a84059a665aa178275cb | /Codewars/7kyu/so-easy-charge-time-calculation/Python/test.py | 0c7363e56959f0d28c875a4cc5375e8de39a6d2e | [
"MIT"
] | permissive | RevansChen/online-judge | 8ae55f136739a54f9c9640a967ec931425379507 | ad1b07fee7bd3c49418becccda904e17505f3018 | refs/heads/master | 2021-01-19T23:02:58.273081 | 2019-07-05T09:42:40 | 2019-07-05T09:42:40 | 88,911,035 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | # Python - 3.6.0
# Codewars-style test cases, run under the Codewars "Test" framework;
# `calculate_time` is the solution under test, defined elsewhere.
Test.describe('Basic Tests')
Test.assert_equals(calculate_time(1000, 500), 2.6)
Test.assert_equals(calculate_time(1500, 500), 3.9)
Test.assert_equals(calculate_time(2000, 1000), 2.6)
Test.assert_equals(calculate_time(5000, 1000), 6.5)
Test.assert_equals(calculate_time(1000, 5000), 0.26)
| [
"d79523@hotmail.com"
] | d79523@hotmail.com |
e4b473a220b5e55ed9ddc61367215546c54a10f9 | 565008d400f39cd5f820ac5c0bfdfdff12d8ba85 | /untitled4/lab6/migrations/0001_initial.py | fa5525dc724bad8bdb62d2e7e44118b5d27e8a24 | [] | no_license | VladimirBurash/Lab6 | 3ef58b072c25ba8312f61814116e16f2554cec1c | f8996b37db486f31c48cb879df60cab8b094b2cd | refs/heads/master | 2020-06-10T13:03:49.506464 | 2016-12-08T17:05:37 | 2016-12-08T17:05:37 | 75,958,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-08 16:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Artist',
fields=[
('artist_id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('artist_name', models.CharField(max_length=45)),
('artist_people', models.CharField(max_length=45)),
('artist_contact', models.CharField(max_length=45)),
('artist_vk', models.CharField(max_length=45)),
],
options={
'db_table': 'artist',
},
),
]
| [
"noreply@github.com"
] | noreply@github.com |
d6664e32d60cee4b037eec8832a8a5700d57d63a | e81576012330e6a6024d14f3e241f88ca34b73cd | /python_code/vnev/Lib/site-packages/jdcloud_sdk/services/resourcetag/models/ResourceReqVo.py | d520231ef4b8a9e9d0ce52cfcf8615761daffcb6 | [
"MIT"
] | permissive | Ureimu/weather-robot | eba6a84147755aa83c941a306bac1a7c4e95e23e | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | refs/heads/master | 2021-01-15T07:23:42.274413 | 2020-03-23T02:30:19 | 2020-03-23T02:30:19 | 242,912,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,909 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class ResourceReqVo(object):
    """Request value object for querying tagged cloud resources."""

    def __init__(self, serviceCodes=None, resourceIds=None, tagFilters=None, orderCondition=None, descOrAsc=None, pageSize=None, currentPage=None):
        """
        :param serviceCodes: (Optional) list of product-line codes.
            Product lines supported by the tagging system:
            - vm disk sqlserver es mongodb ip
            - memcached redis drds rds database db_ro
            - percona percona_ro mariadb mariadb_ro pg cdn
            - nativecontainer pod zfs jqs kubernetesNodegroup jcq
        :param resourceIds: (Optional) list of resource ids
        :param tagFilters: (Optional) list of tag filters
        :param orderCondition: (Optional) field used for ordering
        :param descOrAsc: (Optional) sort direction, one of ASC, DESC
        :param pageSize: (Optional) number of records per page
        :param currentPage: (Optional) current page number
        """
        self.serviceCodes = serviceCodes
        self.resourceIds = resourceIds
        self.tagFilters = tagFilters
        self.orderCondition = orderCondition
        self.descOrAsc = descOrAsc
        self.pageSize = pageSize
        self.currentPage = currentPage
| [
"a1090693441@163.com"
] | a1090693441@163.com |
e4349aad71a7bcb2e770564e0eb5f182520eb84b | 1cacad76550376a93dc5b87798e9624f22129879 | /docs/source/conf.py | 4eeb5cb8935ea679341c97f381b91f519e3ebbf5 | [
"CC0-1.0",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | clakech/metrics | 9ae4c5b9a9f5db10bc1040de43ac4d9fee42734f | c32f8bd8304267af09582f7979d69e2df8d69ca3 | refs/heads/master | 2020-12-25T03:10:34.158983 | 2012-02-19T07:06:07 | 2012-02-19T07:06:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,359 | py | # -*- coding: utf-8 -*-
#
# Dropwizard documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 13 11:29:49 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['ytemplates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Metrics'
copyright = u'2010-2012, Coda Hale, Yammer Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'trac'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'yammerdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'tagline': u'Mind the gap.',
'gradient_start': u'#ff684b',
'gradient_end': u'#cf2c0f',
'gradient_text': u'#fff',
'gradient_bg': u'#ED4A2D',
'gradient_shadow': u'#CF2C0F',
'landing_logo': u'metrics-hat.png',
'landing_logo_width': u'200px',
'github_page': u'https://github.com/codahale/metrics'
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["./_themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = u'Metrics'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = u'metrics-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Metricsdoc'
todo_include_todos = True
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Metrics.tex', u'Metrics Documentation',
u'Coda Hale', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'metrics', u'Metrics Documentation',
[u'Coda Hale'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Metrics', u'Metrics Documentation',
u'Coda Hale', 'Metrics', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Metrics'
epub_author = u'Coda Hale'
epub_publisher = u'Coda Hale'
epub_copyright = u'2012, Coda Hale'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| [
"coda.hale@gmail.com"
] | coda.hale@gmail.com |
b6b87d81e355a4f8bff6abb5d8f6e610fc0bb9d5 | 68271a37c3c4dd3d31b24c0cddbf574472f9f6a5 | /backend.py | 7e19cee00e6f11342bb1cc90a70ae13c0cee22f0 | [
"MIT"
] | permissive | LSaldyt/automata | 8bcbb269bdfdf01803d66b77eb31be0a7eddb83b | ff0ba058f087fbcd7958866019b4b7cb43e924bd | refs/heads/master | 2020-04-28T19:01:49.703783 | 2019-04-22T21:06:01 | 2019-04-22T21:06:01 | 175,497,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
def cuboid(o, size=(1,1,1)):
    """Return the six faces of an axis-aligned cuboid as a (6, 4, 3) array.

    ``o`` is the corner with the smallest coordinates and ``size`` gives
    the edge lengths along x, y and z.
    """
    # Faces of the unit cube, each as four (x, y, z) corner points.
    unit_faces = np.array(
        [[[0, 1, 0], [0, 0, 0], [1, 0, 0], [1, 1, 0]],
         [[0, 0, 0], [0, 0, 1], [1, 0, 1], [1, 0, 0]],
         [[1, 0, 1], [1, 0, 0], [1, 1, 0], [1, 1, 1]],
         [[0, 0, 1], [0, 0, 0], [0, 1, 0], [0, 1, 1]],
         [[0, 1, 0], [0, 1, 1], [1, 1, 1], [1, 1, 0]],
         [[0, 1, 1], [0, 0, 1], [1, 0, 1], [1, 1, 1]]],
        dtype=float)
    # Scale per axis (broadcasts over the trailing xyz axis -- same
    # result as the old per-axis loop), then translate to the corner.
    scaled = unit_faces * np.asarray(size, dtype=float)
    return scaled + np.asarray(o, dtype=float)
def renderCubes(positions, sizes=None, colors=None, **kwargs):
    """Build a Poly3DCollection with one cuboid per entry in ``positions``.

    ``sizes`` and ``colors`` default to unit cubes colored "C0"; extra
    keyword arguments are forwarded to Poly3DCollection.
    """
    # NOTE(review): the isinstance checks replace any non-list,
    # non-ndarray argument (e.g. a tuple) with the defaults -- confirm
    # that is intended.
    if not isinstance(sizes,(list,np.ndarray)): sizes=[(1,1,1)]*len(positions)
    if not isinstance(colors,(list,np.ndarray)): colors=["C0"]*len(positions)
    g = []
    for p,s,c in zip(positions,sizes,colors):
        g.append( cuboid(p, size=s) )
    # Each cuboid contributes 6 faces, so repeat its color 6 times.
    return Poly3DCollection(np.concatenate(g),
                            facecolors=np.repeat(colors,6, axis=0), **kwargs)
| [
"lucassaldyt@gmail.com"
] | lucassaldyt@gmail.com |
22f53a51f9d56c1020e164b962c2a83a03669e8f | a1c9c55e1520356113a320be18e8fcb31654a944 | /archive/0.10/generated/seaborn-jointplot-5.py | 97b6478b704f4f926f64361289dfc029bff368a4 | [] | no_license | seaborn/seaborn.github.io | bac12a9255b41c7971e9e94ea393d372ef66ef62 | f70445bc3456f0216169806c2daf03452ca1eba4 | refs/heads/master | 2023-01-06T10:50:10.789810 | 2022-12-30T19:59:55 | 2022-12-30T19:59:55 | 70,731,605 | 16 | 5 | null | 2022-06-28T00:32:07 | 2016-10-12T18:56:12 | HTML | UTF-8 | Python | false | false | 147 | py | g = (sns.jointplot("sepal_length", "sepal_width",
data=iris, color="k")
.plot_joint(sns.kdeplot, zorder=0, n_levels=6))
| [
"mwaskom@nyu.edu"
] | mwaskom@nyu.edu |
e878d66b3561fbb082d23f955080de3cb97a19a0 | ab1ceae0751891a72ffa49f0a3b7b5c6212f05d9 | /Basic/S2_03basic_variables.py | f77dc7e18c35444d1f689ab22ba5317e8e1ecb68 | [] | no_license | Leeeungrae/PythonLibraries | c36a0da91ce99d3aa581b15ab150e809db514a79 | d9f9c5aaf63e6ccf54b8e4c0df1eb1002004b3be | refs/heads/master | 2020-08-06T17:26:24.460839 | 2019-11-16T13:29:54 | 2019-11-16T13:29:54 | 213,091,886 | 0 | 0 | null | 2019-10-18T13:51:06 | 2019-10-06T01:05:26 | null | UTF-8 | Python | false | false | 1,304 | py | #list
mylist = [1, 2, 3, 4, 5]
mylist1 = ['a', 'b', 'c']
mylist2 = [1, 'a', [100, 200, 300], 2]
#list 인덱스
print(mylist[1])
print(mylist2[2][1])
#mylist2[3] = 100
print(mylist2)
#List size
print(len(mylist))
print(len(mylist2))
#append함수
data1 = ["korea", "Japan", "America"]
data1.append("china")
print(data1)
#pop함수
data2 = ['apple', 'banana', 'grape']
#data2.pop()
print(data2)
#sort함수
data3 = ["korean", "english", "science", "art"]
data3.sort()
print(data3)
#list형변환, list함수
data4 = ("hello", "hi", "bye")
d = range(1,10)
data4 = list(data4)
print(data4)
print(type(d))
print(type(list(d)))
#dictionary
mydict = {"korea":82, "Japan":81, "Ameraca":1, "Italy":42}
print(mydict)
#key값으로 접근
print(mydict.keys())
print(mydict['korea'])
#print(mydict[1])
#value값으로 접근
print(mydict.values())
#print(mydict[42]) -> key error, []는 키를 접근.
#items
print(mydict.items())
#tuple, tuple인덱싱
mytuple = (1, 3, 5, 7, 9)
print(mytuple)
print(mytuple[1])
#tuple의 불변 -> error
#mytuple.append(10)
#mytuple[1] = 4
#list와 tuple의 사이즈 비교
import sys
mylist = [1, 3, 5, 7, 9]
print(sys.getsizeof(mylist))
print(sys.getsizeof(mytuple))
#변수 할당
a = 3, 5, 7
print(a)
#list더하기 연산자
h = data1 + mylist
print(h)
| [
"reisei88.dev@gmail.com"
] | reisei88.dev@gmail.com |
cb3a40bd30ee7d01016f649a788ffdf93f58f8d8 | 9c9453734273da52b416ab9dfd5ad97bd03c6774 | /learntools/kt/tests/test_kt_data.py | 32d47faeac8cc102ed4510bcd7a5d19029543e29 | [] | no_license | summyfeb12/vector_edu | 08411ae19b3bc4a2b6a2951957b4d76dc75acac7 | d929498cf3e393aeeac408d6f72066563762fd7d | refs/heads/master | 2021-01-22T00:20:10.951181 | 2015-01-27T03:07:16 | 2015-01-27T03:07:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | import numpy as np
from learntools.kt.data import prepare_new_data2, prepare_new_data
from test_kt import use_logger_in_test
# TODO: this test is out of date. Replace it with a better one
'''
@use_logger_in_test
def test_prepare_dataset():
dataset_name = 'data/data4.gz'
ds, train_idx, valid_idx = prepare_new_data2(dataset_name, top_n=14, cv_fold=0)
prepared_data = prepare_new_data(dataset_name, top_eeg_n=14, eeg_only=1, cv_fold=0)
subject_x, skill_x, correct_y, start_x, eeg_x, eeg_table, stim_pairs, train_idx, valid_idx = prepared_data
assert all(subject_x == ds['subject'])
assert all(correct_y == ds['correct'])
assert all(start_x == ds['start_time'])
np.allclose(eeg_table[eeg_x], ds['eeg'])
'''
| [
"yueranyuan@gmail.com"
] | yueranyuan@gmail.com |
5f12c80b451d8ec28245197bdea90002f9550948 | ead63de23c659abe3c85897c03ea3316f2ce3b28 | /model.py | e401f1a8e93aef766d33c85b1207f6b534a97922 | [] | no_license | yqGANs/LCCGAN-v2 | 0f751574b4c61b2d4323d380f625d0de4795234f | d86f61e7f99d43efcdd428cc2ed0f09ee1dc7aaa | refs/heads/master | 2022-11-25T14:29:39.151224 | 2020-07-23T12:38:45 | 2020-07-23T12:38:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,940 | py | import torch
import torch.nn as nn
from torch.autograd import Variable
class lcc_sampling(nn.Module):
    """Local-coordinate-coding sampler.

    Holds ``anchor_num`` anchor points in a ``latent_dim``-dimensional
    latent space; ``forward`` turns a batch of sparse coefficient
    vectors into latent codes that are linear combinations of each
    sample's nearest anchors.
    """
    def __init__(self, anchor_num, latent_dim):
        super(lcc_sampling, self).__init__()
        self.anchor_num = anchor_num
        self.latent_dim = latent_dim
        # Buffer: follows .cuda()/.cpu() and is saved in state_dict, but
        # is not a trainable parameter.
        self.register_buffer('basis', torch.zeros(self.anchor_num, self.latent_dim))
    def reset_basis(self, basis):
        # Accept either a raw tensor or a Variable-style wrapper.
        if torch.is_tensor(basis):
            self.basis.copy_(basis)
        else:
            self.basis.copy_(basis.data)
    def forward(self, x):
        # x: (batch_size, sparsity) coefficients with sparsity <= anchor_num.
        batch_size = x.size(0)
        sparsity = x.size(1)
        assert sparsity <= self.anchor_num
        out = Variable(torch.zeros(batch_size, self.anchor_num))
        # One reference anchor per sample: random while training,
        # always anchor 0 in eval mode.
        if self.training:
            index = torch.LongTensor(batch_size).random_(self.anchor_num)
        else:
            index = torch.LongTensor(batch_size).zero_()
        if x.is_cuda:
            index = index.cuda()
        basis_select = self.basis[index]
        # Distance from every anchor to each sample's reference anchor.
        basis_expand = self.basis.view(1, self.anchor_num, self.latent_dim).expand(batch_size, self.anchor_num, self.latent_dim)
        select_expand = basis_select.view(batch_size, 1, self.latent_dim).expand(batch_size, self.anchor_num, self.latent_dim)
        distance = torch.norm(basis_expand-select_expand, 2, 2) # batch_size x anchor_num
        # Scatter the coefficients onto the `sparsity` nearest anchors.
        _, indices = torch.sort(distance)
        indices = Variable(indices[:, 0:sparsity]) # batch_size x sparsity
        if x.is_cuda:
            out = out.cuda()
            indices = indices.cuda()
        out = out.scatter_(1, indices, x)
        # Combine the anchors; reshape to (batch, latent_dim, 1, 1) for
        # the convolutional generator.  (Variable is a no-op wrapper on
        # modern PyTorch.)
        out = torch.mm(out, Variable(self.basis))
        return out.view(out.size(0), out.size(1), 1, 1)
class _netG(nn.Module):
    """DCGAN-style generator fed by LCC-sampled latent codes.

    ``forward`` first maps sparse coefficients through ``lcc_sampling``
    to a (batch, latent_dim, 1, 1) code, then upsamples it to an
    nc x 64 x 64 image in [-1, 1] (Tanh output).
    """
    def __init__(self, anchor_num, latent_dim, nz, ngf, nc):
        super(_netG, self).__init__()
        self.anchor_num = anchor_num
        self.latent_dim = latent_dim
        self.nz = nz
        self.ngf = ngf
        self.nc = nc
        self.lcc = lcc_sampling(self.anchor_num, self.latent_dim)
        # DCGAN
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(self.latent_dim, self.ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(self.ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(self.ngf * 8, self.ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(self.ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(self.ngf * 4, self.ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(self.ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(self.ngf * 2, self.ngf * 1, 4, 2, 1, bias=False),
            nn.BatchNorm2d(self.ngf * 1),
            nn.ReLU(True),
            # state size. (ngf*1) x 32 x 32
            nn.ConvTranspose2d(self.ngf * 1, self.nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (ngf) x 64 x 64
        )
    def reset_basis(self, basis):
        # Delegate anchor updates to the embedded LCC sampler.
        self.lcc.reset_basis(basis)
    def forward(self, input):
        output = self.lcc(input)
        output = self.main(output)
        return output
class _netD(nn.Module):
    """DCGAN discriminator: nc x 64 x 64 image -> probability of "real"."""

    def __init__(self, nc, ndf):
        super(_netD, self).__init__()
        self.nc = nc
        self.ndf = ndf
        # Same DCGAN stack as before, assembled from a list for clarity;
        # layer construction order is unchanged, so weight init draws
        # from the RNG in the same sequence.
        layers = [
            # (nc) x 64 x 64
            nn.Conv2d(nc, ndf * 2, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # (ndf*2) x 32 x 32
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # (ndf*4) x 16 x 16
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # (ndf*8) x 8 x 8
            nn.Conv2d(ndf * 8, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid(),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        score = self.main(input)
        return score.view(-1, 1)
class _decoder(nn.Module):
    """Decoder half of the autoencoder: latent code -> nc x 64 x 64 image."""
    def __init__(self, nc, ngf, latent_dim):
        super(_decoder, self).__init__()
        self.nc = nc
        self.ngf = ngf
        self.latent_dim = latent_dim
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(self.latent_dim, self.ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(self.ngf * 8),
            nn.ReLU(True),
            # state size. (ngf * 8) x 4 x 4
            nn.ConvTranspose2d(self.ngf * 8, self.ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(self.ngf * 4),
            nn.ReLU(True),
            # state size. (ngf * 4) x 8 x 8
            nn.ConvTranspose2d(self.ngf * 4, self.ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(self.ngf * 2),
            nn.ReLU(True),
            # state size. (ngf * 2) x 16 x 16
            nn.ConvTranspose2d(self.ngf * 2, self.ngf * 1, 4, 2, 1, bias=False),
            nn.BatchNorm2d(self.ngf * 1),
            nn.ReLU(True),
            # state size. (ngf * 1) x 32 x 32
            nn.ConvTranspose2d(self.ngf * 1, self.nc, 4, 2, 1, bias=False),
            nn.Tanh(),
            # state size. (nc) x 64 x 64
        )
    def forward(self, input):
        # input: (batch, latent_dim, 1, 1); output in [-1, 1] via Tanh.
        output = self.main(input)
        return output
class _encoder(nn.Module):
    """Encoder half of the autoencoder: nc x 64 x 64 image -> (latent_dim) x 1 x 1 code."""
    def __init__(self, nc, ndf, latent_dim):
        super(_encoder, self).__init__()
        self.nc = nc
        self.ndf = ndf
        self.latent_dim = latent_dim
        self.main = nn.Sequential(
            # input is (nc) x 64 x 64
            nn.Conv2d(self.nc, self.ndf * 2, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf * 2) x 32 x 32
            nn.Conv2d(self.ndf * 2, self.ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf * 4) x 16 x 16
            nn.Conv2d(self.ndf * 4, self.ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf * 4) x 8 x 8
            nn.Conv2d(self.ndf * 8, self.ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf * 8) x 4 x 4
            nn.Conv2d(self.ndf * 8, self.latent_dim, 4, 1, 0, bias=False),
            # state size. (latent_dim) x 1 x 1
        )
    def forward(self, input):
        # input: (batch, nc, 64, 64); returns the unnormalized latent code.
        output = self.main(input)
        return output
"scottgyguo@tencent.com"
] | scottgyguo@tencent.com |
355fbfb179587e34aec6a725fa153e1adf273f2a | 7c302a09def89d3c363e344856e7312ef812c767 | /users/forms.py | 3b43eda2d73b6a5f32cfd83440802d0b9e1206d0 | [] | no_license | Evishka/django-custom-signup | 4da10da94b0921420af15ccf8ee0201838ffd856 | c2230e3a26dd0389107a597d7f2574e31cd1aaf6 | refs/heads/master | 2022-12-15T01:12:04.177456 | 2019-10-23T05:27:47 | 2019-10-23T05:27:47 | 216,969,661 | 0 | 0 | null | 2022-12-08T06:46:19 | 2019-10-23T04:43:57 | Python | UTF-8 | Python | false | false | 682 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import CustomUser
class CustomUserCreationForm(UserCreationForm):
    """Sign-up form for CustomUser exposing name, email and personal URL."""
    class Meta:
        model = CustomUser
        fields = ('first_name','last_name', 'email','personal_url')

    def signup(self, request, user):
        # Hook called after user creation (django-allauth style):
        # copy the extra cleaned fields onto the new user and persist.
        user.first_name = self.cleaned_data['first_name']
        user.last_name = self.cleaned_data['last_name']
        user.personal_url = self.cleaned_data['personal_url']
        user.save()
class CustomUserChangeForm(UserChangeForm):
    """Edit form for CustomUser with the same field set as the sign-up form."""
    class Meta:
        model = CustomUser
        fields = ('first_name','last_name', 'email','personal_url')
"luce_fioca@hotmail.com"
] | luce_fioca@hotmail.com |
7cb408bfb81428efbabb5ac73ccd502f8f7242e8 | aa560d3f82f207f713d026a439a061fb5e4131db | /AMTPL/wsgi.py | 34b3f4400824cdd5e8a9d96ebc4ffd6618a4a6a9 | [] | no_license | galactic0/areon-web-deploy-v1 | bb9a2fba9783c73a48bde8cd40ff078e6f0a51bf | bf80b046426c0d3284e8027beb389604cef522ff | refs/heads/master | 2022-09-04T01:33:32.511790 | 2020-05-26T15:15:46 | 2020-05-26T15:15:46 | 267,224,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
WSGI config for AMTPL project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AMTPL.settings')
application = get_wsgi_application()
| [
"aps1aman@gmail.com"
] | aps1aman@gmail.com |
64b76f3987df38265342911529b1721376420922 | a52c7be812be43fbfa5844f5b89a25b1bdb091fa | /datelib.py | 3b1947b66a3ef083dbf0a23f6a9ed36ea9cc92c2 | [] | no_license | rteak/Project001 | 3aec3ef97897210f73d1074e00c6ec267f719553 | 1ee63dfd590c8beb5377b4492170990556b022be | refs/heads/master | 2022-08-01T19:38:28.496415 | 2020-05-27T09:26:20 | 2020-05-27T09:26:20 | 267,269,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py | # =============================================================================
# 日数計算
# =============================================================================
import datetime as dt
def day_count(fromdate, todate, delimiter='/'):
    """Return the number of days from *fromdate* to *todate* (negative if reversed).

    Both arguments are date strings of the form
    ``<year><delimiter><month><delimiter><day>``, e.g. ``"2020/02/02"``.
    """
    # Parse each date string in one pass instead of splitting three times.
    fy, fm, fd = (int(part) for part in fromdate.split(delimiter))
    ty, tm, td = (int(part) for part in todate.split(delimiter))
    # date subtraction yields a timedelta; .days is the signed day count.
    return (dt.date(ty, tm, td) - dt.date(fy, fm, fd)).days
def add_days(fromdate, days, delimiter='/'):
    """Return *fromdate* advanced by *days* days, formatted with *delimiter*.

    *fromdate* is ``<year><delimiter><month><delimiter><day>``; the result uses
    the same layout (zero-padded month/day).

    Fix: removed the redundant in-function re-imports of ``datetime`` that
    shadowed the module-level ``dt`` alias.
    """
    fy, fm, fd = (int(part) for part in fromdate.split(delimiter))
    d = dt.date(fy, fm, fd) + dt.timedelta(days=days)
    # Rebuild the string with the caller's delimiter.
    return d.strftime("%Y") + delimiter + d.strftime("%m") + delimiter + d.strftime("%d")
# Demo: 2020/02/02 -> 2020/05/26 is 114 days, and the inverse operation.
print(day_count("2020/02/02","2020/05/26"))
print(add_days("2020/02/02",114))
| [
"62337633+rteak@users.noreply.github.com"
] | 62337633+rteak@users.noreply.github.com |
aecfb00725f4f7bd87e9f9c56ba210a02afbef5d | 780c1cfe89f5e913d44308b31ce6ce0844426ef2 | /Python/ProblemSet03/Solutions.py | 20b203f772b90bfd4c675974c4de2217fbaa38b0 | [] | no_license | Hariniraghavan12/GraduateTrainingProgram2018 | 80f3969d31de097df14e1012af7987fa34d2ca67 | 01af138707d4653bd9072ca53de60904b83b53c9 | refs/heads/master | 2020-04-15T15:20:53.689103 | 2019-06-07T06:58:01 | 2019-06-07T06:58:01 | 164,792,309 | 0 | 0 | null | 2019-01-09T05:10:55 | 2019-01-09T05:10:55 | null | UTF-8 | Python | false | false | 6,763 | py | #1.A string slice can take a third index that specifies the "step size;" that is, the number of spaces between successive
#characters. A step size of 2 means every other character; 3 means every third, etc.
#>>> fruit = 'banana'
#>>> fruit[0:5:2]
#'bnn'
#A step size of -1 goes through the word backwards, so the slice [::-1] generates a reversed string.
#Use this idiom to write a one-line version of is_palindrome
#Sol:
def is_palindrome(string):
    """Print whether *string* reads the same forwards and backwards."""
    if string == string[::-1]:
        print("palindrome")
    else:
        print("not a palindrome")
# Script driver (Python 2: raw_input): prompt and report palindrome status.
string=raw_input("enter a string:")
is_palindrome(string)
#2.Write a function called rotate_word() that takes a string and an integer as parameters, and that returns a new
#string that contains the letters from the original string "rotated" by the given amount.
#For example, "cheer" rotated by 7 is "jolly" and "melon" rotated by -10 is "cubed".
#You might want to use the built-in functions ord, which converts a character to a numeric code, and chr,
#which converts numeric codes to characters.
#Sol:
from __future__ import print_function
def rotate_word(string, num):
    """Print *string* Caesar-rotated by *num* positions (one char at a time, no newline).

    Bug fix: the original simply offset each character code (``chr(ord(c)+num)``)
    without wrapping inside the alphabet, so negative shifts such as
    ``rotate_word("melon", -10)`` printed non-letters instead of ``cubed``.
    Rotation now wraps modulo 26, preserving case; non-letters pass through.
    """
    for ch in string:
        if ch.isalpha():
            base = ord('A') if ch.isupper() else ord('a')
            rotated = chr(base + (ord(ch) - base + num) % 26)
        else:
            rotated = ch
        print(rotated, end="")
# Script driver (Python 2): prompt for a word and a shift, then rotate it.
string=raw_input("enter a string:")
num=int(input("enter a number:"))
rotate_word(string,num)
#3.In 1939 Ernest Vincent Wright published a 50,000 word novel called Gadsby that does not contain the letter "e."
#Since "e" is the most common letter in English, that’s not easy to do. In fact, it is difficult to construct a solitary
#thought without using that most common symbol. It is slow going at first, but with caution and hours of training you can
#gradually gain facility. All right, I’ll stop now. Write a function called has_no_e that returns True if
#the given word doesn’t have the letter "e" in it.
#Sol:
def no_e(string):
    """Print whether *string* contains the letter 'e' (lowercase only).

    Cleanup: the original compared ``('e' in string) == True``; the membership
    test is already a boolean.
    """
    if 'e' in string:
        print("contains 'e'")
    else:
        print("does not contain 'e'")
# Script driver (Python 2): prompt for a word and report whether it has an 'e'.
string=raw_input("enter a string:")
no_e(string)
#4.Modify the above program to print only the words that have no “e” and compute the percentage of the words in the
#list have no “e.”
#Sol:
# NOTE(review): this second no_e() rebinds the single-word version defined
# above; only this list-based variant survives at module level.
list1=[]
list2=[]
def no_e(list1):
    """Print the words from *list1* without an 'e' and their percentage.

    Caveats visible in the code: results accumulate into the module-level
    ``list2`` (repeated calls double-count), the parameter shadows the global
    ``list1``, and an empty input list raises ZeroDivisionError below.
    """
    for i in list1:
        if(('e' not in i)==True):
            list2.append(i)
    length1=len(list1)
    length2=len(list2)
    print("words not containing e:{}".format(list2))
    percentage=(float(length2)/float(length1))*100
    print("{}%".format(int(percentage)))
# Script driver (Python 2): read n words, then report the e-free subset.
n=int(input("enter no of words:"))
for i in range(0,n):
    inp=raw_input("enter string:")
    list1.append(inp)
no_e(list1)
#5.Write a function named avoids that takes a word and a string of forbidden letters, and that returns True if the word doesn’t use any of the
#forbidden letters.
#Sol:
def avoids(word, forbidden):
    """Return True if *word* contains ANY letter from *forbidden*, else False.

    (This matches the driver below, which prints "does not contain" on False.)

    Bug fix: the original returned inside the loop's ``else`` branch, so only
    the FIRST forbidden letter was ever inspected; e.g. with forbidden "ze" and
    word "hello" it wrongly returned False. Now every letter is checked.
    """
    for letter in forbidden:
        if letter in word:
            return True
    return False
# Script driver (Python 2): report whether the word uses any forbidden letter.
word=raw_input("enter a word:")
forbidden=raw_input("enter forbidden letters as a string:")
if(avoids(word,forbidden)==False):
    print("does not contain")
else:
    print("contains")
#6.Modify your program to prompt the user to enter a string of forbidden letters and then print the number of words that
#don’t contain any of them. Can you find a combination of 5 forbidden letters that excludes the smallest number of words?
#Sol:
'''forb_list=[]
str2=''
list2=[]
count=0
def avoids(word,forb_list):
for l in forb_list:
if l not in word:
list2.append(l)
str2=''.join(list2)
nl=str2.split()
print(str2)
print list2
print len(nl)
word=raw_input("enter a word:")
forbidden=raw_input("enter a forbidden string:")
forb_list=forbidden.split(" ")
avoids(word,forbidden)'''
#Sol:
# Script driver + helper (Python 2): count the words of *sentence* that avoid
# every letter of *f_word*.  All state is module-level; avoid() takes no args.
f_word = raw_input("Enter the string of forbidden letters")
sentence = raw_input("Enter a word ")
words=sentence.split(' ')
list_1=[]
def avoid():
    """Append to list_1 every word containing none of f_word's letters; print the count."""
    for word in words:
        count =0
        for i in f_word:
            # NOTE(review): word.__contains__(i) is the dunder spelling of
            # ``i in word``.
            if word.__contains__(i):
                continue
            else:
                count+=1
        # The word qualifies only if every forbidden letter was absent.
        if count==len(f_word):
            list_1.append(word)
    print len(list_1)
avoid()
#7.Write a function named using_only() that takes a word and a string of letters, and that returns True if the word contains
#only letters in the list. Can you make a sentence using only the letters acefhlo? Other than "Hoe alfalfa?"
#Sol:
flag=0  # leftover module-level sentinel from the original; kept for compatibility, unused below
def using_only(word, list_str):
    """Return True if every character of *word* appears in *list_str*.

    Bug fix: the original set a local ``flag`` only inside the loop and then
    read it afterwards, so an empty *word* raised UnboundLocalError. Early
    returns remove the flag entirely; an empty word now returns True
    (vacuously uses only allowed letters).
    """
    for ch in word:
        if ch not in list_str:
            return False
    return True
# Script driver (Python 2): squeeze spaces out of the word and test it against
# the allowed letters.
word_str=raw_input("enter a word:")
# NOTE(review): str.lower() returns a new string; this result is discarded, so
# the comparison below is effectively case-sensitive.
word_str.lower()
word_list=word_str.split(' ')
word=''.join(word_list)
#print word
string=raw_input("enter a string:")
# NOTE(review): same discarded .lower() result here.
string.lower()
list_str=list(string)
print(using_only(word,list_str))
#8.Write a function called is_abecedarian that returns True if the letters in a word appear in alphabetical order
#(double letters are ok). How many abecedarian words are there? (i.e) "Abhor" or "Aux" or "Aadil" should return "True"
#Banana should return "False"
#Sol:
def abecedarian(word):
    """Return True if the characters of *word* appear in non-decreasing order.

    Double letters are allowed; empty and single-character words qualify.
    """
    for earlier, later in zip(word, word[1:]):
        if earlier > later:
            return False
    return True
# Script driver (Python 2): test the entered word for alphabetical order.
word=raw_input("enter a word:")
print(abecedarian(word))
#9.Write a function called is_sorted that takes a list as a parameter and returns True if the list is sorted in ascending
#order and False otherwise. You can assume (as a precondition) that the elements of the list can be compared with the
#relational operators <, >, etc. For example, is_sorted([1,2,2]) should return True and is_sorted(['b','a']) should
#return False.
#Sol:
def is_sorted(str_list):
    """Return True if *str_list* is in non-decreasing order (empty lists qualify)."""
    return all(str_list[i] <= str_list[i + 1] for i in range(len(str_list) - 1))
# Script driver (Python 2): check whether the string's characters are sorted.
string=raw_input("enter a string:")
str_list=list(string)
print(is_sorted(str_list))
#10.Two words are anagrams if you can rearrange the letters from one to spell the other.
#Write a function called is_anagram that takes two strings and returns True if they are anagrams.
#Sol:
def is_anagram(string1, string2):
    """Return True if *string2* is a rearrangement of the letters of *string1*."""
    return sorted(string1) == sorted(string2)
# Script driver (Python 2): compare two entered strings for anagram-ness.
string1=raw_input("enter the first string:")
string2=raw_input("enter the second string:")
print(is_anagram(string1,string2))
| [
"noreply@github.com"
] | noreply@github.com |
cd6cd17cd062d3d35aa0116da8ae24d23f41ecbe | 17fb6b997c2ab707651fe719f1456dfae3aae052 | /leetcode/146_LRU_Cache.py | 85ccc923e353d4f69b50b49c2f176c49fb7b70a2 | [] | no_license | dmitry-pechersky/algorithms | 40b6184400bcf818c2590b9413ce1bf510b3b355 | 74f13f6e256c2e0c1503ec4bafa6b3e0f227fe2c | refs/heads/master | 2023-05-31T14:20:51.882551 | 2023-05-11T06:59:53 | 2023-05-11T06:59:53 | 122,305,451 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,912 | py | from unittest import TestCase
from collections import OrderedDict
class LRUCache:
    """Fixed-capacity key/value cache evicting the least-recently-used entry.

    Backed by an OrderedDict: the last item is the most recently used, the
    first item is the next eviction candidate.  get() on a missing key
    returns -1 (LeetCode 146 contract).
    """

    def __init__(self, capacity: int):
        self.capacity = capacity
        self.dic = OrderedDict()

    def get(self, key: int) -> int:
        """Return the cached value for *key* (marking it fresh), or -1."""
        if key not in self.dic:
            return -1
        self.dic.move_to_end(key)
        return self.dic[key]

    def put(self, key: int, value: int) -> None:
        """Insert or refresh *key*, evicting the oldest entry when full."""
        if key in self.dic:
            self.dic[key] = value
            self.dic.move_to_end(key)
            return
        if len(self.dic) >= self.capacity:
            # popitem(last=False) drops the least recently used entry.
            self.dic.popitem(last=False)
        self.dic[key] = value
class LRUCacheTest(TestCase):
    """Unit tests for LRUCache: eviction order, refresh-on-get, and updates."""
    def test_1(self) -> None:
        # Basic eviction: touching key 1 makes key 2 the LRU victim.
        cache = LRUCache(2)
        cache.put(1, 1)
        cache.put(2, 2)
        self.assertEqual(cache.get(1), 1)
        cache.put(3, 3)
        self.assertEqual(cache.get(2), -1)
        cache.put(4, 4)
        self.assertEqual(cache.get(1), -1)
        self.assertEqual(cache.get(3), 3)
        self.assertEqual(cache.get(4), 4)
    def test_2(self) -> None:
        # Reverse-order gets reshuffle recency before the next eviction.
        cache = LRUCache(3)
        cache.put(1,1)
        cache.put(2,2)
        cache.put(3,3)
        cache.put(4,4)
        self.assertEqual(cache.get(4), 4)
        self.assertEqual(cache.get(3), 3)
        self.assertEqual(cache.get(2), 2)
        self.assertEqual(cache.get(1), -1)
        cache.put(5, 5)
        self.assertEqual(cache.get(1), -1)
        self.assertEqual(cache.get(2), 2)
        self.assertEqual(cache.get(3), 3)
        self.assertEqual(cache.get(4), -1)
        self.assertEqual(cache.get(5), 5)
    def test_3(self) -> None:
        # Updating an existing key replaces its value without eviction.
        cache = LRUCache(2)
        self.assertEqual(cache.get(2), -1)
        cache.put(2, 6)
        self.assertEqual(cache.get(1), -1)
        cache.put(1, 5)
        cache.put(1, 2)
        self.assertEqual(cache.get(1), 2)
        self.assertEqual(cache.get(2), 6)
| [
"@none@none.com"
] | @none@none.com |
03a60dacf44e74f548d5b2e88d788ae264bcd283 | 326142a582c8864240064692a6500dc12da91697 | /78_Subsets.py | f08f9628475b776d6ce844ea498b5b8e5ab2f963 | [] | no_license | qscez2001/leetcode | 960e775f3c7190ea2f2c3078a25714aafaf8801b | f124203c13b2e539acc7a863ec9b1a56363b1f96 | refs/heads/master | 2022-02-26T03:35:14.308765 | 2022-02-07T07:22:48 | 2022-02-07T07:22:48 | 238,398,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,100 | py | '''
Given an integer array nums, return all possible subsets (the power set).
The solution set must not contain duplicate subsets.
Example 1:
Input: nums = [1,2,3]
Output: [[],[1],[2],[1,2],[3],[1,3],[2,3],[1,2,3]]
Example 2:
Input: nums = [0]
Output: [[],[0]]
'''
def subsets(nums):
    """Return every subset (the power set) of *nums* via the dfs helper below."""
    subsets = []
    dfs(nums, 0, [], subsets)
    return subsets
def dfs(nums, index, path, subsets):
    """Append *path* to *subsets*, then extend it with each element at or after *index*.

    Improvement: recurse on the full list with an advanced index instead of
    the original's ``nums[1:]`` slice-plus-offset trick, which allocated a new
    list copy on every call.  The DFS visit order is unchanged.
    """
    subsets.append(path)
    for i in range(index, len(nums)):
        dfs(nums, i + 1, path + [nums[i]], subsets)
'''
Algorithm
Power set is all possible combinations of all possible lengths, from 0 to n.
Given the definition, the problem can also be interpreted as finding the power set from a sequence.
So, this time let us loop over the length of combination,
rather than the candidate numbers,
and generate all combinations for a given length with the help of backtracking technique.
'''
'''
We define a backtrack function named backtrack(first, curr) which takes the index of first element to add and a current combination as arguments.
If the current combination is done, we add the combination to the final output.
Otherwise, we iterate over the indexes i from first to the length of the entire sequence n.
Add integer nums[i] into the current combination curr.
Proceed to add more integers into the combination : backtrack(i + 1, curr).
Backtrack by removing nums[i] from curr.
'''
def subsets(self, nums):
    """Return the power set of *nums*, grouped by subset size (LeetCode 78 method).

    NOTE: this rebinds the module name ``subsets`` defined above; it is a
    Solution-class method pasted at module level, hence the unused *self*.

    Bug fix: the original signature was annotated ``List[int]`` without
    importing ``typing.List``, which raises NameError the moment the module
    is loaded; the annotations are removed.  The nested helper also no longer
    uses a mutable default argument.
    """
    def backtrack(first, curr):
        # A combination of length k is complete; record a copy.
        if len(curr) == k:
            output.append(curr[:])
            return
        for i in range(first, n):
            curr.append(nums[i])            # choose nums[i]
            backtrack(i + 1, curr)          # extend with later elements
            curr.pop()                      # un-choose (backtrack)

    output = []
    n = len(nums)
    # Enumerate combinations of every length 0..n.
    for k in range(n + 1):
        backtrack(0, [])
    return output
# nums = [0]
# print(subsets(nums))
# nums = [1,2]
# print(subsets(nums))
# NOTE(review): at this point ``subsets`` names the two-argument (self, nums)
# version defined above, so this one-argument call raises TypeError at runtime.
nums = [1,2,3]
print(subsets(nums))
"qscez2001@gmail.com"
] | qscez2001@gmail.com |
d346a54dd0ef28cd1f3d2d688b9393ef1b0a680f | f127484351afb41f748967a9f76d0fd10a7f36a5 | /weather/views.py | df4a90105f1034b35043740664d4d39e28301e81 | [] | no_license | Megha912/the_weather | aa4eb4b85d179c232541fd3324e4203f237f0d5c | b74ff5bf56985ca76bb38c7d00343b3f79c600eb | refs/heads/master | 2022-04-18T15:59:42.070959 | 2020-04-20T18:00:29 | 2020-04-20T18:00:29 | 257,363,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,681 | py | import requests
from django.shortcuts import render , redirect
from .models import City
from .forms import CityForm
def index(request):
    """Weather dashboard view: add a city on POST, then render all cities' weather.

    POST: validates the city name, confirms it exists via the OpenWeatherMap
    API, saves it, and prepares a Bulma-style flash message.
    GET/always: fetches current weather for every stored city (one API call
    per city) and renders 'weather.html'.

    NOTE(review): the API key is hardcoded in the URL — move it to settings or
    an environment variable before publishing.
    """
    url = 'http://api.openweathermap.org/data/2.5/weather?q={}&units=imperial&appid=271d1234d3f497eed5b1d80a07b3fcd1'
    err=''
    message=""
    message_class=""
    if request.method == 'POST':
        form = CityForm(request.POST)
        if form.is_valid():
            new_city=form.cleaned_data["name"]
            c = City.objects.filter(name=new_city).count()
            if c == 0:
                # Only persist the city if the weather API recognises it.
                r = requests.get(url.format(new_city)).json()
                if r["cod"]==200:
                    form.save()
                else:
                    err="city does not exist in the world"
            else:
                err="city already exists"
        if err:
            message = err
            message_class = "is-danger"
        else:
            # NOTE(review): "succesfully" typo lives in the user-facing string.
            message = "city added succesfully"
            message_class = 'is-success'
    form = CityForm()
    cities = City.objects.all()
    weather_data = []
    # One synchronous API request per stored city.
    for city in cities:
        r = requests.get(url.format(city)).json()
        city_weather = {
            'city' : city.name,
            'temperature' : r['main']['temp'],
            'description' : r['weather'][0]['description'],
            'icon' : r['weather'][0]['icon'],
        }
        weather_data.append(city_weather)
    context = {'weather_data' : weather_data, 'form' : form, 'message': message,'message_class': message_class}
    return render(request, 'weather.html', context)
def delete_city(request, city_name):
    """Delete the City named *city_name* and redirect to the home view.

    NOTE(review): City.objects.get raises DoesNotExist for unknown names and
    MultipleObjectsReturned for duplicates — no handling here.
    """
    City.objects.get(name=city_name).delete()
    return redirect('home')
"2017143@iiitdmj.ac.in"
] | 2017143@iiitdmj.ac.in |
56f93826cccd3b8c8efa2400ea3934ed95d6102e | db0e991d02a50eda222aaebeb7a5772b9cba467f | /account/admin.py | 6db31fc81344fb402617759b0d2e5180d5105ae8 | [] | no_license | iamrraj/Djanog_Learn | 1ba90ac797b284c5e2a7dd733fd61353ee8af241 | c522b41411415585468cadfe6999262a6c9b487d | refs/heads/master | 2020-04-17T09:48:09.414269 | 2019-01-18T21:24:53 | 2019-01-18T21:24:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,221 | py | from django.contrib import admin
from .models import Profile,Categoty,Product,Slide
# Register your models here.
class ProfileAdmin(admin.ModelAdmin):
    """Admin list configuration for Profile records.

    Cleanup: the original assigned ``list_display`` twice in a row; only the
    second assignment takes effect, so the dead first one is removed.
    """
    list_display = ('user','birth_date','email_confirmed', 'location')
    list_filter = [ 'location', 'user' ]
    search_fields = ['location', 'user' ]
class ProductAdmin(admin.ModelAdmin):
    """Admin list configuration for Product records.

    Cleanup: the original assigned ``list_display`` twice; the shadowed first
    assignment is removed.  Field name 'categoty' is kept as-is — it matches
    the (misspelled) model field.
    """
    list_display = ('name','disprice','seller', 'acprice','categoty')
    list_display_links = ('name','disprice','seller', 'acprice','categoty')
    list_filter = ['pub_date','categoty','seller']
    search_fields = ['categoty','seller','name']
class CategoryAdmin(admin.ModelAdmin):
    """Admin list configuration for Categoty records (model name is misspelled upstream)."""
    list_display = ('name','pub_date')
    list_display_links = ('name','pub_date')
    list_filter = ['pub_date','name']
    search_fields = ['id','name','pub_date']
class SlideAdmin(admin.ModelAdmin):
    """Admin list configuration for Slide records."""
    list_display = ('image','pub_date')
    list_display_links = ('image','pub_date')
    list_filter = ['pub_date','image']
# Attach each model to its admin configuration.  Slide is registered at
# L3543... note: Slide/SlideAdmin is NOT registered here — only these four
# model/admin pairs are wired up.
admin.site.register(Profile,ProfileAdmin)
admin.site.register(Product,ProductAdmin)
admin.site.register(Categoty,CategoryAdmin)
"rajr97555@gmail.com"
] | rajr97555@gmail.com |
b00dbf73d1e70aebddeedd3ffd333d028987ef80 | b0a73293e98e271ca9f3dcdf8aa67250aeb0c863 | /Decorator/DekoratorStopki2.py | 988192c52196dfa789114d4ed69ac8c1469cb20e | [] | no_license | gorkaMaksymilian/design-patterns | 84df1bdb530d3158b80d9cc1560c720b8ceca69b | 8a861612bbae33344d670cc3cdb7d00135ec7c09 | refs/heads/master | 2020-09-13T12:14:21.256238 | 2019-12-17T17:15:58 | 2019-12-17T17:15:58 | 222,775,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | from DekoratorPotwierdzenia import DekoratorPotwierdzenia
class DekoratorStopki2(DekoratorPotwierdzenia):
    """Footer decorator no. 2: prints the wrapped confirmation, then "STOPKA 2".

    NOTE(review): appears to follow the Decorator pattern around an order
    ("zamowienie") object — the parent class is defined in another file, so
    confirm its contract there.  The __init__ below only forwards to the
    parent and could be inherited implicitly.
    """
    def __init__(self, zamowienie):
        super().__init__(zamowienie)
    def drukuj(self):
        # Print the decorated order first, then append this footer.
        super().drukuj()
        self.drkStopka()
    def drkStopka(self):
        print("STOPKA 2")
"noreply@github.com"
] | noreply@github.com |
2f71df85a3d2d1e020f3b4c9a12f04577e62cffd | 0c2dff339fe515e90dd98bd96c286d3603e6efdd | /hw1/release/secretExchange/naive/utils.py | ce1aa2a540892fe2705e62244265612445744bbb | [] | no_license | KevinPan0508/crypto-HW | e97fb5632e21c30336052762f38cd0e17792cf2a | f94c3df53f93934f86e09fc6a81611c364976c32 | refs/heads/main | 2023-06-19T05:29:35.919399 | 2021-07-09T05:43:10 | 2021-07-09T05:43:10 | 384,327,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,035 | py | #!/usr/bin/python3 -u
import os
import random
from enum import Enum
from Cryptodome.Cipher import AES
class State(Enum):
    """Protocol phases shared by the Client and Server state machines below."""
    Initial = 0
    WaitKey = 1
    WaitMessage = 2
    Finish = 3
class Station:
    """Line-oriented stdin/stdout transport where every message carries a fixed prefix."""

    def read(self, prefix: str):
        """Read one line, require it to start with *prefix*, and return the remainder."""
        line = input().strip()
        assert line.startswith(prefix)
        assert isinstance(line, str)
        return line[len(prefix):]

    def send(self, prefix: str, content):
        """Write *prefix* followed by *content* (integers are stringified first)."""
        payload = str(content) if isinstance(content, int) else content
        print(prefix + payload)
class Client(Station):
    """Diffie-Hellman initiator: sends (p, g, g^a), derives an AES-CBC session
    key from the shared secret, then exchanges flag messages with the server.

    NOTE(review): the session key is the LOW 128 bits of the DH shared secret
    (masked below), and one IV is reused for both the encrypter and decrypter
    streams — acceptable here only because this is CTF exercise code.
    """
    def __init__(self, flag1, flag2, g=2, p=None):
        self.state = State.Initial
        self.g = g
        self.p = p
        self.flag1 = flag1
        self.flag2 = flag2
    # long and tedious state machine
    def run(self):
        """Drive the protocol state machine until State.Finish."""
        while True:
            if self.state == State.Initial:
                # send p, g, g^a
                # generate private key
                self.private = random.randint(2, self.p - 2)
                self.public = pow(self.g, self.private, self.p)
                # send parameters
                self.send("p: ", self.p)
                self.send("g: ", self.g)
                self.send("public: ", self.public)
                # state transition
                self.state = State.WaitKey
            elif self.state == State.WaitKey:
                # read g^b, AES iv, send flag1
                # read server's public key
                try:
                    serverPublic = int(self.read("public: "))
                    assert 1 <= serverPublic < self.p
                except:
                    self.send("Something went wrong...", "")
                    exit(255)
                # generate session key: low 128 bits of the DH shared secret
                mask = (1 << 128) - 1
                self.sessionKey = (
                    pow(serverPublic, self.private, self.p) & mask
                ).to_bytes(16, "big")
                # generate cipher kits
                try:
                    self.iv = bytes.fromhex(self.read("iv: "))
                    assert len(self.iv) == 16
                except:
                    print("Something went wrong...")
                    exit(255)
                self.encrypter = AES.new(
                    key=self.sessionKey, mode=AES.MODE_CBC, iv=self.iv
                )
                self.decrypter = AES.new(
                    key=self.sessionKey, mode=AES.MODE_CBC, iv=self.iv
                )
                # send flag1
                cipher = self.encrypter.encrypt(self.flag1)
                self.send("flag1: ", cipher.hex())
                # state transition
                self.state = State.WaitMessage
            elif self.state == State.WaitMessage:
                # read flag, send flag
                _ = self.read("flag1: ")
                cipher = self.encrypter.encrypt(self.flag2)
                self.send("flag2: ", cipher.hex())
                # state transition
                self.state = State.Finish
            else:
                break
class Server(Station):
    """Diffie-Hellman responder: accepts (p, g, g^a) from the client, replies
    with g^b and a fresh IV, then answers the flag exchange.

    NOTE(review): the client-supplied p and g are used as-is (only the public
    key's range is checked), and the session key is the low 128 bits of the
    shared secret — deliberate weaknesses in this "naive" exercise variant.
    """
    def __init__(self, flag1):
        self.state = State.WaitKey
        self.flag1 = flag1
    def run(self):
        """Drive the protocol state machine until State.Finish."""
        while True:
            if self.state == State.WaitKey:
                # read p, g, g^a, send g^b, AES's iv
                # read client's public key
                try:
                    self.p = int(self.read("p: "))
                    self.g = int(self.read("g: "))
                    clientPublic = int(self.read("public: "))
                    assert 1 <= clientPublic < self.p
                except:
                    self.send("Something went wrong...", "")
                    exit(255)
                # generate private key
                self.private = random.randint(2, self.p - 2)
                self.public = pow(self.g, self.private, self.p)
                # send public key
                self.send("public: ", self.public)
                # generate session key: low 128 bits of the DH shared secret
                mask = (1 << 128) - 1
                self.sessionKey = (
                    pow(clientPublic, self.private, self.p) & mask
                ).to_bytes(16, "big")
                # generate cipher kits
                self.iv = os.urandom(16)
                self.encrypter = AES.new(
                    key=self.sessionKey, mode=AES.MODE_CBC, iv=self.iv
                )
                self.decrypter = AES.new(
                    key=self.sessionKey, mode=AES.MODE_CBC, iv=self.iv
                )
                # send AES's iv
                self.send("iv: ", self.iv.hex())
                # state transition
                self.state = State.WaitMessage
            elif self.state == State.WaitMessage:
                # read flag, send flag
                _ = self.read("flag1: ")
                cipher = self.encrypter.encrypt(self.flag1)
                self.send("flag1: ", cipher.hex())
                # state transition
                self.state = State.Finish
            else:
                break
| [
"k0922322268@gmail.com"
] | k0922322268@gmail.com |
0eef10b543a679ae323f4025fc2649ebd3f9cf06 | 0c67de0db2d3ef93d6a4b6ea865eee6d41c071ba | /aloja-tools/src/parsers/yarn-parser.py | 2f79d28061a40e6a0c30e8a65fcaeb56e2d2fcbe | [
"Apache-2.0"
] | permissive | Aloja/aloja | 1cbaee62a8456d79a627519874fcb3ec2f92671e | ebd336da944bc46443f9f97fe7253af850c608dc | refs/heads/master | 2021-05-22T08:55:19.366358 | 2021-02-17T14:24:30 | 2021-02-17T14:24:30 | 21,858,831 | 25 | 16 | null | 2017-11-17T15:50:18 | 2014-07-15T12:33:19 | JavaScript | UTF-8 | Python | false | false | 6,531 | py | #!/usr/bin/env python
import sys,os
import argparse
import re
import time
import datetime
import glob
import pprint
import csv
def get_id(line, type):
    """Return the first whitespace-separated token of *line* matching the id *type*.

    *type* is "application" (token contains "application") or "container"
    (token contains "container_").  Returns None when nothing matches or for
    any other *type* value.
    """
    if type == "application":
        needle = "application"
    elif type == "container":
        needle = "container_"
    else:
        return None
    for token in line.split():
        if needle in token:
            return token
def get_times(dict,division=1):
start = sys.maxint
stop = 0
for key,value in dict.iteritems():
if (value["start_time"] < start): start = value["start_time"]
if (value["stop_time"] > stop) : stop = value["stop_time"]
duration = float(stop) - float(start)
steps = duration/division
times = []
for i in xrange (0, int(steps) + 2):
times.append(start + (i*division))
return times,start,stop,duration
def get_states(dict):
    """Collect the distinct state names used by any entry of *dict*.

    Always starts with "timestamp" and "RUNNING"; "start_time"/"stop_time"
    bookkeeping keys are excluded.

    Fix: Python-2-only ``dict.iteritems()`` replaced with ``items()``
    (works on both Python 2 and 3).
    """
    states = ["timestamp", "RUNNING"]
    for key, value in dict.items():
        for key2 in value.keys():
            if key2 not in states and key2 != "stop_time" and key2 != "start_time":
                states.append(key2)
    return states
def check_timestamp(stop_time, start_time, timestamp, division):
    """Return True if [start_time, stop_time] overlaps the window [timestamp, timestamp+division].

    Three cases: the interval ends inside the window, starts inside the
    window, or fully spans it.
    """
    window_end = timestamp + division
    if timestamp <= stop_time <= window_end:
        return True
    if timestamp <= start_time <= window_end:
        return True
    return start_time <= timestamp and stop_time >= window_end
def get_app_resources(app_id, containers, time, division):
    """Sum cores and memory of *app_id*'s containers RUNNING during [time, time+division).

    *containers* maps container ids (which embed the application id) to dicts
    holding a "RUNNING" list of {"start_state", "stop_state"} intervals plus
    "memory" and optionally "cores" totals.  Memory is added once per matching
    RUNNING interval, mirroring the original accounting.

    Fix: Python-2-only ``iteritems()``/``xrange`` replaced with
    ``items()``/``range`` (both also valid on Python 2).
    """
    total_mem = 0
    total_cores = 0
    for key, value in containers.items():
        if app_id in key:
            for key2, value2 in value.items():
                if key2 == "RUNNING":
                    for k in range(0, len(value2)):
                        if check_timestamp(value2[k]["stop_state"], value2[k]["start_state"], time, division):
                            # "cores" may be absent; "memory" is always counted.
                            if "cores" in value:
                                total_cores += value["cores"]
                            total_mem += value["memory"]
    return total_cores, total_mem
def update_dict(dict, id, states, new_state, timestamp):
    """Record that *id* entered *new_state* at *timestamp*.

    *dict* maps timestamp -> {state_name: [ids]}.  The id is removed from any
    other state list at that timestamp and appended to *new_state*'s list.

    Bug fix: in the original, the per-state init/removal loop was indented
    inside the ``timestamp not in dict`` branch, so it only ran for brand-new
    timestamps.  Consequences: (a) the removal branch was dead code, letting
    an id sit under two states at the same timestamp, and (b) a state first
    seen for an existing timestamp raised KeyError on the final line.  The
    loop now runs on every call, as the elif clearly intended.
    """
    if timestamp not in dict:
        dict[timestamp] = {}
    for state in states:
        if state not in dict[timestamp]:
            dict[timestamp][state] = []
        elif id in dict[timestamp][state]:
            dict[timestamp][state].remove(id)
    if id not in dict[timestamp][new_state]:
        dict[timestamp][new_state].append(id)
def build_csv (dict,name,save_path,stats,start_time,stop_time,division=1):
    """Write <save_path>/<name>.csv with one row per second counting ids in each state.

    *dict* is the timestamp -> {state: [ids]} structure built by update_dict;
    *stats* lists the state column names.  dict_status carries each id's
    current state forward between timestamps, so rows without events repeat
    the previous counts.

    NOTE(review): Python 2 only — 'wb' csv mode and dict.iteritems()/xrange;
    the file handle is also never closed (no with-block).
    """
    if (not os.path.exists(save_path)):
        os.makedirs(save_path)
    file = open (save_path + '/' + name+'.csv','wb')
    stats = ["timestamp"] + stats
    writer = csv.DictWriter(file,delimiter=',',fieldnames=stats)
    writer.writeheader()
    dict_status = {}
    row = {}
    for stat in stats:
        dict_status[stat] = []
    for t in range (int(start_time),int(stop_time)):
        if t in dict:
            # Move every id that transitioned at t into its new state bucket.
            for key,value in dict[t].iteritems():
                for k in xrange(0, len(value)):
                    for stat in stats:
                        if (value[k] in dict_status[stat]):
                            dict_status[stat].remove(value[k])
                    if (value[k] not in dict_status[key]):
                        dict_status[key].append(value[k])
            for stat in stats:
                row[stat] = len(dict_status[stat])
        else:
            # No events this second: counts carry over unchanged.
            for stat in stats:
                row[stat] = len(dict_status[stat])
        row["timestamp"] = t
        writer.writerow(row)
def build_data(path,save_path):
    """Parse YARN ResourceManager logs under *path* into two state-transition CSVs.

    Scans every file whose path contains "log", extracts application
    ("State change from") and container ("Container Transitioned") events with
    their timestamps, and writes applications.csv / containers.csv via
    build_csv.

    NOTE(review): Python 2 only (sys.maxint, print statements in callees);
    opened log files are never closed; "sotored" typo lives in the output
    strings (left as-is — runtime text).
    """
    containers = {}
    applications = {}
    application_stats=["RUNNING"]
    container_stats=["RUNNING"]
    start_time = sys.maxint
    stop_time = 0
    for file in os.listdir(path):
        file_path = os.path.join(path,file)
        if "log" in file_path:
            current_file = open (file_path,'r')
            print ("Parsing log: " , file_path)
            last_found_timestamp_apps=0
            last_found_timestamp_cont=0
            for line in current_file:
                # Only lines starting with a YYYY-MM-DD date are events.
                if re.match('\d{4}-\d{2}-\d{2}', line):
                    date = line[0:19]
                    milis = line[20:23]
                    timestamp = time.mktime(datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S").timetuple())
                    timestamp = float(timestamp)
                    if (timestamp < start_time): start_time = timestamp
                    if (timestamp > stop_time): stop_time = timestamp
                    if ("application" in line and "State change from" in line):
                        # Last token is the new state, third-from-last the old.
                        new_state = line.split()[-1]
                        previous_state = line.split()[-3]
                        if (new_state not in application_stats) : application_stats.append(new_state)
                        if (previous_state not in application_stats) : application_stats.append(previous_state)
                        update_dict(applications,get_id(line,"application"),application_stats,new_state,timestamp,)
                    elif ("container_" in line and "Container Transitioned" in line):
                        new_state = line.split()[-1]
                        previous_state = line.split()[-3]
                        if (new_state not in container_stats) : container_stats.append(new_state)
                        if (previous_state not in container_stats) : container_stats.append(previous_state)
                        update_dict(containers,get_id(line,"container"),container_stats,new_state,timestamp,)
    print("Finished parsing log....")
    print("Processing applications....")
    build_csv(applications,"applications",save_path,application_stats,start_time,stop_time)
    print ("Done, data sotored in: " + save_path + "/applications.csv")
    print("Processing containers....")
    build_csv(containers,"containers",save_path,container_stats,start_time,stop_time)
    print ("Done, data sotored in: " + save_path + "/containers.csv")
def main(argc, argv):
    """CLI entry point: parse <source> <save_path> and run the log-to-CSV pipeline.

    argc/argv are accepted for symmetry with the __main__ guard but unused —
    argparse reads sys.argv itself.
    """
    parser = argparse.ArgumentParser(description='parse yarn log')
    parser.add_argument('source', help='path to the directory containing the logs')
    parser.add_argument('save_path', help='folder in which to save the resulting csv')
    args = parser.parse_args()## show values ##
    source_path = (os.path.normpath(args.source))
    save_path = (os.path.normpath(args.save_path))
    build_data(source_path,save_path)
    print ("END")
    sys.exit()
if __name__ == "__main__":
    exit(main(len(sys.argv), sys.argv))
| [
"alejandro.montero.rivero@gmail.com"
] | alejandro.montero.rivero@gmail.com |
203a76dcf6c688c6596d22764c7c4afa44ab6b6b | fdaf0e943a58279a1a50ae7cbdc73d3195c71676 | /instagram_web/util/mailgun.py | 6a4bc6d9d7f38328d31aaa7daf7bce3b77dca24c | [] | no_license | WinsonTay/nextagram | 10126e9e971197135ef00ba05ae67098de354e5c | e40fc8c5a303cac74e92a1c772d19e811ffcc903 | refs/heads/master | 2022-12-13T19:19:26.173376 | 2020-03-23T03:02:19 | 2020-03-23T03:02:19 | 245,326,205 | 0 | 0 | null | 2022-12-08T01:51:30 | 2020-03-06T04:01:05 | Python | UTF-8 | Python | false | false | 400 | py | import requests
import os
def send_simple_message():
    """POST a fixed notification email through the Mailgun sandbox domain.

    The API key comes from the MG_API_KEY environment variable; sender,
    recipient, subject and body are hardcoded.  Returns the requests
    Response object.
    """
    return requests.post(
        "https://api.mailgun.net/v3/sandbox14a64d8d03af4c9badd9c1f2ee2a0d0a.mailgun.org/messages",
        auth=("api", os.getenv("MG_API_KEY")),
        data={"from": "Excited User sandbox14a64d8d03af4c9badd9c1f2ee2a0d0a.mailgun.org",
            "to": ["weeshen90@gmail.com"],
            "subject": "Hello",
            "text": "Hi, I just donated to your Picture"})
"weeshen90@gmail.com"
] | weeshen90@gmail.com |
962f542aade17ca44777f2b58fcf4e28890718e0 | 59fc88b3c777fb3b86753eed5e6b1cc79976da75 | /transpose.py | 442ee531b0d8d9a24d1e3f623f6203044fd2bdb1 | [] | no_license | atharvsinha/basicpythontranspose | 8d8ece1174d46939864f1fb0089d7f4f69c0dfa0 | 3651524347c96ecd9642d15bc4bd2b9253c9e7a8 | refs/heads/main | 2023-01-23T10:19:17.199398 | 2020-12-04T17:52:14 | 2020-12-04T17:52:14 | 318,591,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | import numpy as np
m, n = input('Enter 2 space separated integers as dimensions of an m.n 2D matrix:').split()
m, n = int(m), int(n)
matrix = np.array(list(map(int, input('Enter m.n values to be inserted in the matrix').split())))
matrix = np.reshape(matrix, (m, n))
print(matrix)
transposed =[]
[[transposed.append(matrix[j][i]) for j in range(m)] for i in range(n)]
print(np.reshape(np.array(transposed), (n, m)))
| [
"noreply@github.com"
] | noreply@github.com |
12231e5e5f721075626108298acb1a8317dc3ade | 178fef62f033068be8591057470af2be16542d40 | /gym_ple/ple_env.py | aea8f15ad9026c9020833bfc0e9e9aa956cefaaa | [] | no_license | nju-fuzy/envs | 632dd0541a67503cbcf13a8a85cc305cc08c6a04 | e5ef685b2536a00088eef03d6f9f20152d4d0fc8 | refs/heads/master | 2020-08-07T18:55:45.774585 | 2019-02-11T03:33:05 | 2019-02-11T03:33:05 | 213,557,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,264 | py | import os
import gym
from gym import spaces
from ple import PLE
import numpy as np
class PLEEnv(gym.Env):
    """Gym wrapper around a PLE (PyGame Learning Environment) game.

    Observations are RGB screen frames (width, height, 3) uint8; the action
    space is the game's discrete action set.  SDL runs headless via the
    'dummy' video driver.
    """
    metadata = {'render.modes': ['human', 'rgb_array']}
    def __init__(self, game_name='FlappyBird', display_screen=True, ple_game=True):
        # set headless mode
        os.environ['SDL_VIDEODRIVER'] = 'dummy'
        # open up a game state to communicate with emulator
        import importlib
        if ple_game:
            game_module_name = ('ple.games.%s' % game_name).lower()
        else:
            game_module_name = game_name.lower()
        game_module = importlib.import_module(game_module_name)
        game = getattr(game_module, game_name)()
        ##################################################################
        # old one
        #self.game_state = PLE(game, fps=30, display_screen=display_screen)
        self.game_state = PLE(game, fps=30, display_screen=display_screen, state_preprocessor = self.process_state)
        ##################################################################
        self.game_state.init()
        self._action_set = self.game_state.getActionSet()
        self.action_space = spaces.Discrete(len(self._action_set))
        self.screen_height, self.screen_width = self.game_state.getScreenDims()
        self.observation_space = spaces.Box(low=0, high=255, shape=(self.screen_width, self.screen_height, 3), dtype = np.uint8)
        self.viewer = None
    #############################################
    # Add state processer
    def process_state(self, state):
        # NOTE(review): on Python 3, state.values() is a view object, so this
        # yields a 1-element object array rather than a numeric vector —
        # confirm the intended shape against PLE's state_preprocessor contract.
        return np.array([state.values()])
    #############################################
    def _step(self, a):
        reward = self.game_state.act(self._action_set[a])
        state = self._get_image()
        terminal = self.game_state.game_over()
        #############################################
        # NOTE(review): debug print of the raw game state left in the hot loop.
        ram = self.game_state.getGameState()
        print(ram)
        #############################################
        return state, reward, terminal, {}
    def _get_image(self):
        image_rotated = np.fliplr(np.rot90(self.game_state.getScreenRGB(),3)) # Hack to fix the rotated image returned by ple
        return image_rotated
    @property
    def _n_actions(self):
        return len(self._action_set)
    # return: (states, observations)
    def _reset(self):
        self.observation_space = spaces.Box(low=0, high=255, shape=(self.screen_width, self.screen_height, 3), dtype = np.uint8)
        self.game_state.reset_game()
        state = self._get_image()
        return state
    def _render(self, mode='human', close=False):
        if close:
            if self.viewer is not None:
                self.viewer.close()
                self.viewer = None
            return
        img = self._get_image()
        if mode == 'rgb_array':
            return img
        elif mode == 'human':
            from gym.envs.classic_control import rendering
            if self.viewer is None:
                self.viewer = rendering.SimpleImageViewer()
            self.viewer.imshow(img)
    def _seed(self, seed):
        rng = np.random.RandomState(seed)
        self.game_state.rng = rng
        self.game_state.game.rng = self.game_state.rng
        self.game_state.init()
| [
"lxcnju@163.com"
] | lxcnju@163.com |
dba057dbb9a90222fe78013ed507874e4483c622 | 26b8745c37acd8127a4913671dd59f781c1ac3db | /getProxy_freeproxylists.py | 11b1ec61c71b4e967fa443e5116c47667b9f94df | [] | no_license | hsh075623201/crawlProxyIps | 696e40064bd13cbcb14a4d469c07a2b547073244 | 55773b25146b6283d054243c6ee9fc1c92f10fc3 | refs/heads/master | 2020-07-14T01:10:52.336412 | 2017-12-01T04:02:10 | 2017-12-01T04:02:10 | 66,363,614 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,247 | py | #coding:utf-8
import requests
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
from bs4 import BeautifulSoup
import time
import pymysql.cursors
import random
import re
from config import *
UA = [
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36"
]
class FreeProxyServer():
    """Scrape freeproxylists.com for anonymous HTTP proxies, validate each
    one against a live site, and append working proxies to
    logs/proxyIps.log.

    NOTE(review): Python 2 only (print statements, ``except Exception,e``).
    """
    def __init__(self):
        # Seed page(s) listing anonymous proxies.
        self.urls = [
            "http://www.freeproxylists.com/anonymous.html"
        ]
        # One pooled session with retries for transient server errors.
        self.req = requests.Session()
        retries = Retry(total=3,backoff_factor=0.1,status_forcelist=[ 500, 502, 503, 504 ])
        self.req.mount('http://', HTTPAdapter(max_retries=retries))
    def getproxy(self):
        """Crawl the seed pages, follow each anon/NNN.html listing to its
        load_anon_NNN.html data page, extract ip/port pairs by regex and
        hand each one to validate()."""
        for url in self.urls:
            request_url=url
            time.sleep(10)  # be polite to the remote site between seeds
            try:
                headers = {
                    "Host":"www.freeproxylists.com",
                    'Connection': 'keep-alive',
                    "User-Agent":UA[random.randint(0, len(UA) - 1)]
                }
                res = self.req.get(request_url,headers=headers,timeout=8)
                content = res.text
                # Links to the per-page proxy listings, e.g. "anon/123.html".
                patt1=re.compile("(anon/\d+.html)")
                for ele in patt1.findall(content):
                    print ele
                    subres = self.req.get("http://www.freeproxylists.com/"+ele,headers=headers,timeout=8)
                    subcontent= subres.text
                    # The listing page embeds the actual data URL.
                    patt2=re.compile("(/load_anon_\d+.html)")
                    # NOTE(review): reassigning `url` shadows the outer loop
                    # variable; harmless while there is a single seed URL.
                    url="http://www.freeproxylists.com/"+str(patt2.findall(subcontent)[0])
                    print url
                    lastres = self.req.get(url,headers=headers,timeout=8)
                    data=lastres.text
                    # "<ip></td><td><port>" pairs in the HTML table.
                    patt3=re.compile("(\d+\.\d+\.\d+\.\d+)</td><td>(\d+)")
                    for ip in patt3.findall(data):
                        print ip
                        self.validate("http",str(ip[0])+":"+str(ip[1]))
                #print content
                #soup = BeautifulSoup(content,"html.parser",from_encoding="utf8")
                # try:
                #     lists = soup.find('ol').find_all('li')
                # except Exception,e:
                #     print "error:"+str(e)
                #     continue
                # for ele in lists[1:]:
                #     print ele
                #     elearrs = ele.find('a').text.strip().split(":")
                #     ip = elearrs[0]
                #     port= elearrs[1]
                #     # port = tds[2].text.strip() # port
                #     protocol = "http"
                #     self.validate(protocol.lower(),ip,port)
            except Exception,e:
                print "request Exceptin .............:"+str(e)
    def validate(self,protocol,ip):
        """Try one request through ``protocol://ip`` (ip is "host:port");
        on success, record the proxy via log()."""
        try:
            headers = {
                "User-Agent":UA[random.randint(0, len(UA) - 1)]
            }
            proxy_ip = {protocol:protocol+'://'+ip}
            print proxy_ip
            #requests.get("http://bang.360.cn/liangpin/aj_get_goods?pn=0",proxies=proxy_ip,headers=headers,timeout=5)
            requests.get("http://youpin.58.com",proxies=proxy_ip,headers=headers,timeout=8)
        except Exception,e:
            print "validate Exception:"+str(e)
        else:
            # Only reached when the proxied request raised no exception.
            print "***********************************************"
            self.log(ip)
    def log(self,ip):
        """Append a validated "host:port" entry to the proxy log file."""
        with open("logs/proxyIps.log","a") as f:
            f.write(ip+",\n")
            f.flush()
if __name__=="__main__":
    # Crawl, validate and log proxies, then release pooled connections.
    print "start........."
    proxy = FreeProxyServer()
    proxy.getproxy()
    proxy.req.close()
"shihuai.du@aihuishou.com"
] | shihuai.du@aihuishou.com |
b97ce95ce7e648eb306f1d1b9ecb4fb53d4ed64c | 4c5feb80b04f267e10ae03659a0e0bff6545b02b | /handler.py | 3a370e6e870fc208a0282a2d2f816ecc714cd989 | [] | no_license | IonianIronist/mini-python-database | 250f8b440be74b160356f6f5888f17037244b138 | 32caf7e59c989493b255ed9491b3478ed3e975b8 | refs/heads/master | 2020-09-12T18:20:12.639016 | 2019-11-18T17:56:24 | 2019-11-18T17:56:24 | 222,509,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,728 | py | import functions as fn
# MAIN CODE FILE
# Flat driver script: loads data.csv into memory, writes it out both as a
# heap file (original order) and as a file ordered by the first column, then
# runs the four exercises on both representations. All real work lives in
# functions.py (imported as fn).
file_sz = 15000 # writing file size
tupples_lst = [] # tupple buffer
data = "data.csv" # data file name
to_write = ["ordered", "heap_file"]
fn.readnstore(data,tupples_lst) # read the data, put it in the <tupples_lst>
tupples_num = len(tupples_lst) # how many tupples in the list
fn.write_unsorted(tupples_lst,to_write[1],file_sz,tupples_num) # write to the file in initial order
""" Sort the tupples_lst """
col = []
for tupple in tupples_lst :
    col.append(tupple.columns[0])
col.sort()
sorted_tupples = []
# NOTE(review): this is an O(n^2) lookup, and for duplicate first-column
# values the inner loop always breaks on the *first* match, so the same
# tupple is appended once per duplicate key -- confirm keys are unique
# in data.csv before relying on this ordering.
for line in col :
    for tupple in tupples_lst :
        if line == tupple.columns[0]:
            sorted_tupples.append(tupple)
            break
""" End sorting the tupples list"""
fn.write_unsorted(sorted_tupples,to_write[0],file_sz,tupples_num) # write to a file ordered by the first column
col.clear() # empty the col
sorted_tupples.clear() # -||-
tupples_lst.clear() # -||-
# NOTE(review): col is cleared above but still passed to every exercise
# below, so the helpers receive an empty list -- presumably they re-read
# the written files; verify in functions.py.
print("\n|\n|\n|First exercise \n|\n|\n\n")
print ("\n___________________________________________________________\n")
print("\nPrinting from the ordered file :\n")
print ("\n___________________________________________________________\n\n")
fn.first_ex(col, to_write[0]) # first exercise for the ordered file
print ("\n___________________________________________________________\n")
print ("\nPrinting from the heap file :\n")
print ("\n___________________________________________________________\n\n")
fn.first_ex(col, to_write[1]) # for the unordered file
print("\n|\n|\n|Second exercise \n|\n|\n\n")
print ("\n___________________________________________________________\n")
print("\nPrinting from the ordered file :\n")
print ("\n___________________________________________________________\n\n")
fn.secnd_ex(col, to_write[0]) # second ex for the ordered file
print ("\n___________________________________________________________\n")
print ("\nPrinting from the heap file :\n")
print ("\n___________________________________________________________\n\n")
fn.secnd_ex(col, to_write[1]) # on the heap file
print("\n|\n|\n|Third exercise \n|\n|\n\n")
print ("\n___________________________________________________________\n")
print("\nPrinting from the ordered file :\n")
print ("\n___________________________________________________________\n\n")
fn.third_ex(col, to_write[0]) # third on the ordered
print ("\n___________________________________________________________\n")
print ("\nPrinting from the heap file :\n")
print ("\n___________________________________________________________\n\n")
fn.third_ex(col, to_write[1]) # on on the heap
print("\n|\n|\n|Fourth exercise\n|\n|\n\n")
fn.fourth_ex(col, to_write[0], to_write[1]) # fourt exercise for both files
"noreply@github.com"
] | noreply@github.com |
dcd624ef125ecb43865c6cf90b0020339955f483 | 87f31b789750f6b545d6a79bd0b7028ebf4126c7 | /vislab/_results.py | 1716115a35aa0d860c71db36fd53bb1bdebffacc | [
"BSD-2-Clause"
] | permissive | hertzmann/vislab | db4d1b9e63e9bb8a33e491cff433e02c0315ca81 | fcded208637fb51edfeaef1bde0bf766f9af1941 | refs/heads/master | 2021-01-24T04:20:32.382941 | 2016-07-12T20:21:28 | 2016-07-12T20:21:28 | 13,571,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,575 | py | import os
import pandas as pd
import cPickle
import numpy as np
import vislab
def load_pred_results(collection_name, cache_dirname, multiclass=False, force=False):
    """
    Return DataFrame of prediction experiment results and Panel of per-image
    predictions.

    Results are cached as pickles under cache_dirname; pass force=True to
    rebuild from the 'predict' MongoDB database instead of the cache.

    NOTE(review): relies on legacy pandas APIs (drop_duplicates(cols=...,
    take_last=...), DataFrame.sort, pd.Panel) and on cPickle, so this code
    is pinned to old pandas / Python 2.
    """
    if not os.path.exists(cache_dirname):
        vislab.util.makedirs(cache_dirname)
    results_df_filename = os.path.join(
        cache_dirname, '{}_results_df.pickle'.format(collection_name))
    preds_panel_filename = os.path.join(
        cache_dirname, '{}_preds_panel.pickle'.format(collection_name))
    # If cache exists, load and return.
    if (os.path.exists(results_df_filename) and
            os.path.exists(preds_panel_filename) and
            not force):
        results_df = pd.read_pickle(results_df_filename)
        preds_panel = pd.read_pickle(preds_panel_filename)
        print("Loaded from cache: {} records".format(results_df.shape[0]))
        return results_df, preds_panel
    # Otherwise, construct from database.
    client = vislab.util.get_mongodb_client()
    collection = client['predict'][collection_name]
    print("Results in collection {}: {}".format(collection_name, collection.count()))
    df = pd.DataFrame(list(collection.find()))
    df.index = df.index.astype(str)
    # Make the features list hashable for filtering/joins.
    df['features_str'] = df['features'].apply(lambda x: ','.join(sorted(x)))
    # We need a unique representation of the predictor settings.
    df['setting'] = df.apply(lambda x: '{} {} {}'.format(x['features_str'], x['quadratic'], 'vw'), axis=1)
    # And of the task performed.
    df['full_task'] = df.apply(lambda x: '{} {}'.format(x['task'], x['data']), axis=1)
    df = df.drop_duplicates(cols=['features_str', 'full_task'], take_last=True)
    # Just for printing, if needed.
    df = df.sort(['full_task', 'setting'])
    # Get all predictions in a separate panel and drop the pickled ones.
    if multiclass:
        data = {}
        for setting in df['setting'].unique():
            el = df[df['setting'] == setting].iloc[0]
            # Predictions are either pickled inline or stored in an HDF5
            # file named by 'results_name'; fall back to the file on any
            # unpickling failure.
            try:
                pred_df = cPickle.loads(el['pred_df'])
            except:
                assert('results_name' in el)
                pred_df_filename = '{}/{}.h5'.format(
                    vislab.config['paths']['results'], el['results_name'])
                #print(pred_df_filename)
                pred_df = pd.read_hdf(pred_df_filename, 'df')
            # Not sure why there should ever be duplicate indices, but
            # there are for one of the wikipaintings results...
            pred_df['__index'] = pred_df.index
            pred_df.drop_duplicates(cols='__index', take_last=True, inplace=True)
            del pred_df['__index']
            data[setting] = pred_df
        preds_panel = pd.Panel(data).swapaxes('items', 'minor')
    else:
        preds_panel = get_all_preds_panel(df)
    # The raw pickled predictions are no longer needed in the results frame.
    try:
        del df['pred_df']
    except KeyError:
        pass
    df.to_pickle(results_df_filename)
    preds_panel.to_pickle(preds_panel_filename)
    return df, preds_panel
def get_all_preds_panel(df):
    """Build a Panel with one predictions DataFrame per unique 'full_task'.

    Each item of the returned Panel is keyed by the task name and holds the
    DataFrame produced by get_all_preds_df for that task.
    """
    per_task_preds = {
        task: get_all_preds_df(df, task)
        for task in df['full_task'].unique()
    }
    return pd.Panel(per_task_preds)
def get_all_preds_df(df, full_task):
    """
    Get the DataFrame of predictions from the results dataframe.

    Each row of `df` matching `full_task` contributes one column of scores
    (named by its 'setting'); 'label' and 'split' columns are carried over
    from the first prediction DataFrame.

    Tip: get all predictions of an image with
    all_preds_panel.major_xs('f_1604904579').T

    Raises:
        Exception: if the task has duplicate feature-setting pairs.
    """
    tdf = df[df['full_task'] == full_task]
    # Make sure that there are no duplicate settings.
    if len(tdf.setting.unique()) != tdf.shape[0]:
        try:
            del df['pred_df']
        except KeyError:
            pass
        print(tdf.to_string())
        raise Exception("Non-unique feature-setting pairs")
    pred_dfs = []
    for i, row in tdf.iterrows():
        # Predictions are either pickled inline or stored in an HDF5 file
        # named by 'results_name'; fall back to the file on any unpickling
        # failure (bare except kept to preserve that best-effort behavior).
        try:
            pred_df = cPickle.loads(row['pred_df'])
        except:
            assert('results_name' in row)
            pred_df_filename = '{}/{}.h5'.format(
                vislab.config['paths']['results'], row['results_name'])
            pred_df = pd.read_hdf(pred_df_filename, 'df')
        pred_df.index = pred_df.index.astype(str)
        pred_dfs.append(pred_df)
    # Make sure that all the settings had the same label and split information.
    # BUGFIX: np.all() applied to a generator expression returned the
    # truthiness of the generator object (always True), so these checks
    # never actually ran. Use the built-in all() to iterate, with np.all
    # reducing each element-wise Series comparison.
    arbitrary_pred_df = pred_dfs[0]
    assert(all(np.all(df_['label'] == arbitrary_pred_df['label']) for df_ in pred_dfs))
    assert(all(np.all(df_['split'] == arbitrary_pred_df['split']) for df_ in pred_dfs))
    data = []
    for df_ in pred_dfs:
        df_["index"] = df_.index
        # TODO: track down why duplicate indices appear in some results.
        df_ = df_.drop_duplicates('index')
        if 'score' in df_.columns:
            data.append(df_['score'])
        else:
            # TODO: temporary, remove when all experiments are re-run
            data.append(df_['pred'])
    all_preds_df = pd.DataFrame(data, index=tdf['setting']).T
    all_preds_df['label'] = arbitrary_pred_df['label']
    all_preds_df['split'] = arbitrary_pred_df['split']
    all_preds_df.index = all_preds_df.index.astype(str)
    return all_preds_df
if __name__ == '__main__':
    # Ad-hoc smoke test: build (or load) results for the wikipaintings run.
    load_pred_results('wikipaintings_oct25', 'whatever', multiclass=True)
| [
"sergeykarayev@gmail.com"
] | sergeykarayev@gmail.com |
e7efebf2086ec9f0d18bc80cfbe417810cd984a8 | 34b3d2bcfb1982ee69a379df90da993ee7df046b | /pybo/models.py | 2e6e73888549915daad574b5404c09a81443bb8b | [] | no_license | swanne-dev/swannepy | aa732ff31eda8bceb9f4566be2200c45dc52cc8d | 7dc2aa215adc0186e8eccaf756a3ab0584c8fe5e | refs/heads/master | 2023-03-25T19:39:49.685541 | 2021-03-20T13:43:05 | 2021-03-20T13:43:05 | 349,740,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,418 | py | from django.contrib.auth.models import User
from django.db import models
# Create your models here.
class Question(models.Model):
    """A question post: subject, body text, author, voters and timestamps."""
    author = models.ForeignKey(User, on_delete=models.CASCADE,
                               related_name='author_question')
    subject = models.CharField(max_length=200)
    content = models.TextField()
    create_date = models.DateTimeField()
    modify_date = models.DateTimeField(null=True, blank=True)  # set only when edited
    voter = models.ManyToManyField(User, related_name='voter_question') # voter added
    def __str__(self):
        return self.subject
class Answer(models.Model):
    """An answer attached to a Question, with its own author and voters."""
    author = models.ForeignKey(User, on_delete=models.CASCADE,
                               related_name='author_answer')
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    content = models.TextField()
    create_date = models.DateTimeField()
    modify_date = models.DateTimeField(null=True, blank=True)  # set only when edited
    voter = models.ManyToManyField(User, related_name='voter_answer')
class Comment(models.Model):
    """A comment attached to a Question or an Answer.

    NOTE(review): both FKs are nullable; presumably exactly one is set per
    comment, but that is not enforced here -- confirm in the views/forms.
    """
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    content = models.TextField()
    create_date = models.DateTimeField()
    modify_date = models.DateTimeField(null=True, blank=True)
    question = models.ForeignKey(Question, null=True, blank=True, on_delete=models.CASCADE)
    answer = models.ForeignKey(Answer, null=True, blank=True, on_delete=models.CASCADE)
answer = models.ForeignKey(Answer, null=True, blank=True, on_delete=models.CASCADE) | [
"swanne.njs@gmail.com"
] | swanne.njs@gmail.com |
c9cce2b5cdc0b337efc02782fc1719281c024b43 | 3c4bd9f5d989a700220e900aaeefb20a723f7b76 | /www/popviv/popviv/settings.py | 0bbf2c9754252bb29ee39f0fdba722ff45da0761 | [] | no_license | popviv/django-web | 0dc8e95e888dedd133bcb842ad856681e126d621 | 100fbfcac623c4ff6f111d39a0f06e2ae5e9c152 | refs/heads/master | 2021-01-09T06:20:27.262586 | 2017-02-05T04:03:00 | 2017-02-05T04:03:00 | 80,967,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,244 | py | """
Django settings for popviv project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control -- rotate it and
# load it from the environment before any production use.
SECRET_KEY = '@hl)^tnh3k)ri%&i!*yu(iw$-h76uzsnlrcltl0dcn3j^ilq2^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'group',  # project app
)
# NOTE(review): MIDDLEWARE_CLASSES is the Django <= 1.9 setting name
# (replaced by MIDDLEWARE in 1.10+), consistent with the 1.7 docs links
# elsewhere in this file.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'popviv.urls'
WSGI_APPLICATION = 'popviv.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# NOTE(review): MySQL credentials and host are hard-coded and committed to
# version control -- move them to environment variables / a secrets store.
DATABASES = {
    'default': {
        #'ENGINE': 'django.db.backends.sqlite3',
        #'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'uyagroup_demo',
        'USER': 'root',
        'PASSWORD': '123456',
        'HOST':'10.70.40.250',
        'PORT':'3306',
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True   # enable Django's translation machinery
USE_L10N = True   # localized formatting of numbers and dates
USE_TZ = True     # store datetimes timezone-aware (UTC)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| [
"popviv@163.com"
] | popviv@163.com |
4d9f52c9e77a45559205cc135334a7f3d9ce7dc0 | cce0f2236140b4cd5dbf76bd704ef56dd86429b2 | /tests/test_models.py | 22b32152a2204f83e5b0da98c578dc6d2d86f64f | [
"MIT"
] | permissive | nightlyds/library-api | d22ec784562e46f95824c579a2273e55692d7ece | aef059dc8e833976b594788295418ce9d22ad8b1 | refs/heads/main | 2023-06-23T03:24:14.979344 | 2021-07-21T19:44:14 | 2021-07-21T19:44:14 | 383,460,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,666 | py | import pytest
from library import bcrypt
from library.models import Genre, Author, Book, \
User, Order, OrderItem, Review, ReviewImage
from sqlalchemy.exc import IntegrityError
@pytest.fixture
def initialize(session):
    """Seed the test database used by every test class in this module.

    Creates: one genre, two authors, one user (with a hashed password),
    three books (book3 shared by both authors), two orders with three
    order items total, two reviews, and three review images. All ids
    start at 101.
    """
    genre = Genre(id=101, name='comedy')
    author1 = Author(id=101, firstname="test firstname 1", lastname="test lastname 1",
                     biography="test biography 1")
    author2 = Author(id=102, firstname="test firstname 2", lastname="test lastname 2",
                     biography="test biography 2")
    user = User(id=101, username="test username 1", firstname="test firstname 1",
                lastname="test lastname 1")
    user.password = 'test password 1'
    session.add_all([genre, author1, author2, user])
    session.commit()
    book1 = Book(id=101, name="test name 1", isbn="test isbn 1", count=3,
                 publisher="test publisher 1", pages=101, genre_id=101)
    book2 = Book(id=102, name="test name 2", isbn="test isbn 2", count=3,
                 publisher="test publisher 2", pages=201, genre_id=101)
    book3 = Book(id=103, name="test name 3", isbn="test isbn 3", count=3,
                 publisher="test publisher 3", pages=301, genre_id=101)
    session.add_all([book1, book2, book3])
    session.commit()
    # book3 belongs to both authors (many-to-many).
    author1.books.append(book1)
    author2.books.append(book2)
    author1.books.append(book3)
    author2.books.append(book3)
    order1 = Order(id=101, user_id=101)
    order2 = Order(id=102, user_id=101)
    review1 = Review(id=101, user_id=101, book_id=101, message="test message 1")
    review2 = Review(id=102, user_id=101, book_id=102, message="test message 2")
    session.add_all([author1, author2, order1, order2, review1, review2])
    session.commit()
    # order_item2 exercises the books_amount default (no value given).
    order_item1 = OrderItem(id=101, order_id=101, book_id=101,
                            books_amount=3)
    order_item2 = OrderItem(id=102, order_id=101, book_id=102)
    order_item3 = OrderItem(id=103, order_id=102, book_id=102,
                            books_amount=2)
    review_image1 = ReviewImage(id=101, review_id=101, image="review_image1.jpg")
    review_image2 = ReviewImage(id=102, review_id=101, image="review_image2.jpg")
    review_image3 = ReviewImage(id=103, review_id=102, image="review_image3.jpg")
    session.add_all([order_item1, order_item2, order_item3,
                     review_image1, review_image2, review_image3])
    session.commit()
class TestAuthor:
    """Author model: field values, relationships and column defaults."""
    def test_get_info(self, session, initialize):
        expected_author = {
            'id': 101,
            'firstname': 'test firstname 1',
            'lastname': 'test lastname 1',
            'biography': 'test biography 1'
        }
        author = session.query(Author).filter_by(id=101).first()
        author_to_dict = {
            'id': author.id,
            'firstname': author.firstname,
            'lastname': author.lastname,
            'biography': author.biography
        }
        assert author_to_dict == expected_author
    def test_get_amount_of_authors(self, session, initialize):
        authors = session.query(Author).all()
        assert len(authors) == 2
    def test_get_books(self, session, initialize):
        # author2 owns book2 and shares book3 (see the initialize fixture).
        expected_books = [session.query(Book).filter_by(id=102).first(),
                          session.query(Book).filter_by(id=103).first()]
        author_books = session.query(Author).filter_by(id=102).first().books
        assert author_books == expected_books
    def test_get_default_values(self, session, initialize):
        expected_values = {
            'picture': None,
            'country': None,
            'city': None,
            'rating': 0,
            'birthday': None
        }
        author = session.query(Author).filter_by(id=102).first()
        author_default_values_to_dict = {
            'picture': author.picture,
            'country': author.country,
            'city': author.city,
            'rating': author.rating.value,
            'birthday': author.birthday
        }
        assert author_default_values_to_dict == expected_values
class TestBook:
    """Book model: field values, defaults, and the non-negative count check."""
    def test_get_info(self, session, initialize):
        expected_book = {
            'id': 101,
            'name': 'test name 1',
            'isbn': 'test isbn 1',
            'count': 3,
            'publisher': 'test publisher 1',
            'pages': 101
        }
        book = session.query(Book).filter_by(id=101).first()
        book_to_dict = {
            'id': book.id,
            'name': book.name,
            'isbn': book.isbn,
            'count': book.count,
            'publisher': book.publisher,
            'pages': book.pages
        }
        assert book_to_dict == expected_book
    def test_get_genre_id(self, session, initialize):
        book = session.query(Book).filter_by(id=102).first()
        assert book.genre_id == 101
    def test_get_amount_of_books(self, session, initialize):
        books = session.query(Book).all()
        assert len(books) == 3
    def test_get_default_values(self, session, initialize):
        expected_values = {
            'picture': None,
            'description': None,
            'cover': 'paperbook',
            'status': 'available',
            'rating': 0,
            'format': 'e-book'
        }
        book = session.query(Book).filter_by(id=101).first()
        book_default_values_to_dict = {
            'picture': book.picture,
            'description': book.description,
            'cover': book.cover.value,
            'status': book.status.value,
            'rating': book.rating.value,
            'format': book.format.value
        }
        assert book_default_values_to_dict == expected_values
    def test_will_throw_error_on_create_count_negative(self, session, initialize):
        # A negative count must be rejected at construction time.
        with pytest.raises(AssertionError):
            Book(id=104, name="test name 4", isbn="test isbn 4", count=-1,
                 publisher="test publisher 4", pages=401, genre_id=101)
    def test_will_throw_error_on_update_count_negative(self, session, initialize):
        # ...and also when assigning to an existing instance.
        with pytest.raises(AssertionError):
            book3 = session.query(Book).filter_by(id=103).first()
            book3.count = -3
class TestUser:
    """User model: fields, password hashing, relationships, validation."""
    def test_get_info(self, session, initialize):
        expected_user = {
            'id': 101,
            'username': 'test username 1',
            'firstname': 'test firstname 1',
            'lastname': 'test lastname 1'
        }
        user = session.query(User).filter_by(id=101).first()
        user_to_dict = {
            'id': user.id,
            'username': user.username,
            'firstname': user.firstname,
            'lastname': user.lastname
        }
        assert user_to_dict == expected_user
    def test_password_get(self, session, initialize):
        # The password property exposes the mangled private attribute.
        user = session.query(User).filter_by(id=101).first()
        assert user.password == user._User__password
    def test_check_password_hash(self, session, initialize):
        # The stored value is a bcrypt hash of the plaintext, not the plaintext.
        expected_password = 'test password 1'
        user = session.query(User).filter_by(id=101).first()
        assert bcrypt.check_password_hash(user._User__password, expected_password)
    def test_check_password_hash_method(self, session, initialize):
        expected_password = 'test password 1'
        user = session.query(User).filter_by(id=101).first()
        assert user.check_password(expected_password)
    def test_get_amount_of_users(self, session, initialize):
        users = session.query(User).all()
        assert len(users) == 1
    def test_get_orders(self, session, initialize):
        expected_orders = session.query(Order).all()
        user_orders = session.query(User).filter_by(id=101).first().orders
        assert user_orders == expected_orders
    def test_get_reviews(self, session, initialize):
        expected_reviews = session.query(Review).all()
        user_reviews = session.query(User).filter_by(id=101).first().reviews
        assert user_reviews == expected_reviews
    def test_get_default_values(self, session, initialize):
        expected_values = {
            'picture': None,
            'email': None,
            'country': None,
            'city': None,
            'birthday': None,
            'role': 0
        }
        user = session.query(User).filter_by(id=101).first()
        user_default_values_to_dict = {
            'picture': user.picture,
            'email': user.email,
            'country': user.country,
            'city': user.city,
            'birthday': user.birthday,
            'role': user.role.value
        }
        assert user_default_values_to_dict == expected_values
    def test_will_throw_error_on_create_email_negative(self, session, initialize):
        # Malformed e-mail addresses are rejected at construction time...
        with pytest.raises(AssertionError):
            User(id=102, username="test username 2", email="incorrect email", firstname="test firstname 2",
                 lastname="test lastname 2")
    def test_will_throw_error_on_update_email_negative(self, session, initialize):
        # ...and on attribute assignment.
        with pytest.raises(AssertionError):
            user = session.query(User).filter_by(id=101).first()
            user.email = "incorrect email"
    def test_will_throw_error_on_create_username_already_exist(self, session, initialize):
        # Username uniqueness is enforced by the database on commit.
        with pytest.raises(IntegrityError):
            user3 = User(id=103, username="test username 2", firstname="test firstname 3",
                         lastname="test lastname 3")
            session.add(user3)
            session.commit()
class TestOrder:
    """Order model: fields and the relationship to its OrderItems."""
    def test_get_info(self, session, initialize):
        expected_order = {
            'id': 101,
            'user_id': 101
        }
        order = session.query(Order).filter_by(id=101).first()
        order_to_dict = {
            'id': order.id,
            'user_id': order.user_id
        }
        assert order_to_dict == expected_order
    def test_get_amount_of_orders(self, session, initialize):
        orders = session.query(Order).all()
        assert len(orders) == 2
    def test_get_order_items(self, session, initialize):
        expected_order_items = [session.query(OrderItem).filter_by(id=103).first()]
        order_items = session.query(Order).filter_by(id=102).first().items
        assert order_items == expected_order_items
    def test_get_amount_of_order_items(self, session, initialize):
        order_items = session.query(Order).filter_by(id=101).first().items
        assert len(order_items) == 2
    def test_get_amount_of_books_in_order_item(self, session, initialize):
        # Item 1 was created with books_amount=3; item 2 uses the default (1).
        order1 = session.query(Order).filter_by(id=101).first()
        order_item1 = order1.items[0]
        order_item2 = order1.items[1]
        assert order_item1.books_amount == 3
        assert order_item2.books_amount == 1
class TestOrderItem:
    """OrderItem model: fields and column defaults (books_amount, status)."""
    def test_get_info(self, session, initialize):
        expected_order_item = {
            'id': 101,
            'order_id': 101,
            'book_id': 101,
            'books_amount': 3
        }
        order_item = session.query(OrderItem).filter_by(id=101).first()
        order_item_to_dict = {
            'id': order_item.id,
            'order_id': order_item.order_id,
            'book_id': order_item.book_id,
            'books_amount': order_item.books_amount
        }
        assert order_item_to_dict == expected_order_item
    def test_get_amount_of_orders(self, session, initialize):
        order_items = session.query(OrderItem).all()
        assert len(order_items) == 3
    def test_get_default_values(self, session, initialize):
        expected_values = {
            'books_amount': 1,
            'status': 'in progress'
        }
        order_item = session.query(OrderItem).filter_by(id=102).first()
        order_item_default_values_to_dict = {
            'books_amount': order_item.books_amount,
            'status': order_item.status.value
        }
        assert order_item_default_values_to_dict == expected_values
class TestReview:
    """Review model: fields and the relationship to its ReviewImages."""
    def test_get_info(self, session, initialize):
        expected_review = {
            'id': 102,
            'user_id': 101,
            'book_id': 102,
            'message': 'test message 2'
        }
        review = session.query(Review).filter_by(id=102).first()
        review_to_dict = {
            'id': review.id,
            'user_id': review.user_id,
            'book_id': review.book_id,
            'message': review.message
        }
        assert review_to_dict == expected_review
    def test_get_amount_of_reviews(self, session, initialize):
        reviews = session.query(Review).all()
        assert len(reviews) == 2
    def test_get_review_images(self, session, initialize):
        expected_review_images = [session.query(ReviewImage).filter_by(id=101).first(),
                                  session.query(ReviewImage).filter_by(id=102).first()]
        review = session.query(Review).filter_by(id=101).first().images
        assert review == expected_review_images
class TestReviewImage:
    """ReviewImage model: field values and row counts."""
    def test_get_info(self, session, initialize):
        expected_review_image = {
            'id': 101,
            'review_id': 101,
            'image': 'review_image1.jpg'
        }
        review_image = session.query(ReviewImage).filter_by(id=101).first()
        review_image_to_dict = {
            'id': review_image.id,
            'review_id': review_image.review_id,
            'image': review_image.image
        }
        assert review_image_to_dict == expected_review_image
    def test_get_amount_of_reviews_images(self, session, initialize):
        reviews = session.query(ReviewImage).all()
        assert len(reviews) == 3
| [
"safonov.danil.cw@gmail.com"
] | safonov.danil.cw@gmail.com |
e49326fac55a8895944075f1fa4486e0bc5a5f82 | 55d89253f2c16983629220a54ffe2c1801803aca | /src/p55.py | 0bb62e01637a19b669e7c473e38dba57ee150475 | [] | no_license | YeWenting/LeetCode-Sulotion | a1fbbb3499a844578c7e068f12a002a3d6a736af | 373a9a69b151c1a8de132915f10c5bbaa2ceb22c | refs/heads/master | 2021-07-13T01:31:36.091179 | 2017-10-18T12:12:59 | 2017-10-18T12:12:59 | 105,343,996 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | class Solution(object):
def canJump(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
farest = 0
n = len(nums)
for i in range(n):
if i <= farest:
if i + nums[i] >= n - 1:
return True
elif i + nums[i] > farest:
farest = i + nums[i]
return False | [
"Wenting_Ye@Outlook.com"
] | Wenting_Ye@Outlook.com |
478a63c1e761ce405aaf06043f87d35e372cd8ac | aaded28b4be880ef3635695e6f576463becb7805 | /api/admin.py | 5c27ef06dcad8607856a7f0b3c9e56d7fd6bc275 | [] | no_license | Raghava248/todo_drf | 312038e0557896b657842a5621dc7f963add82a5 | 805103ba9cee872bb15b5a00873116bc96ee65cc | refs/heads/master | 2023-08-25T05:21:44.338950 | 2021-10-29T05:42:14 | 2021-10-29T05:42:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | from django.contrib import admin
from . import models
# Register your models here.
admin.site.register(models.Task)
| [
"raghava.mittu@gmail.com"
] | raghava.mittu@gmail.com |
c607744fc4be9e2f417a02ea253ce40dd63c83cf | 5548c05a98ae86df08c21a275f028dfc64bd2819 | /data_script2PP.py | 025a4c4e9a298489da618df97cdadcf2ca244d28 | [] | no_license | nanomer/GLUE-Data-Analysis | 999d2c18715aa40784d792e363d20a7c35c45f45 | 8586e798479ef83b80fd7531ba2ce661fd7f75ef | refs/heads/master | 2020-07-29T13:30:39.415595 | 2019-11-23T22:52:31 | 2019-11-23T22:52:31 | 209,821,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,722 | py | #!/usr/bin/env python3
import sys
import xlsxwriter
from xlsxwriter.utility import xl_rowcol_to_cell
import os
from scipy import stats
# ASSUMPTIONS:
# 1. Step size of x-axis is 1 or -1
# 2. Everything is named correctly
# 3. For every IDVG there is a corresponding IDVD file
# 4. Always 0, -20, -40, -60, -80, -100
# GLOBAL VARIABLES
idvdWorksheets = []   # NOTE(review): populated/used outside this chunk -- presumably IDVD worksheet handles
listofIDVDy_rvs = []  # NOTE(review): presumably collects reverse-sweep Id traces across files -- confirm
curWS = 0             # NOTE(review): current-worksheet index; used elsewhere in the file
skipGraph = 11        # NOTE(review): spacing constant for chart layout -- confirm usage
workbookName = ''     # sample ID shared by all raw files; assigned in main()
def main():
    """Build the processed-data workbook from every file in Raw Data/.

    All raw files must share one sample ID (the "CMM..." token in the file
    name); otherwise the script aborts via sys.exit. The workbook is named
    after that sample ID and one worksheet is added per raw file by
    process_file().
    """
    global workbookName

    # Assume Raw Data/ contains only measurement text files. Using the
    # scandir context manager closes the directory iterator promptly
    # (the original leaked it).
    with os.scandir('Raw Data/') as entries:
        filenames = [entry.name for entry in entries]
    if not filenames:
        # Previously this crashed with an IndexError; fail with a message.
        sys.exit('No files found in Raw Data/.')

    # Sample ID runs from "CMM" up to the first space after the first '.'
    # in the name (the capacitance field), matching the naming convention
    # parsed in process_file().
    sample_ids = [
        name[name.find('CMM'): name.find(' ', name.find('.'))]
        for name in filenames
    ]
    workbookName = sample_ids[0]
    if any(sample_id != workbookName for sample_id in sample_ids):
        sys.exit('Files have different Sample IDs. Please check Raw Data.')

    # Create the workbook (the Excel file) and fill it one file at a time.
    workbook = xlsxwriter.Workbook('Processed Data/' + workbookName + '.xlsx')
    with os.scandir('Raw Data/') as entries:
        for entry in entries:
            process_file(workbook, entry)
    workbook.close()
def process_file(workbook, file):
global idvdWorksheets
global curWS
global listofIDVDy_rvs
# Grab the info from the file name to name the worksheet and set constants
filename = file.name
startIndex = filename.find('CMM')
sIndex = filename.find('s', startIndex)
cIndex = filename.find('c', startIndex)
dIndex = filename.find('d', startIndex)
lIndex = filename.find('L', startIndex)
wIndex = filename.find('W', startIndex)
kIndex = filename.find('K', startIndex)
sampleType = filename[0:4]
sampleNum = filename[sIndex + 1: filename.find(' ', sIndex)]
cap = float(filename[cIndex + 1: filename.find(' ', cIndex)])
deviceNum = filename[dIndex + 1: filename.find(' ', dIndex)]
length = int(filename[filename.find(' ', dIndex) + 1: lIndex])
width = int(filename[filename.find(' ', lIndex) + 1: wIndex])
temperature = int(filename[filename.find(' ', wIndex) + 1: kIndex])
worksheetName = 'S' + sampleNum + ' D' + deviceNum + ' ' + str(temperature) + 'K ' + str(length) + 'L ' + sampleType
worksheet = workbook.add_worksheet(worksheetName)
curFile = open(r'Raw Data/' + filename)
# Find the primary information
line = curFile.readline()
while line.find('Measurement.Primary.Start') == -1:
line = curFile.readline()
priStart = int(line[line.find('\t') + 1: line.find('\n')])
line = curFile.readline()
priStop = int(line[line.find('\t') + 1: line.find('\n')])
line = curFile.readline()
priSteps = int(line[line.find('\t') + 1: line.find('\n')]) # Should be -1 or 1
# Find the secondary information
line = curFile.readline()
while line.find('Measurement.Secondary.Start') == -1:
line = curFile.readline()
secStart = int(line[line.find('\t') + 1: line.find('\n')])
line = curFile.readline()
secCount = int(line[line.find('\t') + 1: line.find('\n')])
line = curFile.readline()
secSteps = int(line[line.find('\t') + 1: line.find('\n')])
# Skip lines until you reach data
line = curFile.readline()
while line.find('Ig') == -1 or line.find('Id') == -1 or line.find('V') == -1:
line = curFile.readline()
# Time to start populating w/ data (First 3 columns)
row = 0
col = 0 # Raw data always starts at column A (0)
primary = line[0: 2]
secondary = 'Vd'
if primary == 'Vd':
secondary = 'Vg'
worksheet.write(row, col, line[0: 2])
worksheet.write(row, col + 1, line[3: 5])
worksheet.write(row, col + 2, line[6: 8])
row = 1
line = curFile.readline()
y_fwd = [] # for calculating trend line manually later
y_rvs = []
while line:
nextIndex = 0;
for i in range(3):
worksheet.write(row, col + i, float(line[nextIndex: line.find('\t', nextIndex)]))
if i == 1:
if (row >= 263 and row <= 303) or (row >= 465 and row <= 505) or (
row >= 667 and row <= 707) or (row >= 869 and row <= 909) or (
row >= 1071 and row <= 1111):
y_fwd.append(float(line[nextIndex: line.find('\t', nextIndex)]))
elif (row >= 304 and row <= 344) or (row >= 506 and row <= 546) or (
row >= 708 and row <= 748) or (row >= 910 and row <= 950) or (
row >= 1112 and row <= 1152):
y_rvs.append(float(line[nextIndex: line.find('\t', nextIndex)]))
nextIndex = line.find('\t', nextIndex) + 1
row += 1
line = curFile.readline()
curFile.close()
### Now worksheet has all the data from the file ###
#Useful variables predefined here
endRow = (abs(priStop) - priStart + 1) * 2 # num of rows
wlRatio = width / length
baseSecInterval = secSteps + secStart
maxX = priStop
midX = (priStart + priStop) // 2 + (priStop // 10) # Should be 60/-60, not really mid
minX = priStart
reverse = False
if priSteps < 0:
maxX = priStart
minX = priStop
reverse = True
#Graph dict, starts w/ values for first graph (abs) and will change for others
title = {'name': workbookName + ' ' + worksheetName}
yAxis = {'name': 'ABS IDRAIN (A)',
'label_position': 'high',
'num_format': '#.#0E-0#',
'num_font': {'bold': 1},
'name_font': {'size': 14},
'name_layout': {'x': 0.03, 'y': 0.3},
}
xAxis = {'name': 'VDRAIN (V)',
'reverse': reverse,
'major_gridlines': {'visible': True},
'min': minX,
'max': maxX,
'name_font': {'size': 14},
'num_font': {'bold': 1},
'label_position': 'low',
}
# Organize data by steps
col = 4 # Original data always starts at column E (4)
row = 1
for i in range(secCount):
worksheet.write(0, col, secondary + ' ' + str(baseSecInterval * i))
for j in range(endRow):
worksheet.write(j + 1, col, '=B' + str(row + 1))
row += 1
col += 1
# Absolute value
col += 1
absStart = col # Starting col of abs values
row = 1
for i in range(secCount):
worksheet.write(0, col, 'Abs ' + secondary + ' ' + str(baseSecInterval * i))
for j in range(endRow):
worksheet.write_formula(j + 1, col, '=ABS(' + xl_rowcol_to_cell(j + 1, col - secCount - 1) + ')')
col += 1
# Abs value graph
col += 1
startIDVD = col
absChart = workbook.add_chart({'type': 'scatter'})
if primary == 'Vg':
xAxis['name'] = 'VGATE (V)'
graph(worksheetName, absChart, title, yAxis, xAxis)
for i in range(1, secCount):
absChart.add_series({ 'values': [worksheetName, 1, absStart + i, endRow, absStart + i],
'categories': [worksheetName, 1, 0, endRow, 0],
'name': str(baseSecInterval * i),
'name_font': {'bold': 1},
'line': {'dash_type': 'round_dot'},
'marker': {'type': 'circle'},
'min': minX,
})
worksheet.insert_chart(xl_rowcol_to_cell(1, col), absChart)
# Log base abs value graph
absLogChart = workbook.add_chart({'type': 'scatter'})
yAxis['name'] = 'ABS IDRAIN (A)'
yAxis['log_base'] = 10
graph(worksheetName, absLogChart, title, yAxis, xAxis)
yAxis.pop('log_base')
for i in range(1, secCount):
absLogChart.add_series({ 'values': [worksheetName, 1, absStart + i, endRow, absStart + i],
'categories': [worksheetName, 1, 0, endRow, 0],
'name': str(baseSecInterval * i),
'name_font': {'bold': 1},
'line': {'dash_type': 'round_dot'},
'marker': {'type': 'circle'},
'min': minX,
})
worksheet.insert_chart(xl_rowcol_to_cell(26, col), absLogChart)
if primary == 'Vg':
global skipGraph
# Sq root abs values
col += skipGraph
sqrtStart = col;
row = 1
for i in range(secCount):
worksheet.write(0, col, 'SQRT Abs ' + secondary + ' ' + str(baseSecInterval * i))
for j in range(endRow):
worksheet.write_formula(j + 1, col, '=SQRT(' + xl_rowcol_to_cell(j + 1, absStart + i) + ')')
col += 1
# Sq root FWD graph
col += 1
sqrtFwdChart = workbook.add_chart({'type': 'scatter'})
title['name'] = workbookName + ' ' + worksheetName + ' FWD VTH'
yAxis['name'] = 'SQRT ABS IDRAIN (A)'
graph(worksheetName, sqrtFwdChart, title, yAxis, xAxis)
for i in range(1, secCount):
sqrtFwdChart.add_series({'values': [worksheetName, 1, sqrtStart + i, endRow // 2, sqrtStart + i],
'categories': [worksheetName, 1, 0, endRow // 2, 0],
'name': str(baseSecInterval * i) + ' V',
'name_font': {'bold': 1},
'line': {'dash_type': 'round_dot'},
'marker': {'type': 'circle'},
'min': minX,
})
worksheet.insert_chart(xl_rowcol_to_cell(1, col), sqrtFwdChart)
# Sq root RVS graph
sqrtRvsChart = workbook.add_chart({'type': 'scatter'})
title['name'] = workbookName + ' ' + worksheetName + ' RVS VTH'
graph(worksheetName, sqrtRvsChart, title, yAxis, xAxis)
for i in range(1, secCount):
sqrtRvsChart.add_series({'values': [worksheetName, endRow // 2 + 1, sqrtStart + i, endRow, sqrtStart + i],
'categories': [worksheetName, endRow // 2 + 1, 0, endRow, 0],
'name': str(baseSecInterval * i) + ' V',
'name_font': {'bold': 1},
'line': {'dash_type': 'round_dot'},
'marker': {'type': 'circle'},
'min': minX,
})
worksheet.insert_chart(xl_rowcol_to_cell(26, col), sqrtRvsChart)
# Trend line FWD graph
col += skipGraph
trendFwdChart = workbook.add_chart({'type': 'scatter'})
title['name'] = workbookName + ' ' + worksheetName + ' FWD VTH'
xAxis['max'] = midX
graph(worksheetName, trendFwdChart, title, yAxis, xAxis)
for i in range(1, secCount):
trendFwdChart.add_series({'values': [worksheetName, endRow // 2 - 40, sqrtStart + i, endRow // 2, sqrtStart + i],
'categories': [worksheetName, endRow // 2 - 40, 0, endRow // 2, 0],
'name': str(baseSecInterval * i) + ' V',
'name_font': {'bold': 1},
'line': {'dash_type': 'round_dot'},
'min': minX,
'marker': {'type': 'circle'},
'trendline': {'type': 'linear',
'display_equation': True,
'name': 'Lin ' + str(baseSecInterval * i) + ' V',
},
})
worksheet.insert_chart(xl_rowcol_to_cell(1, col), trendFwdChart)
# Trend line RVS graph
trendRvsChart = workbook.add_chart({'type': 'scatter'})
title['name'] = workbookName + ' ' + worksheetName + ' RVS VTH'
graph(worksheetName, trendRvsChart, title, yAxis, xAxis)
for i in range(1, secCount):
trendRvsChart.add_series({'values': [worksheetName, endRow // 2 + 1, sqrtStart + i, endRow // 2 + 41, sqrtStart + i],
'categories': [worksheetName, endRow // 2 + 1, 0, endRow // 2 + 41, 0],
'name': str(baseSecInterval * i) + ' V',
'name_font': {'bold': 1},
'line': {'dash_type': 'round_dot'},
'min': minX,
'marker': {'type': 'circle'},
'trendline': {'type': 'linear',
'display_equation': True,
'name': 'Lin ' + str(baseSecInterval * i) + ' V',
},
})
worksheet.insert_chart(xl_rowcol_to_cell(26, col), trendRvsChart)
# Calculate trend line values
mFwd, bFwd, mRvs, bRvs, xInterFwd, xInterRvs = calc_trendline(y_fwd, y_rvs)
# Create intercept chart
col += skipGraph
for i in range(1, secCount):
worksheet.write(i, col, 'Vd ' + str(baseSecInterval * i))
col += 1
worksheet.write(0, col, 'm FWD')
for i in range(5):
worksheet.write(i + 1, col, mFwd[i])
col += 1
worksheet.write(0, col, 'b FWD')
for i in range(5):
worksheet.write(i + 1, col, bFwd[i])
col += 1
fVth = col
worksheet.write(0, col, 'VTH FWD')
for i in range(1, secCount):
worksheet.write(i, col, xInterFwd[i - 1])
col += 1
worksheet.write(0, col, 'm RVS')
for i in range(5):
worksheet.write(i + 1, col, mRvs[i])
col += 1
worksheet.write(0, col, 'b RVS')
for i in range(5):
worksheet.write(i + 1, col, bRvs[i])
col += 1
rVth = col
worksheet.write(0, col, 'VTH RVS')
for i in range(1, secCount):
worksheet.write(i, col, xInterRvs[i - 1])
# dId/dVg
col += 2
dIdStart = col
for i in range(1, secCount):
worksheet.write(0, col, "dId/dVg " + str(baseSecInterval * i))
for j in range(1, endRow - 1):
worksheet.write_formula(j, col, '=LINEST(' + xl_rowcol_to_cell(j, 4 + i) + ':' + xl_rowcol_to_cell(j + 2,
4 + i) + ',A' + str(j + 1) + ':A' + str(j + 3) + ')')
col += 1
# dSQId/dVg
col += 1
dSQIdStart = col
for i in range(1, secCount):
worksheet.write(0, col, "dSQId/dVg " + str(baseSecInterval * i))
for j in range(1, endRow - 1):
worksheet.write_formula(j, col, '=LINEST(' + xl_rowcol_to_cell(j, sqrtStart + i) + ':' + xl_rowcol_to_cell(
j + 2, sqrtStart + i) + ',A' + str(j + 1) + ':A' + str(j + 3) + ')')
col += 1
# Linear Mobility
col += 1
worksheet.write(0, col, "Linear Mobility")
col += 1
linMob = col
for i in range(1, secCount):
worksheet.write(0, col, "lmob " + str(baseSecInterval * i))
for j in range(1, endRow - 1):
worksheet.write_formula(j, col, '=(' + xl_rowcol_to_cell(j, dIdStart + i - 1) + ')/(' + str(
abs(baseSecInterval * i)) + '*' + str(wlRatio * cap) + ')')
col += 1
# Sat Mobility
col += 1
worksheet.write(0, col, "Sat Mobility")
col += 1
satMob = col
for i in range(1, secCount):
worksheet.write(0, col, "smob " + str(baseSecInterval * i))
for j in range(1, endRow - 1):
worksheet.write_formula(j, col, '=(2*(' + xl_rowcol_to_cell(j, dSQIdStart + i - 1) + ')^2)/(' + str(
wlRatio * cap) + ')')
col += 1
# Combined mobilities chart: 0-Vth is sat and Vth + 1 - -100 is lin
col += 1
worksheet.write(0, col, "Combo Mobility")
col += 1
combMob = col
for i in range(1, secCount):
worksheet.write(0, col, "mob " + str(baseSecInterval * i))
curDivPoint = int(round(xInterRvs[i - 1])) + baseSecInterval * i
if curDivPoint < 0:
for j in range(1, abs(curDivPoint) + 2):
if j <= 99:
worksheet.write_formula(j, col, '=' + xl_rowcol_to_cell(j, satMob + i - 1))
if curDivPoint > 0:
for j in range(1, 100):
worksheet.write_formula(j, col, '=' + xl_rowcol_to_cell(j, linMob + i - 1))
else:
for j in range(abs(curDivPoint) + 2, 100):
worksheet.write_formula(j, col, '=' + xl_rowcol_to_cell(j, linMob + i - 1))
col += 1
# Combined mobility graph IDVG
col += 1
mobChart = workbook.add_chart({'type': 'scatter'})
title['name'] = workbookName + ' ' + worksheetName + ' MOBILITY'
yAxis['name'] = 'Mobility (cm^2/Vs)'
yAxis['num_format'] = '#.#'
xAxis['max'] = maxX
graph(worksheetName, mobChart, title, yAxis, xAxis)
for i in range(1, secCount):
curDivPoint = abs(int(round(xInterRvs[i - 1])) + baseSecInterval * i)
mobChart.add_series({'values': [worksheetName, 1, combMob + i - 1, curDivPoint + 2, combMob + i - 1],
'categories': [worksheetName, 1, 0, curDivPoint + 2, 0],
'name': 'Sat' + str(baseSecInterval * i) + ' V',
'name_font': {'bold': 1},
'line': {'dash_type': 'round_dot'},
'min': minX,
'marker': {'type': 'circle'},
})
mobChart.add_series({'values': [worksheetName, curDivPoint + 2, combMob + i - 1, 99, combMob + i - 1],
'categories': [worksheetName, curDivPoint + 2, 0, 99, 0],
'name': 'Lin' + str(baseSecInterval * i) + ' V',
'name_font': {'bold': 1},
'line': {'dash_type': 'solid'},
'min': minX,
'marker': {'type': 'square'},
})
worksheet.insert_chart(xl_rowcol_to_cell(1, col), mobChart)
### Now do IDVD stuff w/ reverse Vth -100 ###
y_rvs = listofIDVDy_rvs[curWS] # load list corresponding to cur idvd ws
y_rvs = y_rvs[0]
# Mob Factor
col = startIDVD
col += skipGraph
idvdWorksheets[curWS].write(0, col, "Mob Factor")
col += 1
factorStart = col
for i in range(1, secCount):
idvdWorksheets[curWS].write(0, col, "F " + str(baseSecInterval * i))
for j in range(1, endRow - 2):
idvdWorksheets[curWS].write_formula(j, col, '=1/((' + str(baseSecInterval * i) + '*A' + str(j + 1) +
')-(' + str(xInterRvs[4]) + '*A' + str(j + 1) + ')-((A' + str(j + 1) + ')^2/2))')
col += 1
# Linear Mobility
col += 1
idvdWorksheets[curWS].write(0, col, "Linear Mobility")
col += 1
linMob = col
for i in range(1, secCount):
idvdWorksheets[curWS].write(0, col, "lmob " + str(baseSecInterval * i))
for j in range(1, endRow - 2):
idvdWorksheets[curWS].write_formula(j, col, '=(' + xl_rowcol_to_cell(j, absStart + i) + '*' +
xl_rowcol_to_cell(j, factorStart + i - 1) + ')/(' + str(wlRatio * cap) + ')')
col += 1
# Sat Mobility
col += 1
idvdWorksheets[curWS].write(0, col, "Sat Mobility")
col += 1
satMob = col
for i in range(1, secCount):
idvdWorksheets[curWS].write(0, col, "smob " + str(baseSecInterval * i))
for j in range(1, endRow - 2):
idvdWorksheets[curWS].write_formula(j, col, '=((2*' + xl_rowcol_to_cell(j, absStart + i) + ')/((' +
str(wlRatio * cap) + ')*(' + str(baseSecInterval * i) + '-' + str(xInterRvs[4]) +')^2))')
col += 1
# Combined mobilities chart: 0-Vth is lin and Vth + 1 - -100 is sat
col += 1
idvdWorksheets[curWS].write(0, col, "Combo Mobility")
col += 1
combMob = col
for i in range(1, secCount):
idvdWorksheets[curWS].write(0, col, "mob " + str(baseSecInterval * i))
curDivPoint = int(round(baseSecInterval * i - xInterRvs[4]))
if curDivPoint < 0:
for j in range(1, abs(curDivPoint) + 1):
idvdWorksheets[curWS].write_formula(j, col, '=' + xl_rowcol_to_cell(j, linMob + i - 1))
curDivPoint = abs(curDivPoint)
else:
curDivPoint = 0;
for j in range(curDivPoint + 1, 100):
idvdWorksheets[curWS].write_formula(j, col, '=' + xl_rowcol_to_cell(j, satMob + i - 1))
col += 1
# Combined mobilities graph
# First find max current (upperbound) for 60 V bias
#max60 = -1.0
# for i in range(41):
# check = abs(y_rvs[41 * 2 + i])
# if(check > 50):
# max60 = check
# max60 = int((2 * max60) / ((wlRatio * cap) * pow(midX - xInterRvs[4], 2))) + 1
col += 1
mobChart = workbook.add_chart({'type': 'scatter'})
title['name'] = workbookName + ' ' + idvdWorksheets[curWS].get_name() + ' MOBILITY'
yAxis['max'] = 50
xAxis['name'] = 'VDRAIN(V)'
graph(worksheetName, mobChart, title, yAxis, xAxis)
for i in range(1, secCount):
curDivPoint = int(round(baseSecInterval * i - xInterRvs[4]))
if curDivPoint < 0:
mobChart.add_series({'values': [idvdWorksheets[curWS].get_name(), 1, combMob + i - 1, abs(curDivPoint) + 2, combMob + i - 1],
'categories': [idvdWorksheets[curWS].get_name(), 1, 0, abs(curDivPoint) + 2, 0],
'name': 'Lin' + str(baseSecInterval * i) + ' V',
'name_font': {'bold': 1},
'line': {'dash_type': 'round_dot'},
'min': minX,
'marker': {'type': 'circle'},
})
curDivPoint = abs(curDivPoint)
else:
curDivPoint = -1
mobChart.add_series({'values': [idvdWorksheets[curWS].get_name(), curDivPoint + 2, combMob + i - 1, 99,
combMob + i - 1],
'categories': [idvdWorksheets[curWS].get_name(), curDivPoint + 2, 0, 99, 0],
'name': 'Sat' + str(baseSecInterval * i) + ' V',
'name_font': {'bold': 1},
'line': {'dash_type': 'solid'},
'min': minX,
'marker': {'type': 'square'},
})
idvdWorksheets[curWS].insert_chart(xl_rowcol_to_cell(1, col), mobChart)
curWS += 1
else:
idvdWorksheets.append(worksheet)
listofIDVDy_rvs.append((list(y_rvs), y_rvs[0]))
def graph(worksheetName, chart, title, yAxis, xAxis):
    """Apply the shared layout to an xlsxwriter scatter chart.

    Sets a fixed chart size and plot area, a bold legend, then the caller's
    title and axis option dicts.  ``worksheetName`` is currently unused but
    kept for interface compatibility with existing call sites.
    """
    # BUG FIX: removed dead `global workbookName` declaration — the name was
    # neither read nor assigned in this function.
    chart.set_size({'width': 680,
                    'height': 480,
                    })
    chart.set_plotarea({'layout': {'x': 0.17,
                                   'y': 0.1,
                                   'width': 0.63,
                                   'height': 0.73
                                   }
                        })
    chart.set_legend({'font': {'bold': 1, 'size': 14}})
    chart.set_title(title)
    chart.set_y_axis(yAxis)
    chart.set_x_axis(xAxis)
def calc_trendline(y_fwd, y_rvs):
    """Fit linear trend lines to sqrt(|I|) over five voltage sweeps.

    Each direction contains five consecutive 41-point segments.  For every
    segment a straight line is regressed against the fixed gate-voltage
    axis (forward: -60 -> -100 V, reverse: -100 -> -60 V).

    Returns six lists of five values: slopes, negated intercepts and
    x-axis intercepts for the forward and reverse directions.
    """
    # Fixed voltage axes for the two sweep directions.
    xFwd = [-60 - step for step in range(41)]
    xRvs = [-100 + step for step in range(41)]
    mFwd, bFwd, xInterFwd = [], [], []
    mRvs, bRvs, xInterRvs = [], [], []
    for seg in range(5):
        lo = seg * 41
        # Work on sqrt(|I|), which is linear in V above threshold.
        fwdY = [abs(v) ** 0.5 for v in y_fwd[lo:lo + 41]]
        rvsY = [abs(v) ** 0.5 for v in y_rvs[lo:lo + 41]]
        fwdFit = stats.linregress(xFwd, fwdY)
        rvsFit = stats.linregress(xRvs, rvsY)
        mFwd.append(fwdFit[0])
        bFwd.append(fwdFit[1] * -1)
        xInterFwd.append(fwdFit[1] * -1 / fwdFit[0])
        mRvs.append(rvsFit[0])
        bRvs.append(rvsFit[1] * -1)
        xInterRvs.append(rvsFit[1] * -1 / rvsFit[0])
    return mFwd, bFwd, mRvs, bRvs, xInterFwd, xInterRvs
if __name__ == '__main__':
    # Script entry point: run the demo fit defined in main() above.
    main()
    sys.exit(42)  # exits with status 42 — presumably a debug sentinel; TODO confirm
| [
"melissayang@utexas.edu"
] | melissayang@utexas.edu |
002dabe336a49ac40c5c2814c38695ab8728203e | a06619819f686dbf42277fd6382a0de6af2090ea | /password_generator.py | 9b3096f3b76a2e4ed3fe6051101956d7ad51c3be | [] | no_license | martinpzj/PasswordGenerator | 3fb0cc0fd757ad6dc1e2ec5ecaa91b464adf814a | 90f4d0d477ed744232dfd72192f166a505764a17 | refs/heads/master | 2020-03-28T13:06:29.719913 | 2018-09-11T19:15:15 | 2018-09-11T19:15:15 | 148,366,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,800 | py | import random
# Pool used for password generation: digits, letters and punctuation.
characters = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&()*+,-./:;<=>?@[\]^_`{|}~'

def generator(length, charset=None):
    """Return a random password of ``length`` characters.

    charset -- optional pool of characters to draw from; defaults to the
    module-level ``characters`` string, so the original one-argument call
    ``generator(length)`` keeps working unchanged.
    """
    pool = characters if charset is None else charset
    # join a comprehension instead of repeated string concatenation
    return ''.join(random.choice(pool) for _ in range(length))
# Interactive loop: ask for a password length until a valid one is given,
# then print ten candidate passwords and stop.
while True:
    print('Password ranges from 8-16 characters long')
    password_length = int(input('Type password length: '))
    if 8 <= password_length <= 16:
        # BUG FIX: the original repeated an identical 10-password loop in
        # nine byte-for-byte duplicate elif branches (one per length);
        # a single branch covers every valid length.
        for _ in range(10):
            print(generator(password_length))
        break
    elif password_length > 16:
        # BUG FIX: the original tested `> 8`, which only behaved correctly
        # because the 8-16 case was handled first; `> 16` states the intent.
        print('Password length was too long')
    else:
        print('Password length was too short')
continue | [
"mperez58@ucsc.edu"
] | mperez58@ucsc.edu |
e7cc18d0c214ed28a66649510ef8b99453ab4b5a | 6f7b63a20d827673db2a12974ec13e0e932c6d6f | /lesson2/task3.py | 547ec4aeed81674ce1024bb3aa17dc5b18c018ab | [] | no_license | Tskon/Pyton-gb | 075652867c98cf43cc3c60af2eb4872c22a69921 | 0aeadee644b2c998216b6549eee44bfeec8650a4 | refs/heads/master | 2020-03-29T10:11:46.691809 | 2018-10-18T17:39:17 | 2018-10-18T17:39:17 | 149,793,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | # Дан список заполненный произвольными целыми числами. Получите новый список, элементами которого будут только уникальные элементы исходного.
# A list of arbitrary integers; derive two lists from it:
# (1) the distinct values, (2) only the values occurring exactly once.
numList = [1, 15, 3, 1, 2, 12, 3]

# Variant 1 — collapse duplicates through a set.
uniqNumList1 = list(set(numList))
print('без дублей', uniqNumList1)

# Variant 2 — keep only values that never repeat: remove one copy of each
# distinct value; whatever remains was duplicated in the original list.
leftovers = numList.copy()
for value in set(numList):
    leftovers.remove(value)
uniqNumList2 = list(set(uniqNumList1).difference(leftovers))
print('только неповторяющиеся', uniqNumList2)
"tsekhmeister.k@gmail.com"
] | tsekhmeister.k@gmail.com |
422a26973120e08992ce53e7ece4c144afdd9166 | 5c667c2136db43f8c3b07bf29a4e450c47c09122 | /autoClicker/AutoClicker.spec | 229a8ff774ecfa7275606e93502d03616cc6c7ca | [] | no_license | g-ulrich/AutoClicker | 51d8a209f87c437be3f1902d4bc5a52b24029703 | 4a21d6a1c6c4f730a1d2456fe626a168faee3754 | refs/heads/main | 2023-04-22T05:56:19.597633 | 2021-05-03T13:47:06 | 2021-05-03T13:47:06 | 348,132,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,130 | spec | # -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['AutoClicker.py'],
pathex=['C:\\desktop2\\2021\\scripts\\python\\PyCharmProjects\\SuperClickerUtility\\venv\\Lib\\site-packages', 'C:\\desktop2\\2021\\scripts\\python\\PyCharmProjects\\SuperClickerUtility'],
binaries=[],
datas=[('images', 'images')],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=['tkinter', 'test', 'sqlite3', 'numpy'],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='AutoClicker',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=False , icon='images\\mouse.ico')
| [
"noreply@github.com"
] | noreply@github.com |
1cad71006bfaaca4c9073496567deaea73f84e40 | c191d7db16366f1532c05c6bc238c59f378f008a | /zscanFitter.py | 52938f9afe2a92ace41b1d0549f4d42b9e9f10b3 | [] | no_license | hurxx018/FFS | 410b9cd07da0b2a16c06fc59da31d61d35a4803e | eeaf4dcc76709e58d8775f36ce82354de8262b3b | refs/heads/master | 2020-04-16T18:02:12.130907 | 2017-08-25T01:20:34 | 2017-08-25T01:20:34 | 65,945,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,570 | py | import numpy as np
from zscan_tools.zscanPSFGLsp import zscanPSFGLsp
from zscan_tools.zscanMultiLayer import zscanMultiLayer
from mpfit.mpfit3 import mpfit
class zscanFitter(object):
"""docstring for zscanFitter."""
#TO DO: explain each geo in detail
geodict = {"BASE":-1, "SLAB(L)":0, "UP":1, "DOWN":2, "DELTA":3
, "SPHERE(R)":4, "CYLINDER(R)":5
, "notSLAB":10, "notDELTA":13, "notSPHERE":14, "notCYLINDER":15
, "notSPHERE:up":24, "notCYLINDERup":25}
# mGL : zR, y, w0
# 3DG : zR, w0
# GL : zR, w0
psfdict = {"mGL":3, "3DG":1, "GL":0}
psf_nparasdict = {"mGL":3, "3DG":2, "GL":2}
def __init__(self, psfmodel="mGL", zoffset=None, channels=[1]):
super(zscanFitter, self).__init__()
if psfmodel in self.psfdict.keys():
self._psfmodel = psfmodel # psfmodel
else:
raise ValueError("{} is not a correct PSF model".format(psfmodel))
self._zoffset = zoffset
if not isinstance(channels, list):
raise TypeError("The type of channels is a list.")
self._channels = channels
# psf can differ from channel to channel due to color abberation.
self._psf, self._psffixed = {}, {}
for cha in channels:
self._psf.setdefault(cha, []) # for psf paras on each channel
self._psffixed.setdefault(cha, []) # fixed condition for psf
# geoinfo is given as a list of layers
self._geoinfo = [] # for sample geometric models
self._geoinfofixed = [] # fixed for sample geometric models
# spillover paras for multi-channel zscan FFS
self._spillover = {}
def setkz(self, z, kz=[]):
# TODO
# self.plot()
pass
def _checkfitinputs(self, z, kz, errkz):
"""Check if the fit inputs are correct.
"""
if len(kz) != len(self.channels):
raise ValueError("The number of kzs does not match to the number \
of channels")
if (errkz is not None):
if not isinstance(errkz, list):
raise TypeError("The type of errkz is not list")
elif len(errkz) != len(self.channels):
raise ValueError("The number of kzs does not match to \
the number of channels")
for cha in self.channels:
if self._isPSFEmpty(cha):
raise ValueError("The psf paras for the channel {} is not \
given.".format(cha))
# check the zoffset
if self._isZoffsetEmpty():
tzoffset = input("Set up zoffset in the range between \
{%.2f} and {%.2f} : ".format(z.min(), z.max()))
self._zoffset = float(tzoffset)
elif not (z.min() <= self._zoffset <= z.max()):
print("{} is out of the range between \
{%.2f} and {%.2f} : ".format(z.min(), z.max()))
tzoffset = input("Set up zoffset in the range between \
{%.2f} and {%.2f} : ".format(z.min(), z.max()))
self._zoffset = float(tzoffset)
# check the geo info
if self._isGeoinfoEmpty():
raise ValueError("No geoinfo is available.")
# check the spillover
if len(self.channels) > 1 and self._isSpillOverEmpty():
raise ValueError("No spillover paras is available.")
@staticmethod
def getErrkz(kz):
"""Calculate the error for kz.
sqrt(phton count) is equal to the error of photon count due to the shot
noise.
"""
if not isinstance(kz, list):
raise TypeError("The input kz is not list")
res = [np.sqrt(x) for x in kz]
for x in res:
index = (x == 0)
x[index] += 1.
return res
def fit(self, z, kz=[], errkz=None):
"""fit zscan intensity profiles of multi-channel with a single geometric
model
Questions: How to apply the spillover parameters on each each geometric
layer. especially for the background counts when the fluorescent
intensity is very low.
"""
self._checkfitinputs(z, kz, errkz)
if errkz is None:
errkz = self.getErrkz(kz)
# TODO
x = z
y = np.array(kz).flatten()
yerr = np.array(errkz).flatten()
paras, fixed, fitinfo = self._generateparas()
# see docstring in _generateparas to know what paras, fitinfo, and fixed
# are.
parinfo = [{'value':v, 'fixed':f, 'limited':[1,0], 'limits':[0.,0.]}
for v, f in zip(paras, fixed)]
# TODO consider extra features in myfunct
def myfunct(p, fjac=None, x=None, y=None, err=None, info=None):
model = self.kzMultiLayerFCT(x, p, info=info)
status = 0
return [status, (y-model)/err]
fa = {"x":x, "y":y, "err":yerr, "info":fitinfo}
res = mpfit(myfunct, paras, functkw=fa, parinfo=parinfo, maxiter=300, quiet=1)
yfit = self.kzMultiLayerFCT(x, res.params, info=fitinfo)
return res, yfit
def _checkpsfmodel(self, psfparas, fixed):
if self._psfmodel == "mGL":
if psfparas != []:
assert (len(psfparas) == 3), "The number of paras should be 3."
elif self._psfmodel == "GL":
if psfparas != []:
assert (len(psfparas) == 2), "The number of paras should be 2."
elif self._psfmodel == "3DG":
if psfparas != []:
assert (len(psfparas) == 2), "The number of paras should be 2."
if fixed != []:
assert (len(psfparas) == len(fixed)), \
"The number of paras does not match to the number of fixed."
def setPSF(self, channel=1, psfparas=[], fixed=[]):
"""set PSF model's paras and fixed conditions
psfparas = [zR, y, w0] for mGL
psfparas = [zR, w0] for GL
psfparas = [zR, w0] for 3DG
"""
if channel not in self._channels:
raise ValueError("The channel {} is not in channels".format(channel))
self._checkpsfmodel(psfparas, fixed)
if psfparas != [] and fixed != []:
self._psf[channel] = psfparas
self._psffixed[channel] = fixed
elif psfparas != [] and fixed == []:
self._psf[channel] = psfparas
self._psffixed[channel] = [0]*len(psfparas)
elif psfparas == [] and fixed == []:
self._setPSFparas(channel)
else:
raise ValueError("psfpara is not available.")
def _setPSFparas(self, channel):
"""set PSF paras by hands"""
print("set PSF paras for {} psf model".format(self._psfmodel))
self._psf.setdefault(channel, []) # for psf paras on each channel
self._psffixed.setdefault(channel, []) # fixed condition for psf
if self._psfmodel == "mGL":
psfpara_names = ['zR', 'y', 'w0']
elif self._psfmodel in ['GL', '3DG']:
psfpara_names = ['zR', 'w0']
for i in psfpara_names:
h1 = input("PSF para for {} ? ".format(i))
h2 = input("fix {} (1 or 0) ? ".format(i))
self._psf[channel].append(float(h1))
self._psffixed[channel].append(float(h1))
def getPSF(self, channel=1):
"""get the whole information about the PSF"""
return self._psfmodel, self._psf[channel], self._psffixed[channel]
def addLayer(self, geomodel):
"""geomodel is either integer or string in geodict.
paras and fixed are manually set up.
geomodel >> a key in geodict or a value in geodict
"""
self.setLayer(geomodel)
self._setGeoParas()
return
def _setGeoParas(self):
"""set geo-paras and fixed by hands"""
print("set paras for {} model".format(self._geoinfo[-1]['geo']))
para_names, fpara_names = self._paranames()
for i, j in zip(para_names, fpara_names):
h1 = input("para for {} ? ".format(i))
h2 = input("fix {} (1 or 0) ? ".format(i))
self._geoinfo[-1][i] = float(h1)
self._geoinfofixed[-1][j] = int(h2)
return
def _checkgeomodel(self, geomodel):
if isinstance(geomodel, int):
for key, value in self.geodict.items():
if value == geomodel:
return key
else:
raise ValueError("geomodel is not available.")
elif isinstance(geomodel, str):
if geomodel in self.geodict:
return geomodel
else:
raise ValueError("geomodel is not available.")
else:
raise TypeError("The type of geomodel is incorrect.")
def _paranames(self):
if len(self.channels) == 1:
return ['k1', 'LR'], ['fk1', 'fLR']
elif len(self.channels) == 2:
return ['k1', 'k2', 'LR'], ['fk1', 'fk2', 'fLR']
elif len(self.channels) == 3:
return ['k1', 'k2', 'k3', 'LR'], ['fk1', 'fk2', 'fk3', 'fLR']
else:
raise ValueError("The analysis is not available.")
def setLayer(self, geomodel, paras=[], fixed=[], layer_index=None):
"""set geometric model on each layer with geo-paras and fixed conditions
geomodel : a key in geodict or a value in geodict
paras: list
for single channel [1] : [k1, LR]
for dual channels [1, 2] : [k1, k2, LR]
for triple channels [1, 2, 3] : [k1, k2, k3, LR]
where k1, k2, k3 >> counts per bin, LR >> length or radius
fixed: list of 0 or 1 (0: free, 1: fixed in fitting)
For a given fixed, len(fixed) == len(paras)
layer_index : None or a non-negative integer
None : a single geometric layer is added at the end of current
geometric models
a non-negative integer : a geometric layer of the layer_index is reset
by given paras and fixed
"""
geo = self._checkgeomodel(geomodel)
para_names, fpara_names = self._paranames()
if paras == [] and fixed == []:
paras = [0.]*len(para_names)
fixed = [0]*len(fpara_names)
elif paras != [] and fixed == []:
fixed = [0]*len(fpara_names)
elif paras == [] and fixed != []:
paras = [0.]*len(para_names)
else:
assert len(paras) == len(para_names), "The number elements in paras\
should be equal to {}".format(len(self.channels) + 1)
assert len(fixed) == len(fpara_names), "The number elements in fixed\
should be equal to {}".format(len(self.channels) + 1)
if len(paras) != len(fixed):
raise ValueError("The number of elements in paras does not \
match to the number of elements in fixed.")
# assign paras and fixed to _geoinfo and _geoinfofixed
if layer_index == None:
self._geoinfo.append(dict([(k, v) for k, v in zip(para_names, paras)]))
self._geoinfofixed.append(dict([(k, v) for k, v in zip(fpara_names, fixed)]))
self._geoinfo[-1]['geo'] = geo
self._geoinfofixed[-1]['geo'] = geo
elif (isinstance(layer_index, int) and
layer_index < len(self._geoinfo)):
if self._geoinfo[layer_index]['geo'] == geo:
for k, v in zip(para_names, paras):
self._geoinfo[layer_index][k] = v
for k, v in zip(fpara_names, fixed):
self._geoinfofixed[layer_index][k] = v
else:
raise ValueError("model does not match.")
else:
raise ValueError("layer_index is out of the range")
return
def removeLayer(self, layer_index=None):
# remove a geometric layer
if self._geoinfo != []:
try:
if isinstance(layer_index, int):
self._geoinfo.pop(layer_index)
self._geoinfofixed.pop(layer_index)
elif layer_index == None:
self._geoinfo.pop()
self._geoinfofixed.pop()
except:
raise ValueError("layer_index is out of the allowed range")
else:
print("geoinfo is empty.")
return
def setSpillover(self, paras):
"""a method for setting up spillover parameters
"""
if len(self.channels) == 1:
return
elif len(self.channels) == 2:
spillover = ['f12']
elif len(self.channels) == 3:
spillover = ['f12', 'f13', 'f23']
else:
raise ValueError("The analysis is not available")
assert len(paras) == len(spillover), "The number of elments in paras\
should be equal to {}".format(len(spillover))
for k, v in zip(spillover, paras):
self._spillover[k] = v
return
def _generateparas(self):
"""
paras = [psfparas for channel 1, (psfparas for channel 2, psfparas for channel 3),
offset, layer[0]_paras, layer[1]_paras, ......]
fixed =
fitinfo = {"nch":#, "psfmodel":#, "n_psfparas":#, "geo":[]}
"""
for cha in self._channels:
if self._isPSFEmpty(cha):
raise ValueError("psf paras are not available.")
if self._isGeoinfoEmpty():
raise ValueError("geoinfo paras are not available.")
if self._isZoffsetEmpty():
raise ValueError("zoffset para is not available.")
paras, fixed, fitinfo = [], [], {}
nch = 0
for channel in sorted(self._psf.keys()):
paras += self._psf[channel]
fixed += self._psffixed[channel]
nch += 1
fitinfo["nch"] = nch
fitinfo["psfmodel"] = self.psfdict[self._psfmodel]
fitinfo["n_psfparas"] = self.psf_nparasdict[self._psfmodel]
fitinfo["geo"] = []
paras += [self._zoffset]
fixed += [0]
para_names, fpara_names = self._paranames()
for i, j in zip(self._geoinfo, self._geoinfofixed):
temp = [i[k] for k in para_names]
ftemp = [j[k] for k in fpara_names]
paras.extend(temp)
fixed.extend(ftemp)
fitinfo["geo"] += [self.geodict[i["geo"]]]
fitinfo["spillover"] = self._spillover
return np.array(paras).flatten(), fixed, fitinfo
# def kzfct(self, channel=1):
# self._geoinfo
# zscanMultiLayer
# a, b = self._generateparas()
# kz_fct = self.kzMultiLayerFCT(self.z, a, info=b)
@staticmethod
def kzMultiLayerFCT(z, paras, info=None):
"""zscan multilayer function for the fit.
For given paras and info,
parasPSf, zoffset, model are reconstituted for zscanMultiLayer.
"""
result = np.zeros(z.size) # z.size / nch
nch = info["nch"]
n_psfp0 = info["n_psfparas"]
psfparas = []
for i in range(nch):
psfparas.append(paras[0 + i*n_psfp0: n_psfp0 + i*n_psfp0])
zoff = paras[n_psfp0*nch]
nparas = {1:2, 2:3, 3:4} # nparas[nch] == len(self._paranames()[0])
if nch == 2:
spo = ['f12']
elif nch == 3:
spo = ['f12', 'f13', 'f23']
geomodels = [[] for x in range(nch)]
for x in range(len(info["geo"])):
temp = paras[n_psfp0*nch + 1 + nparas[nch]*x
:n_psfp0*nch + nparas[nch] + 1 + nparas[nch]*x]
geomodels[0].append({"geo":info["geo"][x], "k":temp[0], "LR":temp[nparas[nch]-1]})
if nch == 2:
geomodels[1].append({"geo":info["geo"][x], "k":temp[0], "LR":temp[nparas[nch]-1]})
elif nch == 3:
geomodels[1].append({"geo":info["geo"][x], "k":temp[1], "LR":temp[nparas[nch]-1]})
geomodels[2].append({"geo":info["geo"][x], "k":temp[2], "LR":temp[nparas[nch]-1]})
if nch == 1:
return zscanMultiLayer(z, zoff, psfparas[0], model=geomodels[0], psfmodel=info["psfmodel"])
else:
# TODO zscan profiles for multiple channel
# TODO take into account the spillover.
res = []
for i in range(nch):
t = zscanMultiLayer(z, zoff, psfparas[i], model=geomodels[i], psfmodel=info["psfmodel"])
if i == 0:
res.append(t)
elif i == 1:
a = info["spillover"][spo[0]]
t += a*res[0]
res.append(t)
elif i == 2:
a = info["spillover"][spo[1]]
b = info["spillover"][spo[2]]
res.append()
temp = np.hstack(res)
# print(temp)
return temp
# return np.concatenate(res, axis=1)
def _isPSFEmpty(self, channel):
return self._psf[channel] == []
def _isGeoinfoEmpty(self):
return self._geoinfo == []
def _isZoffsetEmpty(self):
return (self._zoffset is None)
def _isSpillOverEmpty(self):
return self._spillover == {}
    @property
    def psfmodel(self):
        """Name of the PSF model in use (read-only)."""
        return self._psfmodel
    @property
    def psf(self):
        """Per-channel PSF parameter lists (read-only)."""
        return self._psf
    @property
    def psffixed(self):
        """Per-channel fixed/free flags for the PSF parameters (read-only)."""
        return self._psffixed
    @property
    def geoinfo(self):
        """Layer geometry parameter lists (read-only)."""
        return self._geoinfo
    @property
    def geoinfofixed(self):
        """Fixed/free flags matching ``geoinfo`` (read-only)."""
        return self._geoinfofixed
    @property
    def zoffset(self):
        """Scan z-offset; settable, numeric only."""
        return self._zoffset
    @zoffset.setter
    def zoffset(self, value):
        """Set the z-offset.  Raises TypeError for non-numeric values."""
        if not isinstance(value, (int, float)):
            raise TypeError("The type of zoffset value is either int or float.")
        else:
            self._zoffset = value
    @property
    def channels(self):
        """List of detector channels to fit; settable, list only."""
        return self._channels
    @channels.setter
    def channels(self, values):
        """Set the channel list.  Raises TypeError for non-list values."""
        if not isinstance(values, list):
            raise TypeError("The type of channels is list.")
        else:
            self._channels = values
    @property
    def spillover(self):
        """Spillover factor mapping, e.g. keys 'f12', 'f13', 'f23' (read-only)."""
        return self._spillover
@classmethod
def printgeodict(cls):
temp = sorted(cls.geodict.items(), key= (lambda x:x[1]))
for key, value in temp:
print(key, value)
def main():
    """Demo: fit a single-channel and a dual-channel zscan of an EGFP slab.

    Reads ``zscan_slab_egfp.dat`` from the working directory and opens
    matplotlib windows, so this is meant for interactive use only.
    """
    from zscanTransformer import zscanTransformer as zscan
    from readFFSfromFLEX import readFFSfromFLEX as ffs
    from matplotlib import pyplot as plt
    # Load raw FFS data (two channels, 20 kHz sampling — presumably; verify
    # against readFFSfromFLEX's signature).
    data = ffs(["zscan_slab_egfp.dat"], [1, 2], 20000)
    # --- single-channel fit (detector channel 2 of the file, fit channel 1) ---
    temp_zscan = zscan(channels=[2], slice_zscans = True)
    res = temp_zscan.transform(data)
    zscanfit = zscanFitter(psfmodel="mGL", zoffset=13., channels=[1])
    zscanfit.setPSF(channel=1, psfparas=[1., 2., 0.45], fixed=[0, 0, 0])
    # print("psf :", zscanfit.getPSF())
    #zscanfit.addLayer("DOWN")
    # Three-layer geometry: semi-infinite below, one slab, semi-infinite above.
    zscanfit.setLayer("DOWN", [1., 0.], [0, 1])
    zscanfit.setLayer(0, [1300., 1.], [0, 0])
    zscanfit.setLayer("UP", [1., 0.], [0, 1])
    #zscanfit.setLayer("UP", [5., 5.], [0, 0], layer_index=1)
    zz, yfit = zscanfit.fit(res[0], [res[2][0]])
    print(zz.params)
    # for x in res[2]:
    #     plt.plot(res[0], x)
    plt.plot(res[0], res[2][0])
    plt.plot(res[0], yfit, 'r')
    plt.xlabel('z (um)')
    plt.ylabel('counts per {} bins'.format(temp_zscan.nbins))
    plt.show()
    # --- dual-channel data: plot both channels raw first ---
    temp_zscan = zscan(channels=[1, 2], slice_zscans = True)
    res = temp_zscan.transform(data)
    # for x, y in zip(res[2], res[1]):
    #     plt.plot(res[0], x)
    #     plt.plot(res[0], y)
    plt.plot(res[0], res[2][0])
    plt.plot(res[0], res[1][0])
    plt.show()
    # --- dual-channel fit with spillover f12 = 1/8 ---
    zscanfit_dual = zscanFitter(psfmodel="mGL", zoffset=13., channels=[1,2])
    zscanfit_dual.setPSF(channel=1, psfparas=[1., 2., 0.45], fixed=[0, 0, 0])
    zscanfit_dual.setPSF(channel=2, psfparas=[1., 2., 0.45], fixed=[0, 0, 0])
    zscanfit_dual.setSpillover([1./8.])
    zscanfit_dual.setLayer("DOWN", [1., 1., 0.], [0, 0, 1])
    zscanfit_dual.setLayer(0, [1300., 400., 1.], [0, 0, 0])
    zscanfit_dual.setLayer("UP", [1., 1., 0.], [0, 0, 1])
    zz, yfit = zscanfit_dual.fit(res[0], [res[2][0], res[1][0]])
    print(zz.params)
    # The stacked fit vector is reshaped back into one row per channel.
    v = yfit.reshape((2, res[0].size))
    print(v)
    print(v.shape)
    plt.plot(res[0], res[2][0])
    plt.plot(res[0], res[1][0])
    plt.plot(res[0], v[0], 'r')
    plt.plot(res[0], v[1], 'r')
    plt.show()
# Run the interactive demo when executed as a script.
if __name__=="__main__":
    main()
| [
"hurxx018@gmail.com"
] | hurxx018@gmail.com |
d707e48f2206d0063a2ee821c1c8af3036bea642 | 998195df831258000d53402dd1a0f84e4b08a54c | /resources/virtual/__init__.py | 8ee0ef15ef26eb390ac6b76f609216f0e76cedeb | [
"MIT"
] | permissive | GerasimovRM/Where-I-Am | 2ceb0e1e9b4494ffb672b00a0d2197d3c390cda1 | 58f6f0d1533421890f199dacabe523a447486b9f | refs/heads/master | 2022-12-09T17:06:43.660654 | 2020-03-31T21:39:32 | 2020-03-31T21:39:32 | 249,545,743 | 0 | 0 | MIT | 2022-12-08T03:52:26 | 2020-03-23T21:15:44 | Python | UTF-8 | Python | false | false | 37 | py | from .virtual_user import VirtualUser | [
"romagrizly@gmail.com"
] | romagrizly@gmail.com |
afdd99802f2c3e8b542e770aaf8599a418822320 | 2298c6afb9fc3877a2bf283e7e7422ac1843cabd | /python-studentmgmt/run.py | 8aebcb755642578db26ac2fe956f4df57b022a95 | [] | no_license | desrop/student-course-mgmt | 32d4e86c5a5ab3bf1531652dad9ce5b78f04cdc9 | e3213b7be5fc2f2e005219f977d4b3af3883f3b3 | refs/heads/master | 2020-04-29T12:02:57.237635 | 2019-04-07T15:52:59 | 2019-04-07T15:52:59 | 176,123,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | from studentmgmt import app
# Start the Flask development server when run directly (debug reloader on).
if __name__ == '__main__':
    app.run(debug=True)
"dean.desrosiers@gmail.com"
] | dean.desrosiers@gmail.com |
ccfcc55c4c14c4b986184af7508e852d73a51294 | 124537802d65a56bcc8221196add1d3b62bb46d9 | /mysite/application/admin.py | f9d77ee33249e1619d52b0b003ec40d2bd308b71 | [] | no_license | Watson-Sei/Django-Girls-Tutorial | a9c749c334ea0c0786c80fa8a9492ee29a222184 | 3e5c3735aa25e8f819228d34a50c47c0820fafc3 | refs/heads/master | 2022-12-09T13:13:29.430290 | 2020-07-30T04:38:56 | 2020-07-30T04:38:56 | 247,466,139 | 2 | 0 | null | 2022-11-22T05:23:55 | 2020-03-15T12:55:09 | Python | UTF-8 | Python | false | false | 312 | py | from django.contrib import admin
from .models import Post,Like,Question,QuestionLike, Question2
from markdownx.admin import MarkdownxModelAdmin
# Expose the blog models in the Django admin.  Question uses the
# Markdownx-aware ModelAdmin so its body is edited as Markdown.
admin.site.register(Post)
admin.site.register(Like)
admin.site.register(Question,MarkdownxModelAdmin)
admin.site.register(QuestionLike)
admin.site.register(Question2)
| [
"seinabehack@gmail.com"
] | seinabehack@gmail.com |
e7dc87d8f60339b7be557f18f118cc68c3545903 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2098/49405/273048.py | cff498a9698319a280edf449fde35bbd033aa422 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | a = int(input())
b = 1
# Grow b to the smallest power of 26 that is >= a, i.e. the weight of the
# most significant base-26 digit of a.
while b < a:
    b *= 26
# Emit digits from most to least significant as letters A..Z.
# NOTE(review): this submission looks broken for general input —
# `range(b, 0, -1)` iterates b times (b can be enormous), `a %= 26` was
# presumably meant to be `a %= b`, and once `b //= 26` reaches 0 the
# division `a // b` raises ZeroDivisionError.  Verify against the judge's
# accepted output before reusing.
for i in range(b, 0, -1):
    print(chr(a // b + ord("A") - 1), end="")
    a %= 26
    b //= 26
# Trailing least-significant letter, if anything remains.
if a > 0: print(chr(a + ord("A") - 1))
"1069583789@qq.com"
] | 1069583789@qq.com |
330f15d9c0a1213fd20d89c663d6431dc1aafdf4 | fd1dd65c5dd4495dba1e94fcf44dc8c45e8908ef | /myenv/bin/pip | 50aa6f472314e96ed16169d483c3949e190e74e5 | [] | no_license | ayushin78/django-blog | 0a1e9eabfa5159696ee98d37eecd9d1ada4bd6a2 | df8334f3c85a7e57d6d4dc81610d5250b4b31a6a | refs/heads/master | 2020-03-21T15:50:05.777763 | 2018-07-08T10:10:16 | 2018-07-08T10:10:16 | 138,735,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | #!/home/ayushin78/djangogirls/myenv/bin/python3.5
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
# Auto-generated console-script entry point: strip the platform wrapper
# suffix from argv[0], then hand control to pip's internal main().
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"ayushin78@gmail.com"
] | ayushin78@gmail.com | |
aacf718ffaf5dc66b0a37c3bc12ef730d77e3012 | 1fd1c877b0560ce579349d794642d36ad18330d9 | /prac_08/unreliable_car_test.py | 588ea0a804fafb26c253d3daeaa0233cc61c1cdd | [] | no_license | malia-d/cp1404practicals | cbc83478fdf363175e745af8018262712b15b18f | 4367e29e5b711fa89cd055c8a57f4f8fa3568742 | refs/heads/master | 2023-05-06T00:25:29.344351 | 2021-05-30T12:17:15 | 2021-05-30T12:17:15 | 348,734,500 | 0 | 1 | null | 2021-05-30T12:17:16 | 2021-03-17T14:12:35 | Python | UTF-8 | Python | false | false | 741 | py | """
Test the Unreliable Car class by creating two cars, one with high reliability and one with low reliability. Test both
cars multiple times and print the distance each car has driven.
Unreliable Car Test. Created by Malia D'Mello, May 2021.
"""
from prac_08.unreliable_car import UnreliableCar
def main():
    """Drive one highly reliable and one flaky UnreliableCar side by side.

    Attempts trips of 1..10 km with each car and prints how far each
    actually drove, then prints both cars' final states.
    """
    reliable_car = UnreliableCar("Prius 1", 100, 95)
    flaky_car = UnreliableCar("Kia Rio", 100, 5)
    for distance in range(1, 11):
        print("Attempting to drive {}km:".format(distance))
        print("{:2} drove {:2}km".format(reliable_car.name, reliable_car.drive(distance)))
        print("{:2} drove {:2}km".format(flaky_car.name, flaky_car.drive(distance)))
    print(reliable_car)
    print(flaky_car)
main()
| [
"malia.dmello@my.jcu.edu.au"
] | malia.dmello@my.jcu.edu.au |
7c455d82ac872f0ef5ae750b0acf3d726db9b587 | b7423aabf39b7ebacbd57388d20de1a9bf43f2f2 | /coding-bat/logic-1/sorta_sum.py | 1d74d2b7a01b52980779c5ec0e070d93ebb8306d | [] | no_license | usman-tahir/python-snippets | b9e2bfe8e96d321314b2e87560c2fa3cd102d0e8 | cfe564ecb5a27d8b61c8c9930458bf3fdeab4d8e | refs/heads/master | 2021-01-12T13:56:06.263461 | 2017-05-07T17:53:53 | 2017-05-07T17:53:53 | 68,925,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | # Given 2 ints, a and b, return their sum. However, sums in the range 10..19
# inclusive, are forbidden, so in that case just return 20.
def sorta_sum(a, b):
result = a + b
if result in range(10, 20):
return 20
return result
| [
"tahir.usman.ali94@outlook.com"
] | tahir.usman.ali94@outlook.com |
b9b1e7f05c541f5842356071d917aea0d8095dd2 | e6ea9634d8ed01fcd203ff65bfdc350c735c485d | /maskrcnn_benchmark/structures/bounding_box.py | 05b8aa9dfacb51f5e3cf64fa1d794afd6bb4da5a | [] | no_license | salvatorepolizzotto/oshot_detection | 5edbbf5be062f3924cfe71a657e4cb526296a75f | a1259d34e4c66156bbbf048d1e9655e8ad24c336 | refs/heads/master | 2022-12-15T07:26:57.934777 | 2020-08-26T18:37:23 | 2020-08-26T18:37:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,165 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
ROTATE_90 = 2
ROTATE_180 = 3
ROTATE_270 = 4


class BoxList(object):
    """
    This class represents a set of bounding boxes.
    The bounding boxes are represented as a Nx4 Tensor.
    In order to uniquely determine the bounding boxes with respect
    to an image, we also store the corresponding image dimensions.
    They can contain extra information that is specific to each bounding box, such as
    labels.
    """

    def __init__(self, bbox, image_size, mode="xyxy"):
        device = bbox.device if isinstance(bbox, torch.Tensor) else torch.device("cpu")
        bbox = torch.as_tensor(bbox, dtype=torch.float32, device=device)
        if bbox.ndimension() != 2:
            raise ValueError(
                "bbox should have 2 dimensions, got {}".format(bbox.ndimension())
            )
        if bbox.size(-1) != 4:
            raise ValueError(
                "last dimension of bbox should have a "
                "size of 4, got {}".format(bbox.size(-1))
            )
        if mode not in ("xyxy", "xywh"):
            raise ValueError("mode should be 'xyxy' or 'xywh'")

        self.bbox = bbox
        self.size = image_size  # (image_width, image_height)
        self.mode = mode
        self.extra_fields = {}

    def add_field(self, field, field_data):
        """Attach per-box data (e.g. labels, masks) under the given name."""
        self.extra_fields[field] = field_data

    def get_field(self, field):
        """Return previously attached per-box data; raises KeyError if absent."""
        return self.extra_fields[field]

    def has_field(self, field):
        return field in self.extra_fields

    def fields(self):
        """List the names of all attached extra fields."""
        return list(self.extra_fields.keys())

    def _copy_extra_fields(self, bbox):
        for k, v in bbox.extra_fields.items():
            self.extra_fields[k] = v

    def convert(self, mode):
        """Return a copy of this BoxList in the requested box encoding."""
        if mode not in ("xyxy", "xywh"):
            raise ValueError("mode should be 'xyxy' or 'xywh'")
        if mode == self.mode:
            return self
        # we only have two modes, so don't need to check
        # self.mode
        xmin, ymin, xmax, ymax = self._split_into_xyxy()
        if mode == "xyxy":
            bbox = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
            bbox = BoxList(bbox, self.size, mode=mode)
        else:
            TO_REMOVE = 1
            bbox = torch.cat(
                (xmin, ymin, xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE), dim=-1
            )
            bbox = BoxList(bbox, self.size, mode=mode)
        bbox._copy_extra_fields(self)
        return bbox

    def _split_into_xyxy(self):
        """Return (xmin, ymin, xmax, ymax) column tensors in xyxy convention."""
        if self.mode == "xyxy":
            xmin, ymin, xmax, ymax = self.bbox.split(1, dim=-1)
            return xmin, ymin, xmax, ymax
        elif self.mode == "xywh":
            TO_REMOVE = 1
            xmin, ymin, w, h = self.bbox.split(1, dim=-1)
            return (
                xmin,
                ymin,
                xmin + (w - TO_REMOVE).clamp(min=0),
                ymin + (h - TO_REMOVE).clamp(min=0),
            )
        else:
            raise RuntimeError("Should not be here")

    def resize(self, size, *args, **kwargs):
        """
        Returns a resized copy of this bounding box

        :param size: The requested size in pixels, as a 2-tuple:
            (width, height).
        """
        ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
        if ratios[0] == ratios[1]:
            # Uniform scaling: one multiply covers both coordinates.
            ratio = ratios[0]
            scaled_box = self.bbox * ratio
            bbox = BoxList(scaled_box, size, mode=self.mode)
            # bbox._copy_extra_fields(self)
            for k, v in self.extra_fields.items():
                if not isinstance(v, torch.Tensor):
                    v = v.resize(size, *args, **kwargs)
                bbox.add_field(k, v)
            return bbox

        ratio_width, ratio_height = ratios
        xmin, ymin, xmax, ymax = self._split_into_xyxy()
        scaled_xmin = xmin * ratio_width
        scaled_xmax = xmax * ratio_width
        scaled_ymin = ymin * ratio_height
        scaled_ymax = ymax * ratio_height
        scaled_box = torch.cat(
            (scaled_xmin, scaled_ymin, scaled_xmax, scaled_ymax), dim=-1
        )
        bbox = BoxList(scaled_box, size, mode="xyxy")
        # bbox._copy_extra_fields(self)
        for k, v in self.extra_fields.items():
            if not isinstance(v, torch.Tensor):
                v = v.resize(size, *args, **kwargs)
            bbox.add_field(k, v)
        return bbox.convert(self.mode)

    def transpose(self, method):
        """
        Transpose bounding box (flip or rotate in 90 degree steps)

        :param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
          :py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
          :py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`,
          :py:attr:`PIL.Image.TRANSPOSE` or :py:attr:`PIL.Image.TRANSVERSE`.
        """
        if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM, ROTATE_90, ROTATE_180, ROTATE_270):
            raise NotImplementedError(
                "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
            )

        image_width, image_height = self.size
        size = self.size  # size of the transposed result
        xmin, ymin, xmax, ymax = self._split_into_xyxy()
        if method == FLIP_LEFT_RIGHT:
            TO_REMOVE = 1
            transposed_xmin = image_width - xmax - TO_REMOVE
            transposed_xmax = image_width - xmin - TO_REMOVE
            transposed_ymin = ymin
            transposed_ymax = ymax
        elif method == FLIP_TOP_BOTTOM:
            transposed_xmin = xmin
            transposed_xmax = xmax
            transposed_ymin = image_height - ymax
            transposed_ymax = image_height - ymin
        elif method == ROTATE_90:
            # NOTE(review): this swaps x/y (a transpose about the main
            # diagonal) rather than a true 90-degree rotation — confirm the
            # intended direction before relying on it.
            transposed_xmin = ymin
            transposed_xmax = ymax
            transposed_ymin = xmin
            transposed_ymax = xmax
            # Bug fix: the original mutated self.size here, corrupting the
            # source BoxList; only the result gets the swapped size now.
            size = (image_height, image_width)
        elif method == ROTATE_180:
            return self.transpose(FLIP_LEFT_RIGHT).transpose(FLIP_TOP_BOTTOM)
        elif method == ROTATE_270:
            return self.transpose(ROTATE_90).transpose(ROTATE_180)

        transposed_boxes = torch.cat(
            (transposed_xmin, transposed_ymin, transposed_xmax, transposed_ymax), dim=-1
        )
        bbox = BoxList(transposed_boxes, size, mode="xyxy")
        # bbox._copy_extra_fields(self)
        for k, v in self.extra_fields.items():
            if not isinstance(v, torch.Tensor):
                v = v.transpose(method)
            bbox.add_field(k, v)
        return bbox.convert(self.mode)

    def crop(self, box):
        """
        Crops a rectangular region from this bounding box. The box is a
        4-tuple defining the left, upper, right, and lower pixel
        coordinate.
        """
        xmin, ymin, xmax, ymax = self._split_into_xyxy()
        w, h = box[2] - box[0], box[3] - box[1]
        cropped_xmin = (xmin - box[0]).clamp(min=0, max=w)
        cropped_ymin = (ymin - box[1]).clamp(min=0, max=h)
        cropped_xmax = (xmax - box[0]).clamp(min=0, max=w)
        cropped_ymax = (ymax - box[1]).clamp(min=0, max=h)

        # TODO: should boxes that collapse to an empty area be filtered here?
        # (A disabled `if False:` branch computing the empty mask was removed.)
        cropped_box = torch.cat(
            (cropped_xmin, cropped_ymin, cropped_xmax, cropped_ymax), dim=-1
        )
        bbox = BoxList(cropped_box, (w, h), mode="xyxy")
        # bbox._copy_extra_fields(self)
        for k, v in self.extra_fields.items():
            if not isinstance(v, torch.Tensor):
                v = v.crop(box)
            bbox.add_field(k, v)
        return bbox.convert(self.mode)

    # Tensor-like methods

    def to(self, device):
        """Return a copy with the box tensor (and movable fields) on *device*."""
        bbox = BoxList(self.bbox.to(device), self.size, self.mode)
        for k, v in self.extra_fields.items():
            if hasattr(v, "to"):
                v = v.to(device)
            bbox.add_field(k, v)
        return bbox

    def __getitem__(self, item):
        """Index/slice the boxes; extra fields are indexed the same way."""
        bbox = BoxList(self.bbox[item], self.size, self.mode)
        for k, v in self.extra_fields.items():
            bbox.add_field(k, v[item])
        return bbox

    def __len__(self):
        return self.bbox.shape[0]

    def clip_to_image(self, remove_empty=True):
        """Clamp boxes to the image bounds IN PLACE; optionally drop empty ones."""
        TO_REMOVE = 1
        self.bbox[:, 0].clamp_(min=0, max=self.size[0] - TO_REMOVE)
        self.bbox[:, 1].clamp_(min=0, max=self.size[1] - TO_REMOVE)
        self.bbox[:, 2].clamp_(min=0, max=self.size[0] - TO_REMOVE)
        self.bbox[:, 3].clamp_(min=0, max=self.size[1] - TO_REMOVE)
        if remove_empty:
            box = self.bbox
            keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0])
            return self[keep]
        return self

    def area(self):
        """Per-box pixel area (inclusive coordinates in xyxy mode)."""
        box = self.bbox
        if self.mode == "xyxy":
            TO_REMOVE = 1
            area = (box[:, 2] - box[:, 0] + TO_REMOVE) * (box[:, 3] - box[:, 1] + TO_REMOVE)
        elif self.mode == "xywh":
            area = box[:, 2] * box[:, 3]
        else:
            raise RuntimeError("Should not be here")
        return area

    def copy_with_fields(self, fields, skip_missing=False):
        """Copy this BoxList keeping only the requested extra fields."""
        bbox = BoxList(self.bbox, self.size, self.mode)
        if not isinstance(fields, (list, tuple)):
            fields = [fields]
        for field in fields:
            if self.has_field(field):
                bbox.add_field(field, self.get_field(field))
            elif not skip_missing:
                raise KeyError("Field '{}' not found in {}".format(field, self))
        return bbox

    def __repr__(self):
        s = self.__class__.__name__ + "("
        s += "num_boxes={}, ".format(len(self))
        s += "image_width={}, ".format(self.size[0])
        s += "image_height={}, ".format(self.size[1])
        s += "mode={})".format(self.mode)
        return s
if __name__ == "__main__":
bbox = BoxList([[0, 0, 10, 10], [0, 0, 5, 5]], (10, 10))
s_bbox = bbox.resize((5, 5))
print(s_bbox)
print(s_bbox.bbox)
t_bbox = bbox.transpose(0)
print(t_bbox)
print(t_bbox.bbox)
| [
"ant.dinnocente@gmail.com"
] | ant.dinnocente@gmail.com |
0988817d20c1b9ff5aeed8eef0f7a93ff66730ed | 4774d125ec2006e803788737e95a5a76b6df145f | /python-packages/contract_wrappers/src/zero_ex/contract_wrappers/order_conversions.py | a639ffe1c524751abf3d0b0d1cf09538b6aa6d5e | [
"Apache-2.0"
] | permissive | 0xProject/0x-monorepo | 6e71def8d1f0548fdc5f49b5d404f89e66afaca1 | 53b5bb16d8b4c9050a46978b6f347ef7595fe103 | refs/heads/development | 2023-07-10T02:38:56.466840 | 2021-04-24T01:16:16 | 2021-04-24T01:16:16 | 92,181,371 | 1,132 | 527 | NOASSERTION | 2021-06-26T03:02:14 | 2017-05-23T14:17:33 | TypeScript | UTF-8 | Python | false | false | 7,334 | py | """Utilities to convert between JSON and Python-native objects.
Converting between the JSON wire format and the types accepted by Web3.py (eg
`bytes` vs `str`) can be onerous. This module provides conveniences for
converting Exchange structs between JSON and Python objects.
"""
from copy import copy
from typing import cast, Dict, Union
from eth_utils import remove_0x_prefix
from zero_ex.json_schemas import assert_valid
from zero_ex.contract_wrappers.exchange.types import Order
def order_to_jsdict(
    order: Order,
    chain_id: int,
    exchange_address="0x0000000000000000000000000000000000000000",
    signature: str = None,
) -> dict:
    """Convert a Web3-style order struct into a JSON-schema-compatible dict.

    Byte fields are hex-encoded with a ``0x`` prefix, numeric fields are
    rendered as decimal strings, and ``exchangeAddress``/``chainId`` (plus
    ``signature`` when given) are attached.  The result is validated against
    ``/orderSchema`` before being returned.
    """
    jsdict = cast(Dict, copy(order))

    def _hex_encode(value: Union[bytes, str]) -> str:
        # Accept raw bytes or an already-hex string; normalize to a
        # "0x"-prefixed hex string either way.
        hex_str = cast(bytes, value).hex() if isinstance(value, bytes) else value
        if hex_str[0:2] != "0x":
            hex_str = "0x" + hex_str
        return hex_str

    for byte_field in (
        "makerAssetData",
        "takerAssetData",
        "makerFeeAssetData",
        "takerFeeAssetData",
    ):
        jsdict[byte_field] = _hex_encode(order[byte_field])

    jsdict["exchangeAddress"] = exchange_address

    for numeric_field in (
        "expirationTimeSeconds",
        "makerAssetAmount",
        "takerAssetAmount",
        "makerFee",
        "takerFee",
        "salt",
    ):
        jsdict[numeric_field] = str(order[numeric_field])

    jsdict["chainId"] = chain_id

    if signature is not None:
        jsdict["signature"] = signature

    assert_valid(jsdict, "/orderSchema")

    return jsdict
def jsdict_to_order(jsdict: dict) -> Order:
    """Convert a JSON-schema order dict into a Web3-compatible order struct.

    Hex-encoded byte fields are decoded to ``bytes``, numeric strings are
    parsed to ``int``, and the ``exchangeAddress`` key (which is not part of
    the on-chain struct) is dropped.  The input is validated against
    ``/orderSchema`` first.
    """
    assert_valid(jsdict, "/orderSchema")

    order = cast(Order, copy(jsdict))

    for byte_field in (
        "makerAssetData",
        "makerFeeAssetData",
        "takerAssetData",
        "takerFeeAssetData",
    ):
        order[byte_field] = bytes.fromhex(remove_0x_prefix(jsdict[byte_field]))

    for numeric_field in (
        "makerAssetAmount",
        "takerAssetAmount",
        "makerFee",
        "takerFee",
        "expirationTimeSeconds",
        "salt",
    ):
        order[numeric_field] = int(jsdict[numeric_field])

    del order["exchangeAddress"]  # type: ignore
    # silence mypy pending release of
    # https://github.com/python/mypy/issues/3550

    return order
| [
"noreply@github.com"
] | noreply@github.com |
94542bccbb0867623afc8de7f1953f075ba9a6c6 | 7d105a72821a0fdc19923177f7bdd65846a64af1 | /mikomiko_hk_v3.py | 496e61bd628b01ae7a0c4abdbef25788e70fcd84 | [
"MIT"
] | permissive | tiankong1999/ArcFace-Multiplex-Recognition | 5c5ab115469db3c84f914e6307dada71439a68f4 | 16a075e499a2cf7dd6ee3e0204b6309946bc9c23 | refs/heads/master | 2020-05-24T15:45:06.579777 | 2019-05-16T21:04:44 | 2019-05-16T21:04:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,716 | py | # coding: utf-8
import cv2
import os
import numpy as np
import time
from termcolor import colored
from helper import read_pkl_model, start_up_init, encode_image
from multiprocessing import Process, Queue
import asyncio
import socketio
import IPCamera.interface as ipc
import face_embedding
import face_detector
async def upload_loop(url="http://127.0.0.1:6789"):
    """Push frames (and any recognition results) to the web server over
    Socket.IO, namespace ``/remilia``.

    The server drives the pace: each 'response' event acknowledges a frame
    and requests the next one from ``upstream_frame_queue`` (a module-level
    multiprocessing queue fed by the detection loops).
    """
    # ===================== Uploader setup ========================
    sio = socketio.AsyncClient()
    @sio.on('response', namespace='/remilia')
    async def on_response(data):
        current_address, upload_frame = upstream_frame_queue.get()
        image_string = 0
        # strat_time = time.time()
        # Only encode the frame if it belongs to the camera the server asked
        # for; otherwise a dummy payload (0) is sent.
        if current_address == data:
            image_string = encode_image(upload_frame)
        # mid_time = time.time()
        await sio.emit('frame_data', image_string, namespace='/remilia')
        # Opportunistically forward one recognition result, if available.
        try:
            img, dt, prob, name = result_queue.get_nowait()
            result_string = {
                'image': encode_image(img),
                'time': dt,
                'name': name,
                'prob': prob
            }
            await sio.emit('result_data', result_string, namespace='/remilia')
        except Exception as e:
            # queue.Empty is the expected case: no result ready this round.
            pass
        # print(mid_time-strat_time, time.time()-mid_time)
    @sio.on('connect', namespace='/remilia')
    async def on_connect():
        # Kick off the request/response cycle with a dummy frame.
        await sio.emit('frame_data', 0, namespace='/remilia')
    await sio.connect(url)
    await sio.wait()
async def embedding_loop(preload):
    """Consume suspicious face crops and classify them forever.

    Loads the MLP classifier and the ArcFace embedding model once, then for
    every image taken from ``suspicion_face_queue`` puts
    ``(img, timestamp, probability, name)`` on ``result_queue``.
    """
    # =================== FR MODEL ====================
    mlp, class_names = read_pkl_model(preload.classification)
    embedding = face_embedding.EmbeddingModel(preload)
    while True:
        img = suspicion_face_queue.get()
        dt = time.strftime('%m-%d %H:%M:%S')
        predict = mlp.predict_proba([embedding.get_one_feature(img)])
        # Highest class probability and the matching class name.
        prob = predict.max(1)[0]
        name = class_names[predict.argmax(1)[0]]
        result_queue.put((img, dt, prob, name))
    # [[0.30044544 0.31831665 0.30363247 0.07760544]]
async def detection_loop(preload, frame_queue):
    """Run face detection on frames from one camera and forward them upstream.

    Detects faces on a "head" frame, draws their boxes, queues high-score
    crops for recognition, then passes through (without detection) roughly
    as many following frames as were captured while detecting, assuming a
    25 fps stream.
    """
    # =================== FD MODEL ====================
    detector = face_detector.DetectorModel(preload)
    ip_address = preload.ip_address
    embedding_threshold = preload.embedding_threshold
    loop = asyncio.get_running_loop()
    while True:
        start_time = loop.time()
        head_frame = frame_queue.get()
        # tracker = cv2.MultiTracker_create()
        # t_box = []
        for img, box in detector.get_all_boxes(head_frame, save_img=False):
            # box[4] is the detector's confidence score.
            if box[4] > embedding_threshold:
                try:
                    suspicion_face_queue.put_nowait(img)
                except Exception as _:
                    # Queue full: drop the crop rather than stall detection.
                    pass
            # NOTE(review): np.int is removed in NumPy >= 1.24 — this line
            # would need box.astype(int) on newer NumPy.
            box = box.astype(np.int)
            cv2.rectangle(head_frame, (box[0], box[1]), (box[2], box[3]),
                          [255, 255, 0], 2)
            # t_box.append(box[:4]/2)
        # print(colored(loop.time() - start_time, 'blue'))
        # head_frame = cv2.resize(head_frame, (960, 540), cv2.INTER_AREA)
        # for item in t_box:
        #     tracker.add(cv2.TrackerMedianFlow_create(), head_frame, tuple(item))
        upstream_frame_queue.put((ip_address, head_frame))
        print(colored(loop.time() - start_time, 'red'), flush=True)
        # Skip detection on the frames that accumulated during this pass
        # (detection time * 25 fps), forwarding them unannotated.
        for i in range(int((loop.time() - start_time) * 25)):
            body_frame = frame_queue.get()
            # ok, tricker_boxes = tracker.update(body_frame)
            # if ok:
            #     for box in tricker_boxes:
            #         box = box.astype(int)
            #         cv2.rectangle(body_frame, (box[0], box[1]),
            #                       (box[2], box[3]), [255, 255, 0], 2)
            upstream_frame_queue.put((ip_address, body_frame))
            # await sio.emit('frame_data', encode_image(body_frame), namespace='/remilia')
        # end_time = loop.time()
        # print(colored(loop.time()-track_time, 'red'))
async def camera_loop(preload):
    """Grab frames from the Hikvision IP cameras at a bounded frame rate.

    Starts one HKIPCamera per configured address, then forever pushes
    540x960 frames from camera 10.41.0.231 onto ``frame_queue_231``,
    sleeping any spare time so the loop never exceeds
    ``preload.max_frame_rate`` iterations per second.
    """
    reciprocal_of_max_frame_rate = 1 / preload.max_frame_rate
    address_dict = preload.address_dict
    camera_dict = {}
    # from CXMIPCamera import XMIPCamera
    # for address in address_dict:
    #     xmcp = XMIPCamera(address.encode('UTF-8'), 34567, b"admin", b"")
    #     xmcp.start()
    #     camera_dict[address] = xmcp
    for address in address_dict:
        # NOTE(review): credentials are hard-coded here — consider moving
        # them to configuration.
        hkcp = ipc.HKIPCamera(address.encode('UTF-8'), 8000, b"admin",
                              b"humanmotion01")
        hkcp.start()
        camera_dict[address] = hkcp
    frame_counter = 0
    loop = asyncio.get_running_loop()
    # =================== ETERNAL LOOP ====================
    while True:
        start_time = loop.time()
        frame_queue_231.put(camera_dict['10.41.0.231'].frame(rows=540,
                                                             cols=960))
        # frame_queue_231.put(camera_dict['10.41.0.198'].frame(rows=540, cols=960))
        # frame_queue_232.put(camera_dict['10.41.0.199'].frame(rows=540, cols=960))
        # frame_counter = frame_counter % 1000
        # if not frame_counter % 5:
        #     print(loop.time() - start_time, upstream_frame_queue.qsize(),
        #           frame_queue_231.qsize())
        #     print(loop.time() - start_time, upstream_frame_queue.qsize(),
        #           frame_queue_231.qsize(), frame_queue_232.qsize())
        # Sleep off whatever time is left in this frame period.
        restime = reciprocal_of_max_frame_rate - loop.time() + start_time
        if restime > 0:
            await asyncio.sleep(restime)
# =================== INIT ====================
# address_dict = ['10.41.0.198', '10.41.0.199']
address_dict = ['10.41.0.231']
# frame_queue_232 = Queue(maxsize=frame_buffer_size)
# Process(target=lambda: asyncio.run(
#     detection_loop(args, frame_queue_232))).start()
# Disable MXNet's cuDNN autotune for deterministic startup time.
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
# One second of buffering per camera at 25 fps.
frame_buffer_size = 25 * len(address_dict)
upstream_frame_queue = Queue(maxsize=frame_buffer_size)
suspicion_face_queue = Queue(maxsize=frame_buffer_size)
result_queue = Queue(maxsize=frame_buffer_size)
# =================== ARGS ====================
args = start_up_init()
args.address_dict = address_dict
# =================== Process On ====================
# One detection process per camera; the queues above are shared with them.
args.ip_address = '10.41.0.231'
frame_queue_231 = Queue(maxsize=frame_buffer_size)
Process(
    target=lambda: asyncio.run(detection_loop(args, frame_queue_231))).start()
# args.ip_address = '10.41.0.232'
# frame_queue_232 = Queue(maxsize=frame_buffer_size)
# Process(target=lambda: asyncio.run(
#     detection_loop(args, frame_queue_232))).start()
Process(target=lambda: asyncio.run(embedding_loop(args))).start()
Process(target=lambda: asyncio.run(camera_loop(args))).start()
# The uploader runs in the main process.
asyncio.run(upload_loop())
| [
"1996scarlet@gmail.com"
] | 1996scarlet@gmail.com |
b9b0349e2f15cb79dfa7708c61f629acb8d06ad9 | 399b6f19250ea7e7abbf16020195fce9e81e0e41 | /python_Basics/abnormal/abnormal.py | b88a90c34f5efa1abb7e3353c549e81771fe2ec7 | [] | no_license | zhanghui0228/study | 87cd248a2f3e4af38bf0d2416fed0b9515f54f45 | 43351858858e087d060630f2669e30546acbaf8f | refs/heads/master | 2020-07-23T05:50:02.556812 | 2020-05-24T10:40:04 | 2020-05-24T10:40:04 | 207,463,463 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,759 | py | #abnormal 异常处理
'''
内置的异常类:
exception 几乎所有的异常类都是从它派生而来的
AttributeError 引用属性或给它赋值失败时引发
OSError 操作系统不能执行指定的任务(如打开文件)时引发,有多个子类
IndexError 使用序列中不存在的索引时引发,为LookupError的子类
KeyError 使用映射中不存在的键时引发,为LookupError的子类
NameError 找不到名称(变量)时引发
SyntaxError 代码不正确时引发
TypeError 将内置操作或者函数用于类型不正确的对象时引发
ValueError 将内置操作或者函数用于这样的对象时引发:其类型正确但包含的值不合适
ZeroDIvisionError 在除法或求模运算的第二个参数为零时引发
捕获异常:
使用try...except 捕获所有的异常
使用try...except...finally 处理必不可少的逻辑
'''
def test_div(num1, num2):
try:
resutl = num1 / num2
print( resutl)
# except TypeError:
# print("除数要为数字")
# except ZeroDivisionError:
# print("除数不能为0")
except (TypeError, ZeroDivisionError) as error:
print("错误信息:{0}".format(error))
def test_finally():
    """Print the contents of test.txt, demonstrating try/except/finally.

    A failed read (missing file, bad permissions, decode error, ...) is
    reported instead of propagating; the finally block always announces
    that the routine is exiting.
    """
    try:
        with open("test.txt", 'r', encoding='UTf8') as f:
            info = f.read()
            print(info)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        print("程序异常")
    finally:
        try:
            print("程序已退出")
        except Exception:
            pass
if __name__ == '__main__':
    # Demo: exercise the zero-divisor and non-numeric error paths, then a
    # normal division, then the finally demonstration.
    test_div(5, 0)
    test_div(6, '2')
    test_div(6, 2)
    print('*' * 30)
    test_finally()
| [
"helloworldhui@163.com"
] | helloworldhui@163.com |
6c8b0d28a63bb2ea5c3b6e93e7362380b4638fbc | be34946b978f7c6dc4be5a3cf53156167a8eea77 | /sampling.py | 5cc6ca871e7e7927666f79187f2cb82f0c2f7221 | [] | no_license | ZhenxiangWang/Link-Prediction | e5ccd017f7449dd44a9b9141b0f322fde4141ab4 | c143b2b016a37f36a12ec57a0cf35a963bbba9ca | refs/heads/master | 2020-03-28T05:18:06.540314 | 2018-09-10T04:33:51 | 2018-09-10T04:33:51 | 147,767,914 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,221 | py | import numpy as np
print("Loading train data......")
# Adjacency map: source node id -> set of its neighbour (sink) node ids.
# Each line of train.txt is "source sink1 sink2 ...".
train_data={}
with open('train.txt','r') as train:
    for line in train:
        neighbour_list=[int(i) for i in line.split()]
        train_data[neighbour_list[0]]=set([neighbour_list[i+1] for i in range(len(neighbour_list)-1)])
def get_train_sources_and_sinks(file):
with open(file, 'r') as train:
sources=set()
sinks=set()
for line in train:
neighbour_list=[int(i) for i in line.split()]
sources.add(neighbour_list[0])
for i in range(len(neighbour_list)-1):
sinks.add(neighbour_list[i+1])
return sources,sinks
train_sources,train_sinks=get_train_sources_and_sinks('train.txt')
print(len(train_sources))
print(len(train_sinks))
import random
def positive_sampling():
print("Positive sampling......")
positive_samples=[]
count=0
for i in range(51100):
if (count % 1000 == 0):
print(count)
count+=1
source_random_index=random.randint(0,19999)
source=(list(train_sources))[source_random_index]
origin_sinks=train_data[source] # origin_sinks is a set
try:
sink=random.choice(list(origin_sinks))
positive_samples.append((source,sink))
except:
# print(origin_sinks)
pass
print(len(positive_samples))
return positive_samples
def negative_sampling():
print("Negative sampling......")
negative_samples=[]
count = 0
for i in range(50020):
if (count % 10 == 0):
print(count)
count+=1
source_random_index=random.randint(0,19999)
source=(list(train_sources))[source_random_index]
origin_sinks = train_data[source]
sink=random.choice(list(train_sinks))
if sink not in origin_sinks:
negative_samples.append((source, sink))
print(len(negative_samples))
return negative_samples
positive_samples=positive_sampling()
np.save('positive_samples.npy',np.array(positive_samples))
negative_samples=negative_sampling()
np.save('negative_samples.npy',np.array(negative_samples))
| [
"noreply@github.com"
] | noreply@github.com |
d3631e9c43a81f9d30d6e22c349820df2729ef75 | 6c362fb828bf364f36f93a00e85d8fdaeaafc64d | /poe-sledgehammer/poe-sledgehammer.py | 263b0d59296b583f58c470c176d651ddd12a2137 | [] | no_license | jcostom/pyez-toys | 8ded93428a831307ea8561066b04babdd9c05a4d | 806b71476c39e3427b93ef52c674a66b8fb4c4c6 | refs/heads/main | 2023-08-16T00:11:18.175728 | 2023-07-27T02:52:35 | 2023-07-27T02:52:35 | 58,584,766 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,970 | py | #!/usr/bin/env python3
# This tool is a blunt instrument - HANDLE WITH CARE.
# It disables PoE on all ports, commits the configuration,
# then executes a rollback 1 and commits again.
# You would only use this tool if you wanted to completely
# disable PoE then rollback that change.
import argparse
import logging
import os
from jnpr.junos import Device
from jnpr.junos.utils.config import Config
# Setup logger
logger = logging.getLogger()
ch = logging.StreamHandler()
logger.setLevel(logging.ERROR)
ch.setLevel(logging.ERROR)
formatter = logging.Formatter('[%(asctime)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
parser = argparse.ArgumentParser(
description='Juniper Switch PoE Bounce Utility'
)
parser.add_argument('--switch', action="store")
parser.add_argument('--user', action="store",
default=os.getenv('USER'),
help="Will default to your current username.")
parser.add_argument('--password', action="store", help="Omit this option if you're using ssh keys to authenticate") # noqa: E501
args = parser.parse_args()
def main():
disableCommand = "set poe interface all disable"
disableComment = "drop the PoE sledgehammer on all ports"
rollbackComment = "rollback - restoring PoE"
dev = Device(host=args.switch, user=args.user)
logger.error(f"Connecting to: {args.switch}")
dev.open()
dev.bind(cu=Config)
logger.error(f"Locking the configuration on: {args.switch}")
dev.cu.lock()
logger.error("Now shutting down PoE on all ports.")
dev.cu.load(disableCommand, format='set')
dev.cu.commit(comment=disableComment, timeout=180)
logger.error(f"Now executing rollback on: {args.switch}")
dev.cu.rollback(rb_id=1)
dev.cu.commit(comment=rollbackComment, timeout=180)
logger.error(f"Unlocking the configuration on: {args.switch}")
dev.cu.unlock()
dev.close()
logger.error("Done!")
if __name__ == "__main__":
main()
| [
"jcostom@jasons.org"
] | jcostom@jasons.org |
c1c7e98416024eb3e29fc245b4b49b8df8efa47b | 75a98dd6c84220441832ac7f5b5046a4777d9af4 | /sliding_window.py | 2c77952dab04c0803fc0446cf386c9b95f4fd822 | [] | no_license | Hyunwoo-Park-Yonsei/Sliding_Window | 9a94799ac32eba437e4afe96577508f58e35a78c | 2fb5cfcdb0fd2fd9cdaac25df9ca2ec6d598b8f2 | refs/heads/main | 2023-07-01T19:10:55.227303 | 2021-08-03T14:16:50 | 2021-08-03T14:16:50 | 392,334,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,364 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import cv2, random, math, copy
Width = 640
Height = 480
#동영상 읽기
cap = cv2.VideoCature("xycar_track1.mp4")
window_title = 'camera'
#와핑한 이미지의 사이즈 bird eye view로 본 것의 이미지의 사이즈
warp_img_w = 320
warp_img_h = 240
#와핑할때 의 margin 값
warpx_margin =20
warpy_margin =3
#슬라이디 윈도우 개수, 슬라이딩 윈도우의 넓이, 선을 그릴 때의 threshold 값
nwindows = 9
margin =12
minpix = 5
lane_bin_th = 145
# bird eye view로 변환 작업
# 와핑할 영역 선정
warp_src = np.array([
[230-warpx_margin, 300-warpy_margin],
[45-warpx_margin, 450+warpy_margin],
[445-warpx_margin, 300+warpy_margin],
[610-warpx_margin, 450+warpy_margin],
], dtype=np.float32)
# 결과 이미지 크기 선정
warp_dist = np.array([
[0,0],
[0,warp_img_h],
[warp_img_w,0],
[warp_img_w,warp_img_h],
],dtype=np.float32)
calibrated =True
#자이카 카메라 왜곡에 의한 calibration
if calibrated:
mtx = np.array([
[422.037858,0.0,245.895397],
[0.0,435.589734,163.625535],
[0.0,0.0,1.0]
])
dist = np.array([-0.289296,0.061035,0.001786,0.15238,0.0])
cal_mtx, cal_roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (Width,Height),1,(Width,Height))
#왜곡된 이미지를 펴는 함수
def calibrate_image(frame):
global Width, Height
global mtx, dist
global cal_mtx, cal_roi
tf_image = cv2.undistort(frame,mtx,dist,None,cal_mtx)
x,y,w,h = cal_roi
tf_image = tf_image[y:y+h, x:x+w]
return cv2.resize(tf_image,(Wdith,Height))
#변환 전후의 4개점 좌표를 전달해 새로운 이미지로 만든다
def warp_image(img,src,dst,size):
M = cv2.getPerspectiveTransform(src,dst)
Minv = cv2.getPerspectiveTransform(dst,src)
warp_img = cv2.warpPerspective(img,M,size,flags=cv2.INTER_LINEAR)
return warp_img,M,Minv
def warp_process_image(img):
global nwindows
global margin
global minpix
global lane_bin_th
#가우시안 블러로 노이즈 제거
blur = cv2.GaussianBlur(img,(5,5),0)
#HLS포맷에서 흰색선 구분 쉬워서 L채널을 사용
_,L,_ = cv2.split(cv2.cvtColor(blur,cv2.COLOR_BGR2HLS))
#L채널을 확실하게 하기 위해 이진화한다.
_, lane = cv2.thershold(L,lane_bin_th, 255, cv2.THRESH_BINARY)
#추출된 이미지를 히스토그램화한다
histogram = np.sum(lane[lane.shape[0]//2:,:], axis = 0)
#x좌표를 반으로 나누어 왼쪽차선과 오른쪽차선 구분한다
midpoint = np.int(histogram.shape[0]/2)
#왼쪽차선중 흰색 픽셀이 가장 많은 지점을 왼쪽 시작지점으로 잡는다
leftx_current = np.argmax(histogram[:midpoint])
#오른쪽차선중 흰색 픽셀이 가장 많은 지점을 오른쪽 시작지점으로 잡는다
rightx_current = np.argmax(histogram[midpoint:]) + midpoint
#차선의 위치에 슬라이딩 윈도우를 그린다
#윈도우 하나의 크기 설정
window_height = np.int(lane.shape[0]/nwindows)
nz = lane.nonzero()
left_lane_inds = []
right_lane_inds = []
lx,ly,rx,ry = [], [], [], []
out_img = np.dstack((lane,lane,lane))*255
#윈도우 그리기기
for window in range(nwindows):
win_yl = lane.shape[0] - (window+1)*window_height
win_yh = lane.shape[0] - (window) * window_height
win_xll = leftx_current - margin
win_xlh = leftx_current + margin
win_xrl = rightx_current - margin
win_xrh = rightx_current + margin
cv2.rectangle(out_img, (win_xll,win_yl),(win_xlh,win_yh),(0,255,0),2)
cv2.rectangle(out_img, (win_xrl, win_yl), (win_xrh, win_yh), (0, 255, 0), 2)
#픽셀의 x 좌표를 모은다
good_left_inds = ((nz[0] >= win_yl) & (nz[0] < win_yh) &
(nz[1] >= win_xll) & (nz[1] < win_xlh)).nonzero()[0]
good_right_inds = ((nz[0] >= win_yl) & (nz[0] < win_yh) &
(nz[1] >= win_xrl) & (nz[1] < win_xrh)).nonzero()[0]
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
#흰색점이 5개 이상인 경우일때 x좌표의 평균값을 구한다.
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nz[1][good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nz[1][good_right_inds]))
lx.append(leftx_current)
ly.append((win_yl + win_yh)/2)
rx.append(rightx_current)
ry.append((win_yl + win_yh)/2)
# 모은 점의 좌표를 통해 2차함수를 fit한다
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
lfit = np.polyfit(np.array(ly), np.array(lx),2)
rfit = np.polyfit(np.array(ry), np.array(rx), 2)
# 구한 lfit rfit을 다시 원근 변환하여 원래 이미지에 덧그린다
def draw_line(image,warp_img,MInv,left_fit,right_fit):
global Width, Height
yMax = warp_img.shape[0]
ploty = np.linspace(0, yMax -1,yMax)
color_warp = np.zeros_like(warp_img).astype(np,.uint8)
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx,ploty])))])
pts = np.hstack((pts_left,pts_right))
color_warp = cv2.fillPoly(color_warp,np.int_([pts]), (0,255,0))
newwarp = cv2.warpPerspective(color_warp, Minv, (Width, Height))
return cv2.addWeighted(image,1,newwarp, 0.3,0)
def start():
global Width, Height, cap
_, frame = cap.read()
while not frame.size == (Width*Height*3):
_, frame = cap.read()
continue
while cap.isOpened():
_, frame = cap.read()
image = calibrate_image(frame)
warp_img, M, Minv = warp_image(image,warp_src,warp_dist,(warp_img_w,warp_img_h))
left_fit, right_fit = warp_process_image(warp_img)
lane_img = draw_line(iamge,warp_img,Minv,left_fit,right_fit)
cv2.imshow(window_title, lane_img)
cv2.waitkey(1)
if __name__ == '__main__':
start()
| [
"kevin3671@naver.com"
] | kevin3671@naver.com |
3ba84876fba42a1e11f5c5082263a679fefaafe0 | bf05cde65299758a3e3579ceec78048be9cd2e2c | /__init__.py | bea9caed2f92e001565baeb13e484e89f0358f3e | [] | no_license | krislmassey/DpdkElmoConfig | 21c5d7fd4d850dd11d37941a1ffc2e201fdcdc1d | 8a0b5bdb6953544bc9b3ef23aa8b35c9dc31c82d | refs/heads/master | 2020-12-30T09:26:25.598812 | 2015-07-14T18:00:47 | 2015-07-14T18:00:47 | 39,090,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,703 | py | '''
Download test_cli Python Package
--------------------------------
The test_cli Python package can be downloaded from
:releasezip:`here <http://elmo.adtran.com/doc/api>`.
.. warning:: The test_cli does **not** support Python 3! Please use Python 2.7.
.. warning:: Currently, this package does **not** support ELMO units with firmware below version, 2.1.0.
Firmware upgrades can be downloaded from: `ELMO Firmware <http://elmo.adtran.com/firmware>`_. Please
use the latest available production version, if possible, especially before reporting bugs.
Bugs and feature requests should be reported to:
`Trevor Bowen <mailto:trevor.bowen@adtran.com?subject=test_cli>`_.
Introduction to test_cli Python Package
---------------------------------------
ELMO test automation is supported via the test_cli package. The test_cli package provides a generic
driver for **any** local, telnet, or ssh CLI process that accepts input commands via STDIN,
preceded by a prompt. Convenience functions are provided for submitting commands and verifying
output based on strict matching, whitespace insensitive matching, case insensitive matching, and
regular expressions.
An ELMO-specific test automation reference driver (:class:`.elmo.ElmoTelnetConnection`) and
connection factory (:func:`.elmo.ElmoConnection`) is included to provide connections to ELMO units.
.. note:: Please use the connection factory (:func:`.elmo.ElmoConnection`) for **all** ELMO connections.
A demonstration of a test program using the driver is included in the :mod:`.demo` module. Please
examine the `source code <_modules/test_cli/demo.html>`_ of that module for exemplary usage.
The test_cli package consists of the following essential classes and functions for ELMO test
automation:
* :func:`.elmo.ElmoConnection` - Factory function used to create connections with automatically \
defined parsers.
* :class:`.elmo.ElmoPipeConnection` - Connection to local ELMO CLI via Unix pipes, returned by \
:func:`.elmo.ElmoConnection`.
* :class:`.elmo.ElmoTelnetConnection` - Connection to remote ELMO CLI over Telnet, returned by \
:func:`.elmo.ElmoConnection`.
* :class:`.elmo.ElmoSSHConnection` - Connection remote ELMO CLI over SSH, manually created and not \
preferred because of paramiko package dependency.
.. note:: The command output parsers for the ELMO driver are detailed in the `ELMO Parsers`_ section.
The test_cli package also includes the following generic classes, which can be used to drive **any**
CLI process:
* :class:`.pipe.PipeConnection` - Connection to any local CLI process through a Unix Pipe.
* :class:`.telnet.TelnetConnection` - Connection to a remote CLI process through a Telnet session.
* :class:`.ssh.SSHConnection` - Connection to a remote CLI process through a SSH session.
Some additional utilty functions are provided in the :mod:`.utils` module to facilitate parsing and
analysis of output text.
.. warning:: Currently, there are **no** parsers available for any firmware below version, 2.1.0.
Firmware upgrades can be downloaded from: `ELMO Firmware <http://elmo.adtran.com/firmware>`_.
.. note:: The test_cli archive for |release| can be downloaded from \
:releasezip:`here <http://elmo.adtran.com/doc/api>`.
'''
__version__ = '2.2.0'
__author__ = 'Trevor Bowen <trevor.bowen@adtran.com>'
__all__ = [
'approx_eq',
'approx_ge',
'PipeConnection',
# 'SSHConnection',
'TelnetConnection',
'ElmoConnection',
]
from test_cli.utils import approx_eq, approx_ge
from test_cli.pipe import PipeConnection
# from .ssh import SSHConnection
from test_cli.telnet import TelnetConnection
from test_cli.elmo import ElmoConnection
| [
"kristenmassey@yahoo.com"
] | kristenmassey@yahoo.com |
50fe6dc2ba53bceded57e2bc29077589d7d54e66 | c77aa0dafe8d4f3f47c4d17e06939f49a347b31c | /env/bin/django-admin.py | 62e177aab5954f33fcf7a537d3d52bad081b34c7 | [] | no_license | aruncognicor/VAbackendTool | a2dc3cf22483e2e955b91b61632c6f3a3dbcd716 | ff86a505bfd9e2c0c7e221a059ced16dadda5f49 | refs/heads/master | 2016-08-12T03:45:06.154833 | 2016-01-12T04:47:45 | 2016-01-12T04:47:45 | 49,475,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | #!/home/arun/Documents/projects/backend_tool/env/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"arun@thanzi-Vostro-270s.(none)"
] | arun@thanzi-Vostro-270s.(none) |
d3b51dffc41c9928f3160d598f19f4ab7ba584b4 | 3df0309a9269ade9d1a6c70d7608880cc90ed2ab | /GT668/Samples/Python/DataStorage.py | 9a9d1b00b28c5468428c76d9df3031877036c5cf | [] | no_license | motogo/PRCCounterApp | 3e8a2f9339f4937e67673d1a7c72b3c053fafca7 | 3bcfa8166801b29de036a6d7b79f8c5b3debcf53 | refs/heads/master | 2022-12-19T16:52:39.734844 | 2020-10-22T21:30:47 | 2020-10-22T21:30:47 | 306,461,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,732 | py | from GT668Driver import *
gt = GT668Driver()
gt.initialize(0)
# size of array per channel
size = 10
# read timeout
timeout = 10.0
# configure inputs
gt.set_input_prescale(GT668.Signal.GT_SIG_A, GT668.Prescale.GT_DIV_1)
gt.set_input_threshold(GT668.Signal.GT_SIG_A, GT668.ThresholdMode.GT_THR_VOLTS, 0.0)
# configure inputs
gt.set_input_prescale(GT668.Signal.GT_SIG_B, GT668.Prescale.GT_DIV_1)
gt.set_input_threshold(GT668.Signal.GT_SIG_B, GT668.ThresholdMode.GT_THR_VOLTS, 0.0)
#read tags up to 100 tags on either channel
gt.start_measurements()
# initialize timetags set object
timetagsSet = TimetagsSet(channel0Size=size, channel1Size=size)
# read time
start_time = gt.read_sys_time()
# the measurement will last untill the timeout
while gt.read_sys_time() - start_time < timeout:
gt.read_timetags(timetagsSet)
# when both channels have the specified tags count, terminates
if timetagsSet.channel0Count == timetagsSet.channel0Size and timetagsSet.channel1Count == timetagsSet.channel1Size:
break
#free card
gt.close()
#store as time aligned two row csv
GT668DataUtils.save_tags_to_time_aligned_two_collumns_CSV(timetagsSet, ',', "ch0", "ch1", "/path/to/simple_two_column.csv")
#store as simple csv
GT668DataUtils.save_tags_as_simple_csv(timetagsSet, True, "/path/to/simple.csv", ",")
#store as simple txt
GT668DataUtils.save_tags_as_simple_text(timetagsSet, True, "/path/to/simple.txt")
#store as formatted file
dff = Data_Format_Factory()
dff.file_type = File_Type.csv #optional: default CSV
dff.tags_per_file = 50 #optional: default -1 (save all tags in single file)
dff.delimiter = ';' #optional: default ','
dff.empty_tag_representation = "n/a" #optional: default "---"
dff.file_name = "Some_file_name" #optional: default GT668Tags
dff.restart_numbering_in_new_file = True #optional: default false
dff.include_row_numbering = True #optional: default true
dff.header = "HEADER" #optional: default None (no header)
dff.store_header_in_each_file = True #optional: default false
#cell initialization
c1 = Cell()
c2 = Cell()
c3 = Cell()
c4 = Cell()
#single cell consists of prefix Var [either tags from channel 0 or 1] and suffix
#each cell can have all or just one of above fields
#cells are separated by Data_Format_Factory.delimiter field value
c1.prefix = "prefix"
c2.prefix = "pre_before_tag_value: "
c2.var = Var.ch_0_tags
c3.var = Var.ch_1_tags
c4.suffix = "suffix"
#row definition
dff.row = [c1, c2, c3, c4] #required: sets row definition, see documentation for details
GT668DataUtils.save_tags_with_formatting(timetagsSet, dff, "/path/to/") | [
"horst.ender@softend.de"
] | horst.ender@softend.de |
d9595880395a2d6b6d84ff064f4f1997d10f39cc | 68c0fa1e3f8bcc53d352c112c559cbd2981b3dfa | /src/main/python/tasks/workers.py | babfde41be47a2af1234d75e1d2f3f3741af7895 | [] | no_license | boyunli/article-generator | d647e47536c7a17b60c2fed988980501b1f70315 | 4cf45733a4ddfd3feeb5607f398860c78314ece7 | refs/heads/master | 2020-03-08T18:41:09.764569 | 2018-07-04T09:15:05 | 2018-07-04T09:15:05 | 128,314,361 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | # coding=utf-8
from celery import Celery
from python.sites.news.sohu import SoHu
from python.sites.news.toutiao import TouTiao
from python.sites.news.wechat import Wechat
celery_app = Celery('article', include=['python.tasks.workers'])
celery_app.config_from_object('python.tasks.celery_config')
@celery_app.task(bind=True)
def crawler_news(self):
try:
SoHu().parse()
TouTiao().parse()
Wechat().parse()
except Exception as exc:
raise self.retry(exc=exc, countdown=1*60, max_retries=5)
| [
"liling@meifang.com"
] | liling@meifang.com |
0f002addf74bef460a8840967aaf1a0aba19ff6d | 47136f769b2e870242f438927cee8dabcbca94c0 | /week8/informatics/4/F.py | ac3e3e385b9712dcdb1ec40e27313b118220704f | [] | no_license | Almanova/WebDevelopment-Spring2020 | de97b5aba1f13a766e2ef183151e39db3c8bba53 | 0abdee8f25dee1a4d32da2b633903d33936b6e77 | refs/heads/master | 2023-01-11T08:20:27.232203 | 2020-04-17T01:31:01 | 2020-04-17T01:31:01 | 236,373,539 | 0 | 0 | null | 2023-01-07T16:25:00 | 2020-01-26T20:42:31 | TypeScript | UTF-8 | Python | false | false | 177 | py | n = int(input())
list = input().split()
cnt = 0
for i in range(1, n - 1):
if int(list[i - 1]) < int(list[i]) and int(list[i + 1]) < int(list[i]):
cnt += 1
print(cnt) | [
"almanovamadina@yahoo.com"
] | almanovamadina@yahoo.com |
e54d5f06e5fc1b80bc598b447f5332574e03328c | 35f7970d0423dac96f1fefda6fb2246ada0bd483 | /catkin_ws/build/rotors_joy_interface/catkin_generated/pkg.installspace.context.pc.py | fe71778c0472e11f9e3595aebb9f1e531858b5eb | [] | no_license | HugoGrellier/ros_project_bebop | 7c169635fa5ffe664bdb4155bac212a0a5f7b941 | d6c8c3ada879747a7b070dc88646d4c3b86d28c5 | refs/heads/master | 2023-02-09T10:37:22.209574 | 2020-10-20T18:52:04 | 2020-10-20T18:52:04 | 306,311,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;geometry_msgs;mav_msgs;sensor_msgs;trajectory_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rotors_joy_interface"
PROJECT_SPACE_DIR = "/home/student/Documents/ros_project_bebop/catkin_ws/install"
PROJECT_VERSION = "2.1.1"
| [
"hugo.grellier@cpe.fr"
] | hugo.grellier@cpe.fr |
a55ff55edf59f5225b76b60492f4fd70d55087df | 0e033cadbbd9fa3d3898936411343741f418b902 | /daily-data/data_pb2.py | a8e4ab91f66c10fe8c2da9d45d968e5b5e69616f | [] | no_license | BSVino/DAData | b5fe004025fd5c4cedda0133b7e30d14136373c7 | 4c821345b8999a820d24bbbb29889de8c03be933 | refs/heads/master | 2021-01-18T14:33:46.552060 | 2014-11-27T03:25:57 | 2014-11-27T03:25:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 31,687 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
import math_pb2
DESCRIPTOR = descriptor.FileDescriptor(
name='data.proto',
package='da.protobuf',
serialized_pb='\n\ndata.proto\x12\x0b\x64\x61.protobuf\x1a\nmath.proto\"\x9f\x06\n\x08GameData\x12\r\n\x05\x64\x65\x62ug\x18\x01 \x01(\x08\x12\x10\n\x08map_name\x18\x02 \x01(\t\x12\x13\n\x0bserver_name\x18\x03 \x01(\t\x12\x11\n\ttimestamp\x18\x04 \x01(\x05\x12/\n\tpositions\x18\x05 \x01(\x0b\x32\x1c.da.protobuf.PlayerPositions\x12\x0e\n\x06\x63heats\x18\x06 \x01(\x08\x12+\n\x05kills\x18\x07 \x01(\x0b\x32\x1c.da.protobuf.PlayerPositions\x12,\n\x06\x64\x65\x61ths\x18\x08 \x01(\x0b\x32\x1c.da.protobuf.PlayerPositions\x12\x13\n\x0b\x63onnections\x18\t \x01(\x05\x12\x10\n\x08teamplay\x18\n \x01(\x08\x12\x1a\n\x12thirdperson_active\x18\x0b \x01(\x05\x12\x1c\n\x14thirdperson_inactive\x18\x0c \x01(\x05\x12\x16\n\x0e\x64isconnections\x18\r \x01(\x05\x12\x1f\n\x17unique_players_this_map\x18\x0e \x01(\x05\x12\x12\n\nda_version\x18\x0f \x01(\x05\x12\x19\n\x11\x63haracters_chosen\x18\x10 \x03(\t\x12\x16\n\x0eweapons_chosen\x18\x11 \x03(\x05\x12\x15\n\rskills_chosen\x18\x12 \x03(\x05\x12&\n\x05votes\x18\x13 \x03(\x0b\x32\x17.da.protobuf.VoteResult\x12\x18\n\x10weapons_chosen_s\x18\x14 \x03(\t\x12\x17\n\x0fskills_chosen_s\x18\x15 \x03(\t\x12\x10\n\x08map_time\x18\x16 \x01(\x02\x12\x11\n\tvr_active\x18\x17 \x01(\x05\x12\x13\n\x0bvr_inactive\x18\x18 \x01(\x05\x12\x18\n\x10platform_windows\x18\x19 \x01(\x05\x12\x16\n\x0eplatform_linux\x18\x1a \x01(\x05\x12\x14\n\x0cplatform_osx\x18\x1b \x01(\x05\x12+\n\x0ckill_details\x18\x1c \x03(\x0b\x32\x15.da.protobuf.KillInfo\x12,\n\x0bplayer_list\x18\x1d \x03(\x0b\x32\x17.da.protobuf.PlayerList\"8\n\x0fPlayerPositions\x12%\n\x08position\x18\x01 \x03(\x0b\x32\x13.da.protobuf.Vector\"<\n\nVoteResult\x12\r\n\x05issue\x18\x01 \x01(\t\x12\x0f\n\x07\x64\x65tails\x18\x02 \x01(\t\x12\x0e\n\x06result\x18\x03 \x01(\x08\"\xb7\x02\n\nPlayerInfo\x12%\n\x08position\x18\x01 \x01(\x0b\x32\x13.da.protobuf.Vector\x12\x0e\n\x06health\x18\x02 \x01(\x05\x12\r\n\x05\x66lags\x18\x03 \x01(\x04\x12\x0e\n\x06weapon\x18\x04 \x01(\t\x12\r\n\x05skill\x18\x05 
\x01(\t\x12\x11\n\taccountid\x18\x06 \x01(\r\x12\r\n\x05style\x18\x07 \x01(\x02\x12\x13\n\x0btotal_style\x18\x08 \x01(\x02\x12\r\n\x05kills\x18\t \x01(\r\x12\x0e\n\x06\x64\x65\x61ths\x18\n \x01(\r\x12\x10\n\x08waypoint\x18\x0b \x01(\r\x12/\n\x12objective_position\x18\x0c \x01(\x0b\x32\x13.da.protobuf.Vector\x12\x13\n\x0bslowmo_type\x18\r \x01(\t\x12\x16\n\x0eslowmo_seconds\x18\x0e \x01(\x02\"\x8b\x01\n\x08KillInfo\x12\'\n\x06victim\x18\x01 \x01(\x0b\x32\x17.da.protobuf.PlayerInfo\x12\'\n\x06killer\x18\x02 \x01(\x0b\x32\x17.da.protobuf.PlayerInfo\x12-\n\x10grenade_position\x18\x03 \x01(\x0b\x32\x13.da.protobuf.Vector\"<\n\nPlayerList\x12\x11\n\taccountid\x18\x01 \x01(\x05\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\r\n\x05style\x18\x03 \x01(\x02\"\xa9\x01\n\x0bServerReply\x12\x14\n\x0c\x64\x61ily_leader\x18\x01 \x01(\t\x12\x1a\n\x12\x64\x61ily_leader_style\x18\x02 \x01(\x02\x12\x15\n\rweekly_leader\x18\x03 \x01(\t\x12\x1b\n\x13weekly_leader_style\x18\x04 \x01(\x02\x12\x16\n\x0emonthly_leader\x18\x05 \x01(\t\x12\x1c\n\x14monthly_leader_style\x18\x06 \x01(\x02*\xbe\x02\n\tKillFlags\x12\x14\n\x10KILL_THIRDPERSON\x10\x00\x12\x0e\n\nKILL_AIMIN\x10\x01\x12\x0f\n\x0bKILL_DIVING\x10\x02\x12\x10\n\x0cKILL_ROLLING\x10\x03\x12\x10\n\x0cKILL_SLIDING\x10\x04\x12\x11\n\rKILL_FLIPPING\x10\x05\x12\x15\n\x11KILL_SUPERFALLING\x10\x06\x12\x13\n\x0fKILL_BY_GRENADE\x10\x07\x12\x11\n\rKILL_BY_BRAWL\x10\x08\x12\x15\n\x11KILL_SKILL_ACTIVE\x10\t\x12\x1b\n\x17KILL_SUPER_SKILL_ACTIVE\x10\n\x12\x12\n\x0eKILL_IS_TARGET\x10\x0b\x12\x16\n\x12KILL_HAS_BRIEFCASE\x10\x0c\x12\x0f\n\x0bKILL_IS_BOT\x10\r\x12\x13\n\x0fKILL_IS_SUICIDE\x10\x0e')
_KILLFLAGS = descriptor.EnumDescriptor(
name='KillFlags',
full_name='da.protobuf.KillFlags',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='KILL_THIRDPERSON', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KILL_AIMIN', index=1, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KILL_DIVING', index=2, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KILL_ROLLING', index=3, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KILL_SLIDING', index=4, number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KILL_FLIPPING', index=5, number=5,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KILL_SUPERFALLING', index=6, number=6,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KILL_BY_GRENADE', index=7, number=7,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KILL_BY_BRAWL', index=8, number=8,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KILL_SKILL_ACTIVE', index=9, number=9,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KILL_SUPER_SKILL_ACTIVE', index=10, number=10,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KILL_IS_TARGET', index=11, number=11,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KILL_HAS_BRIEFCASE', index=12, number=12,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KILL_IS_BOT', index=13, number=13,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KILL_IS_SUICIDE', index=14, number=14,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1652,
serialized_end=1970,
)
KILL_THIRDPERSON = 0
KILL_AIMIN = 1
KILL_DIVING = 2
KILL_ROLLING = 3
KILL_SLIDING = 4
KILL_FLIPPING = 5
KILL_SUPERFALLING = 6
KILL_BY_GRENADE = 7
KILL_BY_BRAWL = 8
KILL_SKILL_ACTIVE = 9
KILL_SUPER_SKILL_ACTIVE = 10
KILL_IS_TARGET = 11
KILL_HAS_BRIEFCASE = 12
KILL_IS_BOT = 13
KILL_IS_SUICIDE = 14
_GAMEDATA = descriptor.Descriptor(
name='GameData',
full_name='da.protobuf.GameData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='debug', full_name='da.protobuf.GameData.debug', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='map_name', full_name='da.protobuf.GameData.map_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='server_name', full_name='da.protobuf.GameData.server_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='timestamp', full_name='da.protobuf.GameData.timestamp', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='positions', full_name='da.protobuf.GameData.positions', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='cheats', full_name='da.protobuf.GameData.cheats', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='kills', full_name='da.protobuf.GameData.kills', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='deaths', full_name='da.protobuf.GameData.deaths', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='connections', full_name='da.protobuf.GameData.connections', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='teamplay', full_name='da.protobuf.GameData.teamplay', index=9,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='thirdperson_active', full_name='da.protobuf.GameData.thirdperson_active', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='thirdperson_inactive', full_name='da.protobuf.GameData.thirdperson_inactive', index=11,
number=12, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='disconnections', full_name='da.protobuf.GameData.disconnections', index=12,
number=13, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='unique_players_this_map', full_name='da.protobuf.GameData.unique_players_this_map', index=13,
number=14, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='da_version', full_name='da.protobuf.GameData.da_version', index=14,
number=15, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='characters_chosen', full_name='da.protobuf.GameData.characters_chosen', index=15,
number=16, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='weapons_chosen', full_name='da.protobuf.GameData.weapons_chosen', index=16,
number=17, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='skills_chosen', full_name='da.protobuf.GameData.skills_chosen', index=17,
number=18, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='votes', full_name='da.protobuf.GameData.votes', index=18,
number=19, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='weapons_chosen_s', full_name='da.protobuf.GameData.weapons_chosen_s', index=19,
number=20, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='skills_chosen_s', full_name='da.protobuf.GameData.skills_chosen_s', index=20,
number=21, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='map_time', full_name='da.protobuf.GameData.map_time', index=21,
number=22, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='vr_active', full_name='da.protobuf.GameData.vr_active', index=22,
number=23, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='vr_inactive', full_name='da.protobuf.GameData.vr_inactive', index=23,
number=24, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='platform_windows', full_name='da.protobuf.GameData.platform_windows', index=24,
number=25, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='platform_linux', full_name='da.protobuf.GameData.platform_linux', index=25,
number=26, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='platform_osx', full_name='da.protobuf.GameData.platform_osx', index=26,
number=27, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='kill_details', full_name='da.protobuf.GameData.kill_details', index=27,
number=28, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='player_list', full_name='da.protobuf.GameData.player_list', index=28,
number=29, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=40,
serialized_end=839,
)
_PLAYERPOSITIONS = descriptor.Descriptor(
name='PlayerPositions',
full_name='da.protobuf.PlayerPositions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='position', full_name='da.protobuf.PlayerPositions.position', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=841,
serialized_end=897,
)
_VOTERESULT = descriptor.Descriptor(
name='VoteResult',
full_name='da.protobuf.VoteResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='issue', full_name='da.protobuf.VoteResult.issue', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='details', full_name='da.protobuf.VoteResult.details', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='result', full_name='da.protobuf.VoteResult.result', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=899,
serialized_end=959,
)
_PLAYERINFO = descriptor.Descriptor(
name='PlayerInfo',
full_name='da.protobuf.PlayerInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='position', full_name='da.protobuf.PlayerInfo.position', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='health', full_name='da.protobuf.PlayerInfo.health', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='flags', full_name='da.protobuf.PlayerInfo.flags', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='weapon', full_name='da.protobuf.PlayerInfo.weapon', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='skill', full_name='da.protobuf.PlayerInfo.skill', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='accountid', full_name='da.protobuf.PlayerInfo.accountid', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='style', full_name='da.protobuf.PlayerInfo.style', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='total_style', full_name='da.protobuf.PlayerInfo.total_style', index=7,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='kills', full_name='da.protobuf.PlayerInfo.kills', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='deaths', full_name='da.protobuf.PlayerInfo.deaths', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='waypoint', full_name='da.protobuf.PlayerInfo.waypoint', index=10,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='objective_position', full_name='da.protobuf.PlayerInfo.objective_position', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='slowmo_type', full_name='da.protobuf.PlayerInfo.slowmo_type', index=12,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='slowmo_seconds', full_name='da.protobuf.PlayerInfo.slowmo_seconds', index=13,
number=14, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=962,
serialized_end=1273,
)
_KILLINFO = descriptor.Descriptor(
name='KillInfo',
full_name='da.protobuf.KillInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='victim', full_name='da.protobuf.KillInfo.victim', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='killer', full_name='da.protobuf.KillInfo.killer', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='grenade_position', full_name='da.protobuf.KillInfo.grenade_position', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1276,
serialized_end=1415,
)
_PLAYERLIST = descriptor.Descriptor(
name='PlayerList',
full_name='da.protobuf.PlayerList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='accountid', full_name='da.protobuf.PlayerList.accountid', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='name', full_name='da.protobuf.PlayerList.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='style', full_name='da.protobuf.PlayerList.style', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1417,
serialized_end=1477,
)
_SERVERREPLY = descriptor.Descriptor(
name='ServerReply',
full_name='da.protobuf.ServerReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='daily_leader', full_name='da.protobuf.ServerReply.daily_leader', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='daily_leader_style', full_name='da.protobuf.ServerReply.daily_leader_style', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='weekly_leader', full_name='da.protobuf.ServerReply.weekly_leader', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='weekly_leader_style', full_name='da.protobuf.ServerReply.weekly_leader_style', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='monthly_leader', full_name='da.protobuf.ServerReply.monthly_leader', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='monthly_leader_style', full_name='da.protobuf.ServerReply.monthly_leader_style', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1480,
serialized_end=1649,
)
_GAMEDATA.fields_by_name['positions'].message_type = _PLAYERPOSITIONS
_GAMEDATA.fields_by_name['kills'].message_type = _PLAYERPOSITIONS
_GAMEDATA.fields_by_name['deaths'].message_type = _PLAYERPOSITIONS
_GAMEDATA.fields_by_name['votes'].message_type = _VOTERESULT
_GAMEDATA.fields_by_name['kill_details'].message_type = _KILLINFO
_GAMEDATA.fields_by_name['player_list'].message_type = _PLAYERLIST
_PLAYERPOSITIONS.fields_by_name['position'].message_type = math_pb2._VECTOR
_PLAYERINFO.fields_by_name['position'].message_type = math_pb2._VECTOR
_PLAYERINFO.fields_by_name['objective_position'].message_type = math_pb2._VECTOR
_KILLINFO.fields_by_name['victim'].message_type = _PLAYERINFO
_KILLINFO.fields_by_name['killer'].message_type = _PLAYERINFO
_KILLINFO.fields_by_name['grenade_position'].message_type = math_pb2._VECTOR
DESCRIPTOR.message_types_by_name['GameData'] = _GAMEDATA
DESCRIPTOR.message_types_by_name['PlayerPositions'] = _PLAYERPOSITIONS
DESCRIPTOR.message_types_by_name['VoteResult'] = _VOTERESULT
DESCRIPTOR.message_types_by_name['PlayerInfo'] = _PLAYERINFO
DESCRIPTOR.message_types_by_name['KillInfo'] = _KILLINFO
DESCRIPTOR.message_types_by_name['PlayerList'] = _PLAYERLIST
DESCRIPTOR.message_types_by_name['ServerReply'] = _SERVERREPLY
class GameData(message.Message):
  """Generated message class for ``da.protobuf.GameData`` (protoc output; do not edit by hand)."""
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _GAMEDATA
  # @@protoc_insertion_point(class_scope:da.protobuf.GameData)
class PlayerPositions(message.Message):
  """Generated message class for ``da.protobuf.PlayerPositions`` (protoc output; do not edit by hand)."""
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _PLAYERPOSITIONS
  # @@protoc_insertion_point(class_scope:da.protobuf.PlayerPositions)
class VoteResult(message.Message):
  """Generated message class for ``da.protobuf.VoteResult`` (protoc output; do not edit by hand)."""
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _VOTERESULT
  # @@protoc_insertion_point(class_scope:da.protobuf.VoteResult)
class PlayerInfo(message.Message):
  """Generated message class for ``da.protobuf.PlayerInfo`` (protoc output; do not edit by hand)."""
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _PLAYERINFO
  # @@protoc_insertion_point(class_scope:da.protobuf.PlayerInfo)
class KillInfo(message.Message):
  """Generated message class for ``da.protobuf.KillInfo`` (protoc output; do not edit by hand)."""
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _KILLINFO
  # @@protoc_insertion_point(class_scope:da.protobuf.KillInfo)
class PlayerList(message.Message):
  """Generated message class for ``da.protobuf.PlayerList`` (protoc output; do not edit by hand)."""
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _PLAYERLIST
  # @@protoc_insertion_point(class_scope:da.protobuf.PlayerList)
class ServerReply(message.Message):
  """Generated message class for ``da.protobuf.ServerReply`` (protoc output; do not edit by hand)."""
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _SERVERREPLY
  # @@protoc_insertion_point(class_scope:da.protobuf.ServerReply)
# @@protoc_insertion_point(module_scope)
| [
"jorge@lunarworkshop.com"
] | jorge@lunarworkshop.com |
6291822cb31b4bf8385ea3f7c22d79a5f2a4e13f | fb5b1b8dce103dea28be52f7bbd9ea84da2cec81 | /kolibri/core/discovery/api.py | 1a8389a92afb0d82f12c4c2108850fecba086a18 | [
"MIT"
] | permissive | lyw07/kolibri | d7f6f92656faa0483cd2cbdf57a3b6c54d52c2f2 | 11e0d01e2bc43850a6dfd4238e6408004449c3dc | refs/heads/develop | 2021-01-02T09:40:04.457976 | 2019-05-20T21:29:27 | 2019-05-20T21:29:27 | 99,255,547 | 1 | 0 | MIT | 2018-03-08T18:43:36 | 2017-08-03T16:53:09 | Python | UTF-8 | Python | false | false | 380 | py | from rest_framework import viewsets
from .models import NetworkLocation
from .serializers import NetworkLocationSerializer
from kolibri.core.content.permissions import CanManageContent
class NetworkLocationViewSet(viewsets.ModelViewSet):
    """REST endpoints for NetworkLocation records.

    Exposes the full CRUD surface of a DRF ``ModelViewSet`` over all
    ``NetworkLocation`` rows; access is gated by ``CanManageContent``.
    """

    queryset = NetworkLocation.objects.all()
    serializer_class = NetworkLocationSerializer
    permission_classes = (CanManageContent,)
| [
"jamalex@gmail.com"
] | jamalex@gmail.com |
617da9ce72d24b2fca23ff0772bcfbdf9521d87f | 294e5260acb7aa0c888e6462ae0c114de95c8a18 | /morus_msgs/cfg/MavPosCtlParams.cfg | f3c0b726f2f9ababe1a452299781ddbfa9f612a3 | [] | no_license | larics/morus_uav_gazebo | c8d344ab60b847965dcf3f19fcc0fe542f16773b | 15dfc5fb36f0e646547b6e4d3cb5c1561e8f3144 | refs/heads/master | 2021-01-20T10:55:31.086516 | 2018-05-17T14:35:48 | 2018-05-17T14:35:48 | 51,870,669 | 6 | 7 | null | 2017-10-11T10:27:09 | 2016-02-16T21:13:50 | Python | UTF-8 | Python | false | false | 1,507 | cfg | #!/usr/bin/env python
PACKAGE = "mav_msgs"
from dynamic_reconfigure.parameter_generator_catkin import *
gen = ParameterGenerator()
gen.add("x_kp", double_t, 0, "X ctl PID P gain", 0.01, 0, 1000)
gen.add("x_ki", double_t, 0, "X ctl PID I gain", 0, 0, 1000)
gen.add("x_kd", double_t, 0, "X ctl PID D gain", 0, 0, 1000)
gen.add("vx_kp", double_t, 0, "Vx ctl PID P gain", 0.01, 0, 1000)
gen.add("vx_ki", double_t, 0, "Vx ctl PID I gain", 0, 0, 1000)
gen.add("vx_kd", double_t, 0, "Vx ctl PID D gain", 0, 0, 1000)
gen.add("y_kp", double_t, 0, "Y ctl PID P gain", 0.01, 0, 1000)
gen.add("y_ki", double_t, 0, "Y ctl PID I gain", 0, 0, 1000)
gen.add("y_kd", double_t, 0, "Y ctl PID D gain", 0, 0, 1000)
gen.add("vy_kp", double_t, 0, "Vy ctl PID P gain", 0.01, 0, 1000)
gen.add("vy_ki", double_t, 0, "Vy ctl PID I gain", 0, 0, 1000)
gen.add("vy_kd", double_t, 0, "Vy ctl PID D gain", 0, 0, 1000)
gen.add("z_kp", double_t, 0, "Z ctl PID P gain", 0.5, 0, 1000)
gen.add("z_ki", double_t, 0, "Z ctl PID I gain", 0.125, 0, 1000)
gen.add("z_kd", double_t, 0, "Z ctl PID D gain", 0.0, 0, 1000)
gen.add("vz_kp", double_t, 0, "Vz ctl PID P gain", 75, 0, 1000)
gen.add("vz_ki", double_t, 0, "Vz ctl PID I gain", 10, 0, 1000)
gen.add("vz_kd", double_t, 0, "Vz ctl PID D gain", 0.41472, 0, 1000)
gen.add("filter_ref", double_t, 0, "Filter constant for reference", 0.01, 0, 5)
gen.add("filter_meas", double_t, 0, "Filter constant for measurement", 0.01, 0, 5)
exit(gen.generate(PACKAGE, "mav_msgs", "MavPosCtlParams"))
| [
"tomislav.haus@gmail.com"
] | tomislav.haus@gmail.com |
5d2241b7297c60cb21d4db6772de0c17b076f8ef | d7bba9a2ba17831d58a34b3338f8f1e8d566d262 | /blog/migrations/0001_initial.py | 56985464e1503737df695d7509ef7569b1e4c854 | [] | no_license | harmi2009/my-first-blog | 16943a25901929faddf16a2f5dc6281f35449ae6 | bb8a29e35bee07ef0b829e9cb5c69e676d74640d | refs/heads/master | 2022-12-21T08:05:07.001697 | 2020-10-05T21:44:53 | 2020-10-05T21:44:53 | 300,715,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-10-02 18:56
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the blog ``Post`` table.

    ``author`` is a FK to the project's swappable user model, so the
    migration depends on whatever AUTH_USER_MODEL resolves to.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                # created_date is set at creation time; published_date stays
                # NULL until the post is published.
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"harmi2009@gmail.com"
] | harmi2009@gmail.com |
2280537cfc1cb168db84141e3af868073fca59d3 | 4cc25f1df9530daf2f46648df3997099f2a1b85e | /source/aws/services/iam.py | 7775689bfafbbd074c1d57dce09a1dffef59cd80 | [
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mcripps9/aws-control-tower-customizations | ccc2cb6eb067edb2524a0a5efbafc08d484eb1db | e88025eda57adbfcab12e79fbbf12e580e9a9ccd | refs/heads/master | 2022-04-16T12:33:36.623952 | 2020-04-15T20:59:04 | 2020-04-15T20:59:04 | 256,033,675 | 0 | 0 | Apache-2.0 | 2020-04-15T20:50:40 | 2020-04-15T20:50:39 | null | UTF-8 | Python | false | false | 1,821 | py | ##############################################################################
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the "license" file accompanying this file. This file is #
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #
# KIND, express or implied. See the License for the specific language #
# governing permissions and limitations under the License. #
##############################################################################
# !/bin/python
from botocore.exceptions import ClientError
from aws.utils.boto3_session import Boto3Session
class IAM(Boto3Session):
    """Thin wrapper around the boto3 IAM client built on the shared Boto3Session base."""
    def __init__(self, logger, region, **kwargs):
        """Create an IAM client bound to *region*.

        :param logger: project logger providing ``log_unhandled_exception``
        :param region: AWS region name, forwarded to the session via kwargs
        """
        self.logger = logger
        # Local constant; the double-underscore prefix gets name-mangled but
        # has no functional effect for a local variable.
        __service_name = 'iam'
        kwargs.update({'region': region})
        super().__init__(logger, __service_name, **kwargs)
        self.iam_client = super().get_client()
    def update_assume_role_policy(self, role_name, policy):
        """Replace the trust (assume-role) policy document of *role_name*.

        :param role_name: name of the IAM role to update
        :param policy: JSON policy document string
        :raises ClientError: re-raised after logging so callers can handle it
        """
        try:
            self.iam_client.update_assume_role_policy(
                RoleName=role_name,
                PolicyDocument=policy
            )
        except ClientError as e:
            # Log with full context, then propagate -- this wrapper never
            # swallows AWS API failures.
            self.logger.log_unhandled_exception(e)
            raise
| [
"jleavert@amazon.com"
] | jleavert@amazon.com |
5c154ddbe4004a161a0f02a8e1c1ecaa29732ce9 | 17a464cf499ea9cc492a9775fa4ea8314858bf8c | /storage/migrations/0002_auto_20210226_1933.py | 2aa6b900772828cc9913bfb4a036bd1e7ca79b97 | [] | no_license | Miladrzh/graph-calc | 8feeb7ffb266ac37508c2df09af624ea8a651a4d | 4452b1d3cb7779120717d54a95744880ee68cba2 | refs/heads/master | 2023-04-18T00:09:45.619274 | 2021-04-19T18:45:42 | 2021-04-19T18:45:42 | 342,690,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | # Generated by Django 2.1 on 2021-02-26 19:33
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration for the storage app.

    Renames ``vertex_count`` -> ``node_count``, drops ``hash_key`` and adds
    ``file_hash`` as the new primary key on ``GeneratedGraph``.
    """

    dependencies = [
        ('storage', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='generatedgraph',
            old_name='vertex_count',
            new_name='node_count',
        ),
        migrations.RemoveField(
            model_name='generatedgraph',
            name='hash_key',
        ),
        migrations.AddField(
            model_name='generatedgraph',
            name='file_hash',
            # NOTE(review): ``timezone.now`` is a datetime callable used as the
            # one-off default for a CharField hash (preserve_default=False
            # confirms it was entered interactively at makemigrations time).
            # Looks wrong for a hash column -- verify the table was empty when
            # this migration ran.
            field=models.CharField(default=django.utils.timezone.now, max_length=16, primary_key=True, serialize=False),
            preserve_default=False,
        ),
    ]
| [
"miladrzh@gmail.com"
] | miladrzh@gmail.com |
adb9f5111e2b6fde239785e3fac1f6d150e83c22 | 6fdbdb53b6c6696199282e3f7f8b86793dd2f2c2 | /venv/lib/python3.7/site-packages/optimal/__init__.py | ed2032bb81c9a35376fb89df3764ab581227c146 | [] | no_license | juliusHin/KNR_Stock_Prediction | 5695fc95e1c430bfa281856a0e7b61e79032921c | 76580ceb891d1abd2af2fee347acb250f0c00ef8 | refs/heads/master | 2022-12-12T20:20:07.442221 | 2019-10-26T03:53:49 | 2019-10-26T03:53:49 | 173,898,473 | 2 | 0 | null | 2022-12-08T01:40:18 | 2019-03-05T07:39:47 | Python | UTF-8 | Python | false | false | 1,599 | py | ###############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2014 Justin Lovinger
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
# All algorithms
from optimal.algorithms import gaoperators # Also operators for GA
from optimal.algorithms.genalg import GenAlg
from optimal.algorithms.gsa import GSA
from optimal.algorithms.crossentropy import CrossEntropy
# Necessary classes
from optimal.optimize import Problem
| [
"juliustanuwijaya.indonesian@gmail.com"
] | juliustanuwijaya.indonesian@gmail.com |
32ee9b2d8471e963468143f7e7d5b60c5c33dd40 | 6d0436f5684a5c829247f1e15c1eab69b3b14e34 | /api/core/blacklist_helpers.py | 2ee79f60a4088329742a0d40b2e80508e45f0601 | [
"MIT"
] | permissive | reddimohan/flask-rest-api-barebones-with-authentication | 61881e83e0d16d821ec5368190c2a6133a1a260e | 6daf04355531dd8d60937cdfa54967dd01e6ff54 | refs/heads/master | 2023-04-08T02:07:30.271224 | 2023-03-02T15:00:54 | 2023-03-02T15:00:54 | 210,322,303 | 6 | 5 | null | 2023-03-21T09:31:37 | 2019-09-23T09:59:52 | Python | UTF-8 | Python | false | false | 3,126 | py | from datetime import datetime
from database import TokenBlacklist
from exceptions import TokenNotFound
from flask_jwt_extended import decode_token
from main.db import MongoDB
db = MongoDB()
def _epoch_utc_to_datetime(epoch_utc):
"""
Helper function for converting epoch timestamps (as stored in JWTs) into
python datetime objects (which are easier to use with sqlalchemy).
"""
return datetime.fromtimestamp(epoch_utc)
def add_token_to_database(encoded_token, identity_claim):
    """
    Build a TokenBlacklist record (not revoked) for the given encoded JWT.

    :param encoded_token: the raw encoded JWT string
    :param identity_claim: name of the claim holding the user identity

    NOTE(review): the persistence calls are commented out below, so this
    function currently only prints the record and stores nothing -- confirm
    whether the MongoDB layer (``db`` above) was meant to save it.
    """
    decoded_token = decode_token(encoded_token)
    jti = decoded_token["jti"]
    token_type = decoded_token["type"]
    user_identity = decoded_token[identity_claim]
    # "exp" is an epoch timestamp; convert for storage/comparison.
    expires = _epoch_utc_to_datetime(decoded_token["exp"])
    revoked = False
    db_token = TokenBlacklist(
        jti=jti,
        token_type=token_type,
        user_identity=user_identity,
        expires=expires,
        revoked=revoked,
    )
    print(db_token)
    # db.session.add(db_token)
    # db.session.commit()
def is_token_revoked(decoded_token):
    """
    Checks if the given token is revoked or not. Because we are adding all the
    tokens that we create into this database, if the token is not present
    in the database we are going to consider it revoked, as we don't know where
    it was created.

    NOTE(review): ``NoResultFound`` is not imported anywhere in this module's
    import block, so the except clause itself raises NameError when the token
    is missing. Also, ``TokenBlacklist.query`` is SQLAlchemy-style although
    ``db`` above is a MongoDB wrapper -- verify which backend is live.
    """
    jti = decoded_token["jti"]
    try:
        token = TokenBlacklist.query.filter_by(jti=jti).one()
        return token.revoked
    except NoResultFound:
        # Unknown token => treat as revoked (fail closed).
        return True
def get_user_tokens(user_identity):
    """
    Return all tokens stored for *user_identity*, both revoked and unrevoked.

    NOTE(review): uses a SQLAlchemy-style ``.query`` on TokenBlacklist while
    the module-level ``db`` is a MongoDB wrapper -- confirm the ORM in use.
    """
    return TokenBlacklist.query.filter_by(user_identity=user_identity).all()
def revoke_token(token_id, user):
    """
    Revokes the given token. Raises a TokenNotFound error if the token does
    not exist in the database

    NOTE(review): ``NoResultFound`` is never imported in this module, so the
    not-found path raises NameError instead of TokenNotFound.
    """
    try:
        # Scope the lookup to the owning user so one user cannot revoke
        # another user's token by id.
        token = TokenBlacklist.query.filter_by(id=token_id, user_identity=user).one()
        token.revoked = True
        db.session.commit()
    except NoResultFound:
        raise TokenNotFound("Could not find the token {}".format(token_id))
def unrevoke_token(token_id, user):
    """
    Unrevokes the given token. Raises a TokenNotFound error if the token does
    not exist in the database

    NOTE(review): same issue as revoke_token -- ``NoResultFound`` is never
    imported here, so the not-found path raises NameError.
    """
    try:
        # Lookup is scoped to the owning user, mirroring revoke_token.
        token = TokenBlacklist.query.filter_by(id=token_id, user_identity=user).one()
        token.revoked = False
        db.session.commit()
    except NoResultFound:
        raise TokenNotFound("Could not find the token {}".format(token_id))
def prune_database():
    """
    Delete tokens that have expired from the database.

    How (and if) you call this is entirely up you. You could expose it to an
    endpoint that only administrators could call, you could run it as a cron,
    set it up with flask cli, etc.
    """
    # Compare against local time, matching the naive local datetimes produced
    # by _epoch_utc_to_datetime when tokens were stored.
    now = datetime.now()
    expired = TokenBlacklist.query.filter(TokenBlacklist.expires < now).all()
    for token in expired:
        db.session.delete(token)
    # Single commit after all deletes.
    db.session.commit()
| [
"reddimohana@gmail.com"
] | reddimohana@gmail.com |
0c3368f28cc8a7287541310f6418e199dc6aefbd | 7bf8719d2f31702d8161329c6f1553e15643802a | /products/migrations/0001_initial.py | 2c2451f1e1e290f4a353117c47068cff81bc8243 | [] | no_license | Code-Institute-Submissions/e-commerce | d3529ac7935daf7f767a07d9de8dab33390bec6b | 2309c2b80ad785ad56024473f8e2a36b73cc4e66 | refs/heads/master | 2020-03-17T10:10:37.660519 | 2018-05-14T06:57:44 | 2018-05-14T06:57:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-20 14:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``Product`` table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=254)),
                ('description', models.TextField()),
                # Price up to 9999.99 (6 digits, 2 decimal places).
                ('price', models.DecimalField(decimal_places=2, max_digits=6)),
                ('image', models.ImageField(upload_to='images')),
            ],
        ),
] | [
"cristian.burloiu@yahoo.ro"
] | cristian.burloiu@yahoo.ro |
ff0ee0e4588368b12e9f15bb47f3ded00562d6b7 | eda355ad3bd32d230c9fa55e816dbd8658afe2c7 | /khash.py | f750deaf7126247dac716725da7ba89a61417bd5 | [
"MIT"
] | permissive | forging2012/khashmir-1 | d311904706aecedfd2bb2c17d777b963e6afae2f | 6595c1ff4b8ed538dfa0633ccdf7730d6ad5bb8d | refs/heads/master | 2021-05-26T21:01:34.691621 | 2011-06-14T21:13:38 | 2011-06-14T21:13:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,096 | py | ## Copyright 2002-2003 Andrew Loewenstern, All Rights Reserved
# see LICENSE.txt for license information
from sha import sha
import whrandom
#this is ugly, hopefully os.entropy will be in 2.4
# Prefer the compiled `entropy` extension when present; otherwise fall back
# to a whrandom-based generator.  NOTE(review): the fallback is NOT
# cryptographically secure — os.urandom is the modern replacement.
try:
    from entropy import entropy
except ImportError:
    def entropy(n):
        # Return n pseudo-random bytes as a str (Python 2 byte string).
        s = ''
        for i in range(n):
            s += chr(whrandom.randint(0,255))
        return s
def intify(hstr):
    """160-bit hash as a 20-byte big-endian string -> long integer."""
    assert len(hstr) == 20
    # Go through the hex representation rather than shifting byte by byte.
    hexdigits = hstr.encode('hex')
    return long(hexdigits, 16)
def stringify(num):
    """Long integer -> 20-byte big-endian string (left-padded with NULs)."""
    # hex() may append a trailing 'L' for longs; drop it.
    digits = hex(num)[2:].rstrip('L')
    # decode('hex') needs an even number of hex digits.
    if len(digits) % 2:
        digits = '0' + digits
    raw = digits.decode('hex')
    return raw.rjust(20, '\x00')
def distance(a, b):
    """XOR metric between two 160-bit hashes given as 20-byte strings."""
    left = intify(a)
    right = intify(b)
    return left ^ right
def newID():
    """Return a fresh pseudorandom globally unique 20-byte ID string."""
    # SHA-1 of 20 entropy bytes yields a uniformly distributed 160-bit ID.
    return sha(entropy(20)).digest()
def newIDInRange(min, max):
    # Random ID string in [min, max).  NOTE(review): parameters shadow the
    # min/max builtins; kept for interface compatibility.
    return stringify(randRange(min,max))
def randRange(min, max):
    # Uniform random long in [min, max), driven by a fresh 160-bit ID.
    return min + intify(newID()) % (max - min)
def newTID():
    # Random transaction ID in [-2**30, 2**30).
    return randRange(-2**30, 2**30)
### Test Cases ###
import unittest
class NewID(unittest.TestCase):
    """Sanity checks for newID()."""
    def testLength(self):
        # IDs are SHA-1 digests, hence always 20 bytes.
        self.assertEqual(len(newID()), 20)
    def testHundreds(self):
        for x in xrange(100):
            # Fix: the original `self.testLength` was a bare attribute
            # access and never actually ran the check.
            self.testLength()
class Intify(unittest.TestCase):
    """Tests for intify(): known values and big-endian byte order."""
    # (all-zero string, 0) and (all-0xff string, 2**160 - 1).
    known = [('\0' * 20, 0),
             ('\xff' * 20, 2L**160 - 1),
             ]
    def testKnown(self):
        # NOTE(review): loop variable `str` shadows the builtin.
        for str, value in self.known:
            self.assertEqual(intify(str), value)
    def testEndianessOnce(self):
        # Pick an ID whose last byte can be incremented without carry.
        h = newID()
        while h[-1] == '\xff':
            h = newID()
        # Bumping the LAST byte must bump the integer by exactly 1,
        # proving the string is interpreted big-endian.
        k = h[:-1] + chr(ord(h[-1]) + 1)
        self.assertEqual(intify(k) - intify(h), 1)
    def testEndianessLots(self):
        for x in xrange(100):
            self.testEndianessOnce()
class Disantance(unittest.TestCase):
    """Tests for the XOR distance metric.

    NOTE(review): the class name is a typo for "Distance"; left unchanged
    so external references keep working.
    """
    # (pair of hashes, expected distance).  Identical inputs -> 0.
    known = [
        (("\0" * 20, "\xff" * 20), 2**160L -1),
        ((sha("foo").digest(), sha("foo").digest()), 0),
        ((sha("bar").digest(), sha("bar").digest()), 0)
    ]
    def testKnown(self):
        for pair, dist in self.known:
            self.assertEqual(distance(pair[0], pair[1]), dist)
    def testCommutitive(self):
        # XOR metric identity: d(x,y) ^ d(y,z) == d(x,z).
        for i in xrange(100):
            x, y, z = newID(), newID(), newID()
            self.assertEqual(distance(x,y) ^ distance(y, z), distance(x, z))
class RandRange(unittest.TestCase):
    """Checks that randRange(lo, hi) always lands inside [lo, hi)."""
    def testOnce(self):
        a = intify(newID())
        b = intify(newID())
        # Order the two random bounds before testing containment.
        if a < b:
            c = randRange(a, b)
            self.assertEqual(a <= c < b, 1, "output out of range %d %d %d" % (b, c, a))
        else:
            c = randRange(b, a)
            assert b <= c < a, "output out of range %d %d %d" % (b, c, a)
    def testOneHundredTimes(self):
        for i in xrange(100):
            self.testOnce()
# Run the unit tests when the module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"casey.marshall@memeo-inc.com"
] | casey.marshall@memeo-inc.com |
0d63ef8757c6bbbe4b2869b89a3fa0c54f9021dd | 84da64c474e17a039ad22337ff111c6bf87ee0c6 | /comprehension.py | 4d01c3cb6723f76618cd075aa70f02f863069179 | [] | no_license | karibou/python_training | 525829bd6c046c82daa4a9d3828ca6652b046e11 | 9d0a9bd1d02675f5c6eff185cc5275d84a6a7ac0 | refs/heads/master | 2020-09-22T01:41:19.246336 | 2014-10-13T11:28:54 | 2014-10-13T11:28:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | #!/usr/bin/python
l = ['toto','tata','tutu']
l2 = []
for i in l:
if 'o' in i:
l2.append(i)
print l2
l3 = [ i for i in l if 'o' in i]
print l3
| [
"louis.bouchard@ubuntu.com"
] | louis.bouchard@ubuntu.com |
5e558014567ad4fa8515820433c67c0266299166 | b2b54285bcb74399578b53482bcc86df7c57add0 | /interfaces/mainWindow.py | 9cff4d5cbbc00714ee9c73ea5e67e60051fc9776 | [] | no_license | ZalmanHack/e-circuit_builder | 75ed41f2ee5cf3b46275249896d80076963dc0bb | b38156be3a4df49bf06de1ab767de7b9ee3e27e4 | refs/heads/master | 2022-03-24T09:22:14.305186 | 2019-12-22T22:50:31 | 2019-12-22T22:50:31 | 229,633,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,559 | py | import sys
import pickle
from functions.ecircuit import ECircuit
from interfaces import mainWindowUI
from interfaces.myGraphicsView import MyGraphicView
from PyQt5.Qt import *
class MainWindow(QMainWindow, mainWindowUI.Ui_MainWindow):
    """Main window of the E-circuit builder.

    Hosts the adjacency table, the graphics scene and the branches text
    pane, and drives an ECircuit worker object living in its own QThread.
    """

    class SignalsForECircuit(QObject):
        """Signals used to command the ECircuit worker across threads."""
        setTable = pyqtSignal(list)
        getMatrix = pyqtSignal()
        startBuild = pyqtSignal()
        startMinimize = pyqtSignal()
        startStruct = pyqtSignal()

    # Emitted from closeEvent() so other components can shut down.
    closed = pyqtSignal()

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setupUi(self)
        self.initUI()
        self.initECircuit()

    def initUI(self):
        """Configure static widgets: table, scene, text pane, toggles."""
        self.setWindowIcon(QIcon('./icons/icon.ico'))
        self.window().setWindowTitle("Построитель Е-Схем")
        # table initialisation -----------------------------------------------------------------------------------
        self.initModel()
        # custom graphics scene ----------------------------------------------------------------------------------
        self.graphicView = MyGraphicView(self)
        self.gridLayout.addWidget(self.graphicView)
        # text pane showing the circuit branches -----------------------------------------------------------------
        font = QFont()
        font.setPixelSize(14)
        font.setBold(False)
        font.setFamily("Arial")
        self.plainTextEdit.setFont(font)
        self.plainTextEdit.setReadOnly(True)
        # panel-visibility toggle buttons ------------------------------------------------------------------------
        self.showECircuit.setCheckable(True)
        self.showBranches.setCheckable(True)
        self.showTable.setCheckable(True)
        self.showECircuit.setChecked(True)
        self.showBranches.setChecked(True)
        self.showTable.setChecked(True)
        # bottom status block ------------------------------------------------------------------------------------
        self.setBottomInfo()
        # disabled elements -------------------------- ! ! ! ! ! ! -----------------------------------------------
        # self.pushMinimization.deleteLater()
        # self.pushStructuring.deleteLater()
        # self.pushDestructuring.deleteLater()
        self.menuInfo.deleteLater()

    def initModel(self):
        """Create a fresh 3-column table model seeded with the START row."""
        self.model = QStandardItemModel()
        self.model.setColumnCount(3)
        self.model.setRowCount(1)
        self.model.setItem(0,0,QStandardItem("START"))
        self.model.dataChanged.connect(self.on_model_dataChanged)
        self.tableView.setModel(self.model)
        self.tableView.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.tableView.horizontalHeader().setSectionResizeMode(0, QHeaderView.Stretch)
        self.tableView.horizontalHeader().setSectionResizeMode(1, QHeaderView.Stretch)
        self.tableView.horizontalHeader().setSectionResizeMode(2, QHeaderView.Stretch)

    def initECircuit(self):
        """Create the ECircuit worker, wire signals, and start its thread."""
        self.items = []
        self.ec_signals = self.SignalsForECircuit()
        self.e_circuit_thread = QThread(self)
        self.e_circuit = ECircuit()
        self.ec_signals.setTable.connect(self.e_circuit.setTable)
        self.ec_signals.startBuild.connect(self.e_circuit.build)
        self.ec_signals.startMinimize.connect(self.e_circuit.minimize)
        self.ec_signals.startStruct.connect(self.e_circuit.structuring)
        self.e_circuit.built.connect(self.built)  # builder reports matrix and text length
        self.e_circuit.error.connect(self.msg_error)
        self.e_circuit.moveToThread(self.e_circuit_thread)
        self.e_circuit_thread.start()

    @pyqtSlot(QModelIndex)
    def on_model_dataChanged(self, index: QModelIndex):
        """Mirror table edits into self.items and trigger a rebuild."""
        # Ignore the edit until every cell of the row is filled in.
        for column in range(0, self.model.columnCount()):
            if self.model.index(index.row(),column).data() is None or self.model.index(index.row(),column).data() == '':
                return
        # Editing the last row appends a fresh empty row for the next entry.
        if index.row() == self.model.rowCount()-1:
            self.model.appendRow([QStandardItem(), QStandardItem(), QStandardItem()])
        # New row: copy its cells into self.items and rebuild the circuit.
        if index.row() not in range(0, len(self.items)):
            temp_row = []
            for column in range(0, self.model.columnCount()):
                temp_row.append(self.model.index(index.row(), column).data())
            self.items.append(temp_row)
            self.ec_signals.setTable.emit(self.items)
            self.ec_signals.startBuild.emit()
            return
        # Existing row changed: update the single cell and rebuild.
        if self.items[index.row()][index.column()] != self.model.index(index.row(), index.column()).data():
            self.items[index.row()][index.column()] = self.model.index(index.row(), index.column()).data()
            self.ec_signals.setTable.emit(self.items)
            self.ec_signals.startBuild.emit()
            return

    @pyqtSlot()
    def on_pushClear_triggered(self):
        """Reset the table, text pane, scene and status bar."""
        self.initModel()
        self.items.clear()
        self.plainTextEdit.clear()
        self.graphicView.scene.clear()
        self.graphicView.scene.setSceneRect(0, 0, 50, 50)
        self.setBottomInfo()

    @pyqtSlot()
    def on_pushMinimization_triggered(self):
        # Ask the worker to minimize the current circuit.
        self.ec_signals.setTable.emit(self.items)
        self.ec_signals.startMinimize.emit()

    @pyqtSlot()
    def on_pushStructuring_triggered(self):
        # Ask the worker to structure the current circuit.
        self.ec_signals.setTable.emit(self.items)
        self.ec_signals.startStruct.emit()

    @pyqtSlot()
    def on_pushExport_triggered(self):
        """Export the scene as a PNG image at 2x resolution."""
        file_name = QFileDialog.getSaveFileName(self, "Сохранить файл как", "Схема", "PNG(*.png)")
        if file_name[0] != '':
            image = QImage(self.graphicView.scene.width()*2, self.graphicView.scene.height()*2, QImage.Format_ARGB32_Premultiplied)
            image.fill(QColor(Qt.white))
            painter = QPainter(image)
            painter.setRenderHint(QPainter.Antialiasing)
            self.graphicView.scene.render(painter)
            painter.end()
            image.save(file_name[0])

    @pyqtSlot()
    def on_pushSave_triggered(self):
        """Serialize the adjacency table to an .ecb file via pickle."""
        file_name = QFileDialog.getSaveFileName(self, "Сохранить файл", "Схема", "ECB(*.ecb)")
        if file_name[0] != '':
            with open(file_name[0], 'wb') as file:
                pickle.dump(self.items, file)

    @pyqtSlot()
    def on_pushOpen_triggered(self):
        """Load an .ecb file, repopulate the table and rebuild the circuit.

        NOTE(review): pickle.load on a user-chosen file executes arbitrary
        code if the file is malicious — only open trusted files.
        """
        file_name = QFileDialog.getOpenFileName(self, "Открыть файл", "Схема", "ECB(*.ecb)")
        if file_name[0] != '':
            with open(file_name[0], 'rb') as file:
                info = pickle.load(file)
                if type(info) == list and len(info) > 0:
                    self.on_pushClear_triggered()
                    self.model.setRowCount(0)
                    for row in info:
                        self.model.appendRow([QStandardItem(row[0]), QStandardItem(row[1]), QStandardItem(row[2])])
                        self.items.append([row[0], row[1], row[2]])
                    self.ec_signals.setTable.emit(self.items)
                    self.ec_signals.startBuild.emit()

    @pyqtSlot()
    def on_showECircuit_triggered(self):
        self.panelECircuit.setVisible(self.showECircuit.isChecked())

    @pyqtSlot()
    def on_showBranches_triggered(self):
        self.panelBranches.setVisible(self.showBranches.isChecked())

    @pyqtSlot()
    def on_showTable_triggered(self):
        self.panelTable.setVisible(self.showTable.isChecked())

    @pyqtSlot()
    def on_closeTable_clicked(self):
        # Closing a panel also unchecks its menu toggle.
        self.showTable.setChecked(False)
        self.on_showTable_triggered()

    @pyqtSlot()
    def on_closeECircuit_clicked(self):
        self.showECircuit.setChecked(False)
        self.on_showECircuit_triggered()

    @pyqtSlot()
    def on_closeBranches_clicked(self):
        self.showBranches.setChecked(False)
        self.on_showBranches_triggered()

    @pyqtSlot(QCloseEvent)
    def closeEvent(self, event):
        # Notify listeners before delegating to the default close handling.
        self.closed.emit()
        return super(MainWindow, self).closeEvent(event)

    @pyqtSlot(str)
    def msg_error(self, text):
        """Show a modal information box with the worker's error text."""
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Information)
        msg.setText(text)
        msg.setWindowTitle("Сообщение")
        msg.exec_()

    @pyqtSlot(list, list, list, int, int)
    def built(self, branches: list, matrix: list, items: list, textLen: int, quantityKnots: int):
        """Receive build results from the worker and refresh all views."""
        if items:
            self.items = items
            self.updateTableView(items)
        # count the distinct elements (first column, START excluded)
        elements = []
        for row in self.items:
            if row[0] != "START" and row[0] not in elements:
                elements.append(row[0])
        # refresh scene, branches pane and status bar
        self.updateGraphisView(matrix, textLen)
        self.updateBranchesText(branches)
        self.setBottomInfo(len(branches), len(elements), quantityKnots)

    def setBottomInfo(self, branches: int = 0, quantityElements: int = 0, quantityKnots: int = 0):
        """Update the bottom status labels (branches/elements/knots)."""
        self.labelBranches.setText("Ветви: {0}".format(branches))
        self.labelElements.setText("Элементы: {0}".format(quantityElements))
        self.labelKnots.setText("Узлы: {0}".format(quantityKnots))
        self.labelEmty.setText("Данные взяты из таблицы смежности")

    @pyqtSlot(list)
    def updateBranchesText(self, branches: list):
        """Render each branch as a numbered '---'-joined line of text."""
        self.plainTextEdit.clear()
        number = 1
        for branch in branches:
            self.plainTextEdit.appendPlainText("{0}) {1}".format(number, "---".join(branch)))
            number += 1

    @pyqtSlot(list)
    def updateTableView(self, items: list):
        """Replace the table model contents with the given rows."""
        if len(items) > 0:
            self.initModel()
            self.model.setRowCount(0)
            self.model.itemData(QModelIndex()).clear()
            self.items = items
            for row in range(0, len(self.items)):
                items_row = []
                for column in range(0, len(self.items[row])):
                    items_row.append(QStandardItem(self.items[row][column]))
                self.model.appendRow(items_row)

    @pyqtSlot(list, int)
    def updateGraphisView(self, matrix: list, textLen: int):
        """Redraw the scene from the matrix, or clear it when empty."""
        if len(matrix) > 0:
            self.graphicView.setMatrix(matrix)
            self.graphicView.setTextSetting(textLen, 25)
            self.graphicView.draw()
        else:
            self.graphicView.scene.clear()
"dobrenko44@gmail.com"
] | dobrenko44@gmail.com |
d06a16fc3cb7202fdd9058558cf45839be272a0b | e0980f704a573894350e285f66f4cf390837238e | /.history/menus/wagtail_hooks_20201030120530.py | 0719f7961b7e5d8a34a3fef28930d93e6014a64d | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | """ Kategoria zostanie dodana w pasku bocznym u admina"""
from wagtail.contrib.modeladmin.options import ModelAdmin
import ModelAdmin, decorator
class MenuAdmin(ModelAdmin) | [
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
8b0fa8d0528ab3c49f1ccebb016a8ebf942927bc | 6234b81db8f4edce8e33fb80602a5b334a5f7587 | /wiki/encyclopedia/views.py | cdb0591a8cf8e7cb0ae73f58b2905471895ae36e | [] | no_license | Razeen-Shaikh/cs50-web-projects | 000b3ac03e5f35cf1fc724dec3159967d1d55741 | be597e545cd3cb6d66ff3fec171f42a2ed5147ff | refs/heads/master | 2023-01-11T07:41:03.110580 | 2020-11-07T05:56:29 | 2020-11-07T05:56:29 | 305,310,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,991 | py | from django.shortcuts import render, redirect
from django import forms
from . import util
from random import choice
class NewEntryForm(forms.Form):
    """Form for creating or editing a wiki entry (title + markdown content)."""
    title = forms.CharField(
        label="Title",
        widget=forms.TextInput(
            attrs={
                'class': 'ml-2 px-2'
            }
        )
    )
    content = forms.CharField(
        label="",
        widget=forms.Textarea(
            attrs={
                'class': 'my-2'
            }
        )
    )
def index(request):
    """Render the front page listing every encyclopedia entry."""
    return render(request, "encyclopedia/index.html", {
        "entries": util.list_entries(),
    })
def create(request):
    """Create a new encyclopedia entry.

    GET renders an empty form; POST validates it, rejects duplicate titles
    (case-insensitively), then saves and redirects to the new entry.
    """
    entries = util.list_entries()
    if request.method == 'POST':
        form = NewEntryForm(request.POST)
        # Fix: is_valid is a method — the bare attribute was always truthy.
        if form.is_valid():
            title = form.cleaned_data['title']
            content = form.cleaned_data['content']
            # Fix: check ALL existing entries before saving.  The original
            # saved and redirected on the first non-matching entry, and
            # never saved at all when the entry list was empty.
            for entry in entries:
                if entry.upper() == title.upper():
                    # Fix: template path typo ("encylopedia" -> "encyclopedia").
                    return render(request, "encyclopedia/add.html", {
                        "name": "Create Page",
                        "form": "Page Already Exist",
                    })
            util.save_entry(title, content)
            return redirect("wiki:entry", title=title)
    # GET, or an invalid POST (the original returned None here): show the form.
    return render(request, "encyclopedia/add.html", {
        "name": "Create Page",
        "form": NewEntryForm(),
    })
def edit(request, title):
    """Edit an existing entry's content; the title itself is read-only."""
    content = util.get_entry(title)
    form = NewEntryForm(initial={'title': title, 'content': content})
    # Only the content may change while editing.
    form.fields['title'].widget.attrs['readonly'] = True
    if request.method == 'POST':
        # Fix: the original tested `form.is_valid` (always truthy) on a form
        # never bound to the POST data, then saved once PER existing entry.
        new_content = request.POST.get('content')
        if new_content is not None:
            util.save_entry(title, new_content)
        return redirect("wiki:entry", title=title)
    return render(request, "encyclopedia/add.html", {
        "name": "Edit Page",
        "form": form,
    })
def entry(request, title):
    """Render one entry page, or the error page if the entry is missing."""
    content = util.get_entry(title)
    # Fix: identity comparison with None (`is None`, not `== None`).
    if content is None:
        return render(request, "encyclopedia/error.html")
    return render(request, "encyclopedia/entry.html", {
        "title": title,
        "content": content,
    })
def random(request):
    """Redirect to a randomly chosen entry page."""
    picked = choice(util.list_entries())
    return redirect("wiki:entry", title=picked)
def search(request):
    """Search entries: exact match redirects, substring matches are listed."""
    # Fix: request.GET['q'] raised KeyError when the parameter was missing.
    query = request.GET.get('q', '')
    if not query:
        return redirect("wiki:index")
    needle = query.upper()
    matches = []
    for entry in util.list_entries():
        if needle == entry.upper():
            # Fix: every other redirect to wiki:entry passes `title=`;
            # the original used `entry=`, an unknown URL kwarg.
            return redirect("wiki:entry", title=entry)
        if needle in entry.upper():
            matches.append(entry)
    return render(request, "encyclopedia/index.html", {
        "entries": matches,
    })
| [
"razeen9796@outlook.com"
] | razeen9796@outlook.com |
3e9b9b44893d5a4cc092a14ba7839da8c1f34f86 | dcc1c8e2184bcb61f40efa3d5714e721d1dcd5b4 | /questions/wsgi.py | 502f4e3ddd39b3790e3008694f77582112b1f601 | [] | no_license | yashk2810/InterviewDB | 68616bff03abe5f999af381c66f3efdcb0a01100 | 144813b5176a23a45a61ad1ee0cb93db6f21590b | refs/heads/master | 2021-01-13T03:34:01.115019 | 2016-12-25T05:18:50 | 2016-12-25T05:18:50 | 77,311,638 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for questions project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Point Django at the project settings before constructing the application.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "questions.settings")

# WSGI servers (gunicorn, uWSGI, ...) import this module-level callable.
application = get_wsgi_application()
| [
"ykat95@gmail.com"
] | ykat95@gmail.com |
584ba2592b9203d3fc17548082493d44770f5f55 | 18f152fd5028f92769d991291e967b39fb4fdae7 | /problem23.py | a64c9576760bc38c309c6f08c89faa4cf7e4270f | [] | no_license | ethanluckett/projecteuler | a9a7ce36fbb1118ed4b3951d7ba80d6b00622821 | 46b0348a2d94c32217cf77adb8d81f93d96c268c | refs/heads/master | 2020-05-18T20:55:29.760927 | 2019-05-08T15:23:15 | 2019-05-08T15:23:15 | 184,646,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | import math
import sys
def is_abundant(n):
    """Return True if the sum of the proper divisors of n exceeds n."""
    divisors = set([1])
    # Trial division up to sqrt(n); each hit i contributes the pair (i, n//i).
    for i in range(2, int(math.ceil(math.sqrt(n))) + 1):
        if n % i == 0 and n != i:
            divisors.add(i)
            # Fix: n / i is true division in Python 3 and stored float
            # divisors; floor division keeps exact integer values.
            divisors.add(n // i)
    return sum(divisors) > n
def problem23():
    """Project Euler 23: sum of all positive integers that cannot be
    written as the sum of two abundant numbers.

    Every integer above 28123 is known to be such a sum, so only values
    below that limit need checking.
    """
    limit = 28123
    # Start with all candidates; remove those representable as a sum.
    non_abundant_sums = set(range(limit))
    abundant_nums = [n for n in range(1, limit) if is_abundant(n)]
    # Fix: addition is commutative, so only pairs (i, j) with j >= i need
    # to be visited — this halves the work of the original double loop.
    for idx, i in enumerate(abundant_nums):
        for j in abundant_nums[idx:]:
            if i + j > limit:
                break  # abundant_nums is ascending, later j only grow
            non_abundant_sums.discard(i + j)
    return sum(non_abundant_sums)
if __name__ == '__main__':
    solution = problem23()
    print(solution)
    # Known-good answer for Project Euler problem 23.
    assert solution == 4179871
| [
"ethanluckett@gmail.com"
] | ethanluckett@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.