| hexsha (string, 40) | size (int64, 3 to 1.03M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, 3 to 972) | max_stars_repo_name (string, 6 to 130) | max_stars_repo_head_hexsha (string, 40 to 78) | max_stars_repo_licenses (list, 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24, nullable) | max_stars_repo_stars_event_max_datetime (string, 24, nullable) | max_issues_repo_path (string, 3 to 972) | max_issues_repo_name (string, 6 to 130) | max_issues_repo_head_hexsha (string, 40 to 78) | max_issues_repo_licenses (list, 1 to 10) | max_issues_count (int64, 1 to 116k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24, nullable) | max_issues_repo_issues_event_max_datetime (string, 24, nullable) | max_forks_repo_path (string, 3 to 972) | max_forks_repo_name (string, 6 to 130) | max_forks_repo_head_hexsha (string, 40 to 78) | max_forks_repo_licenses (list, 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24, nullable) | max_forks_repo_forks_event_max_datetime (string, 24, nullable) | content (string, 3 to 1.03M) | avg_line_length (float64, 1.13 to 941k) | max_line_length (int64, 2 to 941k) | alphanum_fraction (float64, 0 to 1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| c06e23ed799b3c26013ae87b60031567becb3c33 | 15,687 | py | Python | app.py | G-Radhika/BooksCatalogApp | 595a84b55086e4ee05c2b3b1107fa8d3506ca965 | ["MIT"] | null | null | null | app.py | G-Radhika/BooksCatalogApp | 595a84b55086e4ee05c2b3b1107fa8d3506ca965 | ["MIT"] | null | null | null | app.py | G-Radhika/BooksCatalogApp | 595a84b55086e4ee05c2b3b1107fa8d3506ca965 | ["MIT"] | null | null | null |
from flask import Flask, render_template, request, redirect
from flask import jsonify, url_for, flash, session as login_session
from sqlalchemy import create_engine, asc
from sqlalchemy.orm import sessionmaker
from database_setup import Base, BookSeries, IndividualBook, User
import random
import string
# imports for oauth2client
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
from flask import make_response
import requests
app = Flask(__name__)
CLIENT_ID = json.loads(
open('client_secrets.json', 'r').read())['web']['client_id']
APPLICATION_NAME = "Books Catalog Application"
# Connect to Database and create database session
engine = create_engine('sqlite:///bookseries_User.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
@app.route('/login')
def showLogin():
'''Create anti-forgery state token'''
    state = ''.join(random.choice(string.ascii_uppercase + string.digits)
                    for x in range(32))
login_session['state'] = state
# return "The current session state is %s" % login_session['state']
return render_template('login.html', STATE=state)
@app.route('/gconnect', methods=['POST'])
def gconnect():
'''Gathers data from Google Sign In API and places
it inside a session variable.'''
# Validate state token
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Obtain authorization code
    code = request.data.decode('utf-8')
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# If there was an error in the access token info, abort.
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is used for the intended user.
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(
json.dumps("Token's user ID doesn't match given user ID."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is valid for this app.
if result['issued_to'] != CLIENT_ID:
response = make_response(
json.dumps("Token's client ID does not match app's."), 401)
# print "Token's client ID does not match app's."
response.headers['Content-Type'] = 'application/json'
return response
stored_access_token = login_session.get('access_token')
stored_gplus_id = login_session.get('gplus_id')
if stored_access_token is not None and gplus_id == stored_gplus_id:
response = make_response(json.dumps('Current user is already connected.'),200)
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use.
login_session['access_token'] = credentials.access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
# Check if user exists
user_id = getUserID(login_session['email'])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += ' " style = "width: 300px; height: 300px;border-radius: 150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
flash("you are now logged in as %s" % login_session['username'])
#print "done!"
return output
# User Helper Functions
def createUser(login_session):
newUser = User(name=login_session['username'], email=login_session['email'], picture=login_session['picture'])
session.add(newUser)
session.commit()
user = session.query(User).filter_by(email=login_session['email']).one_or_none()
return user.id
def getUserInfo(user_id):
user = session.query(User).filter_by(id=user_id).one_or_none()
return user
def getUserID(email):
try:
user = session.query(User).filter_by(email=email).one_or_none()
return user.id
except:
return None
@app.route('/gdisconnect')
def gdisconnect():
'''DISCONNECT - Revoke a current user's token and
reset their login_session'''
    access_token = login_session.get('access_token')
# print 'In gdisconnect access token is %s', access_token
# print 'User name is: '
# print login_session['username']
if access_token is None:
# print 'Access Token is None'
response = make_response(json.dumps('Current user not connected.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % login_session['access_token']
h = httplib2.Http()
result = h.request(url, 'GET')[0]
#print 'result is '
#print result
if result['status'] == '200':
del login_session['access_token']
del login_session['gplus_id']
del login_session['username']
del login_session['email']
del login_session['picture']
response = make_response(json.dumps('Successfully disconnected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
else:
        response = make_response(
            json.dumps('Failed to revoke token for given user.'), 400)
response.headers['Content-Type'] = 'application/json'
return response
# jsonify data
@app.route('/bookseries/<int:bookseries_id>/book/JSON')
def IndividualBookJSON(bookseries_id):
bookseries = session.query(BookSeries).filter_by(id=bookseries_id).one_or_none()
items = session.query(IndividualBook).filter_by(
bookseries_id=bookseries_id).all()
return jsonify(IndividualBook=[i.serialize for i in items])
@app.route('/bookseries/<int:bookseries_id>/book/<int:book_id>/JSON')
def bookItemJSON(bookseries_id, book_id):
Book_Item = session.query(IndividualBook).filter_by(id=book_id).one_or_none()
return jsonify(Book_Item=Book_Item.serialize)
@app.route('/bookseries/JSON')
def bookseriesJSON():
bookseries = session.query(BookSeries).all()
return jsonify(bookseries=[i.serialize for i in bookseries])
@app.route('/')
@app.route('/booksCatalog')
def home():
return render_template('booksCatalog.html')
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/bookseries')
def showBookSeries():
'''Show titles of book series.'''
bookseries = session.query(BookSeries).all()
if 'username' not in login_session:
return render_template('public_bookSeries.html', bookseries=bookseries)
else:
return render_template('bookSeries.html', bookseries=bookseries)
@app.route('/bookseries/new/', methods=['GET', 'POST'])
def bookSeries_new():
'''ADD NEW BOOK Series'''
if 'username' not in login_session:
return redirect('/login')
if request.method == 'POST':
newBookSeries = BookSeries(name=request.form['name'],
user_id=login_session['user_id'])
session.add(newBookSeries)
flash('New Book Series %s Successfully Created' % newBookSeries.name)
session.commit()
return redirect(url_for('showBookSeries'))
else:
return render_template('bookSeries_new.html')
# EDIT BOOK SERIES, if you change your mind click
# cancel and land on the Book Series Page
@app.route('/bookseries/<int:bookseries_id>/edit/', methods=['GET', 'POST'])
def bookSeries_edit(bookseries_id):
editedBookSeries = session.query(BookSeries).filter_by(id=bookseries_id).one_or_none()
if 'username' not in login_session:
return redirect('/login')
if editedBookSeries.user_id != login_session['user_id']:
return "<script>function myFunction() {alert('You are not authorized to EDIT this BOOK SERIES. Please create your own LOGIN in order to EDIT.');}</script><body onload='myFunction()''>"
    if request.method == 'POST':
        if request.form['name']:
            editedBookSeries.name = request.form['name']
            session.add(editedBookSeries)
            session.commit()
            flash('Book Series Successfully Edited %s' % editedBookSeries.name)
        return redirect(url_for('showBookSeries'))
else:
return render_template('bookSeries_edit.html', bookseries=editedBookSeries)
# DELETE Book Series, if you change your mind click cancel and land
# on the Book Series Page
@app.route('/bookseries/<int:bookseries_id>/delete/', methods=['GET', 'POST'])
def bookSeries_delete(bookseries_id):
bookSeriesToDelete = session.query(BookSeries).filter_by(id=bookseries_id).one_or_none()
if 'username' not in login_session:
return redirect('/login')
if bookSeriesToDelete.user_id != login_session['user_id']:
return "<script>function myFunction() {alert('You are not authorized to DELETE this BOOK SERIES. Please create your own LOGIN in order to DELETE.');}</script><body onload='myFunction()''>"
if request.method == 'POST':
session.delete(bookSeriesToDelete)
session.commit()
flash('Book Series Successfully DELETED %s' % bookSeriesToDelete.name)
return redirect(url_for('showBookSeries', bookseries_id=bookseries_id))
else:
return render_template('bookSeries_delete.html', bookseries=bookSeriesToDelete)
# Show the individual books in the bookseries.
# This page has the NEW/Edit/Delete functionality
@app.route('/bookseries/<int:bookseries_id>/')
@app.route('/bookseries/<int:bookseries_id>/book')
def showBookList(bookseries_id):
bookseries = session.query(BookSeries).filter_by(id=bookseries_id).one()
creator = getUserInfo(bookseries.user_id)
items = session.query(IndividualBook).filter_by(bookseries_id=bookseries_id).all()
if 'username' not in login_session or creator.id != login_session['user_id']:
return render_template('public_bookList.html', items=items, bookseries=bookseries, creator=creator)
else:
return render_template('bookList.html', items=items, bookseries=bookseries, creator=creator)
## http://localhost:5000/bookseries/11/book/new/
## "POST /bookseries/new/?bookseries_id= HTTP/1.1" 302 -
## Bad Request:The browser (or proxy) sent a request that this server could not understand.
@app.route('/bookseries/<int:bookseries_id>/book/new/', methods=['GET', 'POST'])
def bookList_new(bookseries_id):
if 'username' not in login_session:
return redirect('/login')
bookseries = session.query(BookSeries).filter_by(id=bookseries_id).one()
if request.method == 'POST':
newBook = IndividualBook(name=request.form.get('name'),
author=request.form.get('author'),
language=request.form.get('language'),
year= request.form.get('year'),
genre=request.form.get('genre'),
description=request.form.get('description'),
review=request.form.get('review'),
bookseries_id=bookseries_id,
user_id=bookseries.user_id)
session.add(newBook)
session.commit()
flash('New Book %s Item Successfully Created' % (newBook.name))
return redirect(url_for('showBookList', bookseries_id=bookseries_id))
else:
return render_template('bookList_new.html', bookseries_id=bookseries_id)
@app.route('/bookseries/<int:bookseries_id>/book/<int:book_id>/edit',methods=['GET', 'POST'])
def bookList_edit(bookseries_id, book_id):
if 'username' not in login_session:
return redirect('/login')
bookList_edited = session.query(IndividualBook).filter_by(id=book_id).one_or_none()
bookseries = session.query(BookSeries).filter_by(id=bookseries_id).one_or_none()
if login_session['user_id'] != bookseries.user_id:
return "<script>function myFunction() {alert('You are not authorized to edit BOOK in this series. Please create your own BookSeries in order to edit items.');}</script><body onload='myFunction()''>"
if request.method == 'POST':
if request.form['name']:
bookList_edited.name = request.form['name']
if request.form['author']:
bookList_edited.author = request.form['author']
if request.form['language']:
bookList_edited.language = request.form['language']
        if request.form['description']:
            bookList_edited.description = request.form['description']
session.add(bookList_edited)
session.commit()
flash('Book Successfully Edited')
return redirect(url_for('showBookList', bookseries_id=bookseries_id))
else:
        return render_template('bookList_edit.html', bookseries_id=bookseries_id, item=bookList_edited)
# Delete BOOK
@app.route('/bookseries/<int:bookseries_id>/book/<int:book_id>/delete',methods=['GET', 'POST'])
def bookList_delete(bookseries_id, book_id):
if 'username' not in login_session:
return redirect('/login')
bookseries = session.query(BookSeries).filter_by(id=bookseries_id).one_or_none()
bookToDelete = session.query(IndividualBook).filter_by(id=book_id).one_or_none()
if login_session['user_id'] != bookseries.user_id:
return "<script>function myFunction() {alert('You are not authorized to delete books. Please create your own Book Series in order to so.');}</script><body onload='myFunction()''>"
if request.method == 'POST':
session.delete(bookToDelete)
session.commit()
        flash('Book Successfully Deleted')
return redirect(url_for('showBookList', bookseries_id=bookseries_id))
else:
return render_template('bookList_delete.html', item=bookToDelete)
'''Add new user sign up!'''
@app.route('/register')
def register():
return render_template('register.html')
if __name__ == '__main__':
app.secret_key = 'super_SECRET_key'
app.debug = True
app.run(host='0.0.0.0', port=5000)
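A minimal usage sketch for the read-only JSON endpoints defined above, using Flask's built-in test client; it assumes this module is importable as `app` and that database_setup.py has already created bookseries_User.db:

# Hypothetical usage sketch, not part of the original application.
from app import app as catalog_app

def list_all_series():
    client = catalog_app.test_client()
    response = client.get('/bookseries/JSON')
    # bookseriesJSON() returns {"bookseries": [...]}, one dict per serialized BookSeries row.
    return response.get_json()['bookseries']

def list_books_in_series(bookseries_id):
    client = catalog_app.test_client()
    response = client.get('/bookseries/%d/book/JSON' % bookseries_id)
    return response.get_json()['IndividualBook']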
| 42.860656 | 207 | 0.671894 |

| f92f2efed4ddc52b04e5f912d2d90c9dd2642839 | 10,135 | py | Python | func/train_eval_ops.py | gongda0e/AVT | d6a7032b86416e852c76cc04a20ccabe34f111dc | ["Apache-2.0"] | 102 | 2021-09-24T03:49:34.000Z | 2022-03-29T19:55:50.000Z | func/train_eval_ops.py | gongda0e/AVT | d6a7032b86416e852c76cc04a20ccabe34f111dc | ["Apache-2.0"] | 32 | 2021-10-02T04:31:39.000Z | 2022-03-16T05:46:30.000Z | func/train_eval_ops.py | gongda0e/AVT | d6a7032b86416e852c76cc04a20ccabe34f111dc | ["Apache-2.0"] | 19 | 2021-09-25T01:57:37.000Z | 2022-03-15T07:02:37.000Z |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Modular implementation of the basic train ops
"""
from typing import Dict, Union, Tuple
import torch
import torch.nn as nn
import hydra
from hydra.types import TargetConf
from common import utils
from datasets.base_video_dataset import FUTURE_PREFIX
from models.base_model import PAST_LOGITS_PREFIX
from loss_fn.multidim_xentropy import MultiDimCrossEntropy
class NoLossAccuracy(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, *args, **kwargs):
return {}, {}
class BasicLossAccuracy(nn.Module):
def __init__(self, dataset, device, balance_classes=False):
super().__init__()
kwargs = {'ignore_index': -1}
if balance_classes:
assert dataset.class_balanced_sampling is False, (
'Do not re-weight the losses, and do balanced sampling')
weight = torch.zeros((len(dataset.classes, )),
device=device,
dtype=torch.float)
for cls_id, count in dataset.classes_counts.items():
weight[cls_id] = count
weight = weight / torch.sum(weight) # To get ratios for non -1 cls
weight = 1 / (weight + 0.00001)
kwargs['weight'] = weight
kwargs['reduction'] = 'none' # to get batch level output
self.cls_criterion = MultiDimCrossEntropy(**kwargs)
def forward(self, outputs, target, target_subclips):
"""
Args:
outputs['logits'] torch.Tensor (B, num_classes) or
(B, T, num_classes)
Latter in case of dense prediction
target: {type: (B) or (B, T')}; latter in case of dense prediction
target_subclips: {type: (B, #clips, T)}: The target for each input
frame
"""
losses = {}
accuracies = {}
for tgt_type, tgt_val in target.items():
logits = outputs[f'logits/{tgt_type}']
assert logits.ndim == tgt_val.ndim + 1
loss = self.cls_criterion(logits, tgt_val)
dataset_max_classes = logits.size(-1)
acc1, acc5 = utils.accuracy(logits,
tgt_val,
topk=(1, min(5, dataset_max_classes)))
# Don't use / in loss since I use the config to set weights, and
# can't use / there.
losses[f'cls_{tgt_type}'] = loss
accuracies[f'acc1/{tgt_type}'] = acc1
accuracies[f'acc5/{tgt_type}'] = acc5
# Incur past losses
past_logits_key = f'{PAST_LOGITS_PREFIX}logits/{tgt_type}'
# If this key exists, means we asked for classifier on the last
# layer, so the loss should be incurred.
if past_logits_key in outputs and target_subclips is not None:
past_logits = outputs[past_logits_key]
# Take mode over the frames to get the subclip level loss
past_target = torch.mode(target_subclips[tgt_type], -1)[0]
assert past_logits.shape[:-1] == past_target.shape, (
f'The subclips should be set such that the past logits '
f'and past targets match in shape. Currently they are '
f'{past_logits.shape} and {past_target.shape}')
losses[f'past_cls_{tgt_type}'] = self.cls_criterion(
past_logits, past_target)
# Else likely not using subclips, so no way to do this loss
return losses, accuracies
class Basic:
def __init__(self,
model,
device,
dataset,
cls_loss_acc_fn: TargetConf,
reg_criterion: TargetConf = None):
super().__init__()
self.model = model
self.device = device
self.cls_loss_acc_fn = hydra.utils.instantiate(cls_loss_acc_fn,
dataset, device)
del reg_criterion # not used here
def _basic_preproc(self, data, train_mode):
if not isinstance(data, dict):
video, target = data
# Make a dict so that later code can use it
data = {}
data['video'] = video
data['target'] = target
data['idx'] = -torch.ones_like(target)
if train_mode:
self.model.train()
else:
self.model.eval()
return data
def __call__(
self,
data: Union[Dict[str, torch.Tensor], # If dict
Tuple[torch.Tensor, torch.Tensor]], # vid, target
train_mode: bool = True):
"""
Args:
data (dict): Dictionary of all the data from the data loader
"""
data = self._basic_preproc(data, train_mode)
video = data['video'].to(self.device, non_blocking=True)
target = {}
target_subclips = {}
for key in data['target'].keys():
target[key] = data['target'][key].to(self.device,
non_blocking=True)
outputs, aux_losses = self.model(video,
target_shape=next(
iter(target.values())).shape)
if 'target_subclips' in data:
for key in data['target_subclips'].keys():
target_subclips[key] = data['target_subclips'][key].to(
self.device, non_blocking=True)
else:
target_subclips = None
losses, accuracies = self.cls_loss_acc_fn(outputs, target,
target_subclips)
losses.update(aux_losses)
return data, outputs, losses, accuracies
class PredFutureFeat(Basic):
def __init__(self,
*args,
reg_criterion: TargetConf = None,
future_target: str = 'temp_agg_projected',
incur_loss_style: str = 'separately',
combine_future_losses: TargetConf = {'_target_': 'torch.min'},
cumulative_future: bool = False,
**kwargs):
'''
Args:
incur_loss_style (str): Defines how to incur losses for multiple
futures. Could do 'separately', and then combine using
`combine_future_losses`. Or 'together', such as for MIL-NCE.
'''
super().__init__(*args, **kwargs)
self.reg_criterion = hydra.utils.instantiate(reg_criterion)
self.future_target = future_target
self.incur_loss_style = incur_loss_style
self.combine_future_losses = combine_future_losses
self.cumulative_future = cumulative_future
def __call__(
self,
data: Union[Dict[str, torch.Tensor], # If dict
Tuple[torch.Tensor, torch.Tensor]], # vid, target
train_mode: bool = True):
data = self._basic_preproc(data, train_mode)
video = data['video'].to(self.device, non_blocking=True)
target = {
key: val.to(self.device, non_blocking=True)
for key, val in data['target'].items()
}
batch_size = video.size(0)
if train_mode:
# At test time, I don't sample the extra future video, since
# that is only used during training
all_videos = [video]
nfutures = len(
[key for key in data.keys() if key.startswith(FUTURE_PREFIX)])
for i in range(nfutures):
future_vid = data[f'{FUTURE_PREFIX}_{i}_video'].to(
self.device, non_blocking=True)
all_videos.append(future_vid)
video = torch.cat(all_videos, dim=0) # Add to batch dim
outputs_full, aux_losses = self.model(video)
# Just the actual video for outputs
outputs = {key: val[:batch_size] for key, val in outputs_full.items()}
# if self.cls_loss_wt != 0:
# Doing this makes some layers not have gradients and it gives errors,
# so just leaving it here for now. The gradient should be 0 anyway
losses, accuracies = self.cls_loss_acc_fn(outputs, target)
losses.update(aux_losses)
losses['cls'] = losses['cls']
if train_mode:
# Incur the regression losses, for each of the futures
reg_losses = []
if self.incur_loss_style == 'separately':
for i in range(nfutures):
future_feats = outputs_full[self.future_target][
(i + 1) * batch_size:(i + 2) * batch_size]
if self.cumulative_future:
future_feats = torch.cumsum(future_feats, 0)
# Divide by the position to get mean of features until then
future_feats = future_feats / (torch.range(
1,
future_feats.size(0),
device=future_feats.device,
dtype=future_feats.dtype).unsqueeze(1))
loss = self.reg_criterion(outputs['future_projected'],
future_feats)
reg_losses.append(loss)
final_reg_loss = hydra.utils.call(self.combine_future_losses,
torch.stack(reg_losses))
elif self.incur_loss_style == 'together':
future_feats = outputs_full[self.future_target][batch_size:]
future_feats = future_feats.reshape(
(-1, batch_size, future_feats.size(-1))).transpose(0, 1)
final_reg_loss = self.reg_criterion(
outputs['future_projected'], future_feats)
else:
raise NotImplementedError(self.incur_loss_style)
losses['reg'] = final_reg_loss
return data, outputs, losses, accuracies
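A minimal sketch of the shape contract documented in BasicLossAccuracy.forward, using plain torch cross entropy in place of MultiDimCrossEntropy: the logits carry one extra trailing class dimension relative to the target (for example (B, T, num_classes) against (B, T) for dense prediction), and -1 labels are ignored:

import torch
import torch.nn.functional as F

B, T, C = 4, 8, 10                      # batch, time steps, classes
logits = torch.randn(B, T, C)           # stands in for outputs['logits/<tgt_type>']
target = torch.randint(0, C, (B, T))    # stands in for target[<tgt_type>]
assert logits.ndim == target.ndim + 1   # the assertion made in forward() above
# Flattening the time dimension reduces the dense case to ordinary per-frame cross entropy.
loss = F.cross_entropy(logits.reshape(-1, C), target.reshape(-1), ignore_index=-1)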
| 43.685345 | 83 | 0.551554 |

| 9f6dc01827aeac22b6a3b84669a4595bb6cb8a35 | 17,135 | py | Python | src/python/joint_state_publisher.py | quan-dao/ECN-MANIP | 4b455cb5e0db06d9e24b1a70dd7289a68f29067f | ["MIT"] | null | null | null | src/python/joint_state_publisher.py | quan-dao/ECN-MANIP | 4b455cb5e0db06d9e24b1a70dd7289a68f29067f | ["MIT"] | null | null | null | src/python/joint_state_publisher.py | quan-dao/ECN-MANIP | 4b455cb5e0db06d9e24b1a70dd7289a68f29067f | ["MIT"] | 3 | 2018-11-08T13:32:15.000Z | 2021-09-13T23:33:37.000Z |
#!/usr/bin/env python
import rospy
import random
from python_qt_binding.QtCore import pyqtSlot
from python_qt_binding.QtCore import Qt
from python_qt_binding.QtCore import Signal
from python_qt_binding.QtGui import QFont
from python_qt_binding.QtWidgets import QApplication
from python_qt_binding.QtWidgets import QHBoxLayout
from python_qt_binding.QtWidgets import QLabel
from python_qt_binding.QtWidgets import QLineEdit
from python_qt_binding.QtWidgets import QPushButton
from python_qt_binding.QtWidgets import QSlider
from python_qt_binding.QtWidgets import QVBoxLayout
from python_qt_binding.QtWidgets import QGridLayout
from python_qt_binding.QtWidgets import QScrollArea
from python_qt_binding.QtWidgets import QSpinBox
from python_qt_binding.QtWidgets import QWidget
import xml.dom.minidom
from sensor_msgs.msg import JointState
from math import pi
from threading import Thread
import sys
import signal
import math
RANGE = 10000
def get_param(name, value=None):
private = "~%s" % name
if rospy.has_param(private):
return rospy.get_param(private)
elif rospy.has_param(name):
return rospy.get_param(name)
else:
return value
class JointStatePublisher():
def init_collada(self, robot):
robot = robot.getElementsByTagName('kinematics_model')[0].getElementsByTagName('technique_common')[0]
for child in robot.childNodes:
if child.nodeType is child.TEXT_NODE:
continue
if child.localName == 'joint':
name = child.getAttribute('name')
if child.getElementsByTagName('revolute'):
joint = child.getElementsByTagName('revolute')[0]
else:
rospy.logwarn("Unknown joint type %s", child)
continue
if joint:
limit = joint.getElementsByTagName('limits')[0]
minval = float(limit.getElementsByTagName('min')[0].childNodes[0].nodeValue)
maxval = float(limit.getElementsByTagName('max')[0].childNodes[0].nodeValue)
if minval == maxval: # this is fixed joint
continue
self.joint_list.append(name)
joint = {'min':minval*pi/180.0, 'max':maxval*pi/180.0, 'zero':0, 'position':0, 'velocity':0, 'effort':0}
self.free_joints[name] = joint
def init_urdf(self, robot):
robot = robot.getElementsByTagName('robot')[0]
# Find all non-fixed joints
for child in robot.childNodes:
if child.nodeType is child.TEXT_NODE:
continue
if child.localName == 'joint':
jtype = child.getAttribute('type')
if jtype == 'fixed' or jtype == 'floating':
continue
name = child.getAttribute('name')
self.joint_list.append(name)
if jtype == 'continuous':
minval = -pi
maxval = pi
else:
try:
limit = child.getElementsByTagName('limit')[0]
minval = float(limit.getAttribute('lower'))
maxval = float(limit.getAttribute('upper'))
except:
rospy.logwarn("%s is not fixed, nor continuous, but limits are not specified!" % name)
continue
safety_tags = child.getElementsByTagName('safety_controller')
if self.use_small and len(safety_tags) == 1:
tag = safety_tags[0]
if tag.hasAttribute('soft_lower_limit'):
minval = max(minval, float(tag.getAttribute('soft_lower_limit')))
if tag.hasAttribute('soft_upper_limit'):
maxval = min(maxval, float(tag.getAttribute('soft_upper_limit')))
mimic_tags = child.getElementsByTagName('mimic')
if self.use_mimic and len(mimic_tags) == 1:
tag = mimic_tags[0]
entry = {'parent': tag.getAttribute('joint')}
if tag.hasAttribute('multiplier'):
entry['factor'] = float(tag.getAttribute('multiplier'))
if tag.hasAttribute('offset'):
entry['offset'] = float(tag.getAttribute('offset'))
self.dependent_joints[name] = entry
continue
if name in self.dependent_joints:
continue
if self.zeros and name in self.zeros:
zeroval = self.zeros[name]
elif minval > 0 or maxval < 0:
zeroval = (maxval + minval)/2
else:
zeroval = 0
joint = {'min': minval, 'max': maxval, 'zero': zeroval}
if self.pub_def_positions:
joint['position'] = zeroval
if self.pub_def_vels:
joint['velocity'] = 0.0
if self.pub_def_efforts:
joint['effort'] = 0.0
if jtype == 'continuous':
joint['continuous'] = True
self.free_joints[name] = joint
def __init__(self, joint_manual):
description = get_param('/robot_description')
self.free_joints = {}
self.joint_list = [] # for maintaining the original order of the joints
self.dependent_joints = get_param("dependent_joints", {})
self.use_mimic = get_param('use_mimic_tags', True)
self.use_small = get_param('use_smallest_joint_limits', True)
self.zeros = get_param("zeros")
self.pub_def_positions = get_param("publish_default_positions", True)
self.pub_def_vels = get_param("publish_default_velocities", False)
self.pub_def_efforts = get_param("publish_default_efforts", False)
robot = xml.dom.minidom.parseString(description)
if robot.getElementsByTagName('COLLADA'):
self.init_collada(robot)
else:
self.init_urdf(robot)
num_rows = get_param("num_rows", 0)
self.gui = JointStatePublisherGui("Joint State Publisher", self, num_rows)
joint_manual.addWidget(self.gui)
source_list = get_param("source_list", [])
self.sources = []
for source in source_list:
self.sources.append(rospy.Subscriber(source, JointState, self.source_cb))
self.pub = rospy.Publisher('joint_states', JointState, queue_size=5)
def source_cb(self, msg):
for i in range(len(msg.name)):
name = msg.name[i]
if name not in self.free_joints:
continue
if msg.position:
position = msg.position[i]
else:
position = None
if msg.velocity:
velocity = msg.velocity[i]
else:
velocity = None
if msg.effort:
effort = msg.effort[i]
else:
effort = None
joint = self.free_joints[name]
if position is not None:
joint['position'] = position
if velocity is not None:
joint['velocity'] = velocity
if effort is not None:
joint['effort'] = effort
if self.gui is not None:
# signal instead of directly calling the update_sliders method, to switch to the QThread
self.gui.sliderUpdateTrigger.emit()
def loop(self):
hz = get_param("rate", 10) # 10hz
r = rospy.Rate(hz)
delta = get_param("delta", 0.0)
# Publish Joint States
while not rospy.is_shutdown():
msg = JointState()
msg.header.stamp = rospy.Time.now()
if delta > 0:
self.update(delta)
# Initialize msg.position, msg.velocity, and msg.effort.
has_position = len(self.dependent_joints.items()) > 0
has_velocity = False
has_effort = False
for name, joint in self.free_joints.items():
if not has_position and 'position' in joint:
has_position = True
if not has_velocity and 'velocity' in joint:
has_velocity = True
if not has_effort and 'effort' in joint:
has_effort = True
num_joints = (len(self.free_joints.items()) +
len(self.dependent_joints.items()))
if has_position:
msg.position = num_joints * [0.0]
if has_velocity:
msg.velocity = num_joints * [0.0]
if has_effort:
msg.effort = num_joints * [0.0]
for i, name in enumerate(self.joint_list):
msg.name.append(str(name))
joint = None
# Add Free Joint
if name in self.free_joints:
joint = self.free_joints[name]
factor = 1
offset = 0
# Add Dependent Joint
elif name in self.dependent_joints:
param = self.dependent_joints[name]
parent = param['parent']
factor = param.get('factor', 1)
offset = param.get('offset', 0)
# Handle recursive mimic chain
recursive_mimic_chain_joints = [name]
while parent in self.dependent_joints:
if parent in recursive_mimic_chain_joints:
error_message = "Found an infinite recursive mimic chain"
rospy.logerr("%s: [%s, %s]", error_message, ', '.join(recursive_mimic_chain_joints), parent)
sys.exit(-1)
recursive_mimic_chain_joints.append(parent)
param = self.dependent_joints[parent]
parent = param['parent']
offset += factor * param.get('offset', 0)
factor *= param.get('factor', 1)
joint = self.free_joints[parent]
if has_position and 'position' in joint:
msg.position[i] = joint['position'] * factor + offset
if has_velocity and 'velocity' in joint:
msg.velocity[i] = joint['velocity'] * factor
if has_effort and 'effort' in joint:
msg.effort[i] = joint['effort']
if msg.name or msg.position or msg.velocity or msg.effort:
# Only publish non-empty messages
self.pub.publish(msg)
try:
r.sleep()
except rospy.exceptions.ROSTimeMovedBackwardsException:
pass
def update(self, delta):
        for name, joint in self.free_joints.items():
forward = joint.get('forward', True)
if forward:
joint['position'] += delta
if joint['position'] > joint['max']:
if joint.get('continuous', False):
joint['position'] = joint['min']
else:
joint['position'] = joint['max']
joint['forward'] = not forward
else:
joint['position'] -= delta
if joint['position'] < joint['min']:
joint['position'] = joint['min']
joint['forward'] = not forward
class JointStatePublisherGui(QWidget):
sliderUpdateTrigger = Signal()
def __init__(self, title, jsp, num_rows=0):
super(JointStatePublisherGui, self).__init__()
self.jsp = jsp
self.joint_map = {}
self.vlayout = QVBoxLayout(self)
self.scrollable = QWidget()
self.gridlayout = QGridLayout()
self.scroll = QScrollArea()
self.scroll.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.scroll.setWidgetResizable(True)
font = QFont("Helvetica", 9, QFont.Bold)
### Generate sliders ###
sliders = []
for name in self.jsp.joint_list:
if name not in self.jsp.free_joints:
continue
joint = self.jsp.free_joints[name]
if joint['min'] == joint['max']:
continue
joint_layout = QVBoxLayout()
row_layout = QHBoxLayout()
label = QLabel(name)
label.setFont(font)
row_layout.addWidget(label)
display = QLineEdit("0.00")
display.setAlignment(Qt.AlignRight)
display.setFont(font)
display.setReadOnly(True)
row_layout.addWidget(display)
joint_layout.addLayout(row_layout)
slider = QSlider(Qt.Horizontal)
slider.setFont(font)
slider.setRange(0, RANGE)
slider.setValue(RANGE/2)
joint_layout.addWidget(slider)
self.joint_map[name] = {'slidervalue': 0, 'display': display,
'slider': slider, 'joint': joint}
# Connect to the signal provided by QSignal
slider.valueChanged.connect(self.onValueChanged)
sliders.append(joint_layout)
# Determine number of rows to be used in grid
self.num_rows = num_rows
# if desired num of rows wasn't set, default behaviour is a vertical layout
if self.num_rows == 0:
self.num_rows = len(sliders) # equals VBoxLayout
# Generate positions in grid and place sliders there
self.positions = self.generate_grid_positions(len(sliders), self.num_rows)
for item, pos in zip(sliders, self.positions):
self.gridlayout.addLayout(item, *pos)
# Set zero positions read from parameters
self.center()
# Synchronize slider and displayed value
self.sliderUpdate(None)
# Set up a signal for updating the sliders based on external joint info
self.sliderUpdateTrigger.connect(self.updateSliders)
self.scrollable.setLayout(self.gridlayout)
self.scroll.setWidget(self.scrollable)
self.vlayout.addWidget(self.scroll)
# Buttons for randomizing and centering sliders and
# Spinbox for on-the-fly selecting number of rows
self.ctrbutton = QPushButton('Center', self)
self.ctrbutton.clicked.connect(self.center_event)
self.vlayout.addWidget(self.ctrbutton)
self.setLayout(self.vlayout)
@pyqtSlot(int)
def onValueChanged(self, event):
# A slider value was changed, but we need to change the joint_info metadata.
for name, joint_info in self.joint_map.items():
joint_info['slidervalue'] = joint_info['slider'].value()
joint = joint_info['joint']
joint['position'] = self.sliderToValue(joint_info['slidervalue'], joint)
joint_info['display'].setText("%.2f" % joint['position'])
@pyqtSlot()
def updateSliders(self):
self.update_sliders()
def update_sliders(self):
for name, joint_info in self.joint_map.items():
joint = joint_info['joint']
joint_info['slidervalue'] = self.valueToSlider(joint['position'],
joint)
joint_info['slider'].setValue(joint_info['slidervalue'])
def center_event(self, event):
self.center()
def center(self):
rospy.loginfo("Centering")
for name, joint_info in self.joint_map.items():
joint = joint_info['joint']
joint_info['slider'].setValue(self.valueToSlider(joint['zero'], joint))
def generate_grid_positions(self, num_items, num_rows):
if num_rows==0:
return []
positions = [(y, x) for x in range(int((math.ceil(float(num_items) / num_rows)))) for y in range(num_rows)]
positions = positions[:num_items]
return positions
def sliderUpdate(self, event):
for name, joint_info in self.joint_map.items():
joint_info['slidervalue'] = joint_info['slider'].value()
self.update_sliders()
def valueToSlider(self, value, joint):
return (value - joint['min']) * float(RANGE) / (joint['max'] - joint['min'])
def sliderToValue(self, slider, joint):
pctvalue = slider / float(RANGE)
return joint['min'] + (joint['max']-joint['min']) * pctvalue
if __name__ == '__main__':
    try:
        rospy.init_node('joint_state_publisher')
        # JointStatePublisher expects a layout to attach its GUI widget to,
        # and the Qt widgets it builds require a QApplication to exist first.
        app = QApplication(sys.argv)
        container = QWidget()
        layout = QVBoxLayout(container)
        jsp = JointStatePublisher(layout)
        if jsp.gui is None:
            jsp.loop()
        else:
            container.show()
            Thread(target=jsp.loop).start()
            signal.signal(signal.SIGINT, signal.SIG_DFL)
            sys.exit(app.exec_())
except rospy.ROSInterruptException:
pass
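A standalone sketch of the linear mapping implemented by valueToSlider and sliderToValue above, showing that converting a joint position to the 0..RANGE slider scale and back is lossless:

RANGE = 10000
joint = {'min': -3.14, 'max': 3.14}

def value_to_slider(value, joint):
    return (value - joint['min']) * float(RANGE) / (joint['max'] - joint['min'])

def slider_to_value(slider, joint):
    return joint['min'] + (joint['max'] - joint['min']) * (slider / float(RANGE))

# The round trip stays within floating point tolerance.
assert abs(slider_to_value(value_to_slider(1.0, joint), joint) - 1.0) < 1e-9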
| 38.854875 | 120 | 0.564634 |

| f90f5d930fb15540a14e95e705f189dfa2f40acc | 1,424 | py | Python | test/functional/wallet_resendwallettransactions.py | torunxxx001/MyOriginalCoin | 7f0558d07d065f1d89b7998d6beb088b8e941e7d | ["MIT"] | null | null | null | test/functional/wallet_resendwallettransactions.py | torunxxx001/MyOriginalCoin | 7f0558d07d065f1d89b7998d6beb088b8e941e7d | ["MIT"] | null | null | null | test/functional/wallet_resendwallettransactions.py | torunxxx001/MyOriginalCoin | 7f0558d07d065f1d89b7998d6beb088b8e941e7d | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test resendwallettransactions RPC."""
from test_framework.test_framework import MyOriginalCoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class ResendWalletTransactionsTest(MyOriginalCoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['--walletbroadcast=false']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Should raise RPC_WALLET_ERROR (-4) if walletbroadcast is disabled.
assert_raises_rpc_error(-4, "Error: Wallet transaction broadcasting is disabled with -walletbroadcast", self.nodes[0].resendwallettransactions)
# Should return an empty array if there aren't unconfirmed wallet transactions.
self.stop_node(0)
self.start_node(0, extra_args=[])
assert_equal(self.nodes[0].resendwallettransactions(), [])
# Should return an array with the unconfirmed wallet transaction.
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
assert_equal(self.nodes[0].resendwallettransactions(), [txid])
if __name__ == '__main__':
ResendWalletTransactionsTest().main()
| 43.151515 | 151 | 0.742275 |

| 6830f7381beee5c886faf15a83c033172ec363e5 | 3,768 | py | Python | app.py | srdg/GRAider | 4850002b5794dd34538b62acc1489be0fc43b407 | ["MIT"] | null | null | null | app.py | srdg/GRAider | 4850002b5794dd34538b62acc1489be0fc43b407 | ["MIT"] | null | null | null | app.py | srdg/GRAider | 4850002b5794dd34538b62acc1489be0fc43b407 | ["MIT"] | null | null | null |
try:
from tkinter import *
except:
import subprocess
subprocess.call(['pip3','install','tkinter'])
from tkinter import font
import shutil
import os, sys
import urllib.request
import requests
class downloadGradeCard():
    def __init__(self, master):
        '''
        Initializes and sets up the UI; requires the user to choose
        a stream [CSE by default] and semester [SEM1 by default].
        '''
master.geometry("300x300")
master.title("GrAider")
Label(master,text="\n\n").grid(row=1, column=2,sticky=W)
Label(master,text="Choose stream : ").grid(row=1, column=3,sticky=E)
self.stream = StringVar()
STREAMS = ["CSE","ECE","IT","ME","CE","EE"]
self.stream.set(STREAMS[0])
OptionMenu(master, self.stream, *STREAMS).grid(row = 1,column = 4,sticky=E)
Label(master,text="\n\nChoose semester : ").grid(row=2, column=3,sticky=E)
self.sem = StringVar()
SEMS = ["SEM1","SEM2","SEM3","SEM4","SEM5","SEM6", "SEM7", "SEM8"]
self.sem.set(SEMS[0])
OptionMenu(master, self.sem, *SEMS).grid(row = 2, column=4, sticky=E)
Label(master,text="\n\nAdmission Year : ").grid(row=3, column=3,sticky=E)
self.year = StringVar()
YEARS = ["2013","2014","2015","2016","2017","2018"]
self.year.set(YEARS[2])
OptionMenu(master, self.year, *YEARS).grid(row = 3, column=4, sticky=E)
Label(master,text="\n\nType of admission : ").grid(row=4, column=3,sticky=E)
self.typeofstudent = IntVar()
Radiobutton(master, text="Regular", variable = self.typeofstudent, value=0).grid(row = 5, column = 3, sticky = E)
Radiobutton(master, text="Lateral", variable = self.typeofstudent, value=1).grid(row = 5, column = 4, sticky = E)
Label(master,text="\n\n\n").grid(row=6, column=3,sticky=W)
download = Button(master, text="Start Now", command = self.startDownload)
font_obj = font.Font(download,download.cget("font"))
font_obj.configure(underline = True)
download.configure(font=font_obj)
download.grid(row = 6, column = 4, sticky = E)
quit = Button(master, text="Quit", command = self.quitWindow)
font_obj = font.Font(download,download.cget("font"))
font_obj.configure(underline = True)
quit.configure(font=font_obj)
quit.grid(row = 7, column = 4, sticky = W)
    def startDownload(self):
        '''
        Generates the result URL from the selected stream, semester and year,
        downloads each grade card PDF with requests.get(),
        and moves the saved file into a local results folder.
        '''
endset = {'CSE': '40', 'ECE': '50', 'IT': '60', 'CE':'10','EE':'20','ME':'30'}
_url = "http://jgec.ac.in/php/coe/results/"+self.sem.get()[3]+"/"+self.stream.get()+"_"+self.sem.get()+"_"
preset = self.year.get()[-2:]
roll = str(int(preset) -1*(self.typeofstudent.get()))+"10110"+endset[self.stream.get()]
# create custom directory if not available
if not os.path.exists(self.stream.get()+"_"+self.sem.get()+"_Results Stored Here"):
os.makedirs(self.stream.get()+"_"+self.sem.get()+"_Results Stored Here")
start,stop = 1,100
for offset in range(start,stop):
complete_url = _url+roll+"0"*(offset<10)+str(offset)+".pdf"
print("Generating custom URL ... done.")
response = requests.get(complete_url)
with open(complete_url[36:], 'wb') as fw:
fw.write(response.content)
fw.close()
shutil.move(complete_url[36:], os.getcwd()+"/"+self.stream.get()+"_"+self.sem.get()+"_Results Stored Here")
print("Saving file "+complete_url[36:]+" .... done.")
if offset==70:
if self.typeofstudent.get()==1:
roll = preset+"10110"+endset[self.stream.get()]
else:
roll = str(int(preset)+1)+"10110"+endset[self.stream.get()]
def quitWindow(self):
'''
Quits the window
'''
global root
root.destroy()
root = Tk()
def main():
downloaderObject = downloadGradeCard(root)
root.mainloop()
if __name__=="__main__":
main()
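A standalone sketch of the offset padding used in startDownload above: the "0"*(offset<10) expression left-pads single-digit offsets, which is equivalent to the clearer str(offset).zfill(2) for offsets 1 through 99:

for offset in (1, 9, 10, 99):
    padded_original = "0" * (offset < 10) + str(offset)   # the idiom used above
    padded_explicit = str(offset).zfill(2)                 # clearer equivalent
    assert padded_original == padded_explicit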
| 34.254545 | 115 | 0.669851 |

| 0da3affe055e9cddc72a6e450dbb6e0ef4dda1ac | 2,506 | py | Python | cosmic-core/systemvm/patches/centos7/opt/cosmic/startup/setup_ssvm.py | RobbertJanSW/cosmic | 08cfd7be26bbd943de8a49c04042a7e08638b214 | ["Apache-2.0"] | null | null | null | cosmic-core/systemvm/patches/centos7/opt/cosmic/startup/setup_ssvm.py | RobbertJanSW/cosmic | 08cfd7be26bbd943de8a49c04042a7e08638b214 | ["Apache-2.0"] | null | null | null | cosmic-core/systemvm/patches/centos7/opt/cosmic/startup/setup_ssvm.py | RobbertJanSW/cosmic | 08cfd7be26bbd943de8a49c04042a7e08638b214 | ["Apache-2.0"] | null | null | null |
import logging
import os
from utils import Utils
def setup_html():
html_dir = "/var/www/html/copy"
if not os.path.isdir(html_dir):
os.makedirs(html_dir, 0o755, True)
def setup_iptable_rules(cmdline):
iptables_rules = """
*nat
:PREROUTING ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
COMMIT
*filter
:INPUT DROP [0:0]
:FORWARD DROP [0:0]
:OUTPUT ACCEPT [0:0]
:HTTP - [0:0]
-A INPUT -i %s -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -i %s -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -i %s -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -i lo -j ACCEPT
-A INPUT -p icmp --icmp-type 13 -j DROP
-A INPUT -p icmp -j ACCEPT
-A INPUT -i %s -p tcp -m state --state NEW -s 169.254.0.1/32 --dport 3922 -j ACCEPT
-A OUTPUT -o %s -p tcp -m state --state NEW -m tcp --dport 80 -j REJECT
-A OUTPUT -o %s -p tcp -m state --state NEW -m tcp --dport 443 -j REJECT
COMMIT
""" % (
cmdline['controlnic'],
cmdline['mgtnic'],
cmdline['publicnic'],
cmdline['controlnic'],
cmdline['mgtnic'],
cmdline['mgtnic']
)
with open("/tmp/iptables-secstorage", "w") as f:
f.write(iptables_rules)
os.system("iptables-restore < /tmp/iptables-secstorage")
class SecondaryStorageVM:
def __init__(self, cmdline) -> None:
super().__init__()
self.cmdline = cmdline
self.config_dir = "/etc/cosmic/agent/"
def start(self):
logging.info("Setting up configuration for %s" % self.cmdline["type"])
self.setup_agent_config()
setup_html()
setup_iptable_rules(self.cmdline)
self.setup_nginx()
Utils(self.cmdline).set_rfc1918_routes()
os.system("systemctl start cosmic-agent")
def setup_agent_config(self):
Utils(self.cmdline).setup_agent_properties()
def setup_nginx(self):
if not os.path.isdir("/var/www/html/userdata"):
os.makedirs("/var/www/html/userdata", 0o755, True)
vhost = """
server {
listen %s:80;
listen %s:443 ssl;
server_name _;
root /var/www/html;
autoindex off;
location /userdata {
autoindex off;
}
}
""" % (self.cmdline["publicip"], self.cmdline["publicip"])
filename = "/etc/nginx/conf.d/vhost-%s.conf" % (self.cmdline["publicip"])
with open(filename, 'w') as f:
f.write(vhost)
os.system("systemctl start nginx")
os.system("systemctl reload nginx")
| 25.571429 | 83 | 0.618117 |

| 5684f1976bb22fffb5b49debd8914197c130e4ea | 956 | py | Python | openstack_dashboard/dashboards/project/volumes/panel.py | ameoba/horizon | ff9e367c98a8bb79f10914abffaaa04b0a461819 | ["Apache-2.0"] | 2 | 2019-12-29T09:20:13.000Z | 2020-01-01T13:12:34.000Z | openstack_dashboard/dashboards/project/volumes/panel.py | yongquanf/horizon | 9aad7fd6f66588fed7c27b720642e47a4a12854b | ["Apache-2.0"] | 1 | 2015-03-12T01:03:44.000Z | 2015-03-12T01:03:44.000Z | openstack_dashboard/dashboards/project/volumes/panel.py | yongquanf/horizon | 9aad7fd6f66588fed7c27b720642e47a4a12854b | ["Apache-2.0"] | 4 | 2015-05-05T08:17:28.000Z | 2020-02-05T10:47:06.000Z |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _ # noqa
import horizon
from openstack_dashboard.dashboards.project import dashboard
class Volumes(horizon.Panel):
name = _("Volumes")
slug = 'volumes'
permissions = ('openstack.services.volume',)
dashboard.Project.register(Volumes)
| 30.83871 | 78 | 0.736402 |

| 963426f7641e12509298f2e5b683006f7272e0f7 | 3,338 | py | Python | Smart Buy/myproject/settings.py | chandankeshri1812/IEEE-Megaproject | 081479294d90914625315f9f2ce8a9e2020f3e00 | ["MIT"] | 8 | 2022-01-07T14:07:07.000Z | 2022-02-05T04:03:07.000Z | Smart Buy/myproject/settings.py | chandankeshri1812/IEEE-Megaproject | 081479294d90914625315f9f2ce8a9e2020f3e00 | ["MIT"] | 41 | 2022-01-08T08:42:08.000Z | 2022-02-28T01:49:15.000Z | Smart Buy/myproject/settings.py | chandankeshri1812/IEEE-Megaproject | 081479294d90914625315f9f2ce8a9e2020f3e00 | ["MIT"] | 50 | 2022-01-07T13:15:15.000Z | 2022-02-25T04:35:15.000Z |
"""
Django settings for myproject project.
Generated by 'django-admin startproject' using Django 4.0.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from pathlib import Path
import os
import myapp
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-%qv_6v*i-vz2unr-w4!__m6j+_8ca)ka!c7je7(x(bsnnjraxe'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR,'static'),)
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
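A standalone sketch of the pathlib idiom these settings rely on: BASE_DIR is a Path object, so the / operator builds the child paths used for the database file, while os.path.join builds the static directory entry:

import os
from pathlib import Path

base_dir = Path(__file__).resolve().parent.parent
print(base_dir / 'db.sqlite3')           # the DATABASES['default']['NAME'] value
print(os.path.join(base_dir, 'static'))  # the STATICFILES_DIRS entry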
| 25.875969 | 91 | 0.704314 |

| aa5eaa34d2ce9770466e260eb750ade08ba62275 | 1,163 | py | Python | setup.py | LysandreJik/doc-builder | 6f689181aafb70b98a0c1035e202311e60f07f6c | ["Apache-2.0"] | null | null | null | setup.py | LysandreJik/doc-builder | 6f689181aafb70b98a0c1035e202311e60f07f6c | ["Apache-2.0"] | null | null | null | setup.py | LysandreJik/doc-builder | 6f689181aafb70b98a0c1035e202311e60f07f6c | ["Apache-2.0"] | null | null | null |
# Doc-builder package setup.
# The line above is checked by some of the utilities in this repo, do not change it.
from setuptools import find_packages, setup
install_requires = ["tqdm", "pyyaml", "packaging", "nbformat"]
extras = {}
extras["transformers"] = ["transformers[dev]"]
extras["testing"] = ["pytest", "pytest-xdist", "torch", "transformers"]
extras["quality"] = ["black~=22.0", "isort>=5.5.4", "flake8>=3.8.3"]
extras["all"] = extras["testing"] + extras["quality"]
extras["dev"] = extras["all"]
setup(
name="hf-doc-builder",
version="0.2.0.dev0",
author="Hugging Face, Inc.",
author_email="sylvain@huggingface.co",
description="Doc building utility",
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="doc documentation doc-builder huggingface hugging face",
url="https://github.com/huggingface/doc-builder",
package_dir={"": "src"},
packages=find_packages("src"),
extras_require=extras,
install_requires=install_requires,
entry_points={"console_scripts": ["doc-builder=doc_builder.commands.doc_builder_cli:main"]},
)
| 34.205882 | 96 | 0.692175 |

| 82f9cf71e3a867a6c4797754199353d7ee0dc887 | 782 | py | Python | checkov/terraform/checks/resource/aws/S3ObjectCopyEncryptedWithCMK.py | jamesholland-uk/checkov | d73fd4bd7096d48ab3434a92a177bcc55605460a | ["Apache-2.0"] | 1 | 2021-02-13T15:24:42.000Z | 2021-02-13T15:24:42.000Z | checkov/terraform/checks/resource/aws/S3ObjectCopyEncryptedWithCMK.py | jamesholland-uk/checkov | d73fd4bd7096d48ab3434a92a177bcc55605460a | ["Apache-2.0"] | 7 | 2021-04-12T06:54:07.000Z | 2022-03-21T14:04:14.000Z | checkov/terraform/checks/resource/aws/S3ObjectCopyEncryptedWithCMK.py | jamesholland-uk/checkov | d73fd4bd7096d48ab3434a92a177bcc55605460a | ["Apache-2.0"] | 1 | 2021-12-16T03:09:55.000Z | 2021-12-16T03:09:55.000Z |
from checkov.common.models.consts import ANY_VALUE
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
from checkov.common.models.enums import CheckCategories
class S3ObjectCopyEncryptedWithCMK(BaseResourceValueCheck):
def __init__(self):
name = "Ensure S3 Object Copy is encrypted by KMS using a customer managed Key (CMK)"
id = "CKV_AWS_181"
supported_resources = ['aws_s3_object_copy']
categories = [CheckCategories.ENCRYPTION]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self):
return "kms_key_id"
def get_expected_value(self):
return ANY_VALUE
check = S3ObjectCopyEncryptedWithCMK()
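A plain-Python sketch (not the real checkov evaluation engine) of what CKV_AWS_181 looks for: an aws_s3_object_copy resource passes only when kms_key_id is set to some value, which is what ANY_VALUE expresses:

def passes_ckv_aws_181(resource_conf):
    # checkov's HCL parser wraps scalar attribute values in lists,
    # e.g. {"kms_key_id": ["aws_kms_key.copy.arn"]}
    value = resource_conf.get("kms_key_id", [None])[0]
    return value is not None

assert passes_ckv_aws_181({"kms_key_id": ["aws_kms_key.copy.arn"]})
assert not passes_ckv_aws_181({"bucket": ["destination-bucket"]})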
| 35.545455 | 106 | 0.762148 |

| a575a6dac2fdc58854714d382eb1feed82a07352 | 2,270 | py | Python | ooobuild/lo/drawing/framework/basic_pane_factory.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | ["Apache-2.0"] | null | null | null | ooobuild/lo/drawing/framework/basic_pane_factory.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | ["Apache-2.0"] | null | null | null | ooobuild/lo/drawing/framework/basic_pane_factory.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | ["Apache-2.0"] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is an auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.drawing.framework
import typing
from abc import abstractmethod
from .x_resource_factory import XResourceFactory as XResourceFactory_b3561268
if typing.TYPE_CHECKING:
from ...frame.x_controller import XController as XController_b00e0b8f
class BasicPaneFactory(XResourceFactory_b3561268):
"""
Service Class
The BasicPaneFactory is a resource factory that provides the panes used by the Draw and Impress applications.
This factory provides the center, left, and right pane. For the left pane there are two URLS, private:resource/floater/LeftImpressPane and private:resource/floater/LeftDrawPane, one for Impress, the other for Draw. The center pane and the right pane have the URLs private:resource/floater/CenterPane and private:resource/floater/RightPane respectively.
This factory is typically created indirectly by registering it in the configuration and have the XModuleController create it on demand.
See Also:
`API BasicPaneFactory <https://api.libreoffice.org/docs/idl/ref/servicecom_1_1sun_1_1star_1_1drawing_1_1framework_1_1BasicPaneFactory.html>`_
"""
__ooo_ns__: str = 'com.sun.star.drawing.framework'
__ooo_full_ns__: str = 'com.sun.star.drawing.framework.BasicPaneFactory'
__ooo_type_name__: str = 'service'
@abstractmethod
def create(self, xController: 'XController_b00e0b8f') -> None:
"""
Give the controller to new instances so that they have access to the drawing framework controllers.
"""
__all__ = ['BasicPaneFactory']
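For reference, the pane resource URLs described in the docstring above, collected as illustrative constants (the UNO service itself defines them, not this module):

BASIC_PANE_URLS = (
    "private:resource/floater/CenterPane",
    "private:resource/floater/LeftImpressPane",
    "private:resource/floater/LeftDrawPane",
    "private:resource/floater/RightPane",
)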
| 42.830189 | 356 | 0.76696 |

| 2939e2770fb523882a5ceec188838d06d210e1d9 | 43,599 | py | Python | sdk/core/azure-core/tests/test_basic_transport.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | 1 | 2022-03-09T08:59:13.000Z | 2022-03-09T08:59:13.000Z | sdk/core/azure-core/tests/test_basic_transport.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | null | null | null | sdk/core/azure-core/tests/test_basic_transport.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | 1 | 2022-03-04T06:21:56.000Z | 2022-03-04T06:21:56.000Z |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for
# license information.
# -------------------------------------------------------------------------
from six.moves.http_client import HTTPConnection
from collections import OrderedDict
import sys
try:
from unittest import mock
except ImportError:
import mock
from azure.core.pipeline.transport import HttpResponse as PipelineTransportHttpResponse, RequestsTransport
from azure.core.pipeline.transport._base import HttpTransport, _deserialize_response, _urljoin
from azure.core.pipeline.policies import HeadersPolicy
from azure.core.pipeline import Pipeline
from azure.core.exceptions import HttpResponseError
import logging
import pytest
from utils import HTTP_REQUESTS, request_and_responses_product, HTTP_CLIENT_TRANSPORT_RESPONSES, create_transport_response
from azure.core.rest._http_response_impl import HttpResponseImpl as RestHttpResponseImpl
from azure.core.pipeline._tools import is_rest
class PipelineTransportMockResponse(PipelineTransportHttpResponse):
def __init__(self, request, body, content_type):
super(PipelineTransportMockResponse, self).__init__(request, None)
self._body = body
self.content_type = content_type
def body(self):
return self._body
class RestMockResponse(RestHttpResponseImpl):
def __init__(self, request, body, content_type):
super(RestMockResponse, self).__init__(
request=request,
internal_response=None,
content_type=content_type,
block_size=None,
status_code=200,
reason="OK",
headers={},
stream_download_generator=None,
)
# the impl takes in a lot more kwargs. It's not public and is a
# helper implementation shared across our azure core transport responses
self._body = body
def body(self):
return self._body
@property
def content(self):
return self._body
MOCK_RESPONSES = [PipelineTransportMockResponse, RestMockResponse]
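# The two mock classes above mirror the legacy pipeline-transport HttpResponse
# and the newer azure.core.rest response implementation, so the multipart
# tests below can run against both; request_and_responses_product (imported
# from the local test utils) presumably yields every request/response pairing
# consumed by the parametrize decorators.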
@pytest.mark.skipif(sys.version_info < (3, 6), reason="Multipart serialization not supported on 2.7 + dict order not deterministic on 3.5")
@pytest.mark.parametrize("http_request", HTTP_REQUESTS)
def test_http_request_serialization(http_request):
# Method + Url
request = http_request("DELETE", "/container0/blob0")
serialized = request.serialize()
expected = (
b'DELETE /container0/blob0 HTTP/1.1\r\n'
# No headers
b'\r\n'
)
assert serialized == expected
# Method + Url + Headers
request = http_request(
"DELETE",
"/container0/blob0",
# Use OrderedDict to get consistent test result on 3.5 where order is not guaranteed
headers=OrderedDict({
"x-ms-date": "Thu, 14 Jun 2018 16:46:54 GMT",
"Authorization": "SharedKey account:G4jjBXA7LI/RnWKIOQ8i9xH4p76pAQ+4Fs4R1VxasaE=", # fake key suppressed in credscan
"Content-Length": "0",
})
)
serialized = request.serialize()
expected = (
b'DELETE /container0/blob0 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'Authorization: SharedKey account:G4jjBXA7LI/RnWKIOQ8i9xH4p76pAQ+4Fs4R1VxasaE=\r\n' # fake key suppressed in credscan
b'Content-Length: 0\r\n'
b'\r\n'
)
assert serialized == expected
# Method + Url + Headers + Body
request = http_request(
"DELETE",
"/container0/blob0",
headers={
"x-ms-date": "Thu, 14 Jun 2018 16:46:54 GMT",
},
)
request.set_bytes_body(b"I am groot")
serialized = request.serialize()
expected = (
b'DELETE /container0/blob0 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'Content-Length: 10\r\n'
b'\r\n'
b'I am groot'
)
assert serialized == expected
@pytest.mark.parametrize("http_request", HTTP_REQUESTS)
def test_url_join(http_request):
assert _urljoin('devstoreaccount1', '') == 'devstoreaccount1/'
assert _urljoin('devstoreaccount1', 'testdir/') == 'devstoreaccount1/testdir/'
assert _urljoin('devstoreaccount1/', '') == 'devstoreaccount1/'
assert _urljoin('devstoreaccount1/', 'testdir/') == 'devstoreaccount1/testdir/'
@pytest.mark.parametrize("http_request,http_response", request_and_responses_product(HTTP_CLIENT_TRANSPORT_RESPONSES))
def test_http_client_response(port, http_request, http_response):
# Create a core request
request = http_request("GET", "http://localhost:{}".format(port))
# Fake a transport based on http.client
conn = HTTPConnection("localhost", port)
conn.request("GET", "/get")
r1 = conn.getresponse()
response = create_transport_response(http_response, request, r1)
if is_rest(http_response):
response.read()
    # Don't assume too much in these asserts, since we reach a real server
assert response.internal_response is r1
assert response.reason is not None
assert isinstance(response.status_code, int)
assert len(response.headers.keys()) != 0
assert len(response.text()) != 0
assert "content-type" in response.headers
assert "Content-Type" in response.headers
@pytest.mark.parametrize("http_request", HTTP_REQUESTS)
def test_response_deserialization(http_request):
# Method + Url
request = http_request("DELETE", "/container0/blob0")
body = (
b'HTTP/1.1 202 Accepted\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
)
response = _deserialize_response(body, request)
assert response.status_code == 202
assert response.reason == "Accepted"
assert response.headers == {
'x-ms-request-id': '778fdc83-801e-0000-62ff-0334671e284f',
'x-ms-version': '2018-11-09'
}
# Method + Url + Headers + Body
request = http_request(
"DELETE",
"/container0/blob0",
headers={
"x-ms-date": "Thu, 14 Jun 2018 16:46:54 GMT",
},
)
request.set_bytes_body(b"I am groot")
body = (
b'HTTP/1.1 200 OK\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'I am groot'
)
response = _deserialize_response(body, request)
assert isinstance(response.status_code, int)
assert response.reason == "OK"
assert response.headers == {
'x-ms-request-id': '778fdc83-801e-0000-62ff-0334671e284f',
'x-ms-version': '2018-11-09'
}
assert response.text() == "I am groot"
@pytest.mark.parametrize("http_request", HTTP_REQUESTS)
def test_response_deserialization_utf8_bom(http_request):
request = http_request("DELETE", "/container0/blob0")
body = (
b'HTTP/1.1 400 One of the request inputs is not valid.\r\n'
b'x-ms-error-code: InvalidInput\r\n'
b'x-ms-request-id: 5f3f9f2f-e01e-00cc-6eb1-6d00b5000000\r\n'
b'x-ms-version: 2019-02-02\r\n'
b'Content-Length: 220\r\n'
b'Content-Type: application/xml\r\n'
b'Server: Windows-Azure-Blob/1.0\r\n'
b'\r\n'
b'\xef\xbb\xbf<?xml version="1.0" encoding="utf-8"?>\n<Error><Code>InvalidInput</Code><Message>One'
b'of the request inputs is not valid.\nRequestId:5f3f9f2f-e01e-00cc-6eb1-6d00b5000000\nTime:2019-09-17T23:44:07.4671860Z</Message></Error>'
)
response = _deserialize_response(body, request)
assert response.body().startswith(b'\xef\xbb\xbf')
@pytest.mark.skipif(sys.version_info < (3, 0), reason="Multipart serialization not supported on 2.7")
@pytest.mark.parametrize("http_request", HTTP_REQUESTS)
def test_multipart_send(http_request):
transport = mock.MagicMock(spec=HttpTransport)
header_policy = HeadersPolicy({
'x-ms-date': 'Thu, 14 Jun 2018 16:46:54 GMT'
})
req0 = http_request("DELETE", "/container0/blob0")
req1 = http_request("DELETE", "/container1/blob1")
request = http_request("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(
req0,
req1,
policies=[header_policy],
boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525" # Fix it so test are deterministic
)
with Pipeline(transport) as pipeline:
pipeline.run(request)
assert request.body == (
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'DELETE /container0/blob0 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'DELETE /container1/blob1 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
)
@pytest.mark.skipif(sys.version_info < (3, 0), reason="Multipart serialization not supported on 2.7")
@pytest.mark.parametrize("http_request", HTTP_REQUESTS)
def test_multipart_send_with_context(http_request):
transport = mock.MagicMock(spec=HttpTransport)
header_policy = HeadersPolicy({
'x-ms-date': 'Thu, 14 Jun 2018 16:46:54 GMT'
})
req0 = http_request("DELETE", "/container0/blob0")
req1 = http_request("DELETE", "/container1/blob1")
request = http_request("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(
req0,
req1,
policies=[header_policy],
boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525", # Fix it so test are deterministic
headers={'Accept': 'application/json'}
)
with Pipeline(transport) as pipeline:
pipeline.run(request)
assert request.body == (
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'DELETE /container0/blob0 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'Accept: application/json\r\n'
b'\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'DELETE /container1/blob1 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'Accept: application/json\r\n'
b'\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
)
@pytest.mark.skipif(sys.version_info < (3, 0), reason="Multipart serialization not supported on 2.7")
@pytest.mark.parametrize("http_request", HTTP_REQUESTS)
def test_multipart_send_with_one_changeset(http_request):
transport = mock.MagicMock(spec=HttpTransport)
header_policy = HeadersPolicy({
'x-ms-date': 'Thu, 14 Jun 2018 16:46:54 GMT'
})
requests = [
http_request("DELETE", "/container0/blob0"),
http_request("DELETE", "/container1/blob1")
]
changeset = http_request("", "")
changeset.set_multipart_mixed(
*requests,
policies=[header_policy],
boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"
)
request = http_request("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(
changeset,
boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525",
)
with Pipeline(transport) as pipeline:
pipeline.run(request)
assert request.body == (
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'DELETE /container0/blob0 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'DELETE /container1/blob1 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
)
@pytest.mark.skipif(sys.version_info < (3, 0), reason="Multipart serialization not supported on 2.7")
@pytest.mark.parametrize("http_request", HTTP_REQUESTS)
def test_multipart_send_with_multiple_changesets(http_request):
transport = mock.MagicMock(spec=HttpTransport)
header_policy = HeadersPolicy({
'x-ms-date': 'Thu, 14 Jun 2018 16:46:54 GMT'
})
changeset1 = http_request("", "")
changeset1.set_multipart_mixed(
http_request("DELETE", "/container0/blob0"),
http_request("DELETE", "/container1/blob1"),
policies=[header_policy],
boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"
)
changeset2 = http_request("", "")
changeset2.set_multipart_mixed(
http_request("DELETE", "/container2/blob2"),
http_request("DELETE", "/container3/blob3"),
policies=[header_policy],
boundary="changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314"
)
request = http_request("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(
changeset1,
changeset2,
policies=[header_policy],
boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525",
)
with Pipeline(transport) as pipeline:
pipeline.run(request)
assert request.body == (
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'DELETE /container0/blob0 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'DELETE /container1/blob1 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: multipart/mixed; boundary=changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n'
b'\r\n'
b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 2\r\n'
b'\r\n'
b'DELETE /container2/blob2 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'\r\n'
b'\r\n'
b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 3\r\n'
b'\r\n'
b'DELETE /container3/blob3 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'\r\n'
b'\r\n'
b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314--\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
)
@pytest.mark.skipif(sys.version_info < (3, 0), reason="Multipart serialization not supported on 2.7")
@pytest.mark.parametrize("http_request", HTTP_REQUESTS)
def test_multipart_send_with_combination_changeset_first(http_request):
transport = mock.MagicMock(spec=HttpTransport)
header_policy = HeadersPolicy({
'x-ms-date': 'Thu, 14 Jun 2018 16:46:54 GMT'
})
changeset = http_request("", "")
changeset.set_multipart_mixed(
http_request("DELETE", "/container0/blob0"),
http_request("DELETE", "/container1/blob1"),
policies=[header_policy],
boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"
)
request = http_request("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(
changeset,
http_request("DELETE", "/container2/blob2"),
policies=[header_policy],
boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525"
)
with Pipeline(transport) as pipeline:
pipeline.run(request)
assert request.body == (
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'DELETE /container0/blob0 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'DELETE /container1/blob1 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 2\r\n'
b'\r\n'
b'DELETE /container2/blob2 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
)
@pytest.mark.skipif(sys.version_info < (3, 0), reason="Multipart serialization not supported on 2.7")
@pytest.mark.parametrize("http_request", HTTP_REQUESTS)
def test_multipart_send_with_combination_changeset_last(http_request):
transport = mock.MagicMock(spec=HttpTransport)
header_policy = HeadersPolicy({
'x-ms-date': 'Thu, 14 Jun 2018 16:46:54 GMT'
})
changeset = http_request("", "")
changeset.set_multipart_mixed(
http_request("DELETE", "/container1/blob1"),
http_request("DELETE", "/container2/blob2"),
policies=[header_policy],
boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"
)
request = http_request("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(
http_request("DELETE", "/container0/blob0"),
changeset,
policies=[header_policy],
boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525"
)
with Pipeline(transport) as pipeline:
pipeline.run(request)
assert request.body == (
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'DELETE /container0/blob0 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'DELETE /container1/blob1 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 2\r\n'
b'\r\n'
b'DELETE /container2/blob2 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
)
@pytest.mark.skipif(sys.version_info < (3, 0), reason="Multipart serialization not supported on 2.7")
@pytest.mark.parametrize("http_request", HTTP_REQUESTS)
def test_multipart_send_with_combination_changeset_middle(http_request):
transport = mock.MagicMock(spec=HttpTransport)
header_policy = HeadersPolicy({
'x-ms-date': 'Thu, 14 Jun 2018 16:46:54 GMT'
})
changeset = http_request("", "")
changeset.set_multipart_mixed(
http_request("DELETE", "/container1/blob1"),
policies=[header_policy],
boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"
)
request = http_request("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(
http_request("DELETE", "/container0/blob0"),
changeset,
http_request("DELETE", "/container2/blob2"),
policies=[header_policy],
boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525"
)
with Pipeline(transport) as pipeline:
pipeline.run(request)
assert request.body == (
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'DELETE /container0/blob0 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'DELETE /container1/blob1 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 2\r\n'
b'\r\n'
b'DELETE /container2/blob2 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
)
@pytest.mark.parametrize("http_request,mock_response", request_and_responses_product(MOCK_RESPONSES))
def test_multipart_receive(http_request, mock_response):
class ResponsePolicy(object):
def on_response(self, request, response):
# type: (PipelineRequest, PipelineResponse) -> None
response.http_response.headers['x-ms-fun'] = 'true'
req0 = http_request("DELETE", "/container0/blob0")
req1 = http_request("DELETE", "/container1/blob1")
request = http_request("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(
req0,
req1,
policies=[ResponsePolicy()]
)
body_as_str = (
"--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n"
"Content-Type: application/http\r\n"
"Content-ID: 0\r\n"
"\r\n"
"HTTP/1.1 202 Accepted\r\n"
"x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n"
"x-ms-version: 2018-11-09\r\n"
"\r\n"
"--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n"
"Content-Type: application/http\r\n"
"Content-ID: 2\r\n"
"\r\n"
"HTTP/1.1 404 The specified blob does not exist.\r\n"
"x-ms-error-code: BlobNotFound\r\n"
"x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e2852\r\n"
"x-ms-version: 2018-11-09\r\n"
"Content-Length: 216\r\n"
"Content-Type: application/xml\r\n"
"\r\n"
'<?xml version="1.0" encoding="utf-8"?>\r\n'
"<Error><Code>BlobNotFound</Code><Message>The specified blob does not exist.\r\n"
"RequestId:778fdc83-801e-0000-62ff-0334671e2852\r\n"
"Time:2018-06-14T16:46:54.6040685Z</Message></Error>\r\n"
"--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--"
)
response = mock_response(
request,
body_as_str.encode('ascii'),
"multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed"
)
response = response.parts()
assert len(response) == 2
res0 = response[0]
assert res0.status_code == 202
assert res0.headers['x-ms-fun'] == 'true'
res1 = response[1]
assert res1.status_code == 404
assert res1.headers['x-ms-fun'] == 'true'
@pytest.mark.parametrize("mock_response", MOCK_RESPONSES)
def test_raise_for_status_bad_response(mock_response):
response = mock_response(request=None, body=None, content_type=None)
response.status_code = 400
with pytest.raises(HttpResponseError):
response.raise_for_status()
@pytest.mark.parametrize("mock_response", MOCK_RESPONSES)
def test_raise_for_status_good_response(mock_response):
response = mock_response(request=None, body=None, content_type=None)
response.status_code = 200
response.raise_for_status()
@pytest.mark.parametrize("http_request,mock_response", request_and_responses_product(MOCK_RESPONSES))
def test_multipart_receive_with_one_changeset(http_request, mock_response):
changeset = http_request(None, None)
changeset.set_multipart_mixed(
http_request("DELETE", "/container0/blob0"),
http_request("DELETE", "/container1/blob1")
)
request = http_request("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(changeset)
body_as_bytes = (
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'HTTP/1.1 202 Accepted\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'HTTP/1.1 202 Accepted\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
b'\r\n'
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n'
)
response = mock_response(
request,
body_as_bytes,
"multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed"
)
parts = []
for part in response.parts():
parts.append(part)
assert len(parts) == 2
res0 = parts[0]
assert res0.status_code == 202
@pytest.mark.parametrize("http_request,mock_response", request_and_responses_product(MOCK_RESPONSES))
def test_multipart_receive_with_multiple_changesets(http_request, mock_response):
changeset1 = http_request(None, None)
changeset1.set_multipart_mixed(
http_request("DELETE", "/container0/blob0"),
http_request("DELETE", "/container1/blob1")
)
changeset2 = http_request(None, None)
changeset2.set_multipart_mixed(
http_request("DELETE", "/container2/blob2"),
http_request("DELETE", "/container3/blob3")
)
request = http_request("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(changeset1, changeset2)
body_as_bytes = (
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'HTTP/1.1 200\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'HTTP/1.1 202\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
b'\r\n'
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
b'Content-Type: multipart/mixed; boundary="changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314"\r\n'
b'\r\n'
b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 2\r\n'
b'\r\n'
b'HTTP/1.1 404\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 3\r\n'
b'\r\n'
b'HTTP/1.1 409\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314--\r\n'
b'\r\n'
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n'
)
response = mock_response(
request,
body_as_bytes,
"multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed"
)
parts = []
for part in response.parts():
parts.append(part)
assert len(parts) == 4
assert parts[0].status_code == 200
assert parts[1].status_code == 202
assert parts[2].status_code == 404
assert parts[3].status_code == 409
@pytest.mark.parametrize("http_request,mock_response", request_and_responses_product(MOCK_RESPONSES))
def test_multipart_receive_with_combination_changeset_first(http_request, mock_response):
changeset = http_request(None, None)
changeset.set_multipart_mixed(
http_request("DELETE", "/container0/blob0"),
http_request("DELETE", "/container1/blob1")
)
request = http_request("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(changeset, http_request("DELETE", "/container2/blob2"))
body_as_bytes = (
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'HTTP/1.1 200\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'HTTP/1.1 202\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
b'\r\n'
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 2\r\n'
b'\r\n'
b'HTTP/1.1 404\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n'
)
response = mock_response(
request,
body_as_bytes,
"multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed"
)
parts = []
for part in response.parts():
parts.append(part)
assert len(parts) == 3
assert parts[0].status_code == 200
assert parts[1].status_code == 202
assert parts[2].status_code == 404
@pytest.mark.parametrize("http_request,mock_response", request_and_responses_product(MOCK_RESPONSES))
def test_multipart_receive_with_combination_changeset_middle(http_request, mock_response):
changeset = http_request(None, None)
changeset.set_multipart_mixed(http_request("DELETE", "/container1/blob1"))
request = http_request("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(
http_request("DELETE", "/container0/blob0"),
changeset,
http_request("DELETE", "/container2/blob2")
)
body_as_bytes = (
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 2\r\n'
b'\r\n'
b'HTTP/1.1 200\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'HTTP/1.1 202\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
b'\r\n'
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 2\r\n'
b'\r\n'
b'HTTP/1.1 404\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n'
)
response = mock_response(
request,
body_as_bytes,
"multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed"
)
parts = []
for part in response.parts():
parts.append(part)
assert len(parts) == 3
assert parts[0].status_code == 200
assert parts[1].status_code == 202
assert parts[2].status_code == 404
@pytest.mark.parametrize("http_request,mock_response", request_and_responses_product(MOCK_RESPONSES))
def test_multipart_receive_with_combination_changeset_last(http_request, mock_response):
changeset = http_request(None, None)
changeset.set_multipart_mixed(
http_request("DELETE", "/container1/blob1"),
http_request("DELETE", "/container2/blob2")
)
request = http_request("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(http_request("DELETE", "/container0/blob0"), changeset)
body_as_bytes = (
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 2\r\n'
b'\r\n'
b'HTTP/1.1 200\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'HTTP/1.1 202\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'HTTP/1.1 404\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
b'\r\n'
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n'
)
response = mock_response(
request,
body_as_bytes,
"multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed"
)
parts = []
for part in response.parts():
parts.append(part)
assert len(parts) == 3
assert parts[0].status_code == 200
assert parts[1].status_code == 202
assert parts[2].status_code == 404
@pytest.mark.parametrize("http_request,mock_response", request_and_responses_product(MOCK_RESPONSES))
def test_multipart_receive_with_bom(http_request, mock_response):
req0 = http_request("DELETE", "/container0/blob0")
request = http_request("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(req0)
body_as_bytes = (
b"--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\n"
b"Content-Type: application/http\n"
b"Content-Transfer-Encoding: binary\n"
b"Content-ID: 0\n"
b'\r\n'
b'HTTP/1.1 400 One of the request inputs is not valid.\r\n'
b'Content-Length: 220\r\n'
b'Content-Type: application/xml\r\n'
b'Server: Windows-Azure-Blob/1.0\r\n'
b'\r\n'
b'\xef\xbb\xbf<?xml version="1.0" encoding="utf-8"?>\n<Error><Code>InvalidInput</Code><Message>One'
b'of the request inputs is not valid.\nRequestId:5f3f9f2f-e01e-00cc-6eb1-6d00b5000000\nTime:2019-09-17T23:44:07.4671860Z</Message></Error>\n'
b"--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--"
)
response = mock_response(
request,
body_as_bytes,
"multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed"
)
response = response.parts()
assert len(response) == 1
res0 = response[0]
assert res0.status_code == 400
assert res0.body().startswith(b'\xef\xbb\xbf')
@pytest.mark.parametrize("http_request,mock_response", request_and_responses_product(MOCK_RESPONSES))
def test_recursive_multipart_receive(http_request, mock_response):
req0 = http_request("DELETE", "/container0/blob0")
internal_req0 = http_request("DELETE", "/container0/blob0")
req0.set_multipart_mixed(internal_req0)
request = http_request("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(req0)
internal_body_as_str = (
"--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n"
"Content-Type: application/http\r\n"
"Content-ID: 0\r\n"
"\r\n"
"HTTP/1.1 400 Accepted\r\n"
"x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n"
"x-ms-version: 2018-11-09\r\n"
"\r\n"
"--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--"
)
body_as_str = (
"--batchresponse_8d5f5bcd-2cb5-44bb-91b5-e9a722e68cb6\r\n"
"Content-Type: application/http\r\n"
"Content-ID: 0\r\n"
"\r\n"
"HTTP/1.1 202 Accepted\r\n"
"Content-Type: multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n"
"\r\n"
"{}"
"--batchresponse_8d5f5bcd-2cb5-44bb-91b5-e9a722e68cb6--"
).format(internal_body_as_str)
response = mock_response(
request,
body_as_str.encode('ascii'),
"multipart/mixed; boundary=batchresponse_8d5f5bcd-2cb5-44bb-91b5-e9a722e68cb6"
)
response = response.parts()
assert len(response) == 1
res0 = response[0]
assert res0.status_code == 202
internal_response = res0.parts()
assert len(internal_response) == 1
internal_response0 = internal_response[0]
assert internal_response0.status_code == 400
def test_close_unopened_transport():
transport = RequestsTransport()
transport.close()
@pytest.mark.parametrize("http_request", HTTP_REQUESTS)
def test_timeout(caplog, port, http_request):
transport = RequestsTransport()
request = http_request("GET", "http://localhost:{}/basic/string".format(port))
with caplog.at_level(logging.WARNING, logger="azure.core.pipeline.transport"):
with Pipeline(transport) as pipeline:
pipeline.run(request, connection_timeout=100)
assert "Tuple timeout setting is deprecated" not in caplog.text
@pytest.mark.parametrize("http_request", HTTP_REQUESTS)
def test_tuple_timeout(caplog, port, http_request):
transport = RequestsTransport()
request = http_request("GET", "http://localhost:{}/basic/string".format(port))
with caplog.at_level(logging.WARNING, logger="azure.core.pipeline.transport"):
with Pipeline(transport) as pipeline:
pipeline.run(request, connection_timeout=(100, 100))
assert "Tuple timeout setting is deprecated" in caplog.text
@pytest.mark.parametrize("http_request", HTTP_REQUESTS)
def test_conflict_timeout(caplog, port, http_request):
transport = RequestsTransport()
request = http_request("GET", "http://localhost:{}/basic/string".format(port))
with pytest.raises(ValueError):
with Pipeline(transport) as pipeline:
pipeline.run(request, connection_timeout=(100, 100), read_timeout = 100)
| 36.423559
| 149
| 0.641941
|
68b0edb448645cc1ec3735e533a8e694e11aa872
| 8,543
|
py
|
Python
|
train_abstractor.py
|
silenceliang/Cascading-agents-hybridSum
|
6c127df51bd8cc926878f62ebdb66bc1042bb58c
|
[
"MIT"
] | 1
|
2020-02-23T15:38:18.000Z
|
2020-02-23T15:38:18.000Z
|
train_abstractor.py
|
silenceliang/CascadingAgentsHybridSum
|
6c127df51bd8cc926878f62ebdb66bc1042bb58c
|
[
"MIT"
] | null | null | null |
train_abstractor.py
|
silenceliang/CascadingAgentsHybridSum
|
6c127df51bd8cc926878f62ebdb66bc1042bb58c
|
[
"MIT"
] | null | null | null |
""" train the abstractor"""
import argparse
import json
import os
from os.path import join, exists
import pickle as pkl
from cytoolz import compose
import torch
from torch import optim
from torch.nn import functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from model.copy_summ import CopySumm
from model.util import sequence_loss
from training import get_basic_grad_fn, basic_validate
from training import BasicPipeline, BasicTrainer
from data.data import CnnDmDataset
from data.batcher import coll_fn, prepro_fn
from data.batcher import convert_batch_copy, batchify_fn_copy
from data.batcher import BucketedGenerater
from utils import PAD, UNK, START, END
from utils import make_vocab, make_embedding
# NOTE: a bucket size that is too large may sacrifice randomness,
# while one that is too low may increase the number of PAD tokens
BUCKET_SIZE = 6400
try:
DATA_DIR = os.environ['DATA']
except KeyError:
print('please use environment variable to specify data directories')
class MatchDataset(CnnDmDataset):
""" single article sentence -> single abstract sentence
(dataset created by greedily matching ROUGE)
"""
def __init__(self, split):
super().__init__(split, DATA_DIR)
def __getitem__(self, i):
js_data = super().__getitem__(i)
art_sents, abs_sents, extracts = (
js_data['article'], js_data['abstract'], js_data['extracted'])
        # the labeled sentences, i.e. the extracted article sentences
matched_arts = [art_sents[i] for i in extracts]
return matched_arts, abs_sents[:len(extracts)]
def configure_net(vocab_size, emb_dim, n_hidden, bidirectional, n_layer):
net_args = {}
net_args['vocab_size'] = vocab_size
net_args['emb_dim'] = emb_dim
net_args['n_hidden'] = n_hidden
net_args['bidirectional'] = bidirectional
net_args['n_layer'] = n_layer
net = CopySumm(**net_args)
return net, net_args
def configure_training(opt, lr, clip_grad, lr_decay, batch_size):
""" supports Adam optimizer only"""
assert opt in ['adam']
opt_kwargs = {}
opt_kwargs['lr'] = lr
train_params = {}
train_params['optimizer'] = (opt, opt_kwargs)
train_params['clip_grad_norm'] = clip_grad
train_params['batch_size'] = batch_size
train_params['lr_decay'] = lr_decay
nll = lambda logit, target: F.nll_loss(logit, target, reduce=False)
def criterion(logits, targets):
return sequence_loss(logits, targets, nll, pad_idx=PAD)
return criterion, train_params
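# Rough illustration (assuming the argparse defaults defined below): the
# helper would be invoked roughly as
#   criterion, train_params = configure_training(
#       'adam', lr=1e-3, clip_grad=2.0, lr_decay=0.5, batch_size=32)
# where criterion wraps a padded NLL sequence loss and train_params records
# the optimizer settings.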
def build_batchers(word2id, cuda, debug):
prepro = prepro_fn(args.max_art, args.max_abs)
def sort_key(sample):
src, target = sample
return (len(target), len(src))
batchify = compose(
batchify_fn_copy(PAD, START, END, cuda=cuda),
convert_batch_copy(UNK, word2id)
)
train_loader = DataLoader(
MatchDataset('train'), batch_size=BUCKET_SIZE,
shuffle=not debug,
num_workers=4 if cuda and not debug else 0,
collate_fn=coll_fn
)
train_batcher = BucketedGenerater(train_loader, prepro, sort_key, batchify,
single_run=False, fork=not debug)
val_loader = DataLoader(
MatchDataset('val'), batch_size=BUCKET_SIZE,
shuffle=False, num_workers=4 if cuda and not debug else 0,
collate_fn=coll_fn
)
val_batcher = BucketedGenerater(val_loader, prepro, sort_key, batchify,
single_run=True, fork=not debug)
return train_batcher, val_batcher
def main(args):
# create data batcher, vocabulary
# batcher
with open(join(DATA_DIR, 'vocab_cnt.pkl'), 'rb') as f:
wc = pkl.load(f)
word2id = make_vocab(wc, args.vsize)
train_batcher, val_batcher = build_batchers(word2id, args.cuda, args.debug)
# make net
net, net_args = configure_net(len(word2id), args.emb_dim, args.n_hidden, args.bi, args.n_layer)
if args.w2v:
        # NOTE: the pretrained embedding must already be trained and
        # must have the same dimension as args.emb_dim
embedding, _ = make_embedding(
{i: w for w, i in word2id.items()}, args.w2v)
net.set_embedding(embedding)
# configure training setting
criterion, train_params = configure_training(
'adam', args.lr, args.clip, args.decay, args.batch
)
# save experiment setting
if not exists(args.path):
os.makedirs(args.path)
with open(join(args.path, 'vocab.pkl'), 'wb') as f:
pkl.dump(word2id, f, pkl.HIGHEST_PROTOCOL)
meta = {}
meta['net'] = 'base_abstractor'
meta['net_args'] = net_args
    meta['training_params'] = train_params
with open(join(args.path, 'meta.json'), 'w') as f:
json.dump(meta, f, indent=4)
# prepare trainer
val_fn = basic_validate(net, criterion)
grad_fn = get_basic_grad_fn(net, args.clip)
optimizer = optim.Adam(net.parameters(), **train_params['optimizer'][1])
scheduler = ReduceLROnPlateau(optimizer, 'min', verbose=True,
factor=args.decay, min_lr=0,
patience=args.lr_p)
if args.cuda:
net = net.cuda()
pipeline = BasicPipeline(meta['net'], net,
train_batcher, val_batcher, args.batch, val_fn,
criterion, optimizer, grad_fn)
trainer = BasicTrainer(pipeline, args.path,
args.ckpt_freq, args.patience, scheduler)
print('start training with the following hyper-parameters:')
print(meta)
trainer.train()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='training of the abstractor (ML)'
)
parser.add_argument('--path', required=True, help='root of the model')
parser.add_argument('--vsize', type=int, action='store', default=50000,
help='vocabulary size')
parser.add_argument('--emb_dim', type=int, action='store', default=512,
help='the dimension of word embedding')
# parser.add_argument('--vsize', type=int, action='store', default=30000,
# help='vocabulary size')
# parser.add_argument('--emb_dim', type=int, action='store', default=128,
# help='the dimension of word embedding')
parser.add_argument('--w2v', action='store',
help='use pretrained word2vec embedding')
parser.add_argument('--n_hidden', type=int, action='store', default=256,
help='the number of hidden units of LSTM')
parser.add_argument('--n_layer', type=int, action='store', default=1,
help='the number of layers of LSTM')
parser.add_argument('--no-bi', action='store_true',
help='disable bidirectional LSTM encoder')
# length limit
parser.add_argument('--max_art', type=int, action='store', default=100,
                        help='maximum words in a single article sentence')
parser.add_argument('--max_abs', type=int, action='store', default=30,
                        help='maximum words in a single abstract sentence')
# training options
parser.add_argument('--lr', type=float, action='store', default=1e-3,
help='learning rate')
parser.add_argument('--decay', type=float, action='store', default=0.5,
help='learning rate decay ratio')
parser.add_argument('--lr_p', type=int, action='store', default=0,
help='patience for learning rate decay')
parser.add_argument('--clip', type=float, action='store', default=2.0,
help='gradient clipping')
parser.add_argument('--batch', type=int, action='store', default=32,
help='the training batch size')
parser.add_argument(
'--ckpt_freq', type=int, action='store', default=3000,
help='number of update steps for checkpoint and validation'
)
parser.add_argument('--patience', type=int, action='store', default=5,
help='patience for early stopping')
parser.add_argument('--debug', action='store_true',
help='run in debugging mode')
parser.add_argument('--no-cuda', action='store_true',
help='disable GPU training')
args = parser.parse_args()
args.bi = not args.no_bi
args.cuda = torch.cuda.is_available() and not args.no_cuda
main(args)
| 38.138393
| 99
| 0.638886
|
71503e0df20e900fa7f9883d5159fc985aa88ac9
| 518
|
py
|
Python
|
formatter.py
|
pooran/githubot
|
87afe422cb4ac6e3888324e67c50aeeff81f9527
|
[
"MIT"
] | 2
|
2018-03-08T16:57:54.000Z
|
2018-12-14T23:48:33.000Z
|
formatter.py
|
pooran/githubot
|
87afe422cb4ac6e3888324e67c50aeeff81f9527
|
[
"MIT"
] | null | null | null |
formatter.py
|
pooran/githubot
|
87afe422cb4ac6e3888324e67c50aeeff81f9527
|
[
"MIT"
] | 3
|
2018-09-01T16:17:07.000Z
|
2021-10-13T03:41:13.000Z
|
import re
def format_issue(author, number, title, text, url):
max_length = 4000
if len(title + text) > max_length:
text = text[:max_length - len(title)] + '...'
string = ('<b>Issue #{}</b>\n'
'<b>Título:</b> {}\n'
'<b>Autor:</b> {}\n\n'
'{}\n\n'
'<a href="{}">Link</a>'.format(number, title, author, text,
url))
return string
def fix_html(text):
return re.sub('<.*?>', 'IMAGEN', text)
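# Worked examples (illustrative only, not part of the original module):
#   fix_html('see <img src="x.png"> here')  ->  'see IMAGEN here'
#   format_issue('alice', 7, 'Bug', 'It crashes', 'https://example.com')
#     returns the HTML-formatted message string; when title + text exceed
#     4000 characters the text is truncated and suffixed with '...'.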
| 27.263158
| 73
| 0.449807
|
0325cc5c74249fc957fecf49949974ad375ea887
| 5,423
|
py
|
Python
|
lib/os/__init__.py
|
wangwansan/grumpy
|
aff3b3ff62a34c5baa9783307764f640df021337
|
[
"Apache-2.0"
] | 11,252
|
2017-01-04T16:19:12.000Z
|
2022-03-31T13:42:31.000Z
|
lib/os/__init__.py
|
wangwansan/grumpy
|
aff3b3ff62a34c5baa9783307764f640df021337
|
[
"Apache-2.0"
] | 301
|
2017-01-04T17:34:00.000Z
|
2022-03-15T21:40:21.000Z
|
lib/os/__init__.py
|
wangwansan/grumpy
|
aff3b3ff62a34c5baa9783307764f640df021337
|
[
"Apache-2.0"
] | 819
|
2017-01-04T17:26:26.000Z
|
2022-03-20T14:11:28.000Z
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous operating system interfaces."""
# pylint: disable=g-multiple-import
from '__go__/io/ioutil' import ReadDir
from '__go__/os' import (Chdir, Chmod, Environ, Getpid as getpid, Getwd, Pipe,
ProcAttr, Remove, StartProcess, Stat, Stdout, Stdin,
Stderr, Mkdir)
from '__go__/path/filepath' import Separator
from '__go__/grumpy' import (NewFileFromFD, StartThread, ToNative)
from '__go__/reflect' import MakeSlice
from '__go__/runtime' import GOOS
from '__go__/syscall' import (Close, SYS_FCNTL, Syscall, F_GETFD, Wait4,
WaitStatus, WNOHANG)
from '__go__/sync' import WaitGroup
from '__go__/time' import Second
import _syscall
from os import path
import stat as stat_module
import sys
sep = chr(Separator)
error = OSError # pylint: disable=invalid-name
curdir = '.'
name = 'posix'
environ = {}
for var in Environ():
k, v = var.split('=', 1)
environ[k] = v
def mkdir(path, mode=0o777):
err = Mkdir(path, mode)
if err:
raise OSError(err.Error())
def chdir(path):
err = Chdir(path)
if err:
raise OSError(err.Error())
def chmod(filepath, mode):
# TODO: Support mode flags other than perms.
err = Chmod(filepath, stat(filepath).st_mode & ~0o777 | mode & 0o777)
if err:
raise OSError(err.Error())
def close(fd):
err = Close(fd)
if err:
raise OSError(err.Error())
def fdopen(fd, mode='r'): # pylint: disable=unused-argument
# Ensure this is a valid file descriptor to match CPython behavior.
_, _, err = Syscall(SYS_FCNTL, fd, F_GETFD, 0)
if err:
raise OSError(err.Error())
return NewFileFromFD(fd, None)
def listdir(p):
files, err = ReadDir(p)
if err:
raise OSError(err.Error())
return [x.Name() for x in files]
def getcwd():
dir, err = Getwd()
if err:
raise OSError(err.Error())
return dir
class _Popen(object):
def __init__(self, command, mode):
self.mode = mode
self.result = None
self.r, self.w, err = Pipe()
if err:
raise OSError(err.Error())
attr = ProcAttr.new()
# Create a slice using a reflect.Type returned by ToNative.
# TODO: There should be a cleaner way to create slices in Python.
files_type = ToNative(__frame__(), attr.Files).Type()
files = MakeSlice(files_type, 3, 3).Interface()
if self.mode == 'r':
fd = self.r.Fd()
files[0], files[1], files[2] = Stdin, self.w, Stderr
elif self.mode == 'w':
fd = self.w.Fd()
files[0], files[1], files[2] = self.r, Stdout, Stderr
else:
      raise ValueError('invalid popen mode: %r' % self.mode)
attr.Files = files
# TODO: There should be a cleaner way to create slices in Python.
args_type = ToNative(__frame__(), StartProcess).Type().In(1)
args = MakeSlice(args_type, 3, 3).Interface()
shell = environ['SHELL']
args[0] = shell
args[1] = '-c'
args[2] = command
self.proc, err = StartProcess(shell, args, attr)
if err:
raise OSError(err.Error())
self.wg = WaitGroup.new()
self.wg.Add(1)
StartThread(self._thread_func)
self.file = NewFileFromFD(fd, self.close)
def _thread_func(self):
self.result = self.proc.Wait()
if self.mode == 'r':
self.w.Close()
self.wg.Done()
def close(self, _):
if self.mode == 'w':
self.w.Close()
self.wg.Wait()
state, err = self.result
if err:
raise OSError(err.Error())
return state.Sys()
def popen(command, mode='r'):
return _Popen(command, mode).file
def remove(filepath):
if stat_module.S_ISDIR(stat(filepath).st_mode):
raise OSError('Operation not permitted: ' + filepath)
err = Remove(filepath)
if err:
raise OSError(err.Error())
def rmdir(filepath):
if not stat_module.S_ISDIR(stat(filepath).st_mode):
raise OSError('Operation not permitted: ' + filepath)
err = Remove(filepath)
if err:
raise OSError(err.Error())
class StatResult(object):
def __init__(self, info):
self._info = info
def st_mode(self):
# TODO: This is an incomplete mode flag. It should include S_IFDIR, etc.
return self._info.Mode()
# TODO: Make this a decorator once they're implemented.
st_mode = property(st_mode)
def st_mtime(self):
return float(self._info.ModTime().UnixNano()) / Second
# TODO: Make this a decorator once they're implemented.
st_mtime = property(st_mtime)
def st_size(self):
return self._info.Size()
# TODO: Make this a decorator once they're implemented.
st_size = property(st_size)
def stat(filepath):
info, err = Stat(filepath)
if err:
raise OSError(err.Error())
return StatResult(info)
unlink = remove
def waitpid(pid, options):
status = WaitStatus.new()
_syscall.invoke(Wait4, pid, status, options, None)
return pid, _encode_wait_result(status)
def _encode_wait_result(status):
return status.Signal() | (status.ExitStatus() << 8)
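# For reference (illustrative comment): the packing above mirrors the
# traditional waitpid() status layout, low byte = terminating signal and the
# next byte = exit status; e.g. a signal value of 0 with exit status 1 packs
# to 0 | (1 << 8) == 256.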
| 26.072115
| 78
| 0.678776
|
678b32f7a9fb0553d6b1753a5ea827c3bd12858a
| 2,005
|
py
|
Python
|
2021/day25.py
|
mbcollins2/aoc
|
b94380fd5e92b4fe9f4af654e7762174c1c6ac91
|
[
"MIT"
] | null | null | null |
2021/day25.py
|
mbcollins2/aoc
|
b94380fd5e92b4fe9f4af654e7762174c1c6ac91
|
[
"MIT"
] | 3
|
2021-12-15T19:12:38.000Z
|
2021-12-15T19:14:42.000Z
|
2021/day25.py
|
mbcollins2/aoc
|
b94380fd5e92b4fe9f4af654e7762174c1c6ac91
|
[
"MIT"
] | null | null | null |
from util.util import base
from util import grid
from collections import defaultdict
class solve_day(base):
def __init__(self, type='data'):
super().__init__(type=type)
self.data = grid.parse_as_grid(self.data, dtype='str')
def get_moves(self, dir):
moves = defaultdict(str)
for row_i, row in enumerate(self.data):
row_check = row_i+1 if row_i < len(self.data)-1 else 0
for column_i, column in enumerate(row):
column_check = column_i+1 if column_i < len(row)-1 else 0
# east moves
if dir == 'east':
if column == '>':
if self.data[row_i][column_check] == '.':
moves[(row_i, column_i)] = '.'
moves[(row_i, column_check)] = '>'
# south moves
if dir == 'south':
if column == 'v':
if self.data[row_check][column_i] == '.':
moves[(row_i, column_i)] = '.'
moves[(row_check, column_i)] = 'v'
return moves
def make_moves(self, moves):
for coord in moves:
self.data[coord[0]][coord[1]] = moves[coord]
def part1(self):
i=0
while True:
i += 1
no_move_count = 0
moves = self.get_moves(dir='east')
if len(moves) == 0:
no_move_count += 1
pass
else:
self.make_moves(moves)
moves = self.get_moves(dir='south')
if len(moves) == 0:
no_move_count += 1
pass
else:
self.make_moves(moves)
if no_move_count == 2:
return i
def part2(self):
pass
if __name__ == '__main__':
s = solve_day('lines')
s.sub(s.part1(), part='a')
s.sub(s.part2(), part='b')
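# Illustrative behaviour (assuming the grid is parsed as rows of '.', '>', 'v'):
# in a row ['.', '.', '>'] the '>' sits in the last column, so column_check
# wraps to 0 and it moves into column 0 on the next step; part1 returns the
# first step number on which neither the east nor the south herd moves.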
| 27.847222
| 73
| 0.458853
|
b79e6501a7f82561d1ab03a6129fde115713c666
| 17,243
|
py
|
Python
|
octis/dataset/dataset.py
|
atypon/OCTIS
|
ff49bb3785197ca929c5626a3f04fbb917de2376
|
[
"MIT"
] | null | null | null |
octis/dataset/dataset.py
|
atypon/OCTIS
|
ff49bb3785197ca929c5626a3f04fbb917de2376
|
[
"MIT"
] | null | null | null |
octis/dataset/dataset.py
|
atypon/OCTIS
|
ff49bb3785197ca929c5626a3f04fbb917de2376
|
[
"MIT"
] | null | null | null |
import codecs
import json
import pickle
from os.path import join, exists
from pathlib import Path
import pandas as pd
from octis.dataset.downloader import get_data_home, _pkl_filepath, download_dataset
class Dataset:
"""
Dataset handles a dataset and offers methods to access, save and edit the dataset data
"""
def __init__(self, corpus=None, raw_corpus=None, vocabulary=None, labels=None, covariates=None, metadata=None,
document_indexes=None):
"""
        Initialize a dataset; parameters are optional.
        If you want to load a dataset, initialize this
        class with default values and use the load method.
Parameters
----------
corpus : corpus of the dataset
vocabulary : vocabulary of the dataset
labels : labels of the dataset
metadata : metadata of the dataset
"""
self.__corpus = corpus
self.__raw_corpus = raw_corpus
self.__vocabulary = vocabulary
self.__metadata = metadata
self.__labels = labels
self.__covariates = covariates
        self.__original_indexes = document_indexes
        # edges are only populated by _load_edges(); initialize so that
        # get_edges() does not raise AttributeError before loading
        self.__edges = None
self.dataset_path = None
self.is_cached = False
# Partitioned Corpus getter
def get_partitioned_corpus(self, use_validation=True):
if "last-training-doc" in self.__metadata:
last_training_doc = self.__metadata["last-training-doc"]
if use_validation:
last_validation_doc = self.__metadata["last-validation-doc"]
if self.__corpus is not None and last_training_doc != 0:
train_corpus = []
test_corpus = []
validation_corpus = []
for i in range(last_training_doc):
train_corpus.append(self.__corpus[i])
for i in range(last_training_doc, last_validation_doc):
validation_corpus.append(self.__corpus[i])
for i in range(last_validation_doc, len(self.__corpus)):
test_corpus.append(self.__corpus[i])
return train_corpus, validation_corpus, test_corpus
else:
if self.__corpus is not None and last_training_doc != 0:
if "last-validation-doc" in self.__metadata.keys():
last_validation_doc = self.__metadata["last-validation-doc"]
else:
last_validation_doc = 0
train_corpus = []
test_corpus = []
for i in range(last_training_doc):
train_corpus.append(self.__corpus[i])
if last_validation_doc != 0:
for i in range(last_validation_doc, len(self.__corpus)):
test_corpus.append(self.__corpus[i])
else:
for i in range(last_training_doc, len(self.__corpus)):
test_corpus.append(self.__corpus[i])
return train_corpus, test_corpus
else:
return [self.__corpus]
def get_split_indices(self, use_validation=False):
train_indices = None
test_indices = None
valid_indices = None
if "last-training-doc" in self.__metadata:
last_training_doc = self.__metadata["last-training-doc"]
if use_validation:
last_validation_doc = self.__metadata["last-validation-doc"]
if self.__corpus is not None and last_training_doc != 0:
train_indices = list(range(0, last_training_doc))
valid_indices = list(range(last_training_doc, last_validation_doc))
test_indices = list(range(last_validation_doc, len(self.__corpus)))
else:
if self.__corpus is not None and last_training_doc != 0:
if "last-validation-doc" in self.__metadata.keys():
last_validation_doc = self.__metadata["last-validation-doc"]
else:
last_validation_doc = 0
train_indices = list(range(0, last_training_doc))
if last_validation_doc != 0:
test_indices = list(range(last_validation_doc, len(self.__corpus)))
else:
test_indices = list(range(last_training_doc, len(self.__corpus)))
return train_indices, test_indices, valid_indices
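    # Illustrative example (hypothetical metadata): with a 5-document corpus
    # and metadata {"last-training-doc": 3, "last-validation-doc": 4},
    # get_split_indices(use_validation=True) returns
    # ([0, 1, 2], [4], [3]) as (train, test, valid) index lists.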
# Corpus getter
def get_raw_corpus(self):
return self.__raw_corpus
# Corpus getter
def get_corpus(self):
return self.__corpus
# Indexes getter
def get_document_indexes(self):
return self.__original_indexes
# Edges getter
def get_edges(self):
return self.__edges
# Labels getter
def get_labels(self):
return self.__labels
# Covariates getter
def get_covariates(self):
return self.__covariates
# Metadata getter
def get_metadata(self):
return self.__metadata
# Info getter
def get_info(self):
if "info" in self.__metadata:
return self.__metadata["info"]
else:
return None
# Vocabulary getter
def get_vocabulary(self):
return self.__vocabulary
def _save_metadata(self, file_name):
"""
Saves metadata in json serialized format
Parameters
----------
file_name : name of the file to write
Returns
-------
True if the data is saved
"""
data = self.get_metadata()
if data is not None:
with open(file_name, 'w') as outfile:
json.dump(data, outfile)
return True
else:
raise Exception("error in saving metadata")
def _load_metadata(self, file_name):
"""
Loads metadata from json serialized format
Parameters
----------
file_name : name of the file to read
"""
file = Path(file_name)
if file.is_file():
with open(file_name, 'r') as metadata_file:
metadata = json.load(metadata_file)
self.__metadata = metadata
def _load_corpus(self, file_name):
"""
Loads corpus from a file
Parameters
----------
file_name : name of the file to read
"""
file = Path(file_name)
if file.is_file():
with open(file_name, 'r') as corpus_file:
corpus = [line.strip().split() for line in corpus_file]
self.__corpus = corpus
else:
raise Exception("error in loading corpus")
def _load_raw_corpus(self, file_name):
"""
Loads raw corpus from a file
Parameters
----------
file_name : name of the file to read
"""
file = Path(file_name)
if file.is_file():
with open(file_name, 'r') as corpus_file:
corpus = [line.strip().split() for line in corpus_file]
self.__raw_corpus = corpus
else:
raise Exception("error in loading raw corpus")
def _save_edges(self, file_name):
"""
Saves edges in a file, a line for each document
Parameters
----------
file_name : name of the file to write
"""
data = self.get_edges()
if data is not None:
with open(file_name, 'w') as outfile:
for element in data:
outfile.write("%s\n" % element)
else:
raise Exception("error in saving edges")
def _load_edges(self, file_name):
"""
Loads edges from a file
Parameters
----------
file_name : name of the file to read
"""
file = Path(file_name)
if file.is_file():
with open(file_name, 'r') as edges_file:
edges = [line[0:len(line) - 1] for line in edges_file]
self.__edges = edges
def _save_labels(self, file_name):
"""
Saves the labels in a file, each line contains
the labels of a single document
Parameters
----------
file_name : name of the file to write
"""
data = self.get_labels()
if data is not None:
with open(file_name, 'w') as outfile:
for element in data:
outfile.write("%s\n" % json.dumps(element))
else:
raise Exception("error in saving labels")
def _load_labels(self, file_name):
"""
Loads labels from a file
Parameters
----------
        file_name : name of the file to read
        """
file = Path(file_name)
if file.is_file():
with open(file_name, 'r') as labels_file:
labels = [json.loads(line.strip()) for line in labels_file]
self.__labels = labels
def _load_covariates(self, file_name):
"""
Loads covariates from a file
Parameters
----------
        file_name : name of the file to read
        """
file = Path(file_name)
if file.is_file():
with open(file_name, 'r') as covariates_file:
covariates = [json.loads(line.strip()) for line in covariates_file]
self.__covariates = covariates
def _save_vocabulary(self, file_name):
"""
Saves vocabulary dictionary in a file
Parameters
----------
        file_name : name of the file to write
        """
data = self.get_vocabulary()
if data is not None:
with open(file_name, 'w', encoding='utf8') as outfile:
for word in data:
outfile.write(word + "\n")
else:
raise Exception("error in saving vocabulary")
def _save_document_indexes(self, file_name):
"""
Saves document indexes in a file
Parameters
----------
        file_name : name of the file to write
        """
if self.__original_indexes is not None:
with open(file_name, 'w') as outfile:
for i in self.__original_indexes:
outfile.write(str(i) + "\n")
def _load_vocabulary(self, file_name):
"""
Loads vocabulary from a file
Parameters
----------
file_name : name of the file to read
"""
vocabulary = []
file = Path(file_name)
if file.is_file():
with open(file_name, 'r') as vocabulary_file:
for line in vocabulary_file:
vocabulary.append(line.strip())
self.__vocabulary = vocabulary
else:
raise Exception("error in loading vocabulary")
def _load_document_indexes(self, file_name):
"""
Loads document indexes from a file
Parameters
----------
file_name : name of the file to read
"""
document_indexes = []
file = Path(file_name)
if file.is_file():
with open(file_name, 'r') as indexes_file:
for line in indexes_file:
document_indexes.append(line.strip())
self.__original_indexes = document_indexes
else:
raise Exception("error in loading vocabulary")
def save(self, path):
"""
Saves all the dataset info in a folder
Parameters
----------
path : path to the folder in which files are saved.
If the folder doesn't exist it will be created
"""
Path(path).mkdir(parents=True, exist_ok=True)
try:
partitions = self.get_partitioned_corpus()
corpus, partition = [], []
for i, p in enumerate(partitions):
if i == 0:
part = 'train'
elif i == 1 and len(partitions) == 3:
part = 'val'
else:
part = 'test'
for doc in p:
corpus.append(' '.join(doc))
partition.append(part)
df = pd.DataFrame(data=corpus)
df = pd.concat([df, pd.DataFrame(partition)], axis=1)
            # only build the label/covariate columns when they exist; joining
            # over None would otherwise raise a TypeError
            if self.__labels:
                labs = [' '.join(lab) if isinstance(lab, list) else str(lab)
                        for lab in self.__labels]
                df = pd.concat([df, pd.DataFrame(labs)], axis=1)
            if self.__covariates:
                covars = [' '.join(cov) if isinstance(cov, list) else str(cov)
                          for cov in self.__covariates]
                df = pd.concat([df, pd.DataFrame(covars)], axis=1)
df.to_csv(path + '/corpus.tsv', sep='\t', index=False, header=False)
self._save_vocabulary(path + "/vocabulary.txt")
self._save_metadata(path + "/metadata.json")
self._save_document_indexes(path + "/indexes.txt")
self.dataset_path = path
        except Exception as e:
            raise Exception("error in saving the dataset: " + str(e))
    # TODO :: update this method to load labels, covariates, and edges
def load_custom_dataset_from_folder(self, path, multilabel=False):
"""
Loads all the dataset from a folder
Parameters
----------
path : path of the folder to read
"""
self.dataset_path = path
try:
if exists(self.dataset_path + "/metadata.json"):
self._load_metadata(self.dataset_path + "/metadata.json")
else:
self.__metadata = dict()
df = pd.read_csv(self.dataset_path + "/corpus.tsv", sep='\t', header=None)
if len(df.keys()) > 1:
# just make sure docs are sorted in the right way (train - val - test)
                final_df = pd.concat(
                    [df[df[1] == 'train'], df[df[1] == 'val'], df[df[1] == 'test']])
self.__metadata['last-training-doc'] = len(final_df[final_df[1] == 'train'])
self.__metadata['last-validation-doc'] = len(final_df[final_df[1] == 'val']) + \
len(final_df[final_df[1] == 'train'])
self.__corpus = [d.split() for d in final_df[0].tolist()]
if len(final_df.keys()) > 2:
if multilabel:
self.__labels = [doc.split() for doc in final_df[2].tolist()]
else:
self.__labels = final_df[2].tolist()
else:
self.__corpus = [d.split() for d in df[0].tolist()]
self.__metadata['last-training-doc'] = len(df[0])
if exists(self.dataset_path + "/vocabulary.txt"):
self._load_vocabulary(self.dataset_path + "/vocabulary.txt")
else:
vocab = set()
for d in self.__corpus:
for w in set(d):
vocab.add(w)
self.__vocabulary = list(vocab)
if exists(self.dataset_path + "/indexes.txt"):
self._load_document_indexes(self.dataset_path + "/indexes.txt")
        except Exception as e:
            raise Exception(
                "error in loading the dataset from " + self.dataset_path + ": " + str(e))
def fetch_dataset(self, dataset_name, data_home=None, download_if_missing=True):
"""Load the filenames and data from a dataset.
Parameters
----------
dataset_name: name of the dataset to download or retrieve
data_home : optional, default: None
Specify a download and cache folder for the datasets. If None,
all data is stored in '~/octis' subfolders.
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
"""
data_home = get_data_home(data_home=data_home)
cache_path = _pkl_filepath(data_home, dataset_name + ".pkz")
dataset_home = join(data_home, dataset_name)
cache = None
if exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_dataset(dataset_name, target_dir=dataset_home, cache_path=cache_path)
else:
raise IOError(dataset_name + ' dataset not found')
self.is_cached = True
self.__corpus = [d.split() for d in cache["corpus"]]
self.__raw_corpus = cache["raw_corpus"]
self.__vocabulary = cache["vocabulary"]
self.__metadata = cache["metadata"]
self.dataset_path = cache_path
self.__labels = cache["labels"]
self.__covariates = cache["covariates"]
| 36.377637 | 114 | 0.544801 |
4bb8e2093657109726e544796caf330dd10af5bf | 1,235 | py | Python | Angle_model_file/detect_img.py | HAIbingshuai/chinese_ocr | 36c06226b3762b2e516427579f2c2614770e60ae | ["MIT"] | 21 | 2019-11-16T15:12:17.000Z | 2022-02-24T03:08:44.000Z | Angle_model_file/detect_img.py | HAIbingshuai/chinese_ocr | 36c06226b3762b2e516427579f2c2614770e60ae | ["MIT"] | 1 | 2019-12-03T00:41:09.000Z | 2020-10-16T13:46:11.000Z | Angle_model_file/detect_img.py | HAIbingshuai/chinese_ocr | 36c06226b3762b2e516427579f2c2614770e60ae | ["MIT"] | 3 | 2019-12-10T02:43:20.000Z | 2021-05-12T07:31:41.000Z |
# coding=utf-8 python3.6
# ================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
# license='MIT License'
# Author : haibingshuai
# Created date: 2019/11/8 9:38
# Description :
# ================================================================
from Angle_model_file.text.opencv_dnn_detect import angle_detect
from PIL import Image
import cv2
import numpy as np
image_path = './data_test/img.jpeg'
image = cv2.imread(image_path)
angle = angle_detect(image)
if angle == 90:
im = Image.fromarray(image).transpose(Image.ROTATE_90)
img = np.array(im)
elif angle == 180:
im = Image.fromarray(image).transpose(Image.ROTATE_180)
img = np.array(im)
elif angle == 270:
    im = Image.fromarray(image).transpose(Image.ROTATE_270)
    img = np.array(im)
else:
    # no rotation needed when the detected angle is 0
    img = image
print(angle)
# result = union_rbox(result, 0.2)
# res = [{'text': x['text'],
# 'name': str(i),
# 'box': {'cx': x['cx'],
# 'cy': x['cy'],
# 'w': x['w'],
# 'h': x['h'],
# 'angle': x['degree']
#
# }
# } for i, x in enumerate(result)]
# res = adjust_box_to_origin(img, angle, res)  # adjust the boxes back to the original image
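# --- Illustrative helper (added; not part of the original script) ---
# The if/elif chain above can also be written as a lookup table. This helper
# sketches the same rotation logic and is not called anywhere in this script.
def rotate_to_upright(bgr_image, detected_angle):
    rotations = {90: Image.ROTATE_90, 180: Image.ROTATE_180, 270: Image.ROTATE_270}
    if detected_angle in rotations:
        return np.array(Image.fromarray(bgr_image).transpose(rotations[detected_angle]))
    # 0 degrees (or an unexpected value): return the image unchanged
    return bgr_image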
| 25.204082 | 66 | 0.519028 |
935eb517e05278cc10295b753607fdcd06ec500b | 3,409 | py | Python | modeling/backbones/resnet.py | CHENGY12/APNet | 95fc5b5893d562fe57600f9a1df589cf3711ee7b | ["MIT"] | 31 | 2021-07-29T15:52:33.000Z | 2022-03-17T07:01:48.000Z | modeling/backbones/resnet.py | Mawandasmat/APNet | 8ba6e078ff062415b2b2b34115bbadb4cfd6e827 | ["MIT"] | 7 | 2021-09-02T08:54:40.000Z | 2022-02-10T16:56:43.000Z | modeling/backbones/resnet.py | Mawandasmat/APNet | 8ba6e078ff062415b2b2b34115bbadb4cfd6e827 | ["MIT"] | 5 | 2021-08-30T00:55:18.000Z | 2021-10-19T03:13:11.000Z |
import math
import torch
from torch import nn
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, last_stride=2, block=Bottleneck, layers=[3, 4, 6, 3]):
self.inplanes = 64
super().__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
#self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=last_stride)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
#x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def load_param(self, model_path):
param_dict = torch.load(model_path)
for i in param_dict:
if 'fc' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
def random_init(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
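# --- Usage sketch (added for illustration; not part of the original file) ---
# Builds the backbone defined above and pushes a dummy batch through it. The
# 256x128 input size is an assumption typical of person re-identification
# pipelines, not something required by this file.
if __name__ == '__main__':
    backbone = ResNet(last_stride=1, block=Bottleneck, layers=[3, 4, 6, 3])
    backbone.random_init()
    dummy = torch.randn(2, 3, 256, 128)   # (batch, channels, height, width)
    features = backbone(dummy)            # feature map produced by layer4
    print(features.shape)                 # expected: torch.Size([2, 2048, 16, 8])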
| 31.859813 | 77 | 0.565855 |
ffeb0bcfbaa095d5f3c6c5a007d61e5e56a570fc | 21,269 | py | Python | core/controllers/topic_viewer_test.py | abhinavrastogi2627/oppia | 1a6321c18cb57653787e9fed395a32eae33fbecb | ["Apache-2.0"] | null | null | null | core/controllers/topic_viewer_test.py | abhinavrastogi2627/oppia | 1a6321c18cb57653787e9fed395a32eae33fbecb | ["Apache-2.0"] | null | null | null | core/controllers/topic_viewer_test.py | abhinavrastogi2627/oppia | 1a6321c18cb57653787e9fed395a32eae33fbecb | ["Apache-2.0"] | null | null | null |
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the topic viewer page."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from constants import constants
from core.domain import question_services
from core.domain import skill_services
from core.domain import story_domain
from core.domain import story_services
from core.domain import topic_domain
from core.domain import topic_services
from core.domain import user_services
from core.tests import test_utils
import feconf
import python_utils
class BaseTopicViewerControllerTests(test_utils.GenericTestBase):
def setUp(self):
"""Completes the sign-up process for the various users."""
super(BaseTopicViewerControllerTests, self).setUp()
self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
self.user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.admin = user_services.UserActionsInfo(self.admin_id)
self.topic_id = 'topic'
self.story_id_1 = 'story_id_1'
self.story_id_2 = 'story_id_2'
self.topic_id_1 = 'topic1'
self.topic_id_2 = 'topic2'
self.skill_id_1 = skill_services.get_new_skill_id()
self.skill_id_2 = skill_services.get_new_skill_id()
self.story_1 = story_domain.Story.create_default_story(
self.story_id_1, 'story_title', 'description', self.topic_id_1,
'story-frag-one')
self.story_1.description = 'story_description'
self.story_1.node_titles = []
self.story_2 = story_domain.Story.create_default_story(
self.story_id_2, 'story_title', 'description', self.topic_id_2,
'story-frag-two')
self.story_2.description = 'story_description'
self.story_2.node_titles = []
self.topic = topic_domain.Topic.create_default_topic(
self.topic_id, 'public_topic_name', 'public', 'description')
self.topic.uncategorized_skill_ids.append(self.skill_id_1)
self.topic.subtopics.append(topic_domain.Subtopic(
1, 'subtopic_name', [self.skill_id_2], 'image.svg',
constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0],
'subtopic-name'))
self.topic.next_subtopic_id = 2
self.topic.thumbnail_filename = 'Image.svg'
self.topic.thumbnail_bg_color = (
constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0])
self.topic.canonical_story_references.append(
topic_domain.StoryReference.create_default_story_reference(
self.story_id_1))
self.topic.additional_story_references.append(
topic_domain.StoryReference.create_default_story_reference(
self.story_id_2))
topic_services.save_new_topic(self.admin_id, self.topic)
story_services.save_new_story(self.admin_id, self.story_1)
story_services.save_new_story(self.admin_id, self.story_2)
self.topic = topic_domain.Topic.create_default_topic(
self.topic_id_1, 'private_topic_name',
'private_topic_name', 'description')
self.topic.thumbnail_filename = 'Image.svg'
self.topic.thumbnail_bg_color = (
constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0])
self.topic.url_fragment = 'private'
topic_services.save_new_topic(self.admin_id, self.topic)
topic_services.publish_topic(self.topic_id, self.admin_id)
topic_services.publish_story(
self.topic_id, self.story_id_1, self.admin_id)
topic_services.publish_story(
self.topic_id, self.story_id_2, self.admin_id)
self.save_new_skill(
self.skill_id_1, self.user_id, description='Skill Description 1')
self.save_new_skill(
self.skill_id_2, self.user_id, description='Skill Description 2')
skill_services.create_user_skill_mastery(
self.user_id, self.skill_id_1, 0.3)
skill_services.create_user_skill_mastery(
self.user_id, self.skill_id_2, 0.5)
class TopicViewerPageTests(BaseTopicViewerControllerTests):
def test_any_user_can_access_topic_viewer_page(self):
self.get_html_response('/learn/staging/%s' % 'public')
def test_accessibility_of_unpublished_topic_viewer_page(self):
self.get_html_response(
'/learn/staging/%s' % 'private',
expected_status_int=404)
self.login(self.ADMIN_EMAIL)
self.get_html_response('/learn/staging/%s' % 'private')
self.logout()
class TopicPageDataHandlerTests(
BaseTopicViewerControllerTests, test_utils.EmailTestBase):
def test_get_with_no_user_logged_in(self):
json_response = self.get_json(
'%s/staging/%s' % (feconf.TOPIC_DATA_HANDLER, 'public'))
expected_dict = {
'topic_name': 'public_topic_name',
'topic_id': self.topic_id,
'canonical_story_dicts': [{
'id': self.story_1.id,
'title': self.story_1.title,
'description': self.story_1.description,
'node_titles': self.story_1.node_titles,
'thumbnail_filename': None,
'thumbnail_bg_color': None,
'story_is_published': True,
'completed_node_titles': [],
'url_fragment': 'story-frag-one'
}],
'additional_story_dicts': [{
'id': self.story_2.id,
'title': self.story_2.title,
'description': self.story_2.description,
'node_titles': self.story_2.node_titles,
'thumbnail_filename': None,
'thumbnail_bg_color': None,
'story_is_published': True,
'completed_node_titles': [],
'url_fragment': 'story-frag-two'
}],
'uncategorized_skill_ids': [self.skill_id_1],
'subtopics': [{
u'thumbnail_filename': u'image.svg',
u'thumbnail_bg_color': u'#FFFFFF',
u'skill_ids': [self.skill_id_2],
u'id': 1,
u'title': u'subtopic_name',
u'url_fragment': u'subtopic-name'}],
'degrees_of_mastery': {
self.skill_id_1: None,
self.skill_id_2: None
},
'skill_descriptions': {
self.skill_id_1: 'Skill Description 1',
self.skill_id_2: 'Skill Description 2'
},
'train_tab_should_be_displayed': False
}
self.assertDictContainsSubset(expected_dict, json_response)
def test_get_with_user_logged_in(self):
skill_services.delete_skill(self.admin_id, self.skill_id_1)
self.login(self.NEW_USER_EMAIL)
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
messages = self._get_sent_email_messages(
feconf.ADMIN_EMAIL_ADDRESS)
self.assertEqual(len(messages), 0)
json_response = self.get_json(
'%s/staging/%s' % (feconf.TOPIC_DATA_HANDLER, 'public'))
messages = self._get_sent_email_messages(
feconf.ADMIN_EMAIL_ADDRESS)
expected_email_html_body = (
'The deleted skills: %s are still'
' present in topic with id %s' % (
self.skill_id_1, self.topic_id))
self.assertEqual(len(messages), 1)
self.assertIn(
expected_email_html_body,
messages[0].html.decode())
expected_dict = {
'topic_name': 'public_topic_name',
'topic_id': self.topic_id,
'canonical_story_dicts': [{
'id': self.story_1.id,
'title': self.story_1.title,
'description': self.story_1.description,
'node_titles': self.story_1.node_titles,
'thumbnail_filename': None,
'thumbnail_bg_color': None,
'story_is_published': True,
'completed_node_titles': [],
'url_fragment': 'story-frag-one'
}],
'additional_story_dicts': [{
'id': self.story_2.id,
'title': self.story_2.title,
'description': self.story_2.description,
'node_titles': self.story_2.node_titles,
'thumbnail_filename': None,
'thumbnail_bg_color': None,
'story_is_published': True,
'completed_node_titles': [],
'url_fragment': 'story-frag-two'
}],
'uncategorized_skill_ids': [self.skill_id_1],
'subtopics': [{
u'thumbnail_filename': u'image.svg',
u'thumbnail_bg_color': u'#FFFFFF',
u'skill_ids': [self.skill_id_2],
u'id': 1,
u'title': u'subtopic_name',
u'url_fragment': u'subtopic-name'}],
'degrees_of_mastery': {
self.skill_id_1: 0.3,
self.skill_id_2: 0.5
},
'skill_descriptions': {
self.skill_id_1: None,
self.skill_id_2: 'Skill Description 2'
},
'train_tab_should_be_displayed': False
}
self.assertDictContainsSubset(expected_dict, json_response)
self.logout()
def test_get_with_no_skills_ids(self):
self.topic = topic_domain.Topic.create_default_topic(
self.topic_id, 'topic_with_no_skills',
'topic-with-no-skills', 'description')
topic_services.save_new_topic(self.admin_id, self.topic)
topic_services.publish_topic(self.topic_id, self.admin_id)
json_response = self.get_json(
'%s/staging/%s' % (
feconf.TOPIC_DATA_HANDLER, 'topic-with-no-skills'))
expected_dict = {
'topic_name': 'topic_with_no_skills',
'topic_id': self.topic_id,
'canonical_story_dicts': [],
'additional_story_dicts': [],
'uncategorized_skill_ids': [],
'subtopics': [],
'degrees_of_mastery': {},
'skill_descriptions': {},
'train_tab_should_be_displayed': False
}
self.assertDictContainsSubset(expected_dict, json_response)
def test_get_with_five_or_more_questions(self):
number_of_questions = 6
self.topic_id = 'new_topic'
self.skill_id_1 = skill_services.get_new_skill_id()
self.topic = topic_domain.Topic.create_default_topic(
self.topic_id, 'new_topic', 'new-topic', 'description')
self.topic.uncategorized_skill_ids.append(self.skill_id_1)
self.topic.thumbnail_filename = 'Image.svg'
self.topic.thumbnail_bg_color = (
constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0])
topic_services.save_new_topic(self.admin_id, self.topic)
topic_services.publish_topic(self.topic_id, self.admin_id)
self.save_new_skill(
self.skill_id_1, self.admin_id, description='Skill Description 1')
for index in python_utils.RANGE(number_of_questions):
question_id = question_services.get_new_question_id()
self.save_new_question(
question_id, self.admin_id,
self._create_valid_question_data(index), [self.skill_id_1])
question_services.create_new_question_skill_link(
self.admin_id, question_id, self.skill_id_1, 0.5)
json_response = self.get_json(
'%s/staging/%s' % (feconf.TOPIC_DATA_HANDLER, 'new-topic'))
expected_dict = {
'topic_name': 'new_topic',
'topic_id': self.topic_id,
'canonical_story_dicts': [],
'additional_story_dicts': [],
'uncategorized_skill_ids': [self.skill_id_1],
'subtopics': [],
'degrees_of_mastery': {
self.skill_id_1: None
},
'skill_descriptions': {
self.skill_id_1: 'Skill Description 1'
},
'train_tab_should_be_displayed': True
}
self.assertDictContainsSubset(expected_dict, json_response)
self.logout()
def test_get_with_twenty_or_more_questions(self):
number_of_questions = 50
self.topic_id = 'new_topic'
self.skill_id_1 = skill_services.get_new_skill_id()
self.topic = topic_domain.Topic.create_default_topic(
self.topic_id, 'new_topic', 'new-topic', 'description')
self.topic.uncategorized_skill_ids.append(self.skill_id_1)
self.topic.thumbnail_filename = 'Image.svg'
self.topic.thumbnail_bg_color = (
constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0])
topic_services.save_new_topic(self.admin_id, self.topic)
topic_services.publish_topic(self.topic_id, self.admin_id)
self.save_new_skill(
self.skill_id_1, self.admin_id, description='Skill Description 1')
for index in python_utils.RANGE(number_of_questions):
question_id = question_services.get_new_question_id()
self.save_new_question(
question_id, self.admin_id,
self._create_valid_question_data(index), [self.skill_id_1])
question_services.create_new_question_skill_link(
self.admin_id, question_id, self.skill_id_1, 0.5)
json_response = self.get_json(
'%s/staging/%s' % (feconf.TOPIC_DATA_HANDLER, 'new-topic'))
expected_dict = {
'topic_name': 'new_topic',
'topic_id': self.topic_id,
'canonical_story_dicts': [],
'additional_story_dicts': [],
'uncategorized_skill_ids': [self.skill_id_1],
'subtopics': [],
'degrees_of_mastery': {
self.skill_id_1: None
},
'skill_descriptions': {
self.skill_id_1: 'Skill Description 1'
},
'train_tab_should_be_displayed': True
}
self.assertDictContainsSubset(expected_dict, json_response)
self.logout()
def test_get_with_twenty_or_more_questions_with_multiple_skills(self):
number_of_skills = 3
number_of_questions = [1, 2, 2]
self.topic_id = 'new_topic'
skill_ids = (
[skill_services.get_new_skill_id() for _ in python_utils.RANGE(
number_of_skills)])
self.topic = topic_domain.Topic.create_default_topic(
self.topic_id, 'new_topic', 'new-topic', 'description')
for index in python_utils.RANGE(number_of_skills):
self.topic.uncategorized_skill_ids.append(skill_ids[index])
self.topic.thumbnail_filename = 'Image.svg'
self.topic.thumbnail_bg_color = (
constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0])
topic_services.save_new_topic(self.admin_id, self.topic)
topic_services.publish_topic(self.topic_id, self.admin_id)
for i in python_utils.RANGE(number_of_skills):
self.save_new_skill(
skill_ids[i], self.admin_id,
description='Skill Description')
for i in python_utils.RANGE(number_of_skills):
for j in python_utils.RANGE(number_of_questions[i]):
question_id = question_services.get_new_question_id()
self.save_new_question(
question_id, self.admin_id,
self._create_valid_question_data(j), [skill_ids[i]])
question_services.create_new_question_skill_link(
self.admin_id, question_id, skill_ids[i], 0.5)
json_response = self.get_json(
'%s/staging/%s' % (feconf.TOPIC_DATA_HANDLER, 'new-topic'))
expected_dict = {
'topic_name': 'new_topic',
'topic_id': self.topic_id,
'canonical_story_dicts': [],
'additional_story_dicts': [],
'train_tab_should_be_displayed': True
}
self.assertDictContainsSubset(expected_dict, json_response)
self.logout()
def test_get_with_lesser_questions_with_fifty_or_more_skills(self):
number_of_skills = 60
number_of_questions = [0] * 60
number_of_questions[46] = 2
self.topic_id = 'new_topic'
skill_ids = (
[skill_services.get_new_skill_id() for _ in python_utils.RANGE(
number_of_skills)])
self.topic = topic_domain.Topic.create_default_topic(
self.topic_id, 'new_topic', 'new-topic', 'description')
for index in python_utils.RANGE(number_of_skills):
self.topic.uncategorized_skill_ids.append(skill_ids[index])
self.topic.thumbnail_filename = 'Image.svg'
self.topic.thumbnail_bg_color = (
constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0])
topic_services.save_new_topic(self.admin_id, self.topic)
topic_services.publish_topic(self.topic_id, self.admin_id)
for i in python_utils.RANGE(number_of_skills):
self.save_new_skill(
skill_ids[i], self.admin_id,
description='Skill Description')
for i in python_utils.RANGE(number_of_skills):
for j in python_utils.RANGE(number_of_questions[i]):
question_id = question_services.get_new_question_id()
self.save_new_question(
question_id, self.admin_id,
self._create_valid_question_data(j), [skill_ids[i]])
question_services.create_new_question_skill_link(
self.admin_id, question_id, skill_ids[i], 0.5)
json_response = self.get_json(
'%s/staging/%s' % (feconf.TOPIC_DATA_HANDLER, 'new-topic'))
expected_dict = {
'topic_name': 'new_topic',
'topic_id': self.topic_id,
'canonical_story_dicts': [],
'additional_story_dicts': [],
'train_tab_should_be_displayed': False
}
self.assertDictContainsSubset(expected_dict, json_response)
self.logout()
def test_get_with_more_questions_with_fifty_or_more_skills(self):
number_of_skills = 60
number_of_questions = [0] * 60
number_of_questions[46] = 2
number_of_questions[20] = 3
number_of_questions[29] = 10
self.topic_id = 'new_topic'
skill_ids = (
[skill_services.get_new_skill_id() for _ in python_utils.RANGE(
number_of_skills)])
self.topic = topic_domain.Topic.create_default_topic(
self.topic_id, 'new_topic', 'new-topic', 'description')
for index in python_utils.RANGE(number_of_skills):
self.topic.uncategorized_skill_ids.append(skill_ids[index])
self.topic.thumbnail_filename = 'Image.svg'
self.topic.thumbnail_bg_color = (
constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0])
topic_services.save_new_topic(self.admin_id, self.topic)
topic_services.publish_topic(self.topic_id, self.admin_id)
for i in python_utils.RANGE(number_of_skills):
self.save_new_skill(
skill_ids[i], self.admin_id,
description='Skill Description')
for i in python_utils.RANGE(number_of_skills):
for j in python_utils.RANGE(number_of_questions[i]):
question_id = question_services.get_new_question_id()
self.save_new_question(
question_id, self.admin_id,
self._create_valid_question_data(j), [skill_ids[i]])
question_services.create_new_question_skill_link(
self.admin_id, question_id, skill_ids[i], 0.5)
json_response = self.get_json(
'%s/staging/%s' % (feconf.TOPIC_DATA_HANDLER, 'new-topic'))
expected_dict = {
'topic_name': 'new_topic',
'topic_id': self.topic_id,
'canonical_story_dicts': [],
'additional_story_dicts': [],
'train_tab_should_be_displayed': True
}
self.assertDictContainsSubset(expected_dict, json_response)
self.logout()
| 44.966173 | 78 | 0.619493 |
eba5a441d78efa329f1bc931e66bad1f9d4556a0 | 806 | py | Python | globalitc/globalitc/urls.py | Ghevondyan04/new_globalitc.am | 7afffe58143590987b9e16a9d575bfa49ce3ef08 | ["Apache-2.0"] | null | null | null | globalitc/globalitc/urls.py | Ghevondyan04/new_globalitc.am | 7afffe58143590987b9e16a9d575bfa49ce3ef08 | ["Apache-2.0"] | null | null | null | globalitc/globalitc/urls.py | Ghevondyan04/new_globalitc.am | 7afffe58143590987b9e16a9d575bfa49ce3ef08 | ["Apache-2.0"] | null | null | null |
from django.contrib import admin
from django.urls import path, include
from home_page.views import home_view, services_list_view, fac_program_view, fac_web_program_view, fac_multimedia_view
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
# path('auth/', include('users.urls')),
path('', home_view, name="home"),
path('services', services_list_view, name="services_list"),
path('faculties/web', fac_web_program_view, name="faculties_web"),
path('faculties/programming', fac_program_view, name="faculties_programming"),
path('faculties/multimedia', fac_multimedia_view, name="faculties_multimedia")
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
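# --- Illustrative note (added; not part of the original file) ---
# Each path() above maps a URL to a view imported from home_page.views. In its
# simplest form such a view just renders a template; the template name below is
# a hypothetical example, not taken from this project:
#
#   from django.shortcuts import render
#
#   def home_view(request):
#       return render(request, 'home_page/home.html')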
| 42.421053 | 118 | 0.766749 |
b51d3979c662b2e11a126bfe14ae18a7c30e3266 | 34,449 | py | Python | ansible/lib/ansible/modules/core/files/unarchive.py | kiv-box/redis | 966a0c3f0a51282cd173b42a6e249d23f4e89dec | ["Apache-2.0"] | null | null | null | ansible/lib/ansible/modules/core/files/unarchive.py | kiv-box/redis | 966a0c3f0a51282cd173b42a6e249d23f4e89dec | ["Apache-2.0"] | null | null | null | ansible/lib/ansible/modules/core/files/unarchive.py | kiv-box/redis | 966a0c3f0a51282cd173b42a6e249d23f4e89dec | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Dylan Martin <dmartin@seattlecentral.edu>
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
# (c) 2016, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: unarchive
version_added: 1.4
short_description: Unpacks an archive after (optionally) copying it from the local machine.
extends_documentation_fragment: files
description:
- The M(unarchive) module unpacks an archive. By default, it will copy the source file from the local system to the target before unpacking - set remote_src=yes to unpack an archive which already exists on the target..
options:
src:
description:
- If remote_src=no (default), local path to archive file to copy to the target server; can be absolute or relative. If remote_src=yes, path on the target server to existing archive file to unpack.
- If remote_src=yes and src contains ://, the remote machine will download the file from the url first. (version_added 2.0)
required: true
default: null
dest:
description:
- Remote absolute path where the archive should be unpacked
required: true
default: null
copy:
description:
- "If true, the file is copied from local 'master' to the target machine, otherwise, the plugin will look for src archive at the target machine."
- "This option has been deprecated in favor of C(remote_src)"
- "This option is mutually exclusive with C(remote_src)."
required: false
choices: [ "yes", "no" ]
default: "yes"
creates:
description:
- a filename, when it already exists, this step will B(not) be run.
required: no
default: null
version_added: "1.6"
list_files:
description:
- If set to True, return the list of files that are contained in the tarball.
required: false
choices: [ "yes", "no" ]
default: "no"
version_added: "2.0"
exclude:
description:
- List the directory and file entries that you would like to exclude from the unarchive action.
required: false
default: []
version_added: "2.1"
keep_newer:
description:
- Do not replace existing files that are newer than files from the archive.
required: false
default: no
version_added: "2.1"
extra_opts:
description:
- Specify additional options by passing in an array.
default:
required: false
version_added: "2.1"
remote_src:
description:
- "Set to C(yes) to indicate the archived file is already on the remote system and not local to the Ansible controller."
- "This option is mutually exclusive with C(copy)."
required: false
default: "no"
choices: ["yes", "no"]
version_added: "2.2"
validate_certs:
description:
- This only applies if using a https url as the source of the file.
      - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
- Prior to 2.2 the code worked as if this was set to C(yes).
required: false
default: "yes"
choices: ["yes", "no"]
version_added: "2.2"
author: "Dag Wieers (@dagwieers)"
todo:
- re-implement tar support using native tarfile module
- re-implement zip support using native zipfile module
notes:
- requires C(gtar)/C(unzip) command on target host
- can handle I(.zip) files using C(unzip) as well as I(.tar), I(.tar.gz), I(.tar.bz2) and I(.tar.xz) files using C(gtar)
- uses gtar's C(--diff arg) to calculate if changed or not. If this C(arg) is not
supported, it will always unpack the archive
- existing files/directories in the destination which are not in the archive
are not touched. This is the same behavior as a normal archive extraction
- existing files/directories in the destination which are not in the archive
are ignored for purposes of deciding if the archive should be unpacked or not
'''
EXAMPLES = '''
# Example from Ansible Playbooks
- unarchive: src=foo.tgz dest=/var/lib/foo
# Unarchive a file that is already on the remote machine
- unarchive: src=/tmp/foo.zip dest=/usr/local/bin remote_src=yes
# Unarchive a file that needs to be downloaded (added in 2.0)
- unarchive: src=https://example.com/example.zip dest=/usr/local/bin remote_src=yes
'''
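# Additional illustrative example (added; not part of the original module
# documentation). The option names come from the DOCUMENTATION block above,
# while the concrete paths and values are hypothetical:
#
#   - unarchive: src=release.tar.gz dest=/opt/app creates=/opt/app/VERSION list_files=yes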
import re
import os
import stat
import pwd
import grp
import datetime
import time
import binascii
import codecs
from zipfile import ZipFile, BadZipfile
from ansible.module_utils._text import to_text
try: # python 3.3+
from shlex import quote
except ImportError: # older python
from pipes import quote
# String from tar that shows the tar contents are different from the
# filesystem
OWNER_DIFF_RE = re.compile(r': Uid differs$')
GROUP_DIFF_RE = re.compile(r': Gid differs$')
MODE_DIFF_RE = re.compile(r': Mode differs$')
MOD_TIME_DIFF_RE = re.compile(r': Mod time differs$')
#NEWER_DIFF_RE = re.compile(r' is newer or same age.$')
EMPTY_FILE_RE = re.compile(r': : Warning: Cannot stat: No such file or directory$')
MISSING_FILE_RE = re.compile(r': Warning: Cannot stat: No such file or directory$')
ZIP_FILE_MODE_RE = re.compile(r'([r-][w-][SsTtx-]){3}')
# When downloading an archive, how much of the archive to download before
# saving to a tempfile (64k)
BUFSIZE = 65536
def crc32(path):
''' Return a CRC32 checksum of a file '''
return binascii.crc32(open(path).read()) & 0xffffffff
def shell_escape(string):
''' Quote meta-characters in the args for the unix shell '''
return re.sub(r'([^A-Za-z0-9_])', r'\\\1', string)
class UnarchiveError(Exception):
pass
# class to handle .zip files
class ZipArchive(object):
def __init__(self, src, dest, file_args, module):
self.src = src
self.dest = dest
self.file_args = file_args
self.opts = module.params['extra_opts']
self.module = module
self.excludes = module.params['exclude']
self.includes = []
self.cmd_path = self.module.get_bin_path('unzip')
self._files_in_archive = []
self._infodict = dict()
def _permstr_to_octal(self, modestr, umask):
''' Convert a Unix permission string (rw-r--r--) into a mode (0644) '''
revstr = modestr[::-1]
mode = 0
for j in range(0, 3):
for i in range(0, 3):
if revstr[i+3*j] in ['r', 'w', 'x', 's', 't']:
mode += 2**(i+3*j)
# The unzip utility does not support setting the stST bits
# if revstr[i+3*j] in ['s', 't', 'S', 'T' ]:
# mode += 2**(9+j)
return ( mode & ~umask )
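    # Worked example (added comment): with umask 0, the permission string
    # 'rw-r--r--' reversed is '--r--r-wr'; the set bits are 2 (4), 5 (32),
    # 7 (128) and 8 (256), so the result is 4+32+128+256 = 420 = 0o644.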
def _legacy_file_list(self, force_refresh=False):
unzip_bin = self.module.get_bin_path('unzip')
if not unzip_bin:
raise UnarchiveError('Python Zipfile cannot read %s and unzip not found' % self.src)
rc, out, err = self.module.run_command([unzip_bin, '-v', self.src])
if rc:
raise UnarchiveError('Neither python zipfile nor unzip can read %s' % self.src)
for line in out.splitlines()[3:-2]:
fields = line.split(None, 7)
self._files_in_archive.append(fields[7])
self._infodict[fields[7]] = long(fields[6])
def _crc32(self, path):
if self._infodict:
return self._infodict[path]
try:
archive = ZipFile(self.src)
except BadZipfile:
e = get_exception()
if e.args[0].lower().startswith('bad magic number'):
# Python2.4 can't handle zipfiles with > 64K files. Try using
# /usr/bin/unzip instead
self._legacy_file_list()
else:
raise
else:
try:
for item in archive.infolist():
self._infodict[item.filename] = long(item.CRC)
except:
archive.close()
raise UnarchiveError('Unable to list files in the archive')
return self._infodict[path]
@property
def files_in_archive(self, force_refresh=False):
if self._files_in_archive and not force_refresh:
return self._files_in_archive
self._files_in_archive = []
try:
archive = ZipFile(self.src)
except BadZipfile:
e = get_exception()
if e.args[0].lower().startswith('bad magic number'):
# Python2.4 can't handle zipfiles with > 64K files. Try using
# /usr/bin/unzip instead
self._legacy_file_list(force_refresh)
else:
raise
else:
try:
for member in archive.namelist():
if member not in self.excludes:
self._files_in_archive.append(to_native(member))
except:
archive.close()
raise UnarchiveError('Unable to list files in the archive')
archive.close()
return self._files_in_archive
def is_unarchived(self):
cmd = [ self.cmd_path, '-ZT', '-s', self.src ]
if self.excludes:
cmd.extend([ ' -x ', ] + self.excludes)
rc, out, err = self.module.run_command(cmd)
old_out = out
diff = ''
out = ''
if rc == 0:
unarchived = True
else:
unarchived = False
# Get some information related to user/group ownership
umask = os.umask(0)
os.umask(umask)
# Get current user and group information
groups = os.getgroups()
run_uid = os.getuid()
run_gid = os.getgid()
try:
run_owner = pwd.getpwuid(run_uid).pw_name
except:
run_owner = run_uid
try:
run_group = grp.getgrgid(run_gid).gr_name
except:
run_group = run_gid
# Get future user ownership
fut_owner = fut_uid = None
if self.file_args['owner']:
try:
                tpw = pwd.getpwnam(self.file_args['owner'])
except:
try:
tpw = pwd.getpwuid(self.file_args['owner'])
except:
tpw = pwd.getpwuid(run_uid)
fut_owner = tpw.pw_name
fut_uid = tpw.pw_uid
else:
try:
fut_owner = run_owner
except:
pass
fut_uid = run_uid
# Get future group ownership
fut_group = fut_gid = None
if self.file_args['group']:
try:
tgr = grp.getgrnam(self.file_args['group'])
except:
try:
tgr = grp.getgrgid(self.file_args['group'])
except:
tgr = grp.getgrgid(run_gid)
fut_group = tgr.gr_name
fut_gid = tgr.gr_gid
else:
try:
fut_group = run_group
except:
pass
fut_gid = run_gid
for line in old_out.splitlines():
change = False
pcs = line.split(None, 7)
if len(pcs) != 8:
# Too few fields... probably a piece of the header or footer
continue
# Check first and seventh field in order to skip header/footer
if len(pcs[0]) != 7 and len(pcs[0]) != 10: continue
if len(pcs[6]) != 15: continue
# Possible entries:
# -rw-rws--- 1.9 unx 2802 t- defX 11-Aug-91 13:48 perms.2660
# -rw-a-- 1.0 hpf 5358 Tl i4:3 4-Dec-91 11:33 longfilename.hpfs
# -r--ahs 1.1 fat 4096 b- i4:2 14-Jul-91 12:58 EA DATA. SF
# --w------- 1.0 mac 17357 bx i8:2 4-May-92 04:02 unzip.macr
if pcs[0][0] not in 'dl-?' or not frozenset(pcs[0][1:]).issubset('rwxstah-'):
continue
ztype = pcs[0][0]
permstr = pcs[0][1:]
version = pcs[1]
ostype = pcs[2]
size = int(pcs[3])
path = to_text(pcs[7], errors='surrogate_or_strict')
# Skip excluded files
if path in self.excludes:
out += 'Path %s is excluded on request\n' % path
continue
# Itemized change requires L for symlink
if path[-1] == '/':
if ztype != 'd':
err += 'Path %s incorrectly tagged as "%s", but is a directory.\n' % (path, ztype)
ftype = 'd'
elif ztype == 'l':
ftype = 'L'
elif ztype == '-':
ftype = 'f'
elif ztype == '?':
ftype = 'f'
# Some files may be storing FAT permissions, not Unix permissions
if len(permstr) == 6:
if path[-1] == '/':
permstr = 'rwxrwxrwx'
elif permstr == 'rwx---':
permstr = 'rwxrwxrwx'
else:
permstr = 'rw-rw-rw-'
# Test string conformity
if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr):
raise UnarchiveError('ZIP info perm format incorrect, %s' % permstr)
# DEBUG
# err += "%s%s %10d %s\n" % (ztype, permstr, size, path)
dest = os.path.join(self.dest, path)
try:
st = os.lstat(dest)
except:
change = True
self.includes.append(path)
err += 'Path %s is missing\n' % path
diff += '>%s++++++.?? %s\n' % (ftype, path)
continue
# Compare file types
if ftype == 'd' and not stat.S_ISDIR(st.st_mode):
change = True
self.includes.append(path)
err += 'File %s already exists, but not as a directory\n' % path
diff += 'c%s++++++.?? %s\n' % (ftype, path)
continue
if ftype == 'f' and not stat.S_ISREG(st.st_mode):
change = True
unarchived = False
self.includes.append(path)
err += 'Directory %s already exists, but not as a regular file\n' % path
diff += 'c%s++++++.?? %s\n' % (ftype, path)
continue
if ftype == 'L' and not stat.S_ISLNK(st.st_mode):
change = True
self.includes.append(path)
err += 'Directory %s already exists, but not as a symlink\n' % path
diff += 'c%s++++++.?? %s\n' % (ftype, path)
continue
itemized = list('.%s.......??' % ftype)
# Note: this timestamp calculation has a rounding error
# somewhere... unzip and this timestamp can be one second off
# When that happens, we report a change and re-unzip the file
dt_object = datetime.datetime(*(time.strptime(pcs[6], '%Y%m%d.%H%M%S')[0:6]))
timestamp = time.mktime(dt_object.timetuple())
# Compare file timestamps
if stat.S_ISREG(st.st_mode):
if self.module.params['keep_newer']:
if timestamp > st.st_mtime:
change = True
self.includes.append(path)
err += 'File %s is older, replacing file\n' % path
itemized[4] = 't'
elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime:
# Add to excluded files, ignore other changes
out += 'File %s is newer, excluding file\n' % path
self.excludes.append(path)
continue
else:
if timestamp != st.st_mtime:
change = True
self.includes.append(path)
err += 'File %s differs in mtime (%f vs %f)\n' % (path, timestamp, st.st_mtime)
itemized[4] = 't'
# Compare file sizes
if stat.S_ISREG(st.st_mode) and size != st.st_size:
change = True
err += 'File %s differs in size (%d vs %d)\n' % (path, size, st.st_size)
itemized[3] = 's'
# Compare file checksums
if stat.S_ISREG(st.st_mode):
crc = crc32(dest)
if crc != self._crc32(path):
change = True
err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\n' % (path, self._crc32(path), crc)
itemized[2] = 'c'
# Compare file permissions
# Do not handle permissions of symlinks
if ftype != 'L':
# Use the new mode provided with the action, if there is one
if self.file_args['mode']:
if isinstance(self.file_args['mode'], int):
mode = self.file_args['mode']
else:
try:
mode = int(self.file_args['mode'], 8)
except Exception:
e = get_exception()
self.module.fail_json(path=path, msg="mode %(mode)s must be in octal form" % self.file_args, details=str(e))
# Only special files require no umask-handling
elif ztype == '?':
mode = self._permstr_to_octal(permstr, 0)
else:
mode = self._permstr_to_octal(permstr, umask)
if mode != stat.S_IMODE(st.st_mode):
change = True
itemized[5] = 'p'
err += 'Path %s differs in permissions (%o vs %o)\n' % (path, mode, stat.S_IMODE(st.st_mode))
# Compare file user ownership
owner = uid = None
try:
owner = pwd.getpwuid(st.st_uid).pw_name
except:
uid = st.st_uid
# If we are not root and requested owner is not our user, fail
if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid):
raise UnarchiveError('Cannot change ownership of %s to %s, as user %s' % (path, fut_owner, run_owner))
if owner and owner != fut_owner:
change = True
err += 'Path %s is owned by user %s, not by user %s as expected\n' % (path, owner, fut_owner)
itemized[6] = 'o'
elif uid and uid != fut_uid:
change = True
err += 'Path %s is owned by uid %s, not by uid %s as expected\n' % (path, uid, fut_uid)
itemized[6] = 'o'
# Compare file group ownership
group = gid = None
try:
group = grp.getgrgid(st.st_gid).gr_name
except:
gid = st.st_gid
if run_uid != 0 and fut_gid not in groups:
raise UnarchiveError('Cannot change group ownership of %s to %s, as user %s' % (path, fut_group, run_owner))
if group and group != fut_group:
change = True
err += 'Path %s is owned by group %s, not by group %s as expected\n' % (path, group, fut_group)
itemized[6] = 'g'
elif gid and gid != fut_gid:
change = True
err += 'Path %s is owned by gid %s, not by gid %s as expected\n' % (path, gid, fut_gid)
itemized[6] = 'g'
# Register changed files and finalize diff output
if change:
if path not in self.includes:
self.includes.append(path)
diff += '%s %s\n' % (''.join(itemized), path)
if self.includes:
unarchived = False
# DEBUG
# out = old_out + out
return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd, diff=diff)
def unarchive(self):
cmd = [ self.cmd_path, '-o', self.src ]
if self.opts:
cmd.extend(self.opts)
# NOTE: Including (changed) files as arguments is problematic (limits on command line/arguments)
# if self.includes:
# NOTE: Command unzip has this strange behaviour where it expects quoted filenames to also be escaped
# cmd.extend(map(shell_escape, self.includes))
if self.excludes:
cmd.extend([ '-x' ] + self.excludes)
cmd.extend([ '-d', self.dest ])
rc, out, err = self.module.run_command(cmd)
return dict(cmd=cmd, rc=rc, out=out, err=err)
def can_handle_archive(self):
if not self.cmd_path:
return False, 'Command "unzip" not found.'
cmd = [ self.cmd_path, '-l', self.src ]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
return True, None
return False, 'Command "%s" could not handle archive.' % self.cmd_path
# class to handle gzipped tar files
class TgzArchive(object):
def __init__(self, src, dest, file_args, module):
self.src = src
self.dest = dest
self.file_args = file_args
self.opts = module.params['extra_opts']
self.module = module
if self.module.check_mode:
self.module.exit_json(skipped=True, msg="remote module (%s) does not support check mode when using gtar" % self.module._name)
self.excludes = [ path.rstrip('/') for path in self.module.params['exclude']]
# Prefer gtar (GNU tar) as it supports the compression options -z, -j and -J
self.cmd_path = self.module.get_bin_path('gtar', None)
if not self.cmd_path:
# Fallback to tar
self.cmd_path = self.module.get_bin_path('tar')
self.zipflag = '-z'
self._files_in_archive = []
if self.cmd_path:
self.tar_type = self._get_tar_type()
else:
self.tar_type = None
def _get_tar_type(self):
cmd = [self.cmd_path, '--version']
(rc, out, err) = self.module.run_command(cmd)
tar_type = None
if out.startswith('bsdtar'):
tar_type = 'bsd'
elif out.startswith('tar') and 'GNU' in out:
tar_type = 'gnu'
return tar_type
@property
def files_in_archive(self, force_refresh=False):
if self._files_in_archive and not force_refresh:
return self._files_in_archive
cmd = [ self.cmd_path, '--list', '-C', self.dest ]
if self.zipflag:
cmd.append(self.zipflag)
if self.opts:
cmd.extend([ '--show-transformed-names' ] + self.opts)
if self.excludes:
cmd.extend([ '--exclude=' + quote(f) for f in self.excludes ])
cmd.extend([ '-f', self.src ])
rc, out, err = self.module.run_command(cmd, cwd=self.dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
if rc != 0:
raise UnarchiveError('Unable to list files in the archive')
for filename in out.splitlines():
# Compensate for locale-related problems in gtar output (octal unicode representation) #11348
# filename = filename.decode('string_escape')
filename = codecs.escape_decode(filename)[0]
if filename and filename not in self.excludes:
self._files_in_archive.append(to_native(filename))
return self._files_in_archive
def is_unarchived(self):
cmd = [ self.cmd_path, '--diff', '-C', self.dest ]
if self.zipflag:
cmd.append(self.zipflag)
if self.opts:
cmd.extend([ '--show-transformed-names' ] + self.opts)
if self.file_args['owner']:
cmd.append('--owner=' + quote(self.file_args['owner']))
if self.file_args['group']:
cmd.append('--group=' + quote(self.file_args['group']))
if self.module.params['keep_newer']:
cmd.append('--keep-newer-files')
if self.excludes:
cmd.extend([ '--exclude=' + quote(f) for f in self.excludes ])
cmd.extend([ '-f', self.src ])
rc, out, err = self.module.run_command(cmd, cwd=self.dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
# Check whether the differences are in something that we're
# setting anyway
# What is different
unarchived = True
old_out = out
out = ''
run_uid = os.getuid()
# When unarchiving as a user, or when owner/group/mode is supplied --diff is insufficient
# Only way to be sure is to check request with what is on disk (as we do for zip)
# Leave this up to set_fs_attributes_if_different() instead of inducing a (false) change
for line in old_out.splitlines() + err.splitlines():
# FIXME: Remove the bogus lines from error-output as well !
# Ignore bogus errors on empty filenames (when using --split-component)
if EMPTY_FILE_RE.search(line):
continue
if run_uid == 0 and not self.file_args['owner'] and OWNER_DIFF_RE.search(line):
out += line + '\n'
if run_uid == 0 and not self.file_args['group'] and GROUP_DIFF_RE.search(line):
out += line + '\n'
if not self.file_args['mode'] and MODE_DIFF_RE.search(line):
out += line + '\n'
if MOD_TIME_DIFF_RE.search(line):
out += line + '\n'
if MISSING_FILE_RE.search(line):
out += line + '\n'
if out:
unarchived = False
return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)
def unarchive(self):
cmd = [ self.cmd_path, '--extract', '-C', self.dest ]
if self.zipflag:
cmd.append(self.zipflag)
if self.opts:
cmd.extend([ '--show-transformed-names' ] + self.opts)
if self.file_args['owner']:
cmd.append('--owner=' + quote(self.file_args['owner']))
if self.file_args['group']:
cmd.append('--group=' + quote(self.file_args['group']))
if self.module.params['keep_newer']:
cmd.append('--keep-newer-files')
if self.excludes:
cmd.extend([ '--exclude=' + quote(f) for f in self.excludes ])
cmd.extend([ '-f', self.src ])
rc, out, err = self.module.run_command(cmd, cwd=self.dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
return dict(cmd=cmd, rc=rc, out=out, err=err)
def can_handle_archive(self):
if not self.cmd_path:
return False, 'Commands "gtar" and "tar" not found.'
if self.tar_type != 'gnu':
return False, 'Command "%s" detected as tar type %s. GNU tar required.' % (self.cmd_path, self.tar_type)
try:
if self.files_in_archive:
return True, None
except UnarchiveError:
return False, 'Command "%s" could not handle archive.' % self.cmd_path
# Errors and no files in archive assume that we weren't able to
# properly unarchive it
return False, 'Command "%s" found no files in archive.' % self.cmd_path
# class to handle tar files that aren't compressed
class TarArchive(TgzArchive):
def __init__(self, src, dest, file_args, module):
super(TarArchive, self).__init__(src, dest, file_args, module)
# argument to tar
self.zipflag = ''
# class to handle bzip2 compressed tar files
class TarBzipArchive(TgzArchive):
def __init__(self, src, dest, file_args, module):
super(TarBzipArchive, self).__init__(src, dest, file_args, module)
self.zipflag = '-j'
# class to handle xz compressed tar files
class TarXzArchive(TgzArchive):
def __init__(self, src, dest, file_args, module):
super(TarXzArchive, self).__init__(src, dest, file_args, module)
self.zipflag = '-J'
# try handlers in order and return the one that works or bail if none work
def pick_handler(src, dest, file_args, module):
handlers = [ZipArchive, TgzArchive, TarArchive, TarBzipArchive, TarXzArchive]
reasons = set()
for handler in handlers:
obj = handler(src, dest, file_args, module)
(can_handle, reason) = obj.can_handle_archive()
if can_handle:
return obj
reasons.add(reason)
reason_msg = ' '.join(reasons)
module.fail_json(msg='Failed to find handler for "%s". Make sure the required command to extract the file is installed. %s' % (src, reason_msg))
def main():
module = AnsibleModule(
# not checking because of daisy chain to file module
argument_spec = dict(
src = dict(required=True, type='path'),
original_basename = dict(required=False, type='str'), # used to handle 'dest is a directory' via template, a slight hack
dest = dict(required=True, type='path'),
copy = dict(required=False, default=True, type='bool'),
remote_src = dict(required=False, default=False, type='bool'),
creates = dict(required=False, type='path'),
list_files = dict(required=False, default=False, type='bool'),
keep_newer = dict(required=False, default=False, type='bool'),
exclude = dict(required=False, default=[], type='list'),
extra_opts = dict(required=False, default=[], type='list'),
validate_certs = dict(required=False, default=True, type='bool'),
),
add_file_common_args = True,
mutually_exclusive = [("copy", "remote_src"),],
# check-mode only works for zip files, we cover that later
supports_check_mode = True,
)
src = os.path.expanduser(module.params['src'])
dest = os.path.expanduser(module.params['dest'])
copy = module.params['copy']
remote_src = module.params['remote_src']
file_args = module.load_file_common_arguments(module.params)
# did tar file arrive?
if not os.path.exists(src):
if not remote_src and copy:
module.fail_json(msg="Source '%s' failed to transfer" % src)
# If copy=false, and src= contains ://, try and download the file to a temp directory.
elif '://' in src:
tempdir = os.path.dirname(os.path.realpath(__file__))
package = os.path.join(tempdir, str(src.rsplit('/', 1)[1]))
try:
rsp, info = fetch_url(module, src)
# If download fails, raise a proper exception
if rsp is None:
raise Exception(info['msg'])
f = open(package, 'w')
# Read 1kb at a time to save on ram
while True:
data = rsp.read(BUFSIZE)
if data == "":
break # End of file, break while loop
f.write(data)
f.close()
src = package
except Exception:
e = get_exception()
module.fail_json(msg="Failure downloading %s, %s" % (src, e))
else:
module.fail_json(msg="Source '%s' does not exist" % src)
if not os.access(src, os.R_OK):
module.fail_json(msg="Source '%s' not readable" % src)
# skip working with 0 size archives
try:
if os.path.getsize(src) == 0:
module.fail_json(msg="Invalid archive '%s', the file is 0 bytes" % src)
except Exception:
e = get_exception()
module.fail_json(msg="Source '%s' not readable" % src)
# is dest OK to receive tar file?
if not os.path.isdir(dest):
module.fail_json(msg="Destination '%s' is not a directory" % dest)
handler = pick_handler(src, dest, file_args, module)
res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src)
# do we need to do unpack?
check_results = handler.is_unarchived()
# DEBUG
# res_args['check_results'] = check_results
if module.check_mode:
res_args['changed'] = not check_results['unarchived']
elif check_results['unarchived']:
res_args['changed'] = False
else:
# do the unpack
try:
res_args['extract_results'] = handler.unarchive()
if res_args['extract_results']['rc'] != 0:
module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
except IOError:
module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
else:
res_args['changed'] = True
# Get diff if required
if check_results.get('diff', False):
res_args['diff'] = { 'prepared': check_results['diff'] }
# Run only if we found differences (idempotence) or diff was missing
if res_args.get('diff', True) and not module.check_mode:
# do we need to change perms?
for filename in handler.files_in_archive:
file_args['path'] = os.path.join(dest, filename)
try:
res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'])
except (IOError, OSError):
e = get_exception()
module.fail_json(msg="Unexpected error when accessing exploded file: %s" % str(e), **res_args)
if module.params['list_files']:
res_args['files'] = handler.files_in_archive
module.exit_json(**res_args)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils._text import to_native
if __name__ == '__main__':
main()
| 39.596552
| 223
| 0.568812
|
7d527ce1c0c35b42b2044c8b81d4fdaefbf15120
| 919
|
py
|
Python
|
Lab06_GUI/exercise-42.py
|
rodrigoc-silva/Python-course
|
327b20738a4b383510faddc0ec26a54be1bbd717
|
[
"MIT"
] | null | null | null |
Lab06_GUI/exercise-42.py
|
rodrigoc-silva/Python-course
|
327b20738a4b383510faddc0ec26a54be1bbd717
|
[
"MIT"
] | null | null | null |
Lab06_GUI/exercise-42.py
|
rodrigoc-silva/Python-course
|
327b20738a4b383510faddc0ec26a54be1bbd717
|
[
"MIT"
] | null | null | null |
#this program demonstrates a Button widget.
#When the user clicks the button, an info
#dialog box is displayed.
import tkinter
import tkinter.messagebox
class MyGUI:
def __init__(self):
#create the main widget
self.main_window = tkinter.Tk()
#create a button widget.
#The do_something method should be executed
#when the user clicks the button
self.my_button = tkinter.Button(self.main_window,
text='Click Me!',
command=self.do_something)
#pack the button
self.my_button.pack()
#enter tkinter main loop
tkinter.mainloop()
#do_something method is a callback function for the button
def do_something(self):
#display an info dialog box
tkinter.messagebox.showinfo('Response', 'Thanks for clicking the button')
my_gui = MyGUI()
| 28.71875
| 81
| 0.595212
|
1cd0739cef82578ff8ae876d4ff8e164a3172dd4
| 11,950
|
py
|
Python
|
cde-root/usr/lib64/python2.4/site-packages/Bio/PDB/Polypeptide.py
|
NirBenTalLab/proorigami-cde-package
|
273379075830a9b94d3f2884661a54f853777ff6
|
[
"MIT"
] | null | null | null |
cde-root/usr/lib64/python2.4/site-packages/Bio/PDB/Polypeptide.py
|
NirBenTalLab/proorigami-cde-package
|
273379075830a9b94d3f2884661a54f853777ff6
|
[
"MIT"
] | null | null | null |
cde-root/usr/lib64/python2.4/site-packages/Bio/PDB/Polypeptide.py
|
NirBenTalLab/proorigami-cde-package
|
273379075830a9b94d3f2884661a54f853777ff6
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from types import StringType
from Bio.Alphabet import ProteinAlphabet
from Bio.Seq import Seq
from Bio.SCOP.Raf import to_one_letter_code
from Bio.PDB.PDBExceptions import PDBException
from Bio.PDB.Residue import Residue, DisorderedResidue
from Vector import calc_dihedral, calc_angle
__doc__="""
Polypeptide related classes (construction and representation).
Example:
>>> ppb=PPBuilder()
>>> for pp in ppb.build_peptides(structure):
>>> print pp.get_sequence()
"""
standard_aa_names=["ALA", "CYS", "ASP", "GLU", "PHE", "GLY", "HIS", "ILE", "LYS",
"LEU", "MET", "ASN", "PRO", "GLN", "ARG", "SER", "THR", "VAL",
"TRP", "TYR"]
aa1="ACDEFGHIKLMNPQRSTVWY"
aa3=standard_aa_names
d1_to_index={}
dindex_to_1={}
d3_to_index={}
dindex_to_3={}
# Create some lookup tables
for i in range(0, 20):
n1=aa1[i]
n3=aa3[i]
d1_to_index[n1]=i
dindex_to_1[i]=n1
d3_to_index[n3]=i
dindex_to_3[i]=n3
def index_to_one(index):
"""
Index to corresponding one letter amino acid name.
For example: 0 to A.
"""
return dindex_to_1[index]
def one_to_index(s):
"""
One letter code to index.
For example: A to 0.
"""
return d1_to_index[s]
def index_to_three(i):
"""
Index to corresponding three letter amino acid name.
For example: 0 to ALA.
"""
return dindex_to_3[i]
def three_to_index(s):
"""
Three letter code to index.
For example: ALA to 0.
"""
return d3_to_index[s]
def three_to_one(s):
"""
Three letter code to one letter code.
For example: ALA to A.
"""
i=d3_to_index[s]
return dindex_to_1[i]
def one_to_three(s):
"""
One letter code to three letter code.
For example: A to ALA.
"""
i=d1_to_index[s]
return dindex_to_3[i]
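# Example usage of the conversion helpers above (added comment, not part of
# the original module):
#     three_to_one("ALA")  -> 'A'
#     one_to_index("C")    -> 1     (position of 'C' in aa1)
#     index_to_three(0)    -> 'ALA'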
def is_aa(residue, standard=0):
"""
Return 1 if residue object/string is an amino acid.
@param residue: a L{Residue} object OR a three letter amino acid code
@type residue: L{Residue} or string
@param standard: flag to check for the 20 AA (default false)
@type standard: boolean
"""
if not type(residue)==StringType:
residue=residue.get_resname()
residue=residue.upper()
if standard:
return d3_to_index.has_key(residue)
else:
return to_one_letter_code.has_key(residue)
class Polypeptide(list):
"""
A polypeptide is simply a list of L{Residue} objects.
"""
def get_ca_list(self):
"""
@return: the list of C-alpha atoms
@rtype: [L{Atom}, L{Atom}, ...]
"""
ca_list=[]
for res in self:
ca=res["CA"]
ca_list.append(ca)
return ca_list
def get_phi_psi_list(self):
"""
Return the list of phi/psi dihedral angles
"""
ppl=[]
lng=len(self)
for i in range(0, lng):
res=self[i]
try:
n=res['N'].get_vector()
ca=res['CA'].get_vector()
c=res['C'].get_vector()
except:
# Some atoms are missing
# Phi/Psi cannot be calculated for this residue
ppl.append((None, None))
res.xtra["PHI"]=None
res.xtra["PSI"]=None
continue
# Phi
if i>0:
rp=self[i-1]
try:
cp=rp['C'].get_vector()
phi=calc_dihedral(cp, n, ca, c)
except:
phi=None
else:
# No phi for residue 0!
phi=None
# Psi
if i<(lng-1):
rn=self[i+1]
try:
nn=rn['N'].get_vector()
psi=calc_dihedral(n, ca, c, nn)
except:
psi=None
else:
# No psi for last residue!
psi=None
ppl.append((phi, psi))
# Add Phi/Psi to xtra dict of residue
res.xtra["PHI"]=phi
res.xtra["PSI"]=psi
return ppl
def get_tau_list(self):
"""
Return list of tau torsions angles for all 4 consecutive
Calpha atoms.
"""
ca_list=self.get_ca_list()
tau_list=[]
for i in range(0, len(ca_list)-3):
atom_list=[ca_list[i], ca_list[i+1], ca_list[i+2], ca_list[i+3]]
vector_list=map(lambda a: a.get_vector(), atom_list)
v1, v2, v3, v4=vector_list
tau=calc_dihedral(v1, v2, v3, v4)
tau_list.append(tau)
# Put tau in xtra dict of residue
res=ca_list[i+2].get_parent()
res.xtra["TAU"]=tau
return tau_list
def get_theta_list(self):
"""
Return list of theta angles for all 3 consecutive
Calpha atoms.
"""
theta_list=[]
ca_list=self.get_ca_list()
for i in range(0, len(ca_list)-2):
atom_list=[ca_list[i], ca_list[i+1], ca_list[i+2]]
vector_list=map(lambda a: a.get_vector(), atom_list)
v1, v2, v3=vector_list
theta=calc_angle(v1, v2, v3)
theta_list.append(theta)
# Put tau in xtra dict of residue
res=ca_list[i+1].get_parent()
res.xtra["THETA"]=theta
return theta_list
def get_sequence(self):
"""
Return the AA sequence.
@return: polypeptide sequence
@rtype: L{Seq}
"""
s=""
for res in self:
resname=res.get_resname()
if to_one_letter_code.has_key(resname):
resname=to_one_letter_code[resname]
else:
resname='X'
s=s+resname
seq=Seq(s, ProteinAlphabet)
return seq
def __repr__(self):
"""
Return <Polypeptide start=START end=END>, where START
and END are sequence identifiers of the outer residues.
"""
start=self[0].get_id()[1]
end=self[-1].get_id()[1]
s="<Polypeptide start=%s end=%s>" % (start, end)
return s
class _PPBuilder:
"""
Base class to extract polypeptides.
It checks if two consecutive residues in a chain
are connected. The connectivity test is implemented by a
subclass.
"""
def __init__(self, radius):
"""
@param radius: distance
@type radius: float
"""
self.radius=radius
def _accept(self, residue):
"Check if the residue is an amino acid."
if is_aa(residue):
return 1
else:
if "CA" in residue.child_dict:
#It has an alpha carbon...
#We probably need to update the hard coded list of
#non-standard residues, see function is_aa for details.
import warnings
warnings.warn("Assuming residue %s is an unknown modified "
"amino acid" % residue.get_resname())
return 1
# not a standard AA so skip
return 0
def build_peptides(self, entity, aa_only=1):
"""
Build and return a list of Polypeptide objects.
@param entity: polypeptides are searched for in this object
@type entity: L{Structure}, L{Model} or L{Chain}
@param aa_only: if 1, the residue needs to be a standard AA
@type aa_only: int
"""
is_connected=self._is_connected
accept=self._accept
level=entity.get_level()
# Decide which entity we are dealing with
if level=="S":
model=entity[0]
chain_list=model.get_list()
elif level=="M":
chain_list=entity.get_list()
elif level=="C":
chain_list=[entity]
else:
raise PDBException("Entity should be Structure, Model or Chain.")
pp_list=[]
for chain in chain_list:
chain_it=iter(chain)
prev=chain_it.next()
pp=None
for next in chain_it:
if aa_only and not accept(prev):
prev=next
continue
if is_connected(prev, next):
if pp is None:
pp=Polypeptide()
pp.append(prev)
pp_list.append(pp)
pp.append(next)
else:
pp=None
prev=next
return pp_list
class CaPPBuilder(_PPBuilder):
"""
Use CA--CA distance to find polypeptides.
"""
def __init__(self, radius=4.3):
_PPBuilder.__init__(self, radius)
def _is_connected(self, prev, next):
for r in [prev, next]:
if not r.has_id("CA"):
return 0
n=next["CA"]
p=prev["CA"]
# Unpack disordered
if n.is_disordered():
nlist=n.disordered_get_list()
else:
nlist=[n]
if p.is_disordered():
plist=p.disordered_get_list()
else:
plist=[p]
for nn in nlist:
for pp in plist:
if (nn-pp)<self.radius:
return 1
return 0
class PPBuilder(_PPBuilder):
"""
Use C--N distance to find polypeptides.
"""
def __init__(self, radius=1.8):
_PPBuilder.__init__(self, radius)
def _is_connected(self, prev, next):
if not prev.has_id("C"):
return 0
if not next.has_id("N"):
return 0
test_dist=self._test_dist
c=prev["C"]
n=next["N"]
# Test all disordered atom positions!
if c.is_disordered():
clist=c.disordered_get_list()
else:
clist=[c]
if n.is_disordered():
nlist=n.disordered_get_list()
else:
nlist=[n]
for nn in nlist:
for cc in clist:
# To form a peptide bond, N and C must be
# within radius and have the same altloc
# identifier or one altloc blank
n_altloc=nn.get_altloc()
c_altloc=cc.get_altloc()
if n_altloc==c_altloc or n_altloc==" " or c_altloc==" ":
if test_dist(nn, cc):
# Select the disordered atoms that
# are indeed bonded
if c.is_disordered():
c.disordered_select(c_altloc)
if n.is_disordered():
n.disordered_select(n_altloc)
return 1
return 0
def _test_dist(self, c, n):
"Return 1 if distance between atoms<radius"
if (c-n)<self.radius:
return 1
else:
return 0
if __name__=="__main__":
import sys
from Bio.PDB.PDBParser import PDBParser
p=PDBParser(PERMISSIVE=1)
s=p.get_structure("scr", sys.argv[1])
ppb=PPBuilder()
print "C-N"
for pp in ppb.build_peptides(s):
print pp.get_sequence()
for pp in ppb.build_peptides(s[0]):
print pp.get_sequence()
for pp in ppb.build_peptides(s[0]["A"]):
print pp.get_sequence()
for pp in ppb.build_peptides(s):
for phi, psi in pp.get_phi_psi_list():
print phi, psi
ppb=CaPPBuilder()
print "CA-CA"
for pp in ppb.build_peptides(s):
print pp.get_sequence()
for pp in ppb.build_peptides(s[0]):
print pp.get_sequence()
for pp in ppb.build_peptides(s[0]["A"]):
print pp.get_sequence()
| 28.117647
| 82
| 0.533389
|
57bda62ab5e60118f2299455fdc87eaab4ef3052
| 3,162
|
py
|
Python
|
chroma_core/management/__init__.py
|
AlexTalker/integrated-manager-for-lustre
|
251099e5c776f3c1898af50bb7cc77924c7cf7c7
|
[
"MIT"
] | 1
|
2021-02-08T16:59:14.000Z
|
2021-02-08T16:59:14.000Z
|
chroma_core/management/__init__.py
|
AlexTalker/integrated-manager-for-lustre
|
251099e5c776f3c1898af50bb7cc77924c7cf7c7
|
[
"MIT"
] | null | null | null |
chroma_core/management/__init__.py
|
AlexTalker/integrated-manager-for-lustre
|
251099e5c776f3c1898af50bb7cc77924c7cf7c7
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2018 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
import django.contrib.auth.models
from django.contrib.contenttypes.models import ContentType
import django.contrib.auth as auth
from south.signals import post_migrate
import chroma_core.models
import settings
def setup_groups(app, **kwargs):
if app != "chroma_core":
return
if auth.models.Group.objects.count() == 0:
print("Creating groups...")
auth.models.Group.objects.create(name="superusers")
fsadmin_group = auth.models.Group.objects.create(name="filesystem_administrators")
def grant_write(group, model):
for perm in auth.models.Permission.objects.filter(content_type=ContentType.objects.get_for_model(model)):
group.permissions.add(perm)
def all_subclasses(cls):
for subclass in cls.__subclasses__():
yield subclass
for subclass in all_subclasses(subclass):
yield subclass
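# Added note (not in the original source): all_subclasses() yields every
# descendant of the given class recursively, so the AlertStateBase loop
# further below grants write permissions on indirect subclasses as well.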
grant_write(fsadmin_group, chroma_core.models.ManagedTarget)
grant_write(fsadmin_group, chroma_core.models.ManagedHost)
grant_write(fsadmin_group, chroma_core.models.ManagedFilesystem)
grant_write(fsadmin_group, chroma_core.models.StorageResourceRecord)
grant_write(fsadmin_group, chroma_core.models.Job)
grant_write(fsadmin_group, chroma_core.models.Command)
grant_write(fsadmin_group, chroma_core.models.Volume)
grant_write(fsadmin_group, chroma_core.models.VolumeNode)
grant_write(fsadmin_group, django.contrib.auth.models.User)
grant_write(fsadmin_group, chroma_core.models.RegistrationToken)
# Allow fs admins to dismiss alerts
grant_write(fsadmin_group, chroma_core.models.AlertState)
for alert_klass in all_subclasses(chroma_core.models.AlertStateBase):
grant_write(fsadmin_group, alert_klass)
fsusers_group = auth.models.Group.objects.create(name="filesystem_users")
# For modifying his own account
grant_write(fsusers_group, django.contrib.auth.models.User)
if settings.DEBUG and auth.models.User.objects.count() == 0:
print("***\n" * 3),
print("*** SECURITY WARNING: You are running in DEBUG mode and default users have been created")
print("***\n" * 3)
user = auth.models.User.objects.create_superuser("admin", "admin@debug.co.eh", "lustre")
user.groups.add(auth.models.Group.objects.get(name="superusers"))
user = auth.models.User.objects.create_user("debug", "debug@debug.co.eh", "lustre")
user.groups.add(auth.models.Group.objects.get(name="filesystem_administrators"))
user = auth.models.User.objects.create_user("user", "user@debug.co.eh", "lustre")
user.groups.add(auth.models.Group.objects.get(name="filesystem_users"))
# Ensure that the auto post_syncdb hook is installed
# before our hook, so that Permission objects will be there
# by the time we are called.
import django.contrib.auth.management
post_migrate.connect(setup_groups)
| 43.315068
| 117
| 0.71284
|
4ebc15ef7053cdf1bbb12427c8c93e5db1b85655
| 5,246
|
py
|
Python
|
test/gtest_xml_outfiles_test.py
|
CM-Archive/android_external_gtest
|
75f109c3120bf78ee89124f1f21b50ea24225236
|
[
"BSD-3-Clause"
] | 1
|
2019-01-31T05:16:59.000Z
|
2019-01-31T05:16:59.000Z
|
test/gtest_xml_outfiles_test.py
|
CM-Archive/android_external_gtest
|
75f109c3120bf78ee89124f1f21b50ea24225236
|
[
"BSD-3-Clause"
] | null | null | null |
test/gtest_xml_outfiles_test.py
|
CM-Archive/android_external_gtest
|
75f109c3120bf78ee89124f1f21b50ea24225236
|
[
"BSD-3-Clause"
] | 1
|
2019-01-31T05:17:00.000Z
|
2019-01-31T05:17:00.000Z
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
__author__ = "keith.ray@gmail.com (Keith Ray)"
import gtest_test_utils
import os
import sys
import tempfile
import unittest
from xml.dom import minidom, Node
import gtest_xml_test_utils
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuite tests="1" failures="0" disabled="0" errors="0" time="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuite>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuite tests="1" failures="0" disabled="0" errors="0" time="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuite>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
"""Unit test for Google Test's XML output functionality."""
def setUp(self):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
# for xml output.
self.output_dir_ = os.path.join(tempfile.mkdtemp(), "")
self.DeleteFilesAndDir()
def tearDown(self):
self.DeleteFilesAndDir()
def DeleteFilesAndDir(self):
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
except os.error:
pass
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
except os.error:
pass
try:
os.removedirs(self.output_dir_)
except os.error:
pass
def testOutfile1(self):
self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)
def testOutfile2(self):
self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)
def _TestOutFile(self, test_name, expected_xml):
gtest_prog_path = os.path.join(gtest_test_utils.GetBuildDir(),
test_name)
command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
p = gtest_test_utils.Subprocess(command, working_dir=tempfile.mkdtemp())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
# TODO(wan@google.com): libtool causes the built test binary to be
# named lt-gtest_xml_outfiles_test_ instead of
# gtest_xml_outfiles_test_. To account for this possibility, we
# allow both names in the following code. We should remove this
# hack when Chandler Carruth's libtool replacement tool is ready.
output_file_name1 = test_name + ".xml"
output_file1 = os.path.join(self.output_dir_, output_file_name1)
output_file_name2 = 'lt-' + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
output_file1)
expected = minidom.parseString(expected_xml)
if os.path.isfile(output_file1):
actual = minidom.parse(output_file1)
else:
actual = minidom.parse(output_file2)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == "__main__":
os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
gtest_test_utils.Main()
| 38.859259
| 140
| 0.727221
|
fa936cb125d27698927a89093b09a059c5d97cd9
| 931
|
py
|
Python
|
gfootball/agent.py
|
level-antoine/football
|
516f63da0ea4696f4c8b6668c65ac4b20385a8fa
|
[
"Apache-2.0"
] | null | null | null |
gfootball/agent.py
|
level-antoine/football
|
516f63da0ea4696f4c8b6668c65ac4b20385a8fa
|
[
"Apache-2.0"
] | null | null | null |
gfootball/agent.py
|
level-antoine/football
|
516f63da0ea4696f4c8b6668c65ac4b20385a8fa
|
[
"Apache-2.0"
] | 1
|
2022-03-02T14:01:00.000Z
|
2022-03-02T14:01:00.000Z
|
import time
import gfootball.env as football_env
import random
class Agent:
def __init__(self):
pass
if __name__ == '__main__':
env = football_env.create_environment(env_name='1_vs_1_easy', representation='extracted', render=True)
state = env.reset()
action_simple = football_env.observation_preprocessing.football_action_set.action_set_dict["simple"]
obs = env.reset()
while True:
action = random.choice(action_simple)
observation, reward, done, info = env.step(action)
print('-----------------------------------------')
i = 1
for obs in observation:
print(i)
print(obs)
i += 1
time.sleep(1000000000)
print(reward)
print(done)
print(info)
print('-----------------------------------------')
if done:
env.reset()
env.close()
| 23.275
| 106
| 0.531686
|
ad1e78ba07a6af53098a2914b156e1a4d2a0e850
| 40,422
|
py
|
Python
|
flow/coderepo/github/github.py
|
jander99/flow
|
33c418547e3693bf277fd6d089d5f3242c83e14a
|
[
"Apache-2.0"
] | null | null | null |
flow/coderepo/github/github.py
|
jander99/flow
|
33c418547e3693bf277fd6d089d5f3242c83e14a
|
[
"Apache-2.0"
] | null | null | null |
flow/coderepo/github/github.py
|
jander99/flow
|
33c418547e3693bf277fd6d089d5f3242c83e14a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# github.py
import json
import os
import re
import shutil
import subprocess
import tarfile
import time
import requests
from flow.buildconfig import BuildConfig
from flow.coderepo.code_repo_abc import Code_Repo
import flow.utils.commons as cicommons
import flow.utils.commons as commons
from flow.utils.commons import Object
class GitHub(Code_Repo):
clazz = 'GitHub'
url = None
org = None
repo = None
token = None
config = BuildConfig
http_timeout = 10
all_tags_and_shas = []
all_commits = []
found_all_commits = False
def __init__(self, config_override=None, verify_repo=True):
method = '__init__'
commons.print_msg(GitHub.clazz, method, 'begin')
# check if we provided an override
if config_override is not None:
self.config = config_override
if verify_repo is True:
self._load_github_token()
#self._refresh_tags()
self._verify_required_attributes()
self._verify_repo_existence(GitHub.url, GitHub.org, GitHub.repo)
commons.print_msg(GitHub.clazz, method, 'end')
def _load_github_token(self):
method = '_load_github_token'
commons.print_msg(GitHub.clazz, method, 'begin')
GitHub.token = os.getenv('GITHUB_TOKEN')
if not GitHub.token:
commons.print_msg(GitHub.clazz, method, "No github token found. If your repo doesn't allow anonymous "
"access, some operations may fail. To define a token, please set "
"environment variable 'GITHUB_TOKEN'", 'WARN')
commons.print_msg(GitHub.clazz, method, 'end')
def _refresh_tags(self):
method = '_refresh_tags'
commons.print_msg(GitHub.clazz, method, 'getting latest tags')
pull_tags_cmd = "git pull --tags"
pull_tags = subprocess.Popen(pull_tags_cmd.split(), shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pull_tags_outputs, pull_tags_errs = pull_tags.communicate(timeout=300)
for tag_line in pull_tags_outputs.splitlines():
commons.print_msg(GitHub.clazz, method, tag_line.decode("utf-8"))
def _verify_required_attributes(self):
method = '_verify_required_attributes'
try:
# noinspection PyStatementEffect
self.config.json_config['github']
GitHub.url = self.config.json_config['github']['URL']
GitHub.org = self.config.json_config['github']['org']
GitHub.repo = self.config.json_config['github']['repo']
except KeyError as e:
commons.print_msg(GitHub.clazz, method, "The build config associated with github is missing, {}."
.format(e), 'ERROR')
exit(1)
def _verify_repo_existence(self, url, org, repo, token=None):
method = '_verify_repo_existence'
commons.print_msg(GitHub.clazz, method, 'begin')
repo_url = url + '/' + org + '/' + repo
if token is None:
token = GitHub.token
if token is not None:
headers = {'Content-type': cicommons.content_json, 'Accept': cicommons.content_json,
'Authorization': ('token ' + token)}
else:
headers = {'Content-type': cicommons.content_json, 'Accept': cicommons.content_json}
commons.print_msg(GitHub.clazz, method, repo_url)
retries = 0
finished = False
while not finished:
try:
resp = requests.get(repo_url, headers=headers, timeout=self.http_timeout)
finished = True
except requests.ConnectionError:
commons.print_msg(GitHub.clazz, method, "Request to GitHub timed out, retrying...")
if retries < 2:
time.sleep(retries * 5)
retries += 1
continue
commons.print_msg(GitHub.clazz, method, "Request to GitHub timed out.", "ERROR")
exit(1)
except Exception as e:
commons.print_msg(GitHub.clazz, method, "Failed to access github location {}, retrying".format(e))
if retries < 2:
time.sleep(retries * 5)
retries += 1
continue
commons.print_msg(GitHub.clazz, method, "Failed to access github location {}".format(e), "ERROR")
exit(1)
# noinspection PyUnboundLocalVariable
commons.print_msg(GitHub.clazz, method, resp)
if resp.status_code != 200:
commons.print_msg(GitHub.clazz, method, "Failed to access github location {url}\r\n Response: {rsp}"
.format(url=repo_url,
rsp=resp.text),
"ERROR")
exit(1)
commons.print_msg(GitHub.clazz, method, 'end')
def add_tag_and_release_notes_to_github(self, new_version_tag_array, release_notes=None):
# TODO this needs to be split out and better unit testing added.
# testing is hard because json attributes are not ordered.
method = 'add_tag_and_release_notes_to_github'
commons.print_msg(GitHub.clazz, method, 'begin')
me = Object()
me.tag_name = self.convert_semver_tag_array_to_semver_string(new_version_tag_array)
me.target_commitish = self.config.build_env_info['associatedBranchName']
me.name = self.convert_semver_tag_array_to_semver_string(new_version_tag_array)
if release_notes is not None and len(release_notes) > 0:
me.body = release_notes
else:
me.body = 'No Release Notes'
me.draft = False
me.prerelease = True
# release builds will have a build index of 0.
if self._is_semver_tag_array_release_or_snapshot(new_version_tag_array) == 'release':
me.prerelease = False
tag_and_release_note_payload = me.to_JSON()
url_params = {'org': self.org, 'repo': self.repo}
commons.print_msg(GitHub.clazz, method, self.url)
commons.print_msg(GitHub.clazz, method, self.org)
commons.print_msg(GitHub.clazz, method, self.repo)
release_url = self.url + '/' + self.org + '/' + self.repo + '/releases'
commons.print_msg(GitHub.clazz, method, release_url)
commons.print_msg(GitHub.clazz, method, tag_and_release_note_payload)
commons.print_msg(GitHub.clazz, method, ("?", url_params))
if self.token is not None:
headers = {'Content-type': cicommons.content_json, 'Accept': cicommons.content_json, 'Authorization': ('token ' + self.token)}
else:
headers = {'Content-type': cicommons.content_json, 'Accept': cicommons.content_json}
try:
resp = requests.post(release_url, tag_and_release_note_payload, headers=headers, params=url_params, timeout=self.http_timeout)
except requests.ConnectionError:
commons.print_msg(GitHub.clazz, method, 'Request to GitHub timed out.', 'ERROR')
exit(1)
except:
commons.print_msg(GitHub.clazz, method, "The github add release notes call failed to {} has failed".format(
release_url), 'ERROR')
exit(1)
# noinspection PyUnboundLocalVariable
if resp.status_code != 200 and resp.status_code != 201:
commons.print_msg(GitHub.clazz, method, "The github add release notes call failed to {url}\r\n Response: {rsp}"
.format(url=release_url,
rsp=resp.text),
'ERROR')
exit(1)
else:
commons.print_msg(GitHub.clazz, method, resp.text)
commons.print_msg(GitHub.clazz, method, 'end')
def format_github_specific_release_notes_from_project_tracker_story_details(self, story_details):
formatted_release_notes = None
if story_details is not None and isinstance(story_details, list) and len(story_details) > 0:
for i, release_note in enumerate(story_details):
if release_note.get('story_type') == "release":
story_emoji = ":checkered_flag:"
elif release_note.get('story_type') == "bug":
story_emoji = ":beetle:"
elif release_note.get('story_type') == "chore":
story_emoji = ":wrench:"
else:
story_emoji = ":star:"
if formatted_release_notes is None:
formatted_release_notes = ""
formatted_release_notes = formatted_release_notes + story_emoji + '<a href="' + release_note.get('url') + '">' + release_note.get('story_type') + ' **' + str(release_note.get('id')) + '**</a>' + ' ' + '\r\n ' + \
' **' + release_note.get('name') + '** \r\n ' + \
' ' + (release_note.get('description').replace('\n', '\r\n ') + '\r\n' if release_note.get('description') is not None else '') + '\r\n\r\n'
if formatted_release_notes is None:
formatted_release_notes = 'No Release Notes'
return formatted_release_notes
def append_release_notes(self, release_name, text_to_append):
method='append_release_notes'
commons.print_msg(GitHub.clazz, method, 'begin')
release_url_api = self.url + '/' + self.org + '/' + self.repo + '/releases/tags/' + release_name
if self.token is None:
commons.print_msg(GitHub.clazz, method, 'GITHUB_TOKEN is required to use this method.', 'ERROR')
exit(1)
headers = {'Content-type': cicommons.content_json, 'Accept': cicommons.content_json, 'Authorization': ('token ' + self.token)}
try:
resp = requests.get(release_url_api, headers=headers, timeout=self.http_timeout)
except requests.ConnectionError:
commons.print_msg(GitHub.clazz, method, 'Request to GitHub timed out.', 'ERROR')
exit(1)
except:
commons.print_msg(GitHub.clazz, method, 'The github add release notes call to {} has failed'.format(
release_url_api), 'ERROR')
exit(1)
# noinspection PyUnboundLocalVariable
resp_json = resp.json()
git_release_body = resp_json['body']
git_release_id = resp_json['id']
git_release_body += '\r\n\r\n%s' % text_to_append
jsonMessage = {
'body' : git_release_body
}
release_url_api = self.url + '/' + self.org + '/' + self.repo + '/releases/' + str(git_release_id)
try:
requests.patch(release_url_api, json=jsonMessage, headers=headers, timeout=self.http_timeout)
except requests.ConnectionError:
commons.print_msg(GitHub.clazz, method, 'Request to GitHub timed out.', 'ERROR')
exit(1)
except:
commons.print_msg(GitHub.clazz, method, 'The github add release notes call to {} has failed'.format(
release_url_api), 'ERROR')
exit(1)
commons.print_msg(GitHub.clazz, method, 'end')
def calculate_next_semver(self, tag_type, bump_type, highest_version_array):
method = 'calculate_next_semver'
commons.print_msg(GitHub.clazz, method, 'begin')
if highest_version_array is not None:
commons.print_msg(GitHub.clazz, method, "Hightest Git tag: {}".format(
self.convert_semver_tag_array_to_semver_string(highest_version_array)))
else:
commons.print_msg(GitHub.clazz, method, "Hightest Git tag: {}".format(str(highest_version_array)))
commons.print_msg(GitHub.clazz, method, "Bump Type: {}".format(str(bump_type)))
commons.print_msg(GitHub.clazz, method, "Tag Type: {}".format(str(tag_type)))
new_version_array = None
if tag_type != "release" and tag_type != "snapshot":
commons.print_msg(GitHub.clazz, method, "Tag types can only be 'release' or 'snapshot', instead {} was "
"provided.".format(str(tag_type)))
exit(1)
if tag_type == "release" and bump_type != "major" and bump_type != "minor" and bump_type != "bug":
commons.print_msg(GitHub.clazz, method, "Bump types can only be 'major', 'minor' or 'bug', instead {} was "
"provided.".format(str(bump_type)))
exit(1)
if tag_type == 'snapshot':
if highest_version_array is None: # no previous snapshot
new_version_array = [0, 0, 0, 1]
else:
commons.print_msg(GitHub.clazz, method, "Incrementing +buildnumber based on last tag, since it's a "
"snapshot build.")
new_version_array = highest_version_array[:]
# the build index is the 4th item (3rd position)
new_version_array[3] = new_version_array[3]+1
elif tag_type == 'release':
commons.print_msg(GitHub.clazz, method, 'New Release semver')
if highest_version_array is None:
new_version_array = [0, 0, 0, 0]
else:
new_version_array = highest_version_array[:]
# release builds don't have build numbers, so always set to zero
new_version_array[3] = 0
if bump_type == 'major':
# if major rolls then set minor and bug to zero.
new_version_array[0] = new_version_array[0]+1
new_version_array[1] = 0
new_version_array[2] = 0
elif bump_type == 'minor':
# if minor rolls then set bug to zero.
new_version_array[1] = new_version_array[1]+1
new_version_array[2] = 0
elif bump_type == 'bug':
new_version_array[2] = new_version_array[2]+1
commons.print_msg(GitHub.clazz, method, "New Git tag {}".format(self.convert_semver_tag_array_to_semver_string(
new_version_array)))
commons.print_msg(GitHub.clazz, method, 'end')
return new_version_array
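# Illustrative examples for calculate_next_semver() above (added comment, not
# part of the original source):
#     snapshot bump of [1, 2, 3, 7]                        -> [1, 2, 3, 8]  (only the build index moves)
#     release bump with bump_type='minor' of [1, 2, 3, 7]  -> [1, 3, 0, 0]
#     release bump with bump_type='major' of [1, 2, 3, 7]  -> [2, 0, 0, 0]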
def get_git_last_tag(self, start_from_version=None):
method = "get_git_last_tag"
commons.print_msg(GitHub.clazz, method, 'begin')
if start_from_version is not None:
return start_from_version
last_tag = None
if self.config.artifact_category.lower() == 'release':
tags = self.get_all_tags_and_shas_from_github(need_release=1)
for name, _ in tags:
if '+' not in name:
last_tag = name
break
else:
tags = self.get_all_tags_and_shas_from_github(need_snapshot=1)
for name, _ in tags:
if '+' in name:
last_tag = name
break
commons.print_msg(GitHub.clazz, method, "last_tag is: {}".format(last_tag))
return last_tag
def get_git_previous_tag(self, start_from_version=None):
method = "get_git_previous_tag"
commons.print_msg(GitHub.clazz, method, 'begin')
beginning_tag = None
if self.config.artifact_category.lower() == 'release':
tags = self.get_all_tags_and_shas_from_github(need_release=2)
else:
tags = self.get_all_tags_and_shas_from_github(need_snapshot=2)
if start_from_version is None:
for name, _ in tags:
if self.config.artifact_category.lower() == 'release' and '+' not in name:
beginning_tag = name
break
elif self.config.artifact_category.lower() != 'release' and '+' in name:
beginning_tag = name
break
else:
beginning_tag = start_from_version
commons.print_msg(GitHub.clazz, method, "starting with {}".format(beginning_tag))
commons.print_msg(GitHub.clazz, method, "Category: " + self.config.artifact_category.lower())
found_tag = False
for name, _ in tags:
if found_tag:
out_name = None
if self.config.artifact_category.lower() == 'release' and '+' not in name:
out_name = name
elif self.config.artifact_category.lower() != 'release' and '+' in name:
out_name = name
if out_name is not None:
commons.print_msg(GitHub.clazz, method, name)
commons.print_msg(GitHub.clazz, method, 'end')
return out_name
if name == beginning_tag:
found_tag = True
commons.print_msg(GitHub.clazz, method, 'tag not found, or was the first tag')
return None
def get_all_commits_from_github(self, start_from_sha=None):
method = "get_all_commits_from_github"
commons.print_msg(GitHub.clazz, method, 'begin')
if len(GitHub.all_commits) > 0:
if GitHub.found_all_commits:
commons.print_msg(GitHub.clazz, method, 'All commits pulled, returning cached results')
return GitHub.all_commits
foundSha = False
for commit in GitHub.all_commits:
if commit['sha'] == start_from_sha:
foundSha = True
commons.print_msg(GitHub.clazz, method, 'The beginning sha is in our cached list')
if foundSha:
commons.print_msg(GitHub.clazz, method, 'Returning cached results')
return GitHub.all_commits
commons.print_msg(GitHub.clazz, method, 'Beginning sha is not in our cached list, pulling more commits')
per_page = 100
start_page = (len(GitHub.all_commits)//per_page)+1
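# Added note: with per_page=100 this resumes fetching at the page after the
# ones already covered by the cached commit list (assuming earlier calls
# cached whole pages).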
finished = False
output = GitHub.all_commits
branch = self.config.build_env_info['associatedBranchName']
repo_url = GitHub.url + '/' + GitHub.org + '/' + GitHub.repo + '/commits?per_page=' + str(per_page) + '&page=' + str(start_page) + '&sha=' + str(branch)
token = GitHub.token
if token is not None:
headers = {'Content-type': cicommons.content_json, 'Accept': cicommons.content_json,
'Authorization': ('token ' + token)}
else:
headers = {'Content-type': cicommons.content_json, 'Accept': cicommons.content_json}
retries = 0
while not finished:
commons.print_msg(GitHub.clazz, method, repo_url)
try:
resp = requests.get(repo_url, headers=headers, timeout=self.http_timeout)
except Exception as e:
commons.print_msg(GitHub.clazz, method, "Failed to access github location {}".format(e))
if retries < 2:
time.sleep(retries * 5)
retries += 1
continue
commons.print_msg(GitHub.clazz, method, "Failed to access github location {}".format(e), "ERROR")
exit(1)
retries = 0
if 'next' in resp.links:
repo_url = resp.links['next']['url']
else:
GitHub.found_all_commits = True
finished = True
if resp.status_code != 200:
commons.print_msg(GitHub.clazz, method, "Failed to access github location {url}\r\n Response: {rsp}"
.format(url=repo_url,
rsp=resp.text),
"ERROR")
exit(1)
else:
#commons.print_msg(GitHub.clazz, method, resp.text)
#commons.print_msg(GitHub.clazz, method, resp.json())
simplified = []
for commit in resp.json():
simplified.append({'sha': commit['sha'], 'commit': { 'message': commit['commit']['message'] } })
if commit['sha'] == start_from_sha:
commons.print_msg(GitHub.clazz, method, 'Found the beginning sha, stopping lookup')
finished = True
output.extend(simplified)
commons.print_msg(GitHub.clazz, method, '{} total commits'.format(len(output)))
commons.print_msg(GitHub.clazz, method, 'end')
GitHub.all_commits = output
return output
def _verify_tags_found(self, tag_list, need_snapshot, need_release, need_tag, need_base):
found_snapshot = 0
found_release = 0
found_tag = need_tag is None
for name, _ in tag_list:
if need_snapshot > found_snapshot:
if '+' in name:
found_snapshot += 1
if need_release > found_release:
if '+' not in name:
found_release += 1
if not found_tag:
if need_base:
if name.split("+")[0] == need_tag:
found_tag = True
elif name == need_tag:
found_tag = True
if found_snapshot >= need_snapshot and found_release >= need_release and found_tag:
return True
return False
# if need_snapshot, need_release, and need_tag are all left as defaults,
# this method will only pull one page of results.
def get_all_tags_and_shas_from_github(self, need_snapshot=0, need_release=0, need_tag=None, need_base=False):
method = "get_all_tags_and_shas_from_github"
if len(GitHub.all_tags_and_shas) > 0:
if self._verify_tags_found(GitHub.all_tags_and_shas, need_snapshot, need_release, need_tag, need_base):
commons.print_msg(GitHub.clazz, method, 'Already pulled necessary tags, returning cached results')
return GitHub.all_tags_and_shas
commons.print_msg(GitHub.clazz, method, 'Necessary tags are not in our cached list, pulling more tags')
per_page = 100
start_page = (len(GitHub.all_tags_and_shas)//per_page)+1
finished = False
output = GitHub.all_tags_and_shas
repo_url = GitHub.url + '/' + GitHub.org + '/' + GitHub.repo + '/tags?per_page=' + str(per_page) + '&page=' + str(start_page)
token = GitHub.token
if token is not None:
headers = {'Content-type': cicommons.content_json, 'Accept': cicommons.content_json,
'Authorization': ('token ' + token)}
else:
headers = {'Content-type': cicommons.content_json, 'Accept': cicommons.content_json}
retries = 0
while not finished:
commons.print_msg(GitHub.clazz, method, repo_url)
try:
resp = requests.get(repo_url, headers=headers, timeout=self.http_timeout)
except Exception as e:
commons.print_msg(GitHub.clazz, method, "Failed to access github location {}".format(e))
if retries < 2:
time.sleep(retries * 5)
retries += 1
continue
commons.print_msg(GitHub.clazz, method, "Failed to access github location {}".format(e), "ERROR")
exit(1)
retries = 0
if 'next' in resp.links:
repo_url = resp.links['next']['url']
else:
finished = True
if resp.status_code != 200:
commons.print_msg(GitHub.clazz, method, "Failed to access github location {url}\r\n Response: {rsp}"
.format(url=repo_url,
rsp=resp.text),
"ERROR")
exit(1)
else:
#commons.print_msg(GitHub.clazz, method, resp.text)
#commons.print_msg(GitHub.clazz, method, resp.json())
simplified = list(map(lambda obj: (obj['name'], obj['commit']['sha']), resp.json()))
output.extend(simplified)
if self._verify_tags_found(output, need_snapshot, need_release, need_tag, need_base):
commons.print_msg(GitHub.clazz, method, 'Found necessary tags, stopping lookup')
finished = True
#commons.print_msg(GitHub.clazz, method, output)
commons.print_msg(GitHub.clazz, method, '{} total tags'.format(len(output)))
commons.print_msg(GitHub.clazz, method, 'end')
GitHub.all_tags_and_shas = output
return output
def get_all_semver_tags(self, need_snapshot=0, need_release=0, need_tag=None, need_base=False):
method = "get_all_semver_tags"
all_tags_output = self.get_all_tags_and_shas_from_github(need_snapshot=need_snapshot, need_release=need_release, need_tag=need_tag, need_base=need_base)
all_tags = all_tags_output#.splitlines()
tag_data = []
for tag, _ in all_tags:
try:
tag_data.append(self.convert_semver_string_to_semver_tag_array(tag))
except Exception:
commons.print_msg(GitHub.clazz, method, "This tag didn't parse right skipping: {} ".format(tag))
tag_data.sort(reverse=True)
GitHub.all_tags_sorted = tag_data
return tag_data
def get_highest_semver_tag(self):
all_semver_tags = self.get_all_semver_tags()
# index 0 is the highest order number
if len(all_semver_tags) > 0:
return all_semver_tags[0]
else:
return None
def get_highest_semver_release_tag(self):
all_semver_tags = self.get_all_semver_tags(need_release=1)
# index 0 is the highest order semver number
# since array is in order from highest version
# to lowest version, then start at 0
# and find the last release build
for tag in all_semver_tags:
if self._is_semver_tag_array_release_or_snapshot(tag) == 'release':
return tag
return None
def get_highest_semver_snapshot_tag(self):
all_semver_tags = self.get_all_semver_tags(need_snapshot=1)
# index 0 is the highest order semver number
# since array is in order from highest version
# to lowest version, then start at 0
# and find the last snapshot build
for tag in all_semver_tags:
if self._is_semver_tag_array_release_or_snapshot(tag) == 'snapshot':
return tag
return None
def get_highest_semver_array_snapshot_tag_from_base(self, base_release_version):
all_semver_tags = self.get_all_semver_tags(need_tag=self.convert_semver_tag_array_to_semver_string(base_release_version), need_base=True)
# index 0 is the highest order semver number
# since array is in order from highest version
# to lowest version, then start at 0
# There are three possible outcomes:
# a snapshot matching the base exists: return the highest such tag
# only a release matching the base exists (no snapshots): return that release
# no tag matching the base exists: return None
found_tag = None
for tag in all_semver_tags:
if tag[0] == base_release_version[0] and tag[1] == base_release_version[1] and tag[2] == base_release_version[2]:
# this could be a snapshot, or release, really don't care.
# both are fine.
found_tag = tag
break
return found_tag
def _does_semver_tag_exist(self, tag_array):
all_semver_tags = self.get_all_semver_tags(need_tag=self.convert_semver_tag_array_to_semver_string(tag_array))
# index 0 is the highest order number
if tag_array in all_semver_tags:
return True
else:
return False
def convert_semver_tag_array_to_semver_string(self, tag_array):
if tag_array is None:
return None
tag_string = "v"
tag_string += str(tag_array[0]) # major
tag_string += "." + str(tag_array[1]) # minor
tag_string += "." + str(tag_array[2]) # bug
if len(tag_array) == 4 and tag_array[3] != 0:
tag_string += "+" + str(tag_array[3]) # build
return tag_string
def convert_semver_string_to_semver_tag_array(self, tag_string):
if tag_string is None:
return None
tag_array = []
regex = "^v(\d+)\.(\d+).(\d+)(\+(\d+))?$"
match = re.fullmatch(regex, tag_string.strip())
if match:
# major
tag_array.append(int(match.group(1)))
# minor
tag_array.append(int(match.group(2)))
# bug
tag_array.append(int(match.group(3)))
# build
# group 3 is the "+digit" match
# group 4 is the digit match sub group
# so skip 3
if match.group(5) is not None:
tag_array.append(int(match.group(5)))
else:
tag_array.append(0)
else:
raise Exception("Tag format invalid v0.0.0[+0] instead it was: " + tag_string)
return tag_array
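# Illustrative round trip for the two converters above (added comment, not
# part of the original source):
#     convert_semver_string_to_semver_tag_array("v1.4.2+12")   -> [1, 4, 2, 12]
#     convert_semver_tag_array_to_semver_string([1, 4, 2, 12]) -> "v1.4.2+12"
#     convert_semver_tag_array_to_semver_string([1, 4, 2, 0])  -> "v1.4.2"  (a build index of 0 is omitted)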
def verify_sem_ver_tag(self, tag):
# None tags are valid (no tags for the repo)
if tag is None:
return True
tag_string = self.convert_semver_tag_array_to_semver_string(tag)
regex = "^v(\d+)\.(\d+).(\d+)(\+(\d+))?$"
match = re.fullmatch(regex, tag_string.strip())
if match:
return True
else:
return False
def get_all_git_commit_history_between_provided_tags(self, semver_array_beginning_version,
semver_array_ending_version=None):
# if you set the semver_array_beginning_version to None, it will pull *ALL* history **be gentle**
method = 'get_all_git_commit_history_between_provided_tags'
commons.print_msg(GitHub.clazz, method, method)
if self.verify_sem_ver_tag(semver_array_beginning_version) is False:
commons.print_msg(GitHub.clazz, method, "Invalid beginning version defined {}".format(
semver_array_beginning_version), 'ERROR')
exit(1)
if semver_array_ending_version is not None and self.verify_sem_ver_tag(semver_array_ending_version) is False:
commons.print_msg(GitHub.clazz, method, "Invalid ending version defined {}".format(
semver_array_ending_version), 'ERROR')
exit(1)
semver_array_beginning_version = self.convert_semver_tag_array_to_semver_string(semver_array_beginning_version)
# noinspection PyTypeChecker
semver_array_ending_version = self.convert_semver_tag_array_to_semver_string(semver_array_ending_version)
# get all tags to get shas
tags = self.get_all_tags_and_shas_from_github(need_tag=semver_array_beginning_version)
ending_sha = ''
beginning_sha = ''
if semver_array_ending_version is not None:
commons.print_msg(GitHub.clazz, method, semver_array_ending_version)
filtered_tags = list(filter(lambda tag: tag[0] == semver_array_ending_version, tags))
if len(filtered_tags) == 0:
print("Version tag not found {}".format(semver_array_ending_version))
commons.print_msg(GitHub.clazz, method, "Version tag not found {}".format(semver_array_ending_version),
'ERROR')
exit(1)
else:
beginning_sha = filtered_tags[0][1]
ending_sha = filtered_tags[0][1]
if semver_array_beginning_version is not None:
commons.print_msg(GitHub.clazz, method, semver_array_beginning_version)
filtered_tags = list(filter(lambda tag: tag[0] == semver_array_beginning_version, tags))
if len(filtered_tags) == 0:
print("Version tag not found {}".format(semver_array_beginning_version))
commons.print_msg(GitHub.clazz, method, "Version tag not found {}".format(semver_array_beginning_version),
'ERROR')
exit(1)
else:
beginning_sha = filtered_tags[0][1]
commons.print_msg(GitHub.clazz, method, ending_sha + ' , ' + beginning_sha)
# get all commits here
commits = self.get_all_commits_from_github(beginning_sha)
trimmed_commits = []
found_beginning = False
if semver_array_beginning_version is None and semver_array_ending_version is None: # Everything!
commons.print_msg(GitHub.clazz, method, "No tag present. Pulling all git commit statements instead.")
trimmed_commits = commits[:]
found_beginning = True
elif semver_array_ending_version is None: # Everything since tag
commons.print_msg(GitHub.clazz, method, "The first tag: {}".format(semver_array_beginning_version))
for commit in commits:
if commit['sha'] == beginning_sha:
found_beginning = True
break
trimmed_commits.append(commit)
else: # Between two tags. Mostly used when re-deploying old versions to send release notes
commons.print_msg(GitHub.clazz, method, "The first tag: ".format(semver_array_beginning_version))
commons.print_msg(GitHub.clazz, method, "The last tag: ".format(semver_array_ending_version))
found_end = False
for commit in commits:
if commit['sha'] == ending_sha:
found_end = True
if commit['sha'] == beginning_sha:
found_beginning = True
break
if found_end:
trimmed_commits.append(commit)
trimmed_commits = list(map(lambda current_commit: "{} {}".format(current_commit['sha'][0:7], current_commit['commit']['message']), trimmed_commits))
commons.print_msg(GitHub.clazz, method, "Number of commits found: {}".format(len(trimmed_commits)))
if not found_beginning:
branch = self.config.build_env_info['associatedBranchName']
commons.print_msg(GitHub.clazz, method, "The commit sha {} could not be found in the commit history of branch '{}', so no tracker stories will be pulled.".format(beginning_sha, branch), 'WARN')
commons.print_msg(GitHub.clazz, method, "This likely means tag {} was created on a branch other than {}.".format(semver_array_beginning_version, branch))
trimmed_commits = []
commons.print_msg(GitHub.clazz, method, 'end')
return trimmed_commits
def _is_semver_tag_array_release_or_snapshot(self, semver_array):
# check the 0.0.0.x position.
# if x == 0 then it is release
# if x > 0 then it is snapshot.
if semver_array[3] == 0:
return 'release'
elif semver_array[3] > 0:
return 'snapshot'
else:
return None
def download_code_at_version(self):
method = "download_code_at_version"
commons.print_msg(GitHub.clazz, method, "begin")
artifact_to_download = self._get_artifact_url()
artifact = self.config.version_number + '.tar.gz'
commons.print_msg(GitHub.clazz, method, ("Attempting to download from github: {}".format(artifact_to_download)))
if not os.path.exists(os.path.join(self.config.push_location, 'unzipped')):
os.makedirs(os.path.join(self.config.push_location, 'unzipped'))
download_path = self.config.push_location + "/" + artifact
if GitHub.token is not None:
headers = {'Content-type': cicommons.content_json, 'Accept': cicommons.content_json, 'Authorization': ('token ' + GitHub.token)}
else:
headers = {'Content-type': cicommons.content_json, 'Accept': cicommons.content_json}
try:
download_resp = requests.get(artifact_to_download, headers=headers)
with open(download_path, 'wb') as f:
for chunk in download_resp.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
tar = tarfile.open(self.config.push_location + '/' + artifact)
tar.extractall(os.path.join(self.config.push_location, 'unzipped'))
tar.close()
self._copy_unzipped_file_to_deployment_directory()
except Exception as ex:
commons.print_msg(GitHub.clazz, method, "Failed to download {art}. Error: {e}".format(art=artifact, e=ex),
'ERROR')
exit(1)
commons.print_msg(GitHub.clazz, method, "end")
def _get_artifact_url(self):
method = "_get_artifact_url"
commons.print_msg(GitHub.clazz, method, "begin")
if GitHub.token is not None:
headers = {'Content-type': cicommons.content_json, 'Accept': cicommons.content_json, 'Authorization': ('token ' + GitHub.token)}
else:
headers = {'Content-type': cicommons.content_json, 'Accept': cicommons.content_json}
tag_information_url = GitHub.url.replace('\\', '/').rstrip('/') + '/' + self.org + '/' + self.repo + \
'/releases/tags/' + self.config.version_number
commons.print_msg(GitHub.clazz, method, ("Retrieving Github information from " + tag_information_url))
resp = requests.get(tag_information_url, headers=headers)
if resp.status_code != 200:
commons.print_msg(GitHub.clazz, method, ("Failed to access github tag information at " + tag_information_url + "\r\n Response: " + resp.text), "ERROR")
exit(1)
else:
commons.print_msg(GitHub.clazz, method, resp.text)
json_data = json.loads(resp.text)
artifact_to_download = json_data['tarball_url']
return artifact_to_download
# return self.domainURL.replace('\\','/').rstrip('/') + '/' + \
# self.org + '/' + \
# self.repo + '/archive/' + \
# self.config.version_number + \
# 'tar.gz'
def _copy_unzipped_file_to_deployment_directory(self):
method = "_copy_unzipped_file_to_deployment_directory"
commons.print_msg(GitHub.clazz, method, "begin")
try:
# github tar puts it in a parent directory. Pull everything out of the parent directory
if len([name for name in os.listdir(os.path.join(self.config.push_location, 'unzipped')) if os.path.isdir(os.path.join(self.config.push_location, 'unzipped', name))]) == 1:
commons.print_msg(GitHub.clazz, method, "Github contents unzipped. Copying out of parent directory. ")
for current_dir in os.listdir(os.path.join(self.config.push_location, 'unzipped')):
if os.path.isdir(os.path.join(self.config.push_location, 'unzipped', current_dir)):
self._copy_tree(os.path.join(self.config.push_location, 'unzipped', current_dir),
self.config.push_location+'/',
False, None)
except Exception as ex:
print(ex)
commons.print_msg(GitHub.clazz, method,
("failed to move files from", os.path.join(self.config.push_location, 'unzipped'), "to", self.config.push_location), "ERROR")
exit(1)
commons.print_msg(GitHub.clazz, method, "end")
def _copy_tree(self, src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
| 43.79415
| 229
| 0.597125
|
8ec9a1b7243ca1684da9a8a412738ef444351e5d
| 936
|
py
|
Python
|
Machine Learning Algorithms/a. Linear Regression/other works/Medical Cost Personal Datasets/MCPD Linear Regression Study.py
|
kerimmstfdemir/Machine_Learning_Algorithms_with_Python
|
8e12b7d1ff437fccb543075d7757dbb2ba99f2a1
|
[
"MIT"
] | null | null | null |
Machine Learning Algorithms/a. Linear Regression/other works/Medical Cost Personal Datasets/MCPD Linear Regression Study.py
|
kerimmstfdemir/Machine_Learning_Algorithms_with_Python
|
8e12b7d1ff437fccb543075d7757dbb2ba99f2a1
|
[
"MIT"
] | null | null | null |
Machine Learning Algorithms/a. Linear Regression/other works/Medical Cost Personal Datasets/MCPD Linear Regression Study.py
|
kerimmstfdemir/Machine_Learning_Algorithms_with_Python
|
8e12b7d1ff437fccb543075d7757dbb2ba99f2a1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 25 16:09:03 2020
@author: Kerim Demir
"""
#import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
#import data
received_dataset = pd.read_csv("insurance/insurance.csv")
dataset = received_dataset.dropna()
#Plotting Data
plt.scatter(dataset.age,dataset.charges)
plt.xlabel("Age")
plt.ylabel("Charges")
plt.show()
#Linear Regression
linear_reg = LinearRegression()
x = dataset.age.values.reshape(-1,1)
y = dataset.charges.values.reshape(-1,1)
linear_reg.fit(x,y)
#Prediction
b0 = linear_reg.predict([[0]])
print("b0 =",b0)
b0_ = linear_reg.intercept_
print("b0_ =",b0_)
b1 = linear_reg.coef_
print("b1 =",b1)
print(linear_reg.predict([[43]]))
#Line Visualization
array = np.array(range(0,65)).reshape(-1,1)
y_head = linear_reg.predict(array)
plt.plot(array,y_head,color="red")
plt.show()
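#Optional extra check (not part of the original script): coefficient of
#determination (R^2) of the fitted line on the training data.
print("R^2 =", linear_reg.score(x, y))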
| 15.864407
| 57
| 0.725427
|
fc77bd05a1e67e84e0663b92ee98d589a9ac19f5
| 1,175
|
wsgi
|
Python
|
wsgi/playdoh.wsgi
|
ANKIT-KS/fjord
|
fec88521d3179e11bc6d7a35587092167be02c48
|
[
"BSD-3-Clause"
] | null | null | null |
wsgi/playdoh.wsgi
|
ANKIT-KS/fjord
|
fec88521d3179e11bc6d7a35587092167be02c48
|
[
"BSD-3-Clause"
] | null | null | null |
wsgi/playdoh.wsgi
|
ANKIT-KS/fjord
|
fec88521d3179e11bc6d7a35587092167be02c48
|
[
"BSD-3-Clause"
] | null | null | null |
# This gets used by stage/prod to set up the WSGI application for stage/prod
# use. We do some minor environment setup and then have `fjord/wsgi.py` do
# the rest.
import os
import site
# Set up NewRelic stuff.
try:
import newrelic.agent
except ImportError:
newrelic = False
if newrelic:
newrelic_ini = os.getenv('NEWRELIC_PYTHON_INI_FILE', False)
if newrelic_ini:
newrelic.agent.initialize(newrelic_ini)
else:
newrelic = False
# NOTE: you can also set DJANGO_SETTINGS_MODULE in your environment to override
# the default value in manage.py
# Add the app dir to the python path so we can import manage.
wsgidir = os.path.dirname(__file__)
site.addsitedir(os.path.abspath(os.path.join(wsgidir, '../')))
# Explicitly set these so that fjord.manage_utils does the right
# thing in production.
os.environ['USING_VENDOR'] = '1'
os.environ['SKIP_CHECK'] = '1'
# Importing manage has the side-effect of adding vendor/ stuff and
# doing other environment setup.
import manage
from fjord.wsgi import get_wsgi_application
application = get_wsgi_application()
if newrelic:
application = newrelic.agent.wsgi_application()(application)
| 26.111111
| 79
| 0.748085
|
4594d8bd7036f8def7c6155d365fe862af456356
| 348
|
py
|
Python
|
backend/src/asset_manager/data/schemas/asset.py
|
JonathanLoscalzo/asset-license-dev-demo
|
522c1d531e508ae7e85b212f804eee505d284b2b
|
[
"Xnet",
"X11"
] | null | null | null |
backend/src/asset_manager/data/schemas/asset.py
|
JonathanLoscalzo/asset-license-dev-demo
|
522c1d531e508ae7e85b212f804eee505d284b2b
|
[
"Xnet",
"X11"
] | null | null | null |
backend/src/asset_manager/data/schemas/asset.py
|
JonathanLoscalzo/asset-license-dev-demo
|
522c1d531e508ae7e85b212f804eee505d284b2b
|
[
"Xnet",
"X11"
] | null | null | null |
from typing import Optional
from asset_manager.data.schemas.base import BaseMongoModel, PydanticObjectId
from asset_manager.models.models import TypeAssetEnum, uid
class AssetMongo(BaseMongoModel):
id: uid
brand: str
model: str
type: TypeAssetEnum
user: Optional[PydanticObjectId]
class Config:
orm_mode = True
| 20.470588
| 76
| 0.75
|
f9525ac6914d5dbd62e558ea5a3fe758a07fd542
| 6,210
|
py
|
Python
|
tests/cli/test_experiment.py
|
deeplearninc/auger-ai
|
b50af35e8ea28b528ec233a2f4a8d4e412059be9
|
[
"MIT"
] | null | null | null |
tests/cli/test_experiment.py
|
deeplearninc/auger-ai
|
b50af35e8ea28b528ec233a2f4a8d4e412059be9
|
[
"MIT"
] | 25
|
2019-07-09T04:26:19.000Z
|
2020-07-21T06:43:25.000Z
|
tests/cli/test_experiment.py
|
deeplearninc/auger-ai
|
b50af35e8ea28b528ec233a2f4a8d4e412059be9
|
[
"MIT"
] | 1
|
2019-07-09T15:19:13.000Z
|
2019-07-09T15:19:13.000Z
|
import os
import pytest
from auger.cli.cli import cli
from .utils import interceptor, ORGANIZATIONS, PROJECTS
PROJECT_FILE = {
'data': {
'name': 'iris-1.csv',
'id': 1256,
'statistics': {
'columns_count': 5, 'count': 150,
'stat_data': [{
'datatype': 'categorical',
'column_name': 'class',
'unique_values': 3
}]
},
}
}
PROJECT_FILES = {
'meta': {
'pagination': {'offset': 0, 'count': 1, 'total': 1, 'limit': 100},
'status': 200},
'data': [PROJECT_FILE['data']]
}
EXPERIMENTS = {
'meta': {
'pagination': {'offset': 0, 'count': 1, 'total': 1, 'limit': 100},
'status': 200},
'data': [{
'name': 'iris-1.csv-experiment',
'project_file_id': 1256,
}]
}
EXPERIMENT_SESSIONS = {
'meta': {
'pagination': {'offset': 0, 'count': 2, 'total': 2, 'limit': 100},
'status': 200},
'data': [{
'id': 'test_id_1',
'model_settings': {'start_time': '2019-06-26 22:00:00.405'},
'status': 'completed',
},
{
'id': 'test_id_2',
'model_settings': {'start_time': '2019-06-28 20:30:00.992405'},
'status': 'completed',
}
]
}
EXPERIMENT_SESSION = {
'data': {
'id': 'test_id_2',
'model_settings': {'start_time': '2019-06-28 20:30:00.992405'},
'status': 'completed',
'project_file_id': '1234',
}
}
TRIALS = {
'meta': {'pagination': {'offset': 0, 'limit': 100, 'count': 20, 'total': 20}, 'status': 200},
'data': [{
'id': 'A79FBADD8CCD417',
'score_name': 'f1_macro',
'score_value': 0.123,
'hyperparameter': {'algorithm_name': 'auger_ml.algorithms.baseline.BaselineClassifier'},
}]*20
}
PROJECT = {
'data': {
'status': 'running',
}
}
class TestExperimentCLI():
def test_list(self, runner, log, project, authenticated, monkeypatch):
PAYLOAD = {
'get_organizations': ORGANIZATIONS,
'get_projects': PROJECTS,
'get_project': PROJECT,
'get_project_files': PROJECT_FILES,
'get_experiments': EXPERIMENTS,
}
interceptor(PAYLOAD, monkeypatch)
result = runner.invoke(cli, ['experiment', 'list'])
assert result.exit_code == 0
assert log.messages[0] == 'iris-1.csv-experiment'
assert log.messages[-1] == '1 Experiment(s) listed'
def test_start(self, runner, log, project, authenticated, monkeypatch):
PAYLOAD = {
'get_organizations': ORGANIZATIONS,
'get_projects': PROJECTS,
'get_project': PROJECT,
'get_project_files': PROJECT_FILES,
'get_project_file': PROJECT_FILE,
'get_experiments': EXPERIMENTS,
'get_experiment_sessions': EXPERIMENT_SESSIONS,
'create_experiment_session': EXPERIMENT_SESSION,
'get_experiment_session': EXPERIMENT_SESSION,
'update_experiment_session': EXPERIMENT_SESSION,
'get_trials': TRIALS,
}
interceptor(PAYLOAD, monkeypatch)
result = runner.invoke(cli, ['experiment', 'start'])
assert result.exit_code == 0
assert log.messages[0] == 'Started Experiment iris-1.csv-experiment search...'
@pytest.mark.skip(reason="Make it work first, edge cases next")
def test_start_without_target(self, runner, log, project, authenticated, monkeypatch):
PAYLOAD = {
'get_organizations': ORGANIZATIONS,
'get_projects': PROJECTS,
'get_project_files': PROJECT_FILES,
'get_experiments': EXPERIMENTS,
'get_experiment_sessions': EXPERIMENT_SESSIONS,
'get_trials': TRIALS,
}
interceptor(PAYLOAD, monkeypatch)
# TODO: ensure cli throws error on trying to start exp w/o target
result = runner.invoke(cli, ['experiment', 'start'])
assert result.exit_code != 0
assert log.messages[-1] == 'Please set target to build model.'
def test_stop(self, runner, log, project, authenticated, monkeypatch):
PAYLOAD = {
'get_organizations': ORGANIZATIONS,
'get_projects': PROJECTS,
'get_project_files': PROJECT_FILES,
'get_experiments': EXPERIMENTS,
'get_experiment_sessions': EXPERIMENT_SESSIONS,
'update_experiment_session': EXPERIMENT_SESSION,
}
interceptor(PAYLOAD, monkeypatch)
monkeypatch.setattr('auger.api.cloud.experiment_session.AugerExperimentSessionApi.status', lambda *a, **kw: 'started')
result = runner.invoke(cli, ['experiment', 'stop'])
assert result.exit_code == 0
assert log.messages[0] == 'Search is stopped...'
def test_leaderboard(self, runner, log, project, authenticated, monkeypatch):
PAYLOAD = {
'get_organizations': ORGANIZATIONS,
'get_projects': PROJECTS,
'get_project_files': PROJECT_FILES,
'get_experiments': EXPERIMENTS,
'get_experiment_session': EXPERIMENT_SESSION,
'get_experiment_sessions': EXPERIMENT_SESSIONS,
'get_trials': TRIALS,
}
interceptor(PAYLOAD, monkeypatch)
result = runner.invoke(cli, ['experiment', 'leaderboard'])
assert result.exit_code == 0
assert len(log.messages) == 45
assert log.messages[-1] == 'Search is completed.'
def test_history(self, runner, log, project, authenticated, monkeypatch):
PAYLOAD = {
'get_organizations': ORGANIZATIONS,
'get_projects': PROJECTS,
'get_project_files': PROJECT_FILES,
'get_experiments': EXPERIMENTS,
'get_experiment_sessions': EXPERIMENT_SESSIONS,
}
interceptor(PAYLOAD, monkeypatch)
result = runner.invoke(cli, ['experiment', 'history'])
assert result.exit_code == 0
assert (log.messages[0] ==
'''run id: test_id_1, start time: 2019-06-26 22:00:00.405, '''
'''status: completed''')
assert 'run id: test_id_2' in log.messages[1]
| 34.5
| 126
| 0.583575
|
813c20327cbe42cebe3e1557e2ecd69d712c4b58
| 1,258
|
py
|
Python
|
dolphin_doc_lib/process_test.py
|
jianzhoufeng/dolphin-doc
|
65def92b21bcde6a962458753e59002c2ac125b5
|
[
"Apache-2.0"
] | 1
|
2019-09-04T10:28:46.000Z
|
2019-09-04T10:28:46.000Z
|
dolphin_doc_lib/process_test.py
|
zhongbiaodev/dolphin-doc
|
d4134bdca2224f34a40a22277e8435edbfa393f6
|
[
"Apache-2.0"
] | 1
|
2019-09-22T10:15:24.000Z
|
2019-09-22T10:15:24.000Z
|
dolphin_doc_lib/process_test.py
|
jianzhoufeng/dolphin-doc
|
65def92b21bcde6a962458753e59002c2ac125b5
|
[
"Apache-2.0"
] | 1
|
2019-09-07T03:37:31.000Z
|
2019-09-07T03:37:31.000Z
|
"Unit test for process"
from typing import cast
from dolphin_doc_lib.base.text import TextParagraph, TextSegment
from dolphin_doc_lib.process import process, Content, ContentSource
from dolphin_doc_lib.base.doc import Doc
def test_plain_text():
text = "paragraph 1\nparagraph 2\n\n \n \nparagraph 3\n"
doc = process(Content(data=text))
par1 = TextParagraph().append_text_segment(TextSegment("paragraph 1"))
par2 = TextParagraph().append_text_segment(TextSegment("paragraph 2"))
par3 = TextParagraph().append_text_segment(TextSegment("paragraph 3"))
expect_doc = Doc().append_blocks([par1, par2, par3])
assert doc.to_dict() == expect_doc.to_dict()
def test_plain_text_from_file():
doc = process(
Content(source=ContentSource.FILE,
path="dolphin_doc_lib/testdata/plain_text.txt"))
par1 = TextParagraph().append_text_segment(TextSegment("paragraph 1"))
par2 = TextParagraph().append_text_segment(TextSegment("paragraph 2"))
par3 = TextParagraph().append_text_segment(TextSegment("paragraph 3"))
par4 = TextParagraph().append_text_segment(TextSegment("paragraph 4"))
expect_doc = Doc().append_blocks([par1, par2, par3, par4])
assert doc.to_dict() == expect_doc.to_dict()
| 38.121212
| 74
| 0.735294
|
6194a226533105a99b04750c80b72a986e3432c3
| 4,633
|
py
|
Python
|
xlsxwriter/test/worksheet/test_worksheet09.py
|
adgear/XlsxWriter
|
79bcaad28d57ac29038b1c74bccc6d611b7a385e
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2019-07-25T06:08:09.000Z
|
2019-11-01T02:33:56.000Z
|
xlsxwriter/test/worksheet/test_worksheet09.py
|
adgear/XlsxWriter
|
79bcaad28d57ac29038b1c74bccc6d611b7a385e
|
[
"BSD-2-Clause-FreeBSD"
] | 13
|
2019-07-14T00:29:05.000Z
|
2019-11-26T06:16:46.000Z
|
xlsxwriter/test/worksheet/test_worksheet09.py
|
adgear/XlsxWriter
|
79bcaad28d57ac29038b1c74bccc6d611b7a385e
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
from ...format import Format
class TestAssembleWorksheet(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with a blank cell."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
cell_format = Format({'xf_index': 1})
# No format. Should be ignored.
worksheet.write_blank(0, 0, None)
worksheet.write_blank(1, 2, None, cell_format)
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="C2"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="2" spans="3:3">
<c r="C2" s="1"/>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
def test_assemble_xml_file_write(self):
"""Test writing a worksheet with a blank cell with write() method."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
cell_format = Format({'xf_index': 1})
# No format. Should be ignored.
worksheet.write(0, 0, None)
worksheet.write(1, 2, None, cell_format)
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="C2"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="2" spans="3:3">
<c r="C2" s="1"/>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
def test_assemble_xml_file_A1(self):
"""Test writing a worksheet with a blank cell with A1 notation."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
cell_format = Format({'xf_index': 1})
# No format. Should be ignored.
worksheet.write_blank('A1', None)
worksheet.write_blank('C2', None, cell_format)
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="C2"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="2" spans="3:3">
<c r="C2" s="1"/>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
| 34.834586
| 171
| 0.529031
|
e2e1117c03a9a899e70fcbfa3e569f714c8041d7
| 5,887
|
py
|
Python
|
ykman/cli/apdu.py
|
maxthomas/yubikey-manager
|
79bf111093401dbbe18ef7627d45e8c472ba17dd
|
[
"BSD-2-Clause"
] | null | null | null |
ykman/cli/apdu.py
|
maxthomas/yubikey-manager
|
79bf111093401dbbe18ef7627d45e8c472ba17dd
|
[
"BSD-2-Clause"
] | null | null | null |
ykman/cli/apdu.py
|
maxthomas/yubikey-manager
|
79bf111093401dbbe18ef7627d45e8c472ba17dd
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2020 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from binascii import a2b_hex
from yubikit.core import AID
from yubikit.core.smartcard import SmartCardConnection, SmartCardProtocol, ApduError, SW
from .util import EnumChoice, ykman_command
import re
import sys
import click
import struct
import logging
logger = logging.getLogger(__name__)
APDU_PATTERN = re.compile(
r"^"
r"(?P<cla>[0-9a-f]{2})?(?P<ins>[0-9a-f]{2})(?P<params>[0-9a-f]{4})?"
r"(?::(?P<body>(?:[0-9a-f]{2})+))?"
r"(?P<check>=(?P<sw>[0-9a-f]{4})?)?"
r"$",
re.IGNORECASE,
)
def _hex(data):
return " ".join(f"{d:02X}" for d in data)
def _parse_apdu(data):
m = APDU_PATTERN.match(data)
if not m:
raise ValueError("Invalid APDU format: " + data)
cla = int(m.group("cla") or "00", 16)
ins = int(m.group("ins"), 16)
params = int(m.group("params") or "0000", 16)
body = a2b_hex(m.group("body") or "")
if m.group("check"):
sw = int(m.group("sw") or "9000", 16)
else:
sw = None
p1, p2 = params >> 8, params & 0xFF
return (cla, ins, p1, p2, body), sw
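# Illustrative example (not part of the original module): the OATH SELECT used
# in the `apdu` command help below parses as follows, since the optional CLA is
# absent and "=9000" sets the expected status word:
#   _parse_apdu("a40400:a000000527210101=9000")
#   == ((0x00, 0xA4, 0x04, 0x00, bytes.fromhex("a000000527210101")), 0x9000)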
def _print_response(resp, sw, no_pretty):
click.echo(f"RECV (SW={sw:04X})" + (":" if resp else ""))
if no_pretty:
click.echo(resp.hex().upper())
else:
for i in range(0, len(resp), 16):
chunk = resp[i : i + 16]
click.echo(
" ".join(f"{c:02X}" for c in chunk).ljust(50)
# Replace non-printable characters with a dot.
+ "".join(chr(c) if 31 < c < 127 else chr(183) for c in chunk)
)
@ykman_command(SmartCardConnection, hidden="--full-help" not in sys.argv)
@click.pass_context
@click.option(
"-x", "--no-pretty", is_flag=True, help="Print only the hex output of a response"
)
@click.option(
"-a", "--app", type=EnumChoice(AID), required=False, help="Select application",
)
@click.argument("apdu", nargs=-1)
@click.option("-s", "--send-apdu", multiple=True, help="Provide full APDUs")
def apdu(ctx, no_pretty, app, apdu, send_apdu):
"""
    Execute arbitrary APDUs.
    Provide APDUs as a hex encoded, space-separated list using the following syntax:
    [CLA]INS[P1P2][:DATA][=EXPECTED_SW]
    If not provided, CLA, P1 and P2 are all set to zero.
    Setting EXPECTED_SW will cause the command to check the response SW and fail if it
    differs. "=" can be used as shorthand for "=9000" (SW=OK).
Examples:
\b
Select the OATH application, send a LIST instruction (0xA1), and make sure we get
sw=9000 (these are equivalent):
$ ykman apdu a40400:a000000527210101=9000 a1=9000
or
$ ykman apdu -a oath a1=
\b
Factory reset the OATH application:
$ ykman apdu -a oath 04dead
or
$ ykman apdu a40400:a000000527210101 04dead
or (using full-apdu mode)
$ ykman apdu -s 00a4040008a000000527210101 -s 0004dead
"""
if apdu and send_apdu:
ctx.fail("Cannot mix positional APDUs and -s/--send-apdu.")
elif not send_apdu:
apdus = [_parse_apdu(data) for data in apdu]
if not apdus and not app:
ctx.fail("No commands provided.")
protocol = SmartCardProtocol(ctx.obj["conn"])
is_first = True
if app:
is_first = False
click.echo("SELECT AID: " + _hex(app))
resp = protocol.select(app)
_print_response(resp, SW.OK, no_pretty)
if send_apdu: # Compatibility mode (full APDUs)
for apdu in send_apdu:
if not is_first:
click.echo()
else:
is_first = False
apdu = a2b_hex(apdu)
click.echo("SEND: " + _hex(apdu))
resp, sw = protocol.connection.send_and_receive(apdu)
_print_response(resp, sw, no_pretty)
else: # Standard mode
for apdu, check in apdus:
if not is_first:
click.echo()
else:
is_first = False
header, body = apdu[:4], apdu[4]
req = _hex(struct.pack(">BBBB", *header))
if body:
req += " -- " + _hex(body)
click.echo("SEND: " + req)
try:
resp = protocol.send_apdu(*apdu)
sw = SW.OK
except ApduError as e:
resp = e.data
sw = e.sw
_print_response(resp, sw, no_pretty)
if check is not None and sw != check:
click.echo(f"Aborted due to error (expected SW={check:04X}).")
ctx.exit(1)
| 34.226744
| 88
| 0.618312
|
c178333c7f8850f50e1b427c9e8297d3d9e8b8bb
| 5,074
|
py
|
Python
|
mars/tensor/random/vonmises.py
|
haijohn/mars
|
672b3a33a70565f01b1a3f508908445491d85acf
|
[
"Apache-2.0"
] | 1
|
2021-06-10T02:43:01.000Z
|
2021-06-10T02:43:01.000Z
|
mars/tensor/random/vonmises.py
|
JeffroMF/mars
|
2805241ac55b50c4f6319baa41113fbf8c723832
|
[
"Apache-2.0"
] | null | null | null |
mars/tensor/random/vonmises.py
|
JeffroMF/mars
|
2805241ac55b50c4f6319baa41113fbf8c723832
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ...serialization.serializables import AnyField
from ..utils import gen_random_seeds
from .core import TensorRandomOperandMixin, handle_array, TensorDistribution
class TensorVonmises(TensorDistribution, TensorRandomOperandMixin):
_input_fields_ = ['_mu', '_kappa']
_op_type_ = OperandDef.RAND_VONMISES
_fields_ = '_mu', '_kappa', '_size'
_mu = AnyField('mu')
_kappa = AnyField('kappa')
_func_name = 'vonmises'
def __init__(self, size=None, dtype=None, **kw):
dtype = np.dtype(dtype) if dtype is not None else dtype
super().__init__(_size=size, dtype=dtype, **kw)
@property
def mu(self):
return self._mu
@property
def kappa(self):
return self._kappa
def __call__(self, mu, kappa, chunk_size=None):
return self.new_tensor([mu, kappa], None, raw_chunk_size=chunk_size)
def vonmises(random_state, mu, kappa, size=None, chunk_size=None, gpu=None, dtype=None):
r"""
Draw samples from a von Mises distribution.
Samples are drawn from a von Mises distribution with specified mode
(mu) and dispersion (kappa), on the interval [-pi, pi].
The von Mises distribution (also known as the circular normal
distribution) is a continuous probability distribution on the unit
circle. It may be thought of as the circular analogue of the normal
distribution.
Parameters
----------
mu : float or array_like of floats
Mode ("center") of the distribution.
kappa : float or array_like of floats
Dispersion of the distribution, has to be >=0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``mu`` and ``kappa`` are both scalars.
Otherwise, ``np.broadcast(mu, kappa).size`` samples are drawn.
chunk_size : int or tuple of int or tuple of ints, optional
Desired chunk size on each dimension
gpu : bool, optional
Allocate the tensor on GPU if True, False as default
dtype : data-type, optional
Data-type of the returned tensor.
Returns
-------
out : Tensor or scalar
Drawn samples from the parameterized von Mises distribution.
See Also
--------
scipy.stats.vonmises : probability density function, distribution, or
cumulative density function, etc.
Notes
-----
The probability density for the von Mises distribution is
    .. math:: p(x) = \frac{e^{\kappa\cos(x-\mu)}}{2\pi I_0(\kappa)},
where :math:`\mu` is the mode and :math:`\kappa` the dispersion,
and :math:`I_0(\kappa)` is the modified Bessel function of order 0.
The von Mises is named for Richard Edler von Mises, who was born in
Austria-Hungary, in what is now the Ukraine. He fled to the United
States in 1939 and became a professor at Harvard. He worked in
probability theory, aerodynamics, fluid mechanics, and philosophy of
science.
References
----------
.. [1] Abramowitz, M. and Stegun, I. A. (Eds.). "Handbook of
Mathematical Functions with Formulas, Graphs, and Mathematical
Tables, 9th printing," New York: Dover, 1972.
.. [2] von Mises, R., "Mathematical Theory of Probability
and Statistics", New York: Academic Press, 1964.
Examples
--------
Draw samples from the distribution:
>>> import mars.tensor as mt
>>> mu, kappa = 0.0, 4.0 # mean and dispersion
>>> s = mt.random.vonmises(mu, kappa, 1000)
Display the histogram of the samples, along with
the probability density function:
>>> import matplotlib.pyplot as plt
>>> from scipy.special import i0
>>> plt.hist(s.execute(), 50, normed=True)
>>> x = mt.linspace(-mt.pi, mt.pi, num=51)
>>> y = mt.exp(kappa*mt.cos(x-mu))/(2*mt.pi*i0(kappa))
>>> plt.plot(x.execute(), y.execute(), linewidth=2, color='r')
>>> plt.show()
"""
if dtype is None:
dtype = np.random.RandomState().vonmises(
handle_array(mu), handle_array(kappa), size=(0,)).dtype
size = random_state._handle_size(size)
seed = gen_random_seeds(1, random_state.to_numpy())[0]
op = TensorVonmises(size=size, seed=seed, gpu=gpu, dtype=dtype)
return op(mu, kappa, chunk_size=chunk_size)
| 35.985816
| 88
| 0.667915
|
378645c49022aafce008b388a896d2de3586a77d
| 1,459
|
py
|
Python
|
Configuration/GenProduction/python/ThirteenTeV/GMSB_noSLHA/GMSB_L250TeV_Ctau200cm_Pythia8_13TeV_cff.py
|
zhangzc11/cms-gmsb-sps8-configs
|
838e6aac1d13251e050c0ee8c4ed26ca0c6cef7e
|
[
"Apache-2.0"
] | null | null | null |
Configuration/GenProduction/python/ThirteenTeV/GMSB_noSLHA/GMSB_L250TeV_Ctau200cm_Pythia8_13TeV_cff.py
|
zhangzc11/cms-gmsb-sps8-configs
|
838e6aac1d13251e050c0ee8c4ed26ca0c6cef7e
|
[
"Apache-2.0"
] | null | null | null |
Configuration/GenProduction/python/ThirteenTeV/GMSB_noSLHA/GMSB_L250TeV_Ctau200cm_Pythia8_13TeV_cff.py
|
zhangzc11/cms-gmsb-sps8-configs
|
838e6aac1d13251e050c0ee8c4ed26ca0c6cef7e
|
[
"Apache-2.0"
] | null | null | null |
with open("/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/13TeV/madgraph/GMSB_SHLA/GMSB_Lambda250TeV_CTau200cm.slha") as f:
SLHA_TABLE = f.read()
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
from Configuration.Generator.PSweightsPythia.PythiaPSweightsSettings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
SLHATableForPythia8 = cms.string('%s' % SLHA_TABLE),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CP5SettingsBlock,
pythia8PSweightsSettingsBlock,
processParameters = cms.vstring(
'ParticleDecays:limitTau0 = off',
'ParticleDecays:tau0Max = 10000000',
'SUSY:all on',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CP5Settings',
'pythia8PSweightsSettings',
'processParameters')
)
)
ProductionFilterSequence = cms.Sequence(generator)
| 42.911765
| 138
| 0.632625
|
8e324c46337721d269fb542f86512c62f9b4ce3f
| 410
|
py
|
Python
|
utils/tool.py
|
HKUNLP/UnifiedSKG
|
49a2ff950bb312b980c22ad72b11520db72ab6a3
|
[
"Apache-2.0"
] | 191
|
2021-12-14T11:33:09.000Z
|
2022-03-31T09:20:41.000Z
|
utils/tool.py
|
HKUNLP/UnifiedSKG
|
49a2ff950bb312b980c22ad72b11520db72ab6a3
|
[
"Apache-2.0"
] | 7
|
2022-01-20T05:41:51.000Z
|
2022-03-20T06:43:22.000Z
|
utils/tool.py
|
HKUNLP/UnifiedSKG
|
49a2ff950bb312b980c22ad72b11520db72ab6a3
|
[
"Apache-2.0"
] | 22
|
2021-12-14T12:59:42.000Z
|
2022-03-29T03:45:51.000Z
|
import importlib
def get_model(model):
Model = importlib.import_module('models.{}'.format(model)).Model
return Model
def get_constructor(constructor):
Constructor = importlib.import_module('{}'.format(constructor)).Constructor
return Constructor
def get_evaluator(evaluate_tool):
EvaluateTool = importlib.import_module('{}'.format(evaluate_tool)).EvaluateTool
return EvaluateTool
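# Minimal usage sketch (illustrative only -- the dotted paths below are
# hypothetical placeholders, not modules guaranteed to exist in this repo):
#
#   Model = get_model("unified.finetune")        # imports models.unified.finetune
#   Constructor = get_constructor("seq2seq_construction.meta_tuning")
#   EvaluateTool = get_evaluator("metrics.meta_tuning.evaluator")
#
# Each helper resolves the dotted path with importlib and returns the class
# named Model / Constructor / EvaluateTool defined inside that module.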
| 24.117647
| 83
| 0.758537
|
db5ee61de20d488f4996518cc833ad1bdfe6d294
| 419
|
py
|
Python
|
setup.py
|
docloud/luna.sso
|
a62c0ee788b72b39340165e41de16e248eb5edad
|
[
"MIT"
] | null | null | null |
setup.py
|
docloud/luna.sso
|
a62c0ee788b72b39340165e41de16e248eb5edad
|
[
"MIT"
] | null | null | null |
setup.py
|
docloud/luna.sso
|
a62c0ee788b72b39340165e41de16e248eb5edad
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup, find_packages
entry_points = [
]
setup(
name='luna.sso',
version='0.0.1',
description='Luna Project',
url='http://github.com/luna/luna',
include_package_data=True,
packages=find_packages(),
entry_points={"console_scripts": entry_points},
# package_data={'folder': ['']},
install_requires=open('requirements.txt').readlines(),
)
| 22.052632
| 58
| 0.675418
|
0d366264faa390921804bbe6ad027f89493a81a2
| 696
|
py
|
Python
|
submit/migrations/versions/344c3f86394c_add_is_locked_field_.py
|
xavierholt/submit-cs
|
70724b5dbc02d87e4b0e06c4ca4593be104919d6
|
[
"BSD-2-Clause"
] | 10
|
2015-02-09T12:09:35.000Z
|
2021-07-20T00:44:09.000Z
|
submit/migrations/versions/344c3f86394c_add_is_locked_field_.py
|
xavierholt/submit-cs
|
70724b5dbc02d87e4b0e06c4ca4593be104919d6
|
[
"BSD-2-Clause"
] | 20
|
2015-01-31T01:16:23.000Z
|
2018-02-23T19:01:26.000Z
|
submit/migrations/versions/344c3f86394c_add_is_locked_field_.py
|
xavierholt/submit-cs
|
70724b5dbc02d87e4b0e06c4ca4593be104919d6
|
[
"BSD-2-Clause"
] | 6
|
2015-02-09T12:09:35.000Z
|
2021-05-04T03:03:48.000Z
|
"""Add is_locked field to testable.
Revision ID: 344c3f86394c
Revises: 15e554bd88aa
Create Date: 2013-10-14 21:02:27.239190
"""
# revision identifiers, used by Alembic.
revision = '344c3f86394c'
down_revision = '15e554bd88aa'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('testable', sa.Column('is_locked', sa.Boolean(),
server_default=u'0', nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('testable', 'is_locked')
### end Alembic commands ###
| 24.857143
| 77
| 0.662356
|
c7c1eeb58b6b31926536b9ce01e58cc5ed500222
| 978
|
py
|
Python
|
kubernetes/test/test_v1beta1_token_review.py
|
woqer/python
|
3a6fe8231cefe1fa39a0a69d4b2f33044ab32745
|
[
"Apache-2.0"
] | 1
|
2019-07-12T05:38:06.000Z
|
2019-07-12T05:38:06.000Z
|
kubernetes/test/test_v1beta1_token_review.py
|
woqer/python
|
3a6fe8231cefe1fa39a0a69d4b2f33044ab32745
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1beta1_token_review.py
|
woqer/python
|
3a6fe8231cefe1fa39a0a69d4b2f33044ab32745
|
[
"Apache-2.0"
] | 1
|
2021-05-18T12:25:56.000Z
|
2021-05-18T12:25:56.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_token_review import V1beta1TokenReview
class TestV1beta1TokenReview(unittest.TestCase):
""" V1beta1TokenReview unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1TokenReview(self):
"""
Test V1beta1TokenReview
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1beta1_token_review.V1beta1TokenReview()
pass
if __name__ == '__main__':
unittest.main()
| 21.733333
| 105
| 0.713701
|
32dc1702bb5bd934d7588dee9298dde14e0b8f0f
| 1,638
|
py
|
Python
|
setup.py
|
Global19-atlassian-net/hyperledger-py
|
f24e9cc409b50628b911950466786be6fe74f09f
|
[
"Apache-2.0"
] | 73
|
2016-04-19T06:49:59.000Z
|
2020-04-24T16:55:53.000Z
|
setup.py
|
Global19-atlassian-net/hyperledger-py
|
f24e9cc409b50628b911950466786be6fe74f09f
|
[
"Apache-2.0"
] | 12
|
2016-06-01T03:46:25.000Z
|
2018-01-11T02:35:13.000Z
|
setup.py
|
yeasy/hyperledger-py
|
f24e9cc409b50628b911950466786be6fe74f09f
|
[
"Apache-2.0"
] | 45
|
2016-04-19T03:24:35.000Z
|
2020-07-31T17:10:02.000Z
|
#!/usr/bin/env python
import os
from setuptools import setup
from hyperledger import version
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
requirements = [
'requests >= 2.5.0',
'six >= 1.4.0',
# 'websocket-client >= 0.32.0',
]
exec(open('hyperledger/version.py').read())
with open('README.md') as f:
long_description = f.read()
with open('./test-requirements.txt') as test_reqs_txt:
test_requirements = [line for line in test_reqs_txt]
setup(
name='hyperledger',
version=version,
keywords=('hyperledger', 'blockchain'),
license='Apache License v2.0',
description="Python client for Hyperledger.",
long_description=long_description,
author='Baohua Yang',
author_email='yangbaohua@gmail.com',
url='https://github.com/yeasy/hyperledger-py/',
packages=[
'hyperledger', 'hyperledger.api', 'hyperledger.auth',
'hyperledger.ssladapter', 'hyperledger.utils',
],
platforms='any',
install_requires=requirements,
tests_require=test_requirements,
zip_safe=False,
test_suite='tests',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
],
)
| 28.736842
| 61
| 0.64591
|
3b66adf46c30955c6c7241943b506f820dbc57e4
| 14,807
|
py
|
Python
|
tests/conftest.py
|
uuip/redis-py
|
8638d24f1480a747e90467dc9950a278a895182d
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
uuip/redis-py
|
8638d24f1480a747e90467dc9950a278a895182d
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
uuip/redis-py
|
8638d24f1480a747e90467dc9950a278a895182d
|
[
"MIT"
] | null | null | null |
import argparse
import random
import time
from typing import Callable, TypeVar
from unittest.mock import Mock
from urllib.parse import urlparse
import pytest
from packaging.version import Version
import redis
from redis.backoff import NoBackoff
from redis.connection import parse_url
from redis.exceptions import RedisClusterException
from redis.retry import Retry
REDIS_INFO = {}
default_redis_url = "redis://localhost:6379/9"
default_redismod_url = "redis://localhost:36379"
default_redis_unstable_url = "redis://localhost:6378"
# default ssl client ignores verification for the purpose of testing
default_redis_ssl_url = "rediss://localhost:6666"
default_cluster_nodes = 6
_DecoratedTest = TypeVar("_DecoratedTest", bound="Callable")
_TestDecorator = Callable[[_DecoratedTest], _DecoratedTest]
# Taken from python3.9
class BooleanOptionalAction(argparse.Action):
def __init__(
self,
option_strings,
dest,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None,
):
_option_strings = []
for option_string in option_strings:
_option_strings.append(option_string)
if option_string.startswith("--"):
option_string = "--no-" + option_string[2:]
_option_strings.append(option_string)
if help is not None and default is not None:
help += f" (default: {default})"
super().__init__(
option_strings=_option_strings,
dest=dest,
nargs=0,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar,
)
def __call__(self, parser, namespace, values, option_string=None):
if option_string in self.option_strings:
setattr(namespace, self.dest, not option_string.startswith("--no-"))
def format_usage(self):
return " | ".join(self.option_strings)
def pytest_addoption(parser):
parser.addoption(
"--redis-url",
default=default_redis_url,
action="store",
help="Redis connection string," " defaults to `%(default)s`",
)
parser.addoption(
"--redismod-url",
default=default_redismod_url,
action="store",
help="Connection string to redis server"
" with loaded modules,"
" defaults to `%(default)s`",
)
parser.addoption(
"--redis-ssl-url",
default=default_redis_ssl_url,
action="store",
help="Redis SSL connection string," " defaults to `%(default)s`",
)
parser.addoption(
"--redis-cluster-nodes",
default=default_cluster_nodes,
action="store",
help="The number of cluster nodes that need to be "
"available before the test can start,"
" defaults to `%(default)s`",
)
parser.addoption(
"--redis-unstable-url",
default=default_redis_unstable_url,
action="store",
help="Redis unstable (latest version) connection string "
"defaults to %(default)s`",
)
parser.addoption(
"--uvloop", action=BooleanOptionalAction, help="Run tests with uvloop"
)
def _get_info(redis_url):
client = redis.Redis.from_url(redis_url)
info = client.info()
cmds = [command.upper() for command in client.command().keys()]
if "dping" in cmds:
info["enterprise"] = True
else:
info["enterprise"] = False
client.connection_pool.disconnect()
return info
def pytest_sessionstart(session):
redis_url = session.config.getoption("--redis-url")
info = _get_info(redis_url)
version = info["redis_version"]
arch_bits = info["arch_bits"]
cluster_enabled = info["cluster_enabled"]
REDIS_INFO["version"] = version
REDIS_INFO["arch_bits"] = arch_bits
REDIS_INFO["cluster_enabled"] = cluster_enabled
REDIS_INFO["enterprise"] = info["enterprise"]
# module info, if the second redis is running
try:
redismod_url = session.config.getoption("--redismod-url")
info = _get_info(redismod_url)
REDIS_INFO["modules"] = info["modules"]
except redis.exceptions.ConnectionError:
pass
except KeyError:
pass
if cluster_enabled:
cluster_nodes = session.config.getoption("--redis-cluster-nodes")
wait_for_cluster_creation(redis_url, cluster_nodes)
use_uvloop = session.config.getoption("--uvloop")
if use_uvloop:
try:
import uvloop
uvloop.install()
except ImportError as e:
raise RuntimeError(
"Can not import uvloop, make sure it is installed"
) from e
def wait_for_cluster_creation(redis_url, cluster_nodes, timeout=60):
"""
Waits for the cluster creation to complete.
As soon as all :cluster_nodes: nodes become available, the cluster will be
considered ready.
:param redis_url: the cluster's url, e.g. redis://localhost:16379/0
:param cluster_nodes: The number of nodes in the cluster
:param timeout: the amount of time to wait (in seconds)
"""
now = time.time()
end_time = now + timeout
client = None
print(f"Waiting for {cluster_nodes} cluster nodes to become available")
while now < end_time:
try:
client = redis.RedisCluster.from_url(redis_url)
if len(client.get_nodes()) == int(cluster_nodes):
print("All nodes are available!")
break
except RedisClusterException:
pass
time.sleep(1)
now = time.time()
if now >= end_time:
available_nodes = 0 if client is None else len(client.get_nodes())
raise RedisClusterException(
f"The cluster did not become available after {timeout} seconds. "
f"Only {available_nodes} nodes out of {cluster_nodes} are available"
)
def skip_if_server_version_lt(min_version: str) -> _TestDecorator:
redis_version = REDIS_INFO["version"]
check = Version(redis_version) < Version(min_version)
return pytest.mark.skipif(check, reason=f"Redis version required >= {min_version}")
def skip_if_server_version_gte(min_version: str) -> _TestDecorator:
redis_version = REDIS_INFO["version"]
check = Version(redis_version) >= Version(min_version)
return pytest.mark.skipif(check, reason=f"Redis version required < {min_version}")
def skip_unless_arch_bits(arch_bits: int) -> _TestDecorator:
return pytest.mark.skipif(
REDIS_INFO["arch_bits"] != arch_bits, reason=f"server is not {arch_bits}-bit"
)
def skip_ifmodversion_lt(min_version: str, module_name: str):
try:
modules = REDIS_INFO["modules"]
except KeyError:
return pytest.mark.skipif(True, reason="Redis server does not have modules")
if modules == []:
return pytest.mark.skipif(True, reason="No redis modules found")
for j in modules:
if module_name == j.get("name"):
version = j.get("ver")
mv = int(min_version.replace(".", ""))
check = version < mv
return pytest.mark.skipif(check, reason="Redis module version")
raise AttributeError(f"No redis module named {module_name}")
def skip_if_redis_enterprise() -> _TestDecorator:
check = REDIS_INFO["enterprise"] is True
return pytest.mark.skipif(check, reason="Redis enterprise")
def skip_ifnot_redis_enterprise() -> _TestDecorator:
check = REDIS_INFO["enterprise"] is False
return pytest.mark.skipif(check, reason="Not running in redis enterprise")
def skip_if_nocryptography() -> _TestDecorator:
try:
import cryptography # noqa
return pytest.mark.skipif(False, reason="Cryptography dependency found")
except ImportError:
return pytest.mark.skipif(True, reason="No cryptography dependency")
def skip_if_cryptography() -> _TestDecorator:
try:
import cryptography # noqa
return pytest.mark.skipif(True, reason="Cryptography dependency found")
except ImportError:
return pytest.mark.skipif(False, reason="No cryptography dependency")
def _get_client(
cls, request, single_connection_client=True, flushdb=True, from_url=None, **kwargs
):
"""
Helper for fixtures or tests that need a Redis client
Uses the "--redis-url" command line argument for connection info. Unlike
ConnectionPool.from_url, keyword arguments to this function override
values specified in the URL.
"""
if from_url is None:
redis_url = request.config.getoption("--redis-url")
else:
redis_url = from_url
cluster_mode = REDIS_INFO["cluster_enabled"]
if not cluster_mode:
url_options = parse_url(redis_url)
url_options.update(kwargs)
pool = redis.ConnectionPool(**url_options)
client = cls(connection_pool=pool)
else:
client = redis.RedisCluster.from_url(redis_url, **kwargs)
single_connection_client = False
if single_connection_client:
client = client.client()
if request:
def teardown():
if not cluster_mode:
if flushdb:
try:
client.flushdb()
except redis.ConnectionError:
# handle cases where a test disconnected a client
# just manually retry the flushdb
client.flushdb()
client.close()
client.connection_pool.disconnect()
else:
cluster_teardown(client, flushdb)
request.addfinalizer(teardown)
return client
def cluster_teardown(client, flushdb):
if flushdb:
try:
client.flushdb(target_nodes="primaries")
except redis.ConnectionError:
# handle cases where a test disconnected a client
# just manually retry the flushdb
client.flushdb(target_nodes="primaries")
client.close()
client.disconnect_connection_pools()
# specifically set to the zero database, because creating
# an index on db != 0 raises a ResponseError in redis
@pytest.fixture()
def modclient(request, **kwargs):
rmurl = request.config.getoption("--redismod-url")
with _get_client(
redis.Redis, request, from_url=rmurl, decode_responses=True, **kwargs
) as client:
yield client
@pytest.fixture()
def r(request):
with _get_client(redis.Redis, request) as client:
yield client
@pytest.fixture()
def r_timeout(request):
with _get_client(redis.Redis, request, socket_timeout=1) as client:
yield client
@pytest.fixture()
def r2(request):
"A second client for tests that need multiple"
with _get_client(redis.Redis, request) as client:
yield client
@pytest.fixture()
def sslclient(request):
with _get_client(redis.Redis, request, ssl=True) as client:
yield client
def _gen_cluster_mock_resp(r, response):
connection = Mock()
connection.retry = Retry(NoBackoff(), 0)
connection.read_response.return_value = response
r.connection = connection
return r
@pytest.fixture()
def mock_cluster_resp_ok(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
return _gen_cluster_mock_resp(r, "OK")
@pytest.fixture()
def mock_cluster_resp_int(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
return _gen_cluster_mock_resp(r, "2")
@pytest.fixture()
def mock_cluster_resp_info(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
response = (
"cluster_state:ok\r\ncluster_slots_assigned:16384\r\n"
"cluster_slots_ok:16384\r\ncluster_slots_pfail:0\r\n"
"cluster_slots_fail:0\r\ncluster_known_nodes:7\r\n"
"cluster_size:3\r\ncluster_current_epoch:7\r\n"
"cluster_my_epoch:2\r\ncluster_stats_messages_sent:170262\r\n"
"cluster_stats_messages_received:105653\r\n"
)
return _gen_cluster_mock_resp(r, response)
@pytest.fixture()
def mock_cluster_resp_nodes(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
response = (
"c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 "
"slave aa90da731f673a99617dfe930306549a09f83a6b 0 "
"1447836263059 5 connected\n"
"9bd595fe4821a0e8d6b99d70faa660638a7612b3 172.17.0.7:7008 "
"master - 0 1447836264065 0 connected\n"
"aa90da731f673a99617dfe930306549a09f83a6b 172.17.0.7:7003 "
"myself,master - 0 0 2 connected 5461-10922\n"
"1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 "
"slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 "
"1447836262556 3 connected\n"
"4ad9a12e63e8f0207025eeba2354bcf4c85e5b22 172.17.0.7:7005 "
"master - 0 1447836262555 7 connected 0-5460\n"
"19efe5a631f3296fdf21a5441680f893e8cc96ec 172.17.0.7:7004 "
"master - 0 1447836263562 3 connected 10923-16383\n"
"fbb23ed8cfa23f17eaf27ff7d0c410492a1093d6 172.17.0.7:7002 "
"master,fail - 1447829446956 1447829444948 1 disconnected\n"
)
return _gen_cluster_mock_resp(r, response)
@pytest.fixture()
def mock_cluster_resp_slaves(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
response = (
"['1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 "
"slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 "
"1447836789290 3 connected']"
)
return _gen_cluster_mock_resp(r, response)
@pytest.fixture(scope="session")
def master_host(request):
url = request.config.getoption("--redis-url")
parts = urlparse(url)
yield parts.hostname, parts.port
@pytest.fixture()
def unstable_r(request):
url = request.config.getoption("--redis-unstable-url")
with _get_client(
redis.Redis, request, from_url=url, decode_responses=True
) as client:
yield client
def wait_for_command(client, monitor, command, key=None):
# issue a command with a key name that's local to this process.
# if we find a command with our key before the command we're waiting
# for, something went wrong
if key is None:
# generate key
redis_version = REDIS_INFO["version"]
if Version(redis_version) >= Version("5.0.0"):
id_str = str(client.client_id())
else:
id_str = f"{random.randrange(2 ** 32):08x}"
key = f"__REDIS-PY-{id_str}__"
client.get(key)
while True:
monitor_response = monitor.next_command()
if command in monitor_response["command"]:
return monitor_response
if key in monitor_response["command"]:
return None
| 32.049784
| 87
| 0.662119
|
d3dbd1137b4c46e78024d66be5f2b97ce124c7b0
| 21,077
|
py
|
Python
|
mivisionx_calibration_tool.py
|
kiritigowda/MIVisionX-calibration-tool
|
d67d5ff36d8db0b09aa14d5cba4a53d620f0226c
|
[
"MIT"
] | 1
|
2019-10-24T13:47:25.000Z
|
2019-10-24T13:47:25.000Z
|
mivisionx_calibration_tool.py
|
kiritigowda/MIVisionX-calibration-tool
|
d67d5ff36d8db0b09aa14d5cba4a53d620f0226c
|
[
"MIT"
] | null | null | null |
mivisionx_calibration_tool.py
|
kiritigowda/MIVisionX-calibration-tool
|
d67d5ff36d8db0b09aa14d5cba4a53d620f0226c
|
[
"MIT"
] | 3
|
2019-08-02T17:02:51.000Z
|
2019-12-05T08:56:04.000Z
|
__author__ = "Kiriti Nagesh Gowda"
__copyright__ = "Copyright 2019, AMD MIVisionX"
__license__ = "MIT"
__version__ = "0.9.0"
__maintainer__ = "Kiriti Nagesh Gowda"
__email__ = "Kiriti.NageshGowda@amd.com"
__status__ = "ALPHA"
__script_name__ = "MIVisionX Calibration Tool"
import argparse
import os
import sys
import ctypes
import cv2
import time
import numpy
import numpy as np
from numpy.ctypeslib import ndpointer
# global variables
FP16inference = False
verbosePrint = False
labelNames = None
colors =[
(0,153,0), # Top1
(153,153,0), # Top2
(153,76,0), # Top3
(0,128,255), # Top4
(255,102,102), # Top5
];
# AMD Neural Net python wrapper
class AnnAPI:
def __init__(self,library):
self.lib = ctypes.cdll.LoadLibrary(library)
self.annQueryInference = self.lib.annQueryInference
self.annQueryInference.restype = ctypes.c_char_p
self.annQueryInference.argtypes = []
self.annQueryLocals = self.lib.annQueryLocals
self.annQueryLocals.restype = ctypes.c_char_p
self.annQueryLocals.argtypes = []
self.annCreateInference = self.lib.annCreateInference
self.annCreateInference.restype = ctypes.c_void_p
self.annCreateInference.argtypes = [ctypes.c_char_p]
self.annReleaseInference = self.lib.annReleaseInference
self.annReleaseInference.restype = ctypes.c_int
self.annReleaseInference.argtypes = [ctypes.c_void_p]
self.annCopyToInferenceInput = self.lib.annCopyToInferenceInput
self.annCopyToInferenceInput.restype = ctypes.c_int
self.annCopyToInferenceInput.argtypes = [ctypes.c_void_p, ndpointer(ctypes.c_float, flags="C_CONTIGUOUS"), ctypes.c_size_t, ctypes.c_bool]
self.annCopyFromInferenceOutput = self.lib.annCopyFromInferenceOutput
self.annCopyFromInferenceOutput.restype = ctypes.c_int
self.annCopyFromInferenceOutput.argtypes = [ctypes.c_void_p, ndpointer(ctypes.c_float, flags="C_CONTIGUOUS"), ctypes.c_size_t]
self.annCopyFromInferenceLocal = self.lib.annCopyFromInferenceLocal
self.annCopyFromInferenceLocal.restype = ctypes.c_int
self.annCopyFromInferenceLocal.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ndpointer(ctypes.c_float, flags="C_CONTIGUOUS"), ctypes.c_size_t]
self.annRunInference = self.lib.annRunInference
self.annRunInference.restype = ctypes.c_int
self.annRunInference.argtypes = [ctypes.c_void_p, ctypes.c_int]
print('OK: AnnAPI found "' + self.annQueryInference().decode("utf-8") + '" as configuration in ' + library)
# classifier definition
class annieObjectWrapper():
def __init__(self, annpythonlib, weightsfile):
select = 1
self.api = AnnAPI(annpythonlib)
input_info,output_info,empty = self.api.annQueryInference().decode("utf-8").split(';')
input,name,n_i,c_i,h_i,w_i = input_info.split(',')
outputCount = output_info.split(",")
stringcount = len(outputCount)
if stringcount == 6:
output,opName,n_o,c_o,h_o,w_o = output_info.split(',')
else:
output,opName,n_o,c_o= output_info.split(',')
h_o = '1'; w_o = '1';
self.hdl = self.api.annCreateInference(weightsfile.encode('utf-8'))
self.dim = (int(w_i),int(h_i))
self.outputDim = (int(n_o),int(c_o),int(h_o),int(w_o))
def __del__(self):
self.api.annReleaseInference(self.hdl)
def runInference(self, img, out):
		# build the input tensor: split the interleaved RGB frame into per-channel
		# planes (planar/channel-major layout expected by the inference input)
img_r = img[:,:,0]
img_g = img[:,:,1]
img_b = img[:,:,2]
img_t = np.concatenate((img_r, img_g, img_b), 0)
# copy input f32 to inference input
status = self.api.annCopyToInferenceInput(self.hdl, np.ascontiguousarray(img_t, dtype=np.float32), (img.shape[0]*img.shape[1]*3*4), 0)
# run inference
status = self.api.annRunInference(self.hdl, 1)
# copy output f32
status = self.api.annCopyFromInferenceOutput(self.hdl, np.ascontiguousarray(out, dtype=np.float32), out.nbytes)
return out
def classify(self, img):
# create output.f32 buffer
out_buf = bytearray(self.outputDim[0]*self.outputDim[1]*self.outputDim[2]*self.outputDim[3]*4)
out = np.frombuffer(out_buf, dtype=numpy.float32)
# run inference & receive output
output = self.runInference(img, out)
return output
def getLocalDetails(self, localsDict):
for each in filter(None,self.api.annQueryLocals().decode("utf-8").split(';')):
types,name,n,c,h,w = each.split(',')
if name[0:5] == "conv_":
local_size = int(n)*int(c)*int(h)*int(w)*4
local_buf = bytearray(local_size)
local = np.frombuffer(local_buf, dtype=np.float32)
localsDict[name] = local
def getLocals(self, img, localsDict, img_num):
if not os.path.exists("dumpBuffers"):
os.makedirs("dumpBuffers")
for each in filter(None,self.api.annQueryLocals().decode("utf-8").split(';')):
types,name,n,c,h,w = each.split(',')
if not os.path.exists('dumpBuffers/img_%d' %(img_num)):
os.makedirs('dumpBuffers/img_%d' %(img_num))
if name in localsDict:
#print types,name
local_size = int(n)*int(c)*int(h)*int(w)*4
status = self.api.annCopyFromInferenceLocal(self.hdl, name, np.ascontiguousarray(localsDict[name], dtype=np.float32), local_size)
#fid = open('dumpBuffers/img_%d/%s.bin' %(img_num,name), 'wb+')
#fid.write(localsDict[name].tobytes())
#fid.close()
#print('INFO: annCopyFromInferenceLocal status %d' %(status))
# process classification output function
def processClassificationOutput(inputImage, modelName, modelOutput):
# post process output file
start = time.time()
softmaxOutput = np.float32(modelOutput)
topIndex = []
topLabels = []
topProb = []
for x in softmaxOutput.argsort()[-5:]:
topIndex.append(x)
topLabels.append(labelNames[x])
topProb.append(softmaxOutput[x])
end = time.time()
if(verbosePrint):
print '%30s' % 'Processed results in ', str((end - start)*1000), 'ms'
# display output
start = time.time()
# initialize the result image
resultImage = np.zeros((250, 525, 3), dtype="uint8")
resultImage.fill(255)
cv2.putText(resultImage, 'MIVisionX Object Classification', (25, 25),cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0), 2)
topK = 1
for i in reversed(range(5)):
txt = topLabels[i].decode('utf-8')[:-1]
conf = topProb[i]
txt = 'Top'+str(topK)+':'+txt+' '+str(int(round((conf*100), 0)))+'%'
size = cv2.getTextSize(txt, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
t_width = size[0][0]
t_height = size[0][1]
textColor = (colors[topK - 1])
cv2.putText(resultImage,txt,(45,t_height+(topK*30+40)),cv2.FONT_HERSHEY_SIMPLEX,0.5,textColor,1)
topK = topK + 1
end = time.time()
if(verbosePrint):
print '%30s' % 'Processed results image in ', str((end - start)*1000), 'ms'
return resultImage, topIndex, topProb
# MIVisionX Classifier
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_format', type=str, required=True, help='pre-trained model format, options:caffe/onnx/nnef [required]')
parser.add_argument('--model_name', type=str, required=True, help='model name [required]')
parser.add_argument('--model', type=str, required=True, help='pre_trained model file [required]')
parser.add_argument('--model_input_dims', type=str, required=True, help='c,h,w - channel,height,width [required]')
parser.add_argument('--model_output_dims', type=str, required=True, help='c,h,w - channel,height,width [required]')
parser.add_argument('--label', type=str, required=True, help='labels text file [required]')
parser.add_argument('--output_dir', type=str, required=True, help='output dir to store ADAT results [required]')
parser.add_argument('--image_dir', type=str, required=True, help='image directory for analysis [required]')
parser.add_argument('--image_val', type=str, default='', help='image list with ground truth [optional]')
parser.add_argument('--hierarchy', type=str, default='', help='AMD proprietary hierarchical file [optional]')
parser.add_argument('--add', type=str, default='', help='input preprocessing factor [optional - default:0]')
parser.add_argument('--multiply', type=str, default='', help='input preprocessing factor [optional - default:1]')
parser.add_argument('--fp16', type=str, default='no', help='quantize to FP16 [optional - default:no]')
parser.add_argument('--replace', type=str, default='no', help='replace/overwrite model [optional - default:no]')
parser.add_argument('--verbose', type=str, default='no', help='verbose [optional - default:no]')
args = parser.parse_args()
# get arguments
modelFormat = args.model_format
modelName = args.model_name
modelLocation = args.model
modelInputDims = args.model_input_dims
modelOutputDims = args.model_output_dims
label = args.label
outputDir = args.output_dir
imageDir = args.image_dir
imageVal = args.image_val
hierarchy = args.hierarchy
inputAdd = args.add
inputMultiply = args.multiply
fp16 = args.fp16
replaceModel = args.replace
verbose = args.verbose
# set verbose print
if(verbose != 'no'):
verbosePrint = True
# set fp16 inference turned on/off
if(fp16 != 'no'):
FP16inference = True
# set paths
modelCompilerPath = '/opt/rocm/mivisionx/model_compiler/python'
ADATPath= '/opt/rocm/mivisionx/toolkit/analysis_and_visualization/classification'
setupDir = '~/.mivisionx-calibration-tool'
analyzerDir = os.path.expanduser(setupDir)
modelDir = analyzerDir+'/'+modelName+'_dir'
nnirDir = modelDir+'/nnir-files'
openvxDir = modelDir+'/openvx-files'
modelBuildDir = modelDir+'/build'
adatOutputDir = os.path.expanduser(outputDir)
inputImageDir = os.path.expanduser(imageDir)
trainedModel = os.path.expanduser(modelLocation)
labelText = os.path.expanduser(label)
hierarchyText = os.path.expanduser(hierarchy)
imageValText = os.path.expanduser(imageVal)
pythonLib = modelBuildDir+'/libannpython.so'
weightsFile = openvxDir+'/weights.bin'
finalImageResultsFile = modelDir+'/imageResultsFile.csv'
# get input & output dims
str_c_i, str_h_i, str_w_i = modelInputDims.split(',')
c_i = int(str_c_i); h_i = int(str_h_i); w_i = int(str_w_i)
str_c_o, str_h_o, str_w_o = modelOutputDims.split(',')
c_o = int(str_c_o); h_o = int(str_h_o); w_o = int(str_w_o)
# input pre-processing values
Ax=0
if(inputAdd != ''):
Ax = float(inputAdd)
Mx=1
if(inputMultiply != ''):
Mx = float(inputMultiply)
# check pre-trained model
if(not os.path.isfile(trainedModel) and modelFormat != 'nnef' ):
print("\nPre-Trained Model not found, check argument --model\n")
quit()
# check for label file
if (not os.path.isfile(labelText)):
print("\nlabels.txt not found, check argument --label\n")
quit()
else:
fp = open(labelText, 'r')
labelNames = fp.readlines()
fp.close()
# MIVisionX setup
if(os.path.exists(analyzerDir)):
print("\nMIVisionX Calibration Tool\n")
# replace old model or throw error
if(replaceModel == 'yes'):
os.system('rm -rf '+modelDir)
elif(os.path.exists(modelDir)):
print("ERROR: Model exists, use --replace yes option to overwrite or use a different name in --model_name")
quit()
else:
print("\nMIVisionX Calibration Tool Created\n")
os.system('(cd ; mkdir .mivisionx-calibration-tool)')
# Compile Model and generate python .so files
os.system('mkdir '+modelDir)
if(os.path.exists(modelDir)):
# convert to NNIR
if(modelFormat == 'caffe'):
os.system('(cd '+modelDir+'; python '+modelCompilerPath+'/caffe_to_nnir.py '+trainedModel+' nnir-files --input-dims 1,'+modelInputDims+' )')
elif(modelFormat == 'onnx'):
os.system('(cd '+modelDir+'; python '+modelCompilerPath+'/onnx_to_nnir.py '+trainedModel+' nnir-files --input-dims 1,'+modelInputDims+' )')
elif(modelFormat == 'nnef'):
os.system('(cd '+modelDir+'; python '+modelCompilerPath+'/nnef_to_nnir.py '+trainedModel+' nnir-files )')
else:
print("ERROR: Neural Network Format Not supported, use caffe/onnx/nnef in arugment --model_format")
quit()
# convert the model to FP16
if(FP16inference):
os.system('(cd '+modelDir+'; python '+modelCompilerPath+'/nnir_update.py --convert-fp16 1 --fuse-ops 1 nnir-files nnir-files)')
print("\nModel Quantized to FP16\n")
# convert to openvx
if(os.path.exists(nnirDir)):
os.system('(cd '+modelDir+'; python '+modelCompilerPath+'/nnir_to_openvx.py --virtual_tensor 0 nnir-files openvx-files)')
else:
print("ERROR: Converting Pre-Trained model to NNIR Failed")
quit()
# build model
if(os.path.exists(openvxDir)):
os.system('mkdir '+modelBuildDir)
os.system('(cd '+modelBuildDir+'; cmake ../openvx-files; make; ./anntest ../openvx-files/weights.bin )')
print("\nSUCCESS: Converting Pre-Trained model to MIVisionX Runtime successful\n")
else:
print("ERROR: Converting NNIR to OpenVX Failed")
quit()
else:
print("ERROR: MIVisionX Calibration Tool Failed")
quit()
# opencv display window
windowInput = "MIVisionX Calibration Tool - Input Image"
windowResult = "MIVisionX Calibration Tool - Results"
windowProgress = "MIVisionX Calibration Tool - Progress"
cv2.namedWindow(windowInput, cv2.WINDOW_GUI_EXPANDED)
cv2.resizeWindow(windowInput, 800, 800)
# create inference classifier
classifier = annieObjectWrapper(pythonLib, weightsFile)
# check for image val text
totalImages = 0;
if(imageVal != ''):
if (not os.path.isfile(imageValText)):
print("\nImage Validation Text not found, check argument --image_val\n")
quit()
else:
fp = open(imageValText, 'r')
imageValidation = fp.readlines()
fp.close()
totalImages = len(imageValidation)
else:
print("\nFlow without Image Validation Text not implemented, pass argument --image_val\n")
quit()
# original std out location
orig_stdout = sys.stdout
# setup results output file
sys.stdout = open(finalImageResultsFile,'a')
print('Image File Name,Ground Truth Label,Output Label 1,Output Label 2,Output Label 3,\
Output Label 4,Output Label 5,Prob 1,Prob 2,Prob 3,Prob 4,Prob 5')
sys.stdout = orig_stdout
#calibrate - create memory for local tensors
localsDict = {}
start = time.time()
classifier.getLocalDetails(localsDict)
end = time.time()
if(verbosePrint):
print '%30s' % 'Allocating memory for locals took ', str((end - start)*1000), 'ms'
#calibrate - get tensor names and allocate histogram mem
# process images
correctTop5 = 0; correctTop1 = 0; wrong = 0; noGroundTruth = 0;
for x in range(totalImages):
imageFileName, groundTruth = imageValidation[x].decode("utf-8").split(' ')
groundTruthIndex = int(groundTruth)
imageFile = os.path.expanduser(inputImageDir+'/'+imageFileName)
if (not os.path.isfile(imageFile)):
print 'Image File - '+imageFile+' not found'
quit()
else:
# read image
start = time.time()
frame = cv2.imread(imageFile)
end = time.time()
if(verbosePrint):
print '%30s' % 'Read Image in ', str((end - start)*1000), 'ms'
# resize and process frame
start = time.time()
resizedFrame = cv2.resize(frame, (w_i,h_i))
RGBframe = cv2.cvtColor(resizedFrame, cv2.COLOR_BGR2RGB)
if(inputAdd != '' or inputMultiply != ''):
RGBframe = ( RGBframe.copy() * Mx) + Ax
end = time.time()
if(verbosePrint):
print '%30s' % 'Input pre-processed in ', str((end - start)*1000), 'ms'
# run inference
start = time.time()
output = classifier.classify(RGBframe)
end = time.time()
if(verbosePrint):
print '%30s' % 'Executed Model in ', str((end - start)*1000), 'ms'
start = time.time()
outputLocals = classifier.getLocals(RGBframe, localsDict, x)
end = time.time()
if(verbosePrint):
print '%30s' % 'Obtained intermediate tensors for calibration ', str((end - start)*1000), 'ms'
# process output and display
resultImage, topIndex, topProb = processClassificationOutput(resizedFrame, modelName, output)
start = time.time()
cv2.imshow(windowInput, frame)
cv2.imshow(windowResult, resultImage)
end = time.time()
if(verbosePrint):
print '%30s' % 'Processed display in ', str((end - start)*1000), 'ms\n'
# write image results to a file
start = time.time()
sys.stdout = open(finalImageResultsFile,'a')
print(imageFileName+','+str(groundTruthIndex)+','+str(topIndex[4])+
','+str(topIndex[3])+','+str(topIndex[2])+','+str(topIndex[1])+','+str(topIndex[0])+','+str(topProb[4])+
','+str(topProb[3])+','+str(topProb[2])+','+str(topProb[1])+','+str(topProb[0]))
sys.stdout = orig_stdout
end = time.time()
if(verbosePrint):
print '%30s' % 'Image result saved in ', str((end - start)*1000), 'ms'
# create progress image
start = time.time()
progressImage = np.zeros((400, 500, 3), dtype="uint8")
progressImage.fill(255)
cv2.putText(progressImage, 'Inference Analyzer Progress', (25, 25),cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0), 2)
size = cv2.getTextSize(modelName, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)
t_width = size[0][0]
t_height = size[0][1]
headerX_start = int(250 -(t_width/2))
cv2.putText(progressImage,modelName,(headerX_start,t_height+(20+40)),cv2.FONT_HERSHEY_SIMPLEX,0.7,(0,0,0),2)
txt = 'Processed: '+str(x+1)+' of '+str(totalImages)
size = cv2.getTextSize(txt, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 2)
cv2.putText(progressImage,txt,(50,t_height+(60+40)),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
# progress bar
cv2.rectangle(progressImage, (50,150), (450,180), (192,192,192), -1)
progressWidth = int(50+ ((400*(x+1))/totalImages))
cv2.rectangle(progressImage, (50,150), (progressWidth,180), (255,204,153), -1)
percentage = int(((x+1)/float(totalImages))*100)
pTxt = 'progress: '+str(percentage)+'%'
cv2.putText(progressImage,pTxt,(175,170),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
if(groundTruthIndex == topIndex[4]):
correctTop1 = correctTop1 + 1
correctTop5 = correctTop5 + 1
elif(groundTruthIndex == topIndex[3] or groundTruthIndex == topIndex[2] or groundTruthIndex == topIndex[1] or groundTruthIndex == topIndex[0]):
correctTop5 = correctTop5 + 1
elif(groundTruthIndex == -1):
noGroundTruth = noGroundTruth + 1
else:
wrong = wrong + 1
# top 1 progress
cv2.rectangle(progressImage, (50,200), (450,230), (192,192,192), -1)
progressWidth = int(50 + ((400*correctTop1)/totalImages))
cv2.rectangle(progressImage, (50,200), (progressWidth,230), (0,153,0), -1)
percentage = int((correctTop1/float(totalImages))*100)
pTxt = 'Top1: '+str(percentage)+'%'
cv2.putText(progressImage,pTxt,(195,220),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
# top 5 progress
cv2.rectangle(progressImage, (50,250), (450,280), (192,192,192), -1)
progressWidth = int(50+ ((400*correctTop5)/totalImages))
cv2.rectangle(progressImage, (50,250), (progressWidth,280), (0,255,0), -1)
percentage = int((correctTop5/float(totalImages))*100)
pTxt = 'Top5: '+str(percentage)+'%'
cv2.putText(progressImage,pTxt,(195,270),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
# wrong progress
cv2.rectangle(progressImage, (50,300), (450,330), (192,192,192), -1)
progressWidth = int(50+ ((400*wrong)/totalImages))
cv2.rectangle(progressImage, (50,300), (progressWidth,330), (0,0,255), -1)
percentage = int((wrong/float(totalImages))*100)
pTxt = 'Mismatch: '+str(percentage)+'%'
cv2.putText(progressImage,pTxt,(175,320),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
# no ground truth progress
cv2.rectangle(progressImage, (50,350), (450,380), (192,192,192), -1)
progressWidth = int(50+ ((400*noGroundTruth)/totalImages))
cv2.rectangle(progressImage, (50,350), (progressWidth,380), (0,255,255), -1)
percentage = int((noGroundTruth/float(totalImages))*100)
pTxt = 'Ground Truth unavailable: '+str(percentage)+'%'
cv2.putText(progressImage,pTxt,(125,370),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
cv2.imshow(windowProgress, progressImage)
end = time.time()
if(verbosePrint):
print '%30s' % 'Progress image created in ', str((end - start)*1000), 'ms'
# exit on ESC
key = cv2.waitKey(2)
if key == 27:
break
# Calibration - get histogram
print("\nSUCCESS: Images Inferenced with the Model\n")
cv2.destroyWindow(windowInput)
cv2.destroyWindow(windowResult)
# Create ADAT folder and file
print("\nADAT tool called to create the analysis toolkit\n")
if(os.path.exists(adatOutputDir)):
if(hierarchy == ''):
os.system('python '+ADATPath+'/generate-visualization.py -i '+finalImageResultsFile+
' -d '+inputImageDir+' -l '+labelText+' -m '+modelName+' -o '+adatOutputDir+' -f '+modelName+'-ADAT')
else:
os.system('python '+ADATPath+'/generate-visualization.py -i '+finalImageResultsFile+
' -d '+inputImageDir+' -l '+labelText+' -h '+hierarchyText+' -m '+modelName+' -o '+adatOutputDir+' -f '+modelName+'-ADAT')
print("\nSUCCESS: Image Analysis Toolkit Created\n")
print("Press ESC to exit or close progess window\n")
while True:
key = cv2.waitKey(2)
if key == 27:
cv2.destroyAllWindows()
break
if cv2.getWindowProperty(windowProgress,cv2.WND_PROP_VISIBLE) < 1:
break
outputHTMLFile = os.path.expanduser(adatOutputDir+'/'+modelName+'-ADAT-toolKit/index.html')
os.system('firefox '+outputHTMLFile)
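# Illustrative invocation (script name and all paths below are hypothetical; the flags
# mirror the argparse options defined above):
#
#   python mivisionx_calibration_tool.py --model_format caffe --model_name resnet50 \
#       --model ~/models/resnet50.caffemodel --model_input_dims 3,224,224 \
#       --model_output_dims 1000,1,1 --label ~/labels.txt --output_dir ~/adat-output \
#       --image_dir ~/val-images --image_val ~/val.txt --replace yes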
| 41.408644
| 146
| 0.69787
|
27fe33b13760598918be8fd06be21302084c12dc
| 17,691
|
py
|
Python
|
pycql/integrations/django/filters.py
|
tomkralidis/pycql
|
35cea1ee462e06b75bbe47bf766e8f0bf79b0018
|
[
"MIT"
] | 10
|
2019-09-18T13:29:17.000Z
|
2021-07-15T18:21:54.000Z
|
pycql/integrations/django/filters.py
|
tomkralidis/pycql
|
35cea1ee462e06b75bbe47bf766e8f0bf79b0018
|
[
"MIT"
] | 6
|
2019-09-18T14:55:57.000Z
|
2021-08-30T21:43:46.000Z
|
pycql/integrations/django/filters.py
|
tomkralidis/pycql
|
35cea1ee462e06b75bbe47bf766e8f0bf79b0018
|
[
"MIT"
] | 4
|
2020-05-13T12:35:12.000Z
|
2021-06-29T10:37:11.000Z
|
# ------------------------------------------------------------------------------
#
# Project: pycql <https://github.com/geopython/pycql>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
#
# ------------------------------------------------------------------------------
# Copyright (C) 2019 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ------------------------------------------------------------------------------
from operator import and_, or_, add, sub, mul, truediv
from datetime import datetime, timedelta
from functools import reduce
try:
from collections import OrderedDict
except ImportError:
from django.utils.datastructures import SortedDict as OrderedDict
from django.db.models import Q, F, ForeignKey, Value
from django.db.models.expressions import Expression
from django.contrib.gis.gdal import SpatialReference
from django.contrib.gis.geos import Polygon
from django.contrib.gis.measure import D
ARITHMETIC_TYPES = (Expression, F, Value, int, float)
# ------------------------------------------------------------------------------
# Filters
# ------------------------------------------------------------------------------
def combine(sub_filters, combinator="AND"):
""" Combine filters using a logical combinator
:param sub_filters: the filters to combine
:param combinator: a string: "AND" / "OR"
:type sub_filters: list[django.db.models.Q]
:return: the combined filter
:rtype: :class:`django.db.models.Q`
"""
for sub_filter in sub_filters:
assert isinstance(sub_filter, Q)
assert combinator in ("AND", "OR")
op = and_ if combinator == "AND" else or_
return reduce(lambda acc, q: op(acc, q) if acc else q, sub_filters)
def negate(sub_filter):
""" Negate a filter, opposing its meaning.
:param sub_filter: the filter to negate
:type sub_filter: :class:`django.db.models.Q`
:return: the negated filter
:rtype: :class:`django.db.models.Q`
"""
assert isinstance(sub_filter, Q)
return ~sub_filter
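# Usage sketch for combine()/negate() (illustrative; "height" is a hypothetical numeric
# model field, and attribute()/compare()/null() are defined further below):
#
#   q1 = compare(attribute("height"), 10, ">")   # -> Q(height__gt=10)
#   q2 = null(attribute("height"), not_=True)    # -> Q(height__isnull=False)
#   q = negate(combine([q1, q2], "AND"))         # -> ~(q1 & q2)
#
# The resulting Q object can be passed directly to QuerySet.filter().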
OP_TO_COMP = {
"<": "lt",
"<=": "lte",
">": "gt",
">=": "gte",
"<>": None,
"=": "exact"
}
def compare(lhs, rhs, op, mapping_choices=None):
""" Compare a filter with an expression using a comparison operation
:param lhs: the field to compare
:type lhs: :class:`django.db.models.F`
:param rhs: the filter expression
:type rhs: :class:`django.db.models.F`
:param op: a string denoting the operation. one of ``"<"``, ``"<="``,
``">"``, ``">="``, ``"<>"``, ``"="``
:type op: str
:param mapping_choices: a dict to lookup potential choices for a certain
field.
:type mapping_choices: dict[str, str]
:return: a comparison expression object
:rtype: :class:`django.db.models.Q`
"""
assert isinstance(lhs, F)
# assert isinstance(rhs, Q) # TODO!!
assert op in OP_TO_COMP
comp = OP_TO_COMP[op]
field_name = lhs.name
if mapping_choices and field_name in mapping_choices:
try:
if isinstance(rhs, str):
rhs = mapping_choices[field_name][rhs]
elif hasattr(rhs, 'value'):
rhs = Value(mapping_choices[field_name][rhs.value])
except KeyError as e:
raise AssertionError("Invalid field value %s" % e)
if comp:
return Q(**{"%s__%s" % (lhs.name, comp): rhs})
return ~Q(**{field_name: rhs})
def between(lhs, low, high, not_=False):
""" Create a filter to match elements that have a value within a certain
range.
:param lhs: the field to compare
:type lhs: :class:`django.db.models.F`
:param low: the lower value of the range
:type low:
:param high: the upper value of the range
:type high:
:param not_: whether the range shall be inclusive (the default) or
exclusive
:type not_: bool
:return: a comparison expression object
:rtype: :class:`django.db.models.Q`
"""
assert isinstance(lhs, F)
# assert isinstance(low, BaseExpression)
# assert isinstance(high, BaseExpression) # TODO
q = Q(**{"%s__range" % lhs.name: (low, high)})
return ~q if not_ else q
def like(lhs, rhs, case=False, not_=False, mapping_choices=None):
""" Create a filter to filter elements according to a string attribute using
wildcard expressions.
:param lhs: the field to compare
:type lhs: :class:`django.db.models.F`
:param rhs: the wildcard pattern: a string containing any number of '%'
characters as wildcards.
:type rhs: str
:param case: whether the lookup shall be done case sensitively or not
:type case: bool
:param not_: whether the range shall be inclusive (the default) or
exclusive
:type not_: bool
:param mapping_choices: a dict to lookup potential choices for a certain
field.
:type mapping_choices: dict[str, str]
:return: a comparison expression object
:rtype: :class:`django.db.models.Q`
"""
assert isinstance(lhs, F)
if isinstance(rhs, str):
pattern = rhs
elif hasattr(rhs, 'value'):
pattern = rhs.value
else:
raise AssertionError('Invalid pattern specified')
parts = pattern.split("%")
length = len(parts)
if mapping_choices and lhs.name in mapping_choices:
# special case when choices are given for the field:
# compare statically and use 'in' operator to check if contained
cmp_av = [
(a, a if case else a.lower())
for a in mapping_choices[lhs.name].keys()
]
for idx, part in enumerate(parts):
if not part:
continue
cmp_p = part if case else part.lower()
if idx == 0 and length > 1: # startswith
cmp_av = [a for a in cmp_av if a[1].startswith(cmp_p)]
elif idx == 0: # exact matching
cmp_av = [a for a in cmp_av if a[1] == cmp_p]
elif idx == length - 1: # endswith
cmp_av = [a for a in cmp_av if a[1].endswith(cmp_p)]
else: # middle
cmp_av = [a for a in cmp_av if cmp_p in a[1]]
q = Q(**{
"%s__in" % lhs.name: [
mapping_choices[lhs.name][a[0]]
for a in cmp_av
]
})
else:
i = "" if case else "i"
q = None
for idx, part in enumerate(parts):
if not part:
continue
if idx == 0 and length > 1: # startswith
new_q = Q(**{
"%s__%s" % (lhs.name, "%sstartswith" % i): part
})
elif idx == 0: # exact matching
new_q = Q(**{
"%s__%s" % (lhs.name, "%sexact" % i): part
})
elif idx == length - 1: # endswith
new_q = Q(**{
"%s__%s" % (lhs.name, "%sendswith" % i): part
})
else: # middle
new_q = Q(**{
"%s__%s" % (lhs.name, "%scontains" % i): part
})
q = q & new_q if q else new_q
return ~q if not_ else q
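# Usage sketch for like() (illustrative; "name" is a hypothetical character field):
#
#   like(attribute("name"), "North%")              # -> Q(name__istartswith='North')
#   like(attribute("name"), "%beach%", not_=True)  # -> ~Q(name__icontains='beach')
#   like(attribute("name"), "North%", case=True)   # -> Q(name__startswith='North')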
def contains(lhs, items, not_=False, mapping_choices=None):
""" Create a filter to match elements attribute to be in a list of choices.
:param lhs: the field to compare
:type lhs: :class:`django.db.models.F`
:param items: a list of choices
:type items: list
:param not_: whether the range shall be inclusive (the default) or
exclusive
:type not_: bool
:param mapping_choices: a dict to lookup potential choices for a certain
field.
:type mapping_choices: dict[str, str]
:return: a comparison expression object
:rtype: :class:`django.db.models.Q`
"""
assert isinstance(lhs, F)
# for item in items:
# assert isinstance(item, BaseExpression)
if mapping_choices and lhs.name in mapping_choices:
def map_value(item):
try:
if isinstance(item, str):
item = mapping_choices[lhs.name][item]
elif hasattr(item, 'value'):
item = Value(mapping_choices[lhs.name][item.value])
except KeyError as e:
raise AssertionError("Invalid field value %s" % e)
return item
items = map(map_value, items)
q = Q(**{"%s__in" % lhs.name: items})
return ~q if not_ else q
def null(lhs, not_=False):
""" Create a filter to match elements whose attribute is (not) null
:param lhs: the field to compare
:type lhs: :class:`django.db.models.F`
:param not_: whether the range shall be inclusive (the default) or
exclusive
:type not_: bool
:return: a comparison expression object
:rtype: :class:`django.db.models.Q`
"""
assert isinstance(lhs, F)
return Q(**{"%s__isnull" % lhs.name: not not_})
def temporal(lhs, time_or_period, op):
""" Create a temporal filter for the given temporal attribute.
:param lhs: the field to compare
:type lhs: :class:`django.db.models.F`
:param time_or_period: the time instant or time span to use as a filter
:type time_or_period: :class:`datetime.datetime` or a tuple of two
datetimes or a tuple of one datetime and one
:class:`datetime.timedelta`
:param op: the comparison operation. one of ``"BEFORE"``,
``"BEFORE OR DURING"``, ``"DURING"``, ``"DURING OR AFTER"``,
``"AFTER"``.
:type op: str
:return: a comparison expression object
:rtype: :class:`django.db.models.Q`
"""
assert isinstance(lhs, F)
assert op in (
"BEFORE", "BEFORE OR DURING", "DURING", "DURING OR AFTER", "AFTER"
)
low = None
high = None
if op in ("BEFORE", "AFTER"):
assert isinstance(time_or_period, datetime)
if op == "BEFORE":
high = time_or_period
else:
low = time_or_period
else:
low, high = time_or_period
assert isinstance(low, datetime) or isinstance(high, datetime)
if isinstance(low, timedelta):
low = high - low
if isinstance(high, timedelta):
high = low + high
if low and high:
return Q(**{"%s__range" % lhs.name: (low, high)})
elif low:
return Q(**{"%s__gte" % lhs.name: low})
else:
return Q(**{"%s__lte" % lhs.name: high})
def time_interval(time_or_period, containment='overlaps',
begin_time_field='begin_time', end_time_field='end_time'):
"""
"""
gt_op = "__gte"
lt_op = "__lte"
is_slice = len(time_or_period) == 1
if not is_slice:
    low, high = time_or_period
if is_slice or (high == low and containment == "overlaps"):
    return Q(**{
        begin_time_field + "__lte": time_or_period[0],
        end_time_field + "__gte": time_or_period[0]
    })
elif high == low:
    # zero-length period with strict containment: match the exact instant
    return Q(**{
        begin_time_field + "__gte": low,
        end_time_field + "__lte": low
    })
else:
q = Q()
# check if the temporal bounds must be strictly contained
if containment == "contains":
if high is not None:
q &= Q(**{
end_time_field + lt_op: high
})
if low is not None:
q &= Q(**{
begin_time_field + gt_op: low
})
# or just overlapping
else:
if high is not None:
q &= Q(**{
begin_time_field + lt_op: high
})
if low is not None:
q &= Q(**{
end_time_field + gt_op: low
})
return q
UNITS_LOOKUP = {
"kilometers": "km",
"meters": "m"
}
def spatial(lhs, rhs, op, pattern=None, distance=None, units=None):
""" Create a spatial filter for the given spatial attribute.
:param lhs: the field to compare
:type lhs: :class:`django.db.models.F`
:param rhs: the geometry to use as a filter
:type rhs:
:param op: the comparison operation. one of ``"INTERSECTS"``,
``"DISJOINT"``, `"CONTAINS"``, ``"WITHIN"``,
``"TOUCHES"``, ``"CROSSES"``, ``"OVERLAPS"``,
``"EQUALS"``, ``"RELATE"``, ``"DWITHIN"``, ``"BEYOND"``
:type op: str
:param pattern: the spatial relation pattern
:type pattern: str
:param distance: the distance value for distance based lookups:
``"DWITHIN"`` and ``"BEYOND"``
:type distance: float
:param units: the units the distance is expressed in
:type units: str
:return: a comparison expression object
:rtype: :class:`django.db.models.Q`
"""
assert isinstance(lhs, F)
# assert isinstance(rhs, BaseExpression) # TODO
assert op in (
"INTERSECTS", "DISJOINT", "CONTAINS", "WITHIN", "TOUCHES", "CROSSES",
"OVERLAPS", "EQUALS", "RELATE", "DWITHIN", "BEYOND"
)
if op == "RELATE":
assert pattern
elif op in ("DWITHIN", "BEYOND"):
assert distance
assert units
if op in (
"INTERSECTS", "DISJOINT", "CONTAINS", "WITHIN", "TOUCHES",
"CROSSES", "OVERLAPS", "EQUALS"):
return Q(**{"%s__%s" % (lhs.name, op.lower()): rhs})
elif op == "RELATE":
return Q(**{"%s__relate" % lhs.name: (rhs, pattern)})
elif op in ("DWITHIN", "BEYOND"):
# TODO: maybe use D.unit_attname(units)
d = D(**{UNITS_LOOKUP[units]: distance})
if op == "DWITHIN":
return Q(**{"%s__distance_lte" % lhs.name: (rhs, d, 'spheroid')})
return Q(**{"%s__distance_gte" % lhs.name: (rhs, d, 'spheroid')})
def bbox(lhs, minx, miny, maxx, maxy, crs=None, bboverlaps=True):
""" Create a bounding box filter for the given spatial attribute.
:param lhs: the field to compare
:param minx: the lower x part of the bbox
:type minx: float
:param miny: the lower y part of the bbox
:type miny: float
:param maxx: the upper x part of the bbox
:type maxx: float
:param maxy: the upper y part of the bbox
:type maxy: float
:param crs: the CRS the bbox is expressed in
:type crs: str
:type lhs: :class:`django.db.models.F`
:return: a comparison expression object
:rtype: :class:`django.db.models.Q`
"""
assert isinstance(lhs, F)
box = Polygon.from_bbox((minx, miny, maxx, maxy))
if crs:
box.srid = SpatialReference(crs).srid
box.transform(4326)
if bboverlaps:
return Q(**{"%s__bboverlaps" % lhs.name: box})
return Q(**{"%s__intersects" % lhs.name: box})
def attribute(name, field_mapping=None):
""" Create an attribute lookup expression using a field mapping dictionary.
:param name: the field filter name
:type name: str
:param field_mapping: the dictionary to use as a lookup.
:type field_mapping: dict[str, str]
:rtype: :class:`django.db.models.F`
"""
if field_mapping:
field = field_mapping.get(name, name)
else:
field = name
return F(field)
def literal(value):
return Value(value)
OP_TO_FUNC = {
"+": add,
"-": sub,
"*": mul,
"/": truediv
}
def arithmetic(lhs, rhs, op):
""" Create an arithmetic filter
:param lhs: left hand side of the arithmetic expression. either a scalar
or a field lookup or another type of expression
:param rhs: same as `lhs`
:param op: the arithmetic operation. one of ``"+"``, ``"-"``, ``"*"``, ``"/"``
:rtype: :class:`django.db.models.F`
"""
assert isinstance(lhs, ARITHMETIC_TYPES), '%r is not a compatible type' % lhs
assert isinstance(rhs, ARITHMETIC_TYPES), '%r is not a compatible type' % rhs
assert op in OP_TO_FUNC
func = OP_TO_FUNC[op]
return func(lhs, rhs)
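# Usage sketch for arithmetic() (illustrative; "width" is a hypothetical numeric field):
#
#   arithmetic(attribute("width"), literal(2), "*")
#   # -> F('width') * Value(2), a Django expression usable in annotate()/filter()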
| 33.826004
| 86
| 0.561246
|
5c748f1d963e25045b7d09340d89d2abf55e9130
| 1,750
|
py
|
Python
|
app/core/models.py
|
elam91/recipe-app-api
|
0a30e9f7928ae737c26da20ecc4cce0570709fbb
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
elam91/recipe-app-api
|
0a30e9f7928ae737c26da20ecc4cce0570709fbb
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
elam91/recipe-app-api
|
0a30e9f7928ae737c26da20ecc4cce0570709fbb
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and saves a new user"""
if not email:
raise ValueError("Email is required for creating a user")
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and saves a new superuser"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
class Tag(models.Model):
"""Tag to be used for a recipe"""
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
def __str__(self):
return self.name
class Ingredient(models.Model):
"""Ingredient to be used in a recipe"""
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
def __str__(self):
return self.name
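# Usage sketch (illustrative; typically exercised from tests or a Django shell):
#
#   from django.contrib.auth import get_user_model
#   user = get_user_model().objects.create_user(
#       email='test@example.com', password='testpass123', name='Test User'
#   )
#   tag = Tag.objects.create(user=user, name='Vegan')
#   str(tag)  # -> 'Vegan'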
| 28.688525
| 76
| 0.677714
|
5642c2345debf2f20442b2bda22099eb2f8a601a
| 2,692
|
py
|
Python
|
data_sources/collect_list_of_beaches.py
|
Mithul/spaceapps
|
933a58af940cc0de2070b6d3f069a3c64a22be96
|
[
"MIT"
] | null | null | null |
data_sources/collect_list_of_beaches.py
|
Mithul/spaceapps
|
933a58af940cc0de2070b6d3f069a3c64a22be96
|
[
"MIT"
] | null | null | null |
data_sources/collect_list_of_beaches.py
|
Mithul/spaceapps
|
933a58af940cc0de2070b6d3f069a3c64a22be96
|
[
"MIT"
] | null | null | null |
import json
from bs4 import BeautifulSoup
from pymongo import *
import requests
import pdb
import LatLon
req = requests.get("https://en.wikipedia.org/wiki/List_of_beaches")
soup = BeautifulSoup(req.text,'html.parser')
ul = soup.find_all('ul')[3:94]
all_links = []
for u in ul:
links = u.find_all('a')
for i,a in enumerate(links):
print i
try:
if(a.has_attr("class")):
if(a["class"][0]=="new"):
continue
all_links.append(a['href'])
except Exception as e:
print str(e)
continue
big_countries = soup.find_all("div",{"role": "note"})[1:-1]
for country in big_countries:
url = "http://www.wikipedia.com"+country.find('a')["href"]
soup1 = BeautifulSoup(requests.get(url).text,'html.parser')
uls = soup1.find_all('ul')
uls = uls[2:]
uls = uls[:18]
for u in uls:
for i,a in enumerate(u.find_all('a')):
print i
if(a.has_attr("class")):
if(a["class"][0]=="new"):
continue
try:
all_links.append(a['href'])
except Exception as e:
if(a["class"][1]=='selflink'):
continue
continue
lines = []
skip = 0
filtered_list = []
for a in all_links:
if 'wikimedia' not in a and ':' not in a and '=' not in a and 'cite_note' not in a and 'wiki' in a and 'List' not in a:
filtered_list.append(a)
for i,link in enumerate(filtered_list):  # iterate only the filtered article links
print i
try:
s = BeautifulSoup(requests.get("http://www.wikipedia.com"+link).text,'html.parser')
latlong = s.find('span',{'class': 'geo-dec'}).text
lat = float(latlong.split(' ')[0].split(u'\xb0')[0])
lon = float(latlong.split(' ')[1].split(u'\xb0')[0])
except Exception as e:
continue
name = s.find('title').text.split('-')[0]
req = requests.get("https://maps.googleapis.com/maps/api/geocode/json?latlng={},{}&key={}".format(lat,lon,"AIzaSyCa-e3nbLmkMeczwrZ5U3-bhSyzEl4_d_g"))
try:
address = json.loads(req.text)["results"][0]["formatted_address"]
lines.append("Beach.create(name:'{}' ,latitude:{} ,longitude:{} , address:'{}')\n".format(name.encode('utf-8'),lat,lon,address.encode('utf-8')))
except Exception as e:
skip+=1
print "Skip"
continue
with open("seed.rb","w") as fp:
for line in lines:
try:
fp.write(line)
except Exception as e:
pdb.set_trace()
print skip
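# Each line written to seed.rb follows the format string above, e.g. (values illustrative):
#   Beach.create(name:'Bondi Beach' ,latitude:-33.89 ,longitude:151.27 , address:'Bondi Beach NSW, Australia')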
| 33.234568
| 150
| 0.533432
|
ffb6161a8d042e3926de262758424c40c0f2b7ce
| 14,114
|
py
|
Python
|
official/vision/beta/tasks/retinanet.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | null | null | null |
official/vision/beta/tasks/retinanet.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | null | null | null |
official/vision/beta/tasks/retinanet.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RetinaNet task definition."""
from typing import Any, Optional, List, Tuple, Mapping
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import task_factory
from official.vision import keras_cv
from official.vision.beta.configs import retinanet as exp_cfg
from official.vision.beta.dataloaders import input_reader_factory
from official.vision.beta.dataloaders import retinanet_input
from official.vision.beta.dataloaders import tf_example_decoder
from official.vision.beta.dataloaders import tfds_factory
from official.vision.beta.dataloaders import tf_example_label_map_decoder
from official.vision.beta.evaluation import coco_evaluator
from official.vision.beta.modeling import factory
@task_factory.register_task_cls(exp_cfg.RetinaNetTask)
class RetinaNetTask(base_task.Task):
"""A single-replica view of training procedure.
RetinaNet task provides artifacts for training/evaluation procedures, including
loading/iterating over Datasets, initializing the model, calculating the loss,
post-processing, and customized metrics with reduction.
"""
def build_model(self):
"""Build RetinaNet model."""
input_specs = tf.keras.layers.InputSpec(
shape=[None] + self.task_config.model.input_size)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (tf.keras.regularizers.l2(
l2_weight_decay / 2.0) if l2_weight_decay else None)
model = factory.build_retinanet(
input_specs=input_specs,
model_config=self.task_config.model,
l2_regularizer=l2_regularizer)
return model
def initialize(self, model: tf.keras.Model):
"""Loading pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.restore(ckpt_dir_or_file)
status.assert_consumed()
elif self.task_config.init_checkpoint_modules == 'backbone':
ckpt = tf.train.Checkpoint(backbone=model.backbone)
status = ckpt.restore(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
raise ValueError(
"Only 'all' or 'backbone' can be used to initialize the model.")
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(self,
params: exp_cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None):
"""Build input dataset."""
if params.tfds_name:
decoder = tfds_factory.get_detection_decoder(params.tfds_name)
else:
decoder_cfg = params.decoder.get()
if params.decoder.type == 'simple_decoder':
decoder = tf_example_decoder.TfExampleDecoder(
regenerate_source_id=decoder_cfg.regenerate_source_id)
elif params.decoder.type == 'label_map_decoder':
decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(
label_map=decoder_cfg.label_map,
regenerate_source_id=decoder_cfg.regenerate_source_id)
else:
raise ValueError('Unknown decoder type: {}!'.format(
params.decoder.type))
parser = retinanet_input.Parser(
output_size=self.task_config.model.input_size[:2],
min_level=self.task_config.model.min_level,
max_level=self.task_config.model.max_level,
num_scales=self.task_config.model.anchor.num_scales,
aspect_ratios=self.task_config.model.anchor.aspect_ratios,
anchor_size=self.task_config.model.anchor.anchor_size,
dtype=params.dtype,
match_threshold=params.parser.match_threshold,
unmatched_threshold=params.parser.unmatched_threshold,
aug_rand_hflip=params.parser.aug_rand_hflip,
aug_scale_min=params.parser.aug_scale_min,
aug_scale_max=params.parser.aug_scale_max,
skip_crowd_during_training=params.parser.skip_crowd_during_training,
max_num_instances=params.parser.max_num_instances)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_attribute_loss(self,
attribute_heads: List[exp_cfg.AttributeHead],
outputs: Mapping[str, Any],
labels: Mapping[str, Any],
box_sample_weight: tf.Tensor) -> float:
"""Computes attribute loss.
Args:
attribute_heads: a list of attribute head configs.
outputs: RetinaNet model outputs.
labels: RetinaNet labels.
box_sample_weight: normalized bounding box sample weights.
Returns:
Attribute loss of all attribute heads.
"""
attribute_loss = 0.0
for head in attribute_heads:
if head.name not in labels['attribute_targets']:
raise ValueError(f'Attribute {head.name} not found in label targets.')
if head.name not in outputs['attribute_outputs']:
raise ValueError(f'Attribute {head.name} not found in model outputs.')
y_true_att = keras_cv.losses.multi_level_flatten(
labels['attribute_targets'][head.name], last_dim=head.size)
y_pred_att = keras_cv.losses.multi_level_flatten(
outputs['attribute_outputs'][head.name], last_dim=head.size)
if head.type == 'regression':
att_loss_fn = tf.keras.losses.Huber(
1.0, reduction=tf.keras.losses.Reduction.SUM)
att_loss = att_loss_fn(
y_true=y_true_att,
y_pred=y_pred_att,
sample_weight=box_sample_weight)
else:
raise ValueError(f'Attribute type {head.type} not supported.')
attribute_loss += att_loss
return attribute_loss
def build_losses(self,
outputs: Mapping[str, Any],
labels: Mapping[str, Any],
aux_losses: Optional[Any] = None):
"""Build RetinaNet losses."""
params = self.task_config
attribute_heads = self.task_config.model.head.attribute_heads
cls_loss_fn = keras_cv.losses.FocalLoss(
alpha=params.losses.focal_loss_alpha,
gamma=params.losses.focal_loss_gamma,
reduction=tf.keras.losses.Reduction.SUM)
box_loss_fn = tf.keras.losses.Huber(
params.losses.huber_loss_delta, reduction=tf.keras.losses.Reduction.SUM)
# Sums all positives in a batch for normalization and avoids zero
# num_positives_sum, which would lead to inf loss during training
cls_sample_weight = labels['cls_weights']
box_sample_weight = labels['box_weights']
num_positives = tf.reduce_sum(box_sample_weight) + 1.0
cls_sample_weight = cls_sample_weight / num_positives
box_sample_weight = box_sample_weight / num_positives
y_true_cls = keras_cv.losses.multi_level_flatten(
labels['cls_targets'], last_dim=None)
y_true_cls = tf.one_hot(y_true_cls, params.model.num_classes)
y_pred_cls = keras_cv.losses.multi_level_flatten(
outputs['cls_outputs'], last_dim=params.model.num_classes)
y_true_box = keras_cv.losses.multi_level_flatten(
labels['box_targets'], last_dim=4)
y_pred_box = keras_cv.losses.multi_level_flatten(
outputs['box_outputs'], last_dim=4)
cls_loss = cls_loss_fn(
y_true=y_true_cls, y_pred=y_pred_cls, sample_weight=cls_sample_weight)
box_loss = box_loss_fn(
y_true=y_true_box, y_pred=y_pred_box, sample_weight=box_sample_weight)
model_loss = cls_loss + params.losses.box_loss_weight * box_loss
if attribute_heads:
model_loss += self.build_attribute_loss(attribute_heads, outputs, labels,
box_sample_weight)
total_loss = model_loss
if aux_losses:
reg_loss = tf.reduce_sum(aux_losses)
total_loss = model_loss + reg_loss
return total_loss, cls_loss, box_loss, model_loss
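# Loss normalization sketch (as implemented above): num_positives = sum(box_weights) + 1;
# both the focal (classification) and Huber (box) losses are summed with their sample
# weights divided by num_positives, and the +1 keeps the loss finite for batches that
# contain no positive anchors. total = cls_loss + box_loss_weight * box_loss (+ aux/L2).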
def build_metrics(self, training: bool = True):
"""Build detection metrics."""
metrics = []
metric_names = ['total_loss', 'cls_loss', 'box_loss', 'model_loss']
for name in metric_names:
metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))
if not training:
if self.task_config.validation_data.tfds_name and self.task_config.annotation_file:
raise ValueError(
"Can't evaluate using annotation file when TFDS is used.")
self.coco_metric = coco_evaluator.COCOEvaluator(
annotation_file=self.task_config.annotation_file,
include_mask=False,
per_category_metrics=self.task_config.per_category_metrics)
return metrics
def train_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[List[Any]] = None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(features, training=True)
outputs = tf.nest.map_structure(
lambda x: tf.cast(x, tf.float32), outputs)
# Computes per-replica loss.
loss, cls_loss, box_loss, model_loss = self.build_losses(
outputs=outputs, labels=labels, aux_losses=model.losses)
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient when LossScaleOptimizer is used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: loss}
all_losses = {
'total_loss': loss,
'cls_loss': cls_loss,
'box_loss': box_loss,
'model_loss': model_loss,
}
if metrics:
for m in metrics:
m.update_state(all_losses[m.name])
logs.update({m.name: m.result()})
return logs
def validation_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
metrics: Optional[List[Any]] = None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
outputs = model(features, anchor_boxes=labels['anchor_boxes'],
image_shape=labels['image_info'][:, 1, :],
training=False)
loss, cls_loss, box_loss, model_loss = self.build_losses(
outputs=outputs, labels=labels, aux_losses=model.losses)
logs = {self.loss: loss}
all_losses = {
'total_loss': loss,
'cls_loss': cls_loss,
'box_loss': box_loss,
'model_loss': model_loss,
}
coco_model_outputs = {
'detection_boxes': outputs['detection_boxes'],
'detection_scores': outputs['detection_scores'],
'detection_classes': outputs['detection_classes'],
'num_detections': outputs['num_detections'],
'source_id': labels['groundtruths']['source_id'],
'image_info': labels['image_info']
}
logs.update({self.coco_metric.name: (labels['groundtruths'],
coco_model_outputs)})
if metrics:
for m in metrics:
m.update_state(all_losses[m.name])
logs.update({m.name: m.result()})
return logs
def aggregate_logs(self, state=None, step_outputs=None):
if state is None:
self.coco_metric.reset_states()
state = self.coco_metric
self.coco_metric.update_state(step_outputs[self.coco_metric.name][0],
step_outputs[self.coco_metric.name][1])
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
return self.coco_metric.result()
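# Usage sketch (illustrative; assumes the standard Model Garden factories and a registered
# experiment name such as 'retinanet_resnetfpn_coco'):
#
#   from official.core import exp_factory, task_factory
#   config = exp_factory.get_exp_config('retinanet_resnetfpn_coco')
#   task = task_factory.get_task(config.task)      # -> RetinaNetTask
#   model = task.build_model()
#   train_ds = task.build_inputs(config.task.train_data)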
| 40.210826
| 90
| 0.675783
|
8a0b06919976e3ffd2664b4a50e362df093c61f6
| 10,334
|
py
|
Python
|
src/deepke/name_entity_re/few_shot/module/train.py
|
zjunlp/deepke-dre
|
0de6bdb4d03e5b36b84e93a509788b6fa9bbbce9
|
[
"MIT"
] | 3
|
2022-02-18T05:03:02.000Z
|
2022-03-19T12:32:16.000Z
|
src/deepke/name_entity_re/few_shot/module/train.py
|
zjunlp/deepke-dre
|
0de6bdb4d03e5b36b84e93a509788b6fa9bbbce9
|
[
"MIT"
] | null | null | null |
src/deepke/name_entity_re/few_shot/module/train.py
|
zjunlp/deepke-dre
|
0de6bdb4d03e5b36b84e93a509788b6fa9bbbce9
|
[
"MIT"
] | null | null | null |
import torch
from torch import optim
from tqdm import tqdm
from ..utils import convert_preds_to_outputs, write_predictions
import random
class Trainer(object):
def __init__(self, train_data=None, dev_data=None, test_data=None, model=None, process=None, args=None, logger=None, loss=None, metrics=None, writer=None) -> None:
self.train_data = train_data
self.dev_data = dev_data
self.test_data = test_data
self.model = model
self.process = process
self.logger = logger
self.metrics = metrics
self.writer = writer
self.loss = loss
self.num_epochs = args.num_epochs
self.batch_size = args.batch_size
self.lr = args.learning_rate
self.eval_begin_epoch = args.eval_begin_epoch
self.device = args.device
self.load_path = args.load_path
self.save_path = args.save_path
self.refresh_step = 1
self.best_metric = 0
self.best_dev_epoch = None
self.optimizer = None
if self.train_data is not None:
self.train_num_steps = len(self.train_data) * self.num_epochs
self.step = 0
self.args = args
def train(self):
self.before_train() # something should do before training
self.step = 0
self.model.train()
self.logger.info("***** Running training *****")
self.logger.info(" Num instance = %d", len(self.train_data)*self.batch_size)
self.logger.info(" Num epoch = %d", self.num_epochs)
self.logger.info(" Batch size = %d", self.batch_size)
self.logger.info(" Learning rate = {}".format(self.lr))
self.logger.info(" Evaluate begin = %d", self.eval_begin_epoch)
if self.load_path is not None: # load model from load_path
self.logger.info("Loading model from {}".format(self.load_path))
load_model_dict = torch.load(self.args.load_path)
model_dict = self.model.state_dict()
for name in load_model_dict:
if name in model_dict:
if model_dict[name].shape == load_model_dict[name].shape:
model_dict[name] = load_model_dict[name]
else:
self.logger.info(f"Skip loading parameter: {name}, "
f"required shape: {model_dict[name].shape}, "
f"loaded shape: {load_model_dict[name].shape}")
else:
self.logger.info(f"Not Found! Skip loading parameter: {name}.")
self.model.load_state_dict(model_dict)
self.logger.info("Load model successful!")
with tqdm(total=self.train_num_steps, postfix='loss:{0:<6.5f}', leave=False, dynamic_ncols=True, initial=self.step) as pbar:
self.pbar = pbar
avg_loss = 0
for epoch in range(self.num_epochs):
pbar.set_description_str(desc="Epoch {}/{}".format(epoch, self.num_epochs))
for batch in self.train_data:
self.step += 1
batch = (tup.to(self.device) if isinstance(tup, torch.Tensor) else tup for tup in batch)
loss = self._step(batch, mode="train")
avg_loss += loss.item()
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if self.step % self.refresh_step == 0:
avg_loss = float(avg_loss) / self.refresh_step
print_output = "loss:{:<6.5f}".format(avg_loss)
pbar.update(1)
pbar.set_postfix_str(print_output)
# self.writer.add_scalar(tag='loss', scalar_value=avg_loss, global_step=self.step) # tensorbordx
self.writer.log({ 'avg_loss':avg_loss})
avg_loss = 0
if epoch >= self.eval_begin_epoch:
self.evaluate(epoch) # generator to dev.
pbar.close()
self.pbar = None
self.logger.info("Get best performance at epoch {}, best f1 score is {:.2f}".format(self.best_dev_epoch, self.best_metric))
def evaluate(self, epoch):
self.model.eval()
self.logger.info("***** Running evaluate *****")
self.logger.info(" Num instance = %d", len(self.dev_data)*self.batch_size)
self.logger.info(" Batch size = %d", self.batch_size)
with torch.no_grad():
with tqdm(total=len(self.dev_data), leave=False, dynamic_ncols=True) as pbar:
pbar.set_description_str(desc="Dev")
for batch in self.dev_data:
batch = (tup.to(self.device) if isinstance(tup, torch.Tensor) else tup for tup in batch) # to cpu/cuda device
self._step(batch, mode="dev")
pbar.update()
# evaluate done
eva_result = self.metrics.get_metric()
pbar.close()
self.logger.info("Epoch {}/{}, best f1: {}, current f1 score: {:.2f}, recall: {:.2f}, precision: {:.2f}."\
.format(epoch, self.num_epochs, self.best_metric, eva_result['f'], eva_result['rec'], eva_result['pre']))
# self.writer.add_scalars('evaluate', {'f1': eva_result['f'],
# 'recall': eva_result['rec'],
# 'precision': eva_result['pre']}, epoch)
self.writer.log({'f1': eva_result['f'],'recall': eva_result['rec'],'precision': eva_result['pre']})
if eva_result['f'] >= self.best_metric: # this epoch get best performance
self.logger.info("Get better performance at epoch {}".format(epoch))
self.best_dev_epoch = epoch
self.best_metric = eva_result['f'] # update best metric(f1 score)
if self.save_path is not None: # need to save model
torch.save(self.model.state_dict(), self.save_path+"/best_model.pth")
self.logger.info("Save best model at {}".format(self.save_path))
self.model.train()
def predict(self):
self.model.eval()
self.logger.info("***** Running testing *****")
self.logger.info(" Num instance = %d", len(self.test_data)*self.batch_size)
self.logger.info(" Batch size = %d", self.batch_size)
if self.load_path is not None: # load model from load_path
self.logger.info("Loading model from {}".format(self.load_path))
self.model.load_state_dict(torch.load(self.load_path))
self.logger.info("Load model successful!")
self.model.to(self.device)
with torch.no_grad():
with tqdm(total=len(self.test_data), leave=False, dynamic_ncols=True) as pbar:
pbar.set_description_str(desc="Test")
texts = []
labels = []
for batch in self.test_data:
batch = (tup.to(self.device) if isinstance(tup, torch.Tensor) else tup for tup in batch) # to cpu/cuda device
src_tokens, src_seq_len, first, raw_words = batch
preds = self._step((src_tokens, src_seq_len, first), mode="test")
outputs = convert_preds_to_outputs(preds, raw_words, self.process.mapping, self.process.tokenizer)
texts.extend(raw_words)
labels.extend(outputs)
pbar.update()
self.logger.info("***** Predict example *****")
idx = random.randint(0, len(texts) - 1)
print(len(texts), len(labels))
self.logger.info("Raw texts: " + " ".join(texts[idx]))
self.logger.info("Prediction: " + " ".join(labels[idx]))
if self.args.write_path is not None: # write predict
write_predictions(self.args.write_path, texts, labels)
self.logger.info("Write into {}!".format(self.args.write_path))
def _step(self, batch, mode="train"):
if mode=="dev": # dev: compute metric
src_tokens, tgt_tokens, src_seq_len, tgt_seq_len, first, target_span = batch
pred = self.model.predict(src_tokens, src_seq_len, first)
self.metrics.evaluate(target_span, pred, tgt_tokens)
return
elif mode=="test": # test: just get pred
src_tokens, src_seq_len, first = batch
pred = self.model.predict(src_tokens, src_seq_len, first)
return pred
else: # train: get loss
src_tokens, tgt_tokens, src_seq_len, tgt_seq_len, first, target_span = batch
pred = self.model(src_tokens, tgt_tokens, src_seq_len, first)
loss = self.loss(tgt_tokens, tgt_seq_len, pred)
return loss
def before_train(self):
parameters = []
params = {'lr':self.lr, 'weight_decay':1e-2}
params['params'] = [param for name, param in self.model.named_parameters() if not ('bart_encoder' in name or 'bart_decoder' in name)]
parameters.append(params)
params = {'lr':self.lr, 'weight_decay':1e-2}
params['params'] = []
for name, param in self.model.named_parameters():
if ('bart_encoder' in name or 'bart_decoder' in name) and not ('layernorm' in name or 'layer_norm' in name):
params['params'].append(param)
parameters.append(params)
params = {'lr':self.lr, 'weight_decay':0}
params['params'] = []
for name, param in self.model.named_parameters():
if ('bart_encoder' in name or 'bart_decoder' in name) and ('layernorm' in name or 'layer_norm' in name):
params['params'].append(param)
parameters.append(params)
self.optimizer = optim.AdamW(parameters)
if self.args.freeze_plm: # freeze pretrained language model(bart)
for name, par in self.model.named_parameters():
if 'prompt_encoder' in name or 'prompt_decoder' in name and "bart_mlp" not in name:
par.requires_grad = False
self.model.to(self.device)
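# Parameter-group sketch (as assembled in before_train above):
#   group 1: parameters outside the BART encoder/decoder        -> lr, weight_decay=1e-2
#   group 2: BART encoder/decoder weights except LayerNorm      -> lr, weight_decay=1e-2
#   group 3: BART encoder/decoder LayerNorm parameters          -> lr, weight_decay=0
# Excluding LayerNorm parameters from weight decay follows the usual transformer
# fine-tuning convention.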
| 49.682692
| 167
| 0.570544
|
dc0e4e2d7a484ff56e62d9c50041f9f254b128de
| 2,695
|
py
|
Python
|
beagle/building/loader.py
|
FernandoGaGu/beagle
|
b1c968ec84d560e9903a582413e6334fcf447735
|
[
"BSD-3-Clause"
] | 1
|
2020-12-27T15:58:14.000Z
|
2020-12-27T15:58:14.000Z
|
beagle/building/loader.py
|
FernandoGaGu/beagle
|
b1c968ec84d560e9903a582413e6334fcf447735
|
[
"BSD-3-Clause"
] | null | null | null |
beagle/building/loader.py
|
FernandoGaGu/beagle
|
b1c968ec84d560e9903a582413e6334fcf447735
|
[
"BSD-3-Clause"
] | null | null | null |
from ..fitness import Fitness
from ..population import Population
from ..report import Report, MOEAReport
from ..algorithm import Algorithm
from ..exceptions import UnrecognisedParameter
from .basic import basic_ga_1, basic_ga_2
from .nsga2 import nsga2
from .spea2 import spea2
NSGA2_ID = 'NSGA2'
SPEA2_ID = 'SPEA2'
AVAILABLE_BUILDINGS = {
'GA1': basic_ga_1,
'GA2': basic_ga_2,
NSGA2_ID: nsga2,
SPEA2_ID: spea2
}
_MOEAs = [NSGA2_ID, SPEA2_ID]
def use_algorithm(name: str, fitness: Fitness, **kwargs):
"""
Function that returns a Algorithm based on a pre-defined schema.
Currently available:
'GA1' for more info use: help(beagle.AVAILABLE_BUILDINGS['GA1'])
'GA2' for more info use: help(beagle.AVAILABLE_BUILDINGS['GA2'])
'NSGA2' and 'SPEA2' for the multi-objective NSGA-II and SPEA2 algorithms.
To specify parameters for each operator it is required to prefix the process name (e.g. mutation, recombination
or selection) with a _ and the parameter name. For example to specify the mutation probability it is necessary to
use the argument: 'mutation_probability'.
Parameters
----------
:param name: str
Pre-defined schema name. Available: 'GA1', 'GA2', 'NSGA2', 'SPEA2'
:param fitness: beagle.Fitness
Fitness object used to evaluate the individuals in population.
:param initial_population: beagle.Population
Initial population.
:param evaluate_out_of_step: bool (optional, by default False)
Indicates whether to make an initial evaluation of the population before calling the step() function at the
first interaction. In most algorithms, except for multi-objective ones such as NSGA2 or SPEA2 the parameter
will be True.
:param kwargs:
Parameters of the mutation, recombination and selection operators.
Returns
-------
:return beagle.Algorithm
Pre-build algorithm.
"""
if name not in AVAILABLE_BUILDINGS: raise UnrecognisedParameter(name, 'name: str in use_algorithm()')
initial_population = None
if name in _MOEAs:
kwargs['report'] = MOEAReport(num_objectives=len(fitness)) # create report for multi-objective algorithms
kwargs['evaluate_out_of_step'] = False
else:
kwargs['report'] = Report() # create report for basic algorithms
if 'initial_population' in kwargs:
initial_population = kwargs['initial_population']
if not isinstance(initial_population, Population): raise TypeError('initial_population must be a Population.')
del kwargs['initial_population'] # eliminate initial_population from kwargs
return Algorithm(step=AVAILABLE_BUILDINGS[name], fitness=fitness, initial_population=initial_population, **kwargs)
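# Usage sketch (illustrative; assumes `my_fitness` is a beagle.Fitness and `pop` a
# beagle.Population created elsewhere):
#
#   alg = use_algorithm('NSGA2', fitness=my_fitness, initial_population=pop,
#                       mutation_probability=0.1)
#   # -> beagle.Algorithm wired with the NSGA2 step function and a MOEAReport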
| 36.917808
| 118
| 0.715399
|
b62cd451ff7df9af5c1129f4885678cf4af82765
| 7,812
|
py
|
Python
|
CodonSubstitution/build/biopython/Bio/SeqIO/FastaIO.py
|
JackCurragh/DARNED
|
13963d129bd8f69fb1106ad1f47394b3211a939c
|
[
"MIT"
] | 8
|
2021-12-14T21:30:01.000Z
|
2022-02-14T11:30:03.000Z
|
CodonSubstitution/build/biopython/Bio/SeqIO/FastaIO.py
|
JackCurragh/DARNED
|
13963d129bd8f69fb1106ad1f47394b3211a939c
|
[
"MIT"
] | null | null | null |
CodonSubstitution/build/biopython/Bio/SeqIO/FastaIO.py
|
JackCurragh/DARNED
|
13963d129bd8f69fb1106ad1f47394b3211a939c
|
[
"MIT"
] | 1
|
2021-11-04T21:48:14.000Z
|
2021-11-04T21:48:14.000Z
|
# Copyright 2006-2009 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# This module is for reading and writing FASTA format files as SeqRecord
# objects. The code is partly inspired by earlier Biopython modules,
# Bio.Fasta.* and the now deprecated Bio.SeqIO.FASTA
"""Bio.SeqIO support for the "fasta" (aka FastA or Pearson) file format.
You are expected to use this module via the Bio.SeqIO functions."""
from Bio.Alphabet import single_letter_alphabet
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.SeqIO.Interfaces import SequentialSequenceWriter
#This is a generator function!
def FastaIterator(handle, alphabet = single_letter_alphabet, title2ids = None):
"""Generator function to iterate over Fasta records (as SeqRecord objects).
handle - input file
alphabet - optional alphabet
title2ids - A function that, when given the title of the FASTA
file (without the beginning >), will return the id, name and
description (in that order) for the record as a tuple of strings.
If this is not given, then the entire title line will be used
as the description, and the first word as the id and name.
Note that use of title2ids matches that of Bio.Fasta.SequenceParser
but the defaults are slightly different.
"""
#Skip any text before the first record (e.g. blank lines, comments)
while True:
line = handle.readline()
if line == "" : return #Premature end of file, or just empty?
if line[0] == ">":
break
while True:
if line[0]!=">":
raise ValueError("Records in Fasta files should start with '>' character")
if title2ids:
id, name, descr = title2ids(line[1:].rstrip())
else:
descr = line[1:].rstrip()
try:
id = descr.split()[0]
except IndexError:
assert not descr, repr(line)
#Should we use SeqRecord default for no ID?
id = ""
name = id
lines = []
line = handle.readline()
while True:
if not line : break
if line[0] == ">": break
lines.append(line.rstrip())
line = handle.readline()
#Remove trailing whitespace, and any internal spaces
#(and any embedded \r which are possible in mangled files
#when not opened in universal read lines mode)
result = "".join(lines).replace(" ", "").replace("\r", "")
#Return the record and then continue...
yield SeqRecord(Seq(result, alphabet),
id = id, name = name, description = descr)
if not line : return #StopIteration
assert False, "Should not reach this line"
class FastaWriter(SequentialSequenceWriter):
"""Class to write Fasta format files."""
def __init__(self, handle, wrap=60, record2title=None):
"""Create a Fasta writer.
handle - Handle to an output file, e.g. as returned
by open(filename, "w")
wrap - Optional line length used to wrap sequence lines.
Defaults to wrapping the sequence at 60 characters
Use zero (or None) for no wrapping, giving a single
long line for the sequence.
record2title - Optional function to return the text to be
used for the title line of each record. By default
a combination of the record.id and record.description
is used. If the record.description starts with the
record.id, then just the record.description is used.
You can either use:
myWriter = FastaWriter(open(filename,"w"))
writer.write_file(myRecords)
Or, follow the sequential file writer system, for example:
myWriter = FastaWriter(open(filename,"w"))
writer.write_header() # does nothing for Fasta files
...
Multiple calls to writer.write_record() and/or writer.write_records()
...
writer.write_footer() # does nothing for Fasta files
writer.close()
"""
SequentialSequenceWriter.__init__(self, handle)
#self.handle = handle
self.wrap = None
if wrap:
if wrap < 1:
raise ValueError
self.wrap = wrap
self.record2title = record2title
def write_record(self, record):
"""Write a single Fasta record to the file."""
assert self._header_written
assert not self._footer_written
self._record_written = True
if self.record2title:
title=self.clean(self.record2title(record))
else:
id = self.clean(record.id)
description = self.clean(record.description)
#if description[:len(id)]==id:
if description and description.split(None,1)[0]==id:
#The description includes the id at the start
title = description
elif description:
title = "%s %s" % (id, description)
else:
title = id
assert "\n" not in title
assert "\r" not in title
self.handle.write(">%s\n" % title)
data = self._get_seq_string(record) #Catches sequence being None
assert "\n" not in data
assert "\r" not in data
if self.wrap:
for i in range(0, len(data), self.wrap):
self.handle.write(data[i:i+self.wrap] + "\n")
else:
self.handle.write(data + "\n")
if __name__ == "__main__":
print "Running quick self test"
import os
from Bio.Alphabet import generic_protein, generic_nucleotide
#Download the files from here:
#ftp://ftp.ncbi.nlm.nih.gov/genomes/Bacteria/Nanoarchaeum_equitans
fna_filename = "NC_005213.fna"
faa_filename = "NC_005213.faa"
def genbank_name_function(text):
text, descr = text.split(None,1)
id = text.split("|")[3]
name = id.split(".",1)[0]
return id, name, descr
def print_record(record):
#See also bug 2057
#http://bugzilla.open-bio.org/show_bug.cgi?id=2057
print "ID:" + record.id
print "Name:" + record.name
print "Descr:" + record.description
print record.seq
for feature in record.annotations:
print '/%s=%s' % (feature, record.annotations[feature])
if record.dbxrefs:
print "Database cross references:"
for x in record.dbxrefs : print " - %s" % x
if os.path.isfile(fna_filename):
print "--------"
print "FastaIterator (single sequence)"
iterator = FastaIterator(open(fna_filename, "r"), alphabet=generic_nucleotide, title2ids=genbank_name_function)
count=0
for record in iterator:
count=count+1
print_record(record)
assert count == 1
print str(record.__class__)
if os.path.isfile(faa_filename):
print "--------"
print "FastaIterator (multiple sequences)"
iterator = FastaIterator(open(faa_filename, "r"), alphabet=generic_protein, title2ids=genbank_name_function)
count=0
for record in iterator:
count=count+1
print_record(record)
break
assert count>0
print str(record.__class__)
from cStringIO import StringIO
print "--------"
print "FastaIterator (empty input file)"
#Just to make sure no errors happen
iterator = FastaIterator(StringIO(""))
count = 0
for record in iterator:
count = count+1
assert count==0
print "Done"
| 35.834862
| 119
| 0.611367
|
5f409391616e463a98d2d80e3b677bd331748c33
| 3,235
|
py
|
Python
|
url_migration/tests/test_resolver.py
|
riklaunim/django-url-migration
|
0d1115d02b64a895934ecdd7387e65b34b3d68e7
|
[
"BSD-3-Clause"
] | 4
|
2017-04-28T18:58:31.000Z
|
2017-10-04T07:32:47.000Z
|
url_migration/tests/test_resolver.py
|
riklaunim/django-url-migration
|
0d1115d02b64a895934ecdd7387e65b34b3d68e7
|
[
"BSD-3-Clause"
] | 3
|
2021-04-23T11:30:49.000Z
|
2021-04-26T14:12:29.000Z
|
url_migration/tests/test_resolver.py
|
riklaunim/django-url-migration
|
0d1115d02b64a895934ecdd7387e65b34b3d68e7
|
[
"BSD-3-Clause"
] | 1
|
2021-04-23T11:07:36.000Z
|
2021-04-23T11:07:36.000Z
|
from unittest import mock
from django import test
from .. import models
from .. import views
from . import factories
class TestUrlResolverIntegration(test.TestCase):
def test_if_url_is_resolved(self):
factories.UrlMappingFactory(source_url='/bar', target_url='/foo')
response = self.client.get('/bar')
self.assertEqual('/foo', response.url)
self.assertEqual(301, response.status_code)
def test_if_url_is_resolved_by_regexp(self):
factories.UrlRegexpMappingFactory(
source_mapping=r'/profil-v/(?P<pk>[0-9]+)/',
target_mapping=r'/profil-miejsca/\1/'
)
response = self.client.get('/profil-v/10/')
self.assertEqual('/profil-miejsca/10/', response.url)
self.assertEqual(301, response.status_code)
def test_if_404_is_returned(self):
response = self.client.get('/abc/10/')
self.assertEqual(404, response.status_code)
@mock.patch('url_migration.views.logger')
def test_if_404_is_returned_for_invalid_redirect(self, logger):
factories.UrlMappingFactory(
source_url='/a/',
target_url='/a/',
)
response = self.client.get('/a/')
self.assertTrue(logger.exception.called)
self.assertEqual(404, response.status_code)
class TestRegexpMapping(test.TestCase):
META = {
'HTTP_REFERER': 'http://google.pl',
'HTTP_USER_AGENT': 'Firefox 1',
'HTTP_X_REAL_IP': '10.0.0.2',
}
def test_if_get_or_create_url_mapping_returns_existing_mapping(self):
rule = factories.UrlRegexpMappingFactory(
source_mapping=r'/a/b/(?P<pk>[0-9]+)/')
mapping = factories.RegexpGeneratedMappingFactory(
source_url='/a/b/1/', regexp=rule)
other_rule = factories.UrlRegexpMappingFactory(
source_mapping=r'/a/(.*)/')
request = mock.Mock(path='/a/b/1/')
resolver = views.UrlResolver(request)
result = resolver._get_or_create_url_mapping(other_rule)
self.assertEqual(mapping, result)
self.assertEqual(1, models.RegexpGeneratedMapping.objects.count())
def test_if_get_or_create_url_mapping_creates_new_mapping(self):
rule = factories.UrlRegexpMappingFactory(
source_mapping=r'/a/(?P<pk>[0-9]+)/',
target_mapping=r'/b/\1/',
)
factories.RegexpGeneratedMappingFactory(
source_url='/a/1234/', regexp=rule)
request = mock.Mock(path='/a/1/')
resolver = views.UrlResolver(request)
result = resolver._get_or_create_url_mapping(rule)
expected = models.RegexpGeneratedMapping.objects.get(
source_url='/a/1/',
target_url='/b/1/',
regexp=rule
)
self.assertEqual(expected, result)
self.assertEqual(2, models.RegexpGeneratedMapping.objects.count())
def test_if_exception_is_raised_for_redirect_loop(self):
factories.UrlMappingFactory(
source_url='/a/',
target_url='/a/',
)
request = test.RequestFactory().get('/a/')
resolver = views.UrlResolver(request)
with self.assertRaises(views.InvalidRedirect):
list(resolver.map())
| 35.944444
| 74
| 0.643277
|
5dde6523ade23b141b81dd1e07eb6ea734c0b560
| 38,015
|
py
|
Python
|
venv/Lib/site-packages/twilio/rest/taskrouter/v1/workspace/task/reservation.py
|
Kenny-Z/IOT-Message-Board-with-Face-Recognition
|
81b9a778ebf850b9f78e5d18a52720bd78593565
|
[
"MIT"
] | 4
|
2020-11-13T02:31:25.000Z
|
2020-12-24T22:04:30.000Z
|
twilio/rest/taskrouter/v1/workspace/task/reservation.py
|
Sephra1/twilio-python
|
7dd04209e2d38a31c2783ee8f8ae8be6222a43f2
|
[
"MIT"
] | 7
|
2020-06-06T01:19:11.000Z
|
2021-06-10T19:42:05.000Z
|
bot/lib/python3.7/site-packages/twilio/rest/taskrouter/v1/workspace/task/reservation.py
|
carlosrh18/DavinciBot
|
d73a6b7f68d7bab25d134d3f85c6b63a86c206c5
|
[
"MIT"
] | 1
|
2020-08-27T01:33:19.000Z
|
2020-08-27T01:33:19.000Z
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class ReservationList(ListResource):
""" """
def __init__(self, version, workspace_sid, task_sid):
"""
Initialize the ReservationList
:param Version version: Version that contains the resource
:param workspace_sid: The SID of the Workspace that this task is contained within.
:param task_sid: The SID of the reserved Task resource
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationList
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationList
"""
super(ReservationList, self).__init__(version)
# Path Solution
self._solution = {'workspace_sid': workspace_sid, 'task_sid': task_sid, }
self._uri = '/Workspaces/{workspace_sid}/Tasks/{task_sid}/Reservations'.format(**self._solution)
def stream(self, reservation_status=values.unset, limit=None, page_size=None):
"""
Streams ReservationInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param ReservationInstance.Status reservation_status: Returns the list of reservations for a task with a specified ReservationStatus
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(reservation_status=reservation_status, page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, reservation_status=values.unset, limit=None, page_size=None):
"""
Lists ReservationInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param ReservationInstance.Status reservation_status: Returns the list of reservations for a task with a specified ReservationStatus
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance]
"""
return list(self.stream(reservation_status=reservation_status, limit=limit, page_size=page_size, ))
def page(self, reservation_status=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of ReservationInstance records from the API.
Request is executed immediately
:param ReservationInstance.Status reservation_status: Returns the list of reservations for a task with a specified ReservationStatus
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationPage
"""
data = values.of({
'ReservationStatus': reservation_status,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return ReservationPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of ReservationInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return ReservationPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a ReservationContext
:param sid: The SID of the TaskReservation resource to fetch
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
"""
return ReservationContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
task_sid=self._solution['task_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a ReservationContext
:param sid: The SID of the TaskReservation resource to fetch
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
"""
return ReservationContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
task_sid=self._solution['task_sid'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Taskrouter.V1.ReservationList>'
class ReservationPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the ReservationPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param workspace_sid: The SID of the Workspace that this task is contained within.
:param task_sid: The SID of the reserved Task resource
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationPage
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationPage
"""
super(ReservationPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of ReservationInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
"""
return ReservationInstance(
self._version,
payload,
workspace_sid=self._solution['workspace_sid'],
task_sid=self._solution['task_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Taskrouter.V1.ReservationPage>'
class ReservationContext(InstanceContext):
""" """
def __init__(self, version, workspace_sid, task_sid, sid):
"""
Initialize the ReservationContext
:param Version version: Version that contains the resource
:param workspace_sid: The SID of the Workspace with the TaskReservation resource to fetch
:param task_sid: The SID of the reserved Task resource with the TaskReservation resource to fetch
:param sid: The SID of the TaskReservation resource to fetch
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
"""
super(ReservationContext, self).__init__(version)
# Path Solution
self._solution = {'workspace_sid': workspace_sid, 'task_sid': task_sid, 'sid': sid, }
self._uri = '/Workspaces/{workspace_sid}/Tasks/{task_sid}/Reservations/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch the ReservationInstance
:returns: The fetched ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return ReservationInstance(
self._version,
payload,
workspace_sid=self._solution['workspace_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
def update(self, reservation_status=values.unset,
worker_activity_sid=values.unset, instruction=values.unset,
dequeue_post_work_activity_sid=values.unset,
dequeue_from=values.unset, dequeue_record=values.unset,
dequeue_timeout=values.unset, dequeue_to=values.unset,
dequeue_status_callback_url=values.unset, call_from=values.unset,
call_record=values.unset, call_timeout=values.unset,
call_to=values.unset, call_url=values.unset,
call_status_callback_url=values.unset, call_accept=values.unset,
redirect_call_sid=values.unset, redirect_accept=values.unset,
redirect_url=values.unset, to=values.unset, from_=values.unset,
status_callback=values.unset, status_callback_method=values.unset,
status_callback_event=values.unset, timeout=values.unset,
record=values.unset, muted=values.unset, beep=values.unset,
start_conference_on_enter=values.unset,
end_conference_on_exit=values.unset, wait_url=values.unset,
wait_method=values.unset, early_media=values.unset,
max_participants=values.unset,
conference_status_callback=values.unset,
conference_status_callback_method=values.unset,
conference_status_callback_event=values.unset,
conference_record=values.unset, conference_trim=values.unset,
recording_channels=values.unset,
recording_status_callback=values.unset,
recording_status_callback_method=values.unset,
conference_recording_status_callback=values.unset,
conference_recording_status_callback_method=values.unset,
region=values.unset, sip_auth_username=values.unset,
sip_auth_password=values.unset,
dequeue_status_callback_event=values.unset,
post_work_activity_sid=values.unset, supervisor_mode=values.unset,
supervisor=values.unset,
end_conference_on_customer_exit=values.unset,
beep_on_customer_entrance=values.unset):
"""
Update the ReservationInstance
:param ReservationInstance.Status reservation_status: The new status of the reservation
:param unicode worker_activity_sid: The new worker activity SID if rejecting a reservation
:param unicode instruction: The assignment instruction for reservation
:param unicode dequeue_post_work_activity_sid: The SID of the Activity resource to start after executing a Dequeue instruction
:param unicode dequeue_from: The Caller ID of the call to the worker when executing a Dequeue instruction
:param unicode dequeue_record: Whether to record both legs of a call when executing a Dequeue instruction
:param unicode dequeue_timeout: Timeout for call when executing a Dequeue instruction
:param unicode dequeue_to: The Contact URI of the worker when executing a Dequeue instruction
:param unicode dequeue_status_callback_url: The Callback URL for completed call event when executing a Dequeue instruction
:param unicode call_from: The Caller ID of the outbound call when executing a Call instruction
:param unicode call_record: Whether to record both legs of a call when executing a Call instruction
:param unicode call_timeout: Timeout for call when executing a Call instruction
:param unicode call_to: The Contact URI of the worker when executing a Call instruction
:param unicode call_url: TwiML URI executed on answering the worker's leg as a result of the Call instruction
:param unicode call_status_callback_url: The URL to call for the completed call event when executing a Call instruction
:param bool call_accept: Whether to accept a reservation when executing a Call instruction
:param unicode redirect_call_sid: The Call SID of the call parked in the queue when executing a Redirect instruction
:param bool redirect_accept: Whether the reservation should be accepted when executing a Redirect instruction
:param unicode redirect_url: TwiML URI to redirect the call to when executing the Redirect instruction
:param unicode to: The Contact URI of the worker when executing a Conference instruction
:param unicode from_: The Caller ID of the call to the worker when executing a Conference instruction
:param unicode status_callback: The URL we should call to send status information to your application
:param unicode status_callback_method: The HTTP method we should use to call status_callback
:param ReservationInstance.CallStatus status_callback_event: The call progress events that we will send to status_callback
:param unicode timeout: Timeout for call when executing a Conference instruction
:param bool record: Whether to record the participant and their conferences
:param bool muted: Whether to mute the agent
:param unicode beep: Whether to play a notification beep when the participant joins
:param bool start_conference_on_enter: Whether the conference starts when the participant joins the conference
:param bool end_conference_on_exit: Whether to end the conference when the agent leaves
:param unicode wait_url: URL that hosts pre-conference hold music
:param unicode wait_method: The HTTP method we should use to call `wait_url`
:param bool early_media: Whether agents can hear the state of the outbound call
:param unicode max_participants: The maximum number of agent conference participants
:param unicode conference_status_callback: The callback URL for conference events
:param unicode conference_status_callback_method: HTTP method for requesting `conference_status_callback` URL
:param ReservationInstance.ConferenceEvent conference_status_callback_event: The conference status events that we will send to conference_status_callback
:param unicode conference_record: Whether to record the conference the participant is joining
:param unicode conference_trim: How to trim leading and trailing silence from your recorded conference audio files
:param unicode recording_channels: Specify `mono` or `dual` recording channels
:param unicode recording_status_callback: The URL that we should call using the `recording_status_callback_method` when the recording status changes
:param unicode recording_status_callback_method: The HTTP method we should use when we call `recording_status_callback`
:param unicode conference_recording_status_callback: The URL we should call using the `conference_recording_status_callback_method` when the conference recording is available
:param unicode conference_recording_status_callback_method: The HTTP method we should use to call `conference_recording_status_callback`
:param unicode region: The region where we should mix the conference audio
:param unicode sip_auth_username: The SIP username used for authentication
:param unicode sip_auth_password: The SIP password for authentication
:param unicode dequeue_status_callback_event: The Call progress events sent via webhooks as a result of a Dequeue instruction
:param unicode post_work_activity_sid: The new worker activity SID after executing a Conference instruction
:param ReservationInstance.SupervisorMode supervisor_mode: The Supervisor mode when executing the Supervise instruction
:param unicode supervisor: The Supervisor SID/URI when executing the Supervise instruction
:param bool end_conference_on_customer_exit: Whether to end the conference when the customer leaves
:param bool beep_on_customer_entrance: Whether to play a notification beep when the customer joins
:returns: The updated ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
"""
data = values.of({
'ReservationStatus': reservation_status,
'WorkerActivitySid': worker_activity_sid,
'Instruction': instruction,
'DequeuePostWorkActivitySid': dequeue_post_work_activity_sid,
'DequeueFrom': dequeue_from,
'DequeueRecord': dequeue_record,
'DequeueTimeout': dequeue_timeout,
'DequeueTo': dequeue_to,
'DequeueStatusCallbackUrl': dequeue_status_callback_url,
'CallFrom': call_from,
'CallRecord': call_record,
'CallTimeout': call_timeout,
'CallTo': call_to,
'CallUrl': call_url,
'CallStatusCallbackUrl': call_status_callback_url,
'CallAccept': call_accept,
'RedirectCallSid': redirect_call_sid,
'RedirectAccept': redirect_accept,
'RedirectUrl': redirect_url,
'To': to,
'From': from_,
'StatusCallback': status_callback,
'StatusCallbackMethod': status_callback_method,
'StatusCallbackEvent': serialize.map(status_callback_event, lambda e: e),
'Timeout': timeout,
'Record': record,
'Muted': muted,
'Beep': beep,
'StartConferenceOnEnter': start_conference_on_enter,
'EndConferenceOnExit': end_conference_on_exit,
'WaitUrl': wait_url,
'WaitMethod': wait_method,
'EarlyMedia': early_media,
'MaxParticipants': max_participants,
'ConferenceStatusCallback': conference_status_callback,
'ConferenceStatusCallbackMethod': conference_status_callback_method,
'ConferenceStatusCallbackEvent': serialize.map(conference_status_callback_event, lambda e: e),
'ConferenceRecord': conference_record,
'ConferenceTrim': conference_trim,
'RecordingChannels': recording_channels,
'RecordingStatusCallback': recording_status_callback,
'RecordingStatusCallbackMethod': recording_status_callback_method,
'ConferenceRecordingStatusCallback': conference_recording_status_callback,
'ConferenceRecordingStatusCallbackMethod': conference_recording_status_callback_method,
'Region': region,
'SipAuthUsername': sip_auth_username,
'SipAuthPassword': sip_auth_password,
'DequeueStatusCallbackEvent': serialize.map(dequeue_status_callback_event, lambda e: e),
'PostWorkActivitySid': post_work_activity_sid,
'SupervisorMode': supervisor_mode,
'Supervisor': supervisor,
'EndConferenceOnCustomerExit': end_conference_on_customer_exit,
'BeepOnCustomerEntrance': beep_on_customer_entrance,
})
payload = self._version.update(method='POST', uri=self._uri, data=data, )
return ReservationInstance(
self._version,
payload,
workspace_sid=self._solution['workspace_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Taskrouter.V1.ReservationContext {}>'.format(context)
class ReservationInstance(InstanceResource):
""" """
class Status(object):
PENDING = "pending"
ACCEPTED = "accepted"
REJECTED = "rejected"
TIMEOUT = "timeout"
CANCELED = "canceled"
RESCINDED = "rescinded"
WRAPPING = "wrapping"
COMPLETED = "completed"
class CallStatus(object):
INITIATED = "initiated"
RINGING = "ringing"
ANSWERED = "answered"
COMPLETED = "completed"
class ConferenceEvent(object):
START = "start"
END = "end"
JOIN = "join"
LEAVE = "leave"
MUTE = "mute"
HOLD = "hold"
SPEAKER = "speaker"
class SupervisorMode(object):
MONITOR = "monitor"
WHISPER = "whisper"
BARGE = "barge"
def __init__(self, version, payload, workspace_sid, task_sid, sid=None):
"""
Initialize the ReservationInstance
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
"""
super(ReservationInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'reservation_status': payload.get('reservation_status'),
'sid': payload.get('sid'),
'task_sid': payload.get('task_sid'),
'worker_name': payload.get('worker_name'),
'worker_sid': payload.get('worker_sid'),
'workspace_sid': payload.get('workspace_sid'),
'url': payload.get('url'),
'links': payload.get('links'),
}
# Context
self._context = None
self._solution = {
'workspace_sid': workspace_sid,
'task_sid': task_sid,
'sid': sid or self._properties['sid'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ReservationContext for this ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
"""
if self._context is None:
self._context = ReservationContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def date_created(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def reservation_status(self):
"""
:returns: The current status of the reservation
:rtype: ReservationInstance.Status
"""
return self._properties['reservation_status']
@property
def sid(self):
"""
:returns: The unique string that identifies the resource
:rtype: unicode
"""
return self._properties['sid']
@property
def task_sid(self):
"""
:returns: The SID of the reserved Task resource
:rtype: unicode
"""
return self._properties['task_sid']
@property
def worker_name(self):
"""
:returns: The friendly_name of the Worker that is reserved
:rtype: unicode
"""
return self._properties['worker_name']
@property
def worker_sid(self):
"""
:returns: The SID of the reserved Worker resource
:rtype: unicode
"""
return self._properties['worker_sid']
@property
def workspace_sid(self):
"""
:returns: The SID of the Workspace that this task is contained within.
:rtype: unicode
"""
return self._properties['workspace_sid']
@property
def url(self):
"""
:returns: The absolute URL of the TaskReservation reservation
:rtype: unicode
"""
return self._properties['url']
@property
def links(self):
"""
:returns: The URLs of related resources
:rtype: unicode
"""
return self._properties['links']
def fetch(self):
"""
Fetch the ReservationInstance
:returns: The fetched ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
"""
return self._proxy.fetch()
def update(self, reservation_status=values.unset,
worker_activity_sid=values.unset, instruction=values.unset,
dequeue_post_work_activity_sid=values.unset,
dequeue_from=values.unset, dequeue_record=values.unset,
dequeue_timeout=values.unset, dequeue_to=values.unset,
dequeue_status_callback_url=values.unset, call_from=values.unset,
call_record=values.unset, call_timeout=values.unset,
call_to=values.unset, call_url=values.unset,
call_status_callback_url=values.unset, call_accept=values.unset,
redirect_call_sid=values.unset, redirect_accept=values.unset,
redirect_url=values.unset, to=values.unset, from_=values.unset,
status_callback=values.unset, status_callback_method=values.unset,
status_callback_event=values.unset, timeout=values.unset,
record=values.unset, muted=values.unset, beep=values.unset,
start_conference_on_enter=values.unset,
end_conference_on_exit=values.unset, wait_url=values.unset,
wait_method=values.unset, early_media=values.unset,
max_participants=values.unset,
conference_status_callback=values.unset,
conference_status_callback_method=values.unset,
conference_status_callback_event=values.unset,
conference_record=values.unset, conference_trim=values.unset,
recording_channels=values.unset,
recording_status_callback=values.unset,
recording_status_callback_method=values.unset,
conference_recording_status_callback=values.unset,
conference_recording_status_callback_method=values.unset,
region=values.unset, sip_auth_username=values.unset,
sip_auth_password=values.unset,
dequeue_status_callback_event=values.unset,
post_work_activity_sid=values.unset, supervisor_mode=values.unset,
supervisor=values.unset,
end_conference_on_customer_exit=values.unset,
beep_on_customer_entrance=values.unset):
"""
Update the ReservationInstance
:param ReservationInstance.Status reservation_status: The new status of the reservation
:param unicode worker_activity_sid: The new worker activity SID if rejecting a reservation
:param unicode instruction: The assignment instruction for reservation
:param unicode dequeue_post_work_activity_sid: The SID of the Activity resource to start after executing a Dequeue instruction
:param unicode dequeue_from: The Caller ID of the call to the worker when executing a Dequeue instruction
:param unicode dequeue_record: Whether to record both legs of a call when executing a Dequeue instruction
:param unicode dequeue_timeout: Timeout for call when executing a Dequeue instruction
:param unicode dequeue_to: The Contact URI of the worker when executing a Dequeue instruction
:param unicode dequeue_status_callback_url: The Callback URL for completed call event when executing a Dequeue instruction
:param unicode call_from: The Caller ID of the outbound call when executing a Call instruction
:param unicode call_record: Whether to record both legs of a call when executing a Call instruction
:param unicode call_timeout: Timeout for call when executing a Call instruction
:param unicode call_to: The Contact URI of the worker when executing a Call instruction
:param unicode call_url: TwiML URI executed on answering the worker's leg as a result of the Call instruction
:param unicode call_status_callback_url: The URL to call for the completed call event when executing a Call instruction
:param bool call_accept: Whether to accept a reservation when executing a Call instruction
:param unicode redirect_call_sid: The Call SID of the call parked in the queue when executing a Redirect instruction
:param bool redirect_accept: Whether the reservation should be accepted when executing a Redirect instruction
:param unicode redirect_url: TwiML URI to redirect the call to when executing the Redirect instruction
:param unicode to: The Contact URI of the worker when executing a Conference instruction
:param unicode from_: The Caller ID of the call to the worker when executing a Conference instruction
:param unicode status_callback: The URL we should call to send status information to your application
:param unicode status_callback_method: The HTTP method we should use to call status_callback
:param ReservationInstance.CallStatus status_callback_event: The call progress events that we will send to status_callback
:param unicode timeout: Timeout for call when executing a Conference instruction
:param bool record: Whether to record the participant and their conferences
:param bool muted: Whether to mute the agent
:param unicode beep: Whether to play a notification beep when the participant joins
:param bool start_conference_on_enter: Whether the conference starts when the participant joins the conference
:param bool end_conference_on_exit: Whether to end the conference when the agent leaves
:param unicode wait_url: URL that hosts pre-conference hold music
:param unicode wait_method: The HTTP method we should use to call `wait_url`
:param bool early_media: Whether agents can hear the state of the outbound call
:param unicode max_participants: The maximum number of agent conference participants
:param unicode conference_status_callback: The callback URL for conference events
:param unicode conference_status_callback_method: HTTP method for requesting `conference_status_callback` URL
:param ReservationInstance.ConferenceEvent conference_status_callback_event: The conference status events that we will send to conference_status_callback
:param unicode conference_record: Whether to record the conference the participant is joining
:param unicode conference_trim: How to trim leading and trailing silence from your recorded conference audio files
:param unicode recording_channels: Specify `mono` or `dual` recording channels
:param unicode recording_status_callback: The URL that we should call using the `recording_status_callback_method` when the recording status changes
:param unicode recording_status_callback_method: The HTTP method we should use when we call `recording_status_callback`
:param unicode conference_recording_status_callback: The URL we should call using the `conference_recording_status_callback_method` when the conference recording is available
:param unicode conference_recording_status_callback_method: The HTTP method we should use to call `conference_recording_status_callback`
:param unicode region: The region where we should mix the conference audio
:param unicode sip_auth_username: The SIP username used for authentication
:param unicode sip_auth_password: The SIP password for authentication
:param unicode dequeue_status_callback_event: The Call progress events sent via webhooks as a result of a Dequeue instruction
:param unicode post_work_activity_sid: The new worker activity SID after executing a Conference instruction
:param ReservationInstance.SupervisorMode supervisor_mode: The Supervisor mode when executing the Supervise instruction
:param unicode supervisor: The Supervisor SID/URI when executing the Supervise instruction
:param bool end_conference_on_customer_exit: Whether to end the conference when the customer leaves
:param bool beep_on_customer_entrance: Whether to play a notification beep when the customer joins
:returns: The updated ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
"""
return self._proxy.update(
reservation_status=reservation_status,
worker_activity_sid=worker_activity_sid,
instruction=instruction,
dequeue_post_work_activity_sid=dequeue_post_work_activity_sid,
dequeue_from=dequeue_from,
dequeue_record=dequeue_record,
dequeue_timeout=dequeue_timeout,
dequeue_to=dequeue_to,
dequeue_status_callback_url=dequeue_status_callback_url,
call_from=call_from,
call_record=call_record,
call_timeout=call_timeout,
call_to=call_to,
call_url=call_url,
call_status_callback_url=call_status_callback_url,
call_accept=call_accept,
redirect_call_sid=redirect_call_sid,
redirect_accept=redirect_accept,
redirect_url=redirect_url,
to=to,
from_=from_,
status_callback=status_callback,
status_callback_method=status_callback_method,
status_callback_event=status_callback_event,
timeout=timeout,
record=record,
muted=muted,
beep=beep,
start_conference_on_enter=start_conference_on_enter,
end_conference_on_exit=end_conference_on_exit,
wait_url=wait_url,
wait_method=wait_method,
early_media=early_media,
max_participants=max_participants,
conference_status_callback=conference_status_callback,
conference_status_callback_method=conference_status_callback_method,
conference_status_callback_event=conference_status_callback_event,
conference_record=conference_record,
conference_trim=conference_trim,
recording_channels=recording_channels,
recording_status_callback=recording_status_callback,
recording_status_callback_method=recording_status_callback_method,
conference_recording_status_callback=conference_recording_status_callback,
conference_recording_status_callback_method=conference_recording_status_callback_method,
region=region,
sip_auth_username=sip_auth_username,
sip_auth_password=sip_auth_password,
dequeue_status_callback_event=dequeue_status_callback_event,
post_work_activity_sid=post_work_activity_sid,
supervisor_mode=supervisor_mode,
supervisor=supervisor,
end_conference_on_customer_exit=end_conference_on_customer_exit,
beep_on_customer_entrance=beep_on_customer_entrance,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Taskrouter.V1.ReservationInstance {}>'.format(context)
| 50.085639
| 182
| 0.68886
|
a0722f59de3763d8e3638aae4f25355508df57eb
| 270
|
py
|
Python
|
src/chapter6/exercise5.py
|
Group3BCS1/BCS-2021
|
b8ee8f900e3fd23822844e10fb2c6475a4f3400a
|
[
"MIT"
] | null | null | null |
src/chapter6/exercise5.py
|
Group3BCS1/BCS-2021
|
b8ee8f900e3fd23822844e10fb2c6475a4f3400a
|
[
"MIT"
] | null | null | null |
src/chapter6/exercise5.py
|
Group3BCS1/BCS-2021
|
b8ee8f900e3fd23822844e10fb2c6475a4f3400a
|
[
"MIT"
] | 2
|
2021-06-11T08:18:48.000Z
|
2021-06-12T20:31:44.000Z
|
word = 'X-DSPAM-Confidence: 0.8475'
# Finds the colon character
atpos = word.find(':')
# Extracts portion after colon
var_float = word[atpos + 1:]
# Converts to floating point number
var_float = float(var_float)
print(f'This is a floating point number {var_float}')
| 33.75
| 55
| 0.72963
|
c68bcc1bfdd00695d0138fbfb4c63aec658ecf13
| 3,377
|
py
|
Python
|
q/protocol.py
|
rymurr/q
|
af44753108d2c569d520b6c1ef719a4e0b616f3e
|
[
"MIT"
] | null | null | null |
q/protocol.py
|
rymurr/q
|
af44753108d2c569d520b6c1ef719a4e0b616f3e
|
[
"MIT"
] | null | null | null |
q/protocol.py
|
rymurr/q
|
af44753108d2c569d520b6c1ef719a4e0b616f3e
|
[
"MIT"
] | null | null | null |
'''
Primary source of kdb ipc protocol definitions
here we define all the q data types and their on the wire form
A parser is used to convert between the python format and kdb/q format
types are found here:
http://www.kx.com/q/d/q1.htm
Note on dates and times
dates are number of days since Jan 1 2000
times are number of hours/minutes/seconds/millis
datetimes are float64 days since Jan 1 (fractional day is converted to millis and parsed)
TODO:
need some docstrings
add in async/concurrency stuff for speed
profile!
integrate back into connection class and do full tests
clarify handling of OrderedDict
add in pd.Series
clarify handling of sorted and keyed tables
add indicies (associated with keys)
'''
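# Hedged sketch (not part of the original module): converting between a Python
# date and the "days since 2000-01-01" encoding described above, assuming
# proleptic Gregorian ordinals:
#
#     import datetime
#     y2k = datetime.date(2000, 1, 1).toordinal()
#     kdb_days = datetime.date(2012, 3, 15).toordinal() - y2k   # -> 4457
#     py_date = datetime.date.fromordinal(y2k + kdb_days)       # -> 2012-03-15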
import pandas
import datetime
import numpy as np
from bitstring import ConstBitStream
from collections import OrderedDict
from time import mktime
#types: -ve is atomic +ve is vector
types = {
-1: ('int', '4'), #bool
1: ('int', '4', np.bool), #bool vector
-4: ('int','8'), #byte
4: ('int','8', np.int8), #byte vector
-5: ('int', '16'), #short
5: ('int', '16', np.int16), #short vector
-6: ('int','32'), #int
6: ('int','32', np.int32), #int vector
-7: ('int','64'), #long
7: ('int','64', np.int64), #long vector
-8: ('float','32'), #real
8: ('float','32', np.float32), #real vector
-9: ('float','64'), #float
9: ('float','64', np.float64), #float vector
-10:('int', '8'), #char
10:('int', '8', np.char), #char vector
-11:('symbol',''), #symbol
11:('symbol',''), #symbol vector
-12:('int', '64'), #nanos
12: ('int', '64'), #nanos vector
-13:('int', '32'), #month
13:('int', '32'), #month vector
-14:('int', '32'), #date
14:('int', '32'), #date vector
-15:('float', '64'), #datetime
15:('float', '64'), #datetime vector
-16:('int', '64'), #nano datetime
16:('int', '64'), #nano datetime vector
-17:('int', '32'), #hour
17:('int', '32'), #hour vector
-18:('int', '32'), #second
18:('int', '32'), #second vector
-19:('int', '32'), #time
19:('int', '32'), #time vector
0:('list','0'), #list
}
inv_types = {
bool: (1, 'int', '8'),
np.bool: (-1, 'int', '8'),
np.int8: (4, 'int','8'),
'int': (-5, 'int', '16'),
np.int16: (5, 'int', '16'),
long: (-7, 'int', '64'),
np.int64: (7, 'int', '64'),
float: (-9, 'float', '64'),
'float': (-8, 'float', '32'),
np.float32: (8, 'float', '32'),
np.float64: (8, 'float', '64'),
int: (-6, 'int', '32'),
list: (0, '', ''),
dict: (99, '', ''),
str: (-11, 'hex', '8'),
'str': (-10, 'hex', '8'),
OrderedDict: (127, '', ''),
np.int64: (6, 'int', '32'),
np.int8: (4, 'int', '8'),
np.object_: (11, 'hex', 8,),
pandas.DataFrame: (98, '', ''),
}
INT = -6
BYTE = -4
NULL = ConstBitStream('0x00')
Y2KDAYS = datetime.datetime(2000,1,1).toordinal()
Y2KMILLIS = mktime(datetime.datetime(2000,1,1).utctimetuple())
MILLIS = 8.64E7
#header format
header_format = 'int{0}:8=endian, int{0}:8=async, pad:16, int{0}:32=length, bits=data'
| 32.161905
| 93
| 0.517619
|
b65ef215969f9d3a248c8538f776589a5f306c73
| 4,890
|
py
|
Python
|
testsuite/python-imageoutput/src/test_imageoutput.py
|
Nedra1998/oiio
|
1a105a6496606a6070df0a5ae5ab64416803e02e
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
testsuite/python-imageoutput/src/test_imageoutput.py
|
Nedra1998/oiio
|
1a105a6496606a6070df0a5ae5ab64416803e02e
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
testsuite/python-imageoutput/src/test_imageoutput.py
|
Nedra1998/oiio
|
1a105a6496606a6070df0a5ae5ab64416803e02e
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
import OpenImageIO as oiio
import numpy as np
# Read the one subimage from input then write it to output using
# write_image, write_scanlines, write_scanline, write_tile, or write_tiles,
# depending on the 'method' argument). (Just copy one subimage, one MIP
# level.)
def copy_subimage (input, output, method="image",
memformat=oiio.TypeFloat) :
spec = input.spec ()
if method == "image" :
pixels = input.read_image (memformat)
if pixels is None :
print ("Error reading input pixels in", in_filename)
return False
output.write_image (pixels)
elif method == "scanlines" and spec.tile_width == 0 :
pixels = input.read_image (memformat)
if pixels is None :
print ("Error reading input pixels in", in_filename)
return False
output.write_scanlines (spec.y, spec.y+spec.height, spec.z,
pixels)
elif method == "scanline" and spec.tile_width == 0 :
for z in range(spec.z, spec.z+spec.depth) :
for y in range(spec.y, spec.y+spec.height) :
pixels = input.read_scanline (y, z, memformat)
if pixels is None :
print ("Error reading input pixels in", in_filename)
return False
output.write_scanline (y, z, pixels)
elif method == "tiles" and spec.tile_width != 0 :
pixels = input.read_image (memformat)
if pixels is None :
print ("Error reading input pixels in", in_filename)
return False
output.write_tiles (spec.x, spec.x+spec.width,
spec.y, spec.y+spec.height,
spec.z, spec.z+spec.depth,
pixels)
elif method == "tile" and spec.tile_width != 0 :
for z in range(spec.z, spec.z+spec.depth, spec.tile_depth) :
for y in range(spec.y, spec.y+spec.height, spec.tile_height) :
for x in range(spec.x, spec.x+spec.width, spec.tile_width) :
pixels = input.read_tile (x, y, z, memformat)
if pixels is None :
print ("Error reading input pixels in", in_filename)
return False
output.write_tile (x, y, z, pixels)
else :
print ("Unknown method:", method)
return False
return True
# Read the whole image then write using write_image, write_scanlines,
# write_scanline, write_tile, or write_tiles, depending on the 'method'
# argument). (Just copy one subimage, one MIP level.)
def copy_image (in_filename, out_filename, method="image",
memformat=oiio.TypeFloat, outformat=oiio.TypeUnknown) :
input = oiio.ImageInput.open (in_filename)
if not input :
print ('Could not open "' + filename + '"')
print ("\tError: ", oiio.geterror())
print ()
return
outspec = input.spec()
if outformat != oiio.TypeUnknown :
outspec.format = outformat
output = oiio.ImageOutput.create (out_filename)
if not output :
print ("Could not create ImageOutput for", out_filename)
return
ok = output.open (out_filename, outspec)
if not ok :
print ("Could not open", out_filename)
return
ok = copy_subimage (input, output, method, memformat)
input.close ()
output.close ()
if ok :
print ("Copied", in_filename, "to", out_filename, "as", method)
def test_subimages (out_filename="multipart.exr") :
output = oiio.ImageOutput.create (out_filename)
spec = oiio.ImageSpec (64, 64, 3, "half")
specs = (spec, spec, spec)
output.open (out_filename, specs)
buffer = np.zeros ((64, 64, 3), dtype=float)
for i in range(3) :
if i != 0 :
output.open (out_filename, specs[i], "AppendSubimage")
output.write_image (buffer)
output.close ()
######################################################################
# main test starts here
try:
copy_image ("scanline.tif", "grid-image.tif", method="image")
copy_image ("scanline.tif", "grid-scanline.tif", method="scanline")
copy_image ("scanline.tif", "grid-scanlines.tif", method="scanlines")
copy_image ("tiled.tif", "grid-timage.tif", method="image")
copy_image ("tiled.tif", "grid-tile.tif", method="tile")
copy_image ("tiled.tif", "grid-tiles.tif", method="tiles")
# Regression test for crash when changing formats
copy_image ("scanline.tif", "grid-image.tif",
memformat=oiio.TypeUInt8, outformat=oiio.TypeUInt16)
# Ensure we can write multiple subimages
test_subimages ()
print ("Done.")
except Exception as detail:
print ("Unknown exception:", detail)
| 39.12
| 76
| 0.600818
|
57b86119c931246e4ca06d032a20b189278b76f0
| 1,072
|
py
|
Python
|
onlinecourse/admin.py
|
jcampbellaccount/ibm-developer-skills-network-final-cloud-app-with-database
|
e43b264eeb3f19199077e8502ae3af1ccab4a743
|
[
"Apache-2.0"
] | null | null | null |
onlinecourse/admin.py
|
jcampbellaccount/ibm-developer-skills-network-final-cloud-app-with-database
|
e43b264eeb3f19199077e8502ae3af1ccab4a743
|
[
"Apache-2.0"
] | null | null | null |
onlinecourse/admin.py
|
jcampbellaccount/ibm-developer-skills-network-final-cloud-app-with-database
|
e43b264eeb3f19199077e8502ae3af1ccab4a743
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
# <HINT> Import any new Models here
from .models import Course, Lesson, Instructor, Learner, Question, Choice
class QuestionInline(admin.StackedInline):
model = Question
extra = 5
class ChoiceInline(admin.StackedInline):
model = Choice
extra = 5
class LessonInline(admin.StackedInline):
model = Lesson
extra = 5
# Register your models here.
class CourseAdmin(admin.ModelAdmin):
inlines = [LessonInline]
list_display = ('name', 'pub_date')
list_filter = ['pub_date']
search_fields = ['name', 'description']
class LessonAdmin(admin.ModelAdmin):
list_display = ['title']
class QuestionAdmin(admin.ModelAdmin):
list_display = ['lesson', 'question_text']
inlines = [ChoiceInline]
class ChoiceAdmin(admin.ModelAdmin):
list_display = ['choice_text']
admin.site.register(Course, CourseAdmin)
admin.site.register(Lesson, LessonAdmin)
admin.site.register(Instructor)
admin.site.register(Learner)
admin.site.register(Question, QuestionAdmin)
admin.site.register(Choice, ChoiceAdmin)
| 24.930233
| 73
| 0.738806
|
759a8a57a9590ed2963d29e2ad70c9267e4e6b29
| 99
|
py
|
Python
|
importcrdata/apps.py
|
JZ1999/crelectoralregister
|
dd5e314608a8a715ae7c29014b9ff87dab585980
|
[
"Apache-2.0"
] | null | null | null |
importcrdata/apps.py
|
JZ1999/crelectoralregister
|
dd5e314608a8a715ae7c29014b9ff87dab585980
|
[
"Apache-2.0"
] | 8
|
2019-12-04T22:53:01.000Z
|
2022-02-10T07:54:51.000Z
|
importcrdata/apps.py
|
JZ1999/crelectoralregister
|
dd5e314608a8a715ae7c29014b9ff87dab585980
|
[
"Apache-2.0"
] | 1
|
2019-06-21T15:09:56.000Z
|
2019-06-21T15:09:56.000Z
|
from django.apps import AppConfig
class ImportcrdataConfig(AppConfig):
name = 'importcrdata'
| 16.5
| 36
| 0.777778
|
0149487795819c0262e4c74300873349225eb122
| 481
|
py
|
Python
|
temboo/core/Library/CorpWatch/Relationships/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 7
|
2016-03-07T02:07:21.000Z
|
2022-01-21T02:22:41.000Z
|
temboo/core/Library/CorpWatch/Relationships/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | null | null | null |
temboo/core/Library/CorpWatch/Relationships/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 8
|
2016-06-14T06:01:11.000Z
|
2020-04-22T09:21:44.000Z
|
from temboo.Library.CorpWatch.Relationships.CompanyChildren import CompanyChildren, CompanyChildrenInputSet, CompanyChildrenResultSet, CompanyChildrenChoreographyExecution
from temboo.Library.CorpWatch.Relationships.CompanyParents import CompanyParents, CompanyParentsInputSet, CompanyParentsResultSet, CompanyParentsChoreographyExecution
from temboo.Library.CorpWatch.Relationships.TopParent import TopParent, TopParentInputSet, TopParentResultSet, TopParentChoreographyExecution
| 120.25
| 171
| 0.912682
|
0fd4bee75e332bf7bc38a260bb4dddf478ca1c4f
| 3,498
|
py
|
Python
|
filepart/__init__.py
|
UnknownPlayer78/filepart
|
0ebb3a4428b1d415185b13be24034f688ac6185b
|
[
"MIT"
] | 1
|
2020-01-18T14:13:43.000Z
|
2020-01-18T14:13:43.000Z
|
filepart/__init__.py
|
UnknownPlayer78/filepart
|
0ebb3a4428b1d415185b13be24034f688ac6185b
|
[
"MIT"
] | null | null | null |
filepart/__init__.py
|
UnknownPlayer78/filepart
|
0ebb3a4428b1d415185b13be24034f688ac6185b
|
[
"MIT"
] | null | null | null |
import sys, getopt
from .__main__ import __version__
from os.path import exists, isfile
from .utils import parse_size
from .utils import Help
from .wizard import Wizard
from .split import Splitter
from .build import Builder
# TODO wizard file not found (check for .part file exists)
# -> file.ext -> check for startswith file.ext endswith .part
# TODO find solution for part systems
# -> file.ext.1.part
# -> file.ext.2.part
# -> file.ext.3.part
# -> file.ext -> file.ext.x.part -> file.ext
def main(argv=None):
    if argv is None:
argv = sys.argv[1:]
try:
opts, args = getopt.getopt(argv, "hvsbp:o:", ["help", "version", "split", "build", "parts=", "output="])
except getopt.GetoptError:
print(Help.SHORT)
sys.exit(2)
if len(opts) > 0:
mode = ""
file = " ".join(args) if len(args) > 0 else ""
parts = ""
output = ""
for opt, arg in opts:
if opt in ("-h", "--help"):
print(Help.LONG)
sys.exit(0)
elif opt in ("-v", "--version"):
print(__version__)
sys.exit(0)
elif opt in ("-s", "--split"):
mode = "split"
elif opt in ("-b", "--build"):
mode = "build"
if not file:
print(Help.SHORT)
print(f"\n\n{__name__}: error: You must provide a file.")
sys.exit(2)
for opt, arg in opts:
if opt in ("-p", "--parts"):
parts = arg
elif opt in ("-o", "--output"):
output = arg
if mode == "split":
if not parts:
print(Help.SHORT)
print(f"\n\n{__name__}: error: You must provide the parts.")
sys.exit(2)
if not exists(file) or not isfile(file):
print(f"{__name__}: error: " + file + ": No such file.")
sys.exit(2)
try:
parts = parse_size(parts)
except Exception as error:
print(f"{__name__}: error: " + str(error))
sys.exit(2)
if not output:
output = "./"
else:
if not exists(output) or isfile(output):
print(f"{__name__}: error: " + output + " No such directory.")
sys.exit(2)
if mode == "split":
splitter = Splitter(
file=file,
parts=parts,
output=output
)
splitter.split()
elif mode == "build":
builder = Builder(
file=file,
output=output
)
builder.build()
elif len(args) < 1:
wizard = Wizard()
mode = wizard.options["mode"]
file = wizard.options["file"]
if mode == "split":
parts = wizard.options["parts"]
output = wizard.options["output"]
if mode == "split":
splitter = Splitter(
file=file,
parts=parts,
output=output
)
splitter.split()
elif mode == "build":
builder = Builder(
file=file,
output=output
)
builder.build()
else:
print(Help.SHORT)
sys.exit(2)
| 28.209677
| 112
| 0.450543
|
92ea3966b0f87d0effd8e95d039615f6012482c9
| 3,058
|
py
|
Python
|
homeassistant/components/sensor/zwave.py
|
shire210/home-assistant
|
63cd8bbee6f1b74ae9c6c249ac820119a8a573d8
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/sensor/zwave.py
|
shire210/home-assistant
|
63cd8bbee6f1b74ae9c6c249ac820119a8a573d8
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/sensor/zwave.py
|
shire210/home-assistant
|
63cd8bbee6f1b74ae9c6c249ac820119a8a573d8
|
[
"Apache-2.0"
] | null | null | null |
"""
Interfaces with Z-Wave sensors.
For more details about this platform, please refer to the documentation
at https://home-assistant.io/components/sensor.zwave/
"""
import logging
# Because we do not compile openzwave on CI
# pylint: disable=import-error
from homeassistant.components.sensor import DOMAIN
from homeassistant.components import zwave
from homeassistant.const import TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.components.zwave import async_setup_platform # noqa # pylint: disable=unused-import
_LOGGER = logging.getLogger(__name__)
def get_device(node, values, **kwargs):
"""Create zwave entity device."""
# Generic Device mappings
if node.has_command_class(zwave.const.COMMAND_CLASS_SENSOR_MULTILEVEL):
return ZWaveMultilevelSensor(values)
if node.has_command_class(zwave.const.COMMAND_CLASS_METER) and \
values.primary.type == zwave.const.TYPE_DECIMAL:
return ZWaveMultilevelSensor(values)
if node.has_command_class(zwave.const.COMMAND_CLASS_ALARM) or \
node.has_command_class(zwave.const.COMMAND_CLASS_SENSOR_ALARM):
return ZWaveAlarmSensor(values)
return None
class ZWaveSensor(zwave.ZWaveDeviceEntity):
"""Representation of a Z-Wave sensor."""
def __init__(self, values):
"""Initialize the sensor."""
zwave.ZWaveDeviceEntity.__init__(self, values, DOMAIN)
self.update_properties()
def update_properties(self):
"""Callback on data changes for node values."""
self._state = self.values.primary.data
self._units = self.values.primary.units
@property
def force_update(self):
"""Return force_update."""
return True
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement the value is expressed in."""
return self._units
class ZWaveMultilevelSensor(ZWaveSensor):
"""Representation of a multi level sensor Z-Wave sensor."""
@property
def state(self):
"""Return the state of the sensor."""
if self._units in ('C', 'F'):
return round(self._state, 1)
elif isinstance(self._state, float):
return round(self._state, 2)
return self._state
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
if self._units == 'C':
return TEMP_CELSIUS
elif self._units == 'F':
return TEMP_FAHRENHEIT
else:
return self._units
class ZWaveAlarmSensor(ZWaveSensor):
"""Representation of a Z-Wave sensor that sends Alarm alerts.
Examples include certain Multisensors that have motion and vibration
capabilities. Z-Wave defines various alarm types such as Smoke, Flood,
Burglar, CarbonMonoxide, etc.
This wraps these alarms and allows you to use them to trigger things, etc.
COMMAND_CLASS_ALARM is what we get here.
"""
pass
| 31.204082
| 103
| 0.690974
|
590e630cb10ec47be4d25ae9b8259d01da28e8cc
| 18,624
|
py
|
Python
|
spidey_simulation/spidey_py/spidey_python/omniverse/robot/articulated_object.py
|
JasonJZLiu/Spidey-Quadruped
|
74c1817f997b354bae4fffd2728f2cc94947062c
|
[
"MIT"
] | 5
|
2021-06-14T03:12:18.000Z
|
2021-12-23T12:58:56.000Z
|
spidey_simulation/spidey_py/spidey_python/omniverse/robot/articulated_object.py
|
JasonJZLiu/Spidey-Quadruped
|
74c1817f997b354bae4fffd2728f2cc94947062c
|
[
"MIT"
] | null | null | null |
spidey_simulation/spidey_py/spidey_python/omniverse/robot/articulated_object.py
|
JasonJZLiu/Spidey-Quadruped
|
74c1817f997b354bae4fffd2728f2cc94947062c
|
[
"MIT"
] | null | null | null |
"""
@author Mayank Mittal
@email mittalma@ethz.ch
@brief Implementation of an articulated object.
"""
# python
import os
import numpy as np
import scipy.spatial.transform as tf
from typing import Optional, List
# omniverse
from pxr import Usd, UsdGeom, Gf, Semantics
import omni.isaac.dynamic_control._dynamic_control as omni_dc
# mpulator gym
from spidey_python.utils.message import *
from spidey_python.utils.errors import *
from spidey_python.omniverse.robot.robot_base import RobotBase
class ArticulatedObject(RobotBase):
"""
@brief Implementation of an articulated object.
    An articulated object differs from a robot in the sense that it is a passive instance in the
    environment, i.e. its joints are not actuated. However, since its interface resembles that of
    a robot, it derives from the base class `RobotBase`.
"""
"""
Instantiation
"""
def __init__(self, stage: Usd.Stage, prim_path: str, usd_path: Optional[str] = None,
frame_names: List[str] = None, meters_per_unit: Optional[float] = 1.0):
"""
Defines the variables and constants for the articulated object.
:param stage: The USD stage to import articulated object into.
:param prim_path: The path for the primitive in the stage.
:param usd_path: The path to the USD file to load.
:param frame_names: A list of frame names whose pose to store.
:param meters_per_unit: The units of conversion from simulator's scale to meters.
"""
super().__init__()
# Check that input is correct
assert os.path.isabs(prim_path)
assert isinstance(meters_per_unit, float)
# Copy args to internal variables
self._prim_path = prim_path
self._meters_per_unit = meters_per_unit
self._usd_path = usd_path
# Check if any frames specified whose pose to store
if frame_names is None:
self._frame_handle_names = list()
else:
self._frame_handle_names = frame_names
# Persistent Scene-graph related in Universal Scene Description
self._stage = stage
self._prim = None
# Handles to various ov-kit plugins
self._dc_handle = None
# Handles related to articulated object
self._articulation_handle = None
# Count of number of DOF in object
self._num_dofs = 0
# Store DOF properties
self._dof_properties = {
"lower_limits": np.array([]),
"upper_limits": np.array([]),
"max_velocity": np.array([]),
"max_efforts": np.array([]),
}
# Store frame handles and poses
self._frames_info = dict()
for frame_name in self._frame_handle_names:
self._frames_info[frame_name] = {
'handle': None,
'pos': np.empty(3),
'quat': np.empty(4)
}
# Default state of the articulated object
self._default_state = {
"pos": np.array([]),
"vel": np.array([])
}
# Dynamics information of the articulated object
self._state = {
# Generalized coordinates
"pos": np.array([]),
# Generalized velocities
"vel": np.array([])
}
def __del__(self):
"""
Cleanup after exiting
"""
pass
def __str__(self) -> str:
"""
:return: A string containing information about the instance's state.
"""
# set print options for numpy
np.set_printoptions(precision=4)
# print message
msg = f"Articulated Object @ \'{self._prim_path}\'\n" \
" State:\n" \
f" q: {self.q} \n" \
f" u: {self.u} \n"
return msg
"""
Properties
"""
@property
def prim(self) -> Usd.Prim:
"""
:return: The USD primitive instance corresponding to the articulated object.
"""
return self._prim
@property
def prim_path(self) -> str:
"""
:return: The path to the prim the stage.
"""
return self._prim_path
@property
def dof_properties(self) -> dict:
"""
:return: A dictionary containing the DOF properties such as joint limits.
"""
return self._dof_properties
@property
def q(self) -> np.ndarray:
"""
:return: The generalized coordinates of the articulated object.
"""
return self._state["pos"]
@property
def u(self) -> np.ndarray:
"""
:return: The generalized velocities of the articulated object.
"""
return self._state["vel"]
@property
def frames_info(self) -> dict:
"""
:return: A nested dictionary with key as the frame names and values as the information
about the frame such as position and orientation in world frame.
"""
return self._frames_info
@property
def default_state(self) -> dict:
"""
:return: The default state of the articulated object.
"""
return self._default_state
@property
def state(self) -> dict:
"""
:return: The current state of the articulated object.
"""
return self._state
"""
Helpers
"""
def toggle_visibility(self, visible: bool):
""" Toggle visibility of the articulated object prim in the scene.
:param visible: Flag to whether make prim visible or invisible.
"""
# get imageable object
imageable = UsdGeom.Imageable(self._prim)
# toggle visibility
if visible:
imageable.MakeVisible()
else:
imageable.MakeInvisible()
def set_semantic_label(self, label: str):
"""
Set the semantic label corresponding to the prim.
:param label: Name of the semantic label.
"""
# create semantics api if not exists
if not self._prim.HasAPI(Semantics.SemanticsAPI):
sem = Semantics.SemanticsAPI.Apply(self._prim, "Semantics")
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
else:
sem = Semantics.SemanticsAPI.Get(self._prim, "Semantics")
# set attributes
sem.GetSemanticTypeAttr().Set("class")
sem.GetSemanticDataAttr().Set(label)
def set_prim_pose(self, pos: np.ndarray, quat: Optional[np.ndarray] = None):
""" Set location of the root of the object in the stage.
:param pos: (x, y, z) cartesian coordinates for location of root of the articulated object in the world frame.
:param quat: (x, y, z, w) quaternion coordinates of orientation of root of the articulated object in the world frame.
Default orientation is (0, 0, 0, 1), i.e. identity w.r.t. world.
"""
if self._prim is None:
print_warn(f"Prim not found at \'{self._prim_path}\'. Please ensure that the USD stage has the prim.")
return
# convert to datatypes accepted by simulator
if not isinstance(pos, Gf.Vec3d):
pos = pos / self._meters_per_unit
pos = Gf.Vec3d(*pos)
# if orientation not provided, default to identity
if quat is not None:
rotm = tf.Rotation.from_quat(quat).as_matrix()
rotm = Gf.Matrix3d(*rotm.ravel())
else:
rotm = Gf.Matrix3d().SetIdentity()
# set attribute properties for the transform on the primitive
properties = self._prim.GetPropertyNames()
if "xformOp:transform" in properties:
transform_attr = self._prim.GetAttribute("xformOp:transform")
matrix = self._prim.GetAttribute("xformOp:transform").Get()
matrix.SetTranslateOnly(pos).SetRotateOnly(rotm)
transform_attr.Set(matrix)
else:
xform = UsdGeom.Xformable(self._prim)
xform_op = xform.AddXformOp(UsdGeom.XformOp.TypeTransform, UsdGeom.XformOp.PrecisionDouble, "")
xform_op.Set(Gf.Matrix4d().SetTranslate(pos).SetRotate(rotm))
def set_state(self, q: np.ndarray, u: np.ndarray, **kwargs):
""" Set the dof state of the articulated object.
:param q: Generalized coordinates for the object.
:param u: Generalized velocities for the object.
"""
# convert input to numpy array (sanity)
q = np.asarray(q)
u = np.asarray(u)
# check input is of right shape
assert q.shape == (self._num_dofs,)
assert u.shape == (self._num_dofs,)
# assign
# for arm
dof_states = self._dc_handle.get_articulation_dof_states(self._articulation_handle, omni_dc.STATE_ALL)
for index in range(self._num_dofs):
            # set initial joint state
dof_states["pos"][index] = q[index]
dof_states["vel"][index] = u[index]
self._dc_handle.set_articulation_dof_states(self._articulation_handle, dof_states, omni_dc.STATE_ALL)
"""
Operations
"""
def create(self):
"""
Loads the articulated object into the Omniverse stage.
@note This function is kept separate in case one wants to create an instance of the class without launching
the simulator. Or, if one doesn't want to create a new primitive programmatically but refer to an
        existing one in the current USD stage.
"""
# Extract USD path from configuration
usd_path = self._usd_path
# check that path exists
if not os.path.exists(usd_path):
msg = f"File not found: {usd_path}"
print_error(msg)
raise FileNotFoundError(msg)
else:
print_info(f"Loading from: {usd_path}.")
# define persistent scene graph geometry for the articulated object
self._prim = self._stage.DefinePrim(self._prim_path, "Xform")
# add reference to the USD in the current stage
self._prim.GetReferences().AddReference(usd_path)
# check that the path to articulation in scene-graph is correct
assert self._prim_path == self._prim.GetPath().pathString
def setup(self, dc: omni_dc.DynamicControl):
"""
Registers the assets and configures internal variables of the articulated object.
:param dc: Handle to dynamic control plugin instance.
"""
# get prim if it doesn't exist yet
# this is to deal with the scenario when the stage already has the prim so user does not create one.
if self._prim is None:
self._prim = self._stage.GetPrimAtPath(self._prim_path)
# check that prim exists. (GetPrimPath returns invalid prim if one doesn't exist)
if not self._prim.IsValid():
msg = f"Prim not found at \'{self._prim_path}\'. Please ensure that the USD stage has the prim."
print_error(msg)
raise OmniverseError(msg)
# initialize dynamic control handle
self._dc_handle = dc
# initialize handle to the articulation for articulated object through dynamic control toolbox
self._articulation_handle = self._dc_handle.get_articulation(self._prim_path)
if self._articulation_handle == omni_dc.INVALID_HANDLE:
raise InvalidHandleError(f"Failed to obtain articulated object at \'{self._prim_path}\'")
# get number of degrees of freedom of articulated object
self._num_dofs = self._dc_handle.get_articulation_dof_count(self._articulation_handle)
# setup corresponding frame handle
self._setup_handles()
# setup links of the robot
self._setup_links()
# setup controls for the robot
self._setup_control()
# record initial state of the object in the scene as default state
dof_states = self._dc_handle.get_articulation_dof_states(self._articulation_handle, omni_dc.STATE_ALL)
self._default_state["pos"] = np.asarray(dof_states["pos"])
self._default_state["vel"] = np.zeros_like(self._default_state["pos"])
# root spawned position
self.set_prim_pose(pos=np.array([0.0, 0.0, 0.0]), quat=None)
# set default initial state of the articulated object
self.set_state(q=self._default_state["pos"],
u=self._default_state["vel"])
# update the internal buffers
self.update()
# print status
print_notify(f"Setup complete for articulated object \'{self._prim_path}\'.")
def advance(self):
"""Apply input command to the articulated object.
@note Passive object in the scene with no joint commands.
"""
pass
def update(self):
"""
Updates the buffers for dynamics state of the articulated object.
"""
# get frame poses
for frame_name in self._frames_info:
frame_handle = self._frames_info[frame_name]['handle']
# pose of the base of the robot
frame_pose = self._dc_handle.get_rigid_body_pose(frame_handle)
pos = np.array([frame_pose.p.x, frame_pose.p.y, frame_pose.p.z])
quat = np.array([frame_pose.r.x, frame_pose.r.y, frame_pose.r.z, frame_pose.r.w])
# convert from simulator's units to meters
pos = pos * self._meters_per_unit
# store into the dictionary
self._frames_info[frame_name]['pos'] = pos
self._frames_info[frame_name]['quat'] = quat
# fill joint state of the object
dof_states = self._dc_handle.get_articulation_dof_states(self._articulation_handle, omni_dc.STATE_ALL)
        self._state["pos"] = np.asarray(dof_states["pos"])
        self._state["vel"] = np.asarray(dof_states["vel"])
def display(self):
"""
Display the configuration of the articulated object.
"""
print(f"Articulation handle: {self._articulation_handle}")
# Print information about kinematic chain
root_link_index = self._dc_handle.get_articulation_root_body(self._articulation_handle)
print("--- Hierarchy:\n"
f"{self._convert_kinematic_hierarchy_to_string(root_link_index)}")
# Information about the body states of the articulated object
body_states = self._dc_handle.get_articulation_body_states(self._articulation_handle, omni_dc.STATE_ALL)
print_info("--- Body states:\n"
f"{body_states}")
# Information about the DOF states of the articulated object.
dof_states = self._dc_handle.get_articulation_dof_states(self._articulation_handle, omni_dc.STATE_ALL)
print_info("--- DOF states:\n"
f"{dof_states}")
# Information about the DOF properties of the articulated object.
dof_props = self._dc_handle.get_articulation_dof_properties(self._articulation_handle)
print_info("--- DOF properties:\n"
"[type] [has-limits] [lower] [upper] [drive-mode] [max-vel] [max-effort] [stiffness] [damping]\n"
f"{dof_props}")
"""
Internals
"""
def _setup_handles(self):
"""
Configures the handles of the frames.
"""
for frame_name in self._frame_handle_names:
# get frame handle
frame_handle = self._dc_handle.find_articulation_body(self._articulation_handle, frame_name)
# check handles are valid
if frame_handle == omni_dc.INVALID_HANDLE:
msg = f"*** Failed to load handle at \'{frame_name}\'"
print_error(msg)
raise InvalidHandleError(msg)
# store information into information dictionary
self._frames_info[frame_name]['handle'] = frame_handle
def _setup_links(self):
"""
Configures the properties of the links in the object.
"""
pass
def _setup_control(self):
"""
Configures the controllers for the robot system. Since passive system, we set the DOF
type to None for all joints.
"""
# get joint poperties
dof_props = self._dc_handle.get_articulation_dof_properties(self._articulation_handle)
# store essential dof properties internally
self._dof_properties["lower_limits"] = np.asarray(dof_props["lower"])
self._dof_properties["upper_limits"] = np.asarray(dof_props["upper"])
self._dof_properties["max_velocity"] = np.asarray(dof_props["maxVelocity"])
self._dof_properties["max_effort"] = np.asarray(dof_props["maxEffort"])
# joints: set control type based on specification
for index in range(self._num_dofs):
# set drive mode
dof_props["driveMode"][index] = omni_dc.DRIVE_NONE
dof_props["stiffness"][index] = 0.0
dof_props["damping"][index] = 0.0
# set dof properties
self._dc_handle.set_articulation_dof_properties(self._articulation_handle, dof_props)
def _convert_kinematic_hierarchy_to_string(self, body_index, indent_level=0) -> str:
""" Reads the articulation handle and converts kinematic tree into a string.
:param body_index: Index of the body to start iteration with.
:param indent_level: Indentation level in the converted message
:return: A string message containing the kinematic tree.
"""
# define current indentation
indent = "|" + "-" * indent_level
# get name of the body
body_name = self._dc_handle.get_rigid_body_name(body_index)
# add body name to string
str_output = f"{indent}Body: {body_name}\n"
# iterate over children of the body
for i in range(self._dc_handle.get_rigid_body_child_joint_count(body_index)):
# get joint name
joint = self._dc_handle.get_rigid_body_child_joint(body_index, i)
joint_name = self._dc_handle.get_joint_name(joint)
# get child link name
child = self._dc_handle.get_joint_child_body(joint)
child_name = self._dc_handle.get_rigid_body_name(child)
# add information to string output
str_output += f"{indent}>>Joint: {joint_name} -> {child_name}\n"
            # iterate recursively for depth-first search
str_output += self._convert_kinematic_hierarchy_to_string(child, indent_level + 4)
# return result
return str_output
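# --- Editor-added usage sketch (hedged; not part of the original module) ---
# `set_prim_pose` converts an (x, y, z, w) quaternion into a rotation matrix via
# scipy before handing it to `Gf.Matrix3d`. The check below only illustrates that
# quaternion convention with plain numpy/scipy; the rotation is arbitrary, and the
# block is reachable only where this module's Omniverse imports resolve.
if __name__ == "__main__":
    _quat_xyzw = np.array([0.0, 0.0, np.sin(np.pi / 4), np.cos(np.pi / 4)])  # 90 deg about +Z
    _rotm = tf.Rotation.from_quat(_quat_xyzw).as_matrix()
    # Under a +90 deg yaw the x-axis maps onto the y-axis.
    assert np.allclose(_rotm @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])
    print("quaternion convention check passed")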
# EOF
| 40.311688
| 125
| 0.628329
|
d64ae0a62a7cf4f7f2c63334793249f295a4af74
| 2,476
|
py
|
Python
|
proj/archs/cluster/net5g_two_head.py
|
zqma/IIC
|
9d4e30b51535c6ca381389d9c22ce45be4d11883
|
[
"MIT"
] | null | null | null |
proj/archs/cluster/net5g_two_head.py
|
zqma/IIC
|
9d4e30b51535c6ca381389d9c22ce45be4d11883
|
[
"MIT"
] | null | null | null |
proj/archs/cluster/net5g_two_head.py
|
zqma/IIC
|
9d4e30b51535c6ca381389d9c22ce45be4d11883
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
from .net5g import ClusterNet5gTrunk
from .residual import BasicBlock, ResNet
# resnet34 and full channels
__all__ = ["ClusterNet5gTwoHead"]
class ClusterNet5gTwoHeadHead(nn.Module):
def __init__(self, config, output_k, semisup=False):
super(ClusterNet5gTwoHeadHead, self).__init__()
self.batchnorm_track = config.batchnorm_track
self.semisup = semisup
if not semisup:
self.num_sub_heads = config.num_sub_heads
self.heads = nn.ModuleList([nn.Sequential(
nn.Linear(512 * BasicBlock.expansion, output_k),
nn.Softmax(dim=1)) for _ in xrange(self.num_sub_heads)])
else:
self.head = nn.Linear(512 * BasicBlock.expansion, output_k)
def forward(self, x, kmeans_use_features=False):
if not self.semisup:
results = []
for i in xrange(self.num_sub_heads):
if kmeans_use_features:
results.append(x) # duplicates
else:
results.append(self.heads[i](x))
return results
else:
return self.head(x)
class ClusterNet5gTwoHead(ResNet):
def __init__(self, config):
# no saving of configs
super(ClusterNet5gTwoHead, self).__init__()
self.batchnorm_track = config.batchnorm_track
self.trunk = ClusterNet5gTrunk(config)
self.head_A = ClusterNet5gTwoHeadHead(config, output_k=config.output_k_A)
semisup = (hasattr(config, "semisup") and
config.semisup)
print("semisup: %s" % semisup)
self.head_B = ClusterNet5gTwoHeadHead(config, output_k=config.output_k_B,
semisup=semisup)
self._initialize_weights()
def forward(self, x, head="B", kmeans_use_features=False,
trunk_features=False,
penultimate_features=False):
# default is "B" for use by eval proj
# training script switches between A and B
x = self.trunk(x, penultimate_features=penultimate_features)
if trunk_features: # for semisup
return x
# returns list or single
if head == "A":
x = self.head_A(x, kmeans_use_features=kmeans_use_features)
elif head == "B":
x = self.head_B(x, kmeans_use_features=kmeans_use_features)
else:
assert (False)
return x
| 30.195122
| 81
| 0.607835
|
4199be6b5d4205a76b1333d1a62ff4364bad69a4
| 9,759
|
py
|
Python
|
spacegraphcats/search/search_utils.py
|
rhysnewell/spacegraphcats
|
e4d8b29171af0d1c8507066021be3b6a50c7802b
|
[
"BSD-3-Clause"
] | null | null | null |
spacegraphcats/search/search_utils.py
|
rhysnewell/spacegraphcats
|
e4d8b29171af0d1c8507066021be3b6a50c7802b
|
[
"BSD-3-Clause"
] | null | null | null |
spacegraphcats/search/search_utils.py
|
rhysnewell/spacegraphcats
|
e4d8b29171af0d1c8507066021be3b6a50c7802b
|
[
"BSD-3-Clause"
] | null | null | null |
import collections
import csv
import os
import sqlite3
import numpy
from screed.screedRecord import Record
from screed.utils import to_str
from sourmash import MinHash
from spacegraphcats.utils.bgzf.bgzf import BgzfReader
from . import MPHF_KmerIndex
def sqlite_get_max_offset(cursor):
cursor.execute("SELECT max(sequences.offset) FROM sequences")
(last_offset,) = next(cursor)
return last_offset
def sqlite_get_offsets(cursor, cdbg_ids):
seen_offsets = set()
seen_labels = set()
cursor.execute("DROP TABLE IF EXISTS cdbg_query")
cursor.execute("CREATE TEMPORARY TABLE cdbg_query (cdbg_id INTEGER PRIMARY KEY);")
for label in cdbg_ids:
cursor.execute("INSERT INTO cdbg_query (cdbg_id) VALUES (?)", (label,))
cursor.execute(
"SELECT DISTINCT sequences.offset,sequences.cdbg_id FROM sequences WHERE cdbg_id in (SELECT cdbg_id FROM cdbg_query) ORDER BY offset"
)
for n, (offset, label) in enumerate(cursor):
if offset not in seen_offsets:
yield offset
seen_offsets.add(offset)
seen_labels.add(label)
seen_labels -= cdbg_ids
assert not seen_labels # should have gotten ALL the labels
class GrabBGZF_Random(object):
def __init__(self, filename):
self.reader = BgzfReader(filename, "rt")
ch = self.reader.read(1)
if ch == ">":
iter_fn = my_fasta_iter
elif ch == "@":
iter_fn = my_fastq_iter
else:
raise Exception("unknown start chr {}".format(ch))
self.iter_fn = iter_fn
def get_sequence_at(self, pos):
self.reader.seek(pos)
record = next(self.iter_fn(self.reader))
return record
def iterate_bgzf(reader):
ch = reader.read(1)
if ch == ">":
iter_fn = my_fasta_iter
elif ch == "@":
iter_fn = my_fastq_iter
else:
raise Exception("unknown start chr {}".format(ch))
reader.seek(0)
for record, pos in iter_fn(reader):
yield record, pos
def my_fasta_iter(handle, parse_description=False, line=None):
"""
Iterator over the given FASTA file handle, returning records. handle
is a handle to a file opened for reading
"""
last_start = handle.tell()
if line is None:
line = handle.readline()
while line:
data = {}
line = to_str(line.strip())
if not line.startswith(">"):
raise IOError(
"Bad FASTA format: no '>' at beginning of line: {}".format(line)
)
if parse_description: # Try to grab the name and optional description
try:
data["name"], data["description"] = line[1:].split(" ", 1)
except ValueError: # No optional description
data["name"] = line[1:]
data["description"] = ""
else:
data["name"] = line[1:]
data["description"] = ""
data["name"] = data["name"].strip()
data["description"] = data["description"].strip()
# Collect sequence lines into a list
sequenceList = []
pos = handle.tell()
line = to_str(handle.readline())
while line and not line.startswith(">"):
sequenceList.append(line.strip())
pos = handle.tell()
line = to_str(handle.readline())
data["sequence"] = "".join(sequenceList)
yield Record(**data), last_start
last_start = pos
def my_fastq_iter(handle, line=None, parse_description=False):
"""
Iterator over the given FASTQ file handle returning records. handle
is a handle to a file opened for reading
CTB: this relies on each FASTQ record being exactly 4 lines.
"""
while 1:
pos = handle.tell()
line = handle.readline()
if not line:
return
assert line.startswith("@"), line
name = to_str(line.strip())[1:]
line = handle.readline()
sequence = to_str(line.strip())
line = handle.readline()
plus = to_str(line.strip())
assert plus == "+"
line = handle.readline()
quality = to_str(line.strip())
yield Record(name, sequence, quality=quality), pos
def get_reads_by_cdbg(sqlite_filename, reads_filename, cdbg_ids):
"""
Given a list of cDBG IDs, retrieve the actual sequences corresponding
to them by using offsets into a BGZF file.
"""
# connect to sqlite db
db = sqlite3.connect(sqlite_filename)
cursor = db.cursor()
# open readsfile for random access
reads_grabber = GrabBGZF_Random(reads_filename)
## get last offset in file as measure of progress
last_offset = sqlite_get_max_offset(cursor)
# pull out the offsets of all sequences with matches in cdbg_ids.
for offset in sqlite_get_offsets(cursor, cdbg_ids):
offset_f = offset / last_offset
record, xx = reads_grabber.get_sequence_at(offset)
assert xx == offset
yield record, offset_f
def get_contigs_by_cdbg_sqlite(db, cdbg_ids):
"""
Given a list of cDBG IDs, retrieve the actual contig sequences
corresponding to them from a sqlite database created by
sort_bcalm_unitigs.
"""
cursor = db.cursor()
for cdbg_id in cdbg_ids:
cdbg_id = int(cdbg_id)
cursor.execute("SELECT sequence FROM sequences WHERE id=?", (cdbg_id,))
results = cursor.fetchall()
assert len(results) == 1
(seq,) = results[0]
yield Record(str(cdbg_id), seq)
def contigs_iter_sqlite(contigs_db):
"""
Yield all the sequences in the contigs database.
"""
cursor = contigs_db.cursor()
cursor.execute("SELECT id, sequence FROM sequences")
for ident, sequence in cursor:
yield Record(str(ident), sequence)
### MPHF stuff
def load_kmer_index(catlas_prefix):
"Load kmer index created by search.contigs_by_kmer."
return MPHF_KmerIndex.from_catlas_directory(catlas_prefix)
def load_cdbg_size_info(catlas_prefix, min_abund=0.0):
filename = os.path.join(catlas_prefix, "contigs.info.csv")
with open(filename, "rt") as fp:
cdbg_kmer_sizes = {}
cdbg_weighted_kmer_sizes = {}
r = csv.DictReader(fp)
for row in r:
contig_id = int(row["contig_id"])
n_kmers = int(row["n_kmers"])
mean_abund = float(row["mean_abund"])
if not min_abund or mean_abund >= min_abund:
cdbg_kmer_sizes[contig_id] = n_kmers
cdbg_weighted_kmer_sizes[contig_id] = mean_abund * n_kmers
return cdbg_kmer_sizes, cdbg_weighted_kmer_sizes
def decorate_catlas_with_kmer_sizes(
layer1_to_cdbg, dag, dag_levels, cdbg_kmer_sizes, cdbg_weighted_kmer_sizes
):
x = []
for (node_id, level) in dag_levels.items():
x.append((level, node_id))
x.sort()
node_kmer_sizes = {}
node_weighted_kmer_sizes = {}
for level, node_id in x:
if level == 1: # aggregate across cDBG nodes
total_kmers = 0
total_weighted_kmers = 0
for cdbg_node in layer1_to_cdbg.get(node_id):
total_kmers += cdbg_kmer_sizes.get(cdbg_node, 0)
total_weighted_kmers += cdbg_weighted_kmer_sizes.get(cdbg_node, 0)
node_kmer_sizes[node_id] = total_kmers
node_weighted_kmer_sizes[node_id] = total_weighted_kmers
else: # aggregate across children
sub_size = 0
sub_weighted_size = 0
for child_id in dag[node_id]:
sub_size += node_kmer_sizes[child_id]
sub_weighted_size += node_weighted_kmer_sizes[child_id]
node_kmer_sizes[node_id] = sub_size
node_weighted_kmer_sizes[node_id] = sub_weighted_size
return node_kmer_sizes, node_weighted_kmer_sizes
def output_response_curve(outname, match_counts, kmer_idx, layer1_to_cdbg):
curve = []
# track total containment
total = 0
# walk over all layer1 nodes
for node_id, cdbg_nodes in sorted(layer1_to_cdbg.items()):
n_matches = 0
n_kmers = 0
# aggregate counts across cDBG nodes under this layer1 node.
for cdbg_node in cdbg_nodes:
n_matches += match_counts.get(cdbg_node, 0)
n_kmers += kmer_idx.get_cdbg_size(cdbg_node)
# do we keep this layer1 node, i.e. does it have positive containment?
if n_matches:
n_cont = n_matches
n_oh = n_kmers - n_matches
total += n_cont
curve.append((n_cont, n_oh, node_id))
# sort by absolute containment
curve.sort(reverse=True)
# track total containment etc
sofar = 0
total_oh = 0
total_cont = 0
# CTB: remove redundant sum_cont fields
# CTB: switch to CSV output
# CTB: ask Mike what he wants here :)
with open(outname, "wt") as fp:
fp.write(
"sum_cont relative_cont relative_overhead sum_cont2 sum_oh catlas_id\n"
)
# only output ~200 points
sampling_rate = max(int(len(curve) / 200), 1)
# do the output thing
for pos, (n_cont, n_oh, node_id) in enumerate(curve):
sofar += n_cont
total_oh += n_oh
total_cont += n_cont
# output first and last points, as well as at sampling rate.
if pos % sampling_rate == 0 or pos == 0 or pos + 1 == len(curve):
fp.write(
"{} {} {} {} {} {}\n".format(
sofar,
total_cont / total,
total_oh / total,
n_cont,
n_oh,
node_id,
)
)
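# --- Editor-added usage sketch (hedged; not part of the original module) ---
# A minimal exercise of sqlite_get_max_offset() / sqlite_get_offsets() against a
# throwaway in-memory database with the "sequences" (offset, cdbg_id) layout the
# functions assume. The schema and values are illustrative only; the block is only
# reachable when this module's own imports (screed, sourmash, ...) resolve, e.g.
# via `python -m spacegraphcats.search.search_utils` in an installed checkout.
if __name__ == "__main__":
    _db = sqlite3.connect(":memory:")
    _cur = _db.cursor()
    _cur.execute("CREATE TABLE sequences (offset INTEGER, cdbg_id INTEGER)")
    _cur.executemany("INSERT INTO sequences VALUES (?, ?)",
                     [(0, 1), (0, 2), (100, 3), (250, 2)])
    _db.commit()
    print(list(sqlite_get_offsets(_cur, {1, 3})))  # -> [0, 100]
    print(sqlite_get_max_offset(_cur))             # -> 250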
| 29.572727
| 141
| 0.613895
|
22ee5b16e3a779bd422a65e15e655aeed02b07b3
| 266
|
py
|
Python
|
src/wechaty_plugin_contrib/__init__.py
|
fish-ball/python-wechaty-plugin-contrib
|
ae2226300d87f1d13b52215a11a73618e0567d72
|
[
"Apache-2.0"
] | null | null | null |
src/wechaty_plugin_contrib/__init__.py
|
fish-ball/python-wechaty-plugin-contrib
|
ae2226300d87f1d13b52215a11a73618e0567d72
|
[
"Apache-2.0"
] | null | null | null |
src/wechaty_plugin_contrib/__init__.py
|
fish-ball/python-wechaty-plugin-contrib
|
ae2226300d87f1d13b52215a11a73618e0567d72
|
[
"Apache-2.0"
] | null | null | null |
"""import all plugins"""
from wechaty_plugin_contrib.ding_dong_plugin import DingDongPlugin
from wechaty_plugin_contrib.daily_plugin import (
DailyPluginOptions,
DailyPlugin
)
__all__ = [
'DingDongPlugin',
'DailyPluginOptions',
'DailyPlugin'
]
| 19
| 66
| 0.755639
|
7c942090eb7f092b1b3ef8781dcf5021f9fbc35e
| 3,003
|
py
|
Python
|
aegea/flow_logs.py
|
jshoe/aegea
|
dfcb46672a703e831961dc5dd1c6f36488374555
|
[
"Apache-2.0"
] | null | null | null |
aegea/flow_logs.py
|
jshoe/aegea
|
dfcb46672a703e831961dc5dd1c6f36488374555
|
[
"Apache-2.0"
] | null | null | null |
aegea/flow_logs.py
|
jshoe/aegea
|
dfcb46672a703e831961dc5dd1c6f36488374555
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys, argparse
from botocore.exceptions import ClientError
from .ls import register_parser, register_listing_parser, grep, add_time_bound_args
from .util import Timestamp, paginate, hashabledict
from .util.printing import format_table, page_output, get_field, get_cell, tabulate
from .util.exceptions import AegeaException
from .util.aws import ARN, resources, clients, ensure_iam_role, ensure_vpc, expect_error_codes
def flow_logs(args):
flow_logs_parser.print_help()
flow_logs_parser = register_parser(flow_logs, help="Manage EC2 VPC flow logs", description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
def create(args):
if args.resource and args.resource.startswith("vpc-"):
resource_type = "VPC"
elif args.resource and args.resource.startswith("subnet-"):
resource_type = "Subnet"
elif args.resource and args.resource.startswith("eni-"):
resource_type = "NetworkInterface"
elif args.resource:
raise AegeaException('Unrecognized resource type: "{}"'.format(args.resource))
else:
args.resource = ensure_vpc().id
resource_type = "VPC"
flow_logs_iam_role = ensure_iam_role(__name__,
policies=["service-role/AmazonAPIGatewayPushToCloudWatchLogs"],
trust=["vpc-flow-logs"])
try:
return clients.ec2.create_flow_logs(ResourceIds=[args.resource],
ResourceType=resource_type,
TrafficType=args.traffic_type,
LogGroupName=__name__,
DeliverLogsPermissionArn=flow_logs_iam_role.arn)
except ClientError as e:
expect_error_codes(e, "FlowLogAlreadyExists")
return dict(FlowLogAlreadyExists=True)
parser = register_parser(create, parent=flow_logs_parser, help="Create VPC flow logs")
parser.add_argument("--resource")
parser.add_argument("--traffic_type", choices=["ACCEPT", "REJECT", "ALL"], default="ALL")
def ls(args):
describe_flow_logs_args = dict(Filters=[dict(Name="resource-id", Values=[args.resource])]) if args.resource else {}
page_output(tabulate(clients.ec2.describe_flow_logs(**describe_flow_logs_args)["FlowLogs"], args))
parser = register_listing_parser(ls, parent=flow_logs_parser, help="List VPC flow logs")
parser.add_argument("--resource")
def get(args):
args.log_group, args.pattern = __name__, None
args.log_stream = "-".join([args.network_interface, args.traffic_type]) if args.network_interface else None
grep(args)
parser = register_parser(get, parent=flow_logs_parser, help="Get VPC flow logs")
parser.add_argument("--network-interface")
parser.add_argument("--traffic_type", choices=["ACCEPT", "REJECT", "ALL"], default="ALL")
add_time_bound_args(parser)
| 47.666667
| 119
| 0.689977
|
6b32c6147c7208216630b5da677a4bb72525200c
| 4,697
|
py
|
Python
|
examples/working_with_files/concatenate_files.py
|
LiamBindle/gcpy
|
64ac8f236ecc11da88d874c558463dd5f8cc6503
|
[
"NCSA",
"Apache-2.0",
"MIT"
] | 1
|
2020-02-20T23:41:26.000Z
|
2020-02-20T23:41:26.000Z
|
examples/working_with_files/concatenate_files.py
|
LiamBindle/gcpy
|
64ac8f236ecc11da88d874c558463dd5f8cc6503
|
[
"NCSA",
"Apache-2.0",
"MIT"
] | null | null | null |
examples/working_with_files/concatenate_files.py
|
LiamBindle/gcpy
|
64ac8f236ecc11da88d874c558463dd5f8cc6503
|
[
"NCSA",
"Apache-2.0",
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
This Python script concatenates several individual netCDF files
into a single netCDF file using xarray.
Calling sequence:
    ./concatenate_files.py
Remarks:
If you have several individual files with one variable per file,
you should consider concatenating them into a single file.
This is often more efficient, as opening each netCDF file incurs
computational overhead. It is usually faster to read data from
a file with multiple variables than having to open several
files with one variable each.
'''
# Imports
from gcpy import core
import os
import numpy as np
import xarray as xr
from xarray.coding.variables import SerializationWarning
import warnings
# Suppress harmless run-time warnings (mostly about underflow or NaNs)
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=SerializationWarning)
def find_files_in_dir(path, substrs):
'''
Returns a list of all files in a directory that match one or more
substrings.
Args:
-----
path : str
Path to the directory in which to search for files.
substrs : list of str
List of substrings used in the search for files.
Returns:
--------
file_list : list of str
List of files in the directory (specified by path)
that match all substrings (specified in substrs).
'''
# Initialize
file_list = []
# Walk through the given data directory. Then for each file found,
# add it to file_list if it matches text in search_list.
for root, directory, files in os.walk(path):
for f in files:
for s in substrs:
if s in f:
file_list.append(os.path.join(root, f))
# Return an alphabetically sorted list of files
file_list.sort()
return file_list
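# --- Editor-added sketch (hedged): a tiny, self-contained exercise of
# find_files_in_dir(). It is wrapped in a function and never called automatically,
# so importing this script stays side-effect free. The file names are made up.
def _demo_find_files_in_dir():
    '''Create two temporary files and show that only the matching one is found.'''
    import tempfile
    tmpdir = tempfile.mkdtemp()
    for name in ['GEOSChem.SpeciesConc.20190701.nc4', 'GEOSChem.Met.20190701.nc4']:
        open(os.path.join(tmpdir, name), 'w').close()
    # Only the file containing the 'SpeciesConc' substring should be returned.
    return find_files_in_dir(tmpdir, ['SpeciesConc'])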
def replace_nans_with_zeroes(ds, verbose=True):
'''
Replaces NaN values with zeroes for each variable
    within an xarray Dataset.
Args:
----
ds : xarray Dataset
The input dataset, containing one or more data variables.
Keyword Args (optional):
------------------------
verbose : boolean
Set this switch to print out the variable name, as well
as the min and max of the variable. This will illustrate
the replacement of NaNs with zeroes.
'''
# Keep all netCDF attributes
with xr.set_options(keep_attrs=True):
# Loop over all variables in the Dataset
for v in ds.data_vars.keys():
# OPTIONAL STEP:
            # Xarray will try to convert missing values to NaN's,
# so you may need to replace these with zeroes.
#
# If your netCDF files represent e.g. emissions,
# or other physical quantities, you may want to
# replace these with zeros, so that NaNs won't
# get read into atmospheric models, etc.
#
# NOTE: ds[v].values converts to a numpy ndarray,
# so that you can use numpy functions.
            ds[v] = ds[v].where(~np.isnan(ds[v].values), other=0.0, drop=False)
# OPTIONAL: Print min & max for each variable
# Comment out if you wish
if verbose:
print('{} : {} {}'.format(
v, np.min(ds[v].values), np.max(ds[v].values)))
    # Return the modified Dataset
return ds
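# --- Editor-added sketch (hedged): a minimal in-memory demonstration of
# replace_nans_with_zeroes(). Defined as a function and never called
# automatically; the variable name and values are illustrative only.
def _demo_replace_nans_with_zeroes():
    '''Zero out a single NaN in a toy 2x2 Dataset.'''
    ds = xr.Dataset({'emis': (('lat', 'lon'), np.array([[1.0, np.nan],
                                                        [2.0, 3.0]]))})
    ds = replace_nans_with_zeroes(ds, verbose=True)
    assert not np.isnan(ds['emis'].values).any()
    return ds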
def main():
'''
Main program.
'''
# File path containing data files
# (YOU CAN EDIT THIS)
path_to_dir = '/path/to/my/netcdf/files/'
# List of search strings that each file must contain
# (YOU CAN EDIT THIS)
substrs = ['SpeciesConc']
# Get a list of variables that GCPy should not read.
# These are mostly variables introduced into GCHP with the MAPL v1.0.0
# update. These variables contain either repeated or non-standard
# dimensions that can cause problems in xarray when combining datasets.
skip_vars = core.skip_these_vars()
# Look for all the netCDF files in the path
file_list = find_files_in_dir(path_to_dir, substrs)
ds = xr.open_mfdataset(file_list, drop_variables=skip_vars)
# Replace NaN values with zeroes
ds = replace_nans_with_zeroes(ds, verbose=True)
# Specify the path and filename for the concatenated data
# (YOU CAN EDIT THIS)
outdir = '/path/to/my/output/file'
outfile = os.path.join(outdir, 'my_concatenated_output_file.nc')
# Write concatenated data to a netCDF file
ds.to_netcdf(outfile)
if __name__ == "__main__":
main()
| 30.699346
| 75
| 0.648073
|
067b11b2c7d2dcadeded241ea22612126a0a8b65
| 274
|
py
|
Python
|
config.py
|
banan039pl/flask_leaderboard
|
832c7039f0697c5004a35b67d01957c718e5d4ef
|
[
"MIT"
] | 5
|
2020-06-15T02:56:39.000Z
|
2021-12-28T19:18:18.000Z
|
config.py
|
banan039pl/flask_leaderboard
|
832c7039f0697c5004a35b67d01957c718e5d4ef
|
[
"MIT"
] | 2
|
2019-12-01T15:50:05.000Z
|
2021-12-17T07:54:23.000Z
|
config.py
|
banan039pl/flask_leaderboard
|
832c7039f0697c5004a35b67d01957c718e5d4ef
|
[
"MIT"
] | 9
|
2020-01-19T11:21:33.000Z
|
2022-02-22T06:28:52.000Z
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
print(basedir)
class Config(object):
# ...
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
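# --- Editor-added usage sketch (hedged): how a Flask app typically consumes this
# Config object. Flask must be installed; the print is purely illustrative.
if __name__ == "__main__":
    from flask import Flask
    app = Flask(__name__)
    app.config.from_object(Config)
    print(app.config['SQLALCHEMY_DATABASE_URI'])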
| 30.444444
| 65
| 0.693431
|
42b1dcf864839aee4d3bd7f0d8eb1e786f00123f
| 12,192
|
py
|
Python
|
tensorlayer/layers/normalization.py
|
lllcho/tensorlayer
|
87591b4945a6a67dfb4ea797a575efae997fd9d2
|
[
"Apache-2.0"
] | null | null | null |
tensorlayer/layers/normalization.py
|
lllcho/tensorlayer
|
87591b4945a6a67dfb4ea797a575efae997fd9d2
|
[
"Apache-2.0"
] | null | null | null |
tensorlayer/layers/normalization.py
|
lllcho/tensorlayer
|
87591b4945a6a67dfb4ea797a575efae997fd9d2
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from .core import *
from .. import _logging as logging
import tensorflow as tf
__all__ = [
'LocalResponseNormLayer',
'BatchNormLayer',
'InstanceNormLayer',
'LayerNormLayer',
]
class LocalResponseNormLayer(Layer):
"""The :class:`LocalResponseNormLayer` layer is for Local Response Normalization.
See ``tf.nn.local_response_normalization`` or ``tf.nn.lrn`` for new TF version.
The 4-D input tensor is a 3-D array of 1-D vectors (along the last dimension), and each vector is normalized independently.
Within a given vector, each component is divided by the weighted square-sum of inputs within depth_radius.
Parameters
-----------
layer : :class:`Layer`
The previous layer with a 4D output shape.
depth_radius : int
Depth radius. 0-D. Half-width of the 1-D normalization window.
bias : float
        An offset, usually positive, that avoids division by zero.
alpha : float
A scale factor which is usually positive.
beta : float
An exponent.
name : str
A unique layer name.
"""
def __init__(
self,
prev_layer,
depth_radius=None,
bias=None,
alpha=None,
beta=None,
name='lrn_layer',
):
Layer.__init__(self, prev_layer=prev_layer, name=name)
self.inputs = prev_layer.outputs
logging.info("LocalResponseNormLayer %s: depth_radius: %s, bias: %s, alpha: %s, beta: %s" % (self.name, str(depth_radius), str(bias), str(alpha),
str(beta)))
with tf.variable_scope(name):
self.outputs = tf.nn.lrn(self.inputs, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta)
# self.all_layers = list(layer.all_layers)
# self.all_params = list(layer.all_params)
# self.all_drop = dict(layer.all_drop)
self.all_layers.append(self.outputs)
class BatchNormLayer(Layer):
"""
The :class:`BatchNormLayer` is a batch normalization layer for both fully-connected and convolution outputs.
See ``tf.nn.batch_normalization`` and ``tf.nn.moments``.
Parameters
----------
layer : :class:`Layer`
The previous layer.
decay : float
A decay factor for `ExponentialMovingAverage`.
Suggest to use a large value for large dataset.
epsilon : float
        Epsilon.
act : activation function
The activation function of this layer.
is_train : boolean
Is being used for training or inference.
beta_init : initializer or None
The initializer for initializing beta, if None, skip beta.
        Usually you should not skip beta unless you know what you are doing.
gamma_init : initializer or None
The initializer for initializing gamma, if None, skip gamma.
        When the batch normalization layer is used instead of 'biases', or the next layer is linear, this can be
disabled since the scaling can be done by the next layer. see `Inception-ResNet-v2 <https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_resnet_v2.py>`__
dtype : TensorFlow dtype
tf.float32 (default) or tf.float16.
name : str
A unique layer name.
References
----------
- `Source <https://github.com/ry/tensorflow-resnet/blob/master/resnet.py>`__
- `stackoverflow <http://stackoverflow.com/questions/38312668/how-does-one-do-inference-with-batch-normalization-with-tensor-flow>`__
"""
def __init__(
self,
prev_layer,
decay=0.9,
epsilon=0.00001,
act=tf.identity,
is_train=False,
beta_init=tf.zeros_initializer,
gamma_init=tf.random_normal_initializer(mean=1.0, stddev=0.002),
name='batchnorm_layer',
):
Layer.__init__(self, prev_layer=prev_layer, name=name)
self.inputs = prev_layer.outputs
logging.info("BatchNormLayer %s: decay:%f epsilon:%f act:%s is_train:%s" % (self.name, decay, epsilon, act.__name__, is_train))
x_shape = self.inputs.get_shape()
params_shape = x_shape[-1:]
from tensorflow.python.training import moving_averages
with tf.variable_scope(name):
axis = list(range(len(x_shape) - 1))
# 1. beta, gamma
variables = []
if beta_init:
if tf.__version__ > '0.12.1' and beta_init == tf.zeros_initializer:
beta_init = beta_init()
beta = tf.get_variable('beta', shape=params_shape, initializer=beta_init, dtype=LayersConfig.tf_dtype, trainable=is_train)
variables.append(beta)
else:
beta = None
if gamma_init:
gamma = tf.get_variable(
'gamma',
shape=params_shape,
initializer=gamma_init,
dtype=LayersConfig.tf_dtype,
trainable=is_train,
)
variables.append(gamma)
else:
gamma = None
# 2.
if tf.__version__ > '0.12.1':
moving_mean_init = tf.zeros_initializer()
else:
moving_mean_init = tf.zeros_initializer
moving_mean = tf.get_variable('moving_mean', params_shape, initializer=moving_mean_init, dtype=LayersConfig.tf_dtype, trainable=False)
moving_variance = tf.get_variable(
'moving_variance',
params_shape,
initializer=tf.constant_initializer(1.),
dtype=LayersConfig.tf_dtype,
trainable=False,
)
# 3.
            # These ops will only be performed when training.
mean, variance = tf.nn.moments(self.inputs, axis)
try: # TF12
update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, decay, zero_debias=False) # if zero_debias=True, has bias
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False) # if zero_debias=True, has bias
# logging.info("TF12 moving")
except Exception: # TF11
update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, decay)
update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, decay)
# logging.info("TF11 moving")
def mean_var_with_update():
with tf.control_dependencies([update_moving_mean, update_moving_variance]):
return tf.identity(mean), tf.identity(variance)
if is_train:
mean, var = mean_var_with_update()
self.outputs = act(tf.nn.batch_normalization(self.inputs, mean, var, beta, gamma, epsilon))
else:
self.outputs = act(tf.nn.batch_normalization(self.inputs, moving_mean, moving_variance, beta, gamma, epsilon))
variables.extend([moving_mean, moving_variance])
# logging.info(len(variables))
# for idx, v in enumerate(variables):
# logging.info(" var {:3}: {:15} {}".format(idx, str(v.get_shape()), v))
# exit()
# self.all_layers = list(layer.all_layers)
# self.all_params = list(layer.all_params)
# self.all_drop = dict(layer.all_drop)
self.all_layers.append(self.outputs)
self.all_params.extend(variables)
class InstanceNormLayer(Layer):
    """The :class:`InstanceNormLayer` class is for instance normalization.
Parameters
-----------
layer : :class:`Layer`
The previous layer.
act : activation function.
The activation function of this layer.
epsilon : float
        Epsilon.
name : str
A unique layer name
"""
def __init__(
self,
prev_layer,
act=tf.identity,
epsilon=1e-5,
name='instan_norm',
):
Layer.__init__(self, prev_layer=prev_layer, name=name)
self.inputs = prev_layer.outputs
logging.info("InstanceNormLayer %s: epsilon:%f act:%s" % (self.name, epsilon, act.__name__))
with tf.variable_scope(name) as vs:
mean, var = tf.nn.moments(self.inputs, [1, 2], keep_dims=True)
scale = tf.get_variable(
'scale', [self.inputs.get_shape()[-1]], initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02), dtype=LayersConfig.tf_dtype)
offset = tf.get_variable('offset', [self.inputs.get_shape()[-1]], initializer=tf.constant_initializer(0.0), dtype=LayersConfig.tf_dtype)
self.outputs = scale * tf.div(self.inputs - mean, tf.sqrt(var + epsilon)) + offset
self.outputs = act(self.outputs)
variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)
# self.all_layers = list(layer.all_layers)
# self.all_params = list(layer.all_params)
# self.all_drop = dict(layer.all_drop)
self.all_layers.append(self.outputs)
self.all_params.extend(variables)
class LayerNormLayer(Layer):
"""
The :class:`LayerNormLayer` class is for layer normalization, see `tf.contrib.layers.layer_norm <https://www.tensorflow.org/api_docs/python/tf/contrib/layers/layer_norm>`__.
Parameters
----------
layer : :class:`Layer`
The previous layer.
act : activation function
The activation function of this layer.
others : _
`tf.contrib.layers.layer_norm <https://www.tensorflow.org/api_docs/python/tf/contrib/layers/layer_norm>`__.
"""
def __init__(self,
prev_layer,
center=True,
scale=True,
act=tf.identity,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
begin_norm_axis=1,
begin_params_axis=-1,
name='layernorm'):
Layer.__init__(self, prev_layer=prev_layer, name=name)
self.inputs = prev_layer.outputs
logging.info("LayerNormLayer %s: act:%s" % (self.name, act.__name__))
if tf.__version__ < "1.3":
# raise Exception("Please use TF 1.3+")
with tf.variable_scope(name) as vs:
self.outputs = tf.contrib.layers.layer_norm(
self.inputs,
center=center,
scale=scale,
activation_fn=act,
reuse=reuse,
variables_collections=variables_collections,
outputs_collections=outputs_collections,
trainable=trainable,
# begin_norm_axis=begin_norm_axis,
# begin_params_axis=begin_params_axis,
scope='var',
)
variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)
else:
with tf.variable_scope(name) as vs:
self.outputs = tf.contrib.layers.layer_norm(
self.inputs,
center=center,
scale=scale,
activation_fn=act,
reuse=reuse,
variables_collections=variables_collections,
outputs_collections=outputs_collections,
trainable=trainable,
begin_norm_axis=begin_norm_axis,
begin_params_axis=begin_params_axis,
scope='var',
)
variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)
# self.all_layers = list(layer.all_layers)
# self.all_params = list(layer.all_params)
# self.all_drop = dict(layer.all_drop)
self.all_layers.append(self.outputs)
self.all_params.extend(variables)
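# --- Editor-added illustration (hedged; not used by the layers above) ---
# BatchNormLayer tracks running statistics with an exponential moving average:
# moving_stat <- decay * moving_stat + (1 - decay) * batch_stat, which is the
# update assign_moving_average applies when zero_debias=False. The pure-numpy
# loop below only illustrates that rule with made-up batch means; the guard is
# reachable only where this module's TensorFlow 1.x imports resolve.
if __name__ == "__main__":
    _decay = 0.9
    _moving_mean = 0.0
    for _batch_mean in [1.0, 1.2, 0.8, 1.1]:
        _moving_mean = _decay * _moving_mean + (1 - _decay) * _batch_mean
    print("moving mean after 4 batches:", round(_moving_mean, 4))  # -> 0.3521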
| 39.97377
| 186
| 0.594242
|
9fc7f5937bcae855c1b8166dcf6331c36b7aa350
| 25,149
|
py
|
Python
|
Packs/Base/Scripts/DBotFindSimilarIncidentsByIndicators/DBotFindSimilarIncidentsByIndicators.py
|
SergeBakharev/content
|
d66cc274f5bf6f9f0e9ed7e4df1af7b6f305aacf
|
[
"MIT"
] | 799
|
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Base/Scripts/DBotFindSimilarIncidentsByIndicators/DBotFindSimilarIncidentsByIndicators.py
|
SergeBakharev/content
|
d66cc274f5bf6f9f0e9ed7e4df1af7b6f305aacf
|
[
"MIT"
] | 9,317
|
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Base/Scripts/DBotFindSimilarIncidentsByIndicators/DBotFindSimilarIncidentsByIndicators.py
|
SergeBakharev/content
|
d66cc274f5bf6f9f0e9ed7e4df1af7b6f305aacf
|
[
"MIT"
] | 1,297
|
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
from sklearn.base import BaseEstimator, TransformerMixin
import pandas as pd
import numpy as np
from collections import Counter
import re
import math
from typing import List, Dict
STATUS_DICT = {
0: "Pending",
1: "Active",
2: "Closed",
3: "Archive",
}
ROUND_SCORING = 2
PLAYGROUND_PATTERN = '[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}'
FIRST_COLUMNS_INCIDENTS_DISPLAY = ['incident ID', 'created', 'name']
FIELDS_TO_REMOVE_TO_DISPLAY = ['id']
INCIDENT_FIELDS_TO_USE = ['indicators']
FIELD_INDICATOR_TYPE = 'indicator_type'
def normalize(x: List[str]) -> str:
"""
Normalize function for indicators
:param x: list of indicators
:return:
"""
return ' '.join(x)
def identity_score(x):
"""
Identity function
:param x: object
:return:
"""
return x
def flatten_list(my_list: List[List]) -> List:
"""
Flatten a list of list
    :param my_list: list of lists
:return: list
"""
return [item for sublist in my_list for item in sublist]
class FrequencyIndicators(BaseEstimator, TransformerMixin):
"""
FrequencyIndicators class for indicator frequencies computation
"""
def __init__(self, incident_field, normalize_function, current_incident):
self.incident_field = incident_field
self.normalize_function = normalize_function
self.frequency = {}
if self.normalize_function:
current_incident = current_incident[self.incident_field].apply(self.normalize_function)
else:
current_incident = current_incident[self.incident_field]
self.vocabulary = current_incident.iloc[0].split(' ')
def fit(self, x):
if self.normalize_function:
x = x[self.incident_field].apply(self.normalize_function)
else:
x = x[self.incident_field]
size = len(x) + 1
frequencies = Counter(flatten_list([t.split(' ') for t in x.values]))
frequencies.update(Counter(self.vocabulary))
self.frequency = {k: math.log(1 + size / v) for k, v in frequencies.items()}
return self
def transform(self, x):
if self.normalize_function:
x = x[self.incident_field].apply(self.normalize_function)
else:
x = x[self.incident_field]
return x.apply(self.compute_term_score)
def compute_term_score(self, indicators_values_string: str) -> float:
x = indicators_values_string.split(' ')
return sum([1 * self.frequency[word] for word in self.vocabulary if word in x]) / sum(
[self.frequency[word] for word in self.vocabulary])
TRANSFORMATION = {
'frequency_indicators': {'transformer': FrequencyIndicators,
'normalize': None,
'scoring_function': identity_score
}
}
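# --- Editor-added illustration (hedged; never called by the script) ---
# A self-contained walk-through of FrequencyIndicators: each indicator of the
# current incident gets a weight log(1 + N / df), and every candidate incident is
# scored by its weighted overlap with the current incident's indicator set. The
# toy indicator ids are made up, and the helper only runs if invoked explicitly
# (the top-level demisto imports must resolve).
def _demo_frequency_indicators():
    current = pd.DataFrame({'indicators': ['ind_a ind_b']})
    candidates = pd.DataFrame({'indicators': ['ind_a ind_c', 'ind_c ind_d']})
    transformer = FrequencyIndicators('indicators', None, current)
    scores = transformer.fit_transform(candidates)
    # First candidate shares 'ind_a' with the current incident, the second none.
    return list(np.round(scores, ROUND_SCORING))  # -> [0.4, 0.0]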
class Transformer():
def __init__(self, p_transformer_type, incident_field, p_incidents_df, p_current_incident, p_params):
"""
:param p_transformer_type: One of the key value of TRANSFORMATION dict
:param incident_field: incident field used in this transformation
        :param p_incidents_df: DataFrame of incidents (should contain a column with the same name as incident_field)
:param p_current_incident: DataFrame of the current incident
:param p_params: Dictionary of all the transformation - TRANSFORMATION
"""
self.transformer_type = p_transformer_type
self.incident_field = incident_field
self.current_incident = p_current_incident
self.incidents_df = p_incidents_df
self.params = p_params
def fit_transform(self):
transformation = self.params[self.transformer_type]
transformer = transformation['transformer'](self.incident_field, transformation['normalize'],
self.current_incident)
X_vect = transformer.fit_transform(self.incidents_df)
incident_vect = transformer.transform(self.current_incident)
return X_vect, incident_vect
def get_score(self):
scoring_function = self.params[self.transformer_type]['scoring_function']
X_vect, incident_vect = self.fit_transform()
distance = scoring_function(X_vect)
self.incidents_df['similarity %s' % self.incident_field] = np.round(distance, ROUND_SCORING)
return self.incidents_df
class Model:
def __init__(self, p_transformation):
"""
:param p_transformation: Dict with the transformers parameters - TRANSFORMATION
"""
self.transformation = p_transformation
def init_prediction(self, p_incident_to_match, p_incidents_df, p_fields_for_frequencyIndicators=[]):
"""
:param p_incident_to_match: Dataframe with one incident
:param p_incidents_df: Dataframe with all the incidents
        :param p_fields_for_frequencyIndicators: list of incident fields used by the 'indicators' transformer
:return:
"""
self.incident_to_match = p_incident_to_match
self.incidents_df = p_incidents_df
self.fields_for_frequencyIndicators = p_fields_for_frequencyIndicators
def predict(self):
self.remove_empty_field()
self.get_score()
self.prepare_for_display()
return self.incidents_df
def remove_empty_field(self):
remove_list = []
for field in self.fields_for_frequencyIndicators:
if field not in self.incident_to_match.columns or not self.incident_to_match[field].values[
0] or not isinstance(self.incident_to_match[field].values[0], str) or \
self.incident_to_match[field].values[0] == 'None' or \
self.incident_to_match[field].values[0] == 'N/A':
remove_list.append(field)
self.fields_for_frequencyIndicators = [x for x in self.fields_for_frequencyIndicators if
x not in remove_list]
def get_score(self):
for field in self.fields_for_frequencyIndicators:
t = Transformer('frequency_indicators', field, self.incidents_df, self.incident_to_match,
self.transformation)
t.get_score()
def prepare_for_display(self):
vocabulary = self.incident_to_match['indicators'].iloc[0].split(' ')
self.incidents_df['Identical indicators'] = self.incidents_df['indicators'].apply(
lambda x: ','.join([id for id in x.split(' ') if id in vocabulary]))
def get_all_indicators_for_incident(incident_id: str) -> List[Dict]:
"""
Get indicators for one incident
:param incident_id: incident id
:return:
"""
query = 'incident.id:%s' % incident_id
res = demisto.executeCommand("findIndicators", {'query': query})
if is_error(res):
get_error(res)
if not res[0]['Contents']:
return []
indicators = res[0]['Contents']
return indicators
def get_number_of_invs_for_indicators(indicator: Dict) -> int:
"""
    :param indicator: dict representing an indicator
    :return: number of investigation IDs for this indicator
"""
invs = indicator.get('investigationIDs') or []
return len(invs)
def get_indicators_from_incident_ids(ids: List[str]) -> List[Dict]:
"""
Get indicators for list of incidents ids
:param ids: List of incident ids
:return: List of indicators for each id
"""
ids_string = []
for id_ in ids:
ids_string.append('incident.id: "%s"' % id_)
query = " OR ".join(ids_string)
res = demisto.executeCommand('findIndicators', {
'query': query
})
if is_error(res):
get_error(res)
if not res[0]['Contents']:
return []
indicators = res[0]['Contents']
return indicators
def match_indicators_incident(indicators: List[Dict], incident_ids: List[str]) -> Dict[str, List]:
"""
:param indicators: list of dict representing indicators
:param incident_ids: list of incident ids
:return: dict of {incident id : list of indicators ids related to this incident)
"""
d = {k: [] for k in incident_ids} # type: Dict[str, List]
for indicator in indicators:
inv_ids = indicator.get('investigationIDs', None)
if inv_ids:
for inv_id in inv_ids:
if inv_id in d.keys():
d[inv_id] = d[inv_id] + [indicator['id']]
return d
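# --- Editor-added illustration (hedged; never called by the script) ---
# Shape of the mapping produced by match_indicators_incident(): incident id ->
# ids of the mutual indicators seen in that incident. Toy values only.
def _demo_match_indicators_incident():
    indicators = [{'id': 'i1', 'investigationIDs': ['10', '11']},
                  {'id': 'i2', 'investigationIDs': ['11']}]
    # Expected: {'10': ['i1'], '11': ['i1', 'i2']}
    return match_indicators_incident(indicators, ['10', '11'])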
def enriched_incidents(df, fields_incident_to_display, from_date: str):
"""
Enriched incidents with data
:param df: Incidents dataFrame
    :param fields_incident_to_display: Fields selected for enrichment
:param from_date: from_date
:return: Incidents dataFrame enriched
"""
if 'id' in df.columns:
ids = df.id.tolist()
else:
ids = df.index
ids_string = []
for id_ in ids:
ids_string.append('id: "%s"' % id_)
query = " OR ".join(ids_string)
res = demisto.executeCommand('GetIncidentsByQuery', {
'query': query,
'populateFields': ' , '.join(fields_incident_to_display),
'fromDate': from_date,
})
if is_error(res):
return_error(res)
if not json.loads(res[0]['Contents']):
return df
else:
incidents = json.loads(res[0]['Contents'])
incidents_dict = {incident['id']: incident for incident in incidents}
for field in fields_incident_to_display:
if field == 'created':
df[field] = [incidents_dict.get(id_, {}).get(field, '')[:10] if
len(incidents_dict.get(id_, {}).get(field, '')) > 10 else '' for id_ in ids]
elif field == 'status':
df[field] = [STATUS_DICT.get(incidents_dict.get(id_, {}).get(field, '')) if
incidents_dict.get(id_, {}).get(field, '') in STATUS_DICT else ' ' for id_ in ids]
else:
df[field] = [incidents_dict.get(id_, {}).get(field, '') for id_ in ids]
return df
def return_outputs_custom(readable_output, outputs=None):
return_entry = {
"Type": entryTypes["note"],
"HumanReadable": readable_output,
"ContentsFormat": formats['json'],
"Contents": outputs,
"EntryContext": outputs,
}
demisto.results(return_entry)
def return_no_mututal_indicators_found_entry():
hr = '### Mutual Indicators' + '\n'
hr += 'No mutual indicators were found.'
return_outputs_custom(hr, add_context_key(create_context_for_indicators()))
def create_context_for_indicators(indicators_df=None):
if indicators_df is None:
indicators_context = []
else:
indicators_df.rename({'Value': 'value'}, axis=1, inplace=True)
indicators_df = indicators_df[['id', 'value']]
indicators_context = indicators_df.to_dict(orient='records')
return {'indicators': indicators_context}
def add_context_key(entry_context):
new_context = {}
for k, v in entry_context.items():
new_context['{}.{}'.format('MutualIndicators', k)] = v
return new_context
def return_indicator_entry(incident_ids, indicators_types, indicators_list):
indicators_query = 'investigationIDs:({})'.format(' '.join('"{}"'.format(id_) for id_ in incident_ids))
fields = ['id', 'indicator_type', 'investigationIDs', 'relatedIncCount', 'score', 'value']
indicators_args = {'query': indicators_query, 'limit': '150', 'populateFields': ','.join(fields)}
res = demisto.executeCommand('GetIndicatorsByQuery', args=indicators_args)
if is_error(res):
return_error(res)
indicators = res[0]['Contents']
indicators_df = pd.DataFrame(data=indicators)
if len(indicators_df) == 0:
return_no_mututal_indicators_found_entry()
return indicators_df
indicators_df = indicators_df[indicators_df['relatedIncCount'] < 150]
indicators_df['Involved Incidents Count'] = \
indicators_df['investigationIDs'].apply(lambda x: sum(id_ in incident_ids for id_ in x))
indicators_df = indicators_df[indicators_df['Involved Incidents Count'] > 1]
if indicators_types:
indicators_df = indicators_df[indicators_df.indicator_type.isin(indicators_types)]
indicators_df = indicators_df[indicators_df.id.isin([x.get('id') for x in indicators_list])]
if len(indicators_df) == 0:
return_no_mututal_indicators_found_entry()
return indicators_df
indicators_df['Id'] = indicators_df['id'].apply(lambda x: "[%s](#/indicator/%s)" % (x, x))
indicators_df = indicators_df.sort_values(['score', 'Involved Incidents Count'], ascending=False)
indicators_df['Reputation'] = indicators_df['score'].apply(scoreToReputation)
indicators_df.rename({'value': 'Value', 'indicator_type': 'Type'}, axis=1, inplace=True)
indicators_headers = ['Id', 'Value', 'Type', 'Reputation', 'Involved Incidents Count']
hr = tableToMarkdown('Mutual Indicators', indicators_df.to_dict(orient='records'),
headers=indicators_headers)
return_outputs_custom(hr, add_context_key(create_context_for_indicators(indicators_df)))
return indicators_df
def get_indicators_map(indicators: List[Dict]) -> Dict[str, Dict]:
"""
:param indicators: list of dict representing indicators
:return: Dictionary {id of indicators: indicators}
"""
return {ind['id']: ind for ind in indicators}
def join(my_list: List) -> str:
return ' '.join(my_list)
def organize_data(similar_incidents: pd.DataFrame, indicators_map: Dict[str, Dict], threshold: float,
max_incidents_to_display: int) \
-> pd.DataFrame:
"""
Clean and organize the DataFrame before displaying
:param similar_incidents: DataFrame of similar incidents
:param indicators_map: Dict of indicators
:param threshold: similarity score threshold
:param max_incidents_to_display: maximum number of incidents to display
:return: cleaned DataFrame of similar incidents
"""
similar_incidents = similar_incidents.reset_index().rename(columns={'index': 'id'})
similar_incidents['incident ID'] = similar_incidents['id'].apply(lambda _id: "[%s](#/Details/%s)" % (_id, _id))
similar_incidents['Identical indicators'] = similar_incidents['Identical indicators'].apply(
lambda _ids: '\n'.join(
[indicators_map.get(x).get('value') if indicators_map.get(x) else ' ' for x in # type: ignore
_ids.split(',')])) # type: ignore
similar_incidents = similar_incidents[['incident ID', 'id', 'Identical indicators', 'similarity indicators']]
similar_incidents = similar_incidents[similar_incidents['similarity indicators'] > threshold]
similar_incidents.sort_values(['similarity indicators'], inplace=True, ascending=False)
return similar_incidents.head(max_incidents_to_display)
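# Illustrative sketch (assumed values, not from the original script): given a row indexed by
# incident id "44" with 'Identical indicators' == "12,15" and 'similarity indicators' == 0.8,
# organize_data keeps the row only if 0.8 > threshold, rewrites the id as the markdown link
# "[44](#/Details/44)" and replaces the indicator ids with their newline-joined values
# looked up in indicators_map.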
def return_no_similar_incident_found_entry():
    hr = '### No Similar Incidents' + '\n'
    hr += 'No similar incidents were found.'
return_outputs(readable_output=hr, outputs={'DBotFindSimilarIncidentsByIndicators': create_context_for_incidents()},
raw_response={})
def create_context_for_incidents(similar_incidents=pd.DataFrame()):
"""
Return context entry from a DataFrame of similar incidents
:param similar_incidents: DataFrame of similar incidents with indicators
:return: context
"""
if len(similar_incidents) == 0:
context = {
'similarIncidentList': {},
'isSimilarIncidentFound': False
}
else:
context = {
'similarIncident': (similar_incidents.to_dict(orient='records')),
'isSimilarIncidentFound': True
}
return context
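# Illustrative note (not part of the original script): the resulting context is either
# {'similarIncidentList': {}, 'isSimilarIncidentFound': False} when nothing matched, or
# {'similarIncident': [...records...], 'isSimilarIncidentFound': True} otherwise.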
def display_actual_incident(incident_df: pd.DataFrame, incident_id: str, fields_incident_to_display: List[str],
from_date: str) -> None:
"""
Display current incident
:param incident_df: DataFrame of incident
:param incident_id: incident ID
:param fields_incident_to_display: fields to display
:param from_date: date from which incidents are considered for enrichment
:return: None
"""
incident_df['id'] = [incident_id]
incident_df = enriched_incidents(incident_df, fields_incident_to_display, from_date)
incident_df['Incident ID'] = incident_df['id'].apply(lambda _id: "[%s](#/Details/%s)" % (_id, _id))
col_incident = incident_df.columns.tolist()
col_incident = FIRST_COLUMNS_INCIDENTS_DISPLAY + [x for x in col_incident if
x not in FIRST_COLUMNS_INCIDENTS_DISPLAY + ['id', 'indicators']]
col_incident = [x.title() for x in col_incident]
incident_df = incident_df.rename(str.title, axis='columns')
incident_json = incident_df.to_dict(orient='records')
return_outputs(readable_output=tableToMarkdown("Actual Incident", incident_json,
col_incident))
def load_indicators_for_current_incident(incident_id: str, indicators_types: List[str], min_nb_of_indicators: int,
max_indicators_for_white_list: int):
"""
Load the indicators attached to the current incident.
:param incident_id: ID of the current incident
:param indicators_types: list of accepted indicator types
:param min_nb_of_indicators: minimum number of indicators required in the current incident
:param max_indicators_for_white_list: maximum number of related incidents allowed per indicator (white list)
:return: list of indicators, dictionary {indicator id: indicator} and an early-stop flag
"""
indicators = get_all_indicators_for_incident(incident_id)
if not indicators:
return_no_mututal_indicators_found_entry()
return_no_similar_incident_found_entry()
return [], {}, True
indicators_map = get_indicators_map(indicators)
indicators = list(
filter(lambda x: get_number_of_invs_for_indicators(x) < max_indicators_for_white_list, indicators))
if indicators_types:
indicators = [x for x in indicators if x.get(FIELD_INDICATOR_TYPE) in indicators_types]
if len(indicators) < min_nb_of_indicators:
return_no_mututal_indicators_found_entry()
return_no_similar_incident_found_entry()
return [], {}, True
return indicators, indicators_map, False
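# Illustrative note (not part of the original script): the function returns an
# (indicators, indicators_map, early_stop) triple; when early_stop is True the "no mutual
# indicators" / "no similar incident" entries have already been posted and main() returns.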
def get_incidents_ids_related_to_indicators(indicators):
"""
Return incident ids from a list of indicators
:param indicators: List of indicators
:return: list of incident ids and an early-stop flag
"""
incident_ids = [indicator.get('investigationIDs', None) for indicator in indicators if
indicator.get('investigationIDs', None)]
incident_ids = flatten_list(incident_ids)
p = re.compile(PLAYGROUND_PATTERN)
incident_ids = [x for x in incident_ids if not p.match(x)]
if not incident_ids:
return_no_mututal_indicators_found_entry()
return_no_similar_incident_found_entry()
return [], True
return incident_ids, False
def get_related_incidents_with_indicators(incident_ids: List[str], indicators_types: List[str],
incident_id: str) -> pd.DataFrame:
"""
Create a DataFrame of incidents with their indicators from a list of incident ids
:param incident_ids: list of incident ids
:param indicators_types: list of accepted indicator types
:param incident_id: ID of the current incident (removed from the results)
:return: DataFrame of incidents with indicators and an early-stop flag
"""
indicators_related = get_indicators_from_incident_ids(incident_ids)
if not indicators_related:
return_no_similar_incident_found_entry()
return pd.DataFrame(), True
if indicators_types:
indicators_related = [x for x in indicators_related if x.get(FIELD_INDICATOR_TYPE) in indicators_types]
if not indicators_related:
return_no_similar_incident_found_entry()
return pd.DataFrame(), True
incidents_with_indicators = match_indicators_incident(indicators_related, incident_ids)
incidents_with_indicators_join = {k: join(v) for k, v in incidents_with_indicators.items()}
incidents_with_indicators_join.pop(incident_id, None)
if not bool(incidents_with_indicators_join):
return_no_similar_incident_found_entry()
return pd.DataFrame(), True
incidents_df = pd.DataFrame.from_dict(incidents_with_indicators_join, orient='index')
incidents_df.columns = ['indicators']
return incidents_df, False
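# Illustrative note (not part of the original script): the returned DataFrame is indexed by
# incident id and has a single 'indicators' column holding the space-joined indicator ids,
# e.g. a mapping like {'1234': '700 701'} (hypothetical ids) becomes a one-row frame indexed by '1234'.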
def organize_current_incident(current_incident_df, indicators_map):
current_incident_df['Indicators'] = current_incident_df['indicators'].apply(
lambda _ids: '\n'.join(
[indicators_map.get(x).get('value') if indicators_map.get(x) else ' ' for x in # type: ignore
_ids.split(' ')])) # type: ignore
return current_incident_df
def return_outputs_tagged(similar_incidents: pd.DataFrame, context: Dict, tag: Optional[str] = None):
colums_to_display = FIRST_COLUMNS_INCIDENTS_DISPLAY + [x for x in similar_incidents.columns.tolist() if
x not in FIRST_COLUMNS_INCIDENTS_DISPLAY + FIELDS_TO_REMOVE_TO_DISPLAY]
similar_incidents_renamed = similar_incidents.rename(str.title, axis='columns')
similar_incidents_json = similar_incidents_renamed.to_dict(orient='records')
colums_to_display = [x.title() for x in colums_to_display]
readable_output = tableToMarkdown("Similar incidents", similar_incidents_json, colums_to_display)
return_entry = {
"Type": entryTypes["note"],
"HumanReadable": readable_output,
"ContentsFormat": formats['json'],
"Contents": similar_incidents.to_dict(orient='records'),
"EntryContext": {'DBotFindSimilarIncidents': context},
}
if tag is not None:
return_entry["Tags"] = [tag]
demisto.results(return_entry)
def main():
max_indicators_for_white_list = int(demisto.args()['maxIncidentsInIndicatorsForWhiteList'])
min_nb_of_indicators = int(demisto.args()['minNumberOfIndicators'])
threshold = float(demisto.args()['threshold'])
indicators_types = demisto.args().get('indicatorsTypes')
if indicators_types:
indicators_types = indicators_types.split(',')
indicators_types = [x.strip() for x in indicators_types if x]
show_actual_incident = demisto.args().get('showActualIncident')
max_incidents_to_display = int(demisto.args()['maxIncidentsToDisplay'])
fields_incident_to_display = demisto.args()['fieldsIncidentToDisplay'].split(',')
fields_incident_to_display = [x.strip() for x in fields_incident_to_display if x]
fields_incident_to_display = list(set(['created', 'name'] + fields_incident_to_display))
from_date = demisto.args().get('fromDate')
# load the current incident
incident_id = demisto.args().get('incidentId')
if not incident_id:
incident = demisto.incidents()[0]
incident_id = incident['id']
# load the related indicators to the incidents
indicators, indicators_map, early_exit = load_indicators_for_current_incident(incident_id, indicators_types,
min_nb_of_indicators,
max_indicators_for_white_list)
if early_exit:
return
# Get the investigation IDs related to the indicators of the incident
incident_ids, early_exit = get_incidents_ids_related_to_indicators(indicators)
if early_exit:
return
# Return Mutual indicators
_ = return_indicator_entry(incident_ids, indicators_types, indicators)
# Get related incidents with indicators
incidents_df, early_exit = get_related_incidents_with_indicators(incident_ids, indicators_types, incident_id)
if early_exit:
return
# Current incident
indicators_for_incident = [' '.join(set([x.get('id') for x in indicators]))] # type: ignore
current_incident_df = pd.DataFrame(indicators_for_incident, columns=['indicators'])
# Prediction
model = Model(p_transformation=TRANSFORMATION)
model.init_prediction(current_incident_df, incidents_df, INCIDENT_FIELDS_TO_USE)
similar_incidents = model.predict()
# Display and enriched incidents data
current_incident_df = organize_current_incident(current_incident_df, indicators_map)
similar_incidents = organize_data(similar_incidents, indicators_map, threshold, max_incidents_to_display)
similar_incidents = enriched_incidents(similar_incidents, fields_incident_to_display, from_date)
incident_found_bool = (len(similar_incidents) > 0)
if show_actual_incident == 'True':
display_actual_incident(current_incident_df, incident_id, fields_incident_to_display, from_date)
if incident_found_bool:
context = create_context_for_incidents(similar_incidents)
return_outputs_tagged(similar_incidents, context, 'similarIncidents')
else:
return_no_similar_incident_found_entry()
if __name__ in ['__main__', '__builtin__', 'builtins']:
main()
| 41.706468
| 130
| 0.679351
|
a535cc8dcd8d6d31206b2ce52ec495f9ecda7d7e
| 150
|
py
|
Python
|
bin/ominoes/pentominoes-eye.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
bin/ominoes/pentominoes-eye.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
bin/ominoes/pentominoes-eye.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | 1
|
2022-01-02T16:54:14.000Z
|
2022-01-02T16:54:14.000Z
|
#!/usr/bin/env python
# $Id$
"""? solutions"""
import puzzler
from puzzler.puzzles.pentominoes import PentominoesEye as puzzle
puzzler.run(puzzle)
| 15
| 64
| 0.746667
|
0c9792ae4ecebab8c55d363de60de93ddb74b28c
| 311
|
py
|
Python
|
website/__init__.py
|
andrequeiroz2/targit
|
7e42b8e40d492c1cd17c14b0300cfa5c000df2b9
|
[
"MIT"
] | null | null | null |
website/__init__.py
|
andrequeiroz2/targit
|
7e42b8e40d492c1cd17c14b0300cfa5c000df2b9
|
[
"MIT"
] | null | null | null |
website/__init__.py
|
andrequeiroz2/targit
|
7e42b8e40d492c1cd17c14b0300cfa5c000df2b9
|
[
"MIT"
] | null | null | null |
## -- Initial script of the website module --
## There is no need to change anything in this file
from flask import Flask
def create_app():
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secretkey'
from .views import views
app.register_blueprint(views, url_prefix='/')
return app
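## Illustrative usage (assumed entry point, not part of this file):
##   from website import create_app
##   app = create_app()
##   app.run(debug=True)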
| 22.214286
| 50
| 0.665595
|
9bd21cba149be094afc6b4534541036ced792921
| 9,642
|
py
|
Python
|
purple/keyboard/sweep/layout/soar.py
|
a8ksh4/purple
|
11460cc44cda7edf6f8fce5a5c841d4b04a47741
|
[
"MIT"
] | null | null | null |
purple/keyboard/sweep/layout/soar.py
|
a8ksh4/purple
|
11460cc44cda7edf6f8fce5a5c841d4b04a47741
|
[
"MIT"
] | null | null | null |
purple/keyboard/sweep/layout/soar.py
|
a8ksh4/purple
|
11460cc44cda7edf6f8fce5a5c841d4b04a47741
|
[
"MIT"
] | null | null | null |
from adafruit_hid.keycode import Keycode
from adafruit_hid.mouse import Mouse
from purple.action import Action, Lock, MouseMove, MousePress, OneShot, Press, ToLayer
from purple.helpers import key
from purple.layer import Layer
class Layout:
auto_mod = []
layers = [
Layer( # Base
{
key(5): Action(Press(Keycode.A)),
key(10): Action(Press(Keycode.B)),
key(12): Action(Press(Keycode.C)),
key(13): Action(Press(Keycode.D)),
key(7): Action(Press(Keycode.E)),
key(2): Action(Press(Keycode.F)),
key(4): Action(Press(Keycode.G)),
key(6)+key(7): Action(Press(Keycode.H)),
key(7)+key(8): Action(Press(Keycode.I)),
key(9)+key(16): Action(Press(Keycode.J)),
key(1)+key(16): Action(Press(Keycode.K)),
key(5)+key(16): Action(Press(Keycode.L)),
key(9): Action(Press(Keycode.M)),
key(8)+key(16): Action(Press(Keycode.N)),
key(6): Action(Press(Keycode.O)),
key(3): Action(Press(Keycode.P)),
key(14)+key(16): Action(Press(Keycode.Q)),
key(6)+key(16): Action(Press(Keycode.R)),
key(7)+key(16): Action(Press(Keycode.S)),
key(8): Action(Press(Keycode.T)),
key(11): Action(Press(Keycode.U)),
key(0): Action(Press(Keycode.V)),
key(1): Action(Press(Keycode.W)),
key(11)+key(16): Action(Press(Keycode.X)),
key(14): Action(Press(Keycode.Y)),
key(10)+key(16): Action(Press(Keycode.Z)),
key(4)+key(16): Action(Press(Keycode.GRAVE_ACCENT)),
key(3)+key(8): Action(Press(Keycode.SEMICOLON)),
key(11)+key(12): Action(Press(Keycode.PERIOD)),
key(12)+key(13): Action(Press(Keycode.COMMA)),
key(2)+key(16): Action(Press(Keycode.LEFT_BRACKET)),
key(3)+key(16): Action(Press(Keycode.RIGHT_BRACKET)),
key(8)+key(13): Action(Press(Keycode.QUOTE)),
key(13)+key(16): Action(Press(Keycode.MINUS)),
key(12)+key(16): Action(Press(Keycode.EQUALS)),
key(9)+key(14): Action(Press(Keycode.FORWARD_SLASH)),
key(4)+key(9): Action(Press(Keycode.BACKSLASH)),
key(2)+key(7)+key(16): Action(Press(Keycode.SHIFT, Keycode.NINE)),
key(3)+key(8)+key(16): Action(Press(Keycode.SHIFT, Keycode.ZERO)),
# Modifiers
key(5)+key(6): Action(OneShot(Keycode.SHIFT), Press(Keycode.SHIFT), hold=True),
key(5)+key(8): Action(OneShot(Keycode.CONTROL), Press(Keycode.CONTROL), hold=True),
key(5)+key(13): Action(OneShot(Keycode.ALT), Press(Keycode.ALT), hold=True),
key(5)+key(9): Action(OneShot(Keycode.GUI), Press(Keycode.GUI), hold=True),
# Locked Modifiers
key(5)+key(6)+key(15): Action(Lock(Keycode.SHIFT)),
key(15)+key(16): Action(Lock(Keycode.SHIFT)),
key(5)+key(8)+key(15): Action(Lock(Keycode.CONTROL)),
key(5)+key(13)+key(15): Action(Lock(Keycode.ALT)),
key(5)+key(9)+key(15): Action(Lock(Keycode.GUI)),
# Other
key(16): Action(Press(Keycode.SPACE), Press(Keycode.SHIFT), hold=True),
key(5)+key(6)+key(16): Action(Press(Keycode.TAB)),
key(7)+key(8)+key(16): Action(Press(Keycode.ENTER)),
key(15): Action(Press(Keycode.BACKSPACE), hold=True),
key(10)+key(13): Action(Press(Keycode.DELETE), hold=True),
key(0)+key(16): Action(Press(Keycode.ESCAPE)),
key(7)+key(15): Action(ToLayer(1)),
key(6)+key(15): Action(ToLayer(2)),
key(8)+key(15): Action(ToLayer(3)),
key(5)+key(15): Action(ToLayer(4)),
},
(0, 0, 0)
),
Layer( # Nav
{
key(0): Action(Press(Keycode.PAGE_UP)),
key(1): Action(Press(Keycode.HOME)),
key(2): Action(Press(Keycode.UP_ARROW), hold=True),
key(3): Action(Press(Keycode.END)),
key(5): Action(Press(Keycode.PAGE_DOWN)),
key(6): Action(Press(Keycode.LEFT_ARROW), hold=True),
key(7): Action(Press(Keycode.DOWN_ARROW), hold=True),
key(8): Action(Press(Keycode.RIGHT_ARROW), hold=True),
# Shifted
key(0)+key(16): Action(Press(Keycode.SHIFT, Keycode.PAGE_UP)),
key(1)+key(16): Action(Press(Keycode.SHIFT, Keycode.HOME)),
key(2)+key(16): Action(Press(Keycode.SHIFT, Keycode.UP_ARROW), hold=True),
key(3)+key(16): Action(Press(Keycode.SHIFT, Keycode.END)),
key(5)+key(16): Action(Press(Keycode.SHIFT, Keycode.PAGE_DOWN)),
key(6)+key(16): Action(Press(Keycode.SHIFT, Keycode.LEFT_ARROW), hold=True),
key(7)+key(16): Action(Press(Keycode.SHIFT, Keycode.DOWN_ARROW), hold=True),
key(8)+key(16): Action(Press(Keycode.SHIFT, Keycode.RIGHT_ARROW), hold=True),
# Other
key(9): Action(ToLayer(0)),
},
(64, 0, 64)
),
Layer( # Mouse
{
key(0): Action(MouseMove(0, 0, 1), hold=True),
key(1): Action(MousePress(Mouse.RIGHT_BUTTON)),
key(2): Action(MouseMove(0, -8), hold=True),
key(3): Action(MousePress(Mouse.LEFT_BUTTON)),
key(5): Action(MouseMove(0, 0, -1), hold=True),
key(6): Action(MouseMove(-8, 0), hold=True),
key(7): Action(MouseMove(0, 8), hold=True),
key(8): Action(MouseMove(8, 0), hold=True),
# Diagonals
key(2)+key(6): Action(MouseMove(-8, -8), hold=True),
key(2)+key(8): Action(MouseMove(8, -8), hold=True),
key(7)+key(8): Action(MouseMove(8, 8), hold=True),
key(6)+key(7): Action(MouseMove(-8, 8), hold=True),
# Other
key(9): Action(ToLayer(0)),
},
(64, 0, 0)
),
Layer( # Numbers
{
# Base
key(5): Action(Press(Keycode.ZERO)),
key(6): Action(Press(Keycode.ONE)),
key(7): Action(Press(Keycode.TWO)),
key(8): Action(Press(Keycode.THREE)),
key(11): Action(Press(Keycode.FOUR)),
key(12): Action(Press(Keycode.FIVE)),
key(13): Action(Press(Keycode.SIX)),
key(1): Action(Press(Keycode.SEVEN)),
key(2): Action(Press(Keycode.EIGHT)),
key(3): Action(Press(Keycode.NINE)),
# Shifted
key(5)+key(16): Action(Press(Keycode.SHIFT, Keycode.ZERO)),
key(6)+key(16): Action(Press(Keycode.SHIFT, Keycode.ONE)),
key(7)+key(16): Action(Press(Keycode.SHIFT, Keycode.TWO)),
key(8)+key(16): Action(Press(Keycode.SHIFT, Keycode.THREE)),
key(11)+key(16): Action(Press(Keycode.SHIFT, Keycode.FOUR)),
key(12)+key(16): Action(Press(Keycode.SHIFT, Keycode.FIVE)),
key(13)+key(16): Action(Press(Keycode.SHIFT, Keycode.SIX)),
key(1)+key(16): Action(Press(Keycode.SHIFT, Keycode.SEVEN)),
key(2)+key(16): Action(Press(Keycode.SHIFT, Keycode.EIGHT)),
key(3)+key(16): Action(Press(Keycode.SHIFT, Keycode.NINE)),
# Other
key(9): Action(ToLayer(0)),
},
(0, 64, 64)
),
Layer( # Functions
{
# Base
key(10): Action(Press(Keycode.F1)),
key(11): Action(Press(Keycode.F2)),
key(12): Action(Press(Keycode.F3)),
key(13): Action(Press(Keycode.F4)),
key(5): Action(Press(Keycode.F5)),
key(6): Action(Press(Keycode.F6)),
key(7): Action(Press(Keycode.F7)),
key(8): Action(Press(Keycode.F8)),
key(0): Action(Press(Keycode.F9)),
key(1): Action(Press(Keycode.F10)),
key(2): Action(Press(Keycode.F11)),
key(3): Action(Press(Keycode.F12)),
# Shifted
key(10)+key(16): Action(Press(Keycode.SHIFT, Keycode.F1)),
key(11)+key(16): Action(Press(Keycode.SHIFT, Keycode.F2)),
key(12)+key(16): Action(Press(Keycode.SHIFT, Keycode.F3)),
key(13)+key(16): Action(Press(Keycode.SHIFT, Keycode.F4)),
key(5)+key(16): Action(Press(Keycode.SHIFT, Keycode.F5)),
key(6)+key(16): Action(Press(Keycode.SHIFT, Keycode.F6)),
key(7)+key(16): Action(Press(Keycode.SHIFT, Keycode.F7)),
key(8)+key(16): Action(Press(Keycode.SHIFT, Keycode.F8)),
key(0)+key(16): Action(Press(Keycode.SHIFT, Keycode.F9)),
key(1)+key(16): Action(Press(Keycode.SHIFT, Keycode.F10)),
key(2)+key(16): Action(Press(Keycode.SHIFT, Keycode.F11)),
key(3)+key(16): Action(Press(Keycode.SHIFT, Keycode.F12)),
# Other
key(9): Action(ToLayer(0)),
},
(0, 64, 0)
),
]
| 48.69697
| 99
| 0.505289
|
bae25105d78be674f0a2ca4bcba7a12d8636ee7a
| 11,998
|
py
|
Python
|
designate_tempest_plugin/tests/api/v2/test_transfer_request.py
|
izadorozhna/designate-tempest-plugin
|
c1708fc0c878ea3fa544454c7e96c58581478853
|
[
"Apache-2.0"
] | null | null | null |
designate_tempest_plugin/tests/api/v2/test_transfer_request.py
|
izadorozhna/designate-tempest-plugin
|
c1708fc0c878ea3fa544454c7e96c58581478853
|
[
"Apache-2.0"
] | null | null | null |
designate_tempest_plugin/tests/api/v2/test_transfer_request.py
|
izadorozhna/designate-tempest-plugin
|
c1708fc0c878ea3fa544454c7e96c58581478853
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from designate_tempest_plugin.tests import base
from designate_tempest_plugin import data_utils as dns_data_utils
LOG = logging.getLogger(__name__)
class BaseTransferRequestTest(base.BaseDnsV2Test):
excluded_keys = ['created_at', 'updated_at', 'key', 'links']
class TransferRequestTest(BaseTransferRequestTest):
credentials = ['primary', 'alt']
@classmethod
def setup_credentials(cls):
# Do not create network resources for these test.
cls.set_network_resources()
super(TransferRequestTest, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(TransferRequestTest, cls).setup_clients()
cls.zone_client = cls.os_primary.zones_client
cls.client = cls.os_primary.transfer_request_client
cls.alt_client = cls.os_alt.transfer_request_client
@decorators.idempotent_id('2381d489-ad84-403d-b0a2-8b77e4e966bf')
def test_create_transfer_request(self):
LOG.info('Create a zone')
_, zone = self.zone_client.create_zone()
self.addCleanup(self.wait_zone_delete, self.zone_client, zone['id'])
LOG.info('Create a zone transfer_request')
_, transfer_request = self.client.create_transfer_request(zone['id'])
self.addCleanup(self.client.delete_transfer_request,
transfer_request['id'])
LOG.info('Ensure we respond with ACTIVE status')
self.assertEqual('ACTIVE', transfer_request['status'])
@decorators.idempotent_id('5deae1ac-7c14-42dc-b14e-4e4b2725beb7')
def test_create_transfer_request_scoped(self):
LOG.info('Create a zone')
_, zone = self.zone_client.create_zone()
self.addCleanup(self.wait_zone_delete, self.zone_client, zone['id'])
transfer_request_data = dns_data_utils.rand_transfer_request_data(
target_project_id=self.os_alt.credentials.project_id)
LOG.info('Create a scoped zone transfer_request')
_, transfer_request = self.client.create_transfer_request(
zone['id'], transfer_request_data)
self.addCleanup(self.client.delete_transfer_request,
transfer_request['id'])
LOG.info('Ensure we respond with ACTIVE status')
self.assertEqual('ACTIVE', transfer_request['status'])
@decorators.idempotent_id('4505152f-0a9c-4f02-b385-2216c914a0be')
def test_create_transfer_request_empty_body(self):
LOG.info('Create a zone')
_, zone = self.zone_client.create_zone()
self.addCleanup(self.wait_zone_delete, self.zone_client, zone['id'])
LOG.info('Create a zone transfer_request')
_, transfer_request = self.client.create_transfer_request_empty_body(
zone['id'])
self.addCleanup(self.client.delete_transfer_request,
transfer_request['id'])
LOG.info('Ensure we respond with ACTIVE status')
self.assertEqual('ACTIVE', transfer_request['status'])
@decorators.idempotent_id('64a7be9f-8371-4ce1-a242-c1190de7c985')
def test_show_transfer_request(self):
LOG.info('Create a zone')
_, zone = self.zone_client.create_zone()
self.addCleanup(self.wait_zone_delete, self.zone_client, zone['id'])
LOG.info('Create a zone transfer_request')
_, transfer_request = self.client.create_transfer_request(zone['id'])
self.addCleanup(self.client.delete_transfer_request,
transfer_request['id'])
LOG.info('Fetch the transfer_request')
_, body = self.client.show_transfer_request(transfer_request['id'])
LOG.info('Ensure the fetched response matches the '
'created transfer_request')
self.assertExpected(transfer_request, body, self.excluded_keys)
@decorators.idempotent_id('235ded87-0c47-430b-8cad-4f3194b927a6')
def test_show_transfer_request_as_target(self):
# Check that the target of a scoped transfer request can see
# the request.
LOG.info('Create a zone')
_, zone = self.zone_client.create_zone()
self.addCleanup(self.wait_zone_delete, self.zone_client, zone['id'])
transfer_request_data = dns_data_utils.rand_transfer_request_data(
target_project_id=self.os_alt.credentials.project_id)
LOG.info('Create a scoped zone transfer_request')
_, transfer_request = self.client.create_transfer_request(
zone['id'], transfer_request_data)
self.addCleanup(self.client.delete_transfer_request,
transfer_request['id'])
LOG.info('Fetch the transfer_request as the target')
_, body = self.alt_client.show_transfer_request(transfer_request['id'])
LOG.info('Ensure the fetched response matches the '
'created transfer_request')
excluded_keys = self.excluded_keys + ["target_project_id",
"project_id"]
self.assertExpected(transfer_request, body, excluded_keys)
@decorators.idempotent_id('7d81c487-aa15-44c4-b3e5-424ab9e6a3e5')
def test_delete_transfer_request(self):
LOG.info('Create a zone')
_, zone = self.zone_client.create_zone()
self.addCleanup(self.wait_zone_delete, self.zone_client, zone['id'])
LOG.info('Create a transfer_request')
_, transfer_request = self.client.create_transfer_request(zone['id'])
self.addCleanup(self.client.delete_transfer_request,
transfer_request['id'],
ignore_errors=lib_exc.NotFound)
LOG.info('Delete the transfer_request')
_, body = self.client.delete_transfer_request(transfer_request['id'])
self.assertRaises(lib_exc.NotFound,
lambda: self.client.show_transfer_request(transfer_request['id']))
@decorators.idempotent_id('ddd42a19-1768-428c-846e-32f9d6493011')
def test_list_transfer_requests(self):
LOG.info('Create a zone')
_, zone = self.zone_client.create_zone()
self.addCleanup(self.wait_zone_delete, self.zone_client, zone['id'])
LOG.info('Create a zone transfer_request')
_, transfer_request = self.client.create_transfer_request(zone['id'])
self.addCleanup(self.client.delete_transfer_request,
transfer_request['id'])
LOG.info('List transfer_requests')
_, body = self.client.list_transfer_requests()
self.assertGreater(len(body['transfer_requests']), 0)
@decorators.idempotent_id('de5e9d32-c723-4518-84e5-58da9722cc13')
def test_update_transfer_request(self):
LOG.info('Create a zone')
_, zone = self.zone_client.create_zone()
self.addCleanup(self.wait_zone_delete, self.zone_client, zone['id'])
LOG.info('Create a zone transfer_request')
_, transfer_request = self.client.create_transfer_request(zone['id'])
self.addCleanup(self.client.delete_transfer_request,
transfer_request['id'])
LOG.info('Update the transfer_request')
data = {
"description": "demo descripion"
}
_, transfer_request_patch = self.client.update_transfer_request(
transfer_request['id'], transfer_request_data=data)
self.assertEqual(data['description'],
transfer_request_patch['description'])
@decorators.idempotent_id('73b754a9-e856-4fd6-80ba-e8d1b80f5dfa')
def test_list_transfer_requests_dot_json_fails(self):
uri = self.client.get_uri('transfer_requests.json')
self.assertRaises(lib_exc.NotFound,
lambda: self.client.get(uri))
class TestTransferRequestNotFound(BaseTransferRequestTest):
@classmethod
def setup_credentials(cls):
# Do not create network resources for these test.
cls.set_network_resources()
super(TestTransferRequestNotFound, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(TestTransferRequestNotFound, cls).setup_clients()
cls.client = cls.os_primary.transfer_request_client
@decorators.idempotent_id('d255f72f-ba24-43df-9dba-011ed7f4625d')
def test_show_transfer_request_404(self):
e = self.assertRaises(lib_exc.NotFound,
self.client.show_transfer_request,
data_utils.rand_uuid())
self.assertTransferRequest404(e.resp, e.resp_body)
@decorators.idempotent_id('9ff383fb-c31d-4c6f-8085-7b261e401223')
def test_update_transfer_request_404(self):
e = self.assertRaises(lib_exc.NotFound,
self.client.update_transfer_request,
data_utils.rand_uuid())
self.assertTransferRequest404(e.resp, e.resp_body)
@decorators.idempotent_id('5a4a0755-c01d-448f-b856-b081b96ae77e')
def test_delete_transfer_request_404(self):
e = self.assertRaises(lib_exc.NotFound,
self.client.delete_transfer_request,
data_utils.rand_uuid())
self.assertTransferRequest404(e.resp, e.resp_body)
def assertTransferRequest404(self, resp, resp_body):
self.assertEqual(404, resp.status)
self.assertEqual(404, resp_body['code'])
self.assertEqual("zone_transfer_request_not_found", resp_body['type'])
self.assertEqual("Could not find ZoneTransferRequest",
resp_body['message'])
class TestTransferRequestInvalidId(BaseTransferRequestTest):
@classmethod
def setup_credentials(cls):
# Do not create network resources for these test.
cls.set_network_resources()
super(TestTransferRequestInvalidId, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(TestTransferRequestInvalidId, cls).setup_clients()
cls.client = cls.os_primary.transfer_request_client
@decorators.idempotent_id('2205dd19-ecc7-4c68-9e89-63c47d642b07')
def test_show_transfer_request_invalid_uuid(self):
e = self.assertRaises(lib_exc.BadRequest,
self.client.show_transfer_request,
'foo')
self.assertTransferRequestInvalidId(e.resp, e.resp_body)
@decorators.idempotent_id('af0ce46f-10be-4cce-a1d5-1b5c2a39fb97')
def test_update_transfer_request_invalid_uuid(self):
e = self.assertRaises(lib_exc.BadRequest,
self.client.update_transfer_request,
'foo')
self.assertTransferRequestInvalidId(e.resp, e.resp_body)
@decorators.idempotent_id('1728dca5-01f1-45f4-b59d-7a981d479394')
def test_delete_transfer_request_invalid_uuid(self):
e = self.assertRaises(lib_exc.BadRequest,
self.client.delete_transfer_request,
'foo')
self.assertTransferRequestInvalidId(e.resp, e.resp_body)
def assertTransferRequestInvalidId(self, resp, resp_body):
self.assertEqual(400, resp.status)
self.assertEqual(400, resp_body['code'])
self.assertEqual("invalid_uuid", resp_body['type'])
| 43.158273
| 79
| 0.681197
|
54f0026203e6d9d3428efedfd03cff9f4452df68
| 3,221
|
py
|
Python
|
selfdrive/controls/lib/lane_planner.py
|
funk222/openpilot
|
f6d3a507a6aa568c30a2af097d94c61a65d7dc6a
|
[
"MIT"
] | null | null | null |
selfdrive/controls/lib/lane_planner.py
|
funk222/openpilot
|
f6d3a507a6aa568c30a2af097d94c61a65d7dc6a
|
[
"MIT"
] | null | null | null |
selfdrive/controls/lib/lane_planner.py
|
funk222/openpilot
|
f6d3a507a6aa568c30a2af097d94c61a65d7dc6a
|
[
"MIT"
] | 1
|
2020-01-14T03:05:37.000Z
|
2020-01-14T03:05:37.000Z
|
from common.numpy_fast import interp
import numpy as np
from cereal import log
CAMERA_OFFSET = 0.00  # m from center of car to camera
def compute_path_pinv(l=50):
deg = 3
x = np.arange(l*1.0)
X = np.vstack(tuple(x**n for n in range(deg, -1, -1))).T
pinv = np.linalg.pinv(X)
return pinv
def model_polyfit(points, path_pinv):
return np.dot(path_pinv, [float(x) for x in points])
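# Illustrative note (not part of the original file): compute_path_pinv builds the
# pseudo-inverse of a degree-3 Vandermonde matrix X = [x**3, x**2, x, 1] for x = 0..l-1,
# so model_polyfit returns the least-squares polynomial coefficients [a3, a2, a1, a0]
# such that y ~= a3*x**3 + a2*x**2 + a1*x + a0 over the 50 model points.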
def calc_d_poly(l_poly, r_poly, p_poly, l_prob, r_prob, lane_width):
# This will improve behaviour when lanes suddenly widen
lane_width = min(4.0, lane_width)
l_prob = l_prob * interp(abs(l_poly[3]), [2, 2.5], [1.0, 0.0])
r_prob = r_prob * interp(abs(r_poly[3]), [2, 2.5], [1.0, 0.0])
path_from_left_lane = l_poly.copy()
path_from_left_lane[3] -= lane_width / 2.0
path_from_right_lane = r_poly.copy()
path_from_right_lane[3] += lane_width / 2.0
lr_prob = l_prob + r_prob - l_prob * r_prob
d_poly_lane = (l_prob * path_from_left_lane + r_prob * path_from_right_lane) / (l_prob + r_prob + 0.0001)
return lr_prob * d_poly_lane + (1.0 - lr_prob) * p_poly
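# Illustrative note (not part of the original file): lr_prob is the probability that at
# least one lane line is valid (P(l) + P(r) - P(l)*P(r)); the desired path blends the
# lane-centered polynomial with the model's predicted path p_poly by that weight, e.g.
# with l_prob = r_prob = 0 the path falls back entirely to p_poly.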
class LanePlanner():
def __init__(self):
self.l_poly = [0., 0., 0., 0.]
self.r_poly = [0., 0., 0., 0.]
self.p_poly = [0., 0., 0., 0.]
self.d_poly = [0., 0., 0., 0.]
self.lane_width_estimate = 3.0
self.lane_width_certainty = 1.0
self.lane_width = 3.0
self.l_prob = 0.
self.r_prob = 0.
self.l_lane_change_prob = 0.
self.r_lane_change_prob = 0.
self._path_pinv = compute_path_pinv()
self.x_points = np.arange(50)
def parse_model(self, md):
if len(md.leftLane.poly):
self.l_poly = np.array(md.leftLane.poly)
self.r_poly = np.array(md.rightLane.poly)
self.p_poly = np.array(md.path.poly)
else:
self.l_poly = model_polyfit(md.leftLane.points, self._path_pinv) # left line
self.r_poly = model_polyfit(md.rightLane.points, self._path_pinv) # right line
self.p_poly = model_polyfit(md.path.points, self._path_pinv) # predicted path
self.l_prob = md.leftLane.prob # left line prob
self.r_prob = md.rightLane.prob # right line prob
if len(md.meta.desirePrediction):
self.l_lane_change_prob = md.meta.desirePrediction[log.PathPlan.Desire.laneChangeLeft - 1]
self.r_lane_change_prob = md.meta.desirePrediction[log.PathPlan.Desire.laneChangeRight - 1]
def update_d_poly(self, v_ego):
# only offset left and right lane lines; offsetting p_poly does not make sense
self.l_poly[3] += CAMERA_OFFSET
self.r_poly[3] += CAMERA_OFFSET
# Find current lanewidth
self.lane_width_certainty += 0.05 * (self.l_prob * self.r_prob - self.lane_width_certainty)
current_lane_width = abs(self.l_poly[3] - self.r_poly[3])
self.lane_width_estimate += 0.005 * (current_lane_width - self.lane_width_estimate)
speed_lane_width = interp(v_ego, [0., 31.], [2.8, 3.5])
self.lane_width = self.lane_width_certainty * self.lane_width_estimate + \
(1 - self.lane_width_certainty) * speed_lane_width
self.d_poly = calc_d_poly(self.l_poly, self.r_poly, self.p_poly, self.l_prob, self.r_prob, self.lane_width)
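# Illustrative note (not part of the original file): the lane width used above is a
# confidence-weighted blend of the measured estimate and a speed-based prior
# (interp(v_ego, [0., 31.], [2.8, 3.5])), so at low confidence the prior dominates.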
def update(self, v_ego, md):
self.parse_model(md)
self.update_d_poly(v_ego)
| 35.788889
| 111
| 0.68488
|
2041745c5c4aa16ecfdbfa4c2cff16ed721d65c6
| 39,805
|
py
|
Python
|
shapash/explainer/smart_explainer.py
|
GAP01/shapash
|
4223278b91a4456bf334deb26220a6501a282f13
|
[
"Apache-2.0"
] | null | null | null |
shapash/explainer/smart_explainer.py
|
GAP01/shapash
|
4223278b91a4456bf334deb26220a6501a282f13
|
[
"Apache-2.0"
] | null | null | null |
shapash/explainer/smart_explainer.py
|
GAP01/shapash
|
4223278b91a4456bf334deb26220a6501a282f13
|
[
"Apache-2.0"
] | null | null | null |
"""
Smart explainer module
"""
import logging
import copy
import pandas as pd
import numpy as np
from shapash.webapp.smart_app import SmartApp
from shapash.utils.io import save_pickle
from shapash.utils.io import load_pickle
from shapash.utils.transform import inverse_transform, apply_postprocessing
from shapash.utils.transform import adapt_contributions
from shapash.utils.utils import get_host_name
from shapash.utils.threading import CustomThread
from shapash.utils.shap_backend import shap_contributions, check_explainer, get_shap_interaction_values
from shapash.utils.check import check_model, check_label_dict, check_ypred, check_contribution_object,\
check_postprocessing, check_features_name
from shapash.manipulation.select_lines import keep_right_contributions
from .smart_state import SmartState
from .multi_decorator import MultiDecorator
from .smart_plotter import SmartPlotter
from .smart_predictor import SmartPredictor
from shapash.utils.model import predict_proba, predict
logging.basicConfig(level=logging.INFO)
class SmartExplainer:
"""
The SmartExplainer class is the main object of the Shapash library.
It allows the Data Scientists to perform many operations to make the
results more understandable :
linking encoders, models, predictions, label dict and datasets.
SmartExplainer users have several methods which are described below.
The SmartExplainer Attributes :
data: dict
Data dictionary has 3 entries. Each key returns a pd.DataFrame (regression) or a list of pd.DataFrame
(classification - The length of the lists is equivalent to the number of labels).
All pd.DataFrame have the same shape (n_samples, n_features).
For the regression case, data should be regarded as a single array
of size (n_samples, n_features, 3).
data['contrib_sorted']: pandas.DataFrame (regression) or list of pandas.DataFrame (classification)
Contains local contributions of the prediction set, with common line index.
Columns are 'contrib_1', 'contrib_2', ... and contains the top contributions
for each line from left to right. In multi-class problems, this is a list of
contributions, one for each class.
data['var_dict']: pandas.DataFrame (regression) or list of pandas.DataFrame (classification)
Must contain only ints. It gives, for each line, the list of most important features
regarding the local decomposition. In order to save space, columns are denoted by
integers, the conversion being done with the columns_dict member. In multi-class
problems, this is a list of dataframes, one for each class.
data['x_sorted']: pandas.DataFrame (regression) or list of pandas.DataFrame (classification)
It gives, for each line, the list of most important features values regarding the local
decomposition. These values can only be understood with respect to data['var_dict']
x_init: pandas.DataFrame
preprocessed dataset used by the model to perform the prediction.
x_pred: pandas.DataFrame
x_init dataset with inverse transformation with eventual postprocessing modifications.
x_contrib_plot: pandas.DataFrame
x_init dataset with inverse transformation, without postprocessing used for contribution_plot.
y_pred: pandas.DataFrame
User-specified prediction values.
contributions: pandas.DataFrame (regression) or list (classification)
local contributions aggregated if the preprocessing part requires it (e.g. one-hot encoding).
features_dict: dict
Dictionary mapping technical feature names to domain names.
inv_features_dict: dict
Inverse features_dict mapping.
label_dict: dict
Dictionary mapping integer labels to domain names (classification - target values).
inv_label_dict: dict
Inverse label_dict mapping.
columns_dict: dict
Dictionary mapping integer column number to technical feature names.
inv_columns_dict: dict
Inverse columns_dict mapping.
plot: object
Helper object containing all plotting functions (Bridge pattern).
model: model object
model used for consistency checks and to compute predict and predict_proba values
features_desc: dict
Dictionary giving the number of unique values of each feature in x_pred
features_imp: pandas.Series (regression) or list (classification)
Features importance values
preprocessing : category_encoders, ColumnTransformer, list or dict
The processing apply to the original data.
postprocessing : dict
Dictionary of postprocessing modifications to apply to the x_pred dataframe.
How to declare a new SmartExplainer object?
Example
--------
>>> xpl = SmartExplainer(features_dict=featd,label_dict=labeld)
features_dict & label_dict are both optional.
features_dict maps technical feature names to domain names.
label_dict specify the labels of target (classification).
"""
def __init__(self, features_dict={}, label_dict=None):
if not isinstance(features_dict, dict):
raise ValueError(
"""
features_dict must be a dict
"""
)
if label_dict is not None and not isinstance(label_dict, dict):
raise ValueError(
"""
label_dict must be a dict
"""
)
self.features_dict = features_dict
self.label_dict = label_dict
self.plot = SmartPlotter(self)
def compile(self, x, model, explainer=None, contributions=None, y_pred=None, preprocessing=None, postprocessing=None):
"""
The compile method is the first step to understand the model and its predictions. It performs the sorting
of contributions, reverses the preprocessing steps and runs all the calculations necessary for
quick plotting and an efficient summary of the explanation.
Most of the parameters are optional, but they all help produce results that are easier to understand.
This step can last a few moments with large datasets.
Parameters
----------
x : pandas.DataFrame
Prediction set.
IMPORTANT: this should be the raw prediction set, whose values are seen by the end user.
x is a preprocessed dataset: Shapash can apply the model to it
model : model object
model used to consistency check. model object can also be used by some method to compute
predict and predict_proba values
explainer : explainer object
explainer must be a shap object
contributions : pandas.DataFrame, np.ndarray or list
single or multiple contributions (multi-class) to handle.
if pandas.Dataframe, the index and columns should be shared with the prediction set.
if np.ndarray, index and columns will be generated according to x dataset
y_pred : pandas.Series or pandas.DataFrame, optional (default: None)
Prediction values (1 column only).
The index must be identical to the index of x_pred.
This is an interesting parameter for more explicit outputs. Shapash lets users define their own predict,
as they may wish to set their own threshold (classification)
preprocessing : category_encoders, ColumnTransformer, list, dict, optional (default: None)
--> Different types of preprocessing are available:
- A single category_encoders (OrdinalEncoder/OnehotEncoder/BaseNEncoder/BinaryEncoder/TargetEncoder)
- A single ColumnTransformer with scikit-learn encoding or category_encoders transformers
- A list with multiple category_encoders with optional (dict, list of dict)
- A list with a single ColumnTransformer with optional (dict, list of dict)
- A dict
- A list of dict
postprocessing : dict, optional (default: None)
Dictionary of postprocessing modifications to apply to the x_pred dataframe.
Dictionary with feature names as keys (or column numbers, or labels referencing feature names),
which modifies the dataset feature by feature.
--> Different types of postprocessing are available, but the syntax is this one:
One key by features, 5 different types of modifications:
>>> {
'feature1' : { 'type' : 'prefix', 'rule' : 'age: ' },
'feature2' : { 'type' : 'suffix', 'rule' : '$/week ' },
'feature3' : { 'type' : 'transcoding', 'rule': { 'code1' : 'single', 'code2' : 'married'}},
'feature4' : { 'type' : 'regex' , 'rule': { 'in' : 'AND', 'out' : ' & ' }},
'feature5' : { 'type' : 'case' , 'rule': 'lower' }
}
Only one transformation by features is possible.
Example
--------
>>> xpl.compile(x=xtest_df,model=my_model)
"""
self.x_init = x
self.x_pred = inverse_transform(self.x_init, preprocessing)
self.preprocessing = preprocessing
self.model = model
self._case, self._classes = self.check_model()
self.check_label_dict()
if self.label_dict:
self.inv_label_dict = {v: k for k, v in self.label_dict.items()}
if explainer is not None and contributions is not None:
raise ValueError("You have to specify just one of these arguments: explainer, contributions")
if contributions is None:
contributions, explainer = shap_contributions(model, self.x_init, self.check_explainer(explainer))
adapt_contrib = self.adapt_contributions(contributions)
self.state = self.choose_state(adapt_contrib)
self.contributions = self.apply_preprocessing(self.validate_contributions(adapt_contrib), preprocessing)
self.check_contributions()
self.explainer = explainer
self.y_pred = self.check_y_pred(y_pred)
self.columns_dict = {i: col for i, col in enumerate(self.x_pred.columns)}
self.inv_columns_dict = {v: k for k, v in self.columns_dict.items()}
self.check_features_dict()
self.inv_features_dict = {v: k for k, v in self.features_dict.items()}
postprocessing = self.modify_postprocessing(postprocessing)
self.check_postprocessing(postprocessing)
self.postprocessing_modifications = self.check_postprocessing_modif_strings(postprocessing)
self.postprocessing = postprocessing
if self.postprocessing_modifications:
self.x_contrib_plot = copy.deepcopy(self.x_pred)
self.x_pred = self.apply_postprocessing(postprocessing)
self.data = self.state.assign_contributions(
self.state.rank_contributions(
self.contributions,
self.x_pred
)
)
self.features_imp = None
self.features_desc = self.check_features_desc()
def add(self, y_pred=None, label_dict=None, features_dict=None):
"""
add method allows the user to add a label_dict, features_dict
or y_pred without compiling again (which can take a few moments).
y_pred can be used in the plot to color the scatter.
y_pred is needed in the to_pandas method.
label_dict and features_dict allow clearer display of the results.
Parameters
----------
y_pred : pandas.Series, optional (default: None)
Prediction values (1 column only).
The index must be identical to the index of x_pred.
label_dict: dict, optional (default: None)
Dictionary mapping integer labels to domain names.
features_dict: dict, optional (default: None)
Dictionary mapping technical feature names to domain names.
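Example
--------
>>> # illustrative call (not from the original docstring); y_pred assumed to be a pandas.Series
>>> xpl.add(y_pred=y_pred)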
"""
if y_pred is not None:
self.y_pred = self.check_y_pred(y_pred)
if label_dict is not None:
if not isinstance(label_dict, dict):
raise ValueError(
"""
label_dict must be a dict
"""
)
self.label_dict = label_dict
self.check_label_dict()
self.inv_label_dict = {v: k for k, v in self.label_dict.items()}
if features_dict is not None:
if not isinstance(features_dict, dict):
raise ValueError(
"""
features_dict must be a dict
"""
)
self.features_dict = features_dict
self.check_features_dict()
self.inv_features_dict = {v: k for k, v in self.features_dict.items()}
def choose_state(self, contributions):
"""
Select implementation of the smart explainer. Typically check if it is a
multi-class problem, in which case the implementation should be adapted
to lists of contributions.
Parameters
----------
contributions : object
Local contributions. Could also be a list of local contributions.
Returns
-------
object
SmartState or SmartMultiState, depending on the nature of the input.
"""
if isinstance(contributions, list):
return MultiDecorator(SmartState())
else:
return SmartState()
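# Illustrative note (not part of the original file): a list of contribution matrices
# (one per class) triggers the MultiDecorator wrapper, so every SmartState operation is
# applied to each class's contributions in turn; a single matrix uses SmartState directly.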
def adapt_contributions(self, contributions):
"""
If _case is "classification" and contributions a np.array or pd.DataFrame
this function transform contributions matrix in a list of 2 contributions
matrices: Opposite contributions and contributions matrices.
Parameters
----------
contributions : pandas.DataFrame, np.ndarray or list
Returns
-------
pandas.DataFrame, np.ndarray or list
contributions object modified
"""
return adapt_contributions(self._case, contributions)
def validate_contributions(self, contributions):
"""
Check len of list if _case is "classification"
Check contributions object type if _case is "regression"
Check type of contributions and transform into (list of) pd.Dataframe if necessary
Parameters
----------
contributions : pandas.DataFrame, np.ndarray or list
Returns
-------
pandas.DataFrame or list
"""
check_contribution_object(self._case, self._classes, contributions)
return self.state.validate_contributions(contributions, self.x_init)
def get_interaction_values(self, n_samples_max=None, selection=None):
"""
Compute shap interaction values for each row of x_init.
This function is only available for explainer of type TreeExplainer (used for tree based models).
Please refer to the official tree shap paper for more information : https://arxiv.org/pdf/1802.03888.pdf
Parameters
----------
n_samples_max : int, optional
Limit the number of points for which we compute the interactions.
selection : list, optional
Contains list of index, subset of the input DataFrame that we want to plot
Returns
-------
np.ndarray
Shap interaction values for each sample as an array of shape (# samples x # features x # features).
"""
x = copy.deepcopy(self.x_init)
if selection:
x = x.loc[selection]
if hasattr(self, 'x_interaction'):
if self.x_interaction.equals(x[:n_samples_max]):
return self.interaction_values
self.x_interaction = x[:n_samples_max]
self.interaction_values = get_shap_interaction_values(self.x_interaction, self.explainer)
return self.interaction_values
def apply_preprocessing(self, contributions, preprocessing=None):
"""
Reconstruct contributions for original features, taken into account a preprocessing.
Parameters
----------
contributions : object
Local contributions, or list of local contributions.
preprocessing : object
Encoder taken from scikit-learn or category_encoders
Returns
-------
object
Reconstructed local contributions in the original space. Can be a list.
"""
if preprocessing:
return self.state.inverse_transform_contributions(
contributions,
preprocessing
)
else:
return contributions
def check_postprocessing_modif_strings(self, postprocessing=None):
"""
Check if any modification of postprocessing will convert numeric values into string values.
If so, return True, otherwise False.
Parameters
----------
postprocessing: dict
Dict of postprocessing modifications to apply.
Returns
-------
modif: bool
Boolean which is True if any numerical variable will be converted into string.
"""
modif = False
if postprocessing is not None:
for key in postprocessing.keys():
dict_postprocess = postprocessing[key]
if dict_postprocess['type'] in {'prefix', 'suffix'} \
and pd.api.types.is_numeric_dtype(self.x_pred[key]):
modif = True
return modif
def modify_postprocessing(self, postprocessing=None):
"""
Modifies the postprocessing parameter so that its keys are real feature names,
in case the provided keys are column numbers or domain names (using columns_dict
or inv_features_dict).
Parameters
----------
postprocessing : Dict
Dictionary of postprocessing to modify.
Returns
-------
Dict
Modified dictionary, with the same values but keys directly referencing feature names.
"""
if postprocessing:
new_dic = dict()
for key in postprocessing.keys():
if key in self.features_dict:
new_dic[key] = postprocessing[key]
elif key in self.columns_dict.keys():
new_dic[self.columns_dict[key]] = postprocessing[key]
elif key in self.inv_features_dict:
new_dic[self.inv_features_dict[key]] = postprocessing[key]
else:
raise ValueError(f"Feature name '{key}' not found in the dataset.")
return new_dic
def check_postprocessing(self, postprocessing):
"""
Check that postprocessing parameter has good attributes.
Check if postprocessing is a dictionary and if its parameters are valid.
Parameters
----------
postprocessing : dict
Dictionary of postprocessing that needs to be checked.
"""
check_postprocessing(self.x_pred, postprocessing)
def apply_postprocessing(self, postprocessing=None):
"""
Modifies x_pred Dataframe according to postprocessing modifications, if exists.
Parameters
----------
postprocessing: Dict
Dictionary of postprocessing modifications to apply to x_pred.
Returns
-------
pandas.Dataframe
Returns x_pred if postprocessing is empty, modified dataframe otherwise.
"""
if postprocessing:
return apply_postprocessing(self.x_pred, postprocessing)
else:
return self.x_pred
def check_y_pred(self, ypred=None):
"""
Check if y_pred is a one column dataframe of integer or float
and if y_pred index matches x_pred index
Parameters
----------
ypred: pandas.DataFrame (optional)
User-specified prediction values.
"""
return check_ypred(self.x_pred, ypred)
def check_model(self):
"""
Check the model attributes to determine whether this is a regression
or a classification case, and retrieve the target classes if relevant.
Returns
-------
string:
'regression' or 'classification' according to the attributes of the model
"""
_case, _classes = check_model(self.model)
return _case, _classes
def check_label_dict(self):
"""
Check if label_dict and model _classes match
"""
if self._case != "regression":
return check_label_dict(self.label_dict, self._case, self._classes)
def check_features_dict(self):
"""
Check the features_dict and add the missing keys when some
input X columns are not present
"""
for feature in (set(list(self.columns_dict.values())) - set(list(self.features_dict))):
self.features_dict[feature] = feature
def check_contributions(self):
"""
Check if contributions and prediction set match in terms of shape and index.
"""
if not self.state.check_contributions(self.contributions, self.x_pred):
raise ValueError(
"""
Prediction set and contributions should have exactly the same number of lines
and number of columns. The order of the columns must be the same.
Please check x, contributions and preprocessing arguments.
"""
)
def check_label_name(self, label, origin=None):
"""
Convert a string label into an integer. If the label is already
an integer, nothing is done. In all other cases an error is raised.
Parameters
----------
label: int or string
Integer (id) or string (business names)
origin: None, 'num', 'code', 'value' (default: None)
Kind of the label used in parameter
Returns
-------
tuple
label num, label code (class of the mode), label value
"""
if origin is None:
if label in self._classes:
origin = 'code'
elif self.label_dict is not None and label in self.label_dict.values():
origin = 'value'
elif isinstance(label, int) and label in range(-1, len(self._classes)):
origin = 'num'
try:
if origin == 'num':
label_num = label
label_code = self._classes[label]
label_value = self.label_dict[label_code] if self.label_dict else label_code
elif origin == 'code':
label_code = label
label_num = self._classes.index(label)
label_value = self.label_dict[label_code] if self.label_dict else label_code
elif origin == 'value':
label_code = self.inv_label_dict[label]
label_num = self._classes.index(label_code)
label_value = label
else:
raise ValueError
except ValueError:
raise Exception({"message": "Origin must be 'num', 'code' or 'value'."})
except Exception:
raise Exception({"message": f"Label ({label}) not found for origin ({origin})"})
return label_num, label_code, label_value
def check_features_name(self, features):
"""
Convert a list of feature names (string) or features ids into features ids.
Features names can be part of columns_dict or features_dict.
Parameters
----------
features : List
List of ints (columns ids) or of strings (business names)
Returns
-------
list of ints
Columns ids compatible with var_dict
"""
return check_features_name(self.columns_dict, self.features_dict, features)
def check_features_desc(self):
"""
Check x_pred dataframe, compute value counts of each feature
used in plot part
Returns
-------
dict
Number of unique values in x_pred
"""
return dict(self.x_pred.nunique())
def check_attributes(self, attribute):
"""
Check that the explainer has the specified attribute
Parameters
----------
attribute: string
the label of the attribute to test
Returns
-------
Object content of the attribute specified from SmartExplainer instance
"""
if not hasattr(self, attribute):
raise ValueError(
"""
attribute {0} isn't an attribute of this explainer.
""".format(attribute))
return self.__dict__[attribute]
def filter(
self,
features_to_hide=None,
threshold=None,
positive=None,
max_contrib=None
):
"""
The filter method is an important method which allows summarizing the local explainability
by using user-defined parameters which correspond to the use case.
The filter method is used with the local_plot method of SmartPlotter to see the concrete result of this summary
with a local contribution bar chart.
Please watch the local_plot tutorial to see how these two methods are combined with a concrete example.
Parameters
----------
features_to_hide : list, optional (default: None)
List of strings, containing features to hide.
threshold : float, optional (default: None)
Absolute threshold below which any contribution is hidden.
positive: bool, optional (default: None)
If True, hide negative values. False, hide positive values
If None, hide nothing.
max_contrib : int, optional (default: None)
Maximum number of contributions to show.
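Example
--------
>>> # illustrative call (not from the original docstring); parameter values are placeholders
>>> xpl.filter(max_contrib=3, positive=True)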
"""
mask = [self.state.init_mask(self.data['contrib_sorted'], True)]
if features_to_hide:
mask.append(
self.state.hide_contributions(
self.data['var_dict'],
features_list=self.check_features_name(features_to_hide)
)
)
if threshold:
mask.append(
self.state.cap_contributions(
self.data['contrib_sorted'],
threshold=threshold
)
)
if positive is not None:
mask.append(
self.state.sign_contributions(
self.data['contrib_sorted'],
positive=positive
)
)
self.mask = self.state.combine_masks(mask)
if max_contrib:
self.mask = self.state.cutoff_contributions(self.mask, max_contrib=max_contrib)
self.masked_contributions = self.state.compute_masked_contributions(
self.data['contrib_sorted'],
self.mask
)
self.mask_params = {
'features_to_hide': features_to_hide,
'threshold': threshold,
'positive': positive,
'max_contrib': max_contrib
}
def save(self, path):
"""
Save method allows the user to save the SmartExplainer object on disk
using a pickle file.
Save method can be useful: you don't have to recompile to display
results later
Parameters
----------
path : str
File path to store the pickle file
Example
--------
>>> xpl.save('path_to_pkl/xpl.pkl')
"""
dict_to_save = {}
for att in self.__dict__.keys():
if isinstance(getattr(self, att), (list, dict, pd.DataFrame, pd.Series, type(None), bool)) or att == "model":
dict_to_save.update({att: getattr(self, att)})
save_pickle(dict_to_save, path)
def load(self, path):
"""
Load method allows a Shapash user to use a pickled SmartExplainer.
To use this method you must first declare your SmartExplainer object.
Watch the following example.
Parameters
----------
path : str
File path of the pickle file.
Example
--------
>>> xpl = SmartExplainer()
>>> xpl.load('path_to_pkl/xpl.pkl')
"""
dict_to_load = load_pickle(path)
if isinstance(dict_to_load, dict):
for elem in dict_to_load.keys():
setattr(self, elem, dict_to_load[elem])
self._case, self._classes = self.check_model()
self.state = self.choose_state(self.contributions)
else:
raise ValueError(
"pickle file must contain dictionary"
)
def predict_proba(self):
"""
The predict_proba method computes the probability values for each x_init row
"""
self.proba_values = predict_proba(self.model, self.x_init, self._classes)
def predict(self):
"""
The predict method computes the model output for each x_init row and stores it in y_pred attribute
"""
self.y_pred = predict(self.model, self.x_init)
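# Minimal usage sketch (assuming a compiled explainer ``xpl``): predict fills y_pred,
# which to_pandas below requires; proba=True makes to_pandas call predict_proba internally.
# >>> xpl.predict()
# >>> summary_df = xpl.to_pandas(max_contrib=2, proba=True)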
def to_pandas(
self,
features_to_hide=None,
threshold=None,
positive=None,
max_contrib=None,
proba=False
):
"""
The to_pandas method allows exporting the summary of local explainability.
This method proposes a set of parameters to summarize the explainability of each point.
If the user does not specify any, the to_pandas method uses the parameters specified during
the last execution of the filter method.
In the classification case, the to_pandas method summarizes the explainability which corresponds
to the predicted values specified by the user (with the compile or add method).
The proba parameter displays the corresponding predict_proba value for each point.
In the classification case, there are two ways to use this to_pandas method:
- Provide a real prediction set to explain
- Focus on a constant target value and look at the proba and explainability corresponding to each point
(in that case, specify a constant pd.Series with the add or compile method)
Examples are presented in the local_plot tutorial (please check the tutorial part of this doc)
Parameters
----------
features_to_hide : list, optional (default: None)
List of strings, containing features to hide.
threshold : float, optional (default: None)
Absolute threshold below which any contribution is hidden.
positive: bool, optional (default: None)
If True, hide negative values. Hide positive values otherwise. If None, hide nothing.
max_contrib : int, optional (default: None)
Number of contributions to show in the pandas df
proba : bool, optional (default: False)
adding proba in output df
Returns
-------
pandas.DataFrame
- selected explanation of each row for classification case
Examples
--------
>>> summary_df = xpl.to_pandas(max_contrib=2,proba=True)
>>> summary_df
pred proba feature_1 value_1 contribution_1 feature_2 value_2 contribution_2
0 0 0.756416 Sex 1.0 0.322308 Pclass 3.0 0.155069
1 3 0.628911 Sex 2.0 0.585475 Pclass 1.0 0.370504
2 0 0.543308 Sex 2.0 -0.486667 Pclass 3.0 0.255072
"""
# Classification: y_pred is needed
if self.y_pred is None:
raise ValueError(
"You have to specify y_pred argument. Please use add() or compile() method"
)
# Apply filter method if necessary
if all(var is None for var in [features_to_hide, threshold, positive, max_contrib]) \
and hasattr(self, 'mask_params'):
print('to_pandas params: ' + str(self.mask_params))
else:
self.filter(features_to_hide=features_to_hide,
threshold=threshold,
positive=positive,
max_contrib=max_contrib)
# Summarize information
self.data['summary'] = self.state.summarize(
self.data['contrib_sorted'],
self.data['var_dict'],
self.data['x_sorted'],
self.mask,
self.columns_dict,
self.features_dict
)
# Matching with y_pred
if proba:
self.predict_proba()
proba_values = self.proba_values
else:
proba_values = None
y_pred, summary = keep_right_contributions(self.y_pred, self.data['summary'],
self._case, self._classes,
self.label_dict, proba_values)
return pd.concat([y_pred, summary], axis=1)
def compute_features_import(self, force=False):
"""
Compute relative feature importances as the sum of the absolute values
of the contributions of each feature.
Feature importances are expressed on a base-100 scale.
Parameters
----------
force: bool (default: False)
True to force the computation even if the feature importances are
already calculated
Returns
-------
pd.Series (regression)
or list of pd.Series (classification: one Series for each target modality)
Each Series: feature importance, one row per feature,
index of the Series = contributions.columns
"""
if self.features_imp is None or force:
self.features_imp = self.state.compute_features_import(self.contributions)
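# Illustrative sketch (assuming a compiled explainer ``xpl``): the result is cached in
# ``features_imp`` (a pd.Series for regression, or a list of pd.Series for classification).
# >>> xpl.compute_features_import()
# >>> xpl.features_imp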
def init_app(self):
"""
Simple initialization of the SmartApp, for the case where the SmartApp is hosted by other means
"""
self.smartapp = SmartApp(self)
def run_app(self, port: int = None, host: str = None) -> CustomThread:
"""
The run_app method launches the interpretability web app associated with the shapash object.
It can be used directly in a Jupyter notebook;
the link to the web app is shown directly in the Jupyter output.
Use the object's kill() method to stop the current instance.
Examples are presented in the web_app tutorial (please check tutorial part of this doc)
Parameters
----------
port: int (default: None)
The port is by default on 8050. You can specify a custom port
for your webapp.
host: str (default: None)
The default host is '0.0.0.0'. You can specify a custom
ip address for your app
Returns
-------
CustomThread
Return the thread instance of your server.
Example
--------
>>> app = xpl.run_app()
>>> app.kill()
"""
if self.y_pred is None:
self.predict()
if hasattr(self, '_case'):
self.smartapp = SmartApp(self)
if host is None:
host = "0.0.0.0"
if port is None:
port = 8050
host_name = get_host_name()
server_instance = CustomThread(
target=lambda: self.smartapp.app.run_server(debug=False, host=host, port=port))
if host_name is None:
host_name = host
elif host != "0.0.0.0":
host_name = host
server_instance.start()
logging.info(f"Your Shapash application run on http://{host_name}:{port}/")
logging.info("Use the method .kill() to down your app.")
return server_instance
else:
raise ValueError("Explainer must be compiled before running app.")
def to_smartpredictor(self):
"""
Create a SmartPredictor object built from the following attributes
of the SmartExplainer object:
features_dict: dict
Dictionary mapping technical feature names to domain names.
label_dict: dict
Dictionary mapping integer labels to domain names (classification - target values).
columns_dict: dict
Dictionary mapping integer column number to technical feature names.
features_types: dict
Dictionary mapping each feature to its required type.
model: model object
model used to check the different target values and estimate predict proba
explainer : explainer object
explainer must be a shap object
preprocessing: category_encoders, ColumnTransformer, list or dict
The preprocessing applied to the original data.
postprocessing: dict
Dictionary of postprocessing modifications to apply to the x_pred dataframe.
_case: string
String that informs whether the model is used for a classification or a regression problem.
_classes: list, None
List of labels if the model is used for a classification problem, None otherwise.
mask_params: dict (optional)
Dictionary allowing the user to define and apply a filter to summarize the local explainability.
"""
if self.explainer is None:
raise ValueError("""SmartPredictor need an explainer, please compile without contributions or specify the
explainer used. Make change in compile() step""")
self.features_types = {features: str(self.x_pred[features].dtypes) for features in self.x_pred.columns}
listattributes = ["features_dict", "model", "columns_dict", "explainer", "features_types",
"label_dict", "preprocessing", "postprocessing"]
params_smartpredictor = [self.check_attributes(attribute) for attribute in listattributes]
if not hasattr(self,"mask_params"):
self.mask_params = {
"features_to_hide": None,
"threshold": None,
"positive": None,
"max_contrib": None
}
params_smartpredictor.append(self.mask_params)
return SmartPredictor(*params_smartpredictor)
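# Illustrative sketch: the returned SmartPredictor carries the dictionaries, model,
# explainer and mask_params listed above, making it a lighter object for deployment.
# Persisting it with a pickle-based helper is an assumption, not shown here.
# >>> predictor = xpl.to_smartpredictor()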
def check_x_y_attributes(self, x_str, y_str):
"""
Check if x_str and y_str are attributes of the SmartExplainer
Parameters
----------
x_str: string
label of the attribute x
y_str: string
label of the attribute y
Returns
-------
List of the objects held by the attributes x and y.
"""
if not (isinstance(x_str, str) and isinstance(y_str, str)):
raise ValueError(
"""
x and y must be strings.
"""
)
params_checkypred = []
attributs_explainer = [x_str, y_str]
for attribut in attributs_explainer:
if hasattr(self, attribut):
params_checkypred.append(self.__dict__[attribut])
else:
params_checkypred.append(None)
return params_checkypred
def check_explainer(self, explainer):
"""
Check if the explainer class corresponds to a shap explainer object
"""
return check_explainer(explainer)
| 39.924774
| 122
| 0.616656
|
fa576414e811b144e70930efd78051478bbfc481
| 805
|
py
|
Python
|
logs/migrations/0001_initial.py
|
afg984/nthucourses
|
9f28f8e9480b9d7a9db1f9c023955fb23b1a28aa
|
[
"BSD-3-Clause"
] | null | null | null |
logs/migrations/0001_initial.py
|
afg984/nthucourses
|
9f28f8e9480b9d7a9db1f9c023955fb23b1a28aa
|
[
"BSD-3-Clause"
] | null | null | null |
logs/migrations/0001_initial.py
|
afg984/nthucourses
|
9f28f8e9480b9d7a9db1f9c023955fb23b1a28aa
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Log',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('message', models.CharField(max_length=256)),
('exc_name', models.CharField(max_length=256, default='')),
('traceback', models.TextField(default='')),
('success', models.NullBooleanField()),
('started', models.DateTimeField(auto_now_add=True)),
('ended', models.DateTimeField(null=True)),
],
),
]
| 30.961538
| 114
| 0.567702
|
7877b4a5681246924fd790db4485244ed9b35f3c
| 890
|
py
|
Python
|
readthedocs/builds/migrations/0043_add_cancelled_state.py
|
yarons/readthedocs.org
|
05c99a0adc222a1d48654d305b492ec142c3026b
|
[
"MIT"
] | 4,054
|
2015-01-01T00:58:07.000Z
|
2019-06-28T05:50:49.000Z
|
readthedocs/builds/migrations/0043_add_cancelled_state.py
|
yarons/readthedocs.org
|
05c99a0adc222a1d48654d305b492ec142c3026b
|
[
"MIT"
] | 4,282
|
2015-01-01T21:38:49.000Z
|
2019-06-28T15:41:00.000Z
|
readthedocs/builds/migrations/0043_add_cancelled_state.py
|
yarons/readthedocs.org
|
05c99a0adc222a1d48654d305b492ec142c3026b
|
[
"MIT"
] | 3,224
|
2015-01-01T07:38:45.000Z
|
2019-06-28T09:19:10.000Z
|
# Generated by Django 3.2.13 on 2022-05-04 11:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("builds", "0042_version_state"),
]
operations = [
migrations.AlterField(
model_name="build",
name="state",
field=models.CharField(
choices=[
("triggered", "Triggered"),
("cloning", "Cloning"),
("installing", "Installing"),
("building", "Building"),
("uploading", "Uploading"),
("finished", "Finished"),
("cancelled", "Cancelled"),
],
db_index=True,
default="finished",
max_length=55,
verbose_name="State",
),
),
]
| 26.969697
| 49
| 0.44382
|
73ac13439dcb5cb60cc320b3fa4ba6c274e2430d
| 23,827
|
py
|
Python
|
pytorch_widedeep/callbacks.py
|
5uperpalo/pytorch-widedeep
|
c95c327671a21780f33f3fefdeba8e2c96aa426b
|
[
"MIT"
] | null | null | null |
pytorch_widedeep/callbacks.py
|
5uperpalo/pytorch-widedeep
|
c95c327671a21780f33f3fefdeba8e2c96aa426b
|
[
"MIT"
] | null | null | null |
pytorch_widedeep/callbacks.py
|
5uperpalo/pytorch-widedeep
|
c95c327671a21780f33f3fefdeba8e2c96aa426b
|
[
"MIT"
] | null | null | null |
"""
Code here is mostly based on the code from the torchsample and Keras packages
CREDIT TO THE TORCHSAMPLE AND KERAS TEAMS
"""
import os
import datetime
import warnings
import numpy as np
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
from pytorch_widedeep.wdtypes import * # noqa: F403
def _get_current_time():
return datetime.datetime.now().strftime("%B %d, %Y - %I:%M%p")
def _is_metric(monitor: str):
# We assume no one will use f3 or more
if any([s in monitor for s in ["acc", "prec", "rec", "fscore", "f1", "f2"]]):
return True
else:
return False
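# For example, _is_metric("val_acc") is True (the monitored quantity is treated as a
# score to maximize in "auto" mode below), while _is_metric("val_loss") is False
# (treated as a loss to minimize).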
class CallbackContainer(object):
"""
Container holding a list of callbacks.
"""
def __init__(self, callbacks: Optional[List] = None, queue_length: int = 10):
instantiated_callbacks = []
if callbacks is not None:
for callback in callbacks:
if isinstance(callback, type):
instantiated_callbacks.append(callback())
else:
instantiated_callbacks.append(callback)
self.callbacks = [c for c in instantiated_callbacks]
self.queue_length = queue_length
def set_params(self, params):
for callback in self.callbacks:
callback.set_params(params)
def set_model(self, model: Any):
self.model = model
for callback in self.callbacks:
callback.set_model(model)
def set_trainer(self, trainer: Any):
self.trainer = trainer
for callback in self.callbacks:
callback.set_trainer(trainer)
def on_epoch_begin(self, epoch: int, logs: Optional[Dict] = None):
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_begin(epoch, logs)
def on_epoch_end(
self, epoch: int, logs: Optional[Dict] = None, metric: Optional[float] = None
):
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_end(epoch, logs, metric)
def on_batch_begin(self, batch: int, logs: Optional[Dict] = None):
logs = logs or {}
for callback in self.callbacks:
callback.on_batch_begin(batch, logs)
def on_batch_end(self, batch: int, logs: Optional[Dict] = None):
logs = logs or {}
for callback in self.callbacks:
callback.on_batch_end(batch, logs)
def on_train_begin(self, logs: Optional[Dict] = None):
logs = logs or {}
logs["start_time"] = _get_current_time()
for callback in self.callbacks:
callback.on_train_begin(logs)
def on_eval_begin(self, logs: Optional[Dict] = None):
# at the moment only used to reset metrics before eval
logs = logs or {}
for callback in self.callbacks:
callback.on_eval_begin(logs)
def on_train_end(self, logs: Optional[Dict] = None):
logs = logs or {}
# logs['final_loss'] = self.model.history.epoch_losses[-1],
# logs['best_loss'] = min(self.model.history.epoch_losses),
# logs['stop_time'] = _get_current_time()
for callback in self.callbacks:
callback.on_train_end(logs)
class Callback(object):
"""
Base class used to build new callbacks.
"""
def __init__(self):
pass
def set_params(self, params):
self.params = params
def set_model(self, model: Any):
self.model = model
def set_trainer(self, trainer: Any):
self.trainer = trainer
def on_epoch_begin(self, epoch: int, logs: Optional[Dict] = None):
pass
def on_epoch_end(
self, epoch: int, logs: Optional[Dict] = None, metric: Optional[float] = None
):
pass
def on_batch_begin(self, batch: int, logs: Optional[Dict] = None):
pass
def on_batch_end(self, batch: int, logs: Optional[Dict] = None):
pass
def on_train_begin(self, logs: Optional[Dict] = None):
pass
def on_eval_begin(self, logs: Optional[Dict] = None):
# at the moment only used to reset metrics before eval
pass
def on_train_end(self, logs: Optional[Dict] = None):
pass
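# A minimal sketch (not part of the library) of a user-defined callback: subclass
# ``Callback`` and override any hook; ``self.trainer`` and ``self.model`` are injected
# by ``CallbackContainer`` through set_trainer/set_model. The logged key name used
# below ("train_loss") is an assumption about the contents of the logs dictionary.
# >>> class PrintLossCallback(Callback):
# ...     def on_epoch_end(self, epoch, logs=None, metric=None):
# ...         logs = logs or {}
# ...         print(f"epoch {epoch}: train_loss={logs.get('train_loss')}")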
class History(Callback):
r"""Callback that records metrics to a ``history`` attribute.
This callback runs by default within :obj:`Trainer`, therefore, should not
be passed to the :obj:`Trainer`. It is included here just for completeness.
"""
def on_train_begin(self, logs: Optional[Dict] = None):
self.trainer.history = {}
def on_epoch_end(
self, epoch: int, logs: Optional[Dict] = None, metric: Optional[float] = None
):
logs = logs or {}
for k, v in logs.items():
self.trainer.history.setdefault(k, []).append(v)
class LRShedulerCallback(Callback):
r"""Callback for the learning rate schedulers to take a step
This callback runs by default within :obj:`Trainer`, therefore, should not
be passed to the :obj:`Trainer`. It is included here just for completeness.
"""
def on_batch_end(self, batch: int, logs: Optional[Dict] = None):
if self.trainer.lr_scheduler is not None:
if self._multiple_scheduler():
for (
model_name,
scheduler,
) in self.trainer.lr_scheduler._schedulers.items():
if self._is_cyclic(model_name):
scheduler.step()
elif self.trainer.cyclic_lr:
self.trainer.lr_scheduler.step()
def on_epoch_end(
self, epoch: int, logs: Optional[Dict] = None, metric: Optional[float] = None
):
if self.trainer.lr_scheduler is not None:
if self._multiple_scheduler():
for (
model_name,
scheduler,
) in self.trainer.lr_scheduler._schedulers.items():
if not self._is_cyclic(model_name):
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(metric)
else:
scheduler.step()
elif not self.trainer.cyclic_lr:
if isinstance(self.trainer.lr_scheduler, ReduceLROnPlateau):
self.trainer.lr_scheduler.step(metric)
else:
self.trainer.lr_scheduler.step()
def _multiple_scheduler(self):
return self.trainer.lr_scheduler.__class__.__name__ == "MultipleLRScheduler"
def _is_cyclic(self, model_name: str):
return (
self._has_scheduler(model_name)
and "cycl"
in self.trainer.lr_scheduler._schedulers[
model_name
].__class__.__name__.lower()
)
def _has_scheduler(self, model_name: str):
return model_name in self.trainer.lr_scheduler._schedulers
class LRHistory(Callback):
r"""Saves the learning rates during training to a ``lr_history`` attribute.
Callbacks are passed as input parameters to the :obj:`Trainer` class. See
:class:`pytorch_widedeep.trainer.Trainer`
Parameters
----------
n_epochs: int
number of epochs during training
Examples
--------
>>> from pytorch_widedeep.callbacks import LRHistory
>>> from pytorch_widedeep.models import TabMlp, Wide, WideDeep
>>> from pytorch_widedeep.training import Trainer
>>>
>>> embed_input = [(u, i, j) for u, i, j in zip(["a", "b", "c"][:4], [4] * 3, [8] * 3)]
>>> column_idx = {k: v for v, k in enumerate(["a", "b", "c"])}
>>> wide = Wide(10, 1)
>>> deep = TabMlp(mlp_hidden_dims=[8, 4], column_idx=column_idx, embed_input=embed_input)
>>> model = WideDeep(wide, deep)
>>> trainer = Trainer(model, objective="regression", callbacks=[LRHistory(n_epochs=10)])
"""
def __init__(self, n_epochs: int):
super(LRHistory, self).__init__()
self.n_epochs = n_epochs
def on_epoch_begin(self, epoch: int, logs: Optional[Dict] = None):
if epoch == 0 and self.trainer.lr_scheduler is not None:
self.trainer.lr_history = {}
if self._multiple_scheduler():
self._save_group_lr_mulitple_scheduler(step_location="on_epoch_begin")
else:
self._save_group_lr(self.trainer.optimizer)
def on_batch_end(self, batch: int, logs: Optional[Dict] = None):
if self.trainer.lr_scheduler is not None:
if self._multiple_scheduler():
self._save_group_lr_mulitple_scheduler(step_location="on_batch_end")
elif self.trainer.cyclic_lr:
self._save_group_lr(self.trainer.optimizer)
def on_epoch_end(
self, epoch: int, logs: Optional[Dict] = None, metric: Optional[float] = None
):
if epoch != (self.n_epochs - 1) and self.trainer.lr_scheduler is not None:
if self._multiple_scheduler():
self._save_group_lr_mulitple_scheduler(step_location="on_epoch_end")
elif not self.trainer.cyclic_lr:
self._save_group_lr(self.trainer.optimizer)
def _save_group_lr_mulitple_scheduler(self, step_location: str):
for model_name, opt in self.trainer.optimizer._optimizers.items():
if step_location == "on_epoch_begin":
self._save_group_lr(opt, model_name)
if step_location == "on_batch_end":
if self._is_cyclic(model_name):
self._save_group_lr(opt, model_name)
if step_location == "on_epoch_end":
if not self._is_cyclic(model_name):
self._save_group_lr(opt, model_name)
def _save_group_lr(self, opt: Optimizer, model_name: Optional[str] = None):
for group_idx, group in enumerate(opt.param_groups):
if model_name is not None:
group_name = ("_").join(["lr", model_name, str(group_idx)])
else:
group_name = ("_").join(["lr", str(group_idx)])
self.trainer.lr_history.setdefault(group_name, []).append(group["lr"])
def _multiple_scheduler(self):
return self.trainer.lr_scheduler.__class__.__name__ == "MultipleLRScheduler"
def _is_cyclic(self, model_name: str):
return (
self._has_scheduler(model_name)
and "cycl"
in self.trainer.lr_scheduler._schedulers[
model_name
].__class__.__name__.lower()
)
def _has_scheduler(self, model_name: str):
return model_name in self.trainer.lr_scheduler._schedulers
class ModelCheckpoint(Callback):
r"""Saves the model after every epoch.
This class is almost identical to the corresponding keras class.
Therefore, **credit** to the Keras Team.
Callbacks are passed as input parameters to the :obj:`Trainer` class. See
:class:`pytorch_widedeep.trainer.Trainer`
Parameters
----------
filepath: str
Full path to save the output weights. It must contain only the root of
the filenames. The epoch number and the ``.p`` extension will
be added, e.g. ``filepath="path/to/output_weights/weights_out"``, and
the saved files in that directory will be named: ``weights_out_1.p,
weights_out_2.p, ...``
monitor: str, default="loss"
quantity to monitor. Typically 'val_loss' or metric name (e.g. 'val_acc')
verbose:int, default=0,
verbosity mode
save_best_only: bool, default=False,
the latest best model according to the quantity monitored will not be
overwritten.
mode: str, default="auto",
If ``save_best_only=True``, the decision to overwrite the current save
file is made based on either the maximization or the minimization of
the monitored quantity. For `'acc'`, this should be `'max'`, for
`'loss'` this should be `'min'`, etc. In `'auto'` mode, the
direction is automatically inferred from the name of the monitored
quantity.
period: int, default=1,
Interval (number of epochs) between checkpoints.
max_save: int, default=-1
Maximum number of outputs to save. If -1, all outputs will be saved
Attributes
----------
best: float
best metric
best_epoch: int
best epoch
Examples
--------
>>> from pytorch_widedeep.callbacks import ModelCheckpoint
>>> from pytorch_widedeep.models import TabMlp, Wide, WideDeep
>>> from pytorch_widedeep.training import Trainer
>>>
>>> embed_input = [(u, i, j) for u, i, j in zip(["a", "b", "c"][:4], [4] * 3, [8] * 3)]
>>> column_idx = {k: v for v, k in enumerate(["a", "b", "c"])}
>>> wide = Wide(10, 1)
>>> deep = TabMlp(mlp_hidden_dims=[8, 4], column_idx=column_idx, embed_input=embed_input)
>>> model = WideDeep(wide, deep)
>>> trainer = Trainer(model, objective="regression", callbacks=[ModelCheckpoint(filepath='checkpoints/weights_out')])
"""
def __init__(
self,
filepath: str,
monitor: str = "val_loss",
verbose: int = 0,
save_best_only: bool = False,
mode: str = "auto",
period: int = 1,
max_save: int = -1,
):
super(ModelCheckpoint, self).__init__()
self.filepath = filepath
self.monitor = monitor
self.verbose = verbose
self.save_best_only = save_best_only
self.mode = mode
self.period = period
self.max_save = max_save
self.epochs_since_last_save = 0
if len(self.filepath.split("/")[:-1]) == 0:
raise ValueError(
"'filepath' must be the full path to save the output weights,"
" including the root of the filenames. e.g. 'checkpoints/weights_out'"
)
root_dir = ("/").join(self.filepath.split("/")[:-1])
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if self.max_save > 0:
self.old_files: List[str] = []
if self.mode not in ["auto", "min", "max"]:
warnings.warn(
"ModelCheckpoint mode %s is unknown, "
"fallback to auto mode." % (self.mode),
RuntimeWarning,
)
self.mode = "auto"
if self.mode == "min":
self.monitor_op = np.less
self.best = np.Inf
elif self.mode == "max":
self.monitor_op = np.greater
self.best = -np.Inf
else:
if _is_metric(self.monitor):
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
def on_epoch_end( # noqa: C901
self, epoch: int, logs: Optional[Dict] = None, metric: Optional[float] = None
):
logs = logs or {}
self.epochs_since_last_save += 1
if self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
filepath = "{}_{}.p".format(self.filepath, epoch + 1)
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
warnings.warn(
"Can save best model only with %s available, "
"skipping." % (self.monitor),
RuntimeWarning,
)
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
print(
"\nEpoch %05d: %s improved from %0.5f to %0.5f,"
" saving model to %s"
% (
epoch + 1,
self.monitor,
self.best,
current,
filepath,
)
)
self.best = current
self.best_epoch = epoch
torch.save(self.model.state_dict(), filepath)
if self.max_save > 0:
if len(self.old_files) == self.max_save:
try:
os.remove(self.old_files[0])
except FileNotFoundError:
pass
self.old_files = self.old_files[1:]
self.old_files.append(filepath)
else:
if self.verbose > 0:
print(
"\nEpoch %05d: %s did not improve from %0.5f"
% (epoch + 1, self.monitor, self.best)
)
else:
if self.verbose > 0:
print("\nEpoch %05d: saving model to %s" % (epoch + 1, filepath))
torch.save(self.model.state_dict(), filepath)
if self.max_save > 0:
if len(self.old_files) == self.max_save:
try:
os.remove(self.old_files[0])
except FileNotFoundError:
pass
self.old_files = self.old_files[1:]
self.old_files.append(filepath)
def __getstate__(self):
d = self.__dict__
self_dict = {k: d[k] for k in d if k not in ["trainer", "model"]}
return self_dict
def __setstate__(self, state):
self.__dict__ = state
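# Illustrative sketch of recovering the best checkpoint after training: per on_epoch_end
# above, weights are written to "{filepath}_{epoch + 1}.p" and the best epoch index is
# kept in ``best_epoch`` when save_best_only=True.
# >>> mc = ModelCheckpoint(filepath="checkpoints/weights_out", save_best_only=True)
# >>> # ... train with Trainer(..., callbacks=[mc]) ...
# >>> # model.load_state_dict(torch.load(f"checkpoints/weights_out_{mc.best_epoch + 1}.p"))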
class EarlyStopping(Callback):
r"""Stop training when a monitored quantity has stopped improving.
This class is almost identical to the corresponding keras class.
Therefore, **credit** to the Keras Team.
Callbacks are passed as input parameters to the :obj:`Trainer` class. See
:class:`pytorch_widedeep.trainer.Trainer`
Parameters
-----------
monitor: str, default='val_loss'.
Quantity to monitor. Typically 'val_loss' or metric name (e.g. 'val_acc')
min_delta: float, default=0.
minimum change in the monitored quantity to qualify as an
improvement, i.e. an absolute change of less than min_delta, will
count as no improvement.
patience: int, default=10.
Number of epochs that produced the monitored quantity with no
improvement after which training will be stopped.
verbose: int.
verbosity mode.
mode: str, default='auto'
one of {'`auto`', '`min`', '`max`'}. In `'min'` mode, training will
stop when the quantity monitored has stopped decreasing; in `'max'`
mode it will stop when the quantity monitored has stopped increasing;
in `'auto'` mode, the direction is automatically inferred from the
name of the monitored quantity.
baseline: float, Optional. default=None.
Baseline value for the monitored quantity to reach. Training will
stop if the model does not show improvement over the baseline.
restore_best_weights: bool, default=False
Whether to restore model weights from the epoch with the best
value of the monitored quantity. If ``False``, the model weights
obtained at the last step of training are used.
Attributes
----------
best: float
best metric
stopped_epoch: int
epoch when the training stopped
Examples
--------
>>> from pytorch_widedeep.callbacks import EarlyStopping
>>> from pytorch_widedeep.models import TabMlp, Wide, WideDeep
>>> from pytorch_widedeep.training import Trainer
>>>
>>> embed_input = [(u, i, j) for u, i, j in zip(["a", "b", "c"][:4], [4] * 3, [8] * 3)]
>>> column_idx = {k: v for v, k in enumerate(["a", "b", "c"])}
>>> wide = Wide(10, 1)
>>> deep = TabMlp(mlp_hidden_dims=[8, 4], column_idx=column_idx, embed_input=embed_input)
>>> model = WideDeep(wide, deep)
>>> trainer = Trainer(model, objective="regression", callbacks=[EarlyStopping(patience=10)])
"""
def __init__(
self,
monitor: str = "val_loss",
min_delta: float = 0.0,
patience: int = 10,
verbose: int = 0,
mode: str = "auto",
baseline: Optional[float] = None,
restore_best_weights: bool = False,
):
super(EarlyStopping, self).__init__()
self.monitor = monitor
self.min_delta = min_delta
self.patience = patience
self.verbose = verbose
self.mode = mode
self.baseline = baseline
self.restore_best_weights = restore_best_weights
self.wait = 0
self.stopped_epoch = 0
self.state_dict = None
if self.mode not in ["auto", "min", "max"]:
warnings.warn(
"EarlyStopping mode %s is unknown, "
"fallback to auto mode." % self.mode,
RuntimeWarning,
)
self.mode = "auto"
if self.mode == "min":
self.monitor_op = np.less
elif self.mode == "max":
self.monitor_op = np.greater
else:
if _is_metric(self.monitor):
self.monitor_op = np.greater
else:
self.monitor_op = np.less
if self.monitor_op == np.greater:
self.min_delta *= 1
else:
self.min_delta *= -1
def on_train_begin(self, logs: Optional[Dict] = None):
# Allow instances to be re-used
self.wait = 0
self.stopped_epoch = 0
if self.baseline is not None:
self.best = self.baseline
else:
self.best = np.Inf if self.monitor_op == np.less else -np.Inf
def on_epoch_end(
self, epoch: int, logs: Optional[Dict] = None, metric: Optional[float] = None
):
current = self.get_monitor_value(logs)
if current is None:
return
if self.monitor_op(current - self.min_delta, self.best):
self.best = current
self.wait = 0
if self.restore_best_weights:
self.state_dict = self.model.state_dict()
else:
self.wait += 1
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.trainer.early_stop = True
if self.restore_best_weights:
if self.verbose > 0:
print("Restoring model weights from the end of the best epoch")
self.model.load_state_dict(self.state_dict)
def on_train_end(self, logs: Optional[Dict] = None):
if self.stopped_epoch > 0 and self.verbose > 0:
print("Epoch %05d: early stopping" % (self.stopped_epoch + 1))
def get_monitor_value(self, logs):
monitor_value = logs.get(self.monitor)
if monitor_value is None:
warnings.warn(
"Early stopping conditioned on metric `%s` "
"which is not available. Available metrics are: %s"
% (self.monitor, ",".join(list(logs.keys()))),
RuntimeWarning,
)
return monitor_value
def __getstate__(self):
d = self.__dict__
self_dict = {k: d[k] for k in d if k not in ["trainer", "model"]}
return self_dict
def __setstate__(self, state):
self.__dict__ = state
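# Illustrative sketch (assuming a validation set is passed to the Trainer so that
# "val_loss" appears in the logs): stop after 5 epochs without improvement and restore
# the weights of the best epoch.
# >>> es = EarlyStopping(monitor="val_loss", patience=5, restore_best_weights=True)
# >>> # Trainer(model, objective="regression", callbacks=[es])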
| 36.544479
| 121
| 0.573467
|
59b2aa9528090042793fe6036ef512d830b76093
| 1,182
|
py
|
Python
|
test/e2e/test_apply_metadata.py
|
ImageMarkup/isic-archive
|
7cd8097886d685ec629e2fcba079271fb77d028f
|
[
"Apache-2.0"
] | 42
|
2015-12-12T14:05:46.000Z
|
2022-03-26T15:20:39.000Z
|
test/e2e/test_apply_metadata.py
|
ImageMarkup/isic-archive
|
7cd8097886d685ec629e2fcba079271fb77d028f
|
[
"Apache-2.0"
] | 494
|
2015-07-09T16:14:12.000Z
|
2021-03-09T09:37:36.000Z
|
test/e2e/test_apply_metadata.py
|
ImageMarkup/uda
|
d221af3368baf3a06ecab67e69e9d0077426c8f9
|
[
"Apache-2.0"
] | 12
|
2015-08-20T14:20:48.000Z
|
2020-10-20T01:14:44.000Z
|
from girder.models.user import User
from isic_archive.models.dataset import Dataset
from isic_archive.models.image import Image
def test_apply_metadata(session, dataset_id):
user = list(User().getAdmins())[0]
dataset = Dataset().load(dataset_id, force=True)
image = Dataset().addImage(
dataset, open('test/data/should-pass.jpg', 'rb'), 286_460, 'foo.jpg', 'some-signature', user
)
with open('test/data/test-metadata.csv') as infile:
r = session.post(
f'dataset/{dataset_id}/metadata',
params={'filename': 'test-metadata.csv'},
data=infile.read(),
headers={'Content-Type': 'text/csv'},
)
r.raise_for_status()
# reload to capture metadata file id
dataset = Dataset().load(dataset_id, force=True)
metadata_file_id = dataset['metadataFiles'][-1]['fileId']
r = session.post(f'dataset/{dataset_id}/metadata/{metadata_file_id}/apply', data={'save': True})
assert r.ok, r.text
image = Image().load(image['_id'], force=True)
assert image['meta']['clinical']['benign_malignant'] == 'benign'
assert image['meta']['unstructured']['some_key'] == 'some_value'
| 39.4
| 100
| 0.650592
|
b649bf93b519105865f936afa04309a44ee11c21
| 52
|
py
|
Python
|
Question 41 - 50/Q50.py
|
debdutgoswami/python-semester-practical
|
9abdc9091d825a2425b36437f6f8fe6806ac84f2
|
[
"MIT"
] | null | null | null |
Question 41 - 50/Q50.py
|
debdutgoswami/python-semester-practical
|
9abdc9091d825a2425b36437f6f8fe6806ac84f2
|
[
"MIT"
] | null | null | null |
Question 41 - 50/Q50.py
|
debdutgoswami/python-semester-practical
|
9abdc9091d825a2425b36437f6f8fe6806ac84f2
|
[
"MIT"
] | null | null | null |
l = [45, 52, 45, 65, 79]
l = list(set(l))
print(l)
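# Note (not part of the original exercise): set() does not preserve the original order.
# An order-preserving alternative on Python 3.7+ would be:
# l = list(dict.fromkeys([45, 52, 45, 65, 79]))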
| 10.4
| 24
| 0.5
|
21d97e4b8b4dc36458693fe195998ec453412f8f
| 4,600
|
py
|
Python
|
pyIsoDep/functions/batemansolvers.py
|
MattKrecicki/PYTHON-ISOTOPIC-DEPLETION-PACKAGE
|
ccad214de8721aa9b499ef70cd39966f18bceb76
|
[
"MIT"
] | null | null | null |
pyIsoDep/functions/batemansolvers.py
|
MattKrecicki/PYTHON-ISOTOPIC-DEPLETION-PACKAGE
|
ccad214de8721aa9b499ef70cd39966f18bceb76
|
[
"MIT"
] | null | null | null |
pyIsoDep/functions/batemansolvers.py
|
MattKrecicki/PYTHON-ISOTOPIC-DEPLETION-PACKAGE
|
ccad214de8721aa9b499ef70cd39966f18bceb76
|
[
"MIT"
] | null | null | null |
"""batemansolvers
Three solvers are enabled here to solve the Bateman equations:
(1) ODEINT solver
-----------------
Integrate a system of ordinary differential equations
(2) EXPM solver
---------------
Compute the matrix exponential using Pade approximation
(3) CRAM solver
---------------
CHBV computes the direct action of the matrix exponential on
a vector: y = exp(H)*x. It uses the partial fraction expansion of
the uniform rational Chebyshev approximation of type (14,14).
About 14-digit accuracy is expected if the matrix H is symmetric
negative definite. The algorithm may behave poorly otherwise.
See also PADM, EXPOKIT.
Roger B. Sidje (rbs@maths.uq.edu.au)
EXPOKIT: Software Package for Computing Matrix Exponentials.
ACM - Transactions On Mathematical Software, 24(1):130-156, 1998
"""
import numpy as np
from scipy.linalg import solve as linsolver
from scipy.linalg import expm
# -----------------------------------------------------------------------------
# Coefficients and poles of the partial fraction expansion
# -----------------------------------------------------------------------------
# Coefficients for IPF Cram 14
C14_ALPHA = np.array([
+0.557503973136501826E+02 - 0.204295038779771857E+03j,
-0.938666838877006739E+02 + 0.912874896775456363E+02j,
+0.469965415550370835E+02 - 0.116167609985818103E+02j,
-0.961424200626061065E+01 - 0.264195613880262669E+01j,
+0.752722063978321642E+00 + 0.670367365566377770E+00j,
-0.188781253158648576E-01 - 0.343696176445802414E-01j,
+0.143086431411801849E-03 + 0.287221133228814096E-03j, ],
dtype=np.complex128)
C14_THETA = np.array([
-0.562314417475317895E+01 + 0.119406921611247440E+01j,
-0.508934679728216110E+01 + 0.358882439228376881E+01j,
-0.399337136365302569E+01 + 0.600483209099604664E+01j,
-0.226978543095856366E+01 + 0.846173881758693369E+01j,
+0.208756929753827868E+00 + 0.109912615662209418E+02j,
+0.370327340957595652E+01 + 0.136563731924991884E+02j,
+0.889777151877331107E+01 + 0.166309842834712071E+02j, ],
dtype=np.complex128)
C14_ALPHA0 = 0.183216998528140087E-11
class CramSolver:
"""CRAM depletion solver that uses incomplete partial factorization
A method that uses an incomplete partial factorization (IPF) for the
Chebyshev Rational Approximation Method (CRAM), as described in:
M. Pusa, "`Higher-Order Chebyshev Rational Approximation Method and
Application to Burnup Equations
<https://doi.org/10.13182/NSE15-26>`_," Nucl. Sci. Eng., 182:3, 297-318.
Parameters
----------
alpha : numpy.ndarray
Complex residues of poles used in the factorization. Must be a
vector with even number of items.
theta : numpy.ndarray
Complex poles. Must have an equal size as ``alpha``.
alpha0 : float
Limit of the approximation at infinity
Attributes
----------
alpha : numpy.ndarray
Complex residues of poles :attr:`theta` in the incomplete partial
factorization. Denoted as :math:`\tilde{\alpha}`
theta : numpy.ndarray
Complex poles :math:`\theta` of the rational approximation
alpha0 : float
Limit of the approximation at infinity
"""
def __init__(self):
"""reset the number of partial factorization"""
self.alpha = -C14_ALPHA
self.theta = -C14_THETA
self.alpha0 = C14_ALPHA0
def solve(self, A, n0, dt):
"""Solve depletion equations using IPF CRAM
Parameters
----------
A : scipy.sparse.csr_matrix
Sparse transmutation matrix ``A[j, i]`` describing rates at
which isotope ``i`` transmutes to isotope ``j``
n0 : numpy.ndarray
Initial compositions, typically given in number of atoms in some
material or an atom density
dt : float
Time [s] of the specific interval to be solved
Returns
-------
numpy.ndarray
Final compositions after ``dt``
"""
H = A * dt
y = n0 * self.alpha0
ident = np.eye(A.shape[0])
for alpha, theta in zip(self.alpha, self.theta):
y += np.real(linsolver(H - theta*ident, alpha*n0))
y[y < 1E-25] = 0
return y
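# As implemented above, the IPF CRAM-14 update performed by solve() is
#     n(dt) = alpha0 * n0 + sum_k Re[ (A*dt - theta_k * I)^{-1} (alpha_k * n0) ],
# where alpha_k and theta_k are the negated C14_ALPHA / C14_THETA constants defined
# at module level, and any resulting value below 1e-25 is clamped to zero.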
class expmSolver:
"""Built-in expm solver that relies on the pade approximation"""
def __init__(self):
"""reset values with a complete list of all the nuclides"""
pass
def solve(self, mtx, n0, dt):
"""Solve the exponential of a matrix"""
n1 = np.dot(expm(mtx * dt), n0)
return n1
| 33.823529
| 79
| 0.646957
|
bdc4a9c0683d9a7dcd541758afe48602384b224b
| 36,656
|
py
|
Python
|
synapse/rest/client/account.py
|
Fizzadar/synapse
|
6b46c3eb3d526d903e1e4833b2e8ae9b73de8502
|
[
"Apache-2.0"
] | null | null | null |
synapse/rest/client/account.py
|
Fizzadar/synapse
|
6b46c3eb3d526d903e1e4833b2e8ae9b73de8502
|
[
"Apache-2.0"
] | null | null | null |
synapse/rest/client/account.py
|
Fizzadar/synapse
|
6b46c3eb3d526d903e1e4833b2e8ae9b73de8502
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
from http import HTTPStatus
from typing import TYPE_CHECKING, Optional, Tuple
from urllib.parse import urlparse
from twisted.web.server import Request
from synapse.api.constants import LoginType
from synapse.api.errors import (
Codes,
InteractiveAuthIncompleteError,
SynapseError,
ThreepidValidationError,
)
from synapse.config.emailconfig import ThreepidBehaviour
from synapse.handlers.ui_auth import UIAuthSessionDataConstants
from synapse.http.server import HttpServer, finish_request, respond_with_html
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
parse_json_object_from_request,
parse_string,
)
from synapse.http.site import SynapseRequest
from synapse.metrics import threepid_send_requests
from synapse.push.mailer import Mailer
from synapse.types import JsonDict
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.stringutils import assert_valid_client_secret, random_string
from synapse.util.threepids import check_3pid_allowed, validate_email
from ._base import client_patterns, interactive_auth_handler
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class EmailPasswordRequestTokenRestServlet(RestServlet):
PATTERNS = client_patterns("/account/password/email/requestToken$")
def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.datastore = hs.get_datastores().main
self.config = hs.config
self.identity_handler = hs.get_identity_handler()
if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
self.mailer = Mailer(
hs=self.hs,
app_name=self.config.email.email_app_name,
template_html=self.config.email.email_password_reset_template_html,
template_text=self.config.email.email_password_reset_template_text,
)
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF:
if self.config.email.local_threepid_handling_disabled_due_to_email_config:
logger.warning(
"User password resets have been disabled due to lack of email config"
)
raise SynapseError(
400, "Email-based password resets have been disabled on this server"
)
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["client_secret", "email", "send_attempt"])
# Extract params from body
client_secret = body["client_secret"]
assert_valid_client_secret(client_secret)
# Canonicalise the email address. The addresses are all stored canonicalised
# in the database. This allows the user to reset their password without having to
# know the exact spelling (e.g. upper and lower case) of the address in the database.
# Stored in the database "foo@bar.com"
# User requests with "FOO@bar.com" would raise a Not Found error
try:
email = validate_email(body["email"])
except ValueError as e:
raise SynapseError(400, str(e))
send_attempt = body["send_attempt"]
next_link = body.get("next_link") # Optional param
if next_link:
# Raise if the provided next_link value isn't valid
assert_valid_next_link(self.hs, next_link)
await self.identity_handler.ratelimit_request_token_requests(
request, "email", email
)
# The email will be sent to the stored address.
# This avoids a potential account hijack by requesting a password reset to
# an email address which is controlled by the attacker but which, after
# canonicalisation, matches the one in our database.
existing_user_id = await self.hs.get_datastores().main.get_user_id_by_threepid(
"email", email
)
if existing_user_id is None:
if self.config.server.request_token_inhibit_3pid_errors:
# Make the client think the operation succeeded. See the rationale in the
# comments for request_token_inhibit_3pid_errors.
# Also wait for some random amount of time between 100ms and 1s to make it
# look like we did something.
await self.hs.get_clock().sleep(random.randint(1, 10) / 10)
return 200, {"sid": random_string(16)}
raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND)
if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
assert self.hs.config.registration.account_threepid_delegate_email
# Have the configured identity server handle the request
ret = await self.identity_handler.requestEmailToken(
self.hs.config.registration.account_threepid_delegate_email,
email,
client_secret,
send_attempt,
next_link,
)
else:
# Send password reset emails from Synapse
sid = await self.identity_handler.send_threepid_validation(
email,
client_secret,
send_attempt,
self.mailer.send_password_reset_mail,
next_link,
)
# Wrap the session id in a JSON object
ret = {"sid": sid}
threepid_send_requests.labels(type="email", reason="password_reset").observe(
send_attempt
)
return 200, ret
class PasswordRestServlet(RestServlet):
PATTERNS = client_patterns("/account/password$")
def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
self.auth_handler = hs.get_auth_handler()
self.datastore = self.hs.get_datastores().main
self.password_policy_handler = hs.get_password_policy_handler()
self._set_password_handler = hs.get_set_password_handler()
@interactive_auth_handler
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
body = parse_json_object_from_request(request)
# we do basic sanity checks here because the auth layer will store these
# in sessions. Pull out the new password provided to us.
new_password = body.pop("new_password", None)
if new_password is not None:
if not isinstance(new_password, str) or len(new_password) > 512:
raise SynapseError(400, "Invalid password")
self.password_policy_handler.validate_password(new_password)
# there are two possibilities here. Either the user does not have an
# access token, and needs to do a password reset; or they have one and
# need to validate their identity.
#
# In the first case, we offer a couple of means of identifying
# themselves (email and msisdn, though it's unclear if msisdn actually
# works).
#
# In the second case, we require a password to confirm their identity.
requester = None
if self.auth.has_access_token(request):
requester = await self.auth.get_user_by_req(request)
try:
params, session_id = await self.auth_handler.validate_user_via_ui_auth(
requester,
request,
body,
"modify your account password",
)
except InteractiveAuthIncompleteError as e:
# The user needs to provide more steps to complete auth, but
# they're not required to provide the password again.
#
# If a password is available now, hash the provided password and
# store it for later.
if new_password:
new_password_hash = await self.auth_handler.hash(new_password)
await self.auth_handler.set_session_data(
e.session_id,
UIAuthSessionDataConstants.PASSWORD_HASH,
new_password_hash,
)
raise
user_id = requester.user.to_string()
else:
try:
result, params, session_id = await self.auth_handler.check_ui_auth(
[[LoginType.EMAIL_IDENTITY]],
request,
body,
"modify your account password",
)
except InteractiveAuthIncompleteError as e:
# The user needs to provide more steps to complete auth, but
# they're not required to provide the password again.
#
# If a password is available now, hash the provided password and
# store it for later.
if new_password:
new_password_hash = await self.auth_handler.hash(new_password)
await self.auth_handler.set_session_data(
e.session_id,
UIAuthSessionDataConstants.PASSWORD_HASH,
new_password_hash,
)
raise
if LoginType.EMAIL_IDENTITY in result:
threepid = result[LoginType.EMAIL_IDENTITY]
if "medium" not in threepid or "address" not in threepid:
raise SynapseError(500, "Malformed threepid")
if threepid["medium"] == "email":
# For emails, canonicalise the address.
# We store all email addresses canonicalised in the DB.
# (See add_threepid in synapse/handlers/auth.py)
try:
threepid["address"] = validate_email(threepid["address"])
except ValueError as e:
raise SynapseError(400, str(e))
# if using email, we must know about the email they're authing with!
threepid_user_id = await self.datastore.get_user_id_by_threepid(
threepid["medium"], threepid["address"]
)
if not threepid_user_id:
raise SynapseError(404, "Email address not found", Codes.NOT_FOUND)
user_id = threepid_user_id
else:
logger.error("Auth succeeded but no known type! %r", result.keys())
raise SynapseError(500, "", Codes.UNKNOWN)
# If we have a password in this request, prefer it. Otherwise, use the
# password hash from an earlier request.
if new_password:
password_hash: Optional[str] = await self.auth_handler.hash(new_password)
elif session_id is not None:
password_hash = await self.auth_handler.get_session_data(
session_id, UIAuthSessionDataConstants.PASSWORD_HASH, None
)
else:
# UI validation was skipped, but the request did not include a new
# password.
password_hash = None
if not password_hash:
raise SynapseError(400, "Missing params: password", Codes.MISSING_PARAM)
logout_devices = params.get("logout_devices", True)
await self._set_password_handler.set_password(
user_id, password_hash, logout_devices, requester
)
return 200, {}
class DeactivateAccountRestServlet(RestServlet):
PATTERNS = client_patterns("/account/deactivate$")
def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
self.auth_handler = hs.get_auth_handler()
self._deactivate_account_handler = hs.get_deactivate_account_handler()
@interactive_auth_handler
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
body = parse_json_object_from_request(request)
erase = body.get("erase", False)
if not isinstance(erase, bool):
raise SynapseError(
HTTPStatus.BAD_REQUEST,
"Param 'erase' must be a boolean, if given",
Codes.BAD_JSON,
)
requester = await self.auth.get_user_by_req(request)
# allow ASes to deactivate their own users
if requester.app_service:
await self._deactivate_account_handler.deactivate_account(
requester.user.to_string(), erase, requester
)
return 200, {}
await self.auth_handler.validate_user_via_ui_auth(
requester,
request,
body,
"deactivate your account",
)
result = await self._deactivate_account_handler.deactivate_account(
requester.user.to_string(),
erase,
requester,
id_server=body.get("id_server"),
)
if result:
id_server_unbind_result = "success"
else:
id_server_unbind_result = "no-support"
return 200, {"id_server_unbind_result": id_server_unbind_result}
class EmailThreepidRequestTokenRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid/email/requestToken$")
def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.config = hs.config
self.identity_handler = hs.get_identity_handler()
self.store = self.hs.get_datastores().main
if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
self.mailer = Mailer(
hs=self.hs,
app_name=self.config.email.email_app_name,
template_html=self.config.email.email_add_threepid_template_html,
template_text=self.config.email.email_add_threepid_template_text,
)
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF:
if self.config.email.local_threepid_handling_disabled_due_to_email_config:
logger.warning(
"Adding emails have been disabled due to lack of an email config"
)
raise SynapseError(
400, "Adding an email to your account is disabled on this server"
)
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["client_secret", "email", "send_attempt"])
client_secret = body["client_secret"]
assert_valid_client_secret(client_secret)
# Canonicalise the email address. The addresses are all stored canonicalised
# in the database.
# This ensures that the validation email is sent to the canonicalised address
# as it will later be entered into the database.
# Otherwise the email will be sent to "FOO@bar.com" and stored as
# "foo@bar.com" in database.
try:
email = validate_email(body["email"])
except ValueError as e:
raise SynapseError(400, str(e))
send_attempt = body["send_attempt"]
next_link = body.get("next_link") # Optional param
if not await check_3pid_allowed(self.hs, "email", email):
raise SynapseError(
403,
"Your email domain is not authorized on this server",
Codes.THREEPID_DENIED,
)
await self.identity_handler.ratelimit_request_token_requests(
request, "email", email
)
if next_link:
# Raise if the provided next_link value isn't valid
assert_valid_next_link(self.hs, next_link)
existing_user_id = await self.store.get_user_id_by_threepid("email", email)
if existing_user_id is not None:
if self.config.server.request_token_inhibit_3pid_errors:
# Make the client think the operation succeeded. See the rationale in the
# comments for request_token_inhibit_3pid_errors.
# Also wait for some random amount of time between 100ms and 1s to make it
# look like we did something.
await self.hs.get_clock().sleep(random.randint(1, 10) / 10)
return 200, {"sid": random_string(16)}
raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
assert self.hs.config.registration.account_threepid_delegate_email
# Have the configured identity server handle the request
ret = await self.identity_handler.requestEmailToken(
self.hs.config.registration.account_threepid_delegate_email,
email,
client_secret,
send_attempt,
next_link,
)
else:
# Send threepid validation emails from Synapse
sid = await self.identity_handler.send_threepid_validation(
email,
client_secret,
send_attempt,
self.mailer.send_add_threepid_mail,
next_link,
)
# Wrap the session id in a JSON object
ret = {"sid": sid}
threepid_send_requests.labels(type="email", reason="add_threepid").observe(
send_attempt
)
return 200, ret
class MsisdnThreepidRequestTokenRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid/msisdn/requestToken$")
def __init__(self, hs: "HomeServer"):
self.hs = hs
super().__init__()
self.store = self.hs.get_datastores().main
self.identity_handler = hs.get_identity_handler()
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
body = parse_json_object_from_request(request)
assert_params_in_dict(
body, ["client_secret", "country", "phone_number", "send_attempt"]
)
client_secret = body["client_secret"]
assert_valid_client_secret(client_secret)
country = body["country"]
phone_number = body["phone_number"]
send_attempt = body["send_attempt"]
next_link = body.get("next_link") # Optional param
msisdn = phone_number_to_msisdn(country, phone_number)
if not await check_3pid_allowed(self.hs, "msisdn", msisdn):
raise SynapseError(
403,
"Account phone numbers are not authorized on this server",
Codes.THREEPID_DENIED,
)
await self.identity_handler.ratelimit_request_token_requests(
request, "msisdn", msisdn
)
if next_link:
# Raise if the provided next_link value isn't valid
assert_valid_next_link(self.hs, next_link)
existing_user_id = await self.store.get_user_id_by_threepid("msisdn", msisdn)
if existing_user_id is not None:
if self.hs.config.server.request_token_inhibit_3pid_errors:
# Make the client think the operation succeeded. See the rationale in the
# comments for request_token_inhibit_3pid_errors.
# Also wait for some random amount of time between 100ms and 1s to make it
# look like we did something.
await self.hs.get_clock().sleep(random.randint(1, 10) / 10)
return 200, {"sid": random_string(16)}
raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE)
if not self.hs.config.registration.account_threepid_delegate_msisdn:
logger.warning(
"No upstream msisdn account_threepid_delegate configured on the server to "
"handle this request"
)
raise SynapseError(
400,
"Adding phone numbers to user account is not supported by this homeserver",
)
ret = await self.identity_handler.requestMsisdnToken(
self.hs.config.registration.account_threepid_delegate_msisdn,
country,
phone_number,
client_secret,
send_attempt,
next_link,
)
threepid_send_requests.labels(type="msisdn", reason="add_threepid").observe(
send_attempt
)
return 200, ret
class AddThreepidEmailSubmitTokenServlet(RestServlet):
"""Handles 3PID validation token submission for adding an email to a user's account"""
PATTERNS = client_patterns(
"/add_threepid/email/submit_token$", releases=(), unstable=True
)
def __init__(self, hs: "HomeServer"):
super().__init__()
self.config = hs.config
self.clock = hs.get_clock()
self.store = hs.get_datastores().main
if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
self._failure_email_template = (
self.config.email.email_add_threepid_template_failure_html
)
async def on_GET(self, request: Request) -> None:
if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF:
if self.config.email.local_threepid_handling_disabled_due_to_email_config:
logger.warning(
"Adding emails have been disabled due to lack of an email config"
)
raise SynapseError(
400, "Adding an email to your account is disabled on this server"
)
elif self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
raise SynapseError(
400,
"This homeserver is not validating threepids. Use an identity server "
"instead.",
)
sid = parse_string(request, "sid", required=True)
token = parse_string(request, "token", required=True)
client_secret = parse_string(request, "client_secret", required=True)
assert_valid_client_secret(client_secret)
# Attempt to validate a 3PID session
try:
# Mark the session as valid
next_link = await self.store.validate_threepid_session(
sid, client_secret, token, self.clock.time_msec()
)
# Perform a 302 redirect if next_link is set
if next_link:
request.setResponseCode(302)
request.setHeader("Location", next_link)
finish_request(request)
return None
# Otherwise show the success template
html = self.config.email.email_add_threepid_template_success_html_content
status_code = 200
except ThreepidValidationError as e:
status_code = e.code
# Show a failure page with a reason
template_vars = {"failure_reason": e.msg}
html = self._failure_email_template.render(**template_vars)
respond_with_html(request, status_code, html)
class AddThreepidMsisdnSubmitTokenServlet(RestServlet):
"""Handles 3PID validation token submission for adding a phone number to a user's
account
"""
PATTERNS = client_patterns(
"/add_threepid/msisdn/submit_token$", releases=(), unstable=True
)
def __init__(self, hs: "HomeServer"):
super().__init__()
self.config = hs.config
self.clock = hs.get_clock()
self.store = hs.get_datastores().main
self.identity_handler = hs.get_identity_handler()
async def on_POST(self, request: Request) -> Tuple[int, JsonDict]:
if not self.config.registration.account_threepid_delegate_msisdn:
raise SynapseError(
400,
"This homeserver is not validating phone numbers. Use an identity server "
"instead.",
)
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["client_secret", "sid", "token"])
assert_valid_client_secret(body["client_secret"])
# Proxy submit_token request to msisdn threepid delegate
response = await self.identity_handler.proxy_msisdn_submit_token(
self.config.registration.account_threepid_delegate_msisdn,
body["client_secret"],
body["sid"],
body["token"],
)
return 200, response
class ThreepidRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid$")
def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.identity_handler = hs.get_identity_handler()
self.auth = hs.get_auth()
self.auth_handler = hs.get_auth_handler()
self.datastore = self.hs.get_datastores().main
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
threepids = await self.datastore.user_get_threepids(requester.user.to_string())
return 200, {"threepids": threepids}
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if not self.hs.config.registration.enable_3pid_changes:
raise SynapseError(
400, "3PID changes are disabled on this server", Codes.FORBIDDEN
)
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
body = parse_json_object_from_request(request)
threepid_creds = body.get("threePidCreds") or body.get("three_pid_creds")
if threepid_creds is None:
raise SynapseError(
400, "Missing param three_pid_creds", Codes.MISSING_PARAM
)
assert_params_in_dict(threepid_creds, ["client_secret", "sid"])
sid = threepid_creds["sid"]
client_secret = threepid_creds["client_secret"]
assert_valid_client_secret(client_secret)
validation_session = await self.identity_handler.validate_threepid_session(
client_secret, sid
)
if validation_session:
await self.auth_handler.add_threepid(
user_id,
validation_session["medium"],
validation_session["address"],
validation_session["validated_at"],
)
return 200, {}
raise SynapseError(
400, "No validated 3pid session found", Codes.THREEPID_AUTH_FAILED
)
class ThreepidAddRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid/add$")
def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.identity_handler = hs.get_identity_handler()
self.auth = hs.get_auth()
self.auth_handler = hs.get_auth_handler()
@interactive_auth_handler
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if not self.hs.config.registration.enable_3pid_changes:
raise SynapseError(
400, "3PID changes are disabled on this server", Codes.FORBIDDEN
)
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["client_secret", "sid"])
sid = body["sid"]
client_secret = body["client_secret"]
assert_valid_client_secret(client_secret)
await self.auth_handler.validate_user_via_ui_auth(
requester,
request,
body,
"add a third-party identifier to your account",
)
validation_session = await self.identity_handler.validate_threepid_session(
client_secret, sid
)
if validation_session:
await self.auth_handler.add_threepid(
user_id,
validation_session["medium"],
validation_session["address"],
validation_session["validated_at"],
)
return 200, {}
raise SynapseError(
400, "No validated 3pid session found", Codes.THREEPID_AUTH_FAILED
)
class ThreepidBindRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid/bind$")
def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.identity_handler = hs.get_identity_handler()
self.auth = hs.get_auth()
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["id_server", "sid", "client_secret"])
id_server = body["id_server"]
sid = body["sid"]
id_access_token = body.get("id_access_token") # optional
client_secret = body["client_secret"]
assert_valid_client_secret(client_secret)
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
await self.identity_handler.bind_threepid(
client_secret, sid, user_id, id_server, id_access_token
)
return 200, {}
class ThreepidUnbindRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid/unbind$")
def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.identity_handler = hs.get_identity_handler()
self.auth = hs.get_auth()
self.datastore = self.hs.get_datastores().main
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
"""Unbind the given 3pid from a specific identity server, or identity servers that are
known to have this 3pid bound
"""
requester = await self.auth.get_user_by_req(request)
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["medium", "address"])
medium = body.get("medium")
address = body.get("address")
id_server = body.get("id_server")
# Attempt to unbind the threepid from an identity server. If id_server is None, try to
# unbind from all identity servers this threepid has been added to in the past
result = await self.identity_handler.try_unbind_threepid(
requester.user.to_string(),
{"address": address, "medium": medium, "id_server": id_server},
)
return 200, {"id_server_unbind_result": "success" if result else "no-support"}
class ThreepidDeleteRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid/delete$")
def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
self.auth_handler = hs.get_auth_handler()
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if not self.hs.config.registration.enable_3pid_changes:
raise SynapseError(
400, "3PID changes are disabled on this server", Codes.FORBIDDEN
)
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["medium", "address"])
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
try:
ret = await self.auth_handler.delete_threepid(
user_id, body["medium"], body["address"], body.get("id_server")
)
except Exception:
# NB. This endpoint should succeed if there is nothing to
# delete, so it should only throw if something is wrong
# that we ought to care about.
logger.exception("Failed to remove threepid")
raise SynapseError(500, "Failed to remove threepid")
if ret:
id_server_unbind_result = "success"
else:
id_server_unbind_result = "no-support"
return 200, {"id_server_unbind_result": id_server_unbind_result}
def assert_valid_next_link(hs: "HomeServer", next_link: str) -> None:
"""
Raises a SynapseError if a given next_link value is invalid
next_link is valid if the scheme is http(s) and the next_link.domain_whitelist config
option is either empty or contains a domain that matches the one in the given next_link
Args:
hs: The homeserver object
next_link: The next_link value given by the client
Raises:
SynapseError: If the next_link is invalid
"""
valid = True
# Parse the contents of the URL
next_link_parsed = urlparse(next_link)
# Scheme must not point to the local drive
if next_link_parsed.scheme == "file":
valid = False
# If the domain whitelist is set, the domain must be in it
if (
valid
and hs.config.server.next_link_domain_whitelist is not None
and next_link_parsed.hostname not in hs.config.server.next_link_domain_whitelist
):
valid = False
if not valid:
raise SynapseError(
400,
"'next_link' domain not included in whitelist, or not http(s)",
errcode=Codes.INVALID_PARAM,
)
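# Illustrative sketch (not part of the original module): the same check as
# assert_valid_next_link, condensed and applied to a hypothetical whitelist
# value so the behaviour is easy to see in isolation.
def _next_link_allowed_example(next_link: str, whitelist=("example.com",)) -> bool:
    parsed = urlparse(next_link)
    if parsed.scheme == "file":
        # Links pointing at the local drive are always rejected.
        return False
    # With no whitelist configured, any other scheme/host is accepted;
    # otherwise the hostname must appear in the whitelist.
    return whitelist is None or parsed.hostname in whitelist
# e.g. _next_link_allowed_example("https://example.com/verified") is True,
# while "file:///etc/passwd" and "https://other.example.org/x" are rejected.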
class WhoamiRestServlet(RestServlet):
PATTERNS = client_patterns("/account/whoami$")
def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
response = {
"user_id": requester.user.to_string(),
# Entered spec in Matrix 1.2
"is_guest": bool(requester.is_guest),
}
# Appservices and similar accounts do not have device IDs
# that we can report on, so exclude them for compliance.
if requester.device_id is not None:
response["device_id"] = requester.device_id
return 200, response
class AccountStatusRestServlet(RestServlet):
PATTERNS = client_patterns(
"/org.matrix.msc3720/account_status$", unstable=True, releases=()
)
def __init__(self, hs: "HomeServer"):
super().__init__()
self._auth = hs.get_auth()
self._account_handler = hs.get_account_handler()
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
await self._auth.get_user_by_req(request)
body = parse_json_object_from_request(request)
if "user_ids" not in body:
raise SynapseError(
400, "Required parameter 'user_ids' is missing", Codes.MISSING_PARAM
)
statuses, failures = await self._account_handler.get_account_statuses(
body["user_ids"],
allow_remote=True,
)
return 200, {"account_statuses": statuses, "failures": failures}
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
EmailPasswordRequestTokenRestServlet(hs).register(http_server)
PasswordRestServlet(hs).register(http_server)
DeactivateAccountRestServlet(hs).register(http_server)
EmailThreepidRequestTokenRestServlet(hs).register(http_server)
MsisdnThreepidRequestTokenRestServlet(hs).register(http_server)
AddThreepidEmailSubmitTokenServlet(hs).register(http_server)
AddThreepidMsisdnSubmitTokenServlet(hs).register(http_server)
ThreepidRestServlet(hs).register(http_server)
ThreepidAddRestServlet(hs).register(http_server)
ThreepidBindRestServlet(hs).register(http_server)
ThreepidUnbindRestServlet(hs).register(http_server)
ThreepidDeleteRestServlet(hs).register(http_server)
WhoamiRestServlet(hs).register(http_server)
if hs.config.experimental.msc3720_enabled:
AccountStatusRestServlet(hs).register(http_server)
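# Hedged, client-side sketch (not part of this module) of calling the
# /account/whoami endpoint served by WhoamiRestServlet above. The homeserver
# URL and access token are placeholders, and `requests` is used purely for
# illustration; it is not a dependency of this file.
def _whoami_client_example(homeserver: str, access_token: str) -> JsonDict:
    import requests
    resp = requests.get(
        f"{homeserver}/_matrix/client/v3/account/whoami",
        headers={"Authorization": f"Bearer {access_token}"},
        timeout=10,
    )
    resp.raise_for_status()
    # e.g. {"user_id": "@alice:example.org", "is_guest": False, "device_id": "ABC"}
    return resp.json()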
| 38.954304
| 94
| 0.636158
|
7982bbb93cc579623c501401bddc35f0c0f0ad1d
| 670
|
py
|
Python
|
Lib/site-packages/traitlets/utils/bunch.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/traitlets/utils/bunch.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/traitlets/utils/bunch.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | null | null | null |
"""Yet another implementation of bunch
attribute-access of items on a dict.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
class Bunch(dict): # type:ignore[type-arg]
"""A dict with attribute-access"""
def __getattr__(self, key):
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError(key)
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __dir__(self):
# py2-compat: can't use super because dict doesn't have __dir__
names = dir({})
names.extend(self.keys())
return names
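# Short usage sketch of Bunch (not part of the original module): items set as
# dict keys are readable as attributes, and attribute writes land in the dict.
if __name__ == "__main__":
    b = Bunch(alpha=1)
    b.beta = 2                 # routed through __setattr__ -> __setitem__
    assert b["beta"] == 2
    assert b.alpha == 1        # routed through __getattr__ -> __getitem__
    assert {"alpha", "beta"} <= set(dir(b))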
| 24.814815
| 71
| 0.638806
|
dee20f0ca68e4575842d172e514e10be131d0af5
| 1,334
|
py
|
Python
|
setup.py
|
dinceraslancom/internetdownloadmanager
|
35d2c57c4fadb93b57952f4e5584546731a082d4
|
[
"Apache-2.0"
] | 10
|
2019-12-10T13:51:07.000Z
|
2021-08-16T13:27:47.000Z
|
setup.py
|
dinceraslancom/internetdownloadmanager
|
35d2c57c4fadb93b57952f4e5584546731a082d4
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
dinceraslancom/internetdownloadmanager
|
35d2c57c4fadb93b57952f4e5584546731a082d4
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup, find_packages
requires = [
'chardet>=3.0.2,<3.1.0',
'idna>=2.5,<2.9',
'urllib3>=1.21.1,<1.26,!=1.25.0,!=1.25.1',
'certifi>=2017.4.17',
'requests>=2.22.0',
]
with open("README.rst", "r", encoding="utf8") as f:
readme = f.read()
with open("HISTORY.rst", "r", encoding="utf8") as f:
history = f.read()
setup(
name='internetdownloadmanager',
version='0.0.1',
package_dir={'internetdownloadmanager': 'internetdownloadmanager'},
author="Dincer Aslan",
author_email="dinceraslan.com@gmail.com",
description="file downloader with many requests",
long_description=readme,
url="https://github.com/dinceraslancom/internetdownloadmanager",
packages=find_packages(),
python_requires=">=3.0, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
install_requires=requires,
classifiers=[
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
project_urls={
'Source': 'https://github.com/dinceraslancom/internetdownloadmanager',
},
)
| 31.023256
| 78
| 0.613943
|
4ca3cc064795e84e8a8f092b3e424a25c8dc6aa0
| 487
|
py
|
Python
|
83/removeduplicatiesfromsortedlinkedlist.py
|
cccccccccccccc/Myleetcode
|
fb3fa6df7c77feb2d252feea7f3507569e057c70
|
[
"Apache-2.0"
] | null | null | null |
83/removeduplicatiesfromsortedlinkedlist.py
|
cccccccccccccc/Myleetcode
|
fb3fa6df7c77feb2d252feea7f3507569e057c70
|
[
"Apache-2.0"
] | null | null | null |
83/removeduplicatiesfromsortedlinkedlist.py
|
cccccccccccccc/Myleetcode
|
fb3fa6df7c77feb2d252feea7f3507569e057c70
|
[
"Apache-2.0"
] | null | null | null |
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def deleteDuplicates(self, head: ListNode) -> ListNode:
if head is None or head.next is None:
return head
cur = head
while head.next:
if head.val == head.next.val:
head.next = head.next.next
else:
head = head.next
return cur
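# Small usage example (not part of the original file): build the sorted list
# 1 -> 1 -> 2 and strip the duplicate node.
if __name__ == "__main__":
    head = ListNode(1, ListNode(1, ListNode(2)))
    node = Solution().deleteDuplicates(head)
    values = []
    while node:
        values.append(node.val)
        node = node.next
    print(values)  # [1, 2]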
| 30.4375
| 59
| 0.554415
|
c6b107d5f51d7704c7e7742336fc49cb91579cc4
| 3,721
|
py
|
Python
|
VWCemail.py
|
DennisH3/Virtual-Water-Cooler
|
4f62ab890c54a20d619a6d58fbef9fd84f22ce1b
|
[
"MIT"
] | null | null | null |
VWCemail.py
|
DennisH3/Virtual-Water-Cooler
|
4f62ab890c54a20d619a6d58fbef9fd84f22ce1b
|
[
"MIT"
] | 2
|
2020-11-24T13:59:49.000Z
|
2020-12-04T19:32:42.000Z
|
VWCemail.py
|
DennisH3/Virtual-Water-Cooler
|
4f62ab890c54a20d619a6d58fbef9fd84f22ce1b
|
[
"MIT"
] | 2
|
2020-11-24T12:36:45.000Z
|
2020-11-25T14:56:22.000Z
|
# coding: utf-8
# In[ ]:
# Pip Installs
#!pip install pywin32
# In[ ]:
import pandas as pd
from IPython.display import display
import win32com.client as win32
import time
# In[ ]:
"""
Desc:
Function that sends an email.
Params: recipients (string), text (string), profilename (string, default "Outlook 2013")
Output: An email
Note: you must be logged onto your Outlook 2013 account first before this will run
"""
def email(recipients, text, profilename="Outlook 2013"):
oa = win32.Dispatch("Outlook.Application")
Msg = oa.CreateItem(0)
Msg.To = recipients
Msg.Subject = "Virtual Water Cooler"
Msg.Body = text
Msg.Display()
# Msg.Send()
# In[ ]:
"""Load the data"""
df = pd.read_csv("emailVWC.csv")
# Filter the data for people who have been matched
df = df[df["matched"] == 1]
# Display
display(df)
# In[ ]:
"""Automated Emails for Matched Groups"""
# For each pair in list of matches
for i in range(0, len(df.index)-1, 2):
# Body text of email (English)
text = """Hello {} and {},
You have been matched together for a Virtual Watercooler conversation. We recommend using MS
Teams scheduled during regular business hours for a conversation of about 10 minutes, but it is up to
you to decide how to proceed.
The group prefers to chat in {} in the {}. You work in {} and {}, respectively.
{}
Please reach out to [name] <email> with all of your
feedback, questions and suggestions. Thank you for using the StatCan Virtual Watercooler.
Sincerely,
The StatCan Virtual Watercooler Team
Innovation Secretariat""".format(df.iloc[i]['name'], # Name of Person 1
df.iloc[i+1]['name'], # Name of Person 2
df.iloc[i]['lang'], # Language preference
df.iloc[i]['t'], # Time preference
df.iloc[i]['field'], # Field of Person 1
df.iloc[i+1]['field'], # Field of Person 2
df.iloc[i]['comInterests'] # Common interests
)
# French translation of the email
textFr = """Bonjour {} et {},
Vous avez été jumelés pour une causerie virtuelle. Nous vous recommandons d’utiliser MS Teams
pendant les heures normales de travail pour discuter environ 10 minutes, mais c’est à vous de décider
de la manière de procéder.
Le groupe préfère discuter en {} dans {}. Vous travaillez dans {} et {}, respectivement.
{}
Nous vous invitons à communiquer avec [nom] <email>
si vous avez des commentaires, des questions et des suggestions. Nous vous remercions de participer aux
causeries virtuelles de Statistique Canada.
Bien cordialement,
L’Équipe des causeries virtuelles de Statistique Canada
Secrétariat de l’innovation""".format(df.iloc[i]['name'], # Name of Person 1
df.iloc[i+1]['name'], # Name of Person 2
df.iloc[i]['langFR'], # Language preference
df.iloc[i]['tFR'], # Time preference
df.iloc[i]['fieldFR'], # Field of Person 1
df.iloc[i+1]['fieldFR'], # Field of Person 2
df.iloc[i]['comInterestsFR'] # Common interests
)
# Final email message
message = text + "\n\n\n" + textFr
print(message)
# The emails from each person in the pair
recipients = df.iloc[i]['email'] + "; " + df.iloc[i+1]['email']
print(recipients)
# Send the emails
email(recipients, message)
# Wait 3 seconds before next email
time.sleep(3)
| 26.769784
| 105
| 0.596345
|
f5bef131802030115074e01d8a8d8f9f7edb8667
| 369
|
py
|
Python
|
section_cmab/algorithms/lrfu.py
|
myron0330/caching-research
|
adb61e6d7246af13d4428254d456618cd6ccf0dc
|
[
"MIT"
] | null | null | null |
section_cmab/algorithms/lrfu.py
|
myron0330/caching-research
|
adb61e6d7246af13d4428254d456618cd6ccf0dc
|
[
"MIT"
] | null | null | null |
section_cmab/algorithms/lrfu.py
|
myron0330/caching-research
|
adb61e6d7246af13d4428254d456618cd6ccf0dc
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
# **********************************************************************************#
# File:
# **********************************************************************************#
from __future__ import division
def lfu(x):
"""
Lfu function
"""
return 1
def lru(x):
"""
Lru function
"""
return (1./2)**x
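# Illustrative comparison (not part of the original file): lfu weights every
# past reference equally, while lru halves the weight for each additional step
# x into the past, so recent references dominate the score.
if __name__ == '__main__':
    print([lfu(x) for x in range(4)])  # [1, 1, 1, 1]
    print([lru(x) for x in range(4)])  # [1.0, 0.5, 0.25, 0.125]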
| 18.45
| 85
| 0.254743
|
ae6a0695f5bf250b3c7ac7a4ea833f270582af2c
| 1,449
|
py
|
Python
|
nativecompile.py
|
antimatter15/microwave
|
aef41f6b57e3171ca22c12422c0a05e17cc002c6
|
[
"Apache-2.0"
] | 1
|
2017-03-08T18:45:50.000Z
|
2017-03-08T18:45:50.000Z
|
nativecompile.py
|
antimatter15/microwave
|
aef41f6b57e3171ca22c12422c0a05e17cc002c6
|
[
"Apache-2.0"
] | null | null | null |
nativecompile.py
|
antimatter15/microwave
|
aef41f6b57e3171ca22c12422c0a05e17cc002c6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import httplib, urllib, fileinput, sys, re
print "Reading Developement HTML"
prefix = './'
codes = open(prefix+'native.html','r').read()
compile_regex = r'START_JS(.*?)END_JS'
js = ''
for match in re.finditer(compile_regex, codes, re.DOTALL):
print "Found script compile block"
includetext = match.group(1)
for include in re.finditer(r'src=[\"\'](.*)[\"\']', includetext):
fn = include.group(1)
js += "//File: "+fn+ '\n\n\n'
js += open(prefix+fn,'r').read() + '\n\n\n'
html = codes.replace(match.group(0),'')
print "Writing concatenated JS"
open(prefix+'microwave.native.js','w').write(js)
#exit();
html = html.replace('<!--RELEASE','')
html = html.replace('RELEASE-->','')
html = html.replace('<!---->','')
print "Writing compiled HTML"
open(prefix+'native.out.html','w').write(html)
print "Querying Closure Compiler REST API for compressed JS"
params = urllib.urlencode([
('js_code', js),
('compilation_level', 'SIMPLE_OPTIMIZATIONS'),
('output_format', 'text'),
('output_info', 'compiled_code'),
])
# Always use the following value for the Content-type header.
headers = { "Content-type": "application/x-www-form-urlencoded" }
conn = httplib.HTTPConnection('closure-compiler.appspot.com')
conn.request('POST', '/compile', params, headers)
response = conn.getresponse()
data = response.read()#.replace('\n','')
print "Writing compressed JS"
open(prefix+'native.min.js','w').write(data)
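# Hedged Python 3 sketch (not part of the original script, which targets
# Python 2 via httplib/urllib and print statements): the same Closure Compiler
# REST call written as a standalone function so it does not depend on the
# Python 2 code above; the endpoint and parameters mirror the original request.
def closure_compile_py3(js_code):
    import urllib.parse
    import urllib.request
    params = urllib.parse.urlencode([
        ('js_code', js_code),
        ('compilation_level', 'SIMPLE_OPTIMIZATIONS'),
        ('output_format', 'text'),
        ('output_info', 'compiled_code'),
    ]).encode('utf-8')
    req = urllib.request.Request(
        'https://closure-compiler.appspot.com/compile',
        data=params,
        headers={'Content-type': 'application/x-www-form-urlencoded'},
    )
    with urllib.request.urlopen(req) as resp:
        return resp.read().decode('utf-8')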
| 31.5
| 67
| 0.668737
|
13d005db04e6d2496073ed7dd6b35cf8d9e21343
| 2,605
|
py
|
Python
|
test/IECore/TypedPrimitiveOp.py
|
gcodebackups/cortex-vfx
|
72fa6c6eb3327fce4faf01361c8fcc2e1e892672
|
[
"BSD-3-Clause"
] | 5
|
2016-07-26T06:09:28.000Z
|
2022-03-07T03:58:51.000Z
|
test/IECore/TypedPrimitiveOp.py
|
turbosun/cortex
|
4bdc01a692652cd562f3bfa85f3dae99d07c0b15
|
[
"BSD-3-Clause"
] | null | null | null |
test/IECore/TypedPrimitiveOp.py
|
turbosun/cortex
|
4bdc01a692652cd562f3bfa85f3dae99d07c0b15
|
[
"BSD-3-Clause"
] | 3
|
2015-03-25T18:45:24.000Z
|
2020-02-15T15:37:18.000Z
|
##########################################################################
#
# Copyright (c) 2007, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import os.path
from IECore import *
class TestTypedPrimitiveOp( unittest.TestCase ) :
class MeshCopyOp( MeshPrimitiveOp ) :
def __init__( self ):
MeshPrimitiveOp.__init__( self, "MeshCopyOp", "A simple op to copy meshes" )
def modifyTypedPrimitive( self, mesh, operands ) :
# ModifyOp should automatically copy the input for us, so we can just
# return it.
return mesh
def testMeshPrimitiveOp( self ) :
""" Test TypedPrimitiveOp for use with MeshPrimitive """
op = TestTypedPrimitiveOp.MeshCopyOp()
inputMesh = MeshPrimitive()
outputMesh = op( input = inputMesh )
self.assert_( outputMesh.isInstanceOf( TypeId.MeshPrimitive ) )
self.failIf( inputMesh is outputMesh )
self.assertEqual( inputMesh, outputMesh )
if __name__ == "__main__":
unittest.main()
| 37.753623
| 79
| 0.702111
|
b887a82a2e6404b6cf33bd37dca2ad5e543a6c49
| 53
|
py
|
Python
|
pydeeplator/_version.py
|
sunhailin-Leo/deepL-translator-api
|
20aa5347ce5fa9be116488c729bb389602e229f5
|
[
"MIT"
] | 18
|
2020-04-02T06:07:51.000Z
|
2022-01-19T12:54:43.000Z
|
pydeeplator/_version.py
|
sunhailin-Leo/deepL-translator-api
|
20aa5347ce5fa9be116488c729bb389602e229f5
|
[
"MIT"
] | 3
|
2020-04-07T04:48:23.000Z
|
2022-01-19T12:55:40.000Z
|
pydeeplator/_version.py
|
sunhailin-Leo/deepL-translator-api
|
20aa5347ce5fa9be116488c729bb389602e229f5
|
[
"MIT"
] | 3
|
2020-04-27T04:15:59.000Z
|
2021-02-12T04:02:46.000Z
|
__version__ = "0.0.2"
__author__ = "sunhailin-Leo"
| 17.666667
| 29
| 0.679245
|
eece5b6be4f9a6c959d7890085dc9ca69487e6f6
| 2,053
|
py
|
Python
|
common_python/testing/helpers.py
|
ScienceStacks/PythonCommon
|
2732f928e13592f2089269731c8e2b04f856a77d
|
[
"MIT"
] | 1
|
2019-03-26T20:30:08.000Z
|
2019-03-26T20:30:08.000Z
|
common_python/testing/helpers.py
|
ScienceStacks/PythonCommon
|
2732f928e13592f2089269731c8e2b04f856a77d
|
[
"MIT"
] | 1
|
2019-05-31T21:59:30.000Z
|
2019-05-31T21:59:30.000Z
|
common_python/testing/helpers.py
|
ScienceStacks/PythonCommon
|
2732f928e13592f2089269731c8e2b04f856a77d
|
[
"MIT"
] | null | null | null |
""" Helpers for Tests. """
import os.path
import pandas as pd
DEBUG = True
def isValidDataFrame(df, expected_columns, min_rows=1,
nan_columns=None, key=None,
valid_dict=None):
"""
Simple test of a data frame.
a) Contains the expected columns
b) Has a minimum number of rows
c) Does not contain np.nan values
:param pd.DataFrame df: DataFrame to validate
:param list-of-str expected_columns:
:param int min_rows:
:param list-of-str nan_columns: columns where there may be nan
values
  :param str or list-of-str key: Column or columns that serve as a key
:param dict valid_dict: key=column name, value=function of value
:return bool: True if passes tests
"""
bads = [x for x in expected_columns if not x in df.columns.tolist()]
if len(bads) > 0:
if DEBUG:
import pdb; pdb.set_trace()
return False
if len(df) < min_rows:
if DEBUG:
import pdb; pdb.set_trace()
return False
if (key is not None) and len(key) > 0:
df_key = pd.DataFrame(df[key])
if len(key) == 1:
keys = df[key]
if None in keys:
keys.remove(None)
df_key[key] = keys
df_key = df_key.drop_duplicates()
if len(df_key) != len(df.drop_duplicates()):
if DEBUG:
import pdb; pdb.set_trace()
return False
if valid_dict is not None:
for col, func in valid_dict.items():
trues = [func(x) for x in df[col]]
if not all(trues):
import pdb; pdb.set_trace()
if DEBUG:
import pdb; pdb.set_trace()
return False
return True
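# Short, self-contained usage sketch of isValidDataFrame (not part of the
# original module).
def isValidDataFrameExample():
  df = pd.DataFrame({"id": [1, 2, 3], "score": [0.1, 0.5, 0.9]})
  # Passes: expected columns present, at least 2 rows, "id" is a unique key,
  # and every score satisfies the validity function.
  return isValidDataFrame(df, ["id", "score"], min_rows=2, key=["id"],
      valid_dict={"score": lambda v: 0 <= v <= 1})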
class MockFileDescriptor():
def __init__(self, path, mode):
self.fd = open(path, mode)
def __enter__(*pargs, **kwargs):
pass
def __exit__(*pargs, **kwargs):
pass
def close(self):
self.fd.close()
def read(*pargs):
self = pargs[0]
if len(pargs) > 1:
return self.fd.read(pargs[1])
else:
return self.fd.read()
def readlines(self):
return self.fd.readlines()
def write(*pargs, **kwargs):
pass
def writelines(*pargs, **kwargs):
pass
| 23.597701
| 70
| 0.632245
|
d05007e2dce7d45e935e832d7ff1822bd7f71a53
| 1,214
|
py
|
Python
|
helm-chart/images/kubessh/kubessh_config.py
|
yuvipanda/kubessh
|
ba6350b671f37d1bdcc7db9c19d39c26fef6ef85
|
[
"Apache-2.0"
] | 33
|
2018-04-03T17:43:55.000Z
|
2022-01-10T19:36:36.000Z
|
helm-chart/images/kubessh/kubessh_config.py
|
yuvipanda/kubessh
|
ba6350b671f37d1bdcc7db9c19d39c26fef6ef85
|
[
"Apache-2.0"
] | 30
|
2018-04-03T20:16:11.000Z
|
2020-11-23T06:20:42.000Z
|
helm-chart/images/kubessh/kubessh_config.py
|
kubessh/kubessh
|
ba6350b671f37d1bdcc7db9c19d39c26fef6ef85
|
[
"Apache-2.0"
] | 5
|
2020-04-16T15:01:46.000Z
|
2020-11-28T01:49:56.000Z
|
from ruamel.yaml import YAML
from kubessh.authentication.github import GitHubAuthenticator
from kubessh.authentication.gitlab import GitLabAuthenticator
from kubessh.authentication.dummy import DummyAuthenticator
yaml = YAML()
c.KubeSSH.host_key_path = '/etc/kubessh/secrets/kubessh.host-key'
c.KubeSSH.debug = True
with open('/etc/kubessh/config/values.yaml') as f:
config = yaml.load(f)
if config['auth']['type'] == 'github':
c.KubeSSH.authenticator_class = GitHubAuthenticator
c.KubeSSH.authenticator_class.allowed_users = config['auth']['github']['allowedUsers']
elif config['auth']['type'] == 'gitlab':
c.KubeSSH.authenticator_class = GitLabAuthenticator
c.KubeSSH.authenticator_class.instance_url = config['auth']['gitlab']['instanceUrl']
c.KubeSSH.authenticator_class.allowed_users = config['auth']['gitlab']['allowedUsers']
elif config['auth']['type'] == 'dummy':
c.KubeSSH.authenticator_class = DummyAuthenticator
if 'defaultNamespace' in config:
c.KubeSSH.default_namespace = config['defaultNamespace']
if 'podTemplate' in config:
c.UserPod.pod_template = config['podTemplate']
if 'pvcTemplates' in config:
c.UserPod.pvc_templates = config['pvcTemplates']
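# Hedged illustration (not part of the original file) of the mapping this
# script expects yaml.load() to return from values.yaml. The key names mirror
# the lookups above; the concrete values are hypothetical.
example_config = {
    "auth": {
        "type": "github",
        "github": {"allowedUsers": ["alice", "bob"]},
    },
    # Optional keys, passed straight through when present:
    "defaultNamespace": "kubessh-users",
    # "podTemplate": ...,   # -> c.UserPod.pod_template
    # "pvcTemplates": ...,  # -> c.UserPod.pvc_templates
}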
| 36.787879
| 90
| 0.757002
|
b2a868e3b0f668de6ba15a4add3eaf29595f4583
| 1,096
|
py
|
Python
|
base-excel/Demo1.py
|
meteor1993/python-learning
|
4ee574c9360caf6e63bb6ee2ef31fa6a9918fa40
|
[
"MIT"
] | 83
|
2019-10-15T06:54:06.000Z
|
2022-03-28T14:08:21.000Z
|
base-excel/Demo1.py
|
wenxuefeng3930/python-learning
|
4ee574c9360caf6e63bb6ee2ef31fa6a9918fa40
|
[
"MIT"
] | 1
|
2020-04-16T08:13:19.000Z
|
2020-07-14T01:52:46.000Z
|
base-excel/Demo1.py
|
wenxuefeng3930/python-learning
|
4ee574c9360caf6e63bb6ee2ef31fa6a9918fa40
|
[
"MIT"
] | 74
|
2019-11-02T08:10:36.000Z
|
2022-02-19T12:23:36.000Z
|
import xlsxwriter
import datetime
workbook = xlsxwriter.Workbook('demo.xlsx')
sheet1 = workbook.add_worksheet('test_sheet')
workfomat = workbook.add_format()
# Bold font
workfomat.set_bold(True)
# Cell border width
workfomat.set_border(1)
# Alignment
workfomat.set_align('left')
# Format numbers to two decimal places
workfomat.set_num_format('0.00')
heads = ['', '语文', '数学', '英语']
datas = [
['小明', 76, 85, 95],
['小红', 85, 58, 92],
['小王', 98, 96, 91]
]
sheet1.write_row('A1', heads, workfomat)
sheet1.write_row('A2', datas[0], workfomat)
sheet1.write_row('A3', datas[1], workfomat)
sheet1.write_row('A4', datas[2], workfomat)
fomat1 = workbook.add_format({'num_format': 'yyyy/mm/dd/ hh:mm:ss'})
sheet1.write_datetime('E5', datetime.datetime(2019, 11, 9, 22, 44, 26), fomat1)
sheet1.insert_image('I6', 'wx.jpg')
chart = workbook.add_chart({'type': 'column'})
chart.add_series({'values': '=test_sheet!$B$2:$B$4'})
chart.add_series({'values': '=test_sheet!$C$2:$C$4'})
chart.add_series({'values': '=test_sheet!$D$2:$D$4'})
sheet1.insert_chart('A7', chart)
workbook.close()
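# Illustrative note (not part of the original script): each chart series above
# references a worksheet range, e.g. '=test_sheet!$B$2:$B$4' is column B rows
# 2-4, i.e. the three scores written for the first subject by write_row;
# columns C and D cover the second and third subjects.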
| 24.355556
| 80
| 0.655109
|
3fb027e6f1ccd8038e26ce54c7f80ff4275d30c0
| 1,153
|
py
|
Python
|
setup.py
|
marqeta/marqeta-python
|
66fa690eb910825c510a391720b0fe717fac0234
|
[
"MIT"
] | 21
|
2019-04-12T09:02:17.000Z
|
2022-02-18T11:39:06.000Z
|
setup.py
|
marqeta/marqeta-python
|
66fa690eb910825c510a391720b0fe717fac0234
|
[
"MIT"
] | 1
|
2020-07-22T21:27:40.000Z
|
2020-07-23T17:38:43.000Z
|
setup.py
|
marqeta/marqeta-python
|
66fa690eb910825c510a391720b0fe717fac0234
|
[
"MIT"
] | 10
|
2019-05-08T14:20:37.000Z
|
2021-09-20T18:09:26.000Z
|
from setuptools import setup, find_packages
from marqeta.version import __version__
with open('requirements.txt', 'r') as requirements:
install_requires = requirements.read()
with open('README.md') as f:
long_description = f.read()
setup(
name="marqeta",
version=__version__,
description="Marqeta Python SDK",
author="Marqeta, Inc.",
url="https://github.com/marqeta/marqeta-python",
license="MIT",
keywords=["marqeta"],
project_urls={
"Documentation": "https://marqeta.com/api",
"Source Code": "https://github.com/marqeta/marqeta-python",
},
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Libraries :: Python Modules",
],
packages=find_packages(exclude=['tests', 'tests.*']),
include_package_data=True,
install_requires=install_requires,
long_description=long_description,
long_description_content_type="text/markdown",
)
| 32.027778
| 71
| 0.660885
|
a4e6f31ae88411363b680e641a5b521ba2d032c1
| 287
|
py
|
Python
|
test/espnet2/bin/test_enh_scoring.py
|
texpomru13/espnet
|
7ef005e832e2fb033f356c16f54e0f08762fb4b0
|
[
"Apache-2.0"
] | 5,053
|
2017-12-13T06:21:41.000Z
|
2022-03-31T13:38:29.000Z
|
test/espnet2/bin/test_enh_scoring.py
|
texpomru13/espnet
|
7ef005e832e2fb033f356c16f54e0f08762fb4b0
|
[
"Apache-2.0"
] | 3,666
|
2017-12-14T05:58:50.000Z
|
2022-03-31T22:11:49.000Z
|
test/espnet2/bin/test_enh_scoring.py
|
texpomru13/espnet
|
7ef005e832e2fb033f356c16f54e0f08762fb4b0
|
[
"Apache-2.0"
] | 1,709
|
2017-12-13T01:02:42.000Z
|
2022-03-31T11:57:45.000Z
|
from argparse import ArgumentParser
import pytest
from espnet2.bin.enh_scoring import get_parser
from espnet2.bin.enh_scoring import main
def test_get_parser():
assert isinstance(get_parser(), ArgumentParser)
def test_main():
with pytest.raises(SystemExit):
main()
| 17.9375
| 51
| 0.766551
|
03695a84bd5fa9630bf6c787f19ddc47a5103a76
| 4,003
|
py
|
Python
|
dependencies/panda/Panda3D-1.10.0-x64/direct/showbase/Job.py
|
CrankySupertoon01/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 3
|
2018-03-09T12:07:29.000Z
|
2021-02-25T06:50:25.000Z
|
direct/src/showbase/Job.py
|
Sinkay/panda3d
|
16bfd3750f726a8831771b81649d18d087917fd5
|
[
"PHP-3.01",
"PHP-3.0"
] | 1
|
2018-07-28T20:07:04.000Z
|
2018-07-30T18:28:34.000Z
|
direct/src/showbase/Job.py
|
Sinkay/panda3d
|
16bfd3750f726a8831771b81649d18d087917fd5
|
[
"PHP-3.01",
"PHP-3.0"
] | 2
|
2019-12-02T01:39:10.000Z
|
2021-02-13T22:41:00.000Z
|
from direct.showbase.DirectObject import DirectObject
from direct.showbase.PythonUtil import ScratchPad, SerialNumGen
if __debug__:
from panda3d.core import PStatCollector
class Job(DirectObject):
# Base class for cpu-intensive or non-time-critical operations that
# are run through the JobManager.
# values to yield from your run() generator method
Done = object()
Continue = None # 'yield None' is acceptable in place of 'yield Job.Continue'
Sleep = object() # yield any remaining time for this job until next frame
# These priorities determine how many timeslices a job gets relative to other
# jobs. A job with priority of 1000 will run 10 times more often than a job
# with priority of 100.
Priorities = ScratchPad(Min=1, Low=100, Normal=1000, High=10000)
_SerialGen = SerialNumGen()
def __init__(self, name):
self._name = name
self._generator = None
self._id = Job._SerialGen.next()
self._printing = False
self._priority = Job.Priorities.Normal
self._finished = False
if __debug__:
self._pstats = PStatCollector("App:Show code:jobManager:%s" % self._name)
def destroy(self):
del self._name
del self._generator
del self._printing
def getFinishedEvent(self):
return 'job-finished-%s' % self._id
def run(self):
# this is a generator
# override and do your processing
# yield Job.Continue when possible/reasonable
# try not to run longer than the JobManager's timeslice between yields
#
# when done, yield Job.Done
#
raise "don't call down"
def getPriority(self):
return self._priority
def setPriority(self, priority):
self._priority = priority
def printingBegin(self):
self._printing = True
def printingEnd(self):
self._printing = False
def resume(self):
# called every time JobManager is going to start running this job
"""
if self._printing:
# we may be suspended/resumed multiple times per frame, that gets spammy
# if we need to pick out the output of a job, put a prefix onto each line
# of the output
print 'JOB:%s:RESUME' % self._name
"""
pass
def suspend(self):
# called when JobManager is going to stop running this job for a while
"""
if self._printing:
#print 'JOB:%s:SUSPEND' % self._name
pass
"""
pass
def _setFinished(self):
self._finished = True
self.finished()
def isFinished(self):
return self._finished
def finished(self):
# called when the job finishes and has been removed from the JobManager
pass
def getJobName(self):
return self._name
def _getJobId(self):
return self._id
def _getGenerator(self):
if self._generator is None:
self._generator = self.run()
return self._generator
def _cleanupGenerator(self):
if self._generator is not None:
self._generator = None
if __debug__: # __dev__ not yet available at this point
from direct.showbase.Job import Job
class TestJob(Job):
def __init__(self):
Job.__init__(self, 'TestJob')
self._counter = 0
self._accum = 0
self._finished = False
def run(self):
self.printingBegin()
while True:
while self._accum < 100:
self._accum += 1
print 'counter = %s, accum = %s' % (self._counter, self._accum)
yield None
self._accum = 0
self._counter += 1
if self._counter >= 100:
print 'Job.Done'
self.printingEnd()
yield Job.Done
else:
yield None
def addTestJob():
jobMgr.add(TestJob())
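    # Hedged usage sketch (not part of the original file): the JobManager is
    # expected to fire the event name returned by job.getFinishedEvent() once
    # the job yields Job.Done, so a caller can listen for completion roughly
    # like this (jobMgr, DirectObject.acceptOnce and the hypothetical
    # handleTestJobDone callback are assumptions here):
    #
    #   job = TestJob()
    #   listener = DirectObject()
    #   listener.acceptOnce(job.getFinishedEvent(), handleTestJobDone)
    #   jobMgr.add(job)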
| 30.792308
| 85
| 0.587559
|
ffd1f7252f8490538850365c3e9142cb7070edb4
| 1,091
|
py
|
Python
|
xos/synchronizer/veg-synchronizer.py
|
opencord/vEG
|
3f332f376b800ca5e486a14a6ecf55f0b2974e50
|
[
"Apache-2.0"
] | 3
|
2017-09-13T09:45:59.000Z
|
2018-01-26T03:02:34.000Z
|
xos/synchronizer/veg-synchronizer.py
|
opencord/vEG
|
3f332f376b800ca5e486a14a6ecf55f0b2974e50
|
[
"Apache-2.0"
] | null | null | null |
xos/synchronizer/veg-synchronizer.py
|
opencord/vEG
|
3f332f376b800ca5e486a14a6ecf55f0b2974e50
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
# This imports and runs ../../xos-observer.py
import importlib
import os
import sys
from xosconfig import Config
config_file = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + '/veg_config.yaml')
Config.init(config_file, 'synchronizer-config-schema.yaml')
observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../../synchronizers/new_base")
sys.path.append(observer_path)
mod = importlib.import_module("xos-synchronizer")
mod.main()
| 31.171429
| 104
| 0.767186
|
eeb3ad84b0dc55300a5ccc598a712ac2995deb82
| 7,886
|
py
|
Python
|
docs/conf.py
|
ebborchers/kaggle_petfinder
|
4a5992a718e0e323e943824c7a0227e0ff98128d
|
[
"FTL"
] | null | null | null |
docs/conf.py
|
ebborchers/kaggle_petfinder
|
4a5992a718e0e323e943824c7a0227e0ff98128d
|
[
"FTL"
] | null | null | null |
docs/conf.py
|
ebborchers/kaggle_petfinder
|
4a5992a718e0e323e943824c7a0227e0ff98128d
|
[
"FTL"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# petfinder_kaggle documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'petfinder_kaggle'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'kaggle_petfinderdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'kaggle_petfinder.tex',
u'petfinder_kaggle Documentation',
u"Eric Borchers", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'kaggle_petfinder', u'petfinder_kaggle Documentation',
[u"Eric Borchers"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'kaggle_petfinder', u'petfinder_kaggle Documentation',
u"Eric Borchers", 'petfinder_kaggle',
'Kaggle competition, predicting adoption rates for animals on petfinder.my', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 32.187755
| 99
| 0.710373
|
b994043f502a756e5c47402e7c7679f129c5d331
| 74,756
|
py
|
Python
|
instabot/api/api.py
|
Tkd-Alex/instabot
|
230eba37c2d9f229bf9f08eb804a166a26c1d60e
|
[
"Apache-2.0"
] | 5
|
2019-08-15T14:47:56.000Z
|
2021-11-24T10:53:15.000Z
|
instabot/api/api.py
|
Tkd-Alex/instabot
|
230eba37c2d9f229bf9f08eb804a166a26c1d60e
|
[
"Apache-2.0"
] | null | null | null |
instabot/api/api.py
|
Tkd-Alex/instabot
|
230eba37c2d9f229bf9f08eb804a166a26c1d60e
|
[
"Apache-2.0"
] | 2
|
2019-08-23T23:39:21.000Z
|
2021-04-18T19:18:39.000Z
|
import base64
import datetime
import hashlib
import hmac
import json
import logging
import os
import random
import sys
import time
import uuid
import pytz
import requests
import requests.utils
import six.moves.urllib as urllib
from requests_toolbelt import MultipartEncoder
from tqdm import tqdm
from . import config, devices
from .api_login import (
change_device_simulation,
generate_all_uuids,
load_uuid_and_cookie,
login_flow,
pre_login_flow,
reinstall_app_simulation,
save_uuid_and_cookie,
set_device,
sync_device_features,
sync_launcher,
sync_user_features,
)
from .api_photo import configure_photo, download_photo, upload_photo, rupload_igphoto
from .api_story import configure_story, download_story, upload_story_photo
from .api_video import configure_video, download_video, upload_video
from .prepare import delete_credentials, get_credentials
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
PY2 = sys.version_info[0] == 2
class API(object):
def __init__(self, device=None, base_path="", save_logfile=True, log_filename=None):
# Setup device and user_agent
self.device = device or devices.DEFAULT_DEVICE
self.cookie_fname = None
self.base_path = base_path
self.is_logged_in = False
self.last_login = None
self.last_response = None
self.total_requests = 0
# Setup logging
self.logger = logging.getLogger("[instabot_{}]".format(id(self)))
if not os.path.exists("./config/"):
os.makedirs("./config/") # create base_path if not exists
if save_logfile is True:
if log_filename is None:
log_filename = os.path.join(
base_path, "instabot_{}.log".format(id(self))
)
fh = logging.FileHandler(filename=log_filename)
fh.setLevel(logging.INFO)
fh.setFormatter(logging.Formatter("%(asctime)s %(message)s"))
self.logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
self.logger.addHandler(ch)
self.logger.setLevel(logging.DEBUG)
self.last_json = None
def set_user(self, username, password, generate_all_uuids=True, set_device=True):
self.username = username
self.password = password
self.logger = logging.getLogger("[instabot_{}]".format(self.username))
if set_device is True:
self.set_device()
if generate_all_uuids is True:
self.generate_all_uuids()
def set_contact_point_prefill(self, usage="prefill"):
data = json.dumps(
{
"id": self.uuid,
"phone_id": self.phone_id,
"_csrftoken": self.token,
"usage": usage,
}
)
return self.send_request("accounts/contact_point_prefill/", data, login=True)
def get_suggested_searches(self, _type="users"):
return self.send_request(
"fbsearch/suggested_searches/", self.json_data({"type": _type})
)
def read_msisdn_header(self, usage="default"):
data = json.dumps({"device_id": self.uuid, "mobile_subno_usage": usage})
return self.send_request(
"accounts/read_msisdn_header/",
data,
login=True,
headers={"X-DEVICE-ID": self.uuid},
)
def log_attribution(self, usage="default"):
data = json.dumps({"adid": self.advertising_id})
return self.send_request("attribution/log_attribution/", data, login=True)
# ====== ALL METHODS IMPORT FROM api_login ====== #
def sync_device_features(self, login=False):
return sync_device_features(self, login)
def sync_launcher(self, login=False):
return sync_launcher(self, login)
def sync_user_features(self):
return sync_user_features(self)
def pre_login_flow(self):
return pre_login_flow(self)
def login_flow(self, just_logged_in=False, app_refresh_interval=1800):
return login_flow(self, just_logged_in, app_refresh_interval)
def set_device(self):
return set_device(self)
def generate_all_uuids(self):
return generate_all_uuids(self)
def reinstall_app_simulation(self):
return reinstall_app_simulation(self)
def change_device_simulation(self):
return change_device_simulation(self)
def load_uuid_and_cookie(self, load_uuid=True, load_cookie=True):
return load_uuid_and_cookie(self, load_uuid=load_uuid, load_cookie=load_cookie)
def save_uuid_and_cookie(self):
return save_uuid_and_cookie(self)
def login(
self,
username=None,
password=None,
force=False,
proxy=None,
use_cookie=True,
use_uuid=True,
cookie_fname=None,
ask_for_code=False,
set_device=True,
generate_all_uuids=True,
solve_challenge=False,
solve_2fa=False,
is_threaded=True,
):
if password is None:
username, password = get_credentials(username=username)
self.set_user(username, password)
self.session = requests.Session()
self.proxy = proxy
self.set_proxy() # Only happens if `self.proxy`
self.cookie_fname = cookie_fname
if self.cookie_fname is None:
cookie_fname = "{username}_uuid_and_cookie.json".format(username=username)
self.cookie_fname = os.path.join(self.base_path, cookie_fname)
cookie_is_loaded = False
if use_cookie is True:
# try:
if (
self.load_uuid_and_cookie(load_cookie=use_cookie, load_uuid=use_uuid)
is True
):
if (
self.login_flow(False) is True
): # Check if the token loaded is valid.
cookie_is_loaded = True
self.save_successful_login()
else:
self.logger.info(
"Login flow failed, the cookie is broken. Relogin again."
)
set_device = generate_all_uuids = False
force = True
# except Exception:
# print("The cookie is not found, but don't worry `instabot` will create it for you using your login details.")
if not cookie_is_loaded and (not self.is_logged_in or force):
self.session = requests.Session()
self.set_proxy() # Only happens if `self.proxy`
if use_uuid is True:
if (
self.load_uuid_and_cookie(
load_cookie=use_cookie, load_uuid=use_uuid
)
is False
):
if set_device is True:
self.set_device()
if generate_all_uuids is True:
self.generate_all_uuids()
self.pre_login_flow()
data = json.dumps(
{
"phone_id": self.phone_id,
"_csrftoken": self.token,
"username": self.username,
"guid": self.uuid,
"device_id": self.device_id,
"password": self.password,
"login_attempt_count": "0",
}
)
if self.send_request("accounts/login/", data, True):
self.save_successful_login()
self.login_flow(True)
# self.device_id = self.uuid
return True
elif (
self.last_json.get("error_type", "") == "checkpoint_challenge_required"
):
self.logger.info("Checkpoint challenge required...")
if solve_challenge is True:
solved = self.solve_challenge()
if solved:
self.save_successful_login()
self.login_flow(True)
return True
else:
self.save_failed_login()
return False
else:
self.save_failed_login()
return False
elif self.last_json.get("two_factor_required"):
self.logger.info("Two-factor authentication required")
if solve_2fa is True:
two_factor_code = input("Enter 2FA verification code: ")
two_factor_id = self.last_json["two_factor_info"][
"two_factor_identifier"
]
login = self.session.post(
config.API_URL + "accounts/two_factor_login/",
data={
"username": self.username,
"verification_code": two_factor_code,
"two_factor_identifier": two_factor_id,
"password": self.password,
"device_id": self.device_id,
"ig_sig_key_version": 4,
},
allow_redirects=True,
)
if login.status_code == 200:
resp_json = json.loads(login.text)
if resp_json["status"] != "ok":
if "message" in resp_json:
self.logger.error(
"Login error: {}".format(resp_json["message"])
)
else:
self.logger.error(
'Login error: "{}" status and message {}.'.format(
resp_json["status"], login.text
)
)
self.save_failed_login()
return False
self.save_successful_login()
self.login_flow(True)
return True
else:
self.logger.error(
"Two-factor authentication request returns {} error with message {} !".format(
login.status_code, login.text
)
)
self.save_failed_login()
return False
# End of Interactive Two-Factor Authentication
else:
self.save_failed_login()
return False
else:
self.save_failed_login()
return False
def save_successful_login(self):
self.is_logged_in = True
self.last_login = time.time()
self.logger.info("Logged-in successfully as '{}'!".format(self.username))
def save_failed_login(self):
try:
self.logger.info(self.last_json.get("message", ""))
except Exception as e:
self.logger.info("Username or password is incorrect.")
self.is_logged_in = False
delete_credentials()
def solve_challenge(self):
challenge_url = self.last_json["challenge"]["api_path"][1:]
try:
self.send_request(challenge_url, None, login=True, with_signature=False)
except Exception as e:
self.logger.error("solve_challenge; {}".format(e))
return False
choices = self.get_challenge_choices()
for choice in choices:
print(choice)
code = input("Insert choice: ")
data = json.dumps({"choice": code})
try:
self.send_request(challenge_url, data, login=True)
except Exception as e:
self.logger.error(e)
return False
print("A code has been sent to the method selected, please check.")
code = input("Insert code: ")
data = json.dumps({"security_code": code})
try:
self.send_request(challenge_url, data, login=True)
except Exception as e:
self.logger.error(e)
return False
worked = (
("logged_in_user" in self.last_json)
and (self.last_json.get("action", "") == "close")
and (self.last_json.get("status", "") == "ok")
)
if worked:
return True
self.logger.error("Not possible to log in. Reset and try again")
return False
def get_challenge_choices(self):
last_json = self.last_json
choices = []
if last_json.get("step_name", "") == "select_verify_method":
choices.append("Checkpoint challenge received")
if "phone_number" in last_json["step_data"]:
choices.append("0 - Phone")
if "email" in last_json["step_data"]:
choices.append("1 - Email")
if last_json.get("step_name", "") == "delta_login_review":
choices.append("Login attempt challenge received")
choices.append("0 - It was me")
choices.append("1 - It wasn't me")
if not choices:
choices.append(
'"{}" challenge received'.format(last_json.get("step_name", "Unknown"))
)
choices.append("0 - Default")
return choices
def was_me(self, challenge_url, was_me=True):
data = json.dumps({"choice": 0 if was_me is True else 1})
return self.send_request(challenge_url, data, login=True)
# {"message": "consent_required", "consent_data": {"headline": "Updates to Our Terms and Data Policy", "content": "We've updated our Terms and made some changes to our Data Policy. Please take a moment to review these ...", "button_text": "Review Now"}, "status": "fail"}
# /api/v1/consent/existing_user_flow/
# signed_body=ae07b2fbf6fd391d26285b242215e97f6b8f9fa015c68a32027c0d5624a35766.{"current_screen_key":"dob","_csrftoken":"74UWcG2CYmiLqRPIYENHkrimkFdhsLnL","day":"3","_uid":"19450998546","year":"1994","_uuid":"ffbe7b2f-1663-43d4-847b-c3f51803637e","month":"9"}&ig_sig_key_version=4
def consent_required(self, day=None, month=None, year=None):
if day is None:
day = random.randint(1, 28)
if month is None:
month = random.randint(1, 12)
if year is None:
year = random.randint(1961, 2000)
data = self.json_data(
{
"current_screen_key": "dob",
"day": str(day),
"month": str(month),
"year": str(year),
}
)
url = "consent/existing_user_flow/"
return self.send_request(url, data)
def logout(self, *args, **kwargs):
if not self.is_logged_in:
return True
data = json.dumps({})
self.is_logged_in = not self.send_request(
"accounts/logout/", data, with_signature=False
)
return not self.is_logged_in
def set_proxy(self):
if self.proxy:
parsed = urllib.parse.urlparse(self.proxy)
scheme = "http://" if not parsed.scheme else ""
self.session.proxies["http"] = scheme + self.proxy
self.session.proxies["https"] = scheme + self.proxy
def send_request( # noqa: C901
self,
endpoint,
post=None,
login=False,
with_signature=True,
headers=None,
extra_sig=None,
):
if not self.is_logged_in and not login:
msg = "Not logged in!"
self.logger.critical(msg)
raise Exception(msg)
self.session.headers.update(config.REQUEST_HEADERS)
self.session.headers.update(
{
"User-Agent": self.user_agent,
"X-IG-Connection-Speed": "{0:d}kbps".format(random.randint(1000, 5000)),
"X-IG-Bandwidth-Speed-KBPS": str(random.randint(7000, 10000)),
"X-IG-Bandwidth-TotalBytes-B": str(random.randint(500000, 900000)),
"X-IG-Bandwidth-TotalTime-MS": str(random.randint(50, 150)),
}
)
if headers:
self.session.headers.update(headers)
try:
self.total_requests += 1
if post is not None: # POST
if with_signature:
# Only `send_direct_item` doesn't need a signature
post = self.generate_signature(post)
if extra_sig is not None and extra_sig != []:
post += "&".join(extra_sig)
response = self.session.post(config.API_URL + endpoint, data=post)
else: # GET
response = self.session.get(config.API_URL + endpoint)
except Exception: # as e:
# self.logger.warning(str(e))
return False
self.last_response = response
if response.status_code == 200:
try:
self.last_json = json.loads(response.text)
return True
except JSONDecodeError as e:
# self.logger.warning(str(e))
return False
else:
# self.logger.warning("[{}] : {} ...".format(response.status_code, response.text[:170]))
try: # For debugging
self.last_json = json.loads(response.text)
except JSONDecodeError:
pass
# if self.last_json.get("message", "") == "consent_required":
# self.consent_required()
return False
@property
def cookie_dict(self):
return self.session.cookies.get_dict()
@property
def token(self):
return self.cookie_dict["csrftoken"]
@property
def user_id(self):
try:
return self.cookie_dict["ds_user_id"]
except Exception:
return self.cookie_dict["sessionid"].split(":")[0]
@property
def rank_token(self):
return "{}_{}".format(self.user_id, self.uuid)
@property
def default_data(self):
return {"_uuid": self.uuid, "_uid": self.user_id, "_csrftoken": self.token}
def json_data(self, data=None):
"""Adds the default_data to data and dumps it to a json."""
if data is None:
data = {}
data.update(self.default_data)
return json.dumps(data)
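# Illustrative sketch (mine, not upstream docs): json_data() merges default_data into the
# payload before dumping, so a call such as
#     self.json_data({"media_id": "123"})
# yields a JSON string carrying media_id plus _uuid, _uid and _csrftoken.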
def action_data(self, data):
_data = {"radio_type": "wifi-none", "device_id": self.device_id}
data.update(_data)
return data
def auto_complete_user_list(self):
return self.send_request("friendships/autocomplete_user_list/")
def batch_fetch(self):
data = {
"scale": 3,
"version": 1,
"vc_policy": "default",
"surfaces_to_triggers": '{"5734":["instagram_feed_prompt"],"4715":["instagram_feed_header"],"5858":["instagram_feed_tool_tip"]}',
"surfaces_to_queries": '{"5734":"viewer() {eligible_promotions.trigger_context_v2(<trigger_context_v2>).ig_parameters(<ig_parameters>).trigger_name(<trigger_name>).surface_nux_id(<surface>).external_gating_permitted_qps(<external_gating_permitted_qps>).supports_client_filters(true).include_holdouts(true) {edges {client_ttl_seconds,log_eligibility_waterfall,is_holdout,priority,time_range {start,end},node {id,promotion_id,logging_data,max_impressions,triggers,contextual_filters {clause_type,filters {filter_type,unknown_action,value {name,required,bool_value,int_value,string_value},extra_datas {name,required,bool_value,int_value,string_value}},clauses {clause_type,filters {filter_type,unknown_action,value {name,required,bool_value,int_value,string_value},extra_datas {name,required,bool_value,int_value,string_value}},clauses {clause_type,filters {filter_type,unknown_action,value {name,required,bool_value,int_value,string_value},extra_datas {name,required,bool_value,int_value,string_value}},clauses {clause_type,filters {filter_type,unknown_action,value {name,required,bool_value,int_value,string_value},extra_datas {name,required,bool_value,int_value,string_value}}}}}},is_uncancelable,template {name,parameters {name,required,bool_value,string_value,color_value,}},creatives {title {text},content {text},footer {text},social_context {text},social_context_images,primary_action{title {text},url,limit,dismiss_promotion},secondary_action{title {text},url,limit,dismiss_promotion},dismiss_action{title {text},url,limit,dismiss_promotion},image.scale(<scale>) {uri,width,height}}}}}}","4715":"viewer() {eligible_promotions.trigger_context_v2(<trigger_context_v2>).ig_parameters(<ig_parameters>).trigger_name(<trigger_name>).surface_nux_id(<surface>).external_gating_permitted_qps(<external_gating_permitted_qps>).supports_client_filters(true).include_holdouts(true) {edges {client_ttl_seconds,log_eligibility_waterfall,is_holdout,priority,time_range {start,end},node {id,promotion_id,logging_data,max_impressions,triggers,contextual_filters {clause_type,filters {filter_type,unknown_action,value {name,required,bool_value,int_value,string_value},extra_datas {name,required,bool_value,int_value,string_value}},clauses {clause_type,filters {filter_type,unknown_action,value {name,required,bool_value,int_value,string_value},extra_datas {name,required,bool_value,int_value,string_value}},clauses {clause_type,filters {filter_type,unknown_action,value {name,required,bool_value,int_value,string_value},extra_datas {name,required,bool_value,int_value,string_value}},clauses {clause_type,filters {filter_type,unknown_action,value {name,required,bool_value,int_value,string_value},extra_datas {name,required,bool_value,int_value,string_value}}}}}},is_uncancelable,template {name,parameters {name,required,bool_value,string_value,color_value,}},creatives {title {text},content {text},footer {text},social_context {text},social_context_images,primary_action{title {text},url,limit,dismiss_promotion},secondary_action{title {text},url,limit,dismiss_promotion},dismiss_action{title {text},url,limit,dismiss_promotion},image.scale(<scale>) {uri,width,height}}}}}}","5858":"viewer() {eligible_promotions.trigger_context_v2(<trigger_context_v2>).ig_parameters(<ig_parameters>).trigger_name(<trigger_name>).surface_nux_id(<surface>).external_gating_permitted_qps(<external_gating_permitted_qps>).supports_client_filters(true).include_holdouts(true) {edges {client_ttl_seconds,log_eligibility_waterfall,is_holdout,priority,time_range {start,end},node 
{id,promotion_id,logging_data,max_impressions,triggers,contextual_filters {clause_type,filters {filter_type,unknown_action,value {name,required,bool_value,int_value,string_value},extra_datas {name,required,bool_value,int_value,string_value}},clauses {clause_type,filters {filter_type,unknown_action,value {name,required,bool_value,int_value,string_value},extra_datas {name,required,bool_value,int_value,string_value}},clauses {clause_type,filters {filter_type,unknown_action,value {name,required,bool_value,int_value,string_value},extra_datas {name,required,bool_value,int_value,string_value}},clauses {clause_type,filters {filter_type,unknown_action,value {name,required,bool_value,int_value,string_value},extra_datas {name,required,bool_value,int_value,string_value}}}}}},is_uncancelable,template {name,parameters {name,required,bool_value,string_value,color_value,}},creatives {title {text},content {text},footer {text},social_context {text},social_context_images,primary_action{title {text},url,limit,dismiss_promotion},secondary_action{title {text},url,limit,dismiss_promotion},dismiss_action{title {text},url,limit,dismiss_promotion},image.scale(<scale>) {uri,width,height}}}}}}"}', # Just copied from request.
}
data = self.json_data(data)
return self.send_request("qp/batch_fetch/", data)
def get_timeline_feed(self, options=[]):
headers = {"X-Ads-Opt-Out": "0", "X-DEVICE-ID": self.uuid}
data = {
"_csrftoken": self.token,
"_uuid": self.uuid,
"is_prefetch": 0,
"phone_id": self.phone_id,
"device_id": self.uuid,
"client_session_id": self.client_session_id,
"battery_level": random.randint(25, 100),
"is_charging": random.randint(0, 1),
"will_sound_on": random.randint(0, 1),
"is_on_screen": True,
"timezone_offset": datetime.datetime.now(pytz.timezone("CET")).strftime(
"%z"
),
}
if "is_pull_to_refresh" in options:
data["reason"] = "pull_to_refresh"
data["is_pull_to_refresh"] = "1"
elif "is_pull_to_refresh" not in options:
data["reason"] = "cold_start_fetch"
data["is_pull_to_refresh"] = "0"
# unseen_posts
# feed_view_info
# seen_posts
if "push_disabled" in options:
data["push_disabled"] = "true"
if "recovered_from_crash" in options:
data["recovered_from_crash"] = "1"
data = json.dumps(data)
return self.send_request(
"feed/timeline/", data, with_signature=False, headers=headers
)
def get_megaphone_log(self):
return self.send_request("megaphone/log/")
def expose(self):
data = self.json_data(
{"id": self.uuid, "experiment": "ig_android_profile_contextual_feed"}
)
return self.send_request("qe/expose/", data)
# ====== PHOTO METHODS ====== #
def upload_photo(
self,
photo,
caption=None,
upload_id=None,
from_video=False,
force_resize=False,
options={},
):
"""Upload photo to Instagram
@param photo Path to photo file (String)
@param caption Media description (String)
@param upload_id Unique upload_id (String). When None, it is generated automatically
@param from_video A flag that signals whether the photo comes from a video or stands alone (Boolean, DEPRECATED: not used)
@param force_resize Force photo resize (Boolean)
@param options Object with additional options, e.g. configure_timeout, rename (Dict)
Designed to reduce the number of function arguments!
This is the simplest request object.
@return Boolean
"""
return upload_photo(
self, photo, caption, upload_id, from_video, force_resize, options
)
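# Hedged usage sketch, assuming `api` is a logged-in instance of this class and
# "photo.jpg" exists locally (both names are illustrative placeholders):
#     ok = api.upload_photo("photo.jpg", caption="hello", options={"configure_timeout": 15})
#     print(ok, api.last_json.get("status"))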
def download_photo(self, media_id, filename, media=False, folder="photos"):
return download_photo(self, media_id, filename, media, folder)
def configure_photo(self, upload_id, photo, caption=""):
return configure_photo(self, upload_id, photo, caption)
# ====== STORY METHODS ====== #
def download_story(self, filename, story_url, username):
return download_story(self, filename, story_url, username)
def upload_story_photo(self, photo, upload_id=None):
return upload_story_photo(self, photo, upload_id)
def configure_story(self, upload_id, photo):
return configure_story(self, upload_id, photo)
# ====== VIDEO METHODS ====== #
def upload_video(
self, video, caption=None, upload_id=None, thumbnail=None, options={}
):
"""Upload video to Instagram
@param video Path to video file (String)
@param caption Media description (String)
@param upload_id Unique upload_id (String). When None, it is generated automatically
@param thumbnail Path to thumbnail for video (String). When None, the thumbnail is generated automatically
@param options Object with additional options, e.g. configure_timeout, rename_thumbnail, rename (Dict)
Designed to reduce the number of function arguments!
This is the simplest request object.
@return Object with state of uploading to Instagram (or False)
"""
return upload_video(self, video, caption, upload_id, thumbnail, options)
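# Hedged usage sketch (placeholder paths, not taken from upstream docs):
#     result = api.upload_video("clip.mp4", caption="demo", thumbnail="thumb.jpg",
#                               options={"configure_timeout": 30})
#     # per the docstring above, `result` is the configure response on success, or False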
def download_video(self, media_id, filename, media=False, folder="video"):
return download_video(self, media_id, filename, media, folder)
def configure_video(
self,
upload_id,
video,
thumbnail,
width,
height,
duration,
caption="",
options={},
):
"""Post Configure Video (send caption, thumbnail and more else to Instagram)
@param upload_id Unique upload_id (String). Received from "upload_video"
@param video Path to video file (String)
@param thumbnail Path to thumbnail for video (String). When None, the thumbnail is generated automatically
@param width Width in px (Integer)
@param height Height in px (Integer)
@param duration Duration in seconds (Integer)
@param caption Media description (String)
@param options Object with additional options, e.g. configure_timeout, rename_thumbnail, rename (Dict)
Designed to reduce the number of function arguments!
This is the simplest request object.
"""
return configure_video(
self, upload_id, video, thumbnail, width, height, duration, caption, options
)
# ====== MEDIA METHODS ====== #
def edit_media(self, media_id, captionText=""):
data = self.json_data({"caption_text": captionText})
url = "media/{media_id}/edit_media/".format(media_id=media_id)
return self.send_request(url, data)
def remove_self_tag(self, media_id):
data = self.json_data()
url = "media/{media_id}/remove/".format(media_id=media_id)
return self.send_request(url, data)
def media_info(self, media_id):
# data = self.json_data({'media_id': media_id})
url = "media/{media_id}/info/".format(media_id=media_id)
return self.send_request(url)
def archive_media(self, media, undo=False):
action = "only_me" if not undo else "undo_only_me"
data = self.json_data({"media_id": media["id"]})
url = "media/{media_id}/{action}/?media_type={media_type}".format(
media_id=media["id"], action=action, media_type=media["media_type"]
)
return self.send_request(url, data)
def delete_media(self, media):
data = self.json_data({"media_id": media.get("id")})
url = "media/{media_id}/delete/".format(media_id=media.get("id"))
return self.send_request(url, data)
def gen_user_breadcrumb(self, size):
key = "iN4$aGr0m"
dt = int(time.time() * 1000)
time_elapsed = random.randint(500, 1500) + size * random.randint(500, 1500)
text_change_event_count = max(1, size / random.randint(3, 5))
data = "{size!s} {elapsed!s} {count!s} {dt!s}".format(
**{
"size": size,
"elapsed": time_elapsed,
"count": text_change_event_count,
"dt": dt,
}
)
return "{!s}\n{!s}\n".format(
base64.b64encode(
hmac.new(
key.encode("ascii"), data.encode("ascii"), digestmod=hashlib.sha256
).digest()
),
base64.b64encode(data.encode("ascii")),
)
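# Note (my summary of the method above, not an upstream comment): the breadcrumb is
# "<base64(HMAC-SHA256(key, data))>\n<base64(data)>\n", where data packs the comment
# length, a simulated typing duration, an event count and the current timestamp, e.g.
#     self.gen_user_breadcrumb(len("nice shot!"))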
def check_offensive_comment(self, comment_text):
return self.send_request(
endpoint="media/comment/check_offensive_comment/",
post=self.json_data(self.action_data({"comment_text": comment_text})),
)
def comment(self, media_id, comment_text):
return self.send_request(
endpoint="media/{media_id}/comment/".format(media_id=media_id),
post=self.json_data(
self.action_data(
{
"container_module": "comments_v2",
"user_breadcrumb": self.gen_user_breadcrumb(len(comment_text)),
"idempotence_token": self.generate_UUID(True),
"comment_text": comment_text,
}
)
),
)
def reply_to_comment(self, media_id, comment_text, parent_comment_id):
data = self.json_data(
{"comment_text": comment_text, "replied_to_comment_id": parent_comment_id}
)
url = "media/{media_id}/comment/".format(media_id=media_id)
return self.send_request(url, data)
def delete_comment(self, media_id, comment_id):
data = self.json_data()
url = "media/{media_id}/comment/{comment_id}/delete/"
url = url.format(media_id=media_id, comment_id=comment_id)
return self.send_request(url, data)
def get_comment_likers(self, comment_id):
url = "media/{comment_id}/comment_likers/?".format(comment_id=comment_id)
return self.send_request(url)
def get_media_likers(self, media_id):
url = "media/{media_id}/likers/?".format(media_id=media_id)
return self.send_request(url)
def like_comment(self, comment_id):
data = self.json_data(
{
"is_carousel_bumped_post": "false",
"container_module": "comments_v2",
"feed_position": "0",
}
)
url = "media/{comment_id}/comment_like/".format(comment_id=comment_id)
return self.send_request(url, data)
def unlike_comment(self, comment_id):
data = self.json_data(
{
"is_carousel_bumped_post": "false",
"container_module": "comments_v2",
"feed_position": "0",
}
)
url = "media/{comment_id}/comment_unlike/".format(comment_id=comment_id)
return self.send_request(url, data)
# From profile => "is_carousel_bumped_post":"false", "container_module":"feed_contextual_profile", "feed_position":"0"
# From home/feed => "inventory_source":"media_or_ad", "is_carousel_bumped_post":"false", "container_module":"feed_timeline", "feed_position":"0"
def like(
self,
media_id,
double_tap=None,
container_module="feed_short_url",
feed_position=0,
username=None,
user_id=None,
hashtag_name=None,
hashtag_id=None,
entity_page_name=None,
entity_page_id=None,
):
data = self.action_data(
{
"media_id": media_id,
"container_module": container_module,
"feed_position": str(feed_position),
"is_carousel_bumped_post": "false",
}
)
if container_module == "feed_timeline":
data.update({"inventory_source": "media_or_ad"})
if username:
data.update({"username": username, "user_id": user_id})
if hashtag_name:
data.update({"hashtag_name": hashtag_name, "hashtag_id": hashtag_id})
if entity_page_name:
data.update(
{"entity_page_name": entity_page_name, "entity_page_id": entity_page_id}
)
if double_tap is None:
double_tap = random.randint(0, 1)
json_data = self.json_data(data)
# TODO: remove this debug log when done
self.logger.debug("post data: {}".format(json_data))
return self.send_request(
endpoint="media/{media_id}/like/".format(media_id=media_id),
post=json_data,
extra_sig=["d={}".format(double_tap)],
)
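# Hedged usage sketch (the media id and position are placeholders):
#     api.like("2163123456789_12345", container_module="feed_timeline", feed_position=2)
# When container_module is "feed_timeline" the payload also carries inventory_source,
# matching the module comment above.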
def unlike(self, media_id):
data = self.json_data(
{
"media_id": media_id,
"radio_type": "wifi-none",
"is_carousel_bumped_post": "false",
"container_module": "photo_view_other",
"feed_position": "0",
}
)
url = "media/{media_id}/unlike/".format(media_id=media_id)
return self.send_request(url, data)
def get_media_comments(self, media_id, max_id=""):
url = "media/{media_id}/comments/".format(media_id=media_id)
if max_id:
url += "?max_id={max_id}".format(max_id=max_id)
return self.send_request(url)
def explore(self, is_prefetch=False):
data = {
"is_prefetch": is_prefetch,
"is_from_promote": False,
"timezone_offset": datetime.datetime.now(pytz.timezone("CET")).strftime(
"%z"
),
"session_id": self.client_session_id,
"supported_capabilities_new": config.SUPPORTED_CAPABILITIES,
}
if is_prefetch:
data["max_id"] = 0
data["module"] = "explore_popular"
data = json.dumps(data)
return self.send_request("discover/explore/", data)
def get_username_info(self, user_id):
url = "users/{user_id}/info/".format(user_id=user_id)
return self.send_request(url)
def get_self_username_info(self):
return self.get_username_info(self.user_id)
def get_recent_activity(self):
return self.send_request("news/inbox")
def get_following_recent_activity(self):
return self.send_request("news")
def get_user_tags(self, user_id):
url = "usertags/{user_id}/feed/?rank_token={rank_token}&ranked_content=true&"
url = url.format(user_id=user_id, rank_token=self.rank_token)
return self.send_request(url)
def get_self_user_tags(self):
return self.get_user_tags(self.user_id)
def get_geo_media(self, user_id):
url = "maps/user/{user_id}/".format(user_id=user_id)
return self.send_request(url)
def get_self_geo_media(self):
return self.get_geo_media(self.user_id)
def sync_from_adress_book(self, contacts):
url = "address_book/link/?include=extra_display_name,thumbnails"
return self.send_request(url, "contacts=" + json.dumps(contacts))
# ====== FEED METHODS ====== #
def tag_feed(self, tag):
url = "feed/tag/{tag}/?rank_token={rank_token}&ranked_content=true&"
return self.send_request(url.format(tag=tag, rank_token=self.rank_token))
def get_timeline(self):
url = "feed/timeline/?rank_token={rank_token}&ranked_content=true&"
return self.send_request(url.format(rank_token=self.rank_token))
def get_archive_feed(self):
url = "feed/only_me_feed/?rank_token={rank_token}&ranked_content=true&"
return self.send_request(url.format(rank_token=self.rank_token))
def get_user_feed(
self,
user_id,
exclude_comment=True,
only_fetch_first_carousel_media=False,
max_id=None,
ranked_content=None,
min_timestamp=None,
):
url = "feed/user/{user_id}?exclude_comment={exclude_comment}&only_fetch_first_carousel_media={only_fetch_first_carousel_media}".format(
user_id=user_id,
exclude_comment=exclude_comment,
only_fetch_first_carousel_media=only_fetch_first_carousel_media,
)
if max_id is not None:
url += "&max_id={}".format(max_id)
if ranked_content is not None:
url += "&ranked_content={}".format(ranked_content)
if min_timestamp is not None:
url += "&min_timestamp={}".format(min_timestamp)
return self.send_request(url)
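# Hedged usage sketch: page through a user's feed manually (user id is a placeholder):
#     api.get_user_feed("12345")
#     next_max_id = api.last_json.get("next_max_id")
#     api.get_user_feed("12345", max_id=next_max_id)   # fetch the next page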
def get_self_user_feed(self, max_id="", min_timestamp=None):
return self.get_user_feed(
self.user_id, max_id=max_id, min_timestamp=min_timestamp
)
def get_hashtag_feed(self, hashtag, max_id="", ranked_content=False):
url = "feed/tag/{hashtag}/?max_id={max_id}&rank_token={rank_token}&ranked_content={ranked_content}&"
url = url.format(
hashtag=hashtag,
max_id=max_id,
rank_token=self.rank_token,
ranked_content=ranked_content,
)
return self.send_request(url)
def get_location_feed(self, location_id, max_id=""):
url = "feed/location/{location_id}/?max_id={max_id}&rank_token={rank_token}&ranked_content=true&"
url = url.format(
location_id=location_id, max_id=max_id, rank_token=self.rank_token
)
return self.send_request(url)
def get_popular_feed(self):
url = "feed/popular/?people_teaser_supported=1&rank_token={rank_token}&ranked_content=true&"
return self.send_request(url.format(rank_token=self.rank_token))
def get_liked_media(self, max_id=""):
url = "feed/liked/?max_id={max_id}".format(max_id=max_id)
return self.send_request(url)
# ====== FRIENDSHIPS METHODS ====== #
def get_user_followings(self, user_id, max_id=""):
url = "friendships/{user_id}/following/?max_id={max_id}&ig_sig_key_version={sig_key}&rank_token={rank_token}"
url = url.format(
user_id=user_id,
max_id=max_id,
sig_key=config.SIG_KEY_VERSION,
rank_token=self.rank_token,
)
return self.send_request(url)
def get_self_users_following(self):
return self.get_user_followings(self.user_id)
def get_user_followers(self, user_id, max_id=""):
url = "friendships/{user_id}/followers/?rank_token={rank_token}"
url = url.format(user_id=user_id, rank_token=self.rank_token)
if max_id:
url += "&max_id={max_id}".format(max_id=max_id)
return self.send_request(url)
def get_self_user_followers(self):
return self.followers
def follow(self, user_id):
data = self.json_data(self.action_data({"user_id": user_id}))
self.logger.debug("post data: {}".format(data))
url = "friendships/create/{user_id}/".format(user_id=user_id)
return self.send_request(url, data)
def unfollow(self, user_id):
data = self.json_data({"user_id": user_id, "radio_type": "wifi-none"})
url = "friendships/destroy/{user_id}/".format(user_id=user_id)
return self.send_request(url, data)
def block(self, user_id):
data = self.json_data({"user_id": user_id})
url = "friendships/block/{user_id}/".format(user_id=user_id)
return self.send_request(url, data)
def unblock(self, user_id):
data = self.json_data({"user_id": user_id})
url = "friendships/unblock/{user_id}/".format(user_id=user_id)
return self.send_request(url, data)
def user_friendship(self, user_id):
data = self.json_data({"user_id": user_id})
url = "friendships/show/{user_id}/".format(user_id=user_id)
return self.send_request(url, data)
def mute_user(self, user, mute_story=False, mute_posts=False):
data_dict = {}
if mute_posts:
data_dict["target_posts_author_id"] = user
if mute_story:
data_dict["target_reel_author_id"] = user
data = self.json_data(data_dict)
url = "friendships/mute_posts_or_story_from_follow/"
return self.send_request(url, data)
def unmute_user(self, user, unmute_posts=False, unmute_stories=False):
data_dict = {}
if unmute_posts:
data_dict["target_posts_author_id"] = user
if unmute_stories:
data_dict["target_reel_author_id"] = user
data = self.json_data(data_dict)
url = "friendships/unmute_posts_or_story_from_follow/"
return self.send_request(url, data)
def get_pending_friendships(self):
"""Get pending follow requests"""
url = "friendships/pending/"
return self.send_request(url)
def approve_pending_friendship(self, user_id):
data = self.json_data(
{
"_uuid": self.uuid,
"_uid": self.user_id,
"user_id": user_id,
"_csrftoken": self.token,
}
)
url = "friendships/approve/{}/".format(user_id)
return self.send_request(url, post=data)
def reject_pending_friendship(self, user_id):
data = self.json_data(
{
"_uuid": self.uuid,
"_uid": self.user_id,
"user_id": user_id,
"_csrftoken": self.token,
}
)
url = "friendships/ignore/{}/".format(user_id)
return self.send_request(url, post=data)
def get_direct_share(self):
return self.send_request("direct_share/inbox/?")
@staticmethod
def _prepare_recipients(users, thread_id=None, use_quotes=False):
if not isinstance(users, list):
print("Users must be an list")
return False
result = {"users": "[[{}]]".format(",".join(users))}
if thread_id:
template = '["{}"]' if use_quotes else "[{}]"
result["thread"] = template.format(thread_id)
return result
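# Illustrative sketch of the helper's output (my example, not upstream docs):
#     self._prepare_recipients(["123", "456"])            -> {"users": "[[123,456]]"}
#     self._prepare_recipients(["123"], thread_id="789")  -> {"users": "[[123]]", "thread": "[789]"}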
@staticmethod
def generate_signature(data):
body = (
hmac.new(
config.IG_SIG_KEY.encode("utf-8"), data.encode("utf-8"), hashlib.sha256
).hexdigest()
+ "."
+ urllib.parse.quote(data)
)
signature = "ig_sig_key_version={sig_key}&signed_body={body}"
return signature.format(sig_key=config.SIG_KEY_VERSION, body=body)
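# Note (my summary, not upstream docs): the result is the classic signed-body form
#     ig_sig_key_version=<SIG_KEY_VERSION>&signed_body=<hex HMAC-SHA256 of data>.<url-quoted data>
# which send_request() posts verbatim for signed endpoints.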
@staticmethod
def generate_device_id(seed):
volatile_seed = "12345"
m = hashlib.md5()
m.update(seed.encode("utf-8") + volatile_seed.encode("utf-8"))
return "android-" + m.hexdigest()[:16]
@staticmethod
def get_seed(*args):
m = hashlib.md5()
m.update(b"".join([arg.encode("utf-8") for arg in args]))
return m.hexdigest()
@staticmethod
def generate_UUID(uuid_type):
generated_uuid = str(uuid.uuid4())
if uuid_type:
return generated_uuid
else:
return generated_uuid.replace("-", "")
def get_total_followers_or_followings( # noqa: C901
self,
user_id,
amount=None,
which="followers",
filter_private=False,
filter_business=False,
filter_verified=False,
usernames=False,
to_file=None,
overwrite=False,
):
from io import StringIO
if which == "followers":
key = "follower_count"
get = self.get_user_followers
elif which == "followings":
key = "following_count"
get = self.get_user_followings
sleep_track = 0
result = []
next_max_id = ""
self.get_username_info(user_id)
username_info = self.last_json
if "user" in username_info:
total = amount or username_info["user"][key]
if total > 200000:
print(
"Consider temporarily saving the result of this big "
"operation. This will take a while.\n"
)
else:
return False
if filter_business:
print(
"--> You are going to filter business accounts. This will take time! <--"
)
if to_file is not None:
if os.path.isfile(to_file):
if not overwrite:
print("File `{}` already exists. Not overwriting.".format(to_file))
return False
else:
print("Overwriting file `{}`".format(to_file))
with open(to_file, "w"):
pass
desc = "Getting {} of {}".format(which, user_id)
with tqdm(total=total, desc=desc, leave=True) as pbar:
while True:
get(user_id, next_max_id)
last_json = self.last_json
try:
with open(to_file, "a") if to_file is not None else StringIO() as f:
for item in last_json["users"]:
if filter_private and item["is_private"]:
continue
if filter_business:
time.sleep(2 * random.random())
self.get_username_info(item["pk"])
item_info = self.last_json
if item_info["user"]["is_business"]:
continue
if filter_verified and item["is_verified"]:
continue
if to_file is not None:
if usernames:
f.write("{}\n".format(item["username"]))
else:
f.write("{}\n".format(item["pk"]))
result.append(item)
pbar.update(1)
sleep_track += 1
if sleep_track >= 20000:
sleep_time = random.uniform(120, 180)
msg = "\nWaiting {:.2f} min. due to too many requests."
print(msg.format(sleep_time / 60))
time.sleep(sleep_time)
sleep_track = 0
if not last_json["users"] or len(result) >= total:
return result[:total]
except Exception as e:
print("ERROR: {}".format(e))
return result[:total]
if last_json["big_list"] is False:
return result[:total]
next_max_id = last_json.get("next_max_id", "")
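# Hedged usage sketch (user id and filename are placeholders):
#     followers = api.get_total_followers_or_followings(
#         "12345", amount=500, which="followers", usernames=True, to_file="followers.txt")
# This streams usernames to followers.txt while also returning the raw user dicts.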
def get_total_followers(self, user_id, amount=None):
return self.get_total_followers_or_followings(user_id, amount, "followers")
def get_total_followings(self, user_id, amount=None):
return self.get_total_followers_or_followings(user_id, amount, "followings")
def get_total_user_feed(self, user_id, min_timestamp=None):
return self.get_last_user_feed(
user_id, amount=float("inf"), min_timestamp=min_timestamp
)
def get_last_user_feed(self, user_id, amount, min_timestamp=None):
user_feed = []
next_max_id = ""
while True:
if len(user_feed) >= float(amount):
# one request returns max 13 items
return user_feed[:amount]
self.get_user_feed(user_id, max_id=next_max_id, min_timestamp=min_timestamp)
time.sleep(0.2)
last_json = self.last_json
if "items" not in last_json:
return user_feed
user_feed += last_json["items"]
if not last_json.get("more_available"):
return user_feed
next_max_id = last_json.get("next_max_id", "")
def get_total_hashtag_feed(self, hashtag_str, amount=100):
hashtag_feed = []
next_max_id = ""
with tqdm(total=amount, desc="Getting hashtag media.", leave=False) as pbar:
while True:
self.get_hashtag_feed(hashtag_str, next_max_id)
last_json = self.last_json
if "items" not in last_json:
return hashtag_feed[:amount]
items = last_json["items"]
try:
pbar.update(len(items))
hashtag_feed += items
if not items or len(hashtag_feed) >= amount:
return hashtag_feed[:amount]
except Exception:
return hashtag_feed[:amount]
next_max_id = last_json.get("next_max_id", "")
def get_total_self_user_feed(self, min_timestamp=None):
return self.get_total_user_feed(self.user_id, min_timestamp)
def get_total_self_followers(self):
return self.get_total_followers(self.user_id)
def get_total_self_followings(self):
return self.get_total_followings(self.user_id)
def get_total_liked_media(self, scan_rate=1):
next_id = ""
liked_items = []
for _ in range(scan_rate):
self.get_liked_media(next_id)
last_json = self.last_json
next_id = last_json.get("next_max_id", "")
liked_items += last_json["items"]
return liked_items
# ====== ACCOUNT / PERSONAL INFO METHODS ====== #
def change_password(self, new_password):
data = self.json_data(
{
"old_password": self.password,
"new_password1": new_password,
"new_password2": new_password,
}
)
return self.send_request("accounts/change_password/", data)
def remove_profile_picture(self):
data = self.json_data()
return self.send_request("accounts/remove_profile_picture/", data)
def set_private_account(self):
data = self.json_data()
return self.send_request("accounts/set_private/", data)
def set_public_account(self):
data = self.json_data()
return self.send_request("accounts/set_public/", data)
def set_name_and_phone(self, name="", phone=""):
return self.send_request(
"accounts/set_phone_and_name/",
self.json_data({"first_name": name, "phone_number": phone}),
)
def get_profile_data(self):
data = self.json_data()
return self.send_request("accounts/current_user/?edit=true", data)
def edit_profile(self, url, phone, first_name, biography, email, gender):
data = self.json_data(
{
"external_url": url,
"phone_number": phone,
"username": self.username,
"full_name": first_name,
"biography": biography,
"email": email,
"gender": gender,
}
)
return self.send_request("accounts/edit_profile/", data)
def edit_profile_picture(self, photo):
upload_id = rupload_igphoto(self.session, photo, upload_id=None)
if isinstance(upload_id, bool):
return upload_id
data = '_csrftoken={}&_uuid={}&use_fbuploader=true&upload_id={}'.format(self.token, self.uuid, upload_id)
self.send_request("accounts/change_profile_picture/", data, with_signature=False)
def fb_user_search(self, query):
url = (
"fbsearch/topsearch/?context=blended&query={query}&rank_token={rank_token}"
)
return self.send_request(url.format(query=query, rank_token=self.rank_token))
def search_users(self, query):
url = "users/search/?ig_sig_key_version={sig_key}&is_typeahead=true&query={query}&rank_token={rank_token}"
return self.send_request(
url.format(
sig_key=config.SIG_KEY_VERSION, query=query, rank_token=self.rank_token
)
)
def search_username(self, username):
url = "users/{username}/usernameinfo/".format(username=username)
return self.send_request(url)
def search_tags(self, query):
url = "tags/search/?is_typeahead=true&q={query}&rank_token={rank_token}"
return self.send_request(url.format(query=query, rank_token=self.rank_token))
def search_location(self, query="", lat=None, lng=None):
url = (
"fbsearch/places/?rank_token={rank_token}&query={query}&lat={lat}&lng={lng}"
)
url = url.format(rank_token=self.rank_token, query=query, lat=lat, lng=lng)
return self.send_request(url)
def get_user_reel(self, user_id):
url = "feed/user/{}/reel_media/".format(user_id)
return self.send_request(url)
def get_reels_tray_feed(
self, reason="pull_to_refresh"
): # reason can be = cold_start, pull_to_refresh
data = {
"supported_capabilities_new": config.SUPPORTED_CAPABILITIES,
"reason": reason,
"_csrftoken": self.token,
"_uuid": self.uuid,
}
data = json.dumps(data)
return self.send_request("feed/reels_tray/", data)
def get_users_reel(self, user_ids):
"""
Input: user_ids - a list of user ids
Output: a dictionary mapping each user_id to that user's stories data,
i.e. the same payload per user as self.get_user_reel returns.
"""
url = "feed/reels_media/"
res = self.send_request(
url, post=self.json_data({"user_ids": [str(x) for x in user_ids]})
)
if res:
return self.last_json["reels"] if "reels" in self.last_json else []
return []
def see_reels(self, reels):
"""
Input - a list of reel JSON objects.
They can be acquired using the get_users_reel() or get_user_reel() methods.
"""
if not isinstance(reels, list):
# In case of only one reel as input
reels = [reels]
story_seen = {}
now = int(time.time())
for i, story in enumerate(
sorted(reels, key=lambda m: m["taken_at"], reverse=True)
):
story_seen_at = now - min(
i + 1 + random.randint(0, 2), max(0, now - story["taken_at"])
)
story_seen["{!s}_{!s}".format(story["id"], story["user"]["pk"])] = [
"{!s}_{!s}".format(story["taken_at"], story_seen_at)
]
data = self.json_data(
{
"reels": story_seen,
"_csrftoken": self.token,
"_uuid": self.uuid,
"_uid": self.user_id,
}
)
data = self.generate_signature(data)
response = self.session.post(
"https://i.instagram.com/api/v2/" + "media/seen/", data=data
)
self.last_response = response
try:
self.last_json = json.loads(response.text)
except JSONDecodeError:
pass
self.total_requests += 1
return response.ok
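# Hedged usage sketch chaining the two helpers above (user ids are placeholders; the
# response shape is assumed from the code, not verified against the live API):
#     reels = api.get_users_reel(["12345", "67890"]) or {}
#     stories = [s for reel in reels.values() for s in reel.get("items", [])]
#     api.see_reels(stories)   # marks the collected stories as seen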
"""
api/v1/media/216239xxxx1413/180616xxx1178/story_quiz_answer/
answer=0&_csrftoken=PLLbSxxxxlnEt0ttA7F4QEYvxMJ&_uuid=ffbe7b2fxxxxxf51803637e
"story_quizs":[
{
"x":0.750863481121392,
"y":0.8032984473240731,
"z":0,
"width":0.44030202326446105,
"height":0.230359627090983,
"rotation":0.0,
"is_pinned":0,
"is_hidden":0,
"is_sticker":1,
"quiz_sticker":{
"id":"quiz_sticker_default",
"quiz_id":180616xxx1178,
"question":"GUESS THE LOCATION",
"tallies":[
{
"text":"China \ud83c\udde8\ud83c\uddf3",
"count":566
},
{
"text":"Italy \ud83c\uddee\ud83c\uddf9",
"count":2144
},
{
"text":"Switzerland \ud83c\udde8\ud83c\udded",
"count":517
}
],
"correct_answer":1,
"viewer_can_answer":true,
"finished":false,
"text_color":"#FFFFFF",
"start_background_color":"#CA2EE1",
"end_background_color":"#5EB1FF"
}
}
]
"""
def story_quiz_answer(self, media_id, quiz_id, answer=0):
url = "media/{}/{}/story_quiz_answer/".format(media_id, quiz_id)
data = {"answer": 0, "_csrftoken": self.token, "_uuid": self.uuid}
return self.send_request(url, data, with_signature=False)
"""
/api/v1/media/216xxxx59723371_565xxxx417/1810xxxxxx2683/story_slider_vote/
signed_body=7c4a77f1fb5772xxxxxxxxxxc190dd2e968469789323.{"_csrftoken":"PLLbSfC53ZDxxxxx4QEYvxMJ","_uid":"17xxxx39","vote":"0.7019064","_uuid":"ffbe7bxxxxb-c3f51803637e"}&ig_sig_key_version=4
"story_sliders":[
{
"x":0.7047914994500051,
"y":0.341729222052124,
"z":1,
"width":0.372533219147195,
"height":0.097990439015881,
"rotation":-0.021944279891116,
"is_pinned":0,
"is_hidden":0,
"is_sticker":1,
"slider_sticker":{
"slider_id":1810xxxxxx2683,
"question":"In love?",
"emoji":"\ud83d\ude0d",
"text_color":"#FFFFFF",
"background_color":"#3897F0",
"viewer_can_vote":true,
"slider_vote_average":0.9468325791855203,
"slider_vote_count":221
}
}
]
"""
def story_slider_vote(self, media_id, slider_id, vote=0.5000000, vote_random=True):
if vote_random is True:
vote = round(random.uniform(0.10, 1), 7)
url = "media/{}/{}/story_slider_vote/".format(media_id, slider_id)
return self.send_request(url, self.json_data({"vote": str(vote)}))
"""
/api/v1/media/216241xxxx932_5658xxx17/178xx3572937/story_poll_vote/
signed_body=7c4a77f1fb5772xxxxxxxxxxc190dd2e968469789323.{"_csrftoken":"PLLbSfC53ZDxxxxx4QEYvxMJ","_uid":"17xxxx39","vote":"0","_uuid":"ffbe7bxxxxb-c3f51803637e"}&ig_sig_key_version=4
"story_polls":[
{
"x":0.5124582744210741,
"y":0.22404605674749803,
"z":0,
"width":0.415097946769426,
"height":0.072929970970078,
"rotation":0.0,
"is_pinned":0,
"is_hidden":0,
"is_sticker":1,
"poll_sticker":{
"id":"polling_sticker_vibrant",
"poll_id":178xx3572937,
"question":"",
"tallies":[
{
"text":"NOW!",
"font_size":35.0,
"count":242
},
{
"text":"LATER",
"font_size":35.0,
"count":37
}
],
"promotion_tallies":null,
"viewer_can_vote":true,
"is_shared_result":false,
"finished":false
}
}
]
"""
def story_poll_vote(self, media_id, poll_id, vote=0, vote_random=True):
if vote_random is True:
vote = random.randint(0, 1)
url = "media/{}/{}/story_poll_vote/".format(media_id, poll_id)
return self.send_request(url, self.json_data({"vote": str(vote)}))
"""
/api/v1/media/21683xxxx7066343_8588684487/17xxx749730392/story_question_response/
signed_body=94391bcb0cf52dexxxxxx232f0c87774b.{"client_context":"47abf00e-xxxxx296fc5d5","_csrftoken":"PLLbSfC53xxxxQEYvxMJ","response":"Sempre+e+comunque","_uid":"17852xx39","type":"text","mutation_token":"47axxx831c296fc5d5","_uuid":"ffbe7b2fxxx-c3f51803637e"}&ig_sig_key_version=4
"story_questions":[
{
"x":0.5,
"y":0.5,
"z":0,
"width":0.7777778,
"height":0.28145695,
"rotation":0.0,
"is_pinned":0,
"is_hidden":0,
"is_sticker":1,
"question_sticker":{
"question_type":"text",
"question_id":17xxx749730392,
"question":"Esketit?",
"media_id":21683xxxx7066343,
"text_color":"#000000",
"background_color":"#ffffff",
"viewer_can_interact":true,
"profile_pic_url":""
}
}
]
"""
def story_question_response(self, media_id, question_id, response):
data = {
"client_context": self.generate_UUID(True),
"mutation_token": self.generate_UUID(True),
"response": response,
"type": "text",
}
url = "media/{}/{}/story_question_response/".format(media_id, question_id)
return self.send_request(url, self.json_data(data))
"""
/api/v1/media/17863xxx85013/follow_story_countdown/
signed_body=e6f0c686xxxxxxx8897eec1f78d9b8d21d602c49.{"_csrftoken":"dbCGA1xxxxWvV8yCDhjw","_uid":"1785xxx439","_uuid":"ffbe7b2f-166xxxx3637e"}&ig_sig_key_version=4
"story_countdowns":[
{
"x":0.36236402,
"y":0.67162275,
"z":0,
"width":0.75,
"height":0.25496688,
"rotation":0.0,
"is_pinned":0,
"is_hidden":0,
"is_sticker":1,
"countdown_sticker":{
"countdown_id":17863xxx85013,
"end_ts":1576969200,
"text":"VIAGGIO IN FRANCIA ",
"text_color":"#ffffff",
"start_background_color":"#ca2ee1",
"end_background_color":"#5eb1ff",
"digit_color":"#7e0091",
"digit_card_color":"#ffffffcc",
"following_enabled":true,
"is_owner":false,
"attribution":null,
"viewer_is_following":false
}
}
]
"""
def follow_story_countdown(self, countdown_id):
url = "media/{}/follow_story_countdown/".format(countdown_id)
return self.send_request(url)
def get_user_stories(self, user_id):
url = "feed/user/{}/story/?supported_capabilities_new={}".format(
user_id, config.SUPPORTED_CAPABILITIES
)
return self.send_request(url)
def get_self_story_viewers(self, story_id, max_id=""):
url = "media/{}/list_reel_media_viewer/?supported_capabilities_new={}".format(
story_id, config.SUPPORTED_CAPABILITIES
)
if max_id != "":
url += "&max_id={}".format(max_id)
return self.send_request(url)
def get_tv_suggestions(self):
url = "igtv/tv_guide/"
return self.send_request(url)
def get_hashtag_stories(self, hashtag):
url = "tags/{}/story/".format(hashtag)
return self.send_request(url)
def follow_hashtag(self, hashtag):
data = self.json_data({})
url = "tags/follow/{}/".format(hashtag)
return self.send_request(url, data)
def unfollow_hashtag(self, hashtag):
data = self.json_data({})
url = "tags/unfollow/{}/".format(hashtag)
return self.send_request(url, data)
def get_tags_followed_by_user(self, user_id):
url = "users/{}/following_tags_info/".format(user_id)
return self.send_request(url)
# Copied from a Burp-intercepted request on a device running an IG version with SSL pinning bypassed.
def get_hashtag_sections(
self, hashtag, page=0, next_max_id="", next_media_ids=[], tab="recent"
):
data = "?_csrftoken={}".format(self.token)
data += "&rank_token={}".format(self.rank_token)
data += "&_uuid={}".format(self.uuid)
data += "&include_persistent={}".format(True)
data += "&tab={}".format(tab)
if page != 0:
data += "&page={}".format(page)
if next_max_id != "":
data += "&max_id={}".format(next_max_id)
if next_media_ids != []:
data += "&next_media_ids={}".format(str(next_media_ids))
url = "tags/{}/sections/".format(hashtag)
return self.send_request(url, data, with_signature=False)
def get_media_insight(self, media_id):
url = "insights/media_organic_insights/{}/?ig_sig_key_version={}".format(
media_id, config.IG_SIG_KEY
)
return self.send_request(url)
def get_self_insight(self, first=""):
# NOTE: upstream left this TODO with an empty .format() call, which raises at runtime;
# the `first` cursor is exposed as a parameter here (empty string assumed as the default).
url = "insights/account_organic_insights/?show_promotions_in_landing_page=true&first={}".format(first)
return self.send_request(url)
# From profile => "module_name":"feed_contextual_profile"
# From home/feed => "module_name":"feed_timeline"
def save_media(self, media_id, module_name="feed_timeline"):
return self.send_request(
endpoint="media/{media_id}/save/".format(media_id=media_id),
post=self.json_data(self.action_data({"module_name": module_name})),
)
def unsave_media(self, media_id):
data = self.json_data()
url = "media/{}/unsave/".format(media_id)
return self.send_request(url, data)
def get_saved_medias(self):
url = "feed/saved/"
return self.send_request(url)
def get_loom_fetch_config(self):
return self.send_request("loom/fetch_config/")
def get_profile_notice(self):
return self.send_request("users/profile_notice/")
# ====== DIRECT METHODS ====== #
def get_inbox_v2(
self,
visual_message_return_type="unseen",
thread_message_limit=10,
persistentBadging=True,
limit=20,
cursor=None,
folder=None,
):
url = "direct_v2/inbox/?visual_message_return_type={}&thread_message_limit={}&persistentBadging={}&limit={}".format(
visual_message_return_type, thread_message_limit, persistentBadging, limit
)
if cursor is not None:
url += "&cursor={}".format(cursor)
if folder is not None:
url += "&folder={}".format(folder)
return self.send_request(url)
def get_presence(self):
return self.send_request("direct_v2/get_presence/")
def get_ranked_recipients(self, mode, show_threads, query=None):
data = {
"mode": mode,
"show_threads": "false" if show_threads is False else "true",
"use_unified_inbox": "true",
}
if query is not None:
data["query"] = query
return self.send_request("direct_v2/ranked_recipients/", json.dumps(data))
def send_direct_item(self, item_type, users, **options):
data = {"client_context": self.generate_UUID(True), "action": "send_item"}
headers = {}
recipients = self._prepare_recipients(
users, options.get("thread"), use_quotes=False
)
if not recipients:
return False
data["recipient_users"] = recipients.get("users")
if recipients.get("thread"):
data["thread_ids"] = recipients.get("thread")
data.update(self.default_data)
url = "direct_v2/threads/broadcast/{}/".format(item_type)
text = options.get("text", "")
if item_type == "link":
data["link_text"] = text
data["link_urls"] = json.dumps(options.get("urls"))
elif item_type == "text":
data["text"] = text
elif item_type == "media_share":
data["text"] = text
data["media_type"] = options.get("media_type", "photo")
data["media_id"] = options.get("media_id", "")
elif item_type == "hashtag":
data["text"] = text
data["hashtag"] = options.get("hashtag", "")
elif item_type == "profile":
data["text"] = text
data["profile_user_id"] = options.get("profile_user_id")
elif item_type == "photo":
url = "direct_v2/threads/broadcast/upload_photo/"
filepath = options["filepath"]
upload_id = str(int(time.time() * 1000))
with open(filepath, "rb") as f:
photo = f.read()
data["photo"] = (
"direct_temp_photo_%s.jpg" % upload_id,
photo,
"application/octet-stream",
{"Content-Transfer-Encoding": "binary"},
)
m = MultipartEncoder(data, boundary=self.uuid)
data = m.to_string()
headers.update({"Content-type": m.content_type})
return self.send_request(url, data, with_signature=False, headers=headers)
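# Hedged usage sketches (user ids, text and file path are placeholders):
#     api.send_direct_item("text", ["12345"], text="hi there")
#     api.send_direct_item("link", ["12345"], text="look", urls=["https://example.com"])
#     api.send_direct_item("photo", ["12345"], filepath="photo.jpg")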
def get_pending_inbox(
self,
visual_message_return_type="unseen",
selected_filter="relevant",
sort_order="relevant",
persistentBadging=True,
):
url = "direct_v2/pending_inbox/?visual_message_return_type={}&selected_filter={}&sort_order={}&persistentBadging={}".format(
visual_message_return_type, selected_filter, sort_order, persistentBadging
)
return self.send_request(url)
def label_thread(self, thread_id):
url = "direct_v2/threads/{}/label/".format(thread_id)
data = self.json_data({"thread_label": 1})
return self.send_request(url, post=data)
def unlabel_thread(self, thread_id):
url = "direct_v2/threads/{}/unlabel/".format(thread_id)
return self.send_request(url, post=self.json_data())
def delete_thread(self, thread_id): # It's the same as hide_pending_thread =_=
url = "direct_v2/threads/{}/hide/".format(thread_id)
return self.send_request(url, post=self.json_data())
def read_thread(
self,
thread_id,
cursor=None,
seq_id=None,
visual_message_return_type="unseen",
direction="older",
limit=10,
):
url = "direct_v2/threads/{}/?visual_message_return_type={}&direction={}&limit={}".format(
thread_id, visual_message_return_type, cursor, direction, seq_id, limit
)
if cursor is not None:
url += "&cursor={}".format(cursor)
if seq_id is not None:
url += "&seq_id={}".format(seq_id)
return self.send_request(url)
def move_thread(self, thread_id, folder=1):
url = "direct_v2/threads/{}/move/".format(thread_id)
data = self.json_data({"folder": folder})
return self.send_request(url, post=data)
def approve_pending_thread(self, thread_id, folder=1): # 1 GENERAL / 0 PRIMARY
url = "direct_v2/threads/{}/approve/".format(thread_id)
return self.send_request(url, post=self.json_data({"folder": folder}))
def hide_pending_thread(self, thread_id):
url = "direct_v2/threads/{}/hide/".format(thread_id)
return self.send_request(url, post=self.json_data())
def decline_pending_thread(self, thread_id):
url = "direct_v2/threads/{}/decline/".format(thread_id)
return self.send_request(url, post=self.json_data())
def open_instagram_link(self, link):
return self.send_request(
"oembed/?url={}".format(urllib.parse.quote(link, safe=""))
)