hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
db9d8c67bcfd3a7c9d253f50f4a6bf8badfcdb9c | 592 | py | Python | betterbib/__init__.py | tbabej/betterbib | 80a3c9040232d9988f9a1e4c40724b40b9b9ed85 | [
"MIT"
] | null | null | null | betterbib/__init__.py | tbabej/betterbib | 80a3c9040232d9988f9a1e4c40724b40b9b9ed85 | [
"MIT"
] | null | null | null | betterbib/__init__.py | tbabej/betterbib | 80a3c9040232d9988f9a1e4c40724b40b9b9ed85 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
from __future__ import print_function
from betterbib.__about__ import (
__version__,
__author__,
__author_email__,
__website__,
)
from betterbib.tools import (
create_dict,
decode,
pybtex_to_dict,
pybtex_to_bibtex_string,
write,
update,
JournalNameUpdater,
translate_month
)
from betterbib.crossref import Crossref
from betterbib.dblp import Dblp
try:
import pipdate
except ImportError:
pass
else:
if pipdate.needs_checking(__name__):
print(pipdate.check(__name__, __version__), end='')
| 18.5 | 59 | 0.701014 |
db9da718184383db0fb17735d540dd6d59f6b655 | 5,830 | py | Python | base/views.py | omololevy/my_portfolio | 29f8892c3a6e40a9c05c85110301987005d2c5c1 | [
"MIT"
] | 2 | 2021-12-25T23:11:03.000Z | 2021-12-26T07:09:35.000Z | base/views.py | omololevy/portfolio | 29f8892c3a6e40a9c05c85110301987005d2c5c1 | [
"MIT"
] | 6 | 2022-01-15T15:38:36.000Z | 2022-02-22T17:17:59.000Z | base/views.py | omololevy/my_portfolio | 29f8892c3a6e40a9c05c85110301987005d2c5c1 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.mail import EmailMessage
from django.conf import settings
from django.template.loader import render_to_string
from django.contrib.auth.models import User
from django.contrib import messages
from django.contrib.auth import logout, login, authenticate
from django.contrib.auth.forms import UserCreationForm
from .decorators import *
from .forms import PostForm, CustomUserCreationForm, ProfileForm, UserForm
from .filters import PostFilter
from .models import *
# Create your views here.
#CRUD VIEWS
def sendEmail(request):
if request.method == 'POST':
template = render_to_string('base/email_template.html', {
'name':request.POST['name'],
'email':request.POST['email'],
'message':request.POST['message'],
})
email = EmailMessage(
request.POST['subject'],
template,
settings.EMAIL_HOST_USER,
['cotechlevy@gmail.com']
)
email.fail_silently=False
email.send()
return render(request, 'base/email_sent.html')
def loginPage(request):
if request.user.is_authenticated:
return redirect('home')
if request.method == 'POST':
email = request.POST.get('email')
password =request.POST.get('password')
#Little Hack to work around re-building the usermodel
try:
user = User.objects.get(email=email)
user = authenticate(request, username=user.username, password=password)
except:
messages.error(request, 'User with this email does not exists')
return redirect('login')
if user is not None:
login(request, user)
return redirect('home')
else:
messages.error(request, 'Email OR password is incorrect')
context = {}
return render(request, 'base/login.html', context)
def registerPage(request):
form = CustomUserCreationForm()
if request.method == 'POST':
form = CustomUserCreationForm(request.POST)
if form.is_valid():
user = form.save(commit=False)
user.save()
messages.success(request, 'Account successfuly created!')
user = authenticate(request, username=user.username, password=request.POST['password1'])
if user is not None:
login(request, user)
next_url = request.GET.get('next')
if next_url == '' or next_url == None:
next_url = 'home'
return redirect(next_url)
else:
messages.error(request, 'An error has occured with registration')
context = {'form':form}
return render(request, 'base/register.html', context)
def logoutUser(request):
logout(request)
return redirect('home')
def myEducation(request):
return render(request, 'base/education.html')
def myExperience(request):
return render(request, 'base/experience.html')
def myAchievements(request):
return render(request, 'base/achievements.html')
def myAbout(request):
return render(request, 'base/about.html')
def myContact(request):
return render(request, 'base/contact.html')
def mySkills(request):
return render(request, 'base/skills.html')
| 25.911111 | 91 | 0.732247 |
db9dc14c3ce1122987ebe56a59b8a07194d400d2 | 30,282 | py | Python | radioLib/pastebin/pastebin.py | hephaestus9/Radio | c1560c25def211ab6354fb0aa5cc935e2851c8f0 | [
"MIT"
] | 1 | 2021-05-17T08:31:07.000Z | 2021-05-17T08:31:07.000Z | lib/pastebin/pastebin.py | hephaestus9/Ironworks | 37be48e37f63530dd7bf82618948ef82522699a0 | [
"MIT"
] | null | null | null | lib/pastebin/pastebin.py | hephaestus9/Ironworks | 37be48e37f63530dd7bf82618948ef82522699a0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#############################################################################
# Pastebin.py - Python 3.2 Pastebin API.
# Copyright (C) 2012 Ian Havelock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
# This software is a derivative work of:
# http://winappdbg.sourceforge.net/blog/pastebin.py
#############################################################################
__ALL__ = ['delete_paste', 'user_details', 'trending', 'pastes_by_user', 'generate_user_key',
'legacy_paste', 'paste', 'Pastebin', 'PastebinError']
import sys
import urllib
######################################################
delete_paste = PastebinAPI.delete_paste
user_details = PastebinAPI.user_details
trending = PastebinAPI.trending
pastes_by_user = PastebinAPI.pastes_by_user
generate_user_key = PastebinAPI.generate_user_key
legacy_paste = PastebinAPI.legacy_paste
paste = PastebinAPI.paste
######################################################
if __name__ == "__main__":
main()
| 38.186633 | 125 | 0.533848 |
dba0ab7feb9b0f1f06f733ef048e8a1aa5355e67 | 2,544 | py | Python | app/requests.py | seron-ux/News-app | d22b256b26fb9fa2bb77658952139b9ddebb8f8c | [
"MIT"
] | 1 | 2021-04-16T12:03:37.000Z | 2021-04-16T12:03:37.000Z | app/requests.py | seron-ux/News-app | d22b256b26fb9fa2bb77658952139b9ddebb8f8c | [
"MIT"
] | null | null | null | app/requests.py | seron-ux/News-app | d22b256b26fb9fa2bb77658952139b9ddebb8f8c | [
"MIT"
] | null | null | null | import urllib.request,json
from .models import News
import requests
News = News
# Getting api key
api_key = None
# Getting the news base url
base_url = None
base_url2 = None
def get_news(category):
'''
Function that gets the json responce to our url request
'''
get_news_url = base_url.format(category,api_key)
print(get_news_url)
get_news_response = requests.get(get_news_url).json()
print(get_news_response)
news_results = None
if get_news_response['articles']:
news_results_list = get_news_response['articles']
news_results = process_results(news_results_list)
return news_results
def process_results(news_list):
'''
Function that processes the news result and transform them to a list of Objects
Args:
news_list: A list of dictionaries that contain news details
Returns :
news_results: A list of news objects
'''
news_results = []
for news_item in news_list:
title = news_item.get('title')
image = news_item.get('urlToImage')
description = news_item.get('description')
date = news_item.get('publishedAt')
article = news_item.get('url')
if image:
news_object = News(title,image,description,date,article)
news_results.append(news_object)
return news_results
def get_article(source):
'''
Function that gets the json responce to our url request
'''
get_news_url = base_url.format(source,api_key)
with urllib.request.urlopen(get_news_url) as url:
get_news_data = url.read()
get_news_response = json.loads(get_news_data)
news_results = None
if get_news_response['articles']:
news_results_list = get_news_response['articles']
news_results = process_results(news_results_list)
return news_results
| 26.5 | 109 | 0.688286 |
dba12a8374326bf93ca2bf2928409a83d003c3d7 | 861 | py | Python | leetcode/151_reverse _words_in_a_string.py | caoxudong/code_practice | cb960cf69d67ae57b35f0691d35e15c11989e6d2 | [
"MIT"
] | 1 | 2020-06-19T11:23:46.000Z | 2020-06-19T11:23:46.000Z | leetcode/151_reverse _words_in_a_string.py | caoxudong/code_practice | cb960cf69d67ae57b35f0691d35e15c11989e6d2 | [
"MIT"
] | null | null | null | leetcode/151_reverse _words_in_a_string.py | caoxudong/code_practice | cb960cf69d67ae57b35f0691d35e15c11989e6d2 | [
"MIT"
] | null | null | null | """
Given an input string, reverse the string word by word.
For example,
Given s = "the sky is blue",
return "blue is sky the".
For C programmers: Try to solve it in-place in O(1) space.
Clarification:
* What constitutes a word?
A sequence of non-space characters constitutes a word.
* Could the input string contain leading or trailing spaces?
Yes. However, your reversed string should not contain leading or trailing spaces.
* How about multiple spaces between two words?
Reduce them to a single space in the reversed string.
https://leetcode.com/problems/reverse-words-in-a-string/
"""
| 28.7 | 86 | 0.680604 |
dba13534887cbe280b6557621729a3e4996855c7 | 1,250 | py | Python | toontown/uberdog/DistributedInGameNewsMgr.py | LittleNed/toontown-stride | 1252a8f9a8816c1810106006d09c8bdfe6ad1e57 | [
"Apache-2.0"
] | 3 | 2020-01-02T08:43:36.000Z | 2020-07-05T08:59:02.000Z | toontown/uberdog/DistributedInGameNewsMgr.py | NoraTT/Historical-Commits-Project-Altis-Source | fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179 | [
"Apache-2.0"
] | null | null | null | toontown/uberdog/DistributedInGameNewsMgr.py | NoraTT/Historical-Commits-Project-Altis-Source | fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179 | [
"Apache-2.0"
] | 4 | 2019-06-20T23:45:23.000Z | 2020-10-14T20:30:15.000Z | import socket, datetime, os
from direct.distributed.DistributedObjectGlobal import DistributedObjectGlobal
from direct.distributed.DistributedObject import DistributedObject
from toontown.toonbase import ToontownGlobals
from toontown.uberdog import InGameNewsResponses
| 32.051282 | 92 | 0.728 |
dba13fb4439b8ad0fa549819c5076a87665d49e6 | 3,540 | py | Python | Day10/loops.py | azeemchaudhrry/30DaysofPython | 8aa80c81967d87e4bc70254a41517d0303ca0599 | [
"MIT"
] | null | null | null | Day10/loops.py | azeemchaudhrry/30DaysofPython | 8aa80c81967d87e4bc70254a41517d0303ca0599 | [
"MIT"
] | null | null | null | Day10/loops.py | azeemchaudhrry/30DaysofPython | 8aa80c81967d87e4bc70254a41517d0303ca0599 | [
"MIT"
] | null | null | null | # Day 10 Loops
from countries import *
# While Loop
# count = 0
# while count < 5:
# if count == 3:
# break
# print(count)
# count = count + 1
# numbers = [0,2,3,4,5,6,7,8,9,10]
# for number in numbers:
# print(number)
# language = 'Python'
# for letter in language:
# print(letter)
# tpl = ('python','updates','wow')
# for number in tpl:
# print(number)
# person = {
# 'first_name':'Asabeneh',
# 'last_name':'Yetayeh',
# 'age':250,
# 'country':'Finland',
# 'is_marred':True,
# 'skills':['JavaScript', 'React', 'Node', 'MongoDB', 'Python'],
# 'address':{
# 'street':'Space street',
# 'zipcode':'02210'
# }
# }
# print('------------------------------------')
# for key in person:
# print(key)
# print('------------------------------------')
# for key,value in person.items():
# print(key, value)
# print('--------------------------------------')
# it_companies = {'Facebook', 'Google', 'Microsoft', 'Apple', 'IBM', 'Oracle', 'Amazon'}
# for company in it_companies:
# print(company)
# print('--------------------------------------')
# numbers = (0,1,2,3,4,5,6,7)
# for number in numbers:
# print(number)
# if(number == 3):
# break
# print('--------------------------------------')
# for number in numbers:
# print(number)
# if(number == 3):
# continue
# print('--------------------------------------')
# numbers = (0,1,2,3,4,5)
# for number in numbers:
# print(number)
# if number == 3:
# continue
# print('Next number should be ', number + 1) if number != 5 else print("loop's end") # for short hand conditions need both if and else statements
# print('outside the loop')
# print('--------------------------------------')
# lst = list(range(11))
# print(lst)
# st = set(range(1,11))
# print(st)
# lst = list(range(0,11,2))
# print(lst)
# st = set(range(0,11,2))
# print(st)
# Exercises: Day 10
# Iterate 0 to 10 using for loop, do the same using while loop.
# numbers = [0,1,2,3,4,5,6,7,8,9,10]
# for number in numbers:
# print(number)
# count = 0
# while count < 10:
# print(count)
# count += 1
# Iterate 10 to 0 using for loop, do the same using while loop.
# for number in range(10,-1,-1):
# print(number)
# count = 10
# while count > -1:
# print(count)
# count -= 1
# Write a loop that makes seven calls to print(), so we get on the output the following triangle:
for index in range(0,8):
print(index * '#')
limit = 9
for i in range(0,limit):
for j in range(0,limit):
print('# ', end='')
print('')
for i in range(0, 11):
print(f'{i} x {i} = {i * i}')
frameworks = ['Python', 'Numpy','Pandas','Django', 'Flask']
for framework in frameworks:
print(framework)
for i in range(0,101):
if i % 2 == 0:
print(i)
for i in range(0,101):
if i % 2 != 0:
print(i)
sum = 0
for i in range(0,101):
sum += i
print('The sum of all numbers is : ', sum)
even_sum = odd_sum = 0
for i in range(0,101):
if i % 2 == 0:
even_sum += i
elif i % 2 != 0:
odd_sum += i
print(f'The sum of all evens is {even_sum}. And the sum of all odds is {odd_sum}.')
for country in countries:
if 'land' in country:
print(country)
fruits = ['banana', 'orange', 'mango', 'lemon']
total_elements = len(fruits) - 1
for i in range(0, int(len(fruits) / 2)):
temp_element = fruits[i]
fruits[i] = fruits[total_elements - i]
fruits[total_elements - i] = temp_element
print(fruits) | 22.547771 | 150 | 0.530508 |
dba3388df291e70cf8ca9ead3a8d7661985dbeac | 10,412 | py | Python | tessera-server/tessera/views_api.py | Dimas625/tessera | 8e554f217220228fb8a0662fb5075cb839e9f1b1 | [
"Apache-2.0"
] | 379 | 2015-01-02T19:12:10.000Z | 2016-12-05T05:41:47.000Z | tessera-server/tessera/views_api.py | Dimas625/tessera | 8e554f217220228fb8a0662fb5075cb839e9f1b1 | [
"Apache-2.0"
] | 129 | 2015-01-07T04:21:05.000Z | 2016-07-24T18:37:43.000Z | tessera-server/tessera/views_api.py | Dimas625/tessera | 8e554f217220228fb8a0662fb5075cb839e9f1b1 | [
"Apache-2.0"
] | 44 | 2015-01-05T13:48:40.000Z | 2016-11-23T07:11:41.000Z | # -*- mode:python -*-
import flask
import json
import logging
from datetime import datetime
import inflection
from functools import wraps
from flask import request, url_for
from werkzeug.exceptions import HTTPException
from .client.api.model import *
from . import database
from . import helpers
from .application import db
mgr = database.DatabaseManager(db)
log = logging.getLogger(__name__)
api = flask.Blueprint('api', __name__)
# =============================================================================
# API Helpers
# =============================================================================
def _dashboard_sort_column():
"""Return a SQLAlchemy column descriptor to sort results by, based on
the 'sort' and 'order' request parameters.
"""
columns = {
'created' : database.DashboardRecord.creation_date,
'modified' : database.DashboardRecord.last_modified_date,
'category' : database.DashboardRecord.category,
'id' : database.DashboardRecord.id,
'title' : database.DashboardRecord.title
}
colname = helpers.get_param('sort', 'created')
order = helpers.get_param('order')
column = database.DashboardRecord.creation_date
if colname in columns:
column = columns[colname]
if order == 'desc' or order == u'desc':
return column.desc()
else:
return column.asc()
def _set_dashboard_hrefs(dash):
"""Add the various ReSTful hrefs to an outgoing dashboard
representation. dash should be the dictionary for of the dashboard,
not the model object.
"""
id = dash['id']
dash['href'] = url_for('api.dashboard_get', id=id)
dash['definition_href'] = url_for('api.dashboard_get_definition', id=id)
dash['view_href'] = url_for('ui.dashboard_with_slug',
id=id,
slug=inflection.parameterize(dash['title']))
if 'definition' in dash:
definition = dash['definition']
definition['href'] = url_for('api.dashboard_get_definition', id=id)
return dash
def _dashboards_response(dashboards):
"""Return a Flask response object for a list of dashboards in API
format. dashboards must be a list of dashboard model objects, which
will be converted to their JSON representation.
"""
if not isinstance(dashboards, list):
dashboards = [dashboards]
include_definition = helpers.get_param_boolean('definition', False)
return [ _set_dashboard_hrefs(d.to_json(include_definition=include_definition)) for d in dashboards]
def _set_tag_hrefs(tag):
"""Add ReSTful href attributes to a tag's dictionary
representation.
"""
id = tag['id']
tag['href'] = url_for('api.tag_get', id=id)
return tag
def _tags_response(tags):
"""Return a Flask response object for a list of tags in API
format. tags must be a list of tag model objects, which
will be converted to their JSON representation.
"""
if not isinstance(tags, list):
tags = [tags]
return [_set_tag_hrefs(t.to_json()) for t in tags]
# =============================================================================
# Dashboards
# =============================================================================
# =============================================================================
# Tags
# =============================================================================
# =============================================================================
# Miscellany
# =============================================================================
| 34.25 | 104 | 0.634364 |
dba3bf31e30dbc6e19d1f005b15ec72aaafc1178 | 4,175 | py | Python | modules/aws_service.py | Darkcybe/attack_range | b135251cc40e527e78e6e826759e421fb3834577 | [
"Apache-2.0"
] | 1 | 2020-08-26T18:14:17.000Z | 2020-08-26T18:14:17.000Z | modules/aws_service.py | Darkcybe/attack_range | b135251cc40e527e78e6e826759e421fb3834577 | [
"Apache-2.0"
] | null | null | null | modules/aws_service.py | Darkcybe/attack_range | b135251cc40e527e78e6e826759e421fb3834577 | [
"Apache-2.0"
] | null | null | null | import sys
import re
import boto3
from botocore.exceptions import ClientError
import uuid
import time
import yaml
import os
# def upload_file_s3_bucket(file_name, results, test_file, isArchive):
# region = config['region']
# s3_client = boto3.client('s3', region_name=region)
# if isArchive:
# response = s3_client.upload_file(file_name, 'attack-range-attack-data', str(test_file['simulation_technique'] + '/attack_data.tar.gz'))
# else:
# response = s3_client.upload_file(file_name, 'attack-range-attack-data', str(test_file['simulation_technique'] + '/attack_data.json'))
#
# with open('tmp/test_results.yml', 'w') as f:
# yaml.dump(results, f)
# response2 = s3_client.upload_file('tmp/test_results.yml', 'attack-range-automated-testing', str(test_file['simulation_technique'] + '/test_results.yml'))
# os.remove('tmp/test_results.yml')
| 36.946903 | 159 | 0.640958 |
dba4148e040528b537c6483d7f1281dc550a6268 | 5,685 | py | Python | pystacknet/metrics.py | KevinMichaelSchindler/pystacknet | bb723511787be6a0828d2ec5ef141fa76b80ef84 | [
"MIT"
] | null | null | null | pystacknet/metrics.py | KevinMichaelSchindler/pystacknet | bb723511787be6a0828d2ec5ef141fa76b80ef84 | [
"MIT"
] | null | null | null | pystacknet/metrics.py | KevinMichaelSchindler/pystacknet | bb723511787be6a0828d2ec5ef141fa76b80ef84 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 31 18:33:58 2018
@author: Marios Michailidis
metrics and method to check metrics used within StackNet
"""
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score , mean_squared_log_error #regression metrics
from sklearn.metrics import roc_auc_score, log_loss ,accuracy_score, f1_score ,matthews_corrcoef
import numpy as np
valid_regression_metrics=["rmse","mae","rmsle","r2","mape","smape"]
valid_classification_metrics=["auc","logloss","accuracy","f1","matthews"]
############ classification metrics ############
############ regression metrics ############
"""
metric: string or class that returns a metric given (y_true, y_pred, sample_weight=None)
Curently supported metrics are "rmse","mae","rmsle","r2","mape","smape"
"""
"""
metric: string or class that returns a metric given (y_true, y_pred, sample_weight=None)
Curently supported metrics are "rmse","mae","rmsle","r2","mape","smape"
"""
| 36.210191 | 140 | 0.628672 |
dba7508f72db5159de10c2533d780968df627768 | 5,629 | py | Python | check_logstash_pipeline.py | stdevel/nagios-plugins | 5ea0e186fa6fdd0e70681c7fed02c6d46d50bbb5 | [
"IBM-pibs",
"Apache-1.1"
] | null | null | null | check_logstash_pipeline.py | stdevel/nagios-plugins | 5ea0e186fa6fdd0e70681c7fed02c6d46d50bbb5 | [
"IBM-pibs",
"Apache-1.1"
] | null | null | null | check_logstash_pipeline.py | stdevel/nagios-plugins | 5ea0e186fa6fdd0e70681c7fed02c6d46d50bbb5 | [
"IBM-pibs",
"Apache-1.1"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2017-11-24 21:10:35 +0100 (Fri, 24 Nov 2017)
#
# https://github.com/harisekhon/nagios-plugins
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help steer this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
"""
Nagios Plugin to check a Logstash pipeline is online via the Logstash Rest API
API is only available in Logstash 5.x onwards, will get connection refused on older versions
Optional thresholds apply to the number of pipeline workers
Ensure Logstash options:
--http.host should be set to 0.0.0.0 if querying remotely
--http.port should be set to the same port that you are querying via this plugin's --port switch
Tested on Logstash 5.0, 5.1, 5.2, 5.3, 5.4, 5.5, 5.6, 6.0, 6.1
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import traceback
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
# pylint: disable=wrong-import-position
#from harisekhon.utils import log
from harisekhon.utils import ERRORS, UnknownError, support_msg_api
from harisekhon.utils import validate_chars
from harisekhon import RestNagiosPlugin
except ImportError as _:
print(traceback.format_exc(), end='')
sys.exit(4)
__author__ = 'Hari Sekhon'
__version__ = '0.6'
if __name__ == '__main__':
CheckLogstashPipeline().main()
| 39.921986 | 119 | 0.637413 |
dba89946ffbf4b4e0ca04987e645e105d52edb8a | 2,412 | py | Python | dags/mailsdag.py | rvacaru/airflow-training-skeleton | 45fc6a8938d055b98c62c85b7c8085cb7d6f23ba | [
"Apache-2.0"
] | null | null | null | dags/mailsdag.py | rvacaru/airflow-training-skeleton | 45fc6a8938d055b98c62c85b7c8085cb7d6f23ba | [
"Apache-2.0"
] | null | null | null | dags/mailsdag.py | rvacaru/airflow-training-skeleton | 45fc6a8938d055b98c62c85b7c8085cb7d6f23ba | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example DAG demonstrating the usage of the BashOperator."""
from datetime import timedelta
import datetime
import airflow
from airflow.models import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.python_operator import BranchPythonOperator
args = {
'owner': 'Airflow',
'start_date': airflow.utils.dates.days_ago(14),
}
dag = DAG(
dag_id='exercise_weekday',
default_args=args,
schedule_interval='0 0 * * *',
dagrun_timeout=timedelta(minutes=60),
)
dummy_last = DummyOperator(
task_id='run_this_last',
dag=dag,
trigger_rule='one_success',
)
weekday_task = PythonOperator(
task_id='weekday_task',
python_callable=print_weekday,
provide_context=True,
dag=dag,
)
# optimize with try exept
weekday_person = {
"Mon": "bob",
"Tue": "joe",
"Thu": "joe",
}
branch_task = BranchPythonOperator(
task_id='branch_task',
python_callable=define_oncall,
provide_context=True,
dag=dag,
)
tasks = ["bob", "joe", "ali"]
for p in tasks:
taski = DummyOperator(
task_id=p,
dag=dag,
)
branch_task >> taski
taski >> dummy_last
weekday_task >> branch_task
| 25.125 | 66 | 0.717247 |
dba99e90b4b43e354898ce74c9ce989b11885ee9 | 1,359 | py | Python | appengine-compat/exported_appengine_sdk/google/storage/speckle/proto/jdbc_type.py | speedplane/python-compat-runtime | 743ade7e1350c790c4aaa48dd2c0893d06d80cee | [
"Apache-2.0"
] | 26 | 2015-01-20T08:02:38.000Z | 2020-06-10T04:57:41.000Z | appengine-compat/exported_appengine_sdk/google/storage/speckle/proto/jdbc_type.py | speedplane/python-compat-runtime | 743ade7e1350c790c4aaa48dd2c0893d06d80cee | [
"Apache-2.0"
] | 53 | 2016-04-06T21:10:43.000Z | 2018-03-19T23:14:33.000Z | appengine-compat/exported_appengine_sdk/google/storage/speckle/proto/jdbc_type.py | speedplane/python-compat-runtime | 743ade7e1350c790c4aaa48dd2c0893d06d80cee | [
"Apache-2.0"
] | 23 | 2016-04-19T05:45:26.000Z | 2021-12-31T23:22:36.000Z | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Python equivalent of jdbc_type.h.
Python definition of the JDBC type constant values defined in Java class
java.sql.Types. Since the values don't fall into the range allowed by
a protocol buffer enum, we use Python constants instead.
If you update this, update jdbc_type.py also.
"""
BIT = -7
TINYINT = -6
SMALLINT = 5
INTEGER = 4
BIGINT = -5
FLOAT = 6
REAL = 7
DOUBLE = 8
NUMERIC = 2
DECIMAL = 3
CHAR = 1
VARCHAR = 12
LONGVARCHAR = -1
DATE = 91
TIME = 92
TIMESTAMP = 93
BINARY = -2
VARBINARY = -3
LONGVARBINARY = -4
NULL = 0
OTHER = 1111
JAVA_OBJECT = 2000
DISTINCT = 2001
STRUCT = 2002
ARRAY = 2003
BLOB = 2004
CLOB = 2005
REF = 2006
DATALINK = 70
BOOLEAN = 16
ROWID = -8
NCHAR = -15
NVARCHAR = -9
LONGNVARCHAR = -16
NCLOB = 2011
SQLXML = 2009
| 20.283582 | 74 | 0.725533 |
dbaa5fe4d5410450515867da0876df0842647fcf | 2,406 | py | Python | GestiRED/views.py | osabogal10/GestiREDBackend | 99aa3b01bd67910cc0f96751c88d0f4e83763392 | [
"MIT"
] | null | null | null | GestiRED/views.py | osabogal10/GestiREDBackend | 99aa3b01bd67910cc0f96751c88d0f4e83763392 | [
"MIT"
] | null | null | null | GestiRED/views.py | osabogal10/GestiREDBackend | 99aa3b01bd67910cc0f96751c88d0f4e83763392 | [
"MIT"
] | 1 | 2018-11-19T00:08:05.000Z | 2018-11-19T00:08:05.000Z | from django.http import HttpResponse
from django.core.mail import send_mail
import json
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from GestiRED.models import User
from GestiRED.models import QualityControl, Phase, Resource, ResourceType,PhaseType
from django.core import serializers
from django.db.models import Q
# Create your views here.
| 36.454545 | 130 | 0.656692 |
dbaa65a763de8c1cfbc863205e539ed71151b214 | 2,181 | py | Python | ext_modules/_maix_nn/example/yolo2_camera.py | sipeed/python3-maix | 9ced31b8f1c1e4ef93b6a57bbfced27ae9e3361e | [
"MIT"
] | 93 | 2021-01-12T01:56:06.000Z | 2022-03-30T12:52:01.000Z | ext_modules/_maix_nn/example/yolo2_camera.py | JasperG1998/MaixPy3 | b36800b8d6aebf55018894c215c23a73d2fe406d | [
"MIT"
] | 29 | 2021-02-04T10:37:26.000Z | 2022-03-20T15:10:55.000Z | ext_modules/_maix_nn/example/yolo2_camera.py | JasperG1998/MaixPy3 | b36800b8d6aebf55018894c215c23a73d2fe406d | [
"MIT"
] | 25 | 2021-01-25T18:10:09.000Z | 2022-03-31T13:55:36.000Z |
from maix import nn
from PIL import Image, ImageDraw, ImageFont
from maix import display, camera
import time
from maix.nn import decoder
camera.config(size=(224, 224))
model = {
"param": "/root/models/yolo2_face_awnn.param",
"bin": "/root/models/yolo2_face_awnn.bin"
}
options = {
"model_type": "awnn",
"inputs": {
"input0": (224, 224, 3)
},
"outputs": {
"output0": (7, 7, (1+4+1)*5)
},
"mean": [127.5, 127.5, 127.5],
"norm": [0.0078125, 0.0078125, 0.0078125],
}
print("-- load model:", model)
m = nn.load(model, opt=options)
print("-- load ok")
print("-- read image")
w = options["inputs"]["input0"][1]
h = options["inputs"]["input0"][0]
# # img.show()
print("-- read image ok")
labels = ["person"]
anchors = [1.19, 1.98, 2.79, 4.59, 4.53, 8.92, 8.06, 5.29, 10.32, 10.65]
yolo2_decoder = decoder.Yolo2(len(labels), anchors, net_in_size=(w, h), net_out_size=(7, 7))
while 1:
img = camera.capture()
if not img:
time.sleep(0.01)
continue
t = time.time()
out = m.forward(img, quantize=True, layout="hwc")
print("-- forward: ", time.time() - t )
t = time.time()
boxes, probs = yolo2_decoder.run(out, nms=0.3, threshold=0.5, img_size=(240, 240))
print("-- decode: ", time.time() - t )
t = time.time()
for i, box in enumerate(boxes):
class_id = probs[i][0]
prob = probs[i][1][class_id]
disp_str = "{}:{:.2f}%".format(labels[class_id], prob*100)
draw_rectangle_with_title(display.get_draw(), box, disp_str)
print("-- draw: ", time.time() - t )
t = time.time()
display.show()
print("-- show: ", time.time() - t )
| 27.2625 | 111 | 0.596057 |
dbaa6f31a1ce95280bfdff82b4090e6bc54d2002 | 10,143 | py | Python | tests/test_metadata_options.py | Fatal1ty/mashumaro | f32acf98f7cc7cdf638b921fe3fde96bef4fbefb | [
"Apache-2.0"
] | 394 | 2018-11-09T11:55:11.000Z | 2022-03-27T07:39:48.000Z | tests/test_metadata_options.py | Fatal1ty/mashumaro | f32acf98f7cc7cdf638b921fe3fde96bef4fbefb | [
"Apache-2.0"
] | 70 | 2018-12-10T19:43:01.000Z | 2022-03-17T07:37:45.000Z | tests/test_metadata_options.py | Fatal1ty/mashumaro | f32acf98f7cc7cdf638b921fe3fde96bef4fbefb | [
"Apache-2.0"
] | 29 | 2018-12-10T19:44:19.000Z | 2022-03-11T00:12:26.000Z | from dataclasses import dataclass, field
from datetime import date, datetime, time, timezone
from pathlib import Path
from typing import Any, Dict, Optional, Union
import ciso8601
import pytest
from mashumaro import DataClassDictMixin
from mashumaro.exceptions import UnserializableField
from mashumaro.types import SerializationStrategy
from .entities import (
MutableString,
MyList,
ThirdPartyType,
TypedDictRequiredKeys,
)
| 30.1875 | 79 | 0.648822 |
dbaa809e32092c26124943dd02d9f08d50cbc16b | 3,152 | py | Python | Intent model/Intent_model.py | yashrajt/college_FAQ-chatbot | b3a2a1b4958068b652d019c13f31f6329b093c0a | [
"MIT"
] | 4 | 2020-10-02T20:27:03.000Z | 2021-09-28T16:11:04.000Z | Intent model/Intent_model.py | yashrajt/college_FAQ-chatbot | b3a2a1b4958068b652d019c13f31f6329b093c0a | [
"MIT"
] | 1 | 2020-11-25T10:23:14.000Z | 2020-11-25T10:23:14.000Z | Intent model/Intent_model.py | yashrajt/college_FAQ-chatbot | b3a2a1b4958068b652d019c13f31f6329b093c0a | [
"MIT"
] | 2 | 2020-10-12T18:16:16.000Z | 2021-09-28T16:11:15.000Z | import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report
from sklearn.linear_model import SGDClassifier
from nltk import word_tokenize
import nltk
#nltk.download('punkt')
import re
import joblib
#train_intent()
'''
calender = 0
faculty =1
infra = 2
placement = 4
result = 5
small_talk = 6
student body = 7
syllabus = 8
'''
| 41.473684 | 1,001 | 0.599302 |
dbaae886d43e46ac193d1e7f28a6367192d2a640 | 7,552 | py | Python | vendor/github.com/tensorflow/tensorflow/tensorflow/python/ops/list_ops.py | owennewo/kfserving | 89f73c87525b8e06ea799f69f2979c4ad272fcb3 | [
"Apache-2.0"
] | 2 | 2018-12-12T23:33:05.000Z | 2019-02-26T07:20:22.000Z | vendor/github.com/tensorflow/tensorflow/tensorflow/python/ops/list_ops.py | owennewo/kfserving | 89f73c87525b8e06ea799f69f2979c4ad272fcb3 | [
"Apache-2.0"
] | 13 | 2020-11-13T18:53:29.000Z | 2022-03-12T00:33:00.000Z | vendor/github.com/tensorflow/tensorflow/tensorflow/python/ops/list_ops.py | owennewo/kfserving | 89f73c87525b8e06ea799f69f2979c4ad272fcb3 | [
"Apache-2.0"
] | 2 | 2020-10-06T09:24:31.000Z | 2020-12-20T15:10:56.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops to manipulate lists of tensors."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_list_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_list_ops import *
# pylint: enable=wildcard-import
ops.NotDifferentiable("TensorListConcatLists")
ops.NotDifferentiable("TensorListElementShape")
ops.NotDifferentiable("TensorListLength")
ops.NotDifferentiable("TensorListPushBackBatch")
def _build_element_shape(shape):
"""Converts shape to a format understood by list_ops for element_shape.
If `shape` is already a `Tensor` it is returned as-is. We do not perform a
type check here.
If shape is None or a TensorShape with unknown rank, -1 is returned.
If shape is a scalar, an int32 tensor with empty list is returned. Note we
do directly return an empty list since ops.convert_to_tensor would conver it
to a float32 which is not a valid type for element_shape.
If shape is a sequence of dims, None's in the list are replaced with -1. We
do not check the dtype of the other dims.
Args:
shape: Could be None, Tensor, TensorShape or a list of dims (each dim could
be a None, scalar or Tensor).
Returns:
A None-free shape that can be converted to a tensor.
"""
if isinstance(shape, ops.Tensor):
return shape
if isinstance(shape, tensor_shape.TensorShape):
# `TensorShape.as_list` requires rank to be known.
shape = shape.as_list() if shape else None
# Shape is unknown.
if shape is None:
return -1
# Shape is a scalar.
if not shape:
return ops.convert_to_tensor(shape, dtype=dtypes.int32)
# Shape is a sequence of dimensions. Convert None dims to -1.
return [d if d is not None else -1 for d in shape]
| 34.801843 | 80 | 0.742585 |
dbad2da50018b20b9e8cf4be1668cfeef2d4c6cb | 729 | py | Python | tests/test_dump.py | flaeppe/astunparse | 754ec7d113fa273625ccc7b6c5d65aa7700ab8a9 | [
"PSF-2.0"
] | 189 | 2016-03-15T06:48:48.000Z | 2022-03-12T09:34:10.000Z | tests/test_dump.py | flaeppe/astunparse | 754ec7d113fa273625ccc7b6c5d65aa7700ab8a9 | [
"PSF-2.0"
] | 50 | 2015-09-14T16:22:00.000Z | 2022-02-24T05:36:57.000Z | tests/test_dump.py | flaeppe/astunparse | 754ec7d113fa273625ccc7b6c5d65aa7700ab8a9 | [
"PSF-2.0"
] | 52 | 2015-04-29T10:52:33.000Z | 2022-03-03T19:59:54.000Z | import ast
import re
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import astunparse
from tests.common import AstunparseCommonTestCase
| 29.16 | 71 | 0.663923 |
dbad96b0fa05c373ff9f7995b182a8597ec11299 | 1,387 | py | Python | src/django/giraffe/blat/management/commands/reset_app.py | addgene/giraffe | c7d3b1f000ceea83e6c98cce06cd2a0f9e4f4c2c | [
"MIT"
] | 4 | 2016-10-13T15:46:06.000Z | 2018-08-22T21:43:28.000Z | src/django/giraffe/blat/management/commands/reset_app.py | addgene/giraffe | c7d3b1f000ceea83e6c98cce06cd2a0f9e4f4c2c | [
"MIT"
] | null | null | null | src/django/giraffe/blat/management/commands/reset_app.py | addgene/giraffe | c7d3b1f000ceea83e6c98cce06cd2a0f9e4f4c2c | [
"MIT"
] | 1 | 2015-07-26T21:42:31.000Z | 2015-07-26T21:42:31.000Z | from django.core.management.base import AppCommand, CommandError
from django.core.management.sql import sql_reset
from django.core.management.color import no_style
from django.db import connections
| 47.827586 | 370 | 0.626532 |
dbad9f49539fab32473ae89e8b92b989783f9cfd | 89 | py | Python | webBlog/apps.py | JordanBRoberts/python-theBand | 1e475a45a42b210c722ab43c0b966d7b58d97a9d | [
"MIT"
] | null | null | null | webBlog/apps.py | JordanBRoberts/python-theBand | 1e475a45a42b210c722ab43c0b966d7b58d97a9d | [
"MIT"
] | null | null | null | webBlog/apps.py | JordanBRoberts/python-theBand | 1e475a45a42b210c722ab43c0b966d7b58d97a9d | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 14.833333 | 33 | 0.752809 |
dbaf82c57c0e8e70a6ba6faeba1bc88a6aa96173 | 996 | py | Python | requires.py | lydaaa/fzutils | 5f775d046876e3ce35d0b1174b5a3db96e9d627e | [
"MIT"
] | 1 | 2018-08-04T13:55:03.000Z | 2018-08-04T13:55:03.000Z | requires.py | lydaaa/fzutils | 5f775d046876e3ce35d0b1174b5a3db96e9d627e | [
"MIT"
] | null | null | null | requires.py | lydaaa/fzutils | 5f775d046876e3ce35d0b1174b5a3db96e9d627e | [
"MIT"
] | null | null | null | # coding:utf-8
'''
@author = super_fazai
@File : requires.py
@Time : 2016/8/3 12:59
@connect : superonesfazai@gmail.com
'''
install_requires = [
'ipython',
'wheel',
'utils',
'db',
'greenlet==0.4.13',
'web.py==0.40.dev1',
'pytz',
'requests',
'selenium==3.8.0', # 3.8.1phantomjs
'asyncio',
'psutil',
'pyexecjs',
'setuptools',
'colorama',
'twine',
'numpy',
'pprint',
'selenium',
'chardet',
'bs4',
'scrapy',
'demjson',
'pymssql',
'sqlalchemy',
'gevent',
'aiohttp',
'celery',
'jsonpath',
'matplotlib',
'wget',
'flask',
'flask_login',
'mitmproxy', # shell
'pymongo',
'pyexcel',
'pyexcel-xlsx',
'fabric',
'shadowsocks',
# 'pycurl==7.43.0.1',
'furl',
'yarl',
'prettytable',
'xlrd',
'pandas',
'jieba',
'geopandas',
'scikit-image',
'wordcloud', #
'pygame',
] | 16.6 | 53 | 0.491968 |
dbafb8e5a5c72fd3abd02eb1cca23ac263bc48ce | 2,433 | py | Python | m15_dos/dos.py | venkatarjun/Python3 | 606adf8588a74a53d592e62e07e81a5a1530b993 | [
"MIT"
] | 80 | 2020-11-14T19:19:27.000Z | 2022-03-10T17:43:17.000Z | m15_dos/dos.py | nerbertb/python-52-weeks | 55add5d75d1aabed4c59d445e1d1b773ede047b0 | [
"MIT"
] | 10 | 2020-11-24T06:19:45.000Z | 2022-02-27T23:53:28.000Z | m15_dos/dos.py | nerbertb/python-52-weeks | 55add5d75d1aabed4c59d445e1d1b773ede047b0 | [
"MIT"
] | 58 | 2020-11-13T18:35:22.000Z | 2022-03-28T06:40:08.000Z | import subprocess
import requests
import argparse
from concurrent.futures import ThreadPoolExecutor
from time import sleep
from datetime import datetime
ICMP_ATTACK = "ICMP"
HTTP_ATTACK = "HTTP"
valid_attacks = {HTTP_ATTACK, ICMP_ATTACK}
parser = argparse.ArgumentParser(description="DoS HTTP")
parser.add_argument('-P', '--poolsize', default=10, help='Size of the threadpool')
parser.add_argument('-T', '--target', default='localhost', help='Target URL for http request')
parser.add_argument('-D', '--delay', default=0, help='Amount of time to wait between requests')
parser.add_argument('-A', '--attack', help='Type of attack (e.g. HTTP, ICMP)')
args = parser.parse_args()
threadpool_size = int(args.poolsize)
target = args.target
delay = int(args.delay)
attack = args.attack.upper()
if attack not in valid_attacks:
print(f"Invalid attack type, must be one of: {valid_attacks}")
exit()
terminate = False
if __name__ == "__main__":
main()
| 27.337079 | 102 | 0.630908 |
dbb02044e102ff75841402e288f20f24bd0e7921 | 3,444 | py | Python | maestro/backends/django/contrib/signals.py | estudio89/maestro-python | 331079cb3f0c10de2e19210cbade793544510f33 | [
"BSD-3-Clause"
] | null | null | null | maestro/backends/django/contrib/signals.py | estudio89/maestro-python | 331079cb3f0c10de2e19210cbade793544510f33 | [
"BSD-3-Clause"
] | null | null | null | maestro/backends/django/contrib/signals.py | estudio89/maestro-python | 331079cb3f0c10de2e19210cbade793544510f33 | [
"BSD-3-Clause"
] | null | null | null | from django.apps import apps
from django.db import models
from django.db.models.signals import post_save, pre_delete
from typing import Type, Optional, List, cast, TYPE_CHECKING
from maestro.backends.django.settings import maestro_settings
from maestro.backends.django.contrib.factory import create_django_data_store
from maestro.backends.django.utils import model_to_entity_name
from maestro.core.metadata import Operation
from .middleware import _add_operation_to_queue
import copy
if TYPE_CHECKING:
from maestro.backends.django import DjangoDataStore
| 30.210526 | 86 | 0.702962 |
dbb47fb9bbbb993b07541531acf7c95109ac62eb | 142 | py | Python | top/urls.py | pbexe/nextbike-top | eca086406cf6b96d6e086dd0fa9ecae5b6364f4d | [
"MIT"
] | null | null | null | top/urls.py | pbexe/nextbike-top | eca086406cf6b96d6e086dd0fa9ecae5b6364f4d | [
"MIT"
] | null | null | null | top/urls.py | pbexe/nextbike-top | eca086406cf6b96d6e086dd0fa9ecae5b6364f4d | [
"MIT"
] | null | null | null | from django.urls import include, path
from .views import home, bike
urlpatterns = [
path("", home),
path("bike/<int:number>", bike)
] | 20.285714 | 37 | 0.661972 |
dbb4a7b40915f984e1d6c4fb86487617ba753bc3 | 2,421 | py | Python | Scripts/ReduceFragments.py | mike72353/FragFeatureNet | ef61ae52e3d6dcc6d2d56df2a6bd5fe1a298c930 | [
"BSD-3-Clause"
] | 1 | 2021-10-13T11:49:37.000Z | 2021-10-13T11:49:37.000Z | Scripts/ReduceFragments.py | mike72353/FragFeatureNet | ef61ae52e3d6dcc6d2d56df2a6bd5fe1a298c930 | [
"BSD-3-Clause"
] | null | null | null | Scripts/ReduceFragments.py | mike72353/FragFeatureNet | ef61ae52e3d6dcc6d2d56df2a6bd5fe1a298c930 | [
"BSD-3-Clause"
] | 1 | 2021-09-09T04:42:20.000Z | 2021-09-09T04:42:20.000Z | """
Remove Fragments not in Knowledgebase
"""
__author__ = "Michael Suarez"
__email__ = "masv@connect.ust.hk"
__copyright__ = "Copyright 2019, Hong Kong University of Science and Technology"
__license__ = "3-clause BSD"
from argparse import ArgumentParser
import numpy as np
import pickle
parser = ArgumentParser(description="Build Files")
parser.add_argument("--datadir", type=str, default="Data", help="input - XXX.YYY ")
parser.add_argument("--envNewAcronym", type=str, default="PRT.SNW", help="input - XXX.YYY ")
args = parser.parse_args()
# Check the Bound Fragments
BoundFrags = np.loadtxt("../%s/%s/%s.Homogenised.boundfrags_zeros.txt" %(args.datadir, args.envNewAcronym, args.envNewAcronym), delimiter=',')
normalDF = pickle.load(open("../%s/GrandCID.dict" %(args.datadir), "rb"))
binding = np.full(BoundFrags.shape,-1)
mlength = 0
for r, i in enumerate(BoundFrags):
for c, j in enumerate(i[i!=0]):
try:
# Checks whether the Fragment can be found in the 59k Fragment Base
binding[r,c]=normalDF.index.get_loc(int(j))
except:
continue
temp = binding[r]
if temp[temp!=-1].shape[0] > mlength:
mlength = temp[temp!=-1].shape[0]
print(mlength) #Finds the maximum number of Fragments per environment -> 705
indices = np.empty(binding.shape[0])
red_binding = np.full((binding.shape[0], mlength), -1)
for j, i in enumerate(binding):
indices[j] = i[i!=-1].shape[0]
red_binding[j][:int(indices[j])] = i[i!=-1]
red_binding = np.delete(red_binding, np.where(indices==0), axis=0)
pickle.dump(red_binding, open("../%s/%s/%s.binding.mtr" %(args.datadir, args.envNewAcronym, args.envNewAcronym), "wb"))
# Removes environments without binding Fragments
Features_all = pickle.load(open("../%s/%s/%s.Homogenised.property.pvar" %(args.datadir, args.envNewAcronym, args.envNewAcronym), "rb"))
Features_all = np.delete(Features_all, np.where(indices==0), axis=0)
pickle.dump(Features_all, open("../%s/%s/%s.Homogenised.property.pvar" %(args.datadir, args.envNewAcronym, args.envNewAcronym), "wb"))
# Removes environment annotiation without binding fragments
with open("../%s/%s/%s.Homogenised.annotation.txt" %(args.datadir, args.envNewAcronym, args.envNewAcronym), "r+") as f:
lines = f.readlines()
for i in np.where(indices==0)[0][::-1]:
del lines[i]
f.seek(0)
f.truncate()
f.writelines(lines)
| 38.428571 | 142 | 0.687732 |
dbb4ba3a72efae417ef662fbf9ea83724f57fdc1 | 11,352 | py | Python | client/core/tests/billing_tests.py | vbohinc/CommunityCellularManager | ab330fcb1bc70ee3a8e9bcdac2846ab6c327f87c | [
"BSD-3-Clause"
] | null | null | null | client/core/tests/billing_tests.py | vbohinc/CommunityCellularManager | ab330fcb1bc70ee3a8e9bcdac2846ab6c327f87c | [
"BSD-3-Clause"
] | 3 | 2021-03-20T00:02:37.000Z | 2022-02-11T03:46:59.000Z | client/core/tests/billing_tests.py | vbohinc/CommunityCellularManager | ab330fcb1bc70ee3a8e9bcdac2846ab6c327f87c | [
"BSD-3-Clause"
] | null | null | null | """Tests for core.billing.
Run this test from the project root
$ nosetests core.tests.billing_tests
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import random
import math
from core.billing import get_call_cost
from core.billing import get_prefix_from_number
from core.billing import get_sms_cost
from core.billing import process_prices
from core.billing import round_to_billable_unit
from core.billing import round_up_to_nearest_100
from core import config_database
TARIFF = 100
| 38.481356 | 79 | 0.587738 |
dbb81ecf1571a74c986e0ef5e76802273692f79e | 1,106 | py | Python | data_interrogator/admin/views.py | s-i-l-k-e/django-data-interrogator | 0284168b81aaa31a8df84f3ea52166eded8a4362 | [
"MIT"
] | null | null | null | data_interrogator/admin/views.py | s-i-l-k-e/django-data-interrogator | 0284168b81aaa31a8df84f3ea52166eded8a4362 | [
"MIT"
] | null | null | null | data_interrogator/admin/views.py | s-i-l-k-e/django-data-interrogator | 0284168b81aaa31a8df84f3ea52166eded8a4362 | [
"MIT"
] | null | null | null | from django.contrib.auth.decorators import user_passes_test
from django.utils.decorators import method_decorator
from data_interrogator.admin.forms import AdminInvestigationForm, AdminPivotTableForm
from data_interrogator.interrogators import Allowable
from data_interrogator.views import InterrogationView, InterrogationAutocompleteUrls, PivotTableView, \
InterrogationAutoComplete
| 35.677419 | 103 | 0.824593 |
dbb832b244c092d5e626be322221a0dd99c61a02 | 327 | py | Python | configs/pspnet/pspnet_r18-d8_512x512_80k_loveda.py | heytanay/mmsegmentation | 7ddd2fe2ecff9c95999bd00ec05cc37eafb558f8 | [
"Apache-2.0"
] | 11 | 2022-02-04T01:09:45.000Z | 2022-03-08T05:49:16.000Z | configs/pspnet/pspnet_r18-d8_512x512_80k_loveda.py | heytanay/mmsegmentation | 7ddd2fe2ecff9c95999bd00ec05cc37eafb558f8 | [
"Apache-2.0"
] | 2 | 2022-02-25T03:07:23.000Z | 2022-03-08T12:54:05.000Z | configs/pspnet/pspnet_r18-d8_512x512_80k_loveda.py | heytanay/mmsegmentation | 7ddd2fe2ecff9c95999bd00ec05cc37eafb558f8 | [
"Apache-2.0"
] | 2 | 2021-04-23T05:32:00.000Z | 2021-11-11T02:45:08.000Z | _base_ = './pspnet_r50-d8_512x512_80k_loveda.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnet18_v1c')),
decode_head=dict(
in_channels=512,
channels=128,
),
auxiliary_head=dict(in_channels=256, channels=64))
| 27.25 | 72 | 0.629969 |
dbb9c02aefc14ce19f8f0ea13f80afd504f6a7db | 191 | py | Python | bba/objects.py | TheGenocides/BBA | 1617756ed9224027d7225ea68364f6568c56ed23 | [
"MIT"
] | 3 | 2021-11-07T16:44:13.000Z | 2021-12-13T13:48:07.000Z | bba/objects.py | TheGenocides/BBA | 1617756ed9224027d7225ea68364f6568c56ed23 | [
"MIT"
] | null | null | null | bba/objects.py | TheGenocides/BBA | 1617756ed9224027d7225ea68364f6568c56ed23 | [
"MIT"
] | null | null | null | from typing import Dict, Any | 27.285714 | 45 | 0.602094 |
dbba66cc16504421bbf294d9cd7ab892cc735e8e | 4,880 | py | Python | apps/greencheck/forms.py | BR0kEN-/admin-portal | 0c38dc0d790031f45bf07660bce690e972fe2858 | [
"Apache-2.0"
] | null | null | null | apps/greencheck/forms.py | BR0kEN-/admin-portal | 0c38dc0d790031f45bf07660bce690e972fe2858 | [
"Apache-2.0"
] | null | null | null | apps/greencheck/forms.py | BR0kEN-/admin-portal | 0c38dc0d790031f45bf07660bce690e972fe2858 | [
"Apache-2.0"
] | null | null | null | from django import forms
from django.forms import ModelForm
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from .choices import ActionChoice
from .choices import StatusApproval
from .models import GreencheckIp
from .models import GreencheckIpApprove
from .models import GreencheckASN, GreencheckASNapprove
User = get_user_model()
| 30.886076 | 85 | 0.608811 |
dbbba499caecc6c455f90595eccf7b64b710a2e3 | 263 | py | Python | apps/utils/format/url_format.py | think-wang/osroom | 67bb5bbd7a63fbaeb0d919738859444b54500152 | [
"BSD-2-Clause"
] | 1 | 2020-04-03T08:01:07.000Z | 2020-04-03T08:01:07.000Z | apps/utils/format/url_format.py | dhgdhg/osroom | 4d693eaab96503cadd391bf924bffedcd931a07c | [
"BSD-2-Clause"
] | null | null | null | apps/utils/format/url_format.py | dhgdhg/osroom | 4d693eaab96503cadd391bf924bffedcd931a07c | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# -*-coding:utf-8-*-
from tld import get_tld
__author__ = "Allen Woo"
def get_domain(url):
'''
url
:param url:
:return:
'''
res = get_tld(url, as_object=True)
return "{}.{}".format(res.subdomain, res.tld) | 18.785714 | 49 | 0.604563 |
dbbc25c0d987a2badd4b10e9df8a681d25f102e8 | 23,904 | py | Python | ipamanager/entities.py | Tjev/freeipa-manager | 0d40e64d81a86d4312b4e22cd57dcaecf25d0801 | [
"BSD-3-Clause"
] | null | null | null | ipamanager/entities.py | Tjev/freeipa-manager | 0d40e64d81a86d4312b4e22cd57dcaecf25d0801 | [
"BSD-3-Clause"
] | null | null | null | ipamanager/entities.py | Tjev/freeipa-manager | 0d40e64d81a86d4312b4e22cd57dcaecf25d0801 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2017-2019, GoodData Corporation. All rights reserved.
"""
FreeIPA Manager - entity module
Object representations of the entities configured in FreeIPA.
"""
import os
import re
import voluptuous
import yaml
from abc import ABCMeta, abstractproperty
import schemas
from command import Command
from core import FreeIPAManagerCore
from errors import ConfigError, ManagerError, IntegrityError
class FreeIPAGroup(FreeIPAEntity):
"""Abstract representation a FreeIPA group entity (host/user group)."""
managed_attributes_push = ['description']
| 39.058824 | 79 | 0.616508 |
dbbca7079e41d333542d3d27bb46afa6aecbe834 | 1,580 | py | Python | test/test_catalog_manager.py | weknowtraining/athena-glue-service-logs | b7cf77408486f2bfa941b8609617ed47aa3e2d02 | [
"Apache-2.0"
] | 133 | 2018-09-17T12:43:14.000Z | 2022-03-15T20:03:12.000Z | test/test_catalog_manager.py | weknowtraining/athena-glue-service-logs | b7cf77408486f2bfa941b8609617ed47aa3e2d02 | [
"Apache-2.0"
] | 22 | 2018-11-19T21:51:04.000Z | 2022-03-08T12:13:19.000Z | test/test_catalog_manager.py | weknowtraining/athena-glue-service-logs | b7cf77408486f2bfa941b8609617ed47aa3e2d02 | [
"Apache-2.0"
] | 46 | 2018-10-04T04:27:26.000Z | 2022-03-01T03:28:38.000Z | # pylint: skip-file
from athena_glue_service_logs.catalog_manager import BaseCatalogManager
| 50.967742 | 119 | 0.79557 |
dbbd1a19c06924421a7c2e88261ac232f18c11f4 | 83 | py | Python | unsorted/pythonsnippets_0013.py | fiddlerwoaroof/sandbox | 652acaf710a8b60f005769bde317e7bbf548cc2b | [
"BSD-3-Clause"
] | null | null | null | unsorted/pythonsnippets_0013.py | fiddlerwoaroof/sandbox | 652acaf710a8b60f005769bde317e7bbf548cc2b | [
"BSD-3-Clause"
] | null | null | null | unsorted/pythonsnippets_0013.py | fiddlerwoaroof/sandbox | 652acaf710a8b60f005769bde317e7bbf548cc2b | [
"BSD-3-Clause"
] | null | null | null | from twisted.internet import reactor
reactor.listenTCP(8789, factory)
reactor.run() | 27.666667 | 36 | 0.831325 |
dbbe56b29123b2a0ee8c4986b892e3949b69a274 | 2,362 | py | Python | __main__.py | SHUcream00/MLBPitchVisual | a3092cef7cbd4e73f8d0010dd62811df6cc36cac | [
"MIT"
] | null | null | null | __main__.py | SHUcream00/MLBPitchVisual | a3092cef7cbd4e73f8d0010dd62811df6cc36cac | [
"MIT"
] | null | null | null | __main__.py | SHUcream00/MLBPitchVisual | a3092cef7cbd4e73f8d0010dd62811df6cc36cac | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Setting up Name and CSV location
player_name = "Put player name"
file_src = "Put target csv"
raw = pd.read_csv(file_src)
df = pd.DataFrame(raw)
#For filtering cases
replace_dict = {"description": {"hit_into_play_no_out": "contact", "hit_into_play": "contact", "hit_into_play_score": "contact", "swinging_strike": "miss", "swinging_strike_blocked": "miss"}}
ballname_dict = {"FF": "4-Seam Fastball", "CH": "Changeup", "CU": "Curveball", "SL": "Slider", "FT": "2-Seam Fastball", "AB": "Automatic Ball",
"AS": "Automatic Strike", "EP": "Eephus", "FC": "Cutter", "FO": "Forkball", "FS": "Splitter", "GY": "Gyroball", "IN": "Intentional Ball",
"KC": "Knuckle Curve", "NP": "No Pitch", "PO": "Pitchout", "SC": "Screwball", "SI": "Sinker", "UN": "Unknown"}
df = df.replace(replace_dict)
df = df[df["description"].isin(["contact", "miss"])]
for i in df["pitch_type"].unique():
visualize(df, i)
| 37.492063 | 192 | 0.615157 |
dbbee95cb22f9ddebb8ee025c418f2636a32f8bb | 790 | py | Python | shape_similarity.py | Toonwire/infancy_eye_tracking | 7b96a9d832f60f83fd5098ada2117ab1d0f56fed | [
"MIT"
] | null | null | null | shape_similarity.py | Toonwire/infancy_eye_tracking | 7b96a9d832f60f83fd5098ada2117ab1d0f56fed | [
"MIT"
] | null | null | null | shape_similarity.py | Toonwire/infancy_eye_tracking | 7b96a9d832f60f83fd5098ada2117ab1d0f56fed | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat May 25 13:17:49 2019
@author: Toonw
"""
import numpy as np
# Similarity measure of article
## https://pdfs.semanticscholar.org/60b5/aca20ba34d424f4236359bd5e6aa30487682.pdf | 23.939394 | 130 | 0.613924 |
dbc01ab01c84c8a6897199dca9635aa645e6cdeb | 262 | py | Python | apps/chats/apps.py | aldwyn/effigia | eb456656949bf68934530bbec9c15ebc6d0236b8 | [
"MIT"
] | 1 | 2018-11-15T05:17:30.000Z | 2018-11-15T05:17:30.000Z | apps/chats/apps.py | aldwyn/effigia | eb456656949bf68934530bbec9c15ebc6d0236b8 | [
"MIT"
] | 5 | 2021-06-09T17:20:01.000Z | 2022-03-11T23:18:06.000Z | apps/chats/apps.py | aldwyn/effigia | eb456656949bf68934530bbec9c15ebc6d0236b8 | [
"MIT"
] | 1 | 2018-10-05T19:03:27.000Z | 2018-10-05T19:03:27.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
| 20.153846 | 45 | 0.694656 |
dbc0541470856937f6eef50be9d0887839277ab1 | 3,472 | py | Python | utils/ghost.py | JayJJChen/LoveXueXiQiangGuo | 648a38cd73d1eb7ed7267721f1a23c90afb0daee | [
"MIT"
] | 3 | 2019-04-16T07:52:20.000Z | 2021-08-16T03:07:14.000Z | utils/ghost.py | JayJJChen/LoveXueXiQiangGuo | 648a38cd73d1eb7ed7267721f1a23c90afb0daee | [
"MIT"
] | 1 | 2019-04-17T02:23:32.000Z | 2020-12-24T11:04:52.000Z | utils/ghost.py | JayJJChen/LoveXueXiQiangGuo | 648a38cd73d1eb7ed7267721f1a23c90afb0daee | [
"MIT"
] | 2 | 2019-04-17T04:00:55.000Z | 2019-09-18T00:57:35.000Z | import os
import time
from utils.eye import Eye
from utils.finger import Finger
| 30.191304 | 97 | 0.563652 |
dbc13915cb653c37c09279f81347a4bfea838dd2 | 3,686 | py | Python | src_taxonomy/bubble_tree_map.py | sanja7s/SR_Twitter | 2eb499c9aa25ba6e9860cd77eac6832890d2c126 | [
"MIT"
] | null | null | null | src_taxonomy/bubble_tree_map.py | sanja7s/SR_Twitter | 2eb499c9aa25ba6e9860cd77eac6832890d2c126 | [
"MIT"
] | null | null | null | src_taxonomy/bubble_tree_map.py | sanja7s/SR_Twitter | 2eb499c9aa25ba6e9860cd77eac6832890d2c126 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import random
from ete2 import Tree, TreeStyle, NodeStyle, faces, AttrFace, CircleFace, TextFace
def test_data():
D = {'taxonomy': [{"score": "0.718868", "label": "/art and entertainment/movies and tv/movies"},\
{"confident": "no", "score": "0.304296", "label": "/pets/cats"},\
{"score": "0.718868", "label": "/art and entertainment/movies and tv/series"}]}
t7s = Tree7s("ThingAdamsFamily")
for el in D["taxonomy"]:
#n = t7s
n = t7s.find_root()
taxonomy_tree = el["label"]
taxonomy_tree = taxonomy_tree.split("/")
taxonomy_tree.pop(0)
levels = len(taxonomy_tree)
score = float(el["score"])
print levels, taxonomy_tree, score
for i in range(levels):
label = taxonomy_tree[i]
#if n.find_child(label) == None:
n.add_child(label, score, i+1)
n = n.find_child(label)
t7s.find_root().print_me()
t = t7s.find_root()
S = t.create_newick() + ";"
print S
#S = "(((A,B,(C.,D)E)F,(S,N)K)R);"
#T = Tree(S, format=8)
T = Tree(S, format=1)
for node in T.traverse("postorder"):
# Do some analysis on node
print node.name
for node in T.traverse("levelorder"):
# Do some analysis on node
print node.name
#for branch in T
return T
if __name__ == "__main__":
#t.render("bubble_map.png", w=600, dpi=300, tree_style=ts)
#t.show(tree_style=ts)
t = test_data()
ts = give_tree_layout(t)
t.show(tree_style=ts)
t.render("bubble_map.png", w=600, dpi=300, tree_style=ts) | 24.091503 | 98 | 0.655724 |
dbc290ad28df369cc2a5189c66e670824982c619 | 28,719 | py | Python | compass/core/_scrapers/member.py | MrNoScript/compass-interface-core | 8c945ef36f7bee396bd5a744404eaa88d280a845 | [
"MIT"
] | null | null | null | compass/core/_scrapers/member.py | MrNoScript/compass-interface-core | 8c945ef36f7bee396bd5a744404eaa88d280a845 | [
"MIT"
] | null | null | null | compass/core/_scrapers/member.py | MrNoScript/compass-interface-core | 8c945ef36f7bee396bd5a744404eaa88d280a845 | [
"MIT"
] | null | null | null | from __future__ import annotations
import re
import time
from typing import get_args, Literal, TYPE_CHECKING, Union
from lxml import html
from compass.core.interface_base import InterfaceBase
from compass.core.logger import logger
from compass.core.schemas import member as schema
from compass.core.settings import Settings
from compass.core.utility import cast
from compass.core.utility import maybe_int
from compass.core.utility import parse
if TYPE_CHECKING:
import requests
MEMBER_PROFILE_TAB_TYPES = Literal[
"Personal", "Roles", "Permits", "Training", "Awards", "Emergency", "Comms", "Visibility", "Disclosures"
]
| 41.262931 | 132 | 0.568126 |
dbc489c4f1e6739cd6d3b2e54cc4268da59045a7 | 336 | py | Python | quran_text/urls.py | Quran-Tafseer/tafseer_api | 49eede15a6e50812a4bab1e0e1e38069fcb0da4d | [
"MIT"
] | 16 | 2019-03-02T13:08:59.000Z | 2022-02-26T17:26:09.000Z | quran_text/urls.py | EmadMokhtar/tafseer_api | abb2d53eb917f58db1e09f7d92180b0eb8001a40 | [
"MIT"
] | 45 | 2017-10-25T06:17:50.000Z | 2018-12-08T17:01:41.000Z | quran_text/urls.py | Quran-Tafseer/tafseer_api | 49eede15a6e50812a4bab1e0e1e38069fcb0da4d | [
"MIT"
] | 6 | 2019-02-09T03:57:09.000Z | 2021-12-29T02:54:29.000Z | from django.urls import path
from . import views
urlpatterns = [
path('',
view=views.SuraListView.as_view(), name='sura-list'),
path('<int:sura_num>/<int:number>/',
view=views.AyahTextView.as_view(), name='ayah-detail'),
path('<int:sura_num>/<int:number>',
view=views.AyahTextView.as_view()),
]
| 25.846154 | 64 | 0.630952 |
dbc52992fc79a5adada939783cc09ffe329b0264 | 1,623 | py | Python | konnection/settings/local.py | IanSeng/CMPUT404_PROJECT | 80acd2c57de4b091e0e66ad9f5f2df17801bf09e | [
"W3C-20150513"
] | null | null | null | konnection/settings/local.py | IanSeng/CMPUT404_PROJECT | 80acd2c57de4b091e0e66ad9f5f2df17801bf09e | [
"W3C-20150513"
] | null | null | null | konnection/settings/local.py | IanSeng/CMPUT404_PROJECT | 80acd2c57de4b091e0e66ad9f5f2df17801bf09e | [
"W3C-20150513"
] | null | null | null | from konnection.settings.base import *
from pathlib import Path
import os
import dotenv
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent.parent
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
SECRET_KEY = 'temporaryKey'
# For tests
# https://stackoverflow.com/a/35224204
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = ['--with-spec', '--spec-color']
# Adding secrets to env file
# From StackOverflow https://stackoverflow.com/a/61437799
# From Zack Plauch https://stackoverflow.com/users/10415970/zack-plauch%c3%a9
dotenv_file = os.path.join(BASE_DIR, ".env")
if os.path.isfile(dotenv_file):
dotenv.load_dotenv(dotenv_file)
# Connecting PostgreSQL to Django
# From https://www.digitalocean.com/community/tutorials/how-to-use-postgresql-with-your-django-application-on-ubuntu-14-04
# From Digital Ocean
# From Justin Ellingwood https://www.digitalocean.com/community/users/jellingwood
if os.getenv('GITHUB_WORKFLOW'):
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'github-actions',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': 'localhost',
'PORT': '5432'
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'myproject',
'USER': os.environ['DB_USER'],
'PASSWORD': os.environ['DB_PASSWORD'],
'HOST': 'localhost',
'PORT': '',
}
} | 31.211538 | 122 | 0.653112 |
dbc6237f7856e6445933721e9b53e17ec980bef0 | 8,205 | py | Python | main.py | PotentialParadox/PyReparm | 70062e351eebacb9c6cb3dc0262e97256c52be3d | [
"Apache-2.0"
] | null | null | null | main.py | PotentialParadox/PyReparm | 70062e351eebacb9c6cb3dc0262e97256c52be3d | [
"Apache-2.0"
] | null | null | null | main.py | PotentialParadox/PyReparm | 70062e351eebacb9c6cb3dc0262e97256c52be3d | [
"Apache-2.0"
] | null | null | null | import random
from evaluation import Evaluator
from generator import generator
from mutate import mutateset
from deap import base
from deap import creator
from deap import tools
from parameter_group import ParameterGroup
import gaussian_output
from analysis import Analysis
from gaussian_input import GaussianInput
from gaussian import gaussian_single
from header import Header
from reparm_data import ReparmData
from genesis import Genesis
import numpy as np
from scipy.optimize import minimize
from copy import deepcopy
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import svm
from sklearn.linear_model import RidgeCV
from sklearn.ensemble import RandomForestRegressor
#############################################
# BEGIN USER INPUT
#############################################
# Read the ReParm input deck. A context manager closes the handle that the
# original leaked, and the text variable no longer shadows the (Python 2)
# builtin name `file`.
with open("reparm.in", 'r') as fin:
    reparm_input_text = fin.read()
reparm_data = ReparmData(reparm_input_text)
if reparm_data.reparm_input.should_continue:
    # Resuming: restore previously saved population/fitness state.
    reparm_data.load()
else:
    # Fresh run: generate the initial data set and checkpoint it immediately.
    Genesis(reparm_data=reparm_data)
    reparm_data.save()
############################################
# END USER INPUT
############################################
#############################################
# BEGIN USER INPUT
#############################################
# Number of Generations
NGEN = reparm_data.reparm_input.number_generations
# Population Size
PSIZE = reparm_data.reparm_input.population_size
# Crossover Probability
CXPB = reparm_data.reparm_input.crossover_probability
# Mutation Probability
# How likely an individual will be mutated
MUTPB = reparm_data.reparm_input.mutation_probability
# Mutation Rate
# How likely a member of an individual will be mutated
MUTR = reparm_data.reparm_input.mutation_rate
# Crowding Factor
CWD = reparm_data.reparm_input.crowding_factor
# Mutation Perturbation
MUTPT = reparm_data.reparm_input.mutation_perturbation
# Initial Perturbation
IMUTPT = 0.05
# Initial list of parameters: every 4th p_float of the best AM1 individual.
# A stride-4 slice replaces the original range(0, len, 4) index loop.
IL = list(reparm_data.best_am1_individual.inputs[0].parameters[0].p_floats[::4])
# The evaluator (fitness, cost) function.
# NOTE: the name `eval` shadows the builtin; it is kept because later code
# (toolbox.register("evaluate", eval.eval)) refers to it by this name.
eval = Evaluator(reparm_data=reparm_data)
# Both branches of the original if/else evaluated the starting parameter set
# identically; do it once, and record the baseline only on the very first run.
first_run = reparm_data.best_fitness is None
reparm_data.best_fitness = list(eval.eval(IL))
if first_run:
    reparm_data.original_fitness = deepcopy(reparm_data.best_fitness)
print("original_fitness", reparm_data.original_fitness)
print("starting at", reparm_data.best_fitness)
#############################################
# END USER INPUT
#############################################
#############################################
# BEGIN DEAP SETUP
#############################################
# Fitness with weights (-1.0, 0, 0): only the first objective (total cost) is
# optimized, and the negative weight makes DEAP minimize it. `best` slot on
# ParamSet is used by the (commented-out) particle-swarm variant below.
creator.create("FitnessMax", base.Fitness, weights=(-1.0, 0, 0))
creator.create("ParamSet", list, fitness=creator.FitnessMax, best=None)
toolbox = base.Toolbox()
# Individuals are perturbed copies of the initial parameter list IL.
toolbox.register("individual", generator, IL, IMUTPT)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# Simulated-binary crossover, per-member set mutation, tournament selection.
toolbox.register("mate", tools.cxSimulatedBinary)
toolbox.register("mutate", mutateset, pert=MUTPT, chance=MUTR)
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("evaluate", eval.eval)
pop = toolbox.population(n=PSIZE)
#############################################
# END DEAP SETUP
#############################################
#############################################
# BEGIN GENETIC ALGORITHM
#############################################
# Standard DEAP evolutionary loop: select -> clone -> crossover -> mutate ->
# re-evaluate invalidated individuals, tracking the best parameter set found.
for g in range(NGEN):
    print("Starting gen:", g)
    # Tournament-select a full-size offspring pool and clone it so the
    # variation operators below do not mutate the current population in place.
    offspring = toolbox.select(pop, len(pop))
    offspring = list(map(toolbox.clone, offspring))
    # Crossover on consecutive pairs with probability CXPB; deleting the
    # fitness values marks the children as needing re-evaluation.
    for child1, child2 in zip(offspring[::2], offspring[1::2]):
        if random.random() < CXPB:
            toolbox.mate(child1, child2, CWD)
            del child1.fitness.values
            del child2.fitness.values
    # Mutate each individual with probability MUTPB (the per-member rate MUTR
    # is baked into the registered mutate operator).
    for mutant in offspring:
        if random.random() < MUTPB:
            toolbox.mutate(mutant)
            del mutant.fitness.values
    # Only individuals whose fitness was invalidated above are re-evaluated.
    invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
    fitnesses = []
    for i in invalid_ind:
        try:
            fitness = toolbox.evaluate(i)
            fitnesses.append(fitness)
            # Record the raw parameter vector for the ML experiments below.
            reparm_data.observations.append(list(i))
            i.fitness.values = fitness
            # Lower first objective is better; persist any new champion.
            if not reparm_data.best_fitness or fitness[0] < reparm_data.best_fitness[0]:
                print("Previous Best", reparm_data.best_fitness)
                reparm_data.best_fitness = list(fitness)
                reparm_data.best_am1_individual.set_pfloats(i)
                print("NewBest Found:", reparm_data.best_fitness)
        except TypeError:
            # Evaluation can fail (e.g. no usable Gaussian output); keep a
            # placeholder so `fitnesses` stays aligned with `invalid_ind`.
            fitnesses.append(None)
    # Checkpoint after every generation, then replace the population.
    reparm_data.save()
    pop[:] = offspring
#############################################
# End Genetic Algorithm
#############################################
#############################################
# Begin Particle Simulation
#############################################
# for g in range(NGEN):
# for part in pop:
# part.fitness.values = toolbox.evaluate(part)
# if not part.best or part.best.fitness < part.fitness:
# part.best = creator.ParamSet(part)
# part.best.fitness.values = part.fitness.values
# if not best or best.fitness < part.fitness:
# best = creator.ParamSet(part)
# best.fitness.values = part.fitness.values
# for part in pop:
# toolbox.mutate(part)
# print(best, "with fitness", best.fitness)
#############################################
# End Particle Simulation
#############################################
#############################################
# Begin Print Out
#############################################
# Write the best AM1 parameter set as a Gaussian optimization input, then try
# to run it; a TypeError from gaussian_single is treated as a failed run.
gin_best = reparm_data.best_am1_individual.inputs[0]
s_opt_header = "#P AM1(Input,Print) opt\n\nAM1\n"
opt_header = Header(s_opt_header)
gin_opt = GaussianInput(header=opt_header,
                        coordinates=gin_best.coordinates[0],
                        parameters=gin_best.parameters[0])
# Context managers replace the original open()/close() pairs so the handles
# are closed even if a write raises.
with open("reparm_best_opt.com", 'w') as fout:
    fout.write(gin_opt.str())
try:
    gout = gaussian_single(gin_opt.str())
    with open("reparm_best_opt.log", 'w') as fout:
        fout.write(gout)
except TypeError:
    # Fix: the original ran the two string literals together without a space
    # ("input,most likely").
    print("Could not get output file from input, "
          "most likely, optimization failed to converge")
#############################################
# End Print Out
#############################################
#############################################
# Begin ScikitLearn
#############################################
# # Preprocessor
# targets = np.array(reparm_data.targets)
# X = np.array(reparm_data.observations)
# y = targets[:, 0] # 0, 1, 2 for total, energy, and dipole
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
# stdsc = StandardScaler()
# X_train_std = stdsc.fit_transform(X_train)
# X_test_std = stdsc.transform(X_test)
#
# # Training
# clf = svm.SVR(C=1.3, kernel='rbf')
# # clf = RandomForestRegressor(n_estimators=20)
# clf.fit(X_train, y_train)
# print("Using {} samples with fitness score {}".format(len(y), clf.score(X_test, y_test)))
#
# initial_guess = np.array(IL)
# fun = lambda x: clf.predict(stdsc.transform(x.reshape(1, -1)))
# print("Predicting best parameters")
# min_params = (minimize(fun, initial_guess)).x
# stdsc.inverse_transform(min_params)
# params = min_params.tolist()
# skl_best = deepcopy(reparm_data.best_am1_individual)
# skl_best.set_pfloats(params)
# open("skl_best.com", 'w').write(skl_best.inputs[0].str())
# skl_fitness = eval.eval(params)
# if skl_fitness:
# print("skl_fitness:", skl_fitness)
#############################################
# End ScikitLearn
#############################################
#############################################
# Begin Analysis
#############################################
# Post-run analysis of the trithiophene system using the collected run data.
anal = Analysis(reparm_data)
anal.trithiophene()
#############################################
# End Analysis
#############################################
| 36.145374 | 92 | 0.584156 |
dbc6414ac2f786d426d11b5f7b21e310e975369d | 23,614 | py | Python | pyx12/test/test_x12context.py | arenius/pyx12 | 537493deaa0b8e18a3fa72eb1b3eeae9ef043b11 | [
"BSD-3-Clause"
] | 1 | 2019-11-06T21:22:28.000Z | 2019-11-06T21:22:28.000Z | pyx12/test/test_x12context.py | arenius/pyx12 | 537493deaa0b8e18a3fa72eb1b3eeae9ef043b11 | [
"BSD-3-Clause"
] | null | null | null | pyx12/test/test_x12context.py | arenius/pyx12 | 537493deaa0b8e18a3fa72eb1b3eeae9ef043b11 | [
"BSD-3-Clause"
] | 1 | 2021-04-12T14:32:41.000Z | 2021-04-12T14:32:41.000Z | import unittest
#import tempfile
try:
from StringIO import StringIO
except:
from io import StringIO
import pyx12.error_handler
from pyx12.errors import EngineError # , X12PathError
import pyx12.x12context
import pyx12.params
from pyx12.test.x12testdata import datafiles
| 38.210356 | 125 | 0.598247 |
dbc6b99c48a68e88a0554cb932a77dac52c1e5c0 | 1,460 | py | Python | repo/script.module.liveresolver/lib/liveresolver/resolvers/finecast.py | Hades01/Addons | 710da97ac850197498a3cd64be1811c593610add | [
"Apache-2.0"
] | 3 | 2020-03-03T13:21:44.000Z | 2021-07-21T09:53:31.000Z | repo/script.module.liveresolver/lib/liveresolver/resolvers/finecast.py | Hades01/Addons | 710da97ac850197498a3cd64be1811c593610add | [
"Apache-2.0"
] | null | null | null | repo/script.module.liveresolver/lib/liveresolver/resolvers/finecast.py | Hades01/Addons | 710da97ac850197498a3cd64be1811c593610add | [
"Apache-2.0"
] | 2 | 2020-04-01T22:11:12.000Z | 2020-05-07T23:54:52.000Z | # -*- coding: utf-8 -*-
import re,urlparse,cookielib,os,urllib
from liveresolver.modules import client,recaptcha_v2,control,constants, decryptionUtils
from liveresolver.modules.log_utils import log
cookieFile = os.path.join(control.dataPath, 'finecastcookie.lwp')
#except:
# return
| 30.416667 | 87 | 0.619178 |
dbc72ca28fa155b841727c07f4d5032dac9e8938 | 5,161 | py | Python | src/robotide/publish/__init__.py | crylearner/RIDE3X | 767f45b0c908f18ecc7473208def8dc7489f43b0 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2017-08-20T14:46:02.000Z | 2017-08-20T14:46:02.000Z | src/robotide/publish/__init__.py | crylearner/RIDE3X | 767f45b0c908f18ecc7473208def8dc7489f43b0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/robotide/publish/__init__.py | crylearner/RIDE3X | 767f45b0c908f18ecc7473208def8dc7489f43b0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org:licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Message publishing and subscribing.
.. contents::
:depth: 2
:local:
Introduction
------------
RIDE uses messages for communication when something of interest happens, for
example a suite is loaded or item is selected in the tree. This module provides
means both for subscribing to listen to those messages and for sending them.
Messages are used for communication between the different components of the
core application, but their main usage is notifying plugins about various events.
Plugins can also send messages themselves, and also create custom messages, if
they have a need.
Subscribing
-----------
The core application uses the global `PUBLISHER` object (an instance of the
`Publisher` class) for subscribing to and unsubscribing from the messages.
Plugins should use the helper methods of the `Plugin` class instead of using
the `PUBLISHER` directly.
Message topics
~~~~~~~~~~~~~~
Regardless the method, subscribing to messages requires a message topic.
Topics can be specified using the actual message classes in
`robotide.publish.messages` module or with their dot separated topic strings.
It is, for example, equivalent to use the `RideTreeSelection` class and a
string ``ride.tree.selection``. Topic strings can normally, but not always, be
mapped directly to the class names.
The topic strings represents a hierarchy where the dots separate the hierarchy
levels. All messages with a topic at or below the given level will match the
subscribed topic. For example, subscribing to the ``ride.notebook`` topic means
that `RideNotebookTabChanged` or any other message with a topic starting with
``ride.notebook`` will match.
Listeners
~~~~~~~~~
Another thing needed when subscribing is a listener, which must be a callable
accepting one argument. When the corresponding message is published, the listener
will be called with an instance of the message class as an argument. That instance
contains the topic and possibly some additional information in its attributes.
The following example demonstrates how a plugin can subscribe to an event.
In this example the ``OnTreeSelection`` method is the listener and the
``message`` it receives is an instance of the `RideTreeSelection` class.
::
from robotide.pluginapi import Plugin, RideTreeSelection
class MyFancyPlugin(Plugin):
def activate(self):
self.subscribe(self.OnTreeSelection, RideTreeSelection)
def OnTreeSelection(self, message):
print message.topic, message.node
Unsubscribing
~~~~~~~~~~~~~
Unsubscribing from a single message requires passing the same topic and listener
to the unsubscribe method that were used for subscribing. Additionally both
the `PUBLISHER` object and the `Plugin` class provide a method for unsubscribing
all listeners registered by someone.
Publishing messages
-------------------
Both the core application and plugins can publish messages using message
classes in the `publish.messages` module directly. Sending a message is as easy
as creating an instance of the class and calling its ``publish`` method. What
parameters are need when the instance is created depends on the message.
Custom messages
~~~~~~~~~~~~~~~
Most of the messages in the `publish.messages` module are to be sent only by
the core application. If plugins need their own messages, for example for
communication between different plugins, they can easily create custom messages
by extending the `RideMessage` base class::
from robotide.pluginapi import Plugin, RideMessage
class FancyImportantMessage(RideMessage):
data = ['importance']
class MyFancyPlugin(Plugin):
def important_action(self):
# some code ...
MyImportantMessage(importance='HIGH').publish()
Plugins interested about this message can subscribe to it using either
the class ``FancyImportantMessage`` or its automatically generated title
``fancy.important``. Notice also that all the messages are exposed also through
the `robotide.pluginapi` module and plugins should import them there.
"""
import os
from robotide.context import WX_VERSION
if WX_VERSION > '3.0':
from wx.lib.pubsub import setuparg1
elif WX_VERSION > '2.9':
from wx.lib.pubsub import setupv1
from .messages import *
from .publisher import PUBLISHER
| 38.514925 | 83 | 0.742298 |
dbc7c8fe7bece88307002636b27bacde286985d2 | 3,520 | py | Python | app.py | pizzapanther/google-actions-python-example | 40d13fc1821e1e11f15cc7413571cb5bd6327024 | [
"MIT"
] | 9 | 2017-11-17T07:09:08.000Z | 2020-07-03T13:32:16.000Z | app.py | pizzapanther/google-actions-python-example | 40d13fc1821e1e11f15cc7413571cb5bd6327024 | [
"MIT"
] | 2 | 2019-08-10T05:49:47.000Z | 2021-04-30T20:51:40.000Z | app.py | pizzapanther/google-actions-python-example | 40d13fc1821e1e11f15cc7413571cb5bd6327024 | [
"MIT"
] | 5 | 2018-05-04T08:05:55.000Z | 2021-08-25T05:49:18.000Z | #!/usr/bin/env python
import os
import json
import tornado.ioloop
import tornado.log
import tornado.web
from google.oauth2 import id_token
from google.auth.transport import requests as google_requests
import jwt
import requests
API_KEY = os.environ.get('OPEN_WEATHER_MAP_KEY', None)
PROJECT_ID = os.environ.get('PROJECT_ID', None)
if __name__ == "__main__":
    # Script entry point: enable readable console logging, then serve forever.
    tornado.log.enable_pretty_logging()
    # make_app() is defined elsewhere in this module (not visible in this chunk).
    app = make_app()
    # Port comes from the environment (PaaS-style), defaulting to 8000.
    app.listen(int(os.environ.get('PORT', '8000')))
    tornado.ioloop.IOLoop.current().start()
| 25.693431 | 98 | 0.563352 |
dbc804db6b0f3dbd711ac33b62c655260b3871e9 | 352 | py | Python | ProsperFlask/{{cookiecutter.project_name}}/tests/conftest.py | EVEprosper/ProsperCookiecutters | 569ca0c311a5ead2b49f0cdde4cb2ad14dcd3a2c | [
"MIT"
] | null | null | null | ProsperFlask/{{cookiecutter.project_name}}/tests/conftest.py | EVEprosper/ProsperCookiecutters | 569ca0c311a5ead2b49f0cdde4cb2ad14dcd3a2c | [
"MIT"
] | null | null | null | ProsperFlask/{{cookiecutter.project_name}}/tests/conftest.py | EVEprosper/ProsperCookiecutters | 569ca0c311a5ead2b49f0cdde4cb2ad14dcd3a2c | [
"MIT"
] | null | null | null | # AUTOGENERATED BY: ProsperCookiecutters/ProsperFlask
# TEMPLATE VERSION: {{cookiecutter.template_version}}
# AUTHOR: {{cookiecutter.author_name}}
"""PyTest fixtures and modifiers"""
import pytest
from {{cookiecutter.library_name}}.endpoints import APP
| 23.466667 | 55 | 0.755682 |
dbc8735d5b72a93d69f4f92640c632b9a9b76112 | 3,341 | py | Python | zoloto/coords.py | RealOrangeOne/yuri | 6ed55bdf97c6add22cd6c71c39ca30e2229337cb | [
"BSD-3-Clause"
] | 7 | 2019-08-09T10:05:14.000Z | 2021-11-14T17:37:50.000Z | zoloto/coords.py | RealOrangeOne/yuri | 6ed55bdf97c6add22cd6c71c39ca30e2229337cb | [
"BSD-3-Clause"
] | 226 | 2019-06-20T09:48:23.000Z | 2022-02-20T00:43:52.000Z | zoloto/coords.py | RealOrangeOne/yuri | 6ed55bdf97c6add22cd6c71c39ca30e2229337cb | [
"BSD-3-Clause"
] | 9 | 2019-07-19T10:55:47.000Z | 2020-07-23T19:16:47.000Z | from typing import Iterator, NamedTuple, Tuple
from cached_property import cached_property
from cv2 import Rodrigues
from pyquaternion import Quaternion
ThreeTuple = Tuple[float, float, float]
RotationMatrix = Tuple[ThreeTuple, ThreeTuple, ThreeTuple]
def __repr__(self) -> str:
return "Orientation(rot_x={},rot_y={},rot_z={})".format(
self.rot_x, self.rot_y, self.rot_z
)
| 25.7 | 83 | 0.609398 |
dbc99a75d68d09d60f840eae7b285af4fedbeeae | 2,988 | py | Python | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/bms_container_query_object.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-07-23T08:59:24.000Z | 2018-07-23T08:59:24.000Z | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/bms_container_query_object.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-11-29T14:46:42.000Z | 2018-11-29T14:46:42.000Z | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/bms_container_query_object.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
| 44.597015 | 81 | 0.662316 |
dbca2b427044c8866cf81d44e473638aa489abca | 274 | py | Python | ia870/iagradm.py | rdenadai/ia870p3 | c4823efc4b8e5f187a64f8a4e9962e328bf86967 | [
"BSD-2-Clause"
] | 5 | 2018-10-15T12:02:03.000Z | 2022-02-11T12:47:12.000Z | ia870/iagradm.py | rdenadai/ia870p3 | c4823efc4b8e5f187a64f8a4e9962e328bf86967 | [
"BSD-2-Clause"
] | 1 | 2018-10-15T12:04:36.000Z | 2019-01-25T12:04:35.000Z | ia870/iagradm.py | rdenadai/ia870p3 | c4823efc4b8e5f187a64f8a4e9962e328bf86967 | [
"BSD-2-Clause"
] | 4 | 2019-01-25T11:13:48.000Z | 2020-12-20T01:42:33.000Z | # -*- encoding: utf-8 -*-
# Module iagradm
| 21.076923 | 50 | 0.642336 |
dbca8d6120f0830afa062de217262e49809ebe82 | 388 | py | Python | backend/api/tests/test_models/test_utils/test_ranking_suffixes.py | ChristchurchCityWeightlifting/lifter-api | a82b79c75106e7f4f8ea4b4e3e12d727213445e3 | [
"MIT"
] | null | null | null | backend/api/tests/test_models/test_utils/test_ranking_suffixes.py | ChristchurchCityWeightlifting/lifter-api | a82b79c75106e7f4f8ea4b4e3e12d727213445e3 | [
"MIT"
] | 5 | 2022-03-07T08:30:47.000Z | 2022-03-22T09:15:52.000Z | backend/api/tests/test_models/test_utils/test_ranking_suffixes.py | ChristchurchCityWeightlifting/lifter-api | a82b79c75106e7f4f8ea4b4e3e12d727213445e3 | [
"MIT"
] | null | null | null | import pytest
from api.models.utils import rankings
def test_rankings(test_data):
    """Check that ordinal suffixes are produced correctly.

    e.g. 1 -> "1st", 11 -> "11th", 101 -> "101st".
    """
    expected_suffixes = ("1st", "11th", "101st")
    for index, expected in enumerate(expected_suffixes):
        assert rankings(test_data[index]) == expected
| 19.4 | 44 | 0.641753 |
dbcc6f4ccb0dabce5252e1dd4108228b2c863f99 | 721 | py | Python | web/web-lemonthinker/src/app/app.py | NoXLaw/RaRCTF2021-Challenges-Public | 1a1b094359b88f8ebbc83a6b26d27ffb2602458f | [
"MIT"
] | 2 | 2021-08-09T17:08:12.000Z | 2021-08-09T17:08:17.000Z | web/web-lemonthinker/src/app/app.py | NoXLaw/RaRCTF2021-Challenges-Public | 1a1b094359b88f8ebbc83a6b26d27ffb2602458f | [
"MIT"
] | null | null | null | web/web-lemonthinker/src/app/app.py | NoXLaw/RaRCTF2021-Challenges-Public | 1a1b094359b88f8ebbc83a6b26d27ffb2602458f | [
"MIT"
] | 1 | 2021-10-09T16:51:56.000Z | 2021-10-09T16:51:56.000Z | from flask import Flask, request, redirect, url_for
import os
import random
import string
import time # lemonthink
clean = time.time()
app = Flask(__name__)
chars = list(string.ascii_letters + string.digits) | 28.84 | 79 | 0.653259 |
dbccbf08a5c6a38fe09196877c8bb3f8a56251c4 | 816 | py | Python | aprendizado/codewars/descending_order.py | renatodev95/Python | 2adee4a01de41f8bbb68fce563100c135a5ab549 | [
"MIT"
] | null | null | null | aprendizado/codewars/descending_order.py | renatodev95/Python | 2adee4a01de41f8bbb68fce563100c135a5ab549 | [
"MIT"
] | null | null | null | aprendizado/codewars/descending_order.py | renatodev95/Python | 2adee4a01de41f8bbb68fce563100c135a5ab549 | [
"MIT"
] | null | null | null | # Your task is to make a function that can take any non-negative integer as an argument and return it with its digits in descending order. Essentially, rearrange the digits to create the highest possible number.
# Funo que recebe um nmero inteiro (no negativo) como argumento e o retorna com os dgitos em ordem descendente. Essencialmente, organize os dgitos para criar o maior nmero possvel.
# Primeiro cdigo
# Refatorao do primeiro cdigo (utilizando list comprehension)
#
#
| 38.857143 | 211 | 0.734069 |
dbce1d6ebf5fac46543c3b47688a5f1e1c7cc668 | 8,981 | py | Python | dmarc_storage.py | Schramp/dmarc-monitoring | 619a162f71a788e81d92ca281ec0bdcf13c2e8e8 | [
"MIT"
] | 1 | 2020-05-25T05:09:18.000Z | 2020-05-25T05:09:18.000Z | dmarc_storage.py | Schramp/dmarc-monitoring | 619a162f71a788e81d92ca281ec0bdcf13c2e8e8 | [
"MIT"
] | 30 | 2019-08-12T05:10:50.000Z | 2021-07-21T04:25:02.000Z | dmarc_storage.py | Schramp/dmarc-monitoring | 619a162f71a788e81d92ca281ec0bdcf13c2e8e8 | [
"MIT"
] | 1 | 2022-03-12T19:24:24.000Z | 2022-03-12T19:24:24.000Z | import sqlite3
import os
import datetime
__all__ = ['DMARCStorage', 'totimestamp']
| 49.894444 | 118 | 0.565639 |
dbd0c614614154cd50e0792871e7aa778a2a1459 | 557 | py | Python | setup.py | mcdruid/sumologic-python-sdk | cb1d649d0166976fb104866e9174a41bd558b817 | [
"Apache-2.0"
] | 4 | 2019-05-09T01:31:15.000Z | 2019-12-08T03:35:32.000Z | setup.py | blaise-sumo/sumologic-python-sdk | 97c38fc2d493b94741fd17711923ec7e39264610 | [
"Apache-2.0"
] | null | null | null | setup.py | blaise-sumo/sumologic-python-sdk | 97c38fc2d493b94741fd17711923ec7e39264610 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
setup(
    # Distribution name on PyPI.
    name="sumologic-sdk",
    version="0.1.9",
    # Pick up every package under the source tree automatically.
    packages=find_packages(),
    # Runtime dependency: the REST client is built on `requests`.
    install_requires=['requests>=2.2.1'],
    # PyPI metadata
    author="Yoway Buorn, Melchi Salins",
    author_email="it@sumologic.com, melchisalins@icloud.com",
    description="Sumo Logic Python SDK",
    # NOTE(review): "PSF" is an unusual license identifier for this project
    # (the repository is tagged Apache-2.0) -- confirm before the next release.
    license="PSF",
    keywords="sumologic python sdk rest api log management analytics logreduce splunk security siem collector forwarder",
    url="https://github.com/SumoLogic/sumologic-python-sdk",
    zip_safe=True
)
dbd1044b9a9e2ac21f72f6855560f0e23688f3f9 | 8,025 | py | Python | docs/conf.py | urm8/django-translations | e8f66710af9433044937b75c061e1988add398a5 | [
"BSD-3-Clause"
] | 100 | 2018-11-20T19:30:49.000Z | 2022-03-10T07:46:27.000Z | docs/conf.py | urm8/django-translations | e8f66710af9433044937b75c061e1988add398a5 | [
"BSD-3-Clause"
] | 30 | 2018-11-27T19:53:53.000Z | 2022-02-04T14:56:52.000Z | docs/conf.py | urm8/django-translations | e8f66710af9433044937b75c061e1988add398a5 | [
"BSD-3-Clause"
] | 25 | 2019-05-30T13:41:47.000Z | 2022-03-25T04:28:17.000Z | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import json
import datetime
# `Django setup` below, will add the path to `translations` module
# automatically because it's been included in `project.settings`, so no need
# to import it here
# -- Django setup ------------------------------------------------------------
# generated project settings
import django
sys.path.insert(
0,
os.path.join(os.path.dirname(os.path.abspath('.')), 'project')
)
os.environ['DJANGO_SETTINGS_MODULE'] = 'project.settings'
django.setup()
# -- Project information -----------------------------------------------------
with open(
os.path.join(
os.path.dirname(os.path.abspath('.')),
'config.json'
), 'r') as fh:
info = json.load(fh)
# project
project = info['project']['name']
# description
description = info['project']['desc']
# author
author = info['author']['name']
# The short X.Y version
version = info['release']['version']
# The full version, including alpha/beta/rc tags
release = info['release']['name']
# github
github_user = info['github']['user']
github_repo = info['github']['repo']
# donation
donate_url = info['urls']['funding']
# logo
logo = info['project']['logo']
# documentation
documentation = '{} {}'.format(project, 'Documentation')
# year
year = datetime.datetime.now().year
# copyright
copyright = '{year}, {author}'.format(year=year, author=author)
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'monokai'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'note_bg': '#fec',
'note_border': '#ffe2a8',
'show_relbars': True,
'logo': logo,
'touch_icon': logo,
'logo_name': True,
'description': description,
'github_user': github_user,
'github_repo': github_repo,
'github_banner': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoTranslationsdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DjangoTranslations.tex', documentation,
author, 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'djangotranslations', documentation,
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DjangoTranslations', documentation,
author, 'DjangoTranslations', description,
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'django': ('http://django.readthedocs.org/en/latest/', None),
}
# -- Options for doctest extension -------------------------------------------
doctest_global_setup = """
import builtins
from django.db import connection
from django.test import TestCase
from sample.utils import create_samples
import beautifier
# Turn on the test database for the doctests
connection.creation.create_test_db(verbosity=0)
TestCase.setUpClass()
# Beautify `testoutput`
def print(value='', end='\\n'):
builtins.print(beautifier.beautify(value, False), end=end)
# Sample creation
def create_doc_samples(translations=True):
if translations:
create_samples(
continent_names=['europe', 'asia'],
country_names=['germany', 'south korea'],
city_names=['cologne', 'seoul'],
continent_fields=['name', 'denonym'],
country_fields=['name', 'denonym'],
city_fields=['name', 'denonym'],
langs=['de']
)
else:
create_samples(
continent_names=['europe', 'asia'],
country_names=['germany', 'south korea'],
city_names=['cologne', 'seoul'],
)
"""
doctest_global_cleanup = """
import builtins
from django.db import connection
from django.test import TestCase
# Normalize `testoutput`
def print(value='', end='\\n'):
builtins.print(value, end=end)
# Turn off the test database for the doctests
TestCase.tearDownClass()
connection.creation.destroy_test_db(verbosity=0)
"""
| 27.389078 | 79 | 0.642492 |
dbd150b0b609e70f340c545eccce6da7fadb2eeb | 86 | py | Python | skorecard/metrics/__init__.py | orchardbirds/skorecard-1 | 0f5375a6c159bb35f4b62c5be75a742bf50885e2 | [
"MIT"
] | null | null | null | skorecard/metrics/__init__.py | orchardbirds/skorecard-1 | 0f5375a6c159bb35f4b62c5be75a742bf50885e2 | [
"MIT"
] | null | null | null | skorecard/metrics/__init__.py | orchardbirds/skorecard-1 | 0f5375a6c159bb35f4b62c5be75a742bf50885e2 | [
"MIT"
] | null | null | null | """Import required Metric."""
from .metrics import IV_scorer
__all__ = ["IV_scorer"]
| 17.2 | 30 | 0.72093 |
dbd2339bf7055960ea772c1eecf31ab430a3ae71 | 5,297 | py | Python | src/waldur_core/core/tests/helpers.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 26 | 2017-10-18T13:49:58.000Z | 2021-09-19T04:44:09.000Z | src/waldur_core/core/tests/helpers.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 14 | 2018-12-10T14:14:51.000Z | 2021-06-07T10:33:39.000Z | src/waldur_core/core/tests/helpers.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 32 | 2017-09-24T03:10:45.000Z | 2021-10-16T16:41:09.000Z | import copy
from django.conf import settings
from django.test.utils import override_settings
from rest_framework import status, test
def override_waldur_core_settings(**kwargs):
waldur_settings = copy.deepcopy(settings.WALDUR_CORE)
waldur_settings.update(kwargs)
return override_settings(WALDUR_CORE=waldur_settings)
| 37.302817 | 106 | 0.605815 |
dbd3d31bd1a8e525699ace640bf7abf893c326e1 | 1,121 | py | Python | data/benchmark.py | Gummary/denet | 00d814d75eea54d5b259fce128ae7b625a900140 | [
"MIT"
] | 343 | 2020-04-02T06:22:18.000Z | 2022-03-25T12:51:55.000Z | data/benchmark.py | sanglee325/cutblur | 1589718b27973bec41289bbd5ad5a71ebe2e9925 | [
"MIT"
] | 26 | 2020-04-30T03:23:15.000Z | 2022-02-20T07:31:42.000Z | data/benchmark.py | sanglee325/cutblur | 1589718b27973bec41289bbd5ad5a71ebe2e9925 | [
"MIT"
] | 66 | 2020-04-02T06:55:37.000Z | 2022-03-10T15:44:19.000Z | """
CutBlur
Copyright 2020-present NAVER corp.
MIT license
"""
import os
import glob
import data
| 22.877551 | 78 | 0.611954 |
dbd4271941c1c0d5952f6d9d574008a25b255d3d | 917 | py | Python | pytglib/api/types/update_chat_is_pinned.py | iTeam-co/pytglib | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 6 | 2019-10-30T08:57:27.000Z | 2021-02-08T14:17:43.000Z | pytglib/api/types/update_chat_is_pinned.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 1 | 2021-08-19T05:44:10.000Z | 2021-08-19T07:14:56.000Z | pytglib/api/types/update_chat_is_pinned.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 5 | 2019-12-04T05:30:39.000Z | 2021-05-21T18:23:32.000Z |
from ..utils import Object
| 22.365854 | 60 | 0.563795 |
dbd4c90cb945544747b8308cf5ade961b6ff86c8 | 30,162 | py | Python | tests/test_api.py | jairhenrique/todoist-python | 755b9bd8a4fdf4e96b2381613ac0c4bed99731e5 | [
"MIT"
] | null | null | null | tests/test_api.py | jairhenrique/todoist-python | 755b9bd8a4fdf4e96b2381613ac0c4bed99731e5 | [
"MIT"
] | null | null | null | tests/test_api.py | jairhenrique/todoist-python | 755b9bd8a4fdf4e96b2381613ac0c4bed99731e5 | [
"MIT"
] | null | null | null | import io
import time
import todoist
| 29.1139 | 82 | 0.636264 |
dbd513568e3fe748df68592f5efb0230845ec0a5 | 990 | py | Python | setup.py | dylancrockett/iot.io | 472767186a5500e05b02d821f32e1208f3652418 | [
"MIT"
] | null | null | null | setup.py | dylancrockett/iot.io | 472767186a5500e05b02d821f32e1208f3652418 | [
"MIT"
] | null | null | null | setup.py | dylancrockett/iot.io | 472767186a5500e05b02d821f32e1208f3652418 | [
"MIT"
] | null | null | null | from setuptools import setup
import iotio
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="iot.io",
version=iotio.__version__,
packages=["iotio"],
author="Dylan Crockett",
author_email="dylanrcrockett@gmail.com",
license="MIT",
description="A management API for connecting and managing Clients via websocket connections.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/dylancrockett/iot.io",
project_urls={
"Documentation": "https://iotio.readthedocs.io/",
"Source Code": "https://github.com/dylancrockett/iot.io"
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
],
install_requires=[
'gevent',
'gevent-websocket',
'flask',
'flask-sockets',
],
python_requires='>=3.7'
)
| 28.285714 | 98 | 0.639394 |
dbd57373c1091216c9a267bad2a40451008902b2 | 1,820 | py | Python | trellominer/api/trello.py | xnoder/trellominer | 629d8f916486aa94a5bfa3a9497c36316c2864ed | [
"MIT"
] | null | null | null | trellominer/api/trello.py | xnoder/trellominer | 629d8f916486aa94a5bfa3a9497c36316c2864ed | [
"MIT"
] | null | null | null | trellominer/api/trello.py | xnoder/trellominer | 629d8f916486aa94a5bfa3a9497c36316c2864ed | [
"MIT"
] | null | null | null | import os
import requests
from trellominer.config import yaml
| 40.444444 | 165 | 0.644505 |
dbd5cd5e6175ef560ba478a76fe061ded7bfc8d7 | 2,337 | py | Python | alexnet_guided_bp_vanilla.py | wezteoh/face_perception_thru_backprop | 449f78ce330876ff25fbcdf892023fd2ba86005c | [
"MIT"
] | null | null | null | alexnet_guided_bp_vanilla.py | wezteoh/face_perception_thru_backprop | 449f78ce330876ff25fbcdf892023fd2ba86005c | [
"MIT"
] | null | null | null | alexnet_guided_bp_vanilla.py | wezteoh/face_perception_thru_backprop | 449f78ce330876ff25fbcdf892023fd2ba86005c | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
import os
from scipy.io import savemat
from scipy.io import loadmat
from scipy.misc import imread
from scipy.misc import imsave
from alexnet_face_classifier import *
import matplotlib.pyplot as plt
plt.switch_backend('agg')
###
def guided_backprop(graph, image, one_hot, sess):
image = np.expand_dims(image, 0)
one_hot = np.expand_dims(one_hot, 0)
saliency_map = sess.run(graph.grad_image, feed_dict={graph.inputs:image, graph.labels_1hot:one_hot})[0]
scaling_adjustment = 1E-20
saliency_map_scaled = saliency_map/(np.max(saliency_map)+scaling_adjustment)
return saliency_map_scaled
| 37.095238 | 110 | 0.693624 |
dbd6ae222f06041fd60daf0b6a6b62ee66225c4f | 18,729 | py | Python | tests/test_sqlalchemy_registry.py | AferriDaniel/coaster | 3ffbc9d33c981284593445299aaee0c3cc0cdb0b | [
"BSD-2-Clause"
] | 48 | 2015-01-15T08:57:24.000Z | 2022-01-26T04:04:34.000Z | tests/test_sqlalchemy_registry.py | AferriDaniel/coaster | 3ffbc9d33c981284593445299aaee0c3cc0cdb0b | [
"BSD-2-Clause"
] | 169 | 2015-01-16T13:17:38.000Z | 2021-05-31T13:23:23.000Z | tests/test_sqlalchemy_registry.py | AferriDaniel/coaster | 3ffbc9d33c981284593445299aaee0c3cc0cdb0b | [
"BSD-2-Clause"
] | 17 | 2015-02-15T07:39:04.000Z | 2021-10-05T11:20:22.000Z | """Registry and RegistryMixin tests."""
from types import SimpleNamespace
import pytest
from coaster.db import db
from coaster.sqlalchemy import BaseMixin
from coaster.sqlalchemy.registry import Registry
# --- Fixtures -------------------------------------------------------------------------
# --- Tests ----------------------------------------------------------------------------
# --- Creating a registry
def test_registry_set_name():
"""Registry's __set_name__ gets called."""
# Registry has no name unless added to a class
assert Registry()._name is None
assert RegistryUser.reg1._name == 'reg1'
assert RegistryUser.reg2._name == 'reg2'
def test_registry_reuse_error():
"""Registries cannot be reused under different names."""
# Registry raises TypeError from __set_name__, but Python recasts as RuntimeError
with pytest.raises(RuntimeError):
def test_registry_reuse_okay():
"""Registries be reused with the same name under different hosts."""
reusable = Registry()
assert reusable._name is None
assert HostA.registry._name == 'registry'
assert HostB.registry._name == 'registry'
assert HostA.registry is HostB.registry
assert HostA.registry is reusable
def test_registry_param_type():
"""Registry's param must be string or None."""
r = Registry()
assert r._param is None
r = Registry('')
assert r._param is None
r = Registry(1)
assert r._param == '1'
r = Registry('obj')
assert r._param == 'obj'
r = Registry(param='foo')
assert r._param == 'foo'
def test_registry_property_cached_property():
"""A registry can have property or cached_property set, but not both."""
r = Registry()
assert r._default_property is False
assert r._default_cached_property is False
r = Registry(property=True)
assert r._default_property is True
assert r._default_cached_property is False
r = Registry(cached_property=True)
assert r._default_property is False
assert r._default_cached_property is True
with pytest.raises(TypeError):
Registry(property=True, cached_property=True)
# --- Populating a registry
def test_add_to_registry(
CallableRegistry, # noqa: N803
PropertyRegistry,
CachedPropertyRegistry,
CallableParamRegistry,
PropertyParamRegistry,
CachedPropertyParamRegistry,
):
"""A member can be added to registries and accessed as per registry settings."""
callable_host = CallableRegistry()
property_host = PropertyRegistry()
cached_property_host = CachedPropertyRegistry()
callable_param_host = CallableParamRegistry()
property_param_host = PropertyParamRegistry()
cached_property_param_host = CachedPropertyParamRegistry()
assert callable_host.registry.member(1) == (callable_host, 1)
assert property_host.registry.member == (property_host, None)
assert cached_property_host.registry.member == (cached_property_host, None)
assert callable_param_host.registry.member(1) == (1, callable_param_host)
assert property_param_host.registry.member == (None, property_param_host)
assert cached_property_param_host.registry.member == (
None,
cached_property_param_host,
)
def test_property_cache_mismatch(
PropertyRegistry, CachedPropertyRegistry # noqa: N803
):
"""A registry's default setting must be explicitly turned off if conflicting."""
with pytest.raises(TypeError):
with pytest.raises(TypeError):
def test_add_to_registry_host(
CallableRegistry, # noqa: N803
PropertyRegistry,
CachedPropertyRegistry,
CallableParamRegistry,
PropertyParamRegistry,
CachedPropertyParamRegistry,
):
"""A member can be added as a function, overriding default settings."""
callable_host = CallableRegistry()
property_host = PropertyRegistry()
cached_property_host = CachedPropertyRegistry()
callable_param_host = CallableParamRegistry()
property_param_host = PropertyParamRegistry()
cached_property_param_host = CachedPropertyParamRegistry()
assert callable_host.registry.member(1) == (callable_host, 1)
assert property_host.registry.member(2) == (property_host, 2)
assert cached_property_host.registry.member(3) == (cached_property_host, 3)
assert callable_param_host.registry.member(4) == (4, callable_param_host)
assert property_param_host.registry.member(5) == (5, property_param_host)
assert cached_property_param_host.registry.member(6) == (
6,
cached_property_param_host,
)
def test_add_to_registry_property(
CallableRegistry, # noqa: N803
PropertyRegistry,
CachedPropertyRegistry,
CallableParamRegistry,
PropertyParamRegistry,
CachedPropertyParamRegistry,
):
"""A member can be added as a property, overriding default settings."""
callable_host = CallableRegistry()
property_host = PropertyRegistry()
cached_property_host = CachedPropertyRegistry()
callable_param_host = CallableParamRegistry()
property_param_host = PropertyParamRegistry()
cached_property_param_host = CachedPropertyParamRegistry()
assert callable_host.registry.member == (callable_host, None)
assert property_host.registry.member == (property_host, None)
assert cached_property_host.registry.member == (cached_property_host, None)
assert callable_param_host.registry.member == (None, callable_param_host)
assert property_param_host.registry.member == (None, property_param_host)
assert cached_property_param_host.registry.member == (
None,
cached_property_param_host,
)
def test_add_to_registry_cached_property(
CallableRegistry, # noqa: N803
PropertyRegistry,
CachedPropertyRegistry,
CallableParamRegistry,
PropertyParamRegistry,
CachedPropertyParamRegistry,
):
"""A member can be added as a property, overriding default settings."""
callable_host = CallableRegistry()
property_host = PropertyRegistry()
cached_property_host = CachedPropertyRegistry()
callable_param_host = CallableParamRegistry()
property_param_host = PropertyParamRegistry()
cached_property_param_host = CachedPropertyParamRegistry()
assert callable_host.registry.member == (callable_host, None)
assert property_host.registry.member == (property_host, None)
assert cached_property_host.registry.member == (cached_property_host, None)
assert callable_param_host.registry.member == (None, callable_param_host)
assert property_param_host.registry.member == (None, property_param_host)
assert cached_property_param_host.registry.member == (
None,
cached_property_param_host,
)
def test_add_to_registry_custom_name(all_registry_hosts, registry_member):
"""Members can be added to a registry with a custom name."""
assert registry_member.__name__ == 'member'
for host in all_registry_hosts:
# Mock decorator call
host.registry('custom')(registry_member)
# This adds the member under the custom name
assert host.registry.custom is registry_member
# The default name of the function is not present...
with pytest.raises(AttributeError):
assert host.registry.member is registry_member
# ... but can be added
host.registry()(registry_member)
assert host.registry.member is registry_member
def test_add_to_registry_underscore(all_registry_hosts, registry_member):
"""Registry member names cannot start with an underscore."""
for host in all_registry_hosts:
with pytest.raises(ValueError):
host.registry('_new_member')(registry_member)
def test_add_to_registry_dupe(all_registry_hosts, registry_member):
"""Registry member names cannot be duplicates of an existing name."""
for host in all_registry_hosts:
host.registry()(registry_member)
with pytest.raises(ValueError):
host.registry()(registry_member)
host.registry('custom')(registry_member)
with pytest.raises(ValueError):
host.registry('custom')(registry_member)
def test_cached_properties_are_cached(
PropertyRegistry, # noqa: N803
CachedPropertyRegistry,
PropertyParamRegistry,
CachedPropertyParamRegistry,
):
"""Cached properties are truly cached."""
# Register registry member
property_host = PropertyRegistry()
cached_property_host = CachedPropertyRegistry()
property_param_host = PropertyParamRegistry()
cached_property_param_host = CachedPropertyParamRegistry()
# The properties and cached properties work
assert property_host.registry.member == [property_host, None]
assert cached_property_host.registry.member == [cached_property_host, None]
assert property_param_host.registry.member == [None, property_param_host]
assert cached_property_param_host.registry.member == [
None,
cached_property_param_host,
]
# The properties and cached properties return equal values on each access
assert property_host.registry.member == property_host.registry.member
assert cached_property_host.registry.member == cached_property_host.registry.member
assert property_param_host.registry.member == property_param_host.registry.member
assert (
cached_property_param_host.registry.member
== cached_property_param_host.registry.member
)
# Only the cached properties return the same value every time
assert property_host.registry.member is not property_host.registry.member
assert cached_property_host.registry.member is cached_property_host.registry.member
assert (
property_param_host.registry.member is not property_param_host.registry.member
)
assert (
cached_property_param_host.registry.member
is cached_property_param_host.registry.member
)
# TODO:
# test_registry_member_cannot_be_called_clear_cache
# test_multiple_positional_and_keyword_arguments
# test_registry_iter
# test_registry_members_must_be_callable
# test_add_by_directly_sticking_in
# test_instance_registry_is_cached
# test_clear_cache_for
# test_clear_cache
# test_registry_mixin_config
# test_registry_mixin_subclasses
# --- RegistryMixin tests --------------------------------------------------------------
def test_access_item_from_class(registrymixin_models):
"""Registered items are available from the model class."""
assert (
registrymixin_models.RegistryTest1.views.test
is registrymixin_models.RegisteredItem1
)
assert (
registrymixin_models.RegistryTest2.views.test
is registrymixin_models.RegisteredItem2
)
assert (
registrymixin_models.RegistryTest1.views.test
is not registrymixin_models.RegisteredItem2
)
assert (
registrymixin_models.RegistryTest2.views.test
is not registrymixin_models.RegisteredItem1
)
assert registrymixin_models.RegistryTest1.features.is1 is registrymixin_models.is1
assert registrymixin_models.RegistryTest2.features.is1 is registrymixin_models.is1
def test_access_item_class_from_instance(registrymixin_models):
"""Registered items are available from the model instance."""
r1 = registrymixin_models.RegistryTest1()
r2 = registrymixin_models.RegistryTest2()
# When accessed from the instance, we get a partial that resembles
# the wrapped item, but is not the item itself.
assert r1.views.test is not registrymixin_models.RegisteredItem1
assert r1.views.test.func is registrymixin_models.RegisteredItem1
assert r2.views.test is not registrymixin_models.RegisteredItem2
assert r2.views.test.func is registrymixin_models.RegisteredItem2
assert r1.features.is1 is not registrymixin_models.is1
assert r1.features.is1.func is registrymixin_models.is1
assert r2.features.is1 is not registrymixin_models.is1
assert r2.features.is1.func is registrymixin_models.is1
def test_access_item_instance_from_instance(registrymixin_models):
"""Registered items can be instantiated from the model instance."""
r1 = registrymixin_models.RegistryTest1()
r2 = registrymixin_models.RegistryTest2()
i1 = r1.views.test()
i2 = r2.views.test()
assert isinstance(i1, registrymixin_models.RegisteredItem1)
assert isinstance(i2, registrymixin_models.RegisteredItem2)
assert not isinstance(i1, registrymixin_models.RegisteredItem2)
assert not isinstance(i2, registrymixin_models.RegisteredItem1)
assert i1.obj is r1
assert i2.obj is r2
assert i1.obj is not r2
assert i2.obj is not r1
def test_features(registrymixin_models):
"""The features registry can be used for feature tests."""
r1 = registrymixin_models.RegistryTest1()
r2 = registrymixin_models.RegistryTest2()
assert r1.features.is1() is True
assert r2.features.is1() is False
| 33.148673 | 88 | 0.724171 |
dbd6cc6412096e169b145a7b948ae52708971c75 | 1,311 | py | Python | home/migrations/0010_auto_20180206_1625.py | RomanMahar/personalsite | ad0c7880e0ccfe81ea53b8bad8e0d4fcf0c5830b | [
"MIT"
] | null | null | null | home/migrations/0010_auto_20180206_1625.py | RomanMahar/personalsite | ad0c7880e0ccfe81ea53b8bad8e0d4fcf0c5830b | [
"MIT"
] | 10 | 2020-06-05T17:26:09.000Z | 2022-01-13T00:39:44.000Z | home/migrations/0010_auto_20180206_1625.py | RomanMahar/personalsite | ad0c7880e0ccfe81ea53b8bad8e0d4fcf0c5830b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-02-06 16:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| 36.416667 | 158 | 0.62624 |
dbd8f78b064be6d992dd13fbfa97e40d68c26218 | 900 | py | Python | nesta/packages/misc_utils/tests/test_guess_sql_type.py | anniyanvr/nesta | 4b3ae79922cebde0ad33e08ac4c40b9a10e8e7c3 | [
"MIT"
] | 13 | 2019-06-18T16:53:53.000Z | 2021-03-04T10:58:52.000Z | nesta/packages/misc_utils/tests/test_guess_sql_type.py | nestauk/old_nesta_daps | 4b3ae79922cebde0ad33e08ac4c40b9a10e8e7c3 | [
"MIT"
] | 208 | 2018-08-10T13:15:40.000Z | 2021-07-21T10:16:07.000Z | nesta/packages/misc_utils/tests/test_guess_sql_type.py | nestauk/old_nesta_daps | 4b3ae79922cebde0ad33e08ac4c40b9a10e8e7c3 | [
"MIT"
] | 8 | 2018-09-20T15:19:23.000Z | 2020-12-15T17:41:34.000Z | import pytest
from nesta.packages.misc_utils.guess_sql_type import guess_sql_type
def test_guess_sql_type_int(int_data):
assert guess_sql_type(int_data) == 'INTEGER'
def test_guess_sql_type_float(float_data):
assert guess_sql_type(float_data) == 'FLOAT'
def test_guess_sql_type_bool(bool_data):
assert guess_sql_type(bool_data) == 'BOOLEAN'
def test_guess_sql_type_str(text_data):
assert guess_sql_type(text_data, text_len=10) == 'TEXT'
assert guess_sql_type(text_data, text_len=100).startswith('VARCHAR(')
| 25.714286 | 73 | 0.725556 |
dbd936c5bdf9f66abffeaa3d4ec25c893af108da | 4,239 | py | Python | api/controller/activity.py | DXCChina/pms | c779a69f25fb08101593c6ff0451debc0abce6e4 | [
"MIT"
] | 27 | 2017-11-06T06:58:30.000Z | 2021-04-23T02:47:23.000Z | api/controller/activity.py | DXCChina/pms | c779a69f25fb08101593c6ff0451debc0abce6e4 | [
"MIT"
] | 3 | 2017-12-08T02:55:42.000Z | 2019-06-04T15:23:03.000Z | api/controller/activity.py | DXCChina/pms | c779a69f25fb08101593c6ff0451debc0abce6e4 | [
"MIT"
] | 16 | 2017-10-12T03:06:39.000Z | 2020-12-24T09:00:49.000Z | # -*- coding: utf-8 -*-
''''''
from flask import request
from model.db import database, Activity, ActivityMember, Demand, ActivityBase, ProjectMember, User
from model.role import identity
from flask_jwt_extended import (fresh_jwt_required)
def demand_activity_add(activity_id, data):
''''''
for demand_id in data:
demand = Demand.get(Demand.id == demand_id)
if not demand.activityId:
demand.activityId = activity_id
# Demand.update(activityId=activity_id).where(Demand.id == demand_id).execute()
demand.save()
def demand_activity_del(activity_id, data):
''''''
for demand_id in data:
demand = Demand.get(Demand.id == demand_id)
if demand.activityId == activity_id:
demand.activityId = None
# Demand.update(activityId=activity_id).where(Demand.id == demand_id).execute()
demand.save()
def demand_activity_done(activity_id, data):
''''''
for demand_id in data:
demand = Demand.get(Demand.id == demand_id)
if demand.activityId == activity_id:
demand.status = 1
# Demand.update(activityId=activity_id).where(Demand.id == demand_id).execute()
demand.save()
| 35.033058 | 98 | 0.599198 |
dbd96b797fa91e96b8a7f838f8fb68571c587fa0 | 326 | py | Python | math/9. Palindrome number.py | Rage-ops/Leetcode-Solutions | 48d4ecbb92a0bb7a7bb74a1445b593a67357ac02 | [
"MIT"
] | 1 | 2020-11-23T13:52:11.000Z | 2020-11-23T13:52:11.000Z | math/9. Palindrome number.py | harsha-sam/Leetcode-Solutions | 48d4ecbb92a0bb7a7bb74a1445b593a67357ac02 | [
"MIT"
] | null | null | null | math/9. Palindrome number.py | harsha-sam/Leetcode-Solutions | 48d4ecbb92a0bb7a7bb74a1445b593a67357ac02 | [
"MIT"
] | null | null | null | # Easy
# https://leetcode.com/problems/palindrome-number/
# Time Complexity: O(log(x) to base 10)
# Space Complexity: O(1) | 27.166667 | 50 | 0.542945 |
dbdc207882fb6307d686a3c2b77b753e65cc1495 | 114 | py | Python | panoramisk/__init__.py | Eyepea/panoramisk | c10725e358f5b802faa9df1d22de6710927735a0 | [
"MIT"
] | null | null | null | panoramisk/__init__.py | Eyepea/panoramisk | c10725e358f5b802faa9df1d22de6710927735a0 | [
"MIT"
] | null | null | null | panoramisk/__init__.py | Eyepea/panoramisk | c10725e358f5b802faa9df1d22de6710927735a0 | [
"MIT"
] | null | null | null | from .manager import Manager # NOQA
from .call_manager import CallManager # NOQA
from . import fast_agi # NOQA
| 28.5 | 45 | 0.763158 |
dbdc8acd947df0cf5d903b9fd18f947cd84ecb24 | 4,762 | py | Python | prtg/client.py | kevinschoon/prtg-py | 714e0750606e55b2cd4c7dff8770d94057fa932b | [
"MIT"
] | null | null | null | prtg/client.py | kevinschoon/prtg-py | 714e0750606e55b2cd4c7dff8770d94057fa932b | [
"MIT"
] | null | null | null | prtg/client.py | kevinschoon/prtg-py | 714e0750606e55b2cd4c7dff8770d94057fa932b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Python library for Paessler's PRTG (http://www.paessler.com/)
"""
import logging
import xml.etree.ElementTree as Et
from urllib import request
from prtg.cache import Cache
from prtg.models import Sensor, Device, Status, PrtgObject
from prtg.exceptions import BadTarget, UnknownResponse
"""
def refresh(self, query):
logging.info('Refreshing content: {}'.format(content))
devices = Query(target='table', endpoint=self.endpoint, username=self.username, password=self.password, content=content, counter=content)
self.connection.get_paginated_request(devices)
self.cache.write_content(devices.response)
def update(self, content, attribute, value, replace=False):
for index, obj in enumerate(content):
logging.debug('Updating object: {} with {}={}'.format(obj, attribute, value))
if attribute == 'tags':
tags = value.split(',')
if replace:
obj.tags = value.split(',')
else:
obj.tags += [x for x in tags if x not in obj.tags]
content[index] = obj
self.cache.write_content(content, force=True)
def content(self, content_name, parents=False, regex=None, attribute=None):
response = list()
for resp in self.cache.get_content(content_name):
if not all([regex, attribute]):
response.append(resp)
else:
if RegexMatch(resp, expression=regex, attribute=attribute):
response.append(resp)
if all([content_name == 'sensors', parents is True]):
logging.info('Searching for parents.. this may take a while')
p = list()
ids = set()
for index, child in enumerate(response):
parent = self.cache.get_object(str(child.parentid)) # Parent device.
if parent:
ids.add(str(parent.objid)) # Lookup unique parent ids.
else:
logging.warning('Unable to find sensor parent')
for parent in ids:
p.append(self.cache.get_object(parent))
response = p
return response
"""
| 32.616438 | 145 | 0.558169 |
dbdce6502afcfa5e2708f1c6de7ac5e46b73c5d7 | 3,303 | py | Python | template/misc.py | da-h/tf-boilerplate | ab8409c935d3fcbed07bbefd1cb0049d45283222 | [
"MIT"
] | null | null | null | template/misc.py | da-h/tf-boilerplate | ab8409c935d3fcbed07bbefd1cb0049d45283222 | [
"MIT"
] | null | null | null | template/misc.py | da-h/tf-boilerplate | ab8409c935d3fcbed07bbefd1cb0049d45283222 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.python.training.session_run_hook import SessionRunArgs
# Define data loaders #####################################
# See https://gist.github.com/peterroelants/9956ec93a07ca4e9ba5bc415b014bcca
# redefine summarysaverhook (for more accurate saving)
def ExperimentTemplate() -> str:
"""A template with Markdown syntax.
:return: str with Markdown template
"""
return """
Experiment
==========
Any [markdown code](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet) can be used to describe this experiment.
For instance, you can find the automatically generated used settings of this run below.
Current Settings
----------------
| Argument | Value |
| -------- | ----- |
"""
| 32.70297 | 126 | 0.666969 |
dbdd97337631bf234182cdf6ceb595a8b38fcc53 | 359 | py | Python | pyunitwizard/_private_tools/parsers.py | uibcdf/pyunitwizard | 54cdce7369e1f2a3771a1f05a4a6ba1d7610a5e7 | [
"MIT"
] | null | null | null | pyunitwizard/_private_tools/parsers.py | uibcdf/pyunitwizard | 54cdce7369e1f2a3771a1f05a4a6ba1d7610a5e7 | [
"MIT"
] | null | null | null | pyunitwizard/_private_tools/parsers.py | uibcdf/pyunitwizard | 54cdce7369e1f2a3771a1f05a4a6ba1d7610a5e7 | [
"MIT"
] | null | null | null | parsers = ['openmm.unit', 'pint', 'unyt']
def digest_parser(parser: str) -> str:
""" Check if parser is correct."""
if parser is not None:
if parser.lower() in parsers:
return parser.lower()
else:
raise ValueError
else:
from pyunitwizard.kernel import default_parser
return default_parser
| 25.642857 | 54 | 0.601671 |
dbddb2e414eaaea37bf5ee700d9d3c21f697c101 | 6,606 | py | Python | metric_wsd/utils/data_utils.py | bartonlin/MWSD | 70ad446ee7f00a11988acb290270e32d8e6af925 | [
"MIT"
] | 4 | 2021-04-27T16:28:51.000Z | 2021-08-30T11:10:28.000Z | metric_wsd/utils/data_utils.py | bartonlin/MWSD | 70ad446ee7f00a11988acb290270e32d8e6af925 | [
"MIT"
] | null | null | null | metric_wsd/utils/data_utils.py | bartonlin/MWSD | 70ad446ee7f00a11988acb290270e32d8e6af925 | [
"MIT"
] | 2 | 2021-08-25T14:29:45.000Z | 2022-02-12T02:09:45.000Z | '''
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Code taken from: https://github.com/facebookresearch/wsd-biencoders/blob/master/wsd_models/util.py
'''
import os
import re
import torch
import subprocess
from transformers import *
import random
pos_converter = {'NOUN':'n', 'PROPN':'n', 'VERB':'v', 'AUX':'v', 'ADJ':'a', 'ADV':'r'}
#run WSD Evaluation Framework scorer within python
#normalize ids list, masks to whatever the passed in length is
#filters down training dataset to (up to) k examples per sense
#for few-shot learning of the model
#EOF
| 29.891403 | 98 | 0.673479 |
dbddc1c2c35c862c97e10c987a1255308c864f59 | 2,825 | py | Python | examples/dehydrogenation/3-property-mappings/mappings_from_ontology/run_w_onto.py | TorgeirUstad/dlite | 1d7b4ccec0e76799a25992534cd295a80d83878a | [
"MIT"
] | null | null | null | examples/dehydrogenation/3-property-mappings/mappings_from_ontology/run_w_onto.py | TorgeirUstad/dlite | 1d7b4ccec0e76799a25992534cd295a80d83878a | [
"MIT"
] | null | null | null | examples/dehydrogenation/3-property-mappings/mappings_from_ontology/run_w_onto.py | TorgeirUstad/dlite | 1d7b4ccec0e76799a25992534cd295a80d83878a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from typing import Dict, AnyStr
from pathlib import Path
from ontopy import get_ontology
import dlite
from dlite.mappings import make_instance
# Setup dlite paths
thisdir = Path(__file__).parent.absolute()
rootdir = thisdir.parent.parent
workflow1dir = rootdir / '1-simple-workflow'
entitiesdir = rootdir / 'entities'
atomdata = workflow1dir / 'atomscaledata.json'
dlite.storage_path.append(f'{entitiesdir}/*.json')
# Define the calculation
def get_energy(reaction):
"""Calculates reaction energies with data from Substance entity
data is harvested from collection and mapped to Substance according to
mappings.
Args:
reaction: dict with names of reactants and products ase keys
and stochiometric coefficient as value
Negative stochiometric coefficients for reactants.
Positive stochiometric coefficients for products.
Returns:
reaction energy
"""
energy = 0
for label, n in reaction.items():
inst = make_instance(Substance, coll[label], mappings,
mapsTo=mapsTo)
energy+=n*inst.molecule_energy
return energy
# Import the ontologies that carry the data-model mappings.
molecules_onto = get_ontology(f'{thisdir}/mapping_mols.ttl').load()
reaction_onto = get_ontology(f'{thisdir}/mapping_substance.ttl').load()
# Convert the mappings to a single list of triples.
mappings = list(molecules_onto.get_unabbreviated_triples())
mappings.extend(reaction_onto.get_unabbreviated_triples())
# Obtain the Metadata to be mapped to each other.
Molecule = dlite.get_instance('http://onto-ns.com/meta/0.1/Molecule')
Substance = dlite.get_instance('http://onto-ns.com/meta/0.1/Substance')
# Find mapping relation
# TODO: investigate what to do if the two ontologies
# use a different mappings relation. As of now it is a
# hard requirement that they use the same.
mapsTo = molecules_onto.mapsTo.iri
# Define where the molecule data is obtained from.
# This is a dlite collection.
coll = dlite.Collection(f'json://{atomdata}?mode=r#molecules', 0)
# Input from chemical engineer, e.g. what are reactants and products:
# reactants (left side of equation) have negative stoichiometric coefficients,
# products (right side of equation) have positive stoichiometric coefficients.
reaction1 = {'C2H6': -1, 'C2H4': 1, 'H2': 1}
reaction_energy = get_energy(reaction1)
print('Reaction energy 1', reaction_energy)
reaction2 = {'C3H8': -1, 'H2': -2, 'CH4': 3}
reaction_energy2 = get_energy(reaction2)
# Fixed: this printout was mislabelled "Reaction energy 1" (copy-paste bug).
print('Reaction energy 2', reaction_energy2)
# Map instance Molecule with label 'H2' to Substance
#inst = make_instance(Substance, coll['H2'], mappings)
#print(inst)
# Map instance Molecule with label 'H2' to itself
#inst2 = make_instance(Molecule, coll['H2'], mappings, strict=False)
#print(inst2)
| 31.388889 | 75 | 0.735929 |
91551c7d6fac7874ebf8acc4dfa5dfb4b2e853a5 | 6,479 | py | Python | forms.py | lendoo73/my_idea_boxes | c0d0e7bbd0b64ae35146f3792cd477d1ec8461b5 | [
"MIT"
] | null | null | null | forms.py | lendoo73/my_idea_boxes | c0d0e7bbd0b64ae35146f3792cd477d1ec8461b5 | [
"MIT"
] | null | null | null | forms.py | lendoo73/my_idea_boxes | c0d0e7bbd0b64ae35146f3792cd477d1ec8461b5 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms import StringField, PasswordField, BooleanField, TextAreaField, SubmitField, RadioField, HiddenField
from wtforms.fields.html5 import DateField, IntegerField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, NumberRange
from models import Colleagues, Admins, Boxes, Ideas
allowed_format = ['png', 'svg', 'jpg', "jpeg"] | 40.49375 | 113 | 0.679426 |
9155a081d524a7aa2a093b5db6afb167995bd2d7 | 3,381 | py | Python | 5.analysis/scikit-multilearn-master/skmultilearn/adapt/brknn.py | fullmooncj/textmining_edu | b1402fd96fbde945f48c52d71ba4dfe51fd96602 | [
"Apache-2.0"
] | null | null | null | 5.analysis/scikit-multilearn-master/skmultilearn/adapt/brknn.py | fullmooncj/textmining_edu | b1402fd96fbde945f48c52d71ba4dfe51fd96602 | [
"Apache-2.0"
] | null | null | null | 5.analysis/scikit-multilearn-master/skmultilearn/adapt/brknn.py | fullmooncj/textmining_edu | b1402fd96fbde945f48c52d71ba4dfe51fd96602 | [
"Apache-2.0"
] | null | null | null | from builtins import range
from ..base import MLClassifierBase
from ..utils import get_matrix_in_format
from sklearn.neighbors import NearestNeighbors
import scipy.sparse as sparse
import numpy as np | 37.566667 | 135 | 0.675244 |
9155e8339948407989efd32f44f9c2682f1c678e | 931 | py | Python | groclient/constants.py | eric-gro/api-client | 0ca73422c25b5065907d068a44b72bdc43fea79f | [
"MIT"
] | 18 | 2019-01-10T21:06:17.000Z | 2022-03-15T06:22:18.000Z | groclient/constants.py | eric-gro/api-client | 0ca73422c25b5065907d068a44b72bdc43fea79f | [
"MIT"
] | 138 | 2019-01-16T15:35:35.000Z | 2022-03-23T13:05:03.000Z | groclient/constants.py | eric-gro/api-client | 0ca73422c25b5065907d068a44b72bdc43fea79f | [
"MIT"
] | 24 | 2019-02-22T19:24:54.000Z | 2022-03-15T10:17:37.000Z | """Constants about the Gro ontology that can be imported and re-used anywhere."""
# Numeric level of each region type in the Gro region hierarchy;
# lower numbers are broader (world) and higher numbers are more
# specific (down to a single coordinate).
REGION_LEVELS = {
    'world': 1,
    'continent': 2,
    'country': 3,
    'province': 4, # Equivalent to state in the United States
    'district': 5, # Equivalent to county in the United States
    'city': 6,
    'market': 7,
    'other': 8,
    'coordinate': 9
}
# Plural spellings of the ontology entity types.
ENTITY_TYPES_PLURAL = ['metrics', 'items', 'regions', 'frequencies', 'sources', 'units']
# The id fields whose combination uniquely identifies a data series.
DATA_SERIES_UNIQUE_TYPES_ID = [
    'metric_id',
    'item_id',
    'region_id',
    'partner_region_id',
    'frequency_id',
    'source_id'
]
# Maps a selection's id key to the entity type it refers to.
# Note: both region_id and partner_region_id resolve to 'regions'.
ENTITY_KEY_TO_TYPE = {
    'metric_id': 'metrics',
    'item_id': 'items',
    'region_id': 'regions',
    'partner_region_id': 'regions',
    'source_id': 'sources',
    'frequency_id': 'frequencies',
    'unit_id': 'units'
}
# A data point is unique per data series plus its date fields.
DATA_POINTS_UNIQUE_COLS = DATA_SERIES_UNIQUE_TYPES_ID + [
    'reporting_date',
    'start_date',
    'end_date'
]
| 22.707317 | 88 | 0.628357 |
9156c4aa90ea0469b8acd15340e3ebcae1eab123 | 1,535 | py | Python | asv_bench/benchmarks/tslibs/period.py | CitizenB/pandas | ee1efb6d923a2c3e5a912efe20a336179614993d | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 6 | 2020-09-10T15:03:25.000Z | 2021-04-01T22:48:33.000Z | asv_bench/benchmarks/tslibs/period.py | ivan-vasilev/pandas | 4071dde86e33434e1bee8304fa62074949f813cc | [
"BSD-3-Clause"
] | 7 | 2015-08-30T23:51:00.000Z | 2018-12-29T19:52:35.000Z | asv_bench/benchmarks/tslibs/period.py | ivan-vasilev/pandas | 4071dde86e33434e1bee8304fa62074949f813cc | [
"BSD-3-Clause"
] | 5 | 2017-10-04T22:24:49.000Z | 2021-08-06T13:50:13.000Z | """
Period benchmarks that rely only on tslibs. See benchmarks.period for
Period benchmarks that rely on other parts fo pandas.
"""
from pandas import Period
from pandas.tseries.frequencies import to_offset
| 21.619718 | 70 | 0.536808 |
91575f345c10efb311ca9de7963da8d6ac0667fd | 1,894 | py | Python | Bugscan_exploits-master/exp_list/exp-1788.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 11 | 2020-05-30T13:53:49.000Z | 2021-03-17T03:20:59.000Z | Bugscan_exploits-master/exp_list/exp-1788.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-13T03:25:18.000Z | 2020-07-21T06:24:16.000Z | Bugscan_exploits-master/exp_list/exp-1788.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-30T13:53:51.000Z | 2020-12-01T21:44:26.000Z | #/usr/bin/python
#-*- coding: utf-8 -*-
#Refer http://www.wooyun.org/bugs/wooyun-2015-0137140
#__Author__ =
#_PlugName_ = whezeip Plugin
#_FileName_ = whezeip.py
if __name__ == '__main__':
from dummy import *
audit(assign('whezeip', 'http://218.104.147.71:7001/')[1]) | 37.137255 | 97 | 0.659451 |
9158ba2878b65e7507783af35fb834ff85d1e33a | 535 | py | Python | 3-working-with-lists/zip_tuples.py | thecodingsim/learn-python | bf8e98f40e73ebf7dcf5641312c2c0296d886952 | [
"MIT"
] | null | null | null | 3-working-with-lists/zip_tuples.py | thecodingsim/learn-python | bf8e98f40e73ebf7dcf5641312c2c0296d886952 | [
"MIT"
] | null | null | null | 3-working-with-lists/zip_tuples.py | thecodingsim/learn-python | bf8e98f40e73ebf7dcf5641312c2c0296d886952 | [
"MIT"
] | null | null | null | # Use zip() to create a new variable called names_and_dogs_names that combines owners and dogs_names lists into a zip object.
# Then, create a new variable named list_of_names_and_dogs_names by calling the list() function on names_and_dogs_names.
# Print list_of_names_and_dogs_names.
owners = ["Jenny", "Alexus", "Sam", "Grace"]
dogs_names = ["Elphonse", "Dr. Doggy DDS", "Carter", "Ralph"]
names_and_dogs_names = zip(owners, dogs_names)
list_of_names_and_dogs_names = list(names_and_dogs_names)
print(list_of_names_and_dogs_names) | 48.636364 | 125 | 0.792523 |
915a53aa4a7088b23b53c3227ab2635547e8ba50 | 1,593 | py | Python | setup.py | abhiomkar/couchdbkit | 035062b504b57c1cc6e576be47fb05423fb1ddb3 | [
"MIT"
] | 1 | 2021-06-03T21:34:38.000Z | 2021-06-03T21:34:38.000Z | setup.py | abhiomkar/couchdbkit | 035062b504b57c1cc6e576be47fb05423fb1ddb3 | [
"MIT"
] | null | null | null | setup.py | abhiomkar/couchdbkit | 035062b504b57c1cc6e576be47fb05423fb1ddb3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -
#
# This file is part of couchdbkit released under the MIT license.
# See the NOTICE for more information.
import os
import sys
if not hasattr(sys, 'version_info') or sys.version_info < (2, 5, 0, 'final'):
raise SystemExit("couchdbkit requires Python 2.5 or later.")
from setuptools import setup, find_packages
from couchdbkit import __version__
setup(
name = 'couchdbkit',
version = __version__,
description = 'Python couchdb kit',
long_description = file(
os.path.join(
os.path.dirname(__file__),
'README.rst'
)
).read(),
author = 'Benoit Chesneau',
author_email = 'benoitc@e-engura.com',
license = 'Apache License 2',
url = 'http://couchdbkit.org',
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Database',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages = find_packages(exclude=['tests']),
zip_safe = False,
install_requires = [
'restkit>=3.2',
],
entry_points="""
[couchdbkit.consumers]
sync=couchdbkit.consumer.sync:SyncConsumer
eventlet=couchdbkit.consumer.ceventlet:EventletConsumer
gevent=couchdbkit.consumer.cgevent:GeventConsumer
""",
test_suite='noses',
)
| 27 | 77 | 0.626491 |
915bb507e25fc7cb08c5d136b971e88a2d706d9b | 1,934 | py | Python | tests/integration/test_infrastructure_persistence.py | othercodes/sample-todo-list-hexagonal-achitecture | a958c6906d8e777e837c8348c754b637b89a7031 | [
"Apache-2.0"
] | null | null | null | tests/integration/test_infrastructure_persistence.py | othercodes/sample-todo-list-hexagonal-achitecture | a958c6906d8e777e837c8348c754b637b89a7031 | [
"Apache-2.0"
] | null | null | null | tests/integration/test_infrastructure_persistence.py | othercodes/sample-todo-list-hexagonal-achitecture | a958c6906d8e777e837c8348c754b637b89a7031 | [
"Apache-2.0"
] | null | null | null | from typing import Optional
from complexheart.domain.criteria import Criteria
from sqlalchemy import create_engine
from sqlalchemy.engine import Engine
from sqlalchemy.orm import sessionmaker
from to_do_list.tasks.domain.models import Task
from to_do_list.tasks.infrastructure.persistence.relational import RelationalTaskRepository, DBInstaller
db_engine: Optional[Engine] = None
| 27.628571 | 104 | 0.73061 |
915c531ce1a9edc3c8480b0c6bf84bed9c0ec81f | 2,934 | py | Python | wagtail_jinja2/extensions.py | minervaproject/wagtail-jinja2-extensions | 708f2f873273312ead80d67c3eff0555f152d072 | [
"MIT"
] | 6 | 2015-09-25T15:33:17.000Z | 2021-11-17T23:25:52.000Z | wagtail_jinja2/extensions.py | minervaproject/wagtail-jinja2-extensions | 708f2f873273312ead80d67c3eff0555f152d072 | [
"MIT"
] | 1 | 2015-09-29T15:53:40.000Z | 2015-09-29T15:53:40.000Z | wagtail_jinja2/extensions.py | minervaproject/wagtail-jinja2-extensions | 708f2f873273312ead80d67c3eff0555f152d072 | [
"MIT"
] | null | null | null | from jinja2.ext import Extension
from jinja2 import nodes
from jinja2 import Markup
from wagtail.wagtailadmin.templatetags.wagtailuserbar import wagtailuserbar as original_wagtailuserbar
from wagtail.wagtailimages.models import Filter, SourceImageIOError
| 40.75 | 113 | 0.65576 |
915d6d3e43279c39fd9d72fc48c527f4f811ec46 | 180 | py | Python | rta/provision/__init__.py | XiaoguTech/rta-sandbox | 2783a3ba8920bf64273761ce7392e51c9c8fb1f7 | [
"MIT"
] | null | null | null | rta/provision/__init__.py | XiaoguTech/rta-sandbox | 2783a3ba8920bf64273761ce7392e51c9c8fb1f7 | [
"MIT"
] | null | null | null | rta/provision/__init__.py | XiaoguTech/rta-sandbox | 2783a3ba8920bf64273761ce7392e51c9c8fb1f7 | [
"MIT"
] | null | null | null | from rta.provision.utils import *
from rta.provision.passwd import *
from rta.provision.influxdb import *
from rta.provision.grafana import *
from rta.provision.kapacitor import *
| 30 | 37 | 0.805556 |
915d76b7f2fcca50d25cf033042e2f1d7c43e461 | 14,694 | py | Python | nn_dataflow/tests/unit_test/test_network.py | Pingziwalk/nn_dataflow | 5ae8eeba4e243df6e9a69127073513a852a62d17 | [
"BSD-3-Clause"
] | 170 | 2017-02-28T01:33:11.000Z | 2022-03-12T09:56:47.000Z | nn_dataflow/tests/unit_test/test_network.py | Pingziwalk/nn_dataflow | 5ae8eeba4e243df6e9a69127073513a852a62d17 | [
"BSD-3-Clause"
] | 24 | 2017-09-18T20:14:51.000Z | 2022-01-23T06:43:28.000Z | nn_dataflow/tests/unit_test/test_network.py | Pingziwalk/nn_dataflow | 5ae8eeba4e243df6e9a69127073513a852a62d17 | [
"BSD-3-Clause"
] | 71 | 2017-02-07T17:36:17.000Z | 2022-03-26T00:45:00.000Z | """ $lic$
Copyright (C) 2016-2020 by Tsinghua University and The Board of Trustees of
Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
import unittest
from nn_dataflow.core import Network
from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, \
PoolingLayer, EltwiseLayer
| 38.365535 | 79 | 0.598884 |
915df7659ad33f08dc46ec91edcb67d2d6a2b9af | 365 | py | Python | apps/division/urls.py | Jingil-Integrated-Management/JIM_backend | f0e7860d57eddaee034531a52ab91d6715d12c18 | [
"Apache-2.0"
] | null | null | null | apps/division/urls.py | Jingil-Integrated-Management/JIM_backend | f0e7860d57eddaee034531a52ab91d6715d12c18 | [
"Apache-2.0"
] | null | null | null | apps/division/urls.py | Jingil-Integrated-Management/JIM_backend | f0e7860d57eddaee034531a52ab91d6715d12c18 | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from .views import DivisionListCreateAPIView, DivisionRetrieveUpdateDestroyAPIView, MainDivisionListAPIView
# URL routes for the division API: list/create at the collection root,
# retrieve/update/destroy for a single division (keyed by division_pk),
# and a separate listing endpoint under division/main/.
urlpatterns = [
    path('division/', DivisionListCreateAPIView.as_view()),
    path('division/<division_pk>', DivisionRetrieveUpdateDestroyAPIView.as_view()),
    path('division/main/', MainDivisionListAPIView.as_view()),
]
| 33.181818 | 107 | 0.794521 |
915ff60df252f62c3f259d30deba52d17fbf124c | 9,077 | py | Python | sympy/solvers/tests/test_pde.py | nashalex/sympy | aec3e6512be46f0558f5dbcf2b4d723496c91649 | [
"BSD-3-Clause"
] | 8,323 | 2015-01-02T15:51:43.000Z | 2022-03-31T13:13:19.000Z | sympy/solvers/tests/test_pde.py | nashalex/sympy | aec3e6512be46f0558f5dbcf2b4d723496c91649 | [
"BSD-3-Clause"
] | 15,102 | 2015-01-01T01:33:17.000Z | 2022-03-31T22:53:13.000Z | sympy/solvers/tests/test_pde.py | nashalex/sympy | aec3e6512be46f0558f5dbcf2b4d723496c91649 | [
"BSD-3-Clause"
] | 4,490 | 2015-01-01T17:48:07.000Z | 2022-03-31T17:24:05.000Z | from sympy import (Derivative as D, Eq, exp, sin,
Function, Symbol, symbols, cos, log)
from sympy.core import S
from sympy.solvers.pde import (pde_separate, pde_separate_add, pde_separate_mul,
pdsolve, classify_pde, checkpdesol)
from sympy.testing.pytest import raises
a, b, c, x, y = symbols('a b c x y')
| 38.299578 | 84 | 0.537512 |
91613dad90fa3ec0c081f265b28f59e30cdfc17e | 6,376 | py | Python | GCN/GCN.py | EasternJournalist/learn-deep-learning | cc424713ffc57b8a796ebd81354a1b887f9c5092 | [
"MIT"
] | 6 | 2021-08-18T03:29:12.000Z | 2022-03-22T13:15:35.000Z | GCN/GCN.py | EasternJournalist/learn-deep-learning | cc424713ffc57b8a796ebd81354a1b887f9c5092 | [
"MIT"
] | null | null | null | GCN/GCN.py | EasternJournalist/learn-deep-learning | cc424713ffc57b8a796ebd81354a1b887f9c5092 | [
"MIT"
] | 2 | 2022-01-06T12:25:02.000Z | 2022-03-22T13:15:36.000Z | import torch
import torch.nn.functional as F
import pandas as pd
import numpy as np
from torch_geometric.data import Data
from torch_geometric.nn import GCNConv, PairNorm
from torch_geometric.utils.undirected import to_undirected
import random
import matplotlib.pyplot as plt
data_name = 'citeseer' # 'cora' or 'citeseer'
data_edge_path = f'datasets/{data_name}/{data_name}.cites'
data_content_path = f'datasets/{data_name}/{data_name}.content'
raw_content = pd.read_table(data_content_path, header=None, dtype={0:np.str})
raw_edge = pd.read_table(data_edge_path, header=None, dtype=np.str)
paper_ids = raw_content[0]
paper_id_map = {}
for i, pp_id in enumerate(paper_ids):
paper_id_map[pp_id] = i
edge_index = torch.from_numpy(raw_edge.apply(lambda col: col.map(paper_id_map)).dropna().values).long().t().contiguous()
x = torch.from_numpy(raw_content.values[:, 1:-1].astype(np.float)).float()
labels = np.unique(raw_content[raw_content.keys()[-1]]).tolist()
y = torch.from_numpy(raw_content[raw_content.keys()[-1]].map(lambda x: labels.index(x)).values).long()
train_mask, test_mask = get_mask(y)
data = Data(x=x, edge_index=edge_index, y=y, train_mask=train_mask, test_mask=test_mask)
num_epochs = 100
test_cases = [
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'relu', 'undirected':False},
# num layers
{'num_layers':4, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'relu', 'undirected':False},
{'num_layers':6, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'relu', 'undirected':False},
# self loop
{'num_layers':2, 'add_self_loops':False, 'use_pairnorm':False, 'drop_edge':1., 'activation':'relu', 'undirected':False},
# pair norm
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':True, 'drop_edge':1., 'activation':'relu', 'undirected':False},
{'num_layers':4, 'add_self_loops':True, 'use_pairnorm':True, 'drop_edge':1., 'activation':'relu', 'undirected':False},
{'num_layers':6, 'add_self_loops':True, 'use_pairnorm':True, 'drop_edge':1., 'activation':'relu', 'undirected':False},
# drop edge
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':0.6, 'activation':'relu', 'undirected':False},
{'num_layers':4, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':0.6, 'activation':'relu', 'undirected':False},
# activation fn
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'tanh', 'undirected':False},
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'leaky_relu', 'undirected':False},
# undirected
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'relu', 'undirected':True},
{'num_layers':4, 'add_self_loops':True, 'use_pairnorm':True, 'drop_edge':1., 'activation':'relu', 'undirected':True},
{'num_layers':4, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':0.8, 'activation':'relu', 'undirected':True},
]
for i_case, kwargs in enumerate(test_cases):
print(f'Test Case {i_case:>2}')
model = GCNNodeClassifier(x.shape[1], len(labels), **kwargs)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
history_test_acc = []
input_edge_index = to_undirected(edge_index) if kwargs['undirected'] else edge_index
for i_epoch in range(0, num_epochs):
print(f'Epoch {i_epoch:>3} ', end='')
y_pred = model(x, input_edge_index)
train_acc = eval_acc(y_pred[train_mask], y[train_mask])
# Train
loss = F.cross_entropy(y_pred[train_mask], y[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Test
test_acc = eval_acc(y_pred[test_mask], y[test_mask])
history_test_acc.append(test_acc)
print(f'Train Acc = {train_acc}. Test Acc = {test_acc}')
kwargs['best_acc'] = max(history_test_acc)
plt.plot(list(range(num_epochs)), history_test_acc, label=f'case_{str(i_case).zfill(2)}')
plt.legend()
plt.savefig(f'{data_name}-HistoryAcc.jpg')
pd.DataFrame(test_cases).to_csv(f'{data_name}-Result.csv')
| 44.587413 | 134 | 0.674875 |
9163007875867d67440a283e2e9737b0b98baef2 | 3,724 | py | Python | esg_leipzig_homepage_2015/views.py | ESG-Leipzig/Homepage-2015 | 6b77451881031dcb640d2e61ce862617d634f9ac | [
"MIT"
] | null | null | null | esg_leipzig_homepage_2015/views.py | ESG-Leipzig/Homepage-2015 | 6b77451881031dcb640d2e61ce862617d634f9ac | [
"MIT"
] | 4 | 2015-03-31T22:37:09.000Z | 2015-10-22T21:37:17.000Z | esg_leipzig_homepage_2015/views.py | ESG-Leipzig/Homepage-2015 | 6b77451881031dcb640d2e61ce862617d634f9ac | [
"MIT"
] | 3 | 2015-02-03T10:23:24.000Z | 2018-04-11T12:29:23.000Z | import datetime
import json
from django.conf import settings
from django.http import Http404
from django.utils import timezone
from django.views import generic
from .models import Event, FlatPage, News
| 31.559322 | 78 | 0.603652 |
91632bfaf2e874f47f67ae904c5dae1d1c06cb7a | 3,509 | py | Python | train.py | ronniechong/tensorflow-trainer | 79e58d224ce1e5ae687abee2bfd81deb49bd41dd | [
"MIT"
] | null | null | null | train.py | ronniechong/tensorflow-trainer | 79e58d224ce1e5ae687abee2bfd81deb49bd41dd | [
"MIT"
] | 6 | 2021-06-08T21:56:34.000Z | 2022-03-12T00:39:34.000Z | train.py | ronniechong/tensorflow-trainer | 79e58d224ce1e5ae687abee2bfd81deb49bd41dd | [
"MIT"
] | null | null | null | from dotenv import load_dotenv
load_dotenv()
from flask import Flask, flash, request, redirect, url_for
from flask_ngrok import run_with_ngrok
from flask_cors import CORS
from werkzeug.utils import secure_filename
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.applications import vgg16
from tensorflow.keras import layers, models, Model, optimizers
from tensorflow.keras.preprocessing import image
import numpy as np
import os
import base64
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
app = Flask(__name__)
app.secret_key = os.getenv('SECRETKEY')
CORS(app)
# run_with_ngrok(app)
# https://github.com/gstaff/flask-ngrok/issues/2
category_names = os.getenv('CATEGORIES').split(',')
nb_categories = len(category_names)
type = os.getenv('MODE')
if type == 'checkpoint':
# Load via checkpoints
img_height, img_width = 200,200
conv_base = vgg16.VGG16(weights='imagenet', include_top=False, pooling='max', input_shape = (img_width, img_height, 3))
layers = [
conv_base,
layers.Dense(nb_categories, activation='softmax')
]
model = models.Sequential(layers)
model.load_weights('./model/cp2-0010.ckpt')
else:
# Load saved model
model = models.load_model('./model/model_vgg16.h5')
if __name__ == '__main__':
app.run(host='0.0.0.0') | 29 | 121 | 0.654032 |
91633c0b686a90b166f71428baf166c3cd9fcb51 | 4,555 | py | Python | src/models/train_model.py | sandorfoldi/chess_positions_recognition | b051f5ba066876d54c435d96cf7e339dfc369b3b | [
"FTL"
] | null | null | null | src/models/train_model.py | sandorfoldi/chess_positions_recognition | b051f5ba066876d54c435d96cf7e339dfc369b3b | [
"FTL"
] | null | null | null | src/models/train_model.py | sandorfoldi/chess_positions_recognition | b051f5ba066876d54c435d96cf7e339dfc369b3b | [
"FTL"
] | 1 | 2022-01-08T20:26:08.000Z | 2022-01-08T20:26:08.000Z | import random
import matplotlib.pyplot as plt
import wandb
import hydra
import torch
import torch.utils.data as data_utils
from model import ChessPiecePredictor
from torch import nn, optim
from google.cloud import storage
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import ImageFolder
if __name__ == "__main__":
train()
| 29.967105 | 100 | 0.636004 |
9163be87e7924e53bd340c783bc5110d591ba91f | 1,386 | py | Python | fairseq/scoring/__init__.py | fairseq-FT/fairseq | 18725499144c1bba7c151b796ba774e59d36eaa9 | [
"MIT"
] | 33 | 2021-01-06T18:03:55.000Z | 2022-03-28T12:07:44.000Z | fairseq/scoring/__init__.py | fairseq-FT/fairseq | 18725499144c1bba7c151b796ba774e59d36eaa9 | [
"MIT"
] | 8 | 2021-06-11T03:11:37.000Z | 2022-03-08T19:15:42.000Z | fairseq/scoring/__init__.py | fairseq-FT/fairseq | 18725499144c1bba7c151b796ba774e59d36eaa9 | [
"MIT"
] | 14 | 2021-05-17T06:55:01.000Z | 2022-03-28T12:07:42.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
from abc import ABC, abstractmethod
from fairseq import registry
from omegaconf import DictConfig
_build_scorer, register_scorer, SCORER_REGISTRY, _ = registry.setup_registry(
"--scoring", default="bleu"
)
# automatically import any Python files in the current directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
module = file[: file.find(".py")]
importlib.import_module("fairseq.scoring." + module)
| 24.315789 | 87 | 0.665945 |
9164b76283b749a665c678ccd635362448fe685d | 10,817 | py | Python | dfn/tests/test_FractureNetworkThermal.py | richardhaslam/discrete-fracture-network | 2a235fdd3aedfb80dbd9f441d07c5713a6d6c74f | [
"MIT"
] | 1 | 2021-06-01T17:38:15.000Z | 2021-06-01T17:38:15.000Z | dfn/tests/test_FractureNetworkThermal.py | richardhaslam/discrete-fracture-network | 2a235fdd3aedfb80dbd9f441d07c5713a6d6c74f | [
"MIT"
] | null | null | null | dfn/tests/test_FractureNetworkThermal.py | richardhaslam/discrete-fracture-network | 2a235fdd3aedfb80dbd9f441d07c5713a6d6c74f | [
"MIT"
] | null | null | null | import copy
import unittest
import networkx as nx
import numpy as np
from scipy.special import erf
from dfn import Fluid, FractureNetworkThermal
if __name__ == '__main__':
unittest.main()
| 38.222615 | 79 | 0.547194 |
91674cee92c414668d806e044c2d5ffc326ce9fc | 10,775 | py | Python | dataapi/AWS/getawsdata.py | gusamarante/Quantequim | 3968d9965e8e2c3b5850f1852b56c485859a9c89 | [
"MIT"
] | 296 | 2018-10-19T21:00:53.000Z | 2022-03-29T21:50:55.000Z | dataapi/AWS/getawsdata.py | gusamarante/Quantequim | 3968d9965e8e2c3b5850f1852b56c485859a9c89 | [
"MIT"
] | 11 | 2019-06-18T11:43:35.000Z | 2021-11-14T21:39:20.000Z | dataapi/AWS/getawsdata.py | gusamarante/FinanceLab | 3968d9965e8e2c3b5850f1852b56c485859a9c89 | [
"MIT"
] | 102 | 2018-10-18T14:14:34.000Z | 2022-03-06T00:34:53.000Z | """
Author: Gustavo Amarante
"""
import numpy as np
import pandas as pd
from datetime import datetime
| 35.212418 | 117 | 0.604826 |
9167fe0a7f3eeef9305940bbccf9dcc614aaf736 | 569 | py | Python | assets/utils/config.py | mklew/quickstart-data-lake-qubole | bb9b4a559815fc293b0fa06aa7e536fe14ced6dd | [
"Apache-2.0"
] | null | null | null | assets/utils/config.py | mklew/quickstart-data-lake-qubole | bb9b4a559815fc293b0fa06aa7e536fe14ced6dd | [
"Apache-2.0"
] | null | null | null | assets/utils/config.py | mklew/quickstart-data-lake-qubole | bb9b4a559815fc293b0fa06aa7e536fe14ced6dd | [
"Apache-2.0"
] | null | null | null | from configparser import ConfigParser
# Configuration keys for cluster sizing / EBS volume settings.
# NOTE(review): presumably these raw config values are coerced to int by
# the config reader — confirm against the loader (not visible here).
CONFIG_INT_KEYS = {
    'hadoop_max_nodes_count',
    'hadoop_ebs_volumes_count',
    'hadoop_ebs_volume_size',
    'spark_max_nodes_count',
    'spark_ebs_volumes_count',
    'spark_ebs_volume_size'
}
| 27.095238 | 101 | 0.72232 |
91685cf5f5c65ae2f279254e25c1a73ac7408132 | 609 | py | Python | app/blueprints/admin_api/__init__.py | lvyaoo/api-demo | f45c05c154385510572b5200b74dcbbfdb7e234c | [
"MIT"
] | null | null | null | app/blueprints/admin_api/__init__.py | lvyaoo/api-demo | f45c05c154385510572b5200b74dcbbfdb7e234c | [
"MIT"
] | null | null | null | app/blueprints/admin_api/__init__.py | lvyaoo/api-demo | f45c05c154385510572b5200b74dcbbfdb7e234c | [
"MIT"
] | null | null | null | from flask import Blueprint
from .hooks import admin_auth
from ...api_utils import *
bp_admin_api = Blueprint('bp_admin_api', __name__)
bp_admin_api.register_error_handler(APIError, handle_api_error)
bp_admin_api.register_error_handler(500, handle_500_error)
bp_admin_api.register_error_handler(400, handle_400_error)
bp_admin_api.register_error_handler(401, handle_401_error)
bp_admin_api.register_error_handler(403, handle_403_error)
bp_admin_api.register_error_handler(404, handle_404_error)
bp_admin_api.before_request(before_api_request)
bp_admin_api.before_request(admin_auth)
from . import v_admin
| 33.833333 | 63 | 0.868637 |
916aaa2f9132fad05b66933ca386d50c7aed073b | 6,083 | py | Python | project/starter_code/student_utils.py | nihaagarwalla/nd320-c1-emr-data-starter | 6ce6bb65e89b38f1c2119a739b892ad2504adf7d | [
"MIT"
] | null | null | null | project/starter_code/student_utils.py | nihaagarwalla/nd320-c1-emr-data-starter | 6ce6bb65e89b38f1c2119a739b892ad2504adf7d | [
"MIT"
] | null | null | null | project/starter_code/student_utils.py | nihaagarwalla/nd320-c1-emr-data-starter | 6ce6bb65e89b38f1c2119a739b892ad2504adf7d | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import os
import tensorflow as tf
import functools
####### STUDENTS FILL THIS OUT ######
#Question 3
def reduce_dimension_ndc(df, ndc_df):
    '''
    Join a generic drug name onto each encounter row.

    The NDC drug-code table maps every ndc_code to a non-proprietary drug
    name; that name is normalized and attached to the input dataset as the
    new ``generic_drug_name`` column so many NDC codes collapse to far
    fewer generic drugs.

    df: pandas dataframe, input dataset (must contain an 'ndc_code' column)
    ndc_df: pandas dataframe, drug code dataset used for mapping in generic names
    return:
        reduce_dim_df: pandas dataframe, output dataframe with joined generic drug name
    '''
    # Work on a copy: the original mutated the caller's lookup table in place.
    ndc_df = ndc_df.copy()
    # Normalize the non-proprietary name so spelling variants collapse to
    # one generic name: expand the 'Hcl' abbreviation, join multi-drug
    # names with '-', then strip whitespace and upper-case.
    name = ndc_df["Non-proprietary Name"]
    name = name.str.replace("Hcl", "Hydrochloride")
    name = name.str.replace(" And ", "-")
    ndc_df["generic_drug_name"] = name.str.strip().str.upper()
    # Inner join: encounters without a matching NDC code are dropped.
    df_reduce_dimension = pd.merge(df, ndc_df, on=['ndc_code'], how='inner')
    df_reduce_dimension['LABEL'] = 0
    # Drop the raw NDC descriptor columns; only generic_drug_name is kept.
    reduce_dim_df = df_reduce_dimension.drop(
        columns=['Proprietary Name', 'Non-proprietary Name', 'Dosage Form',
                 'Route Name', 'Company Name', 'Product Type'])
    return reduce_dim_df
#Question 4
def select_first_encounter(df):
    '''
    df: pandas dataframe, dataframe with all encounters
    return:
        - first_encounter_df: pandas dataframe, dataframe with only the first encounter for a given patient

    Encounters are ordered by encounter_id, so the row kept per patient is
    the one with that patient's smallest encounter id.
    '''
    by_encounter_order = df.sort_values('encounter_id')
    per_patient = by_encounter_order.groupby('patient_nbr')
    # groupby(...).first() keeps the earliest encounter's values; the group
    # key becomes the index, so restore it as a regular column.
    return per_patient.first().reset_index()
#Question 6
def patient_dataset_splitter(df, key='patient_nbr'):
    '''
    Split the dataset 60/20/20 at the patient level, so that no patient
    appears in more than one partition.

    df: pandas dataframe, input dataset that will be split
    key: string, column that is the patient id
    return:
     - train: pandas dataframe,
     - validation: pandas dataframe,
     - test: pandas dataframe,
    '''
    # Shuffle rows first so the patient order (and thus the split) is random.
    shuffled = df.iloc[np.random.permutation(len(df))]
    patients = shuffled[key].unique()
    n_patients = len(patients)
    # 60% of patients go to training; the remainder is halved between
    # validation and test.
    n_train = round(n_patients * (1 - 0.4))
    n_validation = round(len(patients[n_train:]) * 0.5)
    train_ids = patients[:n_train]
    validation_ids = patients[n_train:n_train + n_validation]
    test_ids = patients[n_train + n_validation:]
    train = shuffled[shuffled[key].isin(train_ids)].reset_index(drop=True)
    validation = shuffled[shuffled[key].isin(validation_ids)].reset_index(drop=True)
    test = shuffled[shuffled[key].isin(test_ids)].reset_index(drop=True)
    return train, validation, test
#Question 7
def create_tf_categorical_feature_cols(categorical_col_list,
                              vocab_dir='./diabetes_vocab/'):
    '''
    categorical_col_list: list, categorical field list that will be transformed with TF feature column
    vocab_dir: string, the path where the vocabulary text files are located
    return:
        output_tf_list: list of TF feature columns
    '''
    output_tf_list = []
    for field_name in categorical_col_list:
        # Each field has a matching "<field>_vocab.txt" vocabulary file.
        vocab_path = os.path.join(vocab_dir, field_name + "_vocab.txt")
        # Values outside the vocabulary fall into a single OOV bucket.
        vocab_column = tf.feature_column.categorical_column_with_vocabulary_file(
            key=field_name, vocabulary_file=vocab_path, num_oov_buckets=1)
        # Wrap as an indicator (one-hot/multi-hot) column for dense models.
        output_tf_list.append(tf.feature_column.indicator_column(vocab_column))
    return output_tf_list
#Question 8
def normalize_numeric_with_zscore(col, mean, std):
    '''
    Standardize a numeric value/column to a z-score: (value - mean) / std.

    This function can be used in conjunction with the tf feature column for
    normalization (typically bound via functools.partial as a normalizer_fn).
    '''
    deviation = col - mean
    return deviation / std
def create_tf_numeric_feature(col, MEAN, STD, default_value=0):
    '''
    col: string, input numerical column name
    MEAN: the mean for the column in the training data
    STD: the standard deviation for the column in the training data
    default_value: the value that will be used for imputing the field
    return:
        tf_numeric_feature: tf feature column representation of the input field
    '''
    # Bind the training-set statistics so TF z-scores the field on the fly.
    z_score_normalizer = functools.partial(
        normalize_numeric_with_zscore, mean=MEAN, std=STD)
    return tf.feature_column.numeric_column(
        key=col,
        default_value=default_value,  # imputation for missing values
        normalizer_fn=z_score_normalizer,
        dtype=tf.float64,
    )
#Question 9
def get_mean_std_from_preds(diabetes_yhat):
    '''
    diabetes_yhat: TF Probability prediction object
    return:
        tuple (mean, stddev) taken from the predictive distribution
    '''
    return diabetes_yhat.mean(), diabetes_yhat.stddev()
# Question 10
def get_student_binary_prediction(df, col):
    '''
    Convert probability-mean predictions into binary labels.

    df: pandas dataframe, prediction output dataframe
    col: str, probability mean prediction field
    return:
        student_binary_prediction: flattened numpy array of binary labels
        (1 when the predicted value is >= 25, else 0)
    '''
    # Bug fix: the implementation had been swallowed into the docstring,
    # leaving `student_binary_prediction` undefined (NameError on call).
    # Reconstructed from that example: threshold at 25 and flatten so
    # downstream metrics receive a 1-D array.
    student_binary_prediction = (
        df[col].apply(lambda x: 1 if x >= 25 else 0).to_numpy().flatten()
    )
    return student_binary_prediction
| 40.553333 | 158 | 0.706395 |