blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6e766d1791c25108dbb653d7258644de85a8c46a | 40b3028706b79b2c12603ec3d8c3731186ff054c | /template/project/views/__init__.py | 8ab71c6bb2795bf48c9ecf78989f3384a105c55c | [] | no_license | avara1986/python-ms | 565c3ddac46eaf8be2a7e7094b73122aebd5911b | 788943686c69ead7029253ff20d74b64fa122628 | refs/heads/master | 2021-09-10T10:11:05.703836 | 2018-03-24T14:07:42 | 2018-03-24T14:07:42 | 121,014,766 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | # coding=utf-8
from __future__ import unicode_literals, print_function, absolute_import, division
from flask import Blueprint
# Blueprint for this app's views; static assets are served under /static.
views_bp = Blueprint('views', __name__, static_url_path='/static')
# Imported after the blueprint exists so the view module can register on it.
from project.views import views
"a.vara.1986@gmail.com"
] | a.vara.1986@gmail.com |
133dd666a8084cf7b442838d7798f039cbb32c14 | ff66dfb302dfdc5a519787cea8ad0ccfc2264334 | /python/ex2_logistic_regression/log_reg_funcs/map_feature.py | e18a6bc7d0f8ee1195718d8e67bf4de005bbca2a | [
"MIT"
] | permissive | ashu-vyas-github/AndrewNg_MachineLearning_Coursera | 1c2d50e6a44e8e673203bf06a3f0165cac0a240e | 1be5124b07df61f7295dd1c5151b86b061bf50fc | refs/heads/main | 2023-07-11T14:30:52.057125 | 2021-08-17T06:04:30 | 2021-08-17T06:04:30 | 388,360,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | import numpy
def map_feature(X1, X2, num_examples, degree=6):
    """
    Map two input features to polynomial features.

    Expands (x1, x2) into all monomials up to the given degree:
    1, x1, x2, x1^2, x1*x2, x2^2, x1^3, ..., x2^degree.

    Args:
        X1, X2: array-likes of shape (num_examples,) holding the two
            original feature columns.
        num_examples: int, number of rows in X1/X2; used to size the
            leading bias column of ones.
        degree: int, the maximum total polynomial degree (default 6).

    Returns:
        numpy.ndarray of shape (num_examples, n_terms), where
        n_terms = (degree + 1) * (degree + 2) / 2; the first column is
        all ones (the intercept term).
    """
    # Previous docstring documented a nonexistent "features" parameter and
    # claimed a "df" return; it is a plain ndarray built column by column.
    mapped_features = numpy.ones(num_examples)
    for total_degree in range(1, degree + 1):
        for x2_power in range(total_degree + 1):
            # Monomial x1^(d - j) * x2^j for the current total degree d.
            term = numpy.multiply(numpy.power(X1, total_degree - x2_power),
                                  numpy.power(X2, x2_power))
            mapped_features = numpy.c_[mapped_features, term]
    return mapped_features
| [
"ashutoshavyas@gmail.com"
] | ashutoshavyas@gmail.com |
0a65abe66e6a1fdb564042336f4b90993b9c6ce4 | 4deda1b482534cbd8e9a8f638b8e89651251e62e | /2_Training/src/keras_yolo3/kmeans.py | ff6af5da5aca2c882996ae6045ec7d82841e91cb | [] | no_license | SIlvaMFPedro/train-your-own-yolo | 4cd92af5542a81caa6ce607bf6e487fc0aa43ef0 | f872f514dbc553ce7732b98c6d0f1b1134fa539e | refs/heads/master | 2022-12-30T11:26:59.462514 | 2020-10-09T13:32:17 | 2020-10-09T13:32:17 | 295,769,074 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,751 | py | # -----------------------------
# USAGE
# -----------------------------
# python kmeans.py
# -----------------------------
# IMPORTS
# -----------------------------
# Import the necessary packages
import numpy as np
# -----------------------------
# YOLO KMEANS
# -----------------------------
class YOLO_Kmeans:
    """Cluster bounding-box sizes with k-means to produce YOLO anchor boxes.

    The distance metric is 1 - IoU between a box and a cluster centre,
    both treated as (width, height) pairs anchored at the same corner,
    which groups boxes by shape rather than Euclidean size alone.
    """

    def __init__(self, cluster_number, filename):
        """
        Args:
            cluster_number: int, number of anchors (k) to compute.
            filename: annotation file; one image per line with
                space-separated "x_min,y_min,x_max,y_max,class" boxes.
        """
        self.cluster_number = cluster_number
        # Bug fix: this used to be hard-coded to "2012_train.txt",
        # silently ignoring the caller-supplied filename.
        self.filename = filename

    def iou(self, boxes, clusters):  # 1 box -> k clusters
        """Return an (n, k) matrix of IoU between every box and every cluster."""
        n = boxes.shape[0]
        k = self.cluster_number

        box_area = boxes[:, 0] * boxes[:, 1]
        box_area = box_area.repeat(k)
        box_area = np.reshape(box_area, (n, k))

        cluster_area = clusters[:, 0] * clusters[:, 1]
        cluster_area = np.tile(cluster_area, [1, n])
        cluster_area = np.reshape(cluster_area, (n, k))

        # Corner-anchored boxes: the intersection is min(w) * min(h).
        box_w_matrix = np.reshape(boxes[:, 0].repeat(k), (n, k))
        cluster_w_matrix = np.reshape(np.tile(clusters[:, 0], (1, n)), (n, k))
        min_w_matrix = np.minimum(cluster_w_matrix, box_w_matrix)

        box_h_matrix = np.reshape(boxes[:, 1].repeat(k), (n, k))
        cluster_h_matrix = np.reshape(np.tile(clusters[:, 1], (1, n)), (n, k))
        min_h_matrix = np.minimum(cluster_h_matrix, box_h_matrix)
        inter_area = np.multiply(min_w_matrix, min_h_matrix)

        result = inter_area / (box_area + cluster_area - inter_area)
        return result

    def avg_iou(self, boxes, clusters):
        """Mean IoU of each box with its best-matching cluster (the accuracy)."""
        accuracy = np.mean([np.max(self.iou(boxes, clusters), axis=1)])
        return accuracy

    def kmeans(self, boxes, k, dist=np.median):
        """Run IoU-based k-means over the (width, height) rows of `boxes`.

        Centres are initialised from k random boxes and updated with the
        per-cluster median until assignments stop changing.
        """
        box_number = boxes.shape[0]
        distances = np.empty((box_number, k))
        last_nearest = np.zeros((box_number,))
        np.random.seed()
        clusters = boxes[np.random.choice(box_number, k, replace=False)]  # init k clusters
        while True:
            distances = 1 - self.iou(boxes, clusters)
            current_nearest = np.argmin(distances, axis=1)
            if (last_nearest == current_nearest).all():
                break  # assignments stable -> clusters won't change
            for cluster in range(k):
                # Move each centre to the median of its assigned boxes.
                clusters[cluster] = dist(boxes[current_nearest == cluster], axis=0)
            last_nearest = current_nearest
        return clusters

    def result2txt(self, data):
        """Write the anchors to yolo_anchors.txt as "w,h, w,h, ..."."""
        f = open("yolo_anchors.txt", "w")
        row = np.shape(data)[0]
        for i in range(row):
            if i == 0:
                x_y = "%d,%d" % (data[i][0], data[i][1])
            else:
                x_y = ", %d,%d" % (data[i][0], data[i][1])
            f.write(x_y)
        f.close()

    def txt2boxes(self):
        """Parse the annotation file into an (n, 2) array of (width, height)."""
        f = open(self.filename, "r")
        dataset = []
        for line in f:
            infos = line.split(" ")
            length = len(infos)
            # Field 0 is the image path; every remaining field is one box
            # encoded as "x_min,y_min,x_max,y_max,class".
            for i in range(1, length):
                width = int(infos[i].split(",")[2]) - int(infos[i].split(",")[0])
                height = int(infos[i].split(",")[3]) - int(infos[i].split(",")[1])
                dataset.append([width, height])
        result = np.array(dataset)
        f.close()
        return result

    def txt2clusters(self):
        """Full pipeline: read boxes, cluster, save anchors, and report accuracy."""
        all_boxes = self.txt2boxes()
        result = self.kmeans(all_boxes, k=self.cluster_number)
        result = result[np.lexsort(result.T[0, None])]  # sort anchors by width
        self.result2txt(result)
        print("K anchors:\n {}".format(result))
        print("Accuracy: {:.2f}%".format(self.avg_iou(all_boxes, result) * 100))
# -----------------------------
# MAIN
# -----------------------------
if __name__ == '__main__':
    # Compute 9 anchor boxes from the VOC 2012 training annotations.
    anchor_finder = YOLO_Kmeans(9, "2012_train.txt")
    anchor_finder.txt2clusters()
| [
"silva.mfpedro@gmail.com"
] | silva.mfpedro@gmail.com |
94d9193a284802436208775ba4c528db0580ef80 | 5eef5390146a6a1a8502ffbeba5b3bc211060bf2 | /0x0F-python-object_relational_mapping/10-model_state_my_get.py | aac6c0c0c3b83628cc89f650ee0f63df3cfdddd8 | [] | no_license | sebastiancalleu/holbertonschool-higher_level_programming | 581b68fea5c5ea469a8abfddae9890cc8c9387e3 | 06b7a7f6481d01f37f0fa0a66073881cda76016f | refs/heads/master | 2023-04-22T15:22:18.981649 | 2021-05-13T04:18:16 | 2021-05-13T04:18:16 | 319,347,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | #!/usr/bin/python3
'''
Select data from states table and return the first row
the print the state id of that first element
the element to search is given by the user.
'''
import sys
from model_state import Base, State
from sqlalchemy import (create_engine)
from sqlalchemy.orm import Session
if __name__ == "__main__":
    # argv: 1 = mysql username, 2 = password, 3 = database, 4 = state name.
    engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.format
                           (sys.argv[1], sys.argv[2], sys.argv[3]),
                           pool_pre_ping=True)
    Base.metadata.create_all(engine)
    session = Session(engine)
    # First State row (lowest id) whose name matches argv[4] exactly.
    state = (session.query(State).order_by(State.id).filter
             (State.name == sys.argv[4]).first())
    if state:
        print(state.id)
    else:
        print("Not found")
    session.close()
| [
"sebastian.calleu@gmail.com"
] | sebastian.calleu@gmail.com |
c267b35781314c7896acf033300f5734509d3201 | 8205fe05169b8fd478f4f5e6b8d190e0378148b2 | /automationFramework/test_end2end.py | 2f79fbc24b084f97ee6b524e3d849ce565cc64df | [] | no_license | akashgkrishnan/test_automate | 04de6f0f3b3ee3ab92675897451ae53d44a37322 | 0e89e5759edeb6866be252d2d3c742ded0c81c3e | refs/heads/master | 2022-07-09T00:46:22.665077 | 2020-05-12T10:32:36 | 2020-05-12T10:32:36 | 263,295,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,578 | py | from time import sleep
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
import pytest
from automationFramework.utilities.BaseClass import BaseClass
#@pytest.mark.usefixtures('setup')
class TestOne(BaseClass):
    """End-to-end checkout flow test; `self.driver` is provided by BaseClass."""

    def test_case1(self,):
        # Open the shop page and add the "Blackberry" product to the cart.
        self.driver.find_element_by_link_text('Shop').click()
        sleep(2)
        products = self.driver.find_elements_by_xpath('//div[@class="card h-100"]')
        for product in products:
            product_name = product.find_element_by_xpath('div/h4/a').text
            if product_name == 'Blackberry':
                product.find_element_by_xpath('div/button').click()
        # Go to the cart and start checkout.
        self.driver.find_element_by_css_selector('a[class*="btn-primary"]').click()
        sleep(2)
        self.driver.find_element_by_xpath('//button[@class="btn btn-success"]').click()
        sleep(2)
        # Type a prefix and pick "India" from the autocomplete suggestions
        # (explicit wait because the suggestion list loads asynchronously).
        self.driver.find_element_by_id("country").send_keys('Ind')
        wait = WebDriverWait(self.driver, 7)
        wait.until(expected_conditions.presence_of_element_located((By.LINK_TEXT, 'India')))
        self.driver.find_element_by_link_text('India').click()
        # Accept the terms checkbox and submit the purchase.
        self.driver.find_element_by_xpath('//div[@class="checkbox checkbox-primary"]').click()
        self.driver.find_element_by_css_selector('[type="submit"]').click()
        # Verify the success banner and capture a screenshot as evidence.
        sucess_text = self.driver.find_element_by_class_name('alert-success').text
        assert "Success! Thank you!" in sucess_text
        self.driver.get_screenshot_as_file('suces_page.png')
| [
"krishnanag1996@gmail.com"
] | krishnanag1996@gmail.com |
8e7d2f3abd934396afb8202bf74aac908df7bd2c | e6fac8e0289d9f82369d2eb8e22bc175c6f51b3b | /Interview Practice/sumOfTwo/sumOfTwo.py | 5b51c64a9afc26f2a8ec4511ba4c2170686d43a6 | [] | no_license | Zahidsqldba07/CodeFights-9 | f361c15d24f96afa26de08af273a7f8f507ced4a | 6c5d152b1ad35cf178dd74acbc44ceb5fdcdf139 | refs/heads/master | 2023-03-18T23:52:43.274786 | 2017-05-12T07:28:08 | 2017-05-12T07:28:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | def sumOfTwo(a, b, v):
    # Convert a and b to sets for O(1) average-case membership tests.
    a = set(a)
    b = set(b)
    # A pair sums to v exactly when the complement (v - number) is in b.
    for number in a:
        if v - number in b:
            return True
    # No pair (one element from each input) adds up to v.
    return False
| [
"hallosputnik@gmail.com"
] | hallosputnik@gmail.com |
73e9f458e6dc0153719f1d6fd927b85b27897c0b | 854bf3649552aeda06573b7e7fea38e3a14332a1 | /thirdapp/wsgi.py | f6a77b8f75dcf0c9a423740289619093f9b83bfe | [] | no_license | mehulchopradev/charles-django-thirdapp | a90d88034c4233f175d3828f81811105f9eaeb56 | ea2ea37830393a0fbf437cb517129c13fddf696c | refs/heads/master | 2020-04-03T16:48:37.782132 | 2018-10-30T16:30:07 | 2018-10-30T16:30:07 | 155,420,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for thirdapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Select this project's settings module before the WSGI app is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'thirdapp.settings')
# Module-level WSGI callable expected by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"mehul.chopra.dev@gmail.com"
] | mehul.chopra.dev@gmail.com |
a7c6bd2c553b9c2bcf8071d69e20e4e1e3f77a55 | f1df5173f34465c416904c0e119393cbfd9be32d | /app/tasks.py | dd250e279b00490c803a25392897587b83f1a8d8 | [] | no_license | zhangwei1989/microblog | e7765c0aa3f1218292334744f1a22963ecbd4216 | 7f8e8ac74e8114d687d25d1f0c89e49717ff8efd | refs/heads/master | 2022-12-10T14:17:58.795978 | 2019-04-03T08:59:02 | 2019-04-03T08:59:02 | 176,434,070 | 0 | 0 | null | 2022-11-22T03:45:13 | 2019-03-19T05:50:48 | JavaScript | UTF-8 | Python | false | false | 1,718 | py | import sys
import time
import json
from rq import get_current_job
from app import create_app, db
from app.models import Task, User, Post
from flask import render_template
from app.email import send_email
app = create_app()
app.app_context().push()
def _set_task_progress(progress):
    """Record progress (0-100) on the current RQ job and notify the owner.

    Marks the owning Task row complete once progress reaches 100.
    No-op when called outside an RQ worker (no current job).
    """
    job = get_current_job()
    if job:
        job.meta['progress'] = progress
        job.save_meta()
        task = Task.query.get(job.get_id())
        # Push a live notification so the UI can update its progress bar.
        task.user.add_notification('task_progress', {'task_id': job.get_id(),
                                                     'progress': progress})
        if progress >= 100:
            task.complete = True

        db.session.commit()
def export_posts(user_id):
    """Background job: email the user all of their posts as a JSON attachment.

    Runs inside an RQ worker. Progress is reported per post via
    _set_task_progress; on any failure the error is logged and the task
    is closed out at 100% so it never appears stuck in the UI.
    """
    try:
        user = User.query.get(user_id)
        _set_task_progress(0)
        data = []
        i = 0
        total_posts = user.posts.count()
        for post in user.posts.order_by(Post.timestamp.asc()):
            data.append({'body': post.body,
                         'timestamp': post.timestamp.isoformat() + 'Z'})
            # Artificial 1s delay per post (tutorial-style pacing so the
            # progress bar is visible) — remove for production workloads.
            time.sleep(1)
            i += 1
            _set_task_progress(100 * i // total_posts)

        send_email('[Microblog] Your blog posts',
                sender=app.config['ADMINS'][0], recipients=[user.email],
                text_body=render_template('email/export_posts.txt', user=user),
                html_body=render_template('email/export_posts.html', user=user),
                attachments=[('posts.json', 'application/json',
                              json.dumps({'posts': data}, indent=4))],
                sync=True)
    # NOTE(review): bare except catches everything (incl. SystemExit) so the
    # task is always finalized; `except Exception:` would be safer.
    except:
        _set_task_progress(100)
        app.logger.error('Unhandled exception', exc_info=sys.exc_info())
| [
"zhangwei19890518@gmail.com"
] | zhangwei19890518@gmail.com |
54b10beeee3ef88100dbb01782ff9c9e1bb1a0f8 | 05217f20200f03ff18f522c79377426373f7cf9f | /flaskproject/blueprintproject - 副本/blueprintproject/user/__init__.py | 34a3fc3a61d8eed522a78a215f35604749d59be5 | [] | no_license | njw-666/1118Django | d381b90f1148f9ae8eb6baa00b4600e01b9512a5 | c3cae1f832114e79b73ec11b39130eee2ea1655c | refs/heads/master | 2022-11-20T07:52:55.846013 | 2020-03-23T08:29:07 | 2020-03-23T08:29:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | ## 子应用的初始化文件
from flask import Blueprint
from flask_restful import Api

# Blueprint and REST API object for the "user" sub-application.
user_bl = Blueprint("user",__name__)
api=Api(user_bl)
# from user.views import *
# Imported after the blueprint exists so views can register against it.
from .models import *
from blueprintproject.user.views import *
## Collect the routes.
api.add_resource(Demo,"/demo/")
"str_wjp@126.com"
] | str_wjp@126.com |
af5fc97d37e7ae14f03fe6da6e8adbca257be03a | 5a61eb222fda029d2b0a8169d6508bf8b3222d57 | /opinion_dynamics/opinion_dynamics_on_hete_social_distance_network.py | 4c2450705ea23194e50fbbcbb9f34c845d45c062 | [] | no_license | Dcomplexity/research | f7b5ed539ce63b16026bddad0d08b3d23c3aa2a8 | 7e487f765b7eee796464b6a1dc90baa5d3e5d5db | refs/heads/master | 2022-04-16T19:02:38.634091 | 2020-04-13T02:31:28 | 2020-04-13T02:31:28 | 199,882,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,092 | py | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import networkx as nx
import random
from network_build import *
def get_network(mul_dimen, degree, group_size, group_base, group_length, alpha, beta):
    """Build the heterogeneous social-distance network and return its data.

    Returns a 4-tuple: (adjacency matrix, per-node neighbour index lists,
    node view, edge view). Note: `beta` is accepted but not forwarded to
    the generator, mirroring the original call.
    """
    graph = generate_hete_network_connected(mul_dimen, degree, group_size,
                                            group_base, group_length, alpha)
    adjacency = nx.to_numpy_array(graph)
    # One list of neighbour indices per row of the adjacency matrix.
    neighbour_lists = [list(np.where(row == 1)[0]) for row in adjacency]
    return adjacency, neighbour_lists, graph.nodes, graph.edges
class Agent:
    """One opinion-holding node in the network.

    Stores the current opinion plus a snapshot (`old_op`) of the opinion
    from the previous round, so the whole population can update
    synchronously: everyone reads snapshots, then everyone commits.
    """

    def __init__(self, id, init_op, neighbor):
        self.id = id
        self.op = init_op
        self.old_op = init_op
        self.neighbor = neighbor

    def set_op(self, new_op):
        """Stage a new opinion for the current round."""
        self.op = new_op

    def get_op(self):
        """Return the current (possibly just-staged) opinion."""
        return self.op

    def get_old_op(self):
        """Return the opinion as of the last backup()."""
        return self.old_op

    def get_id(self):
        """Return this agent's index in the population."""
        return self.id

    def backup(self):
        """Commit the staged opinion as the snapshot for the next round."""
        self.old_op = self.op

    def get_neighbor(self):
        """Return a shallow copy of the neighbour index list."""
        return list(self.neighbor)
def initialize_population(group_size, group_base, group_length, mul_dimen, degree, alpha, beta):
    """Create the agent population on top of a freshly built network.

    Each agent i receives the initial opinion ((i + N/2) % N) / N, i.e.
    opinions evenly spread over [0, 1) with a half-cycle offset, and the
    neighbour list taken from the generated network.

    Returns:
        list[Agent], one agent per network node.
    """
    # Removed: unused local `total_num` and the dead commented-out
    # two-branch initialisation that the modular formula replaced.
    adj_array, adj_link, nodes, edges = get_network(mul_dimen, degree, group_size, group_base,
                                                    group_length, alpha, beta)
    population = []
    popu_num = len(nodes)
    for i in nodes:
        # Evenly spaced opinion, shifted by half the population size.
        population.append(Agent(i, (i + popu_num / 2) % popu_num / popu_num, adj_link[i]))
    return population
def run(popu, bound, iter_num):
    """Run bounded-confidence opinion dynamics for `iter_num` rounds.

    Each round, every agent averages the previous-round opinions of
    itself and of those neighbours whose opinions lie within `bound` on
    the circular opinion space [0, 1) (distance wraps around the seam).
    Updates are synchronous: all agents read snapshots, then all commit.

    Returns:
        Per-agent list of opinions recorded at the start of each round.
    """
    n_agents = len(popu)
    history = [[] for _ in range(n_agents)]
    for _ in range(iter_num):
        # Phase 1: every agent computes its next opinion from snapshots.
        for idx, agent in enumerate(popu):
            own_op = agent.get_old_op()
            history[idx].append(own_op)
            candidates = agent.get_neighbor() + [idx]
            within_bound = []
            for other in candidates:
                other_op = popu[other].get_old_op()
                gap = abs(own_op - other_op)
                # Circular distance: also "close" if near the 0/1 seam.
                if gap < bound or (1.0 - gap) < bound:
                    within_bound.append(other_op)
            agent.set_op(np.mean(within_bound))
        # Phase 2: commit all staged opinions simultaneously.
        for agent in popu:
            agent.backup()
    return history
if __name__ == '__main__':
    # Network / population parameters (suffix "_r" = values for this run).
    group_size_r = 50
    group_base_r = 2
    group_length_r = 6
    mul_dimen_r = 10
    degree_r = 20
    alpha_r = 2
    beta_r = 2
    # Total agents: group_size * group_base^(group_length - 1) = 1600.
    total_num_r = group_size_r * (group_base_r ** (group_length_r - 1))
    popu_r = initialize_population(group_size_r, group_base_r, group_length_r, mul_dimen_r, degree_r, alpha_r, beta_r)
    # 50 rounds of dynamics with confidence bound 0.3.
    op_history_r = run(popu_r, 0.3, 50)
    # Plot every agent's opinion trajectory (rows are agents, so transpose).
    op_history_pd = pd.DataFrame(op_history_r)
    plt.figure()
    op_history_pd.T.plot(legend=False)
    plt.show()
plt.show()
print(op_history_pd) | [
"cdengcnc@sjtu.edu.cn"
] | cdengcnc@sjtu.edu.cn |
df5a6b91b902fa050e18a252084453dd0d8a2d3d | 509b8316075f18612f5600993ccefbfe14527a35 | /src/_spacefligth/pipeline_registry.py | a674db6c272aee3dfe2557249f4228fec54e26d8 | [] | no_license | Princekrampah/SpaceFlightKedro | 44a2eb14a5e6356f136fa45dd0c9496a514aa5d7 | deab13030e4181fae33ce452a96403f549974750 | refs/heads/master | 2023-05-05T00:08:53.814882 | 2021-05-30T14:38:14 | 2021-05-30T14:38:14 | 372,237,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | from typing import Dict
from kedro.pipeline import Pipeline
from _spacefligth.pipelines import data_processing as dp
from _spacefligth.pipelines import data_science as ds
def register_pipelines() -> Dict[str, Pipeline]:
    """Register the project's pipelines.

    Returns:
        A mapping from pipeline name to ``Pipeline``: "dp" (data
        processing), "ds" (data science), and "__default__" (both,
        run in sequence).
    """
    processing = dp.create_pipeline()
    science = ds.create_pipeline()
    return {
        "__default__": processing + science,
        "dp": processing,
        "ds": science,
    }
| [
"jsksprince@gmail.com"
] | jsksprince@gmail.com |
cc1354efb7277cd1d71af9e0579c730536239931 | 14856ffe01c711af7a41af0b1abf0378ba4ffde6 | /Python/Django/group_project/apps/travel/models.py | 34f47a390411072fa349b8cca78f69d1ffdf6d69 | [] | no_license | sharonanchel/coding-dojo | 9a8db24eec17b0ae0c220592e6864510297371c3 | d6c4a7efd0804353b27a49e16255984c4f4b7f2a | refs/heads/master | 2021-05-05T18:17:48.101853 | 2017-06-23T23:53:51 | 2017-06-23T23:53:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Tourist (models.Model):
    """A registered traveller, linked to exactly one destination."""
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    memberID = models.IntegerField()
    # NOTE(review): despite the "_id" name this is a ForeignKey field (the
    # DB column becomes "destination_id_id"); no on_delete is given, which
    # is mandatory on Django >= 2.0 — confirm the project's Django version.
    destination_id = models.ForeignKey('Destination')
class Destination (models.Model):
    """A travel destination (city within a country) tourists can visit."""
    country = models.CharField(max_length=100)
    city = models.CharField(max_length=100)
    # max_length on a TextField is enforced by forms only, not at the DB level.
    description = models.TextField(max_length=1000)
| [
"jao.colin@gmail.com"
] | jao.colin@gmail.com |
f1ee89673ec345caeddc3233b30a649d55c62bf4 | d3e252c5c8a507b14aad3fba419c2c4535c49e27 | /migrations/versions/afe21b1fbed1_comment.py | 0b9201337663f2125c37b02f1562fefd02d97b10 | [] | no_license | MutuaFranklin/Watchlist | 2076dadc02eaa0599aec89393dc2c9721e1fdc5b | 73b033342fb58da9aa7d3911e38beb93e557aa47 | refs/heads/main | 2023-07-22T13:08:39.947380 | 2021-08-23T11:45:22 | 2021-08-23T11:45:22 | 392,306,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,074 | py | """Comment
Revision ID: afe21b1fbed1
Revises: 24b376f6e5fa
Create Date: 2021-08-12 13:29:58.852546
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'afe21b1fbed1'
down_revision = '24b376f6e5fa'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: create the "reviews" table.

    NOTE(review): "posted" is a Time column (time of day only); if a full
    timestamp was intended, a follow-up migration would be needed.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('reviews',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('movie_id', sa.Integer(), nullable=True),
    sa.Column('movie_title', sa.String(), nullable=True),
    sa.Column('image_path', sa.String(), nullable=True),
    sa.Column('movie_review', sa.String(), nullable=True),
    sa.Column('posted', sa.Time(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the "reviews" table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('reviews')
    # ### end Alembic commands ###
| [
"franklin.mutua@student.moringaschool.com"
] | franklin.mutua@student.moringaschool.com |
d6c07151daabf0c745ea0b53d3309a2a5408d995 | a4681043cb56a9ab45be32a62fa9700b391f087f | /19-Beautiful_Soup/10_of_11_Reading_Text.py | 9948d5ba655ac865cc685313ffa24266d7551eda | [] | no_license | MarceloDL-A/Python | b16b221ae4355b6323092d069bf83d1d142b9975 | c091446ae0089f03ffbdc47b3a6901f4fa2a25fb | refs/heads/main | 2023-01-01T02:29:31.591861 | 2020-10-27T19:04:11 | 2020-10-27T19:04:11 | 301,565,957 | 0 | 0 | null | 2020-10-27T19:04:12 | 2020-10-05T23:41:30 | Python | MacCentralEurope | Python | false | false | 2,953 | py | """
WEB SCRAPING WITH BEAUTIFUL SOUP
Reading Text
When we use BeautifulSoup to select HTML elements, we often want to grab the text inside of the element, so that we can analyze it. We can use .get_text() to retrieve the text inside of whatever tag we want to call it on.
<h1 class="results">Search Results for: <span class='searchTerm'>Funfetti</span></h1>
If this is the HTML that has been used to create the soup object, we can make the call:
soup.get_text()
Which will return:
'Search Results for: Funfetti'
Notice that this combined the text inside of the outer h1 tag with the text contained in the span tag inside of it! Using get_text(), it looks like both of these strings are part of just one longer string. If we wanted to separate out the texts from different tags, we could specify a separator character. This command would use a . character to separate:
soup.get_text('|')
Now, the command returns:
'Search Results for: |Funfetti'
"""
import requests
from bs4 import BeautifulSoup
prefix = "https://content.codecademy.com/courses/beautifulsoup/"
webpage_response = requests.get('https://content.codecademy.com/courses/beautifulsoup/shellter.html')
webpage = webpage_response.content
soup = BeautifulSoup(webpage, "html.parser")
turtle_links = soup.find_all("a")
links = []
#go through all of the a tags and get the links associated with them:
for a in turtle_links:
links.append(prefix+a["href"])
#Define turtle_data:
turtle_data = {}
#follow each link:
for link in links:
webpage = requests.get(link)
turtle = BeautifulSoup(webpage.content, "html.parser")
turtle_name = turtle.select(".name")[0].get_text()
turtle_data[turtle_name] = [turtle.find("ul").get_text("|").split("|")]
print(turtle_data)
"""
After the loop, print out turtle_data. We have been storing the names as the whole p tag containing the name.
Instead, letís call get_text() on the turtle_name element and store the result as the key of our dictionary instead.
hint:
turtle_name should now be equal to something like:
turtle.select(".name")[0].get_text()
"""
"""
Instead of associating each turtle with an empty list, letís have each turtle associated with a list of the stats that are available on their page.
It looks like each piece of information is in a li element on the turtleís page.
Get the ul element on the page, and get all of the text in it, separated by a '|' character so that we can easily split out each attribute later.
Store the resulting string in turtle_data[turtle_name] instead of storing an empty list there.
Hint:
At this point, the value of each turtle_data[turtle_name] should look something like:
turtle.find("ul").get_text("|")
"""
"""
When we store the list of info in each turtle_data[turtle_name], separate out each list element again by splitting on '|'.
Hint
At this point, the value of each turtle_data[turtle_name] should look something like:
turtle.find("ul").get_text("|").split("|")
"""
| [
"marcelo.delmondes.lima@usp.br"
] | marcelo.delmondes.lima@usp.br |
8f11c565a577e78d997f30bb8cfbc51293c2337a | d4280eca1a9badb0a4ad2aa22598616eedece373 | /PyQt/PyQt5 tutorial/Dialogs/inputdialog.py | be359884367273f401bca2ba1344afedd634941e | [] | no_license | Little-Captain/py | 77ec12bb2aaafe9f709a70831266335b03f63663 | 74ba3c3449e7b234a77500a17433e141e68169f7 | refs/heads/master | 2021-06-09T11:33:23.205388 | 2019-11-22T01:17:44 | 2019-11-22T01:17:44 | 131,844,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | from PyQt5.QtWidgets import (QWidget, QPushButton, QLineEdit,
QInputDialog, QApplication)
import sys
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.btn = QPushButton('Dialog', self)
self.btn.move(20, 20)
self.btn.clicked.connect(self.showDialog)
self.le = QLineEdit(self)
self.le.move(130, 22)
self.setGeometry(300, 300, 290, 150)
self.setWindowTitle('Input dialog')
self.show()
def showDialog(self):
# This line displays the input dialog.
# The first string is a dialog title,
# the second one is a message within the dialog.
# The dialog returns the entered text and a boolean value.
# If we click the Ok button, the boolean value is true.
text, ok = QInputDialog.getText(self, 'Input Dialog',
'Enter your name:')
if ok:
self.le.setText(str(text))
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_()) | [
"littlecaptain@foxmail.com"
] | littlecaptain@foxmail.com |
b2c37f8ae5e7c59302df4e81734325b8f55263af | 430b9e03e36e355bba475df49505011f99fa0819 | /keji/lesson03_data_type_list (2)/demo7_tuple.py | 4d383d19df25b0628d127fe4cd18ac2cd5616b1a | [] | no_license | gaoyang1224/mysite | b43e5d5e378b810b94dd60ffcac1c992173cc11a | 72150c67b9590b0498241a1eacb2669a836520ff | refs/heads/master | 2023-05-01T21:42:40.096287 | 2021-05-20T14:40:30 | 2021-05-20T14:40:30 | 368,254,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | # 元组是用 () 表示
a = (1,2)
print(a)
print(type(a))
print(len(a))
# 元组如果是空的
a = ()
print(a)
print(type(a))
print(len(a))
# 如果表示 1 个元素的元组:
# TODO: 一定要在元素后加一个 , 不然的话,元组不生效
a = ("星河",1,2,3)
print(a)
print(type(a))
print(len(a))
# 元组不可变类型,只能查
print(a[0])
print(a[1:3])
print(a.index("星河"))
# 字典
# 集合
# 数据运算, + - 比较 and not or, 成员
# 作业。 | [
"15195989321@163.com"
] | 15195989321@163.com |
67cf26c42ec0530cc7f8e5bf1cb724eba7d8bf9d | 049ca48d22011604f4c7594c42467e0a6d95d7f5 | /tests/python3/kyu_5/test_convert_string_to_camel_case.py | b35988f80a839c8e29b752a13eb589d947b8f400 | [] | no_license | wangerde/codewars | 3ffdf560f0fd2333ab2711d20e2f2b32588fd9fd | bcfd15aba49f87c0a64cf840e96df06ef5ec9162 | refs/heads/master | 2021-01-23T05:35:29.217960 | 2017-01-15T18:23:30 | 2017-01-15T18:23:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | # pylint: disable=missing-docstring
"""Convert string to camel case"""
import pytest
from python3.kyu_5.convert_string_to_camel_case import to_camel_case
EXAMPLES = (
('text', 'expected'),
[
('', ''),
('the_stealth_warrior', 'theStealthWarrior'),
('The-Stealth-Warrior', 'TheStealthWarrior'),
('A-B-C', 'ABC'),
]
)
@pytest.mark.parametrize(*EXAMPLES)
def test_returns_correct_result(text, expected):
assert to_camel_case(text) == expected
| [
"karateev.pavel@ya.ru"
] | karateev.pavel@ya.ru |
a474200d782ba6c520d3792b044a9ebced08b3a5 | 293a1d4ce3e3ec034fd4d662cb8dcc8c58b512e4 | /tools/scripts/prepare_submission.py | 516857ea830e2fac07e9523eb3457e5ab7411d2c | [] | no_license | czhiming/POSTECH | 87475137674dbce3d6add290ef455ca253d7c423 | 7e0436fe74e55ce0ec4875bc8d70964f85d64209 | refs/heads/master | 2021-09-02T12:11:17.001027 | 2018-01-02T13:51:11 | 2018-01-02T13:51:11 | 116,019,207 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
if len(sys.argv) < 2:
print "usage: {} method-name < input.txt > output.txt".format(sys.argv[0])
exit(1)
method = sys.argv[1]
for idx, line in enumerate(sys.stdin):
print "{}\t{}\t{}".format(method, idx+1, line.strip())
| [
"qqchenzhiming@jxnu.edu.cn"
] | qqchenzhiming@jxnu.edu.cn |
4f7db53b849c5840d0ae7303bb14c6f8fdf62404 | 55ac013ac7fc80d878fb47def8d6218c2fe2d391 | /backend/home/management/commands/load_initial_data.py | f71a44de842eb711d2760b024594c8fce1b4e607 | [] | no_license | crowdbotics-apps/romano-at-law-3401 | 98f2845d138b9589b89b660a580beaad23050c25 | ae58daf3da747a5b19af96a7186a09424c1800c8 | refs/heads/master | 2022-12-13T14:02:46.247773 | 2019-05-15T17:32:08 | 2019-05-15T17:32:08 | 186,874,268 | 0 | 0 | null | 2022-12-06T16:01:29 | 2019-05-15T17:32:03 | JavaScript | UTF-8 | Python | false | false | 739 | py |
from django.core.management import BaseCommand
from home.models import CustomText, HomePage
def load_initial_data():
    """Seed the database with the default HomePage and CustomText rows."""
    title = 'romano_at_law_3401'
    body = """
<h1 class="display-4 text-center">romano_at_law_3401</h1>
<p class="lead">
This is the sample application created and deployed from the crowdbotics slack app. You can
view list of packages selected for this application below
</p>"""
    CustomText.objects.create(title=title)
    HomePage.objects.create(body=body)
class Command(BaseCommand):
    """Management command: `manage.py <command>` seeds the initial data."""

    help = 'Load initial data to db'
    can_import_settings = True

    def handle(self, *args, **options):
        # Delegate to the module-level seeding helper.
        load_initial_data()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
b61316b862a7647911f05d2eea8e9e749f65e77d | 1eaaeee197d0809f354b8dfe669ecc2fe8424757 | /11_PaginationDRF/PaginationDRF/settings.py | c7083642ab915643a3596995ebe25064b54bcf3f | [
"MIT"
] | permissive | jhleed/LikeLion_Django_Study_Summary | 4ec3ae9b05b24eca370075c613c70211da957c1c | c788182af5bcfd16bdd4b57235a48659758e494b | refs/heads/master | 2022-03-27T16:53:42.886054 | 2019-12-07T03:49:33 | 2019-12-07T03:49:33 | 265,724,111 | 1 | 0 | MIT | 2020-05-21T01:22:33 | 2020-05-21T01:22:33 | null | UTF-8 | Python | false | false | 3,292 | py | """
Django settings for PaginationDRF project.
Generated by 'django-admin startproject' using Django 2.1.8.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ihc!jbbs!+4_cr)$y*@74&0a63zd$vc)oaxitr1i5vdhp3z-oq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'post.apps.PostConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'PaginationDRF.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'PaginationDRF.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS':
'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10,
}
| [
"alstn2468_@naver.com"
] | alstn2468_@naver.com |
93216cfecb0a8cd165fb8267341028ee1f87dba0 | a4c04117685c3d28dd60bdfc45654cb2c935f746 | /template_match_vswir2dimac.py | a6bc5dfce49cf4c30f862b2f2b88c960f7ebc8cb | [] | no_license | DKnapp64/General_Python_Codes | 1ca40779bb381d526d61c5d5fedcc76ae797c590 | 8d4669c82c17455640a0a3123f92760cd65cc26a | refs/heads/main | 2023-02-28T05:55:46.018482 | 2021-02-01T21:55:16 | 2021-02-01T21:55:16 | 335,077,354 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,162 | py | #!/bin/env python2
import cv2
from PIL import Image
import numpy as np
import gdal, gdalconst
import os, sys
import time
import random
## import pdb
def main(in1, in2, scorethresh, rmsethresh, outf):
scorethresh = float(scorethresh)
rmsethresh = float(rmsethresh)
## reasonable values for Score threshold = 7000
## reasonable values for RMSE threshold = 5.0
## def surfit(in1, in2):
## in1 = '/lustre/scratch/cao/OahuVSWIRTemp/rad/patch13_20170930_atrem_refl'
## in2 = '/Volumes/DGE/CAO/caodata/Scratch/dknapp/Kaneohe/patch13_20170930_dimac_match'
## in1 = '/lustre/scratch/cao/OahuVSWIRTemp/rad/patch4and5_20171001_atrem_refl'
## in2 = '/Volumes/DGE/CAO/caodata/Scratch/dknapp/Kaneohe/patch4and5_20171001_dimac_match'
## in1 = '/lustre/scratch/cao/OahuVSWIRTemp/rad/patchHIMB_20171001_atrem_refl3'
## in2 = '/Volumes/DGE/CAO/caodata/Scratch/dknapp/Kaneohe/patchHIMB_20170930_and_20171001_dimac_match'
## in1 = '/lustre/scratch/cao/OahuVSWIRTemp/rad/patch42_20170930_atrem_refl'
## in2 = '/Volumes/DGE/CAO/caodata/Scratch/dknapp/Kaneohe/patch42_20170930_dimac_match'
## in1 = '/lustre/scratch/cao/OahuVSWIRTemp/rad/patch44_20170930_atrem_refl'
## in2 = '/Volumes/DGE/CAO/caodata/Scratch/dknapp/Kaneohe/patch44_20170930_dimac_match'
## in1 = '/lustre/scratch/cao/OahuVSWIRTemp/rad/patch25_20170930_atrem_refl'
## in2 = '/Volumes/DGE/CAO/caodata/Scratch/dknapp/Kaneohe/patch25_20171001_dimac_match'
vswirds = gdal.Open(in1)
vswirarr = np.zeros((vswirds.RasterYSize, vswirds.RasterXSize, 3), dtype=np.float32)
vswir8uint = np.zeros((vswirds.RasterYSize, vswirds.RasterXSize, 3), dtype=np.uint8)
bandit = vswirds.GetRasterBand(45)
vswirarr[:,:,0] = bandit.ReadAsArray()
bandit = vswirds.GetRasterBand(27)
vswirarr[:,:,1] = bandit.ReadAsArray()
bandit = vswirds.GetRasterBand(9)
vswirarr[:,:,2] = bandit.ReadAsArray()
sort1 = np.sort(vswirarr[:,:,0].flatten())
sort2 = np.sort(vswirarr[:,:,1].flatten())
sort3 = np.sort(vswirarr[:,:,2].flatten())
## find how many Nans are in each band
numnan1 = np.sum(np.logical_or(np.isnan(vswirarr[:,:,0]), (vswirarr[:,:,0] < -50.0)))
numnan2 = np.sum(np.logical_or(np.isnan(vswirarr[:,:,1]), (vswirarr[:,:,1] < -50.0)))
numnan3 = np.sum(np.logical_or(np.isnan(vswirarr[:,:,2]), (vswirarr[:,:,2] < -50.0)))
min1 = sort1[np.int(np.floor(0.02 * (len(sort1)-numnan1)))]
max1 = sort1[np.int(np.floor(0.98 * (len(sort1)-numnan1)))]
min2 = sort2[np.int(np.floor(0.02 * (len(sort2)-numnan2)))]
max2 = sort2[np.int(np.floor(0.98 * (len(sort2)-numnan2)))]
min3 = sort3[np.int(np.floor(0.02 * (len(sort3)-numnan3)))]
max3 = sort3[np.int(np.floor(0.98 * (len(sort3)-numnan3)))]
scale1 = 255./(max1-min1)
scale2 = 255./(max2-min2)
scale3 = 255./(max3-min3)
shift1 = -(min1*255.)
shift2 = -(min2*255.)
shift3 = -(min3*255.)
vswir8uint[:,:,0] = cv2.convertScaleAbs(vswirarr[:,:,0], alpha=scale1, beta=shift1)
vswir8uint[:,:,1] = cv2.convertScaleAbs(vswirarr[:,:,1], alpha=scale2, beta=shift2)
vswir8uint[:,:,2] = cv2.convertScaleAbs(vswirarr[:,:,2], alpha=scale3, beta=shift3)
bandit = None
temp1 = random.randint(0,100000000)
temp2 = random.randint(0,100000000)
nametemp1 = "%010d" % temp1
nametemp2 = "%010d" % temp2
gray1 = cv2.cvtColor(vswir8uint, cv2.COLOR_RGB2GRAY)
grayimg1 = Image.fromarray(gray1, mode='L')
grayimg1.save(nametemp1+".jpg")
dimacds = gdal.Open(in2)
bandit = dimacds.GetRasterBand(1)
driver = gdal.GetDriverByName('MEM')
outds = driver.Create('', vswirds.RasterXSize, vswirds.RasterYSize, 3, bandit.DataType)
refProj = vswirds.GetProjection()
refTrans = vswirds.GetGeoTransform()
outds.SetGeoTransform(refTrans)
outds.SetProjection(refProj)
gdal.ReprojectImage(dimacds, outds, refProj, refProj, gdalconst.GRA_Average)
dimacarr = np.zeros((outds.RasterYSize, outds.RasterXSize, 3), dtype=np.uint8)
bandit = outds.GetRasterBand(1)
dimacarr[:,:,0] = bandit.ReadAsArray()
bandit = outds.GetRasterBand(2)
dimacarr[:,:,1] = bandit.ReadAsArray()
bandit = outds.GetRasterBand(3)
dimacarr[:,:,2] = bandit.ReadAsArray()
bandit = None
dimacds = None
## img2 = cv2.imread(in2)
gray2 = cv2.cvtColor(dimacarr, cv2.COLOR_BGR2GRAY)
grayimg2 = Image.fromarray(gray2, mode='L')
grayimg2.save(nametemp2+".jpg")
tilerows = int(np.floor(dimacarr.shape[0]/20.)) - 2
tilecols = int(np.floor(dimacarr.shape[1]/20.)) - 2
f = open(outf, 'w')
f.write("; ENVI Image to Image GCP File\n")
f.write("; base file: %s\n" % (in2))
f.write("; warp file: %s\n" % (in1))
f.write("; Base Image (x,y), Warp Image (x,y)\n")
f.write(";\n")
## offset = 25
offset = 10
listpoints = []
method = eval('cv2.TM_CCOEFF')
for j in range(tilerows):
rowrange = (25+j*20, 25+(j+1)*20)
for g in range(tilecols):
colrange = (25+g*20, 25+(g+1)*20)
## pdb.set_trace()
template = gray1[rowrange[0]:rowrange[1],colrange[0]:colrange[1]]
w, h = template.shape[::-1]
result = cv2.matchTemplate(gray2, template, method)
resultsub = result[(rowrange[0]-offset):(rowrange[1]-offset),(colrange[0]-offset):(colrange[1]-offset)]
minval, maxval, minloc, maxloc = cv2.minMaxLoc(resultsub)
tempx = maxloc[0]+(colrange[0]-offset)+10
tempy = maxloc[1]+(rowrange[0]-offset)+10
dimacx = colrange[0]+10
dimacy = rowrange[0]+10
diffx = tempx - dimacx
diffy = tempy - dimacy
vswirx = dimacx - diffx
vswiry = dimacy - diffy
listpoints.append((dimacx, dimacy, vswirx, vswiry))
## if ((np.abs(dimac2x-dimac1x) < 80) and (np.abs(dimac2y-dimac1y) < 80)):
f.write(("%10.2f %10.2f " % (dimacx*10.0, dimacy*10.0)) + ("%10.2f %10.2f" % (vswirx, vswiry)) + (" %f\n" % maxval))
f.close()
time.sleep(3.0)
f = open(outf, 'r')
listpoints = f.readlines()
listpoints = listpoints[5:]
f.close()
inarr1 = np.array([[float(l.split()[0]), float(l.split()[1]), 0.0] for l in listpoints])
inarr2 = np.array([[float(l.split()[2]), float(l.split()[3]), 0.0] for l in listpoints])
maxvals = np.array([[float(l.split()[4])] for l in listpoints])
n = inarr1.shape[0]
pad = lambda x:np.hstack([x, np.ones((x.shape[0], 1))])
unpad = lambda x: x[:,:-1]
X = pad(inarr1)
Y = pad(inarr2)
A, res, rank, s = np.linalg.lstsq(X, Y)
transform = lambda x: unpad(np.dot(pad(x), A))
preds = transform(inarr1)
diffx = preds[:,0] - inarr2[:,0]
diffy = preds[:,1] - inarr2[:,1]
dists = np.sqrt(np.power(diffx,2) + np.power(diffy,2))
rmse = np.sqrt(np.mean(np.power(dists,2)))
np.savez('testout.npz', inarr1=inarr1, inarr2=inarr2, maxvals=maxvals, dists=dists, rmse=rmse)
f = open(outf, 'w')
f.write("; ENVI Image to Image GCP File\n")
f.write("; base file: %s\n" % (in2))
f.write("; warp file: %s\n" % (in1))
f.write("; Base Image (x,y), Warp Image (x,y)\n")
f.write(";\n")
for j in range(inarr1.shape[0]):
if (dists[j] < rmsethresh) and (maxvals[j] > scorethresh):
f.write(("%10.2f %10.2f " % (inarr1[j,0], inarr1[j,1])) + ("%10.2f %10.2f\n" % (inarr2[j,0], inarr2[j,1])))
f.close()
try:
os.remove(nametemp1+'.jpg')
except:
pass
try:
os.remove(nametemp2+'.jpg')
except:
pass
if __name__ == "__main__":
if len( sys.argv ) != 6:
print "[ ERROR ] you must supply 5 arguments: template_match_vswir2dimac.py vswirimage dimacimage scorethrshold rmsethreshold outputfile"
print "where:"
print " vswirimage = an orthocorrected VSWIR image to warp to the DiMAC image"
print " dimacimage = an orthocorrected DiMAC image to use as the base"
print " scorehreshold = The value of the template matching coefficient threshold BELOW which points are rejected (usually 1000000.0)"
print " rmsethreshold = The value of the point RMSE value threshold ABOVE which points are rejected (for DiMAC, usually 30.0)"
print " outputfile = an output text file in ENVI image-to-image for warping the first DiMAC image to the second."
print ""
sys.exit( 1 )
print main( sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5] )
| [
"dknapp4@asu.edu"
] | dknapp4@asu.edu |
ad40ce01d4d7c2bc546c2517391733816774e136 | ab98aaf1b40a5f2a7ab3c4937f7918421e24ea08 | /awacs/ssmmessages.py | 2908a3e3203504588532e773c47c2d57b51cfca3 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | bruvio/awacs | 6e7b7f2b5feddf792d983fc187a6460c7125ed1f | 9b9140a645219a4a9f606f97f19893d69bdc8494 | refs/heads/master | 2023-02-23T11:41:24.862343 | 2021-02-01T05:23:11 | 2021-02-01T05:23:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | # Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from aws import Action as BaseAction
from aws import BaseARN
service_name = 'Amazon Session Manager Message Gateway Service'
prefix = 'ssmmessages'
class Action(BaseAction):
def __init__(self, action=None):
sup = super(Action, self)
sup.__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource='', region='', account=''):
sup = super(ARN, self)
sup.__init__(service=prefix, resource=resource, region=region,
account=account)
CreateControlChannel = Action('CreateControlChannel')
CreateDataChannel = Action('CreateDataChannel')
OpenControlChannel = Action('OpenControlChannel')
OpenDataChannel = Action('OpenDataChannel')
| [
"mark@peek.org"
] | mark@peek.org |
526e46e5dd05ee4442f1b022940b7ec2f78eb4b8 | a566cb316ab93aeadd366b148f5110c327c7eb2b | /chp3/ex4.py | 8c89bcf4faccc137baf37af597a0523e2359341d | [] | no_license | piochelepiotr/crackingTheCode | 4aeaffd2c46b2761b2f9642107292d0932731489 | 163ff60f723869a7096b330965d90dc1443d7199 | refs/heads/master | 2021-06-20T21:30:56.033989 | 2021-01-13T08:44:57 | 2021-01-13T08:44:57 | 172,414,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | import stack
class MyQueue:
def __init__(self):
self.in_stack = stack.Stack()
self.out_stack = stack.Stack()
def push(self, x):
self.in_stack.push(x)
def pull(self):
if self.out_stack.size() == 0:
if self.in_stack.size() == 0:
raise Exception("empty queue")
while self.in_stack.size() > 0:
self.out_stack.push(self.in_stack.pop())
return self.out_stack.pop()
| [
"piotr.wolski@telecom-paristech.fr"
] | piotr.wolski@telecom-paristech.fr |
0a83cf1bd9b3cc886f61571f18089d7a006463de | 55173732ce1f2537a4fd8a6137b2a813f594b250 | /azure-mgmt-scheduler/azure/mgmt/scheduler/models/oauth_authentication.py | 1b85128c1419f34d634eedd5dbcb6e5d491038fb | [
"Apache-2.0"
] | permissive | dipple/azure-sdk-for-python | ea6e93b84bfa8f2c3e642aecdeab9329658bd27d | 9d746cb673c39bee8bd3010738c37f26ba6603a4 | refs/heads/master | 2020-02-26T15:32:39.178116 | 2016-03-01T19:25:05 | 2016-03-01T19:25:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,987 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .http_authentication import HttpAuthentication
class OAuthAuthentication(HttpAuthentication):
"""OAuthAuthentication
:param str type: Gets or sets the http authentication type. Possible
values include: 'NotSpecified', 'ClientCertificate',
'ActiveDirectoryOAuth', 'Basic'
:param str secret: Gets or sets the secret.
:param str tenant: Gets or sets the tenant.
:param str audience: Gets or sets the audience.
:param str client_id: Gets or sets the client identifier.
"""
_required = []
_attribute_map = {
'secret': {'key': 'secret', 'type': 'str'},
'tenant': {'key': 'tenant', 'type': 'str'},
'audience': {'key': 'audience', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
}
def __init__(self, type=None, secret=None, tenant=None, audience=None, client_id=None):
super(OAuthAuthentication, self).__init__(type=type)
self.secret = secret
self.tenant = tenant
self.audience = audience
self.client_id = client_id
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
4637bc96cd5dc021a8983c88d76563d4cd4c56df | eb7bf9ee76f3b38ef11b09440934b36a64639396 | /castero/episode.py | 059bc84c4f1c53ec2168dd6c531b551f326f1ad2 | [
"MIT"
] | permissive | Dramicas/castero | 9cea0dc5d5de949f7df76308ce221a28cbf8bba8 | 9d7edb39ab21c9bd8e6b94e134ef336358f74222 | refs/heads/master | 2020-03-16T16:06:59.623720 | 2018-05-06T19:20:57 | 2018-05-06T19:20:57 | 132,773,066 | 1 | 0 | null | 2018-05-09T14:58:16 | 2018-05-09T14:58:15 | null | UTF-8 | Python | false | false | 7,435 | py | import os
import threading
from castero import helpers
from castero.datafile import DataFile
class Episode:
"""The Episode class.
This class represents a single episode from a podcast feed.
"""
def __init__(self, feed, title=None, description=None, link=None,
pubdate=None, copyright=None, enclosure=None) -> None:
"""Initializes the object.
At least one of a title or description must be specified.
Args:
feed: the feed that this episode is a part of
title: (optional) the title of the episode
description: (optional) the description of the episode
link: (optional) a link to the episode
pubdate: (optional) the date the episode was published, as a string
copyright: (optional) the copyright notice of the episode
enclosure: (optional) a url to a media file
"""
assert title is not None or description is not None
self._feed = feed
self._title = title
self._description = description
self._link = link
self._pubdate = pubdate
self._copyright = copyright
self._enclosure = enclosure
def __str__(self) -> str:
"""Represent this object as a single-line string.
Returns:
string: this episode's title, if it exists, else its description
"""
if self._title is not None:
representation = self._title
else:
representation = self._description
return representation.split('\n')[0]
def _feed_directory(self) -> str:
"""Gets the path to the downloaded episode's feed directory.
This method does not ensure whether the directory exists -- it simply
acts as a single definition of where it _should_ be.
Returns:
str: a path to the feed directory
"""
feed_dirname = helpers.sanitize_path(str(self._feed))
return os.path.join(DataFile.DOWNLOADED_DIR, feed_dirname)
def get_playable(self) -> str:
"""Gets a playable path for this episode.
This method checks whether the episode is available on the disk, giving
the path to that file if so. Otherwise, simply return the episode's
enclosure, which is probably a URL.
Returns:
str: a path to a playable file for this episode
"""
playable = self.enclosure
episode_partial_filename = helpers.sanitize_path(str(self))
feed_directory = self._feed_directory()
if os.path.exists(feed_directory):
for File in os.listdir(feed_directory):
if File.startswith(episode_partial_filename + '.'):
playable = os.path.join(feed_directory, File)
return playable
def download(self, download_queue, display=None):
"""Downloads this episode to the file system.
This method currently only supports downloading from an external URL.
In the future, it may be worthwhile to determine whether the episode's
source is a local file and simply copy it instead.
Args:
download_queue: the download_queue overseeing this download
display: (optional) the display to write status updates to
"""
if self._enclosure is None:
if display is not None:
display.change_status("Download failed: episode does not have"
" a valid media source")
return
feed_directory = self._feed_directory()
episode_partial_filename = helpers.sanitize_path(str(self))
extension = os.path.splitext(self._enclosure)[1].split('?')[0]
output_path = os.path.join(feed_directory,
episode_partial_filename + str(extension))
DataFile.ensure_path(output_path)
if display is not None:
display.change_status("Starting episode download...")
t = threading.Thread(
target=DataFile.download_to_file,
args=[
self._enclosure, output_path, str(self),
download_queue, display
],
name="download_%s" % str(self)
)
t.start()
def delete(self, display=None):
"""Deletes the episode file from the file system.
Args:
display: (optional) the display to write status updates to
"""
assert self.downloaded
episode_partial_filename = helpers.sanitize_path(str(self))
feed_directory = self._feed_directory()
if os.path.exists(feed_directory):
for File in os.listdir(feed_directory):
if File.startswith(episode_partial_filename + '.'):
os.remove(os.path.join(feed_directory, File))
if display is not None:
display.change_status(
"Successfully deleted the downloaded episode"
)
# if there are no more files in the feed directory, delete it
if len(os.listdir(feed_directory)) == 0:
os.rmdir(feed_directory)
@property
def title(self) -> str:
"""str: the title of the episode"""
result = self._title
if result is None:
result = "Title not available."
return result
@property
def description(self) -> str:
"""str: the description of the episode"""
result = self._description
if result is None:
result = "Description not available."
return result
@property
def link(self) -> str:
"""str: the link of/for the episode"""
result = self._link
if result is None:
result = "Link not available."
return result
@property
def pubdate(self) -> str:
"""str: the publish date of the episode"""
result = self._pubdate
if result is None:
result = "Publish date not available."
return result
@property
def copyright(self) -> str:
"""str: the copyright of the episode"""
result = self._copyright
if result is None:
result = "No copyright specified."
return result
@property
def enclosure(self) -> str:
"""str: the enclosure of the episode"""
result = self._enclosure
if result is None:
result = "Enclosure not available."
return result
@property
def downloaded(self) -> bool:
"""bool: whether or not the episode is downloaded"""
found_downloaded = False
feed_dirname = helpers.sanitize_path(str(self._feed))
episode_partial_filename = helpers.sanitize_path(str(self))
feed_directory = os.path.join(DataFile.DOWNLOADED_DIR, feed_dirname)
if os.path.exists(feed_directory):
for File in os.listdir(feed_directory):
if File.startswith(episode_partial_filename + '.'):
found_downloaded = True
return found_downloaded
@property
def downloaded_str(self) -> str:
"""str: a text description of whether the episode is downloaded"""
if self.downloaded:
result = "Episode downloaded and available for offline playback."
else:
result = "Episode not downloaded."
return result
| [
"jake@faltro.com"
] | jake@faltro.com |
93519bcda9ed48a7c96840b95c632bd619fda9f9 | b01429f27f8d7f4db7e3eba0abbb6be1ea67e2fa | /imageimage1.2/propriete/propriete_vivant_air.py | e31973f542422c9ebc8de6f4de654e9f0b8becc5 | [] | no_license | pastrouveedespeudo/ste-fois-c-la-bonne | 3dce8cdfc6b5523d9651e8ec9a143b7ab7789d21 | 9872c35423870c9854ee0bda120cca0c832c1fc9 | refs/heads/master | 2020-04-20T22:08:34.295196 | 2019-02-17T17:18:36 | 2019-02-17T17:18:36 | 169,129,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,621 | py | class vivant:
def vivant(self):
self.vivant_air = ["chat",
"chien",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chien",
"chien",
"chat",
"chien",
"chien",
"chien",
"chat",
"chien",
"chien",
"chien",
"chat",
"chat",
"chat",
"chat",
"chat",
"chat",
"chat",
"chat",
"chat",
"chat",
"chat",
"chien",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"requin",
"chien",
"chat",
"chien",
"chien",
"chien",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chien",
"chien",
"chien",
"chien",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chien",
"chien",
"chien",
"chat",
"requin",
"chien",
"chat",
"requin",
"requin",
"requin",
"requin",
"requin",
"chien",
"chat",
"dinosaure",
"chat",
"chien",
"dinosaure",
"chat",
"chien",
"dinosaure" ] | [
"noreply@github.com"
] | pastrouveedespeudo.noreply@github.com |
df181d1dd23af220e91c7c1f1f8ad80dce1f7d23 | bc167f434158921bcf2c678155c5cdfec1c9b0c9 | /PI_code/simulator/behaviourGeneration/group/behav478.py | 1899e1d5fda53dc78b027212ecc6502b202141a0 | [] | no_license | s0217391/DifferentProjects | 6450efc89c64ecd21b86c705737e89e5c69433a6 | 7f4da153660817b6cbf72d2e823aa29c0c2f95a9 | refs/heads/master | 2021-01-17T02:58:46.219240 | 2015-05-26T22:45:46 | 2015-05-26T22:45:46 | 34,995,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,230 | py | #!/usr/bin/python
import sys
def compute(prey, otherHunter, dist):
temp0 = -1 * prey[0]
if otherHunter[1] != 0:
temp1 = prey[1] / otherHunter[1]
else:
temp1 = otherHunter[1]
temp1 = prey[0] + prey[1]
temp1 = -1 * otherHunter[1]
temp1 = dist - temp0
temp0 = min( otherHunter[1] , prey[0] )
temp1 = max( temp0 , otherHunter[1] )
temp1 = min( prey[1] , otherHunter[0] )
temp1 = max( prey[0] , prey[0] )
if otherHunter[1] != 0:
temp1 = otherHunter[0] / otherHunter[1]
else:
temp1 = otherHunter[1]
if otherHunter[1] != 0:
temp1 = otherHunter[0] % otherHunter[1]
else:
temp1 = otherHunter[1]
temp0 = prey[0] * prey[1]
temp0 = prey[0] - prey[1]
temp2 = prey[1] - temp0
temp1 = max( temp0 , prey[1] )
if temp1 != 0:
temp0 = otherHunter[1] / temp1
else:
temp0 = temp1
if temp2 > temp0 :
temp3 = max( temp1 , prey[0] )
else:
if temp1 > otherHunter[0] :
temp3 = min( otherHunter[1] , temp0 )
else:
if temp1 > otherHunter[1] :
temp3 = temp0 - otherHunter[0]
else:
if temp0 > otherHunter[0] :
if dist > otherHunter[1] :
temp3 = temp1 * temp1
else:
temp3 = min( otherHunter[0] , dist )
else:
temp3 = prey[1] + temp2
return [ temp3 , otherHunter[0] ]
| [
"i7674211@bournemouth.ac.uk"
] | i7674211@bournemouth.ac.uk |
cb015a533d9e178936ea1c750e1174ccc0214944 | 8808906b8562b679540e9fe51f8f034e36e8a977 | /adler/tensorflow/losses.py | 370651b2f1392cd2d7036490a484791831a909b9 | [
"MIT"
] | permissive | adler-j/adler | 2bd0a969f8d31505d99bd4853f57f74d1984dc17 | f5fb62c41d50f270eafdd53e93c1763c99a1d902 | refs/heads/master | 2021-01-20T08:15:39.645701 | 2019-11-28T21:41:18 | 2019-11-28T21:41:18 | 90,125,611 | 8 | 5 | MIT | 2019-11-28T21:41:19 | 2017-05-03T08:22:49 | Python | UTF-8 | Python | false | false | 2,598 | py | import demandimport
with demandimport.enabled():
import tensorflow as tf
import numpy as np
__all__ = ('log10', 'psnr', 'ssim')
def log10(x):
numerator = tf.log(x)
denominator = tf.log(tf.constant(10, dtype=numerator.dtype))
return numerator / denominator
def psnr(x_result, x_true, name='psnr'):
with tf.name_scope(name):
maxval = tf.reduce_max(x_true) - tf.reduce_min(x_true)
mse = tf.reduce_mean((x_result - x_true) ** 2)
return 20 * log10(maxval) - 10 * log10(mse)
def _tf_fspecial_gauss(size, sigma):
"""Function to mimic the 'fspecial' gaussian MATLAB function
"""
x_data, y_data = np.mgrid[-size//2 + 1:size//2 + 1,
-size//2 + 1:size//2 + 1]
x_data = np.expand_dims(x_data, axis=-1)
x_data = np.expand_dims(x_data, axis=-1)
y_data = np.expand_dims(y_data, axis=-1)
y_data = np.expand_dims(y_data, axis=-1)
x = tf.constant(x_data, dtype=tf.float32)
y = tf.constant(y_data, dtype=tf.float32)
g = tf.exp(-((x**2 + y**2)/(2.0*sigma**2)))
return g / tf.reduce_sum(g)
def ssim(img1, img2, cs_map=False, mean_metric=True, size=11, sigma=1.5,
name='ssim'):
"""Structural SIMilarity index.
Code from:
https://stackoverflow.com/questions/39051451/ssim-ms-ssim-for-tensorflow
"""
with tf.name_scope(name):
window = _tf_fspecial_gauss(size, sigma) # window shape [size, size]
K1 = 0.01
K2 = 0.03
L = 1 # depth of image (255 in case the image has a differnt scale)
C1 = (K1*L)**2
C2 = (K2*L)**2
mu1 = tf.nn.conv2d(img1, window, strides=[1, 1, 1, 1], padding='VALID')
mu2 = tf.nn.conv2d(img2, window, strides=[1, 1, 1, 1], padding='VALID')
mu1_sq = mu1*mu1
mu2_sq = mu2*mu2
mu1_mu2 = mu1*mu2
sigma1_sq = tf.nn.conv2d(img1*img1, window, strides=[1, 1, 1, 1], padding='VALID') - mu1_sq
sigma2_sq = tf.nn.conv2d(img2*img2, window, strides=[1, 1, 1, 1], padding='VALID') - mu2_sq
sigma12 = tf.nn.conv2d(img1*img2, window, strides=[1, 1, 1, 1], padding='VALID') - mu1_mu2
if cs_map:
value = (((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*
(sigma1_sq + sigma2_sq + C2)),
(2.0*sigma12 + C2)/(sigma1_sq + sigma2_sq + C2))
else:
value = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*
(sigma1_sq + sigma2_sq + C2))
if mean_metric:
value = tf.reduce_mean(value)
return value | [
"jonasadl@kth.se"
] | jonasadl@kth.se |
ab02fa783977bd1142c4ca52d2fd181959bacfa1 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5708921029263360_0/Python/ziyan/c.py | c250248466e97f63f5bb90fb5797cc1624f5e7b5 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | #!/usr/bin/env python
import os
import sys
import collections
def solve(J, P, S, K):
sols = []
jppairs = collections.defaultdict(int)
pspairs = collections.defaultdict(int)
jspairs = collections.defaultdict(int)
for j in range(J):
for p in range(P):
for s in range(S):
if jppairs[(j, p)] < K and pspairs[(p, s)] < K and jspairs[(j, s)] < K:
sols += [(j, p, s)]
jppairs[(j, p)] += 1
pspairs[(p, s)] += 1
jspairs[(j, s)] += 1
return sols
def main():
T = int(sys.stdin.readline().strip())
for t in range(T):
J, P, S, K = map(int, sys.stdin.readline().strip().split())
sols = solve(J, P, S, K)
print 'Case #%d: %d' % (t + 1, len(sols))
for sol in sols:
print '%d %d %d' % (sol[0] + 1, sol[1] + 1, sol[2] + 1)
if __name__ == '__main__':
main()
| [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
808d8073572cc25e44a844f47b654d2ebf298a8b | 13724823af94e5e5351ffa42ca896397f12f1f05 | /install/lamachine/bin/foliamerge | b6f61bf89f1f7cbb27cd35578c4d359457e6c0df | [] | no_license | AymanYac/Neonec-Deep-Classsifier | 21e00cb0c5561f4ac22968f748ada0aa299e0a94 | a7978f434cc09d9e00a7df5d391bae77daf17637 | refs/heads/master | 2022-06-08T12:44:10.203386 | 2018-07-06T15:28:00 | 2018-07-06T15:28:00 | 139,996,406 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | #!/mnt/c/Users/yacay/Downloads/LaMachine-master/install/lamachine/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from foliatools.foliamerge import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"root@Razer-Stealth.localdomain"
] | root@Razer-Stealth.localdomain | |
d2983122fb0009d363cf14e6c7be027b5fbdd062 | 54791fd57ecc9a4fe7c5164dfa6eb79c8df48ee1 | /tmpdoc/experiment/python_demos/work/selenium_demo/selenium_execjs.py | 7fa71e1eb47fced1401baba2eaf6c2b033cffe73 | [] | no_license | cherry-wb/quietheart | 8dfc91f88046bd1b40240e2f6121043977ab78b4 | 715ed73c990da2b4634313c93910769a59ce51f4 | refs/heads/master | 2021-01-18T00:04:39.802220 | 2014-08-21T07:39:21 | 2014-08-21T07:39:21 | 23,286,239 | 1 | 3 | null | 2019-03-11T09:32:21 | 2014-08-24T16:37:05 | null | UTF-8 | Python | false | false | 792 | py | #!/usr/bin/python
from selenium import webdriver
#url = "http://10.126.1.29/wirelesssetup_radiosetup.html"
#url = "http://10.126.1.29/advancedsetup_lanipdhcpsettings.html"
#url = "http://10.126.1.29/wirelesssetup_basicsettings.html"
#url = "http://10.126.1.29/wirelesssetup_radiosetup.html"
#url = "http://10.126.1.29/wirelesssetup_multiplessid.html"
url = "http://admin:admin@10.126.1.15/Wireless_Basic.asp"
formName = 'wireless'
firefoxDriver = webdriver.Firefox()
firefoxDriver.get(url)
#content = firefoxDriver.execute_script("return document.forms['%s'].outerHTML;" % (formName))
#content = firefoxDriver.execute_script("return document.forms['%s'].outerHTML" %(formName))
content = firefoxDriver.execute_script("return document.forms[0].outerHTML")
print content
firefoxDriver.quit()
| [
"quietheart@quietheart-ThinkPad-E420.(none)"
] | quietheart@quietheart-ThinkPad-E420.(none) |
5fd135d961041599ba6517fc3bc51b6192575f70 | 32c56293475f49c6dd1b0f1334756b5ad8763da9 | /google-cloud-sdk/lib/googlecloudsdk/command_lib/dialogflow/intents/hooks.py | 35fab4c997e2bc806e20eb418ea5a0a03f27c244 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | bopopescu/socialliteapp | b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494 | 85bb264e273568b5a0408f733b403c56373e2508 | refs/heads/master | 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 | MIT | 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null | UTF-8 | Python | false | false | 1,402 | py | # -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Declarative hooks for `gcloud dialogflow intents`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import encoding
def TrainingPhrasesType(training_phrase):
return {
'parts': [{'text': training_phrase}],
'type': 'EXAMPLE'
}
def ResponseToMessage(response):
return {'text': {'text': [response]}}
def AddOtherPropertiesToRequest(unused_instance_ref, args, request):
intent = encoding.MessageToDict(request.googleCloudDialogflowV2Intent)
if args.IsSpecified('other_properties'):
intent.update(args.other_properties)
request.googleCloudDialogflowV2Intent = encoding.DictToMessage(
intent, type(request.googleCloudDialogflowV2Intent))
return request
| [
"jonathang132298@gmail.com"
] | jonathang132298@gmail.com |
77e7aabcbc9de1998068a6633dc55119edcbc6db | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf.0/gsn-edf_ut=3.5_rd=1_rw=0.06_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=0/params.py | d4f81c06a2325c865d4af4a06346ec07d1c9ec8f | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.721167',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '1',
'res_nmb': '4',
'res_weight': '0.06',
'scheduler': 'GSN-EDF',
'trial': 0,
'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
081d2606bb85413135f9cf37448d40647dde1cbe | 3199331cede4a22b782f945c6a71150a10c61afc | /20210523LangReview/Python/review04/04-generator/gen02.py | 2624c1c06a7d841915f7e0b8c362406a78e431e6 | [] | no_license | AuroraBoreas/language-review | 6957a3cde2ef1b6b996716addaee077e70351de8 | 2cb0c491db7d179c283dba205b4d124a8b9a52a3 | refs/heads/main | 2023-08-19T23:14:24.981111 | 2021-10-11T12:01:47 | 2021-10-11T12:01:47 | 343,345,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | "#Python is a protocol orientated lang; every top-level function or syntax has a corresponding dunder method implemented;"
import time
class Compute:
def __init__(self, last: int):
self.last = last
self.first = 0
def __iter__(self):
return self
def __next__(self):
rv = self.first
self.first += 1
time.sleep(.5)
if self.first > self.last:
raise StopIteration()
return rv
if __name__ == "__main__":
for i in Compute(10):
print(i)
| [
"noreply@github.com"
] | AuroraBoreas.noreply@github.com |
47c0ab6d57f95d8b1d7819eb25b2c4be405b67ef | cc64b1b5deb4530a5bd3eaabd98ebd4daa2deea1 | /Aulas/Exercícios-Mundo3/Aula016/Ex072.py | 6a361ae1465ad181cd99a5831421f1306f1a034c | [
"MIT"
] | permissive | Sofista23/Aula1_Python | 239b9920353138ff99d99dd0af66a4788f1cbb22 | 129132d977058ac6f23cc95c7bb8b55d8a1bb429 | refs/heads/main | 2023-09-01T23:55:20.529528 | 2021-10-13T23:19:33 | 2021-10-13T23:19:33 | 416,924,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | t=("zero","um","dois","três","quatro","cinco","seis","sete","oito","nove","dez","onze","doze","treze","quatorze","quinze","dezesseis","dezessete","dezoito","dezenove20","vinte")
while True:
esc=int(input("Digite um número de 0 a 20:"))
if 0<=esc<=20:
print(f"Você digitou o valor {t[esc]}.")
esc2=input("Você quer continuar [s/n]:").strip().upper()
if esc2=="N":
break
print("Obrigado por perder seu tempo conosco.") | [
"81760467+Sofista23@users.noreply.github.com"
] | 81760467+Sofista23@users.noreply.github.com |
874e34415a4f5d7c2ddb22a3966ca448f742d45b | 2635d6f24df87d0813e9dd8d3853fb9632d39686 | /setup.py | f8c12f6adcb5876d7aa8340adab284698b3abf79 | [
"MIT"
] | permissive | tolulomo/materialsmine | cc921464aefa0f47fc6ac9f85a8bd65a67c0f3bb | 8ac7d942b89492c8750bc5cb95951e2ab9694ae4 | refs/heads/master | 2022-11-18T08:10:51.631100 | 2020-07-15T17:30:13 | 2020-07-15T17:30:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | #!/usr/bin/env python
from distutils.core import setup
setup(name='Nanomine',
version='0.1',
description='Nanomine project configuration file',
author='rui',
packages=[
# 'pymongo'
],
)
| [
"mccusker@gmail.com"
] | mccusker@gmail.com |
3afab3079ec8742ba54e9a0b1a48976d2ad481f3 | a0b7a7104ca701e8b08d590660ee92b325fd17e9 | /jeri/core/models/fields/__init__.py | 72fba11357e8171207a8f80cb4e8eae570e9bd62 | [
"BSD-3-Clause"
] | permissive | fmorgner/jeri | fecd4df05b62ee00a248005f3cbf1c313eb6d35d | 5b33411c0e25375e3e5928fc044581a24c56f3ad | refs/heads/master | 2021-01-01T16:46:52.786518 | 2017-07-22T17:49:18 | 2017-07-22T17:49:18 | 97,918,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | from jeri.core.models.fields.value import StringField # NOQA
from jeri.core.models.fields.related import ( # NOQA
OneToOneField,
OneToManyField
)
| [
"felix.morgner@gmail.com"
] | felix.morgner@gmail.com |
6eecbdfe33a0d7bf82903ca4bfd6b8c3a3c79f4f | 5de5ae0adb6fb1e73c2e897fbc13b6abf53c559b | /Applications/Logic_Puzzles/pipe.py | c2c73d8342e179dfbc8a640d20a4512ce7d4a0d0 | [] | no_license | Trietptm-on-Coding-Algorithms/Learning-Z3 | af935450226ee3299e10361f21a567945aa0fd5c | c5ef7faca49aa164556b3c7e9ccfb4709027cf74 | refs/heads/master | 2020-05-13T18:34:38.105308 | 2017-12-23T11:08:43 | 2017-12-23T11:08:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,250 | py | from z3 import *
# Solving Puzzle
# Pipe puzzle is a puzzle where we are given a sets of randomly configured pipe
# The goal is to configure the pipes to make a close loop where the water can
# flow only inside the pipe.
# Imagine the field is a matrix with certain row and columns.
# Each cell can be connected with other by a joint, either horizontal joint or
# vertical joint.
# A pipe can be imagined as one or more joints that operate as single body.
# Then, based on how many joints a pipe has, we can create the pipe types.
# The pipe can be rotated to certain degree (0, 90, 180, 270) which result in
# the change of position.
# cell type, angle, (pseudo)graphical representation
symbols={("0", 0): " ",
("2a", 0): "┃",
("2a", 90): "━",
("2b", 0): "┏",
("2b", 90): "┓",
("2b",180): "┛",
("2b",270): "┗",
("3", 0): "┣",
("3", 90): "┳",
("3", 180): "┫",
("3", 270): "┻",
("4", 0): "╋"}
def print_model(m):
# print angles:
for r in range(HEIGHT):
for c in range(WIDTH):
t=cells_type[r][c]
angle=int(str(m[A[r][c]]))
sys.stdout.write("%3d " % angle)
print()
# print pipes:
for r in range(HEIGHT):
for c in range(WIDTH):
t=cells_type[r][c]
angle=int(str(m[A[r][c]]))
sys.stdout.write(symbols[(t, angle)]+" ")
print()
print()
s=Solver()
HEIGHT=8
WIDTH=16
# if T/B/R/L is Bool instead of Int, Z3 solver will work faster
T=[[Bool('cell_%d_%d_top' % (r, c)) for c in range(WIDTH)] for r in range(HEIGHT)]
B=[[Bool('cell_%d_%d_bottom' % (r, c)) for c in range(WIDTH)] for r in range(HEIGHT)]
R=[[Bool('cell_%d_%d_right' % (r, c)) for c in range(WIDTH)] for r in range(HEIGHT)]
L=[[Bool('cell_%d_%d_left' % (r, c)) for c in range(WIDTH)] for r in range(HEIGHT)]
A=[[Int('cell_%d_%d_angle' % (r, c)) for c in range(WIDTH)] for r in range(HEIGHT)]
# initial configuration
cells_type=[
["0", "0", "2b", "3", "2a", "2a", "2a", "3", "3", "2a", "3", "2b", "2b", "2b", "0", "0"],
["2b", "2b", "3", "2b", "0", "0", "2b", "3", "3", "3", "3", "3", "4", "2b", "0", "0"],
["3", "4", "2b", "0", "0", "0", "3", "2b", "2b", "4", "2b", "3", "4", "2b", "2b", "2b"],
["2b", "4", "3", "2a", "3", "3", "3", "2b", "2b", "3", "3", "3", "2a", "2b", "4", "3"],
["0", "2b", "3", "2b", "3", "4", "2b", "3", "3", "2b", "3", "3", "3", "0", "2a", "2a"],
["0", "0", "2b", "2b", "0", "3", "3", "4", "3", "4", "3", "3", "3", "2b", "3", "3"],
["0", "2b", "3", "2b", "0", "3", "3", "4", "3", "4", "4", "3", "0", "3", "4", "3"],
["0", "2b", "3", "3", "2a", "3", "2b", "2b", "3", "3", "3", "3", "2a", "3", "3", "2b"]]
# We know that if each of half joints is present, corresponding half-joint must be
# also present, and vice-versa. We define this using these constraints.
# shorthand variables for True and False:
t=True
f=False
# "top" of each cell must be equal to "bottom" of the cell above
# "bottom" of each cell must be equal to "top" of the cell below
# "left" of each cell must be equal to "right" of the cell at left
# "right" of each cell must be equal to "left" of the cell at right
for r in range(HEIGHT):
for c in range(WIDTH):
if r!=0:
s.add(T[r][c]==B[r-1][c])
if r!=HEIGHT-1:
s.add(B[r][c]==T[r+1][c])
if c!=0:
s.add(L[r][c]==R[r][c-1])
if c!=WIDTH-1:
s.add(R[r][c]==L[r][c+1])
# "left" of each cell of first column shouldn't have any connection
# so is "right" of each cell of the last column
for r in range(HEIGHT):
s.add(L[r][0]==f)
s.add(R[r][WIDTH-1]==f)
# "top" of each cell of the first row shouldn't have any connection
# so is "bottom" of each cell of the last row
for c in range(WIDTH):
s.add(T[0][c]==f)
s.add(B[HEIGHT-1][c]==f)
for r in range(HEIGHT):
for c in range(WIDTH):
ty=cells_type[r][c]
if ty=="0":
s.add(A[r][c]==f)
s.add(T[r][c]==f, B[r][c]==f, L[r][c]==f, R[r][c]==f)
if ty=="2a":
s.add(Or(And(A[r][c]==0, L[r][c]==f, R[r][c]==f, T[r][c]==t, B[r][c]==t), # ┃
And(A[r][c]==90, L[r][c]==t, R[r][c]==t, T[r][c]==f, B[r][c]==f))) # ━
if ty=="2b":
s.add(Or(And(A[r][c]==0, L[r][c]==f, R[r][c]==t, T[r][c]==f, B[r][c]==t), # ┏
And(A[r][c]==90, L[r][c]==t, R[r][c]==f, T[r][c]==f, B[r][c]==t), # ┓
And(A[r][c]==180, L[r][c]==t, R[r][c]==f, T[r][c]==t, B[r][c]==f), # ┛
And(A[r][c]==270, L[r][c]==f, R[r][c]==t, T[r][c]==t, B[r][c]==f))) # ┗
if ty=="3":
s.add(Or(And(A[r][c]==0, L[r][c]==f, R[r][c]==t, T[r][c]==t, B[r][c]==t), # ┣
And(A[r][c]==90, L[r][c]==t, R[r][c]==t, T[r][c]==f, B[r][c]==t), # ┳
And(A[r][c]==180, L[r][c]==t, R[r][c]==f, T[r][c]==t, B[r][c]==t), # ┫
And(A[r][c]==270, L[r][c]==t, R[r][c]==t, T[r][c]==t, B[r][c]==f))) # ┻
if ty=="4":
s.add(A[r][c]==0)
s.add(T[r][c]==t, B[r][c]==t, L[r][c]==t, R[r][c]==t) # ╉
print(s.check())
print_model (s.model()) | [
"me@xathrya.id"
] | me@xathrya.id |
1d026ecec5a670431b899914e47b7896880ac674 | 5de0c0e76bdde469156d057007a5008a63a0d66b | /openeeg/proto.py | 2fa2ae2d0a6741714a867192b9b835eb0801cace | [] | no_license | mattharkness/sixthdev | 6bcfd1c490efafb114dc5f014c6e5f1d91d56b4d | a7df929147d82d225606c216f69c48d898e19ebe | refs/heads/master | 2023-06-08T05:57:38.928657 | 2021-06-15T16:53:15 | 2021-06-15T16:53:15 | 338,441,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,255 | py | #!/usr/bin/python2.2
#
# openEEG software prototype
# by michal wallace (sabren@manifestation.com)
#
# python prototype of a mind-mirror style biofeedback machine
# Basically, this is just a spectral analysis program.
#
# This version still only graphs fake data, but adds windowing
# to clean up some of the noise. The scale is still wrong, though.
#
# $Id$
## dependencies: #####################################################
try:
import Numeric # http://www.pfdubois.com/numpy/
import MLab, FFT, RandomArray # (parts of numeric)
import pygame # http://www.pygame.org/
from pygame.locals import *
except:
raise SystemExit, "This program requries NumPy and pygame."
# the rest of these come with python:
import whrandom
import time
## graphic routines ##################################################
def makeGradient():
"""
Returns an 163*10 Surface showing mirrored green-yellow-red
gradients with a blue line in between.
"""
colors = []
for i in range(0, 0xff, 0x22):
colors.append((i, 0xff, 0))
colors.append((0xff, 0xff, 0))
for i in range(0xcc, -1, -0x22):
colors.append((0xff, i, 0))
rcolors = colors
lcolors = colors[:]; lcolors.reverse()
center = 80
sprite = pygame.Surface((163, 10))
for x in range(len(colors)):
# left (red to green)
pygame.draw.rect(sprite, lcolors[x],
pygame.Rect(x*5, 1, 4, 8))
# right (green to red)
pygame.draw.rect(sprite, rcolors[x],
pygame.Rect(center+2+(x*5), 1, 4, 8))
pygame.draw.line(sprite, (0xcc,0xcc,0xff), (center, 0), (center, 10))
return sprite
def makeWindow(winsize):
pygame.init()
pygame.display.set_caption("openEEG prototype")
return pygame.display.set_mode(winsize, RESIZABLE, 0)
def keepLooping():
pygame.display.update()
for e in pygame.event.get():
if (e.type == KEYUP and e.key == K_ESCAPE) \
or (e.type == QUIT):
return 0
return 1
## data routines #####################################################
def wave(frequency, sampRate=256.0):
"""
Returns a sampled wave at the given frequency and sample rate.
This routine is generalized from Eric Hagemann's article at:
http://www.onlamp.com/pub/a/python/2001/01/31/numerically.html
"""
return Numeric.sin(2 * Numeric.pi
* (frequency/sampRate)
* Numeric.arange(sampRate))
def fakeSession():
"""
Creates ten seconds of completely fake data.
"""
pureAlpha = 10 # alpha is 8-12hz
pureBeta = 20 # beta is 13-30hz
pureTheta = 6 # theta is 4-8hz
pureDelta = 2 # delta is 0.5-4hz
sec = [None] * 10 # make an empty list
# when animated, this should move right up the line:
sec[0] = wave(pureDelta)
sec[1] = wave(pureTheta)
sec[2] = wave(pureAlpha)
sec[3] = wave(pureBeta)
# and this should move back down in pairs:
sec[4] = wave(pureBeta) + wave(pureAlpha)
sec[5] = wave(pureAlpha) + wave(pureTheta)
sec[6] = wave(pureTheta) + wave(pureDelta)
sec[7] = wave(pureDelta) + wave(pureBeta)
# all four at once:
sec[8] = wave(pureDelta) + wave(pureTheta) \
+ wave(pureAlpha) + wave(pureBeta)
# and then silence:
sec[9] = wave(0)
return Numeric.concatenate(sec)
def makeSpectrogram(slice):
"""
Returns a list of length 32, with the FFT of the slice.
We seem to need 64 samples to do this.
If the sample rate is 256Hz, then we're talking about
1/4th of a second's worth of data here.
"""
assert len(slice)==64, "we want 32 bins, so we need 64 samples"
res = abs(FFT.real_fft(slice))[:-1] # discard 33rd slot (is this okay?)
res = Numeric.floor(res) # round off to integers
assert len(res)==32, len(res)
return res
## main program ######################################################
def main():
#@TODO: make names for all these magic numbers...
screen = makeWindow(winsize=(200, 400))
grad = makeGradient()
black = pygame.Surface((80,10))
black.fill((0,0,0))
# the windowing array quiets down the edges of the sample
# to prevent "clicking" at the edges:
windowing = MLab.blackman(64)
session = fakeSession()
t = 0
center= 81 # same as in creating the graph @TODO: consolidate these
while keepLooping():
# simulate aquiring data for 1/4th of a second (64 samples):
time.sleep(0.25)
data = session[t:t+64] * windowing
graph = makeSpectrogram(data)
t += 64
if t >= len(session):
t = 0
# draw the gradient, then cover part of it up:
for i in range(32):
screen.blit(grad, (20, 20+i*10))
# left is blank for now:
#screen.blit(black,(20 +(0 ), 20+i*10))
# right side shows the data:
screen.blit(black,(20+center+(graph[i]*10), 20+i*10))
if __name__=="__main__":
main()
| [
"sabren"
] | sabren |
31ee594e35458cdcaaa3616917d92259bf6f73d3 | ea1a86f636db98d111360cc2d6988dc449f21ca7 | /backend-code/website/serializers.py | fee383d25ce736bf463db816d659a7dfe387e5e7 | [] | no_license | riaaniru2613/iste.nitk.ac.in-1 | 76434cd2a019b14e29dba138618975d8dd14c6a0 | 573001912bac0c53a7118c35be6358aeb0f96b1d | refs/heads/master | 2023-07-07T11:45:07.357822 | 2021-08-05T16:28:08 | 2021-08-05T16:28:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | from rest_framework import serializers
class DynamicFieldsModelSerializer(serializers.ModelSerializer):
def __init__(self, *args, **kwargs):
fields = kwargs.pop('fields', None)
super(DynamicFieldsModelSerializer, self).__init__(*args, **kwargs)
if fields is not None:
allowed = set(fields)
existing = set(self.fields.keys())
for field_name in existing - allowed:
self.fields.pop(field_name) | [
"amodhshenoy@gmail.com"
] | amodhshenoy@gmail.com |
f49bf0a3be14a5038d8868cbf934a3c39958629e | e585c222ecc8fa95b7c47a80cb0efb2be578b01e | /base/views.py | 29d2118ea0ffa9a512d9af9fa9e223dade01b788 | [] | no_license | 49527/miniprogram_backend | e0c13075e6af8eb1ce040c345ec7bbd448ddd58e | 105e8d85c71dfb2c7ecaf64f35c48ac3dedc9a4d | refs/heads/master | 2020-04-09T02:08:02.166013 | 2018-12-11T14:48:00 | 2018-12-11T14:48:00 | 159,929,690 | 0 | 0 | null | 2018-12-01T09:38:22 | 2018-12-01T09:38:22 | null | UTF-8 | Python | false | false | 3,824 | py | import urllib
import json
import logging
from rest_framework.response import Response
from rest_framework.parsers import JSONParser
from rest_framework.exceptions import MethodNotAllowed
from django.http.response import HttpResponseNotAllowed
from django.conf import settings
from django.http.response import HttpResponse, FileResponse
from base.exceptions import WLException
from base.util.serializer_helper import errors_summery
logger = logging.getLogger(__name__)
class WLAPIView(object):
API_VERSION = "0.1"
parser_classes = (JSONParser, )
DEFAULT_VALIDATE_EXC_CODE = 400
ERROR_HTTP_STATUS = False
http_method_names = ['get', 'post', 'options']
def generate_response(self, data, context):
return Response(data={
"response": dict(
{"result": 200},
**data
),
"version": self.API_VERSION,
"context": context
})
def get_request_obj(self, request, method=None):
if method is None:
method = request.method
if method == "POST":
try:
context = request.data.get("context", None)
data = request.data["data"]
return data, context
except KeyError:
raise WLException(code=400, message="Request format incorrect, data field is missing.")
elif method == "GET":
objs = request.GET
if "context" in objs:
context = objs.pop("context")
try:
context = json.loads(urllib.unquote(context))
except ValueError:
context = None
else:
context = None
data = objs
return data, context
else:
raise WLException(code=500, message="Unexpected call of get request object method.")
def validate_serializer(self, serializer, exc_code=None):
if not serializer.is_valid():
message = errors_summery(serializer)
raise WLException(
message=message,
code=exc_code if exc_code is not None else self.DEFAULT_VALIDATE_EXC_CODE
)
def handle_exception(self, exc):
if isinstance(exc, WLException):
reason = exc.message
code = exc.code
if exc.code == 500:
logger.exception("WLException 500", extra={"request": self.request})
else:
logger.warn("WLException: %d, %s" % (code, reason), extra={"request": self.request})
elif isinstance(exc, MethodNotAllowed):
return HttpResponseNotAllowed(self.http_method_names)
else:
if settings.DEBUG:
reason = "%s %s" % (str(exc.__class__), str(exc))
else:
reason = "Internal Error"
code = 500
# Log the detailed exception
logger.exception("Exception not handled", extra={"request": self.request})
if self.ERROR_HTTP_STATUS:
return HttpResponse(content=reason, status=code)
else:
return Response(data={
"response": {
"result": code,
"reason": reason
},
"version": self.API_VERSION,
})
class WLBinaryView(WLAPIView):
ERROR_HTTP_STATUS = True
def get(self, request):
data, context = self.get_request_obj(request)
io_stream, content_type = self.get_io_stream(data, context)
return FileResponse(io_stream, content_type=content_type)
def get_io_stream(self, data, context):
"""
:param data:
:param context:
:return: BinaryIO, content_type
"""
raise NotImplementedError
| [
"fhy14@mails.tsinghua.edu.cn"
] | fhy14@mails.tsinghua.edu.cn |
0b1b38916d41392f1d08f3a10dbb7bce96a9e49a | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20171227/example_httplib2/01get.py | 8a8bd52dd952e4265c41b351d1c6da01239cdbc7 | [] | no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 218 | py | import httplib2
import urllib.parse as parselib
http = httplib2.Http()
qs = parselib.urlencode({"name": "foo"})
response, body = http.request(f"http://localhost:44444/?{qs}", method="GET")
print(body.decode("utf-8"))
| [
"ababjam61+github@gmail.com"
] | ababjam61+github@gmail.com |
30a08c0dd0df890fdfc29c1163cc085d343e74f9 | 63c261c8bfd7c15f6cdb4a08ea2354a6cd2b7761 | /acaizerograu/acaizerograu/outros/migrations/0015_acaienergy_img.py | 2facb02213763ea87b8e37caec314dce25edb154 | [] | no_license | filhosdaputa/AcaiZero | 93295498d95bcc13d020f2255e6b87a12cff04bf | 99a775f823d98a0b7b10e685936f1c12ccd1a70a | refs/heads/master | 2022-10-29T05:31:10.512990 | 2017-08-11T13:49:06 | 2017-08-11T13:49:06 | 149,019,853 | 0 | 1 | null | 2022-10-18T00:41:16 | 2018-09-16T17:38:48 | JavaScript | UTF-8 | Python | false | false | 492 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-24 22:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('outros', '0014_acaicreme_img'),
]
operations = [
migrations.AddField(
model_name='acaienergy',
name='img',
field=models.CharField(default=1, max_length=200),
preserve_default=False,
),
]
| [
"igor-peres@hotmail.com"
] | igor-peres@hotmail.com |
27a385f5bed81772f708b3340dd406c08d200b27 | 6732dce33ccc8d3912c7dd9bb5a029988586a649 | /tests/all_tests_cached.py | 0515e76f07896484a441f62a9a98df0cd0eb011e | [
"Apache-2.0"
] | permissive | hamada2029/gdata-python3 | 8a0d3cb53b707b7ad2f826a486df254c813e7463 | c1028f6567b480908b90848523bebaf78e6b49f7 | refs/heads/master | 2021-01-22T12:53:28.196826 | 2014-11-30T07:05:30 | 2014-11-30T07:05:30 | 46,613,040 | 1 | 0 | null | 2015-11-21T11:44:20 | 2015-11-21T11:44:19 | null | UTF-8 | Python | false | false | 1,088 | py | #!/usr/bin/python3
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import all_tests
import gdata.test_config as conf
conf.options.set_value('runlive', 'true')
conf.options.set_value('savecache', 'true')
conf.options.set_value('clearcache', 'false')
def suite():
return unittest.TestSuite((atom_tests.core_test.suite(),))
if __name__ == '__main__':
unittest.TextTestRunner().run(all_tests.suite())
| [
"jvarshney20@gmail.com"
] | jvarshney20@gmail.com |
ed1d23fc1f6ecda72389cdaea307ea28a1e07b23 | 83b242997a1560214285fd38ab4d39a0b1210ddc | /opencv/SimpleBlobDetector.py | 3b471d90063328e07509532a210cbe45856f5a4b | [] | no_license | ivartz/vid2fft | 0a25d853e178b43fd0a5f765934887963f5c37f9 | 1b6ec82de04f86819ab4c1056d4f9d9bde1ed9c8 | refs/heads/master | 2020-08-07T21:44:28.745553 | 2019-10-08T09:18:41 | 2019-10-08T09:18:41 | 213,594,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,154 | py | #/******************************************************************************
#
# Copyright (c) 2018 Antillia.com TOSHIYUKI ARAI. ALL RIGHTS RESERVED.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#******************************************************************************/
# SimpleBlobDetector.py
# encodig: utf-8
import sys
import os
import cv2
import traceback
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
#
sys.path.append('../')
from SOL4Py.ZApplicationView import *
from SOL4Py.ZLabeledComboBox import ZLabeledComboBox
from SOL4Py.ZLabeledSlider import ZLabeledSlider
from SOL4Py.opencv.ZOpenCVImageView import ZOpenCVImageView
from SOL4Py.ZVerticalPane import ZVerticalPane
class MainView(ZApplicationView):
# Inner classes
#--------------------------------------------
class SourceImageView(ZOpenCVImageView):
def __init__(self, parent):
ZOpenCVImageView.__init__(self, parent)
def load(self, filename):
self.load_opencv_image(filename)
self.update()
class DetectedImageView(ZOpenCVImageView):
def __init__(self, parent):
ZOpenCVImageView.__init__(self, parent)
def load(self, filename):
source_image = self.load_opencv_image(filename)
self.gray_image = cv2.cvtColor(source_image, cv2.COLOR_RGB2GRAY)
def detect(self, minDist, minArea, maxArea):
source_image = self.get_opencv_image()
params = cv2.SimpleBlobDetector_Params()
params.thresholdStep = 10.0
params.minThreshold = 50.0
params.maxThreshold = 220.0
params.filterByArea = True
params.minArea = minArea
params.maxArea = maxArea
params.filterByColor = True
params.blobColor = 0
params.filterByCircularity = True
params.minCircularity = 0.5
params.filterByConvexity = True
params.minConvexity = 0.8
params.filterByInertia = True
params.minInertiaRatio = 0.1
params.minRepeatability = 2
params.minDistBetweenBlobs= 5.0
params.minDistBetweenBlobs= float(minDist)
detector = cv2.SimpleBlobDetector_create(params)
keypoints = detector.detect(self.gray_image);
out_image = cv2.drawKeypoints(source_image, keypoints,
None, (0, 0, 255),
cv2.DrawMatchesFlags_DRAW_RICH_KEYPOINTS )
self.set_opencv_image(out_image)
self.update()
#--------------------------------------------
# MainView Constructor
def __init__(self, title, x, y, width, height):
super(MainView, self).__init__(title, x, y, width, height)
filename = "../images/cat.jpg"
# 1 Create first imageview.
self.source_image_view = self.SourceImageView(self)
# 2 Create second imageview.
self.detectd_image_view = self.DetectedImageView(self)
# 3 Load the file
self.load_file(filename)
# 4 Add two image views to a main_layout of this main view.
self.add(self.source_image_view)
self.add(self.detectd_image_view)
self.show()
def add_control_pane(self, fixed_width=220):
# Control pane widget
self.vpane = ZVerticalPane(self, fixed_width)
self.minDist = 9;
self.minArea = 15;
self.maxArea = 131
self.minDistance_slider = ZLabeledSlider(self.vpane, "MinDistanceBetweenBlob", take_odd =False,
minimum=5, maximum=100, value=self.minDist, fixed_width=200)
self.minDistance_slider.add_value_changed_callback(self.minDistance_value_changed)
self.minArea_slider = ZLabeledSlider(self.vpane, "MinArea", take_odd =False,
minimum=1, maximum=100, value=self.minArea, fixed_width=200)
self.minArea_slider.add_value_changed_callback(self.minArea_value_changed)
self.maxArea_slider = ZLabeledSlider(self.vpane, "MaxArea", take_odd =False,
minimum=100, maximum=200, value=self.maxArea, fixed_width=200)
self.maxArea_slider.add_value_changed_callback(self.maxArea_value_changed)
self.vpane.add(self.minDistance_slider)
self.vpane.add(self.minArea_slider)
self.vpane.add(self.maxArea_slider)
self.set_right_dock(self.vpane)
def file_open(self):
options = QFileDialog.Options()
filename, _ = QFileDialog.getOpenFileName(self,"FileOpenDialog", "",
"All Files (*);;Image Files (*.png;*jpg;*.jpeg)", options=options)
if filename:
self.load_file(filename)
def load_file(self, filename):
self.source_image_view.load(filename)
self.detectd_image_view.load(filename)
self.detectd_image_view.detect(self.minDist, self.minArea, self.maxArea)
self.set_filenamed_title(filename)
def minDistance_value_changed(self, value):
self.minDist= int(value)
self.detectd_image_view.detect(self.minDist, self.minArea, self.maxArea)
def minArea_value_changed(self, value):
self.minArea = int(value)
self.detectd_image_view.detect(self.minDist, self.minArea, self.maxArea)
def maxArea_value_changed(self, value):
self.maxArea = int(value)
self.detectd_image_view.detect(self.minDist, self.minArea, self.maxArea)
#*************************************************
#
if main(__name__):
try:
app_name = os.path.basename(sys.argv[0])
applet = QApplication(sys.argv)
main_view = MainView(app_name, 40, 40, 900, 380)
main_view.show ()
applet.exec_()
except:
traceback.print_exc()
pass
| [
"djloek@gmail.com"
] | djloek@gmail.com |
c0b3d26047b039b6f39ae57cad8047f7af89eb6c | 9356f0b10133ed0671cd5414de81cadc97e0097d | /stravalib/tests/functional/test_client_write.py | 41d66e97f4b86e2f9b467b01faca0e83f12fb383 | [
"Apache-2.0"
] | permissive | peter-kolenic/stravalib | 850800ce716243a8498d2f6c4a9078bb29737dee | 571adc063179d0ef1519a468fcf2cfd9852b9874 | refs/heads/master | 2021-01-18T17:19:28.938813 | 2015-05-23T21:30:54 | 2015-05-23T21:30:54 | 36,108,269 | 1 | 1 | null | 2015-05-23T05:27:57 | 2015-05-23T05:27:56 | null | UTF-8 | Python | false | false | 3,326 | py | from __future__ import absolute_import, unicode_literals
from datetime import datetime, timedelta
from io import BytesIO
from stravalib import model, exc, attributes, unithelper as uh
from stravalib.client import Client
from stravalib.tests.functional import FunctionalTestBase
class ClientWriteTest(FunctionalTestBase):
    """Functional tests for Client write operations (create/update/upload).

    These tests talk to the live Strava API through the authenticated client
    provided by FunctionalTestBase, so they need a writable test account.
    """

    def test_create_activity(self):
        """Client.create_activity returns an Activity echoing the submitted fields."""
        now = datetime.now().replace(microsecond=0)
        a = self.client.create_activity("test_create_activity#simple",
                                        activity_type=model.Activity.RIDE,
                                        start_date_local=now,
                                        elapsed_time=timedelta(hours=3, minutes=4, seconds=5),
                                        distance=uh.miles(15.2))
        self.assertIsInstance(a, model.Activity)
        self.assertEqual("test_create_activity#simple", a.name)
        self.assertEqual(now, a.start_date_local)
        # Compare distances rounded to 2 decimals to absorb unit-conversion noise.
        self.assertEqual(round(float(uh.miles(15.2)), 2), round(float(uh.miles(a.distance)), 2))
        self.assertEqual(timedelta(hours=3, minutes=4, seconds=5), a.elapsed_time)

    def test_update_activity(self):
        """Client.update_activity changes only the requested fields and keeps the rest."""
        now = datetime.now().replace(microsecond=0)
        a = self.client.create_activity("test_update_activity#create",
                                        activity_type=model.Activity.RIDE,
                                        start_date_local=now,
                                        elapsed_time=timedelta(hours=3, minutes=4, seconds=5),
                                        distance=uh.miles(15.2))
        self.assertIsInstance(a, model.Activity)
        self.assertEqual("test_update_activity#create", a.name)

        update1 = self.client.update_activity(a.id, name="test_update_activivty#update")
        self.assertEqual("test_update_activivty#update", update1.name)
        self.assertFalse(update1.private)
        self.assertFalse(update1.trainer)
        self.assertFalse(update1.commute)

        update2 = self.client.update_activity(a.id, private=True)
        self.assertTrue(update2.private)

        update3 = self.client.update_activity(a.id, trainer=True)
        self.assertTrue(update3.private)  # private flag set by update2 must persist
        self.assertTrue(update3.trainer)

    def test_upload_activity(self):
        """Uploading a TCX file yields an Activity; re-uploading the same file fails.

        NOTE: This requires clearing out the uploaded activities from configured
        writable Strava acct.
        """
        import os
        # NOTE(review): RESOURCES_DIR is expected to be provided at module level
        # (e.g. `from stravalib.tests import RESOURCES_DIR`) -- confirm the
        # import exists; it is not among the imports visible here.
        with open(os.path.join(RESOURCES_DIR, 'sample.tcx')) as fp:
            uploader = self.client.upload_activity(fp, data_type='tcx')
            self.assertTrue(uploader.is_processing)
            a = uploader.wait()
            self.assertTrue(uploader.is_complete)
            self.assertIsInstance(a, model.Activity)
            self.assertEqual("02/21/2009 Leiden, ZH, The Netherlands", a.name)

            # And we'll get an error if we try the same file again.  Rewind and
            # re-upload while the handle is still open: the previous version
            # used `fp` after the `with` block had closed it, which raises
            # ValueError instead of exercising the duplicate-upload error.
            fp.seek(0)
            with self.assertRaises(exc.ActivityUploadFailed):
                self.client.upload_activity(fp, data_type='tcx')
| [
"hans@xmpl.org"
] | hans@xmpl.org |
1b46a272a3f67f353f53056f13e3223b617f355c | 303a4d41da8f2cd2000630ff30424d2875490e67 | /190329multitimetest/gendat.py | 7704c283a733f1501c221aafe5d58fcc19b0e6d5 | [] | no_license | noobermin/sharks | beb1d3d6a593e8d62f3d7416697d4de1fe9558b1 | af87113781eb67af45a9c2f79b73b1512ae0a1fa | refs/heads/master | 2022-05-10T11:55:17.200591 | 2021-09-30T14:27:22 | 2021-09-30T14:27:22 | 19,997,024 | 0 | 2 | null | 2016-05-20T19:27:49 | 2014-05-20T20:49:16 | Common Lisp | UTF-8 | Python | false | false | 5,587 | py | #!/usr/bin/env python
'''
Generate a dat file.
'''
from io import BytesIO; #we python3 now
import re;
import numpy as np;
from pys import test,parse_numtuple,sd,take,mk_getkw;
# Scale every element of tuple `t` by `m` (default 1e-4 -- presumably a
# micron -> cm unit conversion; confirm the unit convention used elsewhere).
mt = lambda t,m=1e-4: tuple([i*m for i in t]);
# Speed of light in m/s and cm/s, and vacuum permittivity (SI).
c = 299792458
c_cgs=c*100;
e0 = 8.8541878176e-12
# Default settings consumed via mk_getkw() by the generators below.
datdefaults = {
    'expf': 1.5,             # exponential scale length (pre-scaling units)
    'tlim': (0,27.5, 0,0,0.0 ,0.0,0.0),  # extent; gendat() uses entries 0..5
    'n_s' : 1e23,            # solid (peak) density
    'n_min' : 1e18,          # minimum density floor
    'long_margin' : [2.5, 5.0],
    'sdim': (17.5,27.5, 0.0,0.0, 0.0,0.0),  # solid-region extent
    'type' : 'singlescale',  # profile type handled by genf()
    'ux' : 1e-4,             # unit scale applied to tlim/sdim via mt()
    'dat_xres' : 100,        # default sample count along each axis
    'datfmt' : '%.8e',       # numpy.savetxt float format
};
def gendats(ds,**kw):
    """Render one dat file per settings dict in `ds`.

    Each dict is overlaid onto the shared keywords `kw` (via `sd`) and fed to
    gendat(); the generated file contents are returned in the same order.
    """
    rendered = []
    for overrides in ds:
        merged = sd(kw, overrides)
        rendered.append(gendat(**merged))
    return rendered
def gendat(**kw):
    '''
    Render sampled profile data to the text "dat" format: an optional
    shape/axis header (2D/3D cases) followed by the values, written through
    numpy.savetxt into a BytesIO buffer; returns the raw bytes.

    The profile comes either from a callable (f_1D/f_2D/f_3D, sampled on a
    grid spanning `tlim` at dat_*res points) or from pre-sampled axes and
    values (data1D/data2D/data3D).
    '''
    getkw=mk_getkw(kw,datdefaults);
    # Resolution defaults to dat_xres on every axis unless overridden.
    xres = getkw('dat_xres');
    yres=zres=xres;
    if test(kw,'dat_yres'): yres = kw['dat_yres'];
    if test(kw,'dat_zres'): zres = kw['dat_zres'];
    unit=getkw('ux');
    tlim = mt(getkw('tlim'),m=unit);
    fmt = getkw('datfmt');
    # Dimensionality is inferred from which profile keywords are present.
    if test(kw,'f_1D') or test(kw, 'data1D'):
        dim = 1;
    elif (test(kw,'f_2D') or test(kw, 'data2D')) and test(kw, 'tlim'):
        dim = 2;
    elif (test(kw,'f_3D') or test(kw, 'data3D')) and test(kw, 'tlim'):
        dim = 3;
    else:
        raise ValueError("Cannot reckon data dimensionality");
    if dim == 1:
        # 1D output: two columns (x, value), no header.
        if test(kw,'f_1D'):
            x = np.linspace(tlim[0],tlim[1],xres);
            d = getkw('f_1D')(x);
        elif test(kw,'data1D'):
            x,d = getkw('data1D');
        s = BytesIO();
        np.savetxt(s,np.array([x,d]).T,fmt=fmt,);
        return s.getvalue();
    elif dim == 2:
        # 2D output: shape line, x-axis line, y-axis line, then the grid
        # transposed so that rows run along y.
        if test(kw,'f_2D'):
            x = np.linspace(tlim[0],tlim[1],xres);
            # Degenerate y extent: sample the z limits instead.
            if np.isclose(tlim[2],tlim[3]):
                y = np.linspace(tlim[4],tlim[5],yres);
            else:
                y = np.linspace(tlim[2],tlim[3],yres);
            X,Y = np.meshgrid(x,y,indexing='ij');
            d = getkw('f_2D')(X,Y);
        elif test(kw,'data2D'):
            x,y,d = getkw('data2D');
        s = BytesIO();
        np.savetxt(s,np.array(list(d.shape)).reshape(1,-1), fmt='%i');
        np.savetxt(s,np.array(x).reshape(1,-1), fmt=fmt);
        np.savetxt(s,np.array(y).reshape(1,-1), fmt=fmt);
        np.savetxt(s,np.array(d).T,fmt=fmt,);
        return s.getvalue();
    else:
        # 3D output: shape line plus the three axes, then one transposed 2D
        # slab per z index.
        s = BytesIO();
        if test(kw, 'f_3D'):
            X,Y,Z = np.mgrid[
                tlim[0]:tlim[1]:xres*1j,
                tlim[2]:tlim[3]:yres*1j,
                tlim[4]:tlim[5]:zres*1j];
            d = getkw('f_3D')(X,Y,Z);
            np.savetxt(s,np.array(list(d.shape)).reshape(1,-1), fmt='%i');
            np.savetxt(s,X[:,0,0].reshape(1,-1),fmt=fmt);
            np.savetxt(s,Y[0,:,0].reshape(1,-1),fmt=fmt);
            np.savetxt(s,Z[0,0,:].reshape(1,-1),fmt=fmt);
            # Free the full meshgrids before writing the slabs.
            del X,Y,Z;
        elif test(kw,'data3D'):
            x,y,z,d = getkw('data3D');
            np.savetxt(s,np.array(list(d.shape)).reshape(1,-1), fmt='%i');
            np.savetxt(s,np.array(x).reshape(1,-1),fmt=fmt);
            np.savetxt(s,np.array(y).reshape(1,-1),fmt=fmt);
            np.savetxt(s,np.array(z).reshape(1,-1),fmt=fmt);
        #manual is probably best.
        zl = d.shape[-1];
        for i in range(zl):
            np.savetxt(s,np.array(d[:,:,i]).T,fmt=fmt);
        return s.getvalue();
    pass;
def mktwoscales(solid, sdim, xdim, L_front, L_back,
                tlim=None,
                front_floor=0.0,
                back_floor=0.0):
    """Build a 1D two-scale density profile function.

    The returned callable maps position array `x` to density:
      * `solid` inside [sdim[0], sdim[1]],
      * an exponential front ramp of scale length `L_front` ahead of sdim[0],
      * an exponential back ramp of scale length `L_back` behind sdim[1],
      * constant `front_floor` / `back_floor` beyond the ramps within `tlim`,
      * zero elsewhere.

    Parameters:
        solid: peak (solid) density.
        sdim: (lo, hi) extent of the solid region.
        xdim: full spatial extent; also the default for `tlim`.
        L_front, L_back: exponential scale lengths of the two ramps.
        tlim: optional (lo, hi) extent of the floor regions (default `xdim`).
        front_floor, back_floor: density floors; a positive floor truncates
            its ramp where the decay reaches the floor value.

    Returns:
        Function taking a numpy array of positions and returning densities.
    """
    if tlim is None:
        tlim = xdim
    # Ramp lengths: by default extend to tlim; a positive floor truncates the
    # ramp where solid*exp(-len/L) == floor, i.e. len = ln(solid/floor)*L.
    # (The unused `good` in-bounds mask from the original has been removed;
    # it was computed but never applied.)
    ppf_len = abs(sdim[0] - tlim[0])
    if front_floor > 0.0:
        ppf_len = min(np.log(solid / front_floor) * L_front, ppf_len)
    ppb_len = abs(sdim[1] - tlim[1])
    if back_floor > 0.0:
        ppb_len = min(np.log(solid / back_floor) * L_back, ppb_len)

    def outf(x):
        # Later assignments deliberately overwrite earlier ones: floors first,
        # then the solid slab, then the ramps on top of the floors.
        out = np.zeros_like(x)
        out[np.logical_and(sdim[0] >= x, x >= tlim[0])] = front_floor
        out[np.logical_and(sdim[1] <= x, x <= tlim[1])] = back_floor
        solids = np.logical_and(sdim[0] <= x, x <= sdim[1])
        out[solids] = solid
        fronts = np.logical_and(sdim[0] - ppf_len <= x, x <= sdim[0])
        out[fronts] = solid * np.exp(-np.abs(x - sdim[0]) / L_front)[fronts]
        backs = np.logical_and(sdim[1] <= x, x <= sdim[1] + ppb_len)
        out[backs] = solid * np.exp(-np.abs(x - sdim[1]) / L_back)[backs]
        return out
    return outf
def mkdecay(solid, sdim, xdim, l):
    """Return a vectorized single-scale density profile.

    Density is `solid` inside [sdim[0], sdim[1]], decays as
    solid * exp(-|x - sdim[0]| / l) elsewhere strictly inside
    (xdim[0], xdim[1]), and is zero at or beyond the xdim boundaries.
    """
    def profile(pos):
        # At or beyond the box boundary: vacuum.
        if pos <= xdim[0] or pos >= xdim[1]:
            return 0.0
        # Inside the solid slab: full density.
        if sdim[0] <= pos <= sdim[1]:
            return solid
        # Everywhere else: exponential decay measured from the slab front.
        return solid * np.exp(-np.abs(pos - sdim[0]) / l)
    return np.vectorize(profile)
def tlim_mvorig(tlim):
    """Shift a (x0,x1, y0,y1, z0,z1) extent so each axis starts at zero.

    Each axis keeps its length; its lower bound becomes 0.
    """
    moved = []
    for axis in range(3):
        lo = tlim[2 * axis]
        hi = tlim[2 * axis + 1]
        moved.append(0)
        moved.append(hi - lo)
    return tuple(moved)
def genf(**kw):
    """Build the 1D density-profile function described by the settings `kw`.

    Only the 'singlescale' profile type is implemented; it delegates to
    mkdecay() with the unit-scaled target/solid extents.
    """
    getkw = mk_getkw(kw, datdefaults)
    if getkw('type') != 'singlescale':
        raise NotImplementedError("Coming soon!")
    scaled_tlim = mt(getkw('tlim'), m=getkw('ux'))
    xdim = scaled_tlim[0], scaled_tlim[1]
    return mkdecay(
        getkw('n_s'),
        mt(getkw('sdim'), m=getkw('ux')),
        xdim,
        getkw('expf') * getkw('ux'))
# Defaults for genonescale(): datdefaults extended with the slab length and
# total box length (both in the same pre-scaling units as tlim/sdim).
onescale_defaults = sd(
    datdefaults,
    solid_len=10,
    xlen=27.5,
);
def genonescale(**kw):
    """Generate a dat file for a single-scale slab at the back of the box.

    The slab of length `solid_len` ends at `xlen`; the transverse extents are
    left at zero, so the resulting profile is one-dimensional.
    """
    getkw = mk_getkw(kw, onescale_defaults)
    slab_len = getkw("solid_len")
    box_len = getkw("xlen")
    transverse = (0.0, 0.0, 0.0, 0.0)
    settings = sd(
        kw,
        tlim=(0.0, box_len) + transverse,
        sdim=(box_len - slab_len, box_len) + transverse)
    settings['f_1D'] = genf(**settings)
    return gendat(**settings)
| [
"ngirmang.1@osu.edu"
] | ngirmang.1@osu.edu |
324e053537ed14e06f80510fe149a26724df36b1 | 5c254373f6725107931b68704436c2dbcd39d877 | /ute/probabilistic_utils/mallow.py | a9193339cded704e6b8f18ef329bbb1af5c8466e | [
"MIT"
] | permissive | JunLi-Galios/unsup_temp_embed_alternating | 22330346094720ecba2e5af305febe586566b92f | 1b054fd82aadcfe1aa219be17beb77c89efd974e | refs/heads/master | 2023-03-21T04:06:16.044321 | 2021-03-20T06:06:06 | 2021-03-20T06:06:06 | 322,737,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,099 | py | #!/usr/bin/env python
"""Implementation of the Generalized Mallow Model. It's used for modeling
temporal relations within video collection of one complex activity. """
__author__ = 'Anna Kukleva'
__date__ = 'August 2018'
import numpy as np
class Mallow(object):
    """The Generalized Mallows Model over orderings of K subactions.

    Orderings are represented by inversion-count vectors; each position j has
    a dispersion parameter rho[j] with an exponential prior of mean derived
    from rho_0 and strength nu_0.
    """
    def __init__(self, K, rho_0=1.0, nu_0=0.1):
        """
        Args:
            K: number of subactions in current complex activity
            rho_0: prior dispersion parameter.
            nu_0: prior strength (pseudo-count) for the dispersion prior.
        """
        self._canon_ordering = None
        # number of subactions
        self._K = K
        # position currently being resampled (updated by set_sample_params)
        self.k = 0
        self.rho = [1e-8] * (K - 1)
        self.rho_0 = rho_0
        self._nu_0 = nu_0
        self._dispersion = np.zeros((self._K, 1))
        # prior expected inversion counts v_j_0, one per position
        self._v_j_0 = {}
        self._init_v_j_0()
        self._v_j_sample = 0
        self._nu_sample = 0

    def _init_v_j_0(self):
        """Precompute the prior expected inversion count for every position."""
        for k in range(self._K):
            v_j_0 = 1. / (np.exp(self.rho_0) - 1) - \
                (self._K - k + 1) / (np.exp((self._K - k + 1) * self.rho_0) - 1)
            self._v_j_0[k] = v_j_0

    def set_sample_params(self, sum_inv_vals, k, N):
        """
        Args:
            sum_inv_vals: summation over all videos in collection for certain
                position in inverse count vectors
            k: current position for computations
            N: number of videos in collection
        """
        # BUGFIX: this used to assign `self._k`, but logpdf() reads `self.k`,
        # so the position set here was silently ignored (always 0).
        self.k = k
        self._nu_sample = self._nu_0 + N
        self._v_j_sample = (sum_inv_vals + self._v_j_0[k] * self._nu_0)  # / (self._nu_0 + N)

    def logpdf(self, ro_j):
        """Unnormalized log-density of dispersion value `ro_j` at position self.k."""
        norm_factor = np.log(self._normalization_factor(self.k, ro_j))
        result = -ro_j * self._v_j_sample - norm_factor * self._nu_sample
        return np.array(result)

    def _normalization_factor(self, k, rho_k):
        """Partition function of the GMM term at position k with dispersion rho_k."""
        power = (self._K - k + 1) * rho_k
        numerator = 1. - np.exp(-power)
        denominator = 1. - np.exp(-rho_k)
        return numerator / denominator

    def single_term_prob(self, count, k):
        """Log-probability of observing `count` inversions at position k."""
        result = -(self.rho[k] * count) - \
            np.log(self._normalization_factor(k, self.rho[k]))
        return result

    @staticmethod
    def inversion_counts(ordering):
        """Compute inverse count vector from ordering"""
        ordering = np.array(ordering)
        inversion_counts_v = []
        for idx, val in enumerate(ordering):
            idx_end = int(np.where(ordering == idx)[0])
            inversion_counts_v.append(np.sum(ordering[:idx_end] > idx))
        # The last entry is always 0 and is dropped by convention.
        return inversion_counts_v[:-1]

    def ordering(self, inverse_count):
        """Compute ordering from inverse count vector"""
        ordering = np.ones(self._K, dtype=int) * -1
        for action, val in enumerate(inverse_count):
            for idx, established in enumerate(ordering):
                if established > -1:
                    continue
                if val == 0:
                    ordering[idx] = action
                    break
                if established == -1:
                    val -= 1
        # last action fills the single remaining unassigned slot
        ordering[np.where(ordering == -1)] = self._K - 1
        return ordering
| [
"kuklevaanna@gmail.com"
] | kuklevaanna@gmail.com |
3f6f20932447ab92f92ee5991e43992a14450eca | 8baec0fc6e2e2e4b46e7880df9dbaa313c01272f | /data/cn_few_fusion_dataset.py | f4be2acb0d640f67343116793852b0c2840a0172 | [
"BSD-2-Clause"
] | permissive | hologerry/BicycleGAN | 6ce4884fdaf8d4c5231dae537b3f0f552856add9 | 64671c38058744d49e988980770d11b72466c59b | refs/heads/master | 2021-06-26T07:33:16.941169 | 2019-08-20T12:38:44 | 2019-08-20T12:38:44 | 149,060,743 | 0 | 0 | NOASSERTION | 2019-03-13T05:07:19 | 2018-09-17T02:56:34 | Python | UTF-8 | Python | false | false | 4,109 | py | import os
import random
from PIL import Image, ImageFilter
from data.base_dataset import BaseDataset, transform_few_with_label
from data.image_folder import make_dataset
class CnFewFusionDataset(BaseDataset):
    """Few-shot Chinese glyph fusion dataset.

    Each image on disk is a horizontal triptych [A | B | C] of equal widths:
    A is the reference glyph, B the gray shape, C the color gradient.
    __getitem__ additionally loads `nencode` style exemplars of other
    characters (from the 'style' split) plus Gaussian-blurred copies of
    their shape/color panels.
    """
    @staticmethod
    def modify_commandline_options(parser, is_train):
        # No dataset-specific CLI options; parser returned unchanged.
        return parser
    def rreplace(self, s, old, new, occurrence):
        # Replace the *last* `occurrence` matches of `old` in `s`.
        li = s.rsplit(old, occurrence)
        return new.join(li)
    def initialize(self, opt):
        self.opt = opt
        self.root = opt.dataroot
        self.dir_ABC = os.path.join(opt.dataroot, opt.phase)
        self.ABC_paths = sorted(make_dataset(self.dir_ABC))
        # self.chars = list(range(500)) # only use 500 of 639 to train, and the remain 139 as test set
        # guarantee consistent for test
        # so just shuffle 500 once
        # Hard-coded pre-shuffled character ids so train/test splits stay
        # reproducible across runs.
        self.shuffled_gb639list = [172, 370, 222, 37, 220, 317, 333, 494, 468, 25,
                                   440, 208, 488, 177, 167, 104, 430, 383, 422, 174,
                                   441, 475, 473, 72, 9, 389, 132, 412, 24, 288,
                                   453, 372, 181, 322, 115, 34, 345, 243, 188, 118,
                                   142, 197, 429, 358, 223, 121, 20, 241, 178, 238,
                                   272, 182, 384, 295, 490, 98, 96, 476, 226, 129,
                                   305, 28, 207, 351, 193, 378, 390, 353, 452, 240,
                                   477, 214, 306, 373, 63, 248, 323, 109, 21, 381,
                                   393, 263, 111, 92, 231, 114, 218, 69, 482, 252,
                                   257, 300, 283, 420, 62, 154, 146, 478, 89, 419]
        assert(opt.few_size <= len(self.shuffled_gb639list))
        self.chars = self.shuffled_gb639list[:opt.few_size]
    def __getitem__(self, index):
        # Load the triptych and split it into its three equal-width panels.
        ABC_path = self.ABC_paths[index]
        ABC = Image.open(ABC_path).convert('RGB')
        w3, h = ABC.size
        w = int(w3 / 3)
        A = ABC.crop((0, 0, w, h))
        B = ABC.crop((w, 0, w+w, h))
        C = ABC.crop((w+w, 0, w+w+w, h))
        Bases = []
        Shapes = []
        Colors = []
        Style_paths = []
        blur_Shapes = []
        blur_Colors = []
        # Character id is encoded in the file name as ..._<char>.png.
        target_char = int(ABC_path.split('_')[-1].split('.')[0])
        ABC_path_c = ABC_path
        # label marks whether the target character is in the few-shot set.
        label = 0.0
        if target_char in self.chars:
            label = 1.0
        # for shapes
        # NOTE(review): shuffling self.chars in-place mutates dataset state on
        # every fetch; presumably intentional to randomize exemplar choice.
        random.shuffle(self.chars)
        chars_random = self.chars[:self.opt.nencode]
        for char in chars_random:
            s_path = self.rreplace(ABC_path_c, str(target_char), str(char), 1) # /path/to/img/XXXX_XX_XXX.png
            s_path = s_path.replace(self.opt.phase, 'style')
            Style_paths.append(s_path)
            Bases.append(Image.open(s_path).convert('RGB').crop((0, 0, w, h)))
            Shapes.append(Image.open(s_path).convert('RGB').crop((w, 0, w+w, h)))
            Colors.append(Image.open(s_path).convert('RGB').crop((w+w, 0, w+w+w, h)))
            # Blurred copies use a random Gaussian radius in [2, 4).
            blur_Shapes.append(
                Image.open(s_path).convert('RGB').crop((w, 0, w+w, h)).filter(
                    ImageFilter.GaussianBlur(radius=(random.random()*2+2)))
            )
            blur_Colors.append(
                Image.open(s_path).convert('RGB').crop((w+w, 0, w+w+w, h)).filter(
                    ImageFilter.GaussianBlur(radius=(random.random()*2+2)))
            )
        A, B, B_G, C, C_G, C_l, label, Bases, Shapes, Colors, blur_Shapes, blur_Colors = \
            transform_few_with_label(self.opt, A, B, C, label, Bases, Shapes, Colors, blur_Shapes, blur_Colors)
        # A is the reference, B is the gray shape, C is the gradient
        return {'A': A, 'B': B, 'B_G': B_G, 'C': C, 'C_G': C_G, 'C_l': C_l, 'label': label,
                'Bases': Bases, 'Shapes': Shapes, 'Colors': Colors,
                'blur_Shapes': blur_Shapes, 'blur_Colors': blur_Colors,
                'ABC_path': ABC_path, 'Style_paths': Style_paths,
                }
    def __len__(self):
        return len(self.ABC_paths)
    def name(self):
        return 'CnFewFusionDataset'
| [
"hologerry@gmail.com"
] | hologerry@gmail.com |
6a833dc13c4576d7d6ac68aa2ac28032e4b16eb8 | edbf8601ae771031ad8ab27b19c2bf450ca7df76 | /45-Jump-Game-II/JumpGameII.py3 | 68cb781b045a49898b021dd462bc34abdeadfb91 | [] | no_license | gxwangdi/Leetcode | ec619fba272a29ebf8b8c7f0038aefd747ccf44a | 29c4c703d18c6ff2e16b9f912210399be427c1e8 | refs/heads/master | 2022-07-02T22:08:32.556252 | 2022-06-21T16:58:28 | 2022-06-21T16:58:28 | 54,813,467 | 3 | 2 | null | 2022-06-21T16:58:29 | 2016-03-27T05:02:36 | Java | UTF-8 | Python | false | false | 581 | py3 | class Solution:
def jump(self, nums: List[int]) -> int:
if nums == None or len(nums) == 0 :
return -1
size = len(nums)
if size == 1 :
return 0
dp = [sys.maxsize]*size
dp[0] = 0
cur = 1
for i in range(size) :
far = i + nums[i]
value = dp[i] + 1
if far >= size -1:
return value
if far < cur :
continue
while cur <= far:
dp[cur] = value
cur+=1
return dp[-1]
| [
"gxwangdi@gmail.com"
] | gxwangdi@gmail.com |
25aadc99c54d46377158797eb238e1e889e95e9b | d9d6250eb862e4b4cace91f5d7ab82bc70ea689c | /src/comment/migrations/0001_initial.py | 4b471ce54e4a68c5da6989acac2be0b2de8ce46f | [] | no_license | belal-bh/CLIC_PUST | f6ae867115899733722d356b1f27a1bc78eee89f | 59c251e621ac2f6460bd4faa31aad5e569a060c2 | refs/heads/master | 2022-04-08T13:05:06.795597 | 2020-03-15T10:12:45 | 2020-03-15T10:12:45 | 212,201,928 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,577 | py | # Generated by Django 2.2.1 on 2019-10-10 07:10
import account.helpers
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations; creates the initial Comment
    # table.  Change the schema via new migrations, not by editing this one.
    initial = True
    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
                ('image', models.ImageField(blank=True, height_field='height_field', null=True, upload_to=account.helpers.UploadTo('image'), width_field='width_field')),
                ('height_field', models.IntegerField(default=0)),
                ('width_field', models.IntegerField(default=0)),
                ('object_id', models.PositiveIntegerField()),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                # content_type + object_id form a generic relation so a comment
                # can attach to any model; parent enables threaded replies.
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='comment.Comment')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"bh.pro.pust@gmail.com"
] | bh.pro.pust@gmail.com |
01aeafb98ed6d93725ba3ab260a74eaa6daeeb51 | 34599596e145555fde0d4264a1d222f951f49051 | /pcat2py/class/20dd5e82-5cc5-11e4-af55-00155d01fe08.py | a599a33e0e06904fc5484a89cf9782cb80531146 | [
"MIT"
] | permissive | phnomcobra/PCAT2PY | dc2fcbee142ce442e53da08476bfe4e68619346d | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | refs/heads/master | 2021-01-11T02:23:30.669168 | 2018-02-13T17:04:03 | 2018-02-13T17:04:03 | 70,970,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | #!/usr/bin/python
################################################################################
# 20dd5e82-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
    """PCAT compliance check: the SeProfileSingleProcessPrivilege user-rights
    assignment may only be held by BUILTIN\\Administrators.
    """
    def __init__(self):
        self.output = []           # human-readable evidence lines from check()
        self.is_compliant = False  # result of the most recent check()
        self.uuid = "20dd5e82-5cc5-11e4-af55-00155d01fe08"

    def check(self, cli):
        """Return True when every account holding the privilege is allowed.

        Args:
            cli: object exposing get_secedit_account(name) -> list of account
                strings from the local security policy.
        """
        # Initialize Compliance
        self.is_compliant = True

        # Get Accounts
        usernames = cli.get_secedit_account('SeProfileSingleProcessPrivilege')

        # Output Lines
        self.output = [("SeProfileSingleProcessPrivilege=")] + usernames

        # Recommended account.  Raw string so the backslash is unambiguous:
        # the old "BUILTIN\Administrators" literal relied on '\A' not being a
        # recognized escape, which warns on modern Python.
        rec_usernames = r"BUILTIN\Administrators"

        for user in usernames:
            # Case-insensitive substring test against the allowed account.
            if user.lower() not in rec_usernames.lower():
                self.is_compliant = False

        return self.is_compliant
| [
"phnomcobra@gmail.com"
] | phnomcobra@gmail.com |
abce8b6224be5ad4780574b9df6386674fd23647 | 227ecf8b7967cfcf3bb0822d268941c04a05bd20 | /matrix_comp_approx_colored.py | dda878aef7ff1809716989e29e163264cbf6539a | [] | no_license | johnjasa/derivative_comparisons | 1a8f3dba62dd9e081537cb6ecf4a1df93192893b | d50a1f86042841b37804fbb3abbc600f3870cce5 | refs/heads/master | 2021-05-18T17:54:42.906729 | 2020-04-06T17:45:13 | 2020-04-06T17:45:13 | 251,347,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | import numpy as np
import openmdao.api as om
class MatrixComp(om.ExplicitComponent):
    """Banded test component: each output is the sum of 4th powers over a
    sliding window of a randomly shifted, tiled copy of the inputs.

    Used to exercise OpenMDAO's partial-derivative coloring on a banded
    sparsity pattern.
    """
    def initialize(self):
        # Component options with their defaults.
        self.options.declare('num_inputs', default=2)
        self.options.declare('num_outputs', default=5)
        self.options.declare('bandwidth', default=2)
        self.options.declare('random_seed', default=314)
    def setup(self):
        self.add_input('x', shape=self.options['num_inputs'])
        self.add_output('y', shape=self.options['num_outputs'])
        # Finite-difference partials, colored via complex step to exploit the
        # banded sparsity (see declare_coloring docs for the interaction).
        self.declare_partials('y', 'x', method='fd')
        self.declare_coloring('*', method='cs', show_summary=True)
        # Fixed seed so the random shift (and thus the model) is reproducible.
        np.random.seed(self.options['random_seed'])
        self.random_array = np.random.random_sample(self.options['num_inputs'])
    def compute(self, inputs, outputs):
        num_inputs = self.options['num_inputs']
        num_outputs = self.options['num_outputs']
        bandwidth = self.options['bandwidth']
        x = inputs['x']
        y = outputs['y']
        x_and_random = x + self.random_array
        # Tile enough copies that every window of length `bandwidth` exists.
        tiled_x = np.tile(x_and_random, int(np.ceil(num_outputs / num_inputs) + bandwidth))
        for i in range(num_outputs):
            y[i] = np.sum(tiled_x[i:i+bandwidth]**4)
| [
"johnjasa11@gmail.com"
] | johnjasa11@gmail.com |
f66590ed24326e5a66bd05a44b6fe1bd619b3f61 | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano1227.py | 942da9b5fe104de59a0e5faf6af34254b248e801 | [] | no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,293 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/120000/1784FCF9-7DFD-AA45-AEA1-5EBCEDE11A59.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest1227.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion | [
"rsk146@scarletmail.rutgers.edu"
] | rsk146@scarletmail.rutgers.edu |
5023035cb29590f585108c7aee78dc4373800804 | c6053ad14e9a9161128ab43ced5604d801ba616d | /Lemon/Python_Base/Lesson10_object_20181117/homework_02.py | c3bfb59c5135b0b7432c470d7a36aa6518d3cc6c | [] | no_license | HesterXu/Home | 0f6bdace39f15e8be26031f88248f2febf33954d | ef8fa0becb687b7b6f73a7167bdde562b8c539be | refs/heads/master | 2020-04-04T00:56:35.183580 | 2018-12-25T02:48:51 | 2018-12-25T02:49:05 | 155,662,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,063 | py | # -*- coding: utf-8 -*-
# @Time : 2018/11/17/13:35
# @Author : Hester Xu
# Email : xuruizhu@yeah.net
# @File : homework_02.py
# @Software : PyCharm
'''
2:定义一个学生类。
1)有下面的类属性: 1 姓名 2 年龄 3 成绩(语文,数学,英语)[每课成绩的类型为整数] ,均放在初始化函数里面。
2)类方法:
a)获取学生的姓名:get_name() 返回类型:str
b)获取学生的年龄:get_age() 返回类型:int
c) 返回3门科目中最高的分数。get_course() 返回类型:int
写好类以后,可以定义2个同学测试下: zm = Student('zhangming',20,[69,88,100]) 返回结果: zhangming 20 100
'''
class Student:
    """A student with a name, an age, and a list of course scores."""

    def __init__(self, name, age, score):
        # score: the course marks, e.g. [chinese, math, english] as ints.
        self.name, self.age, self.score = name, age, score

    def get_name(self):
        """Return the student's name (str)."""
        return self.name

    def get_age(self):
        """Return the student's age (int)."""
        return self.age

    def get_course(self):
        """Return the highest score among the student's courses (int)."""
        return max(self.score)
# Smoke test from the assignment: construct one student and exercise each
# accessor (expected output: zhangming / 20 / 100).
zm = Student('zhangming', 20, [69, 88, 100])
print(zm.get_name())
print(zm.get_age())
print(zm.get_course())
| [
"xuruizhu@yeah.net"
] | xuruizhu@yeah.net |
6ed3e6a009cf9820d10c5b2bcec7966bc71920da | 9f2c8c6b9c7caac464193fa9a995dc7244f3aac5 | /Exercicios Curso Em Video Mundo 2/ex038.py | bc7fb3f83e250eb62ce07ca8dab2bccf6cde09df | [
"MIT"
] | permissive | JorgeTranin/Python_Curso_Em_Video | a5c1a119e30aa08663d5b3e3d86625fb852ccbe8 | be74c9301aafc055bdf883be649cb8b7716617e3 | refs/heads/master | 2021-06-13T23:29:36.184378 | 2020-04-10T00:49:25 | 2020-04-10T00:49:25 | 254,464,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | n1 = int(input('Digite um numero! '))
# Read the second number and report which of the two is larger.
# (n1 is read just above; all user-facing Portuguese strings kept verbatim.)
n2 = int(input('Digite outro numero! '))
if n1 > n2:
    print('Entre {} e {} O primeiro valor é maior'.format(n1, n2))
elif n2 > n1:
    print('Entre {} e {} O segundo valor é maior.'.format(n1, n2))
else:
    # The three cases are exhaustive, so a plain `else` replaces the
    # redundant `elif n1 == n2` check.
    print('Os dois valores são iguais.')
| [
"antoniotraninjorge@gmail.com"
] | antoniotraninjorge@gmail.com |
fc50e5a2055a8a78a3042ca9d49a37270c2e9c4b | 108034973f9046a7603d5fe3f26c59b20a7e68da | /lab/lab13/tests/schedule.py | 4247e34134bdaacf49dedc64c9d011381688e8f3 | [] | no_license | paulhzq/cs61a | b1b1387cefbaaf1823c02d535891db7d085f3b04 | 9eee13df9ad113591dc55d106561951cea34abc5 | refs/heads/master | 2020-05-23T08:16:14.193086 | 2017-01-15T02:06:18 | 2017-01-15T02:06:18 | 70,255,875 | 8 | 8 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | test = {
  # OK autograder fixture: the sqlite transcript below is compared verbatim
  # against the output of lab13.sql -- do not reformat the expected rows.
  'name': 'schedule',
  'points': 0,
  'suites': [
    {
      'cases': [
        {
          'code': r"""
          sqlite> SELECT * FROM schedule;
          SFO, SLC, PDX|176
          SFO, LAX, PDX|186
          SFO, PDX|192
          """,
          'hidden': False,
          'locked': False
        }
      ],
      'ordered': True,
      'scored': True,
      'setup': r"""
      sqlite> .read lab13.sql
      """,
      'teardown': '',
      'type': 'sqlite'
    }
  ]
}
| [
"paul_hzq@hotmail.com"
] | paul_hzq@hotmail.com |
676e220636adf6125be74d69a020cc4d43e83248 | 556417a05b437c111290287df47a39f15fb28f4b | /apps/payement/forms.py | bc9b95551bc0609de4ac2cd8b711096f021e1781 | [] | no_license | root92/test-erp | 74626f7b0ce423e9451dd0cc9371ed644a9b8af9 | ef108353b5a886822574bded7f2f0b323c483c37 | refs/heads/master | 2020-04-21T20:53:04.401368 | 2018-01-30T16:10:15 | 2018-01-30T16:10:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,227 | py | from django import forms
from .models import Payement, Fees
class PayementForm(forms.ModelForm):
    # ModelForm for recording a payment: the fee being paid, the paying
    # student, and the amount.  Labels are French UI strings.
    class Meta:
        model = Payement
        fields =['fees', 'student', 'amount']
        labels = {
            'student': 'Elève',
            'fees': 'frais',
            'amount': 'Montant'
        }
        # Bootstrap-styled widgets for each field.
        widgets = {
            'student': forms.Select(attrs={'class': 'form-control form-element' }),
            'fees': forms.Select(attrs={'class': 'form-control form-element' }),
            'amount': forms.TextInput(attrs={'class': 'form-control form-element' }),
        }
class FeeForm(forms.ModelForm):
    # ModelForm for creating/editing a fee type (label, amount, description).
    class Meta:
        model = Fees
        fields = ['label', 'fee_value', 'fee_description']
        labels = {
            'label': 'Libellé',
            'fee_value': 'Montant',
            'fee_description': 'Description',
        }
        widgets = {
            'label': forms.TextInput(attrs={'class': 'form-control form-element' }),
            'fee_value': forms.TextInput(attrs={'class': 'form-control form-element' }),
            # NOTE(review): 'required' inside widget attrs only affects the
            # rendered HTML attribute; to make the field optional server-side
            # use required=False on the form field -- confirm the intent.
            'fee_description':forms.Textarea(attrs={'class': 'form-control admis-process-comment',
                                                    'required':False})
        }
| [
"souleymanemoudou@gmail.com"
] | souleymanemoudou@gmail.com |
9b4423958aa920b68ecdc3b7b0b67fddf60b8c27 | f13acd0d707ea9ab0d2f2f010717b35adcee142f | /ABC/abc101-abc150/abc142/a.py | 5e40cee36eb2e3dbbc38c7b5b5e18aa6317544d4 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | KATO-Hiro/AtCoder | 126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7 | bf43320bc1af606bfbd23c610b3432cddd1806b9 | refs/heads/master | 2023-08-18T20:06:42.876863 | 2023-08-17T23:45:21 | 2023-08-17T23:45:21 | 121,067,516 | 4 | 0 | CC0-1.0 | 2023-09-14T21:59:38 | 2018-02-11T00:32:45 | Python | UTF-8 | Python | false | false | 165 | py | # -*- coding: utf-8 -*-
def main():
    """Read integer N from stdin and print ceil(N/2) / N.

    For AtCoder ABC142 A: the probability that a uniformly chosen integer
    from 1..N is odd.
    """
    from math import ceil
    n = int(input())
    print(ceil(n / 2) / n)
if __name__ == '__main__':
    main()
| [
"k.hiro1818@gmail.com"
] | k.hiro1818@gmail.com |
6808d2b19dcde91927041394b1afc5ea14c5e750 | a1a43879a2da109d9fe8d9a75f4fda73f0d7166b | /api/tests_v2/compare.py | 867fb572fcc46f017e8682e5674ec51fc82d49ca | [] | no_license | PaddlePaddle/benchmark | a3ed62841598d079529c7440367385fc883835aa | f0e0a303e9af29abb2e86e8918c102b152a37883 | refs/heads/master | 2023-09-01T13:11:09.892877 | 2023-08-21T09:32:49 | 2023-08-21T09:32:49 | 173,032,424 | 78 | 352 | null | 2023-09-14T05:13:08 | 2019-02-28T03:14:16 | Python | UTF-8 | Python | false | false | 1,874 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common_import import *
class CompareConfig(APIConfig):
    """Benchmark config for elementwise comparison ops.

    `api_list` maps each Paddle comparison API name to its TensorFlow
    counterpart; the op actually benchmarked is selected via `api_name`.
    """
    def __init__(self):
        super(CompareConfig, self).__init__('compare')
        self.api_name = 'less_than'
        self.api_list = {
            'less_than': 'less',
            'less_equal': 'less_equal',
            'not_equal': 'not_equal',
            'greater_than': 'greater',
            'greater_equal': 'greater_equal',
            'equal': 'equal'
        }
class PDCompare(PaddleAPIBenchmarkBase):
    """Paddle side of the comparison benchmark."""
    def build_program(self, config):
        # Two feed tensors whose shapes/dtypes come from the config.
        x = self.variable(name='x', shape=config.x_shape, dtype=config.x_dtype)
        y = self.variable(name='y', shape=config.y_shape, dtype=config.y_dtype)
        result = self.layers(config.api_name, x=x, y=y)
        self.feed_vars = [x, y]
        self.fetch_vars = [result]
class TFCompare(TensorflowAPIBenchmarkBase):
    """TensorFlow side of the comparison benchmark (mirrors PDCompare)."""
    def build_graph(self, config):
        x = self.variable(name='x', shape=config.x_shape, dtype=config.x_dtype)
        y = self.variable(name='y', shape=config.y_shape, dtype=config.y_dtype)
        result = self.layers(config.api_name, x=x, y=y)
        self.feed_list = [x, y]
        self.fetch_list = [result]
# Run the paired Paddle/TF benchmark when executed as a script.
if __name__ == '__main__':
    test_main(PDCompare(), TFCompare(), config=CompareConfig())
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
5ff1f136a4a394975d0d1989cb5cf7d296f32655 | 3bf0bdebf785063ce1a721d4a83750ba0b5033df | /src/sentry/web/frontend/remove_project.py | 985d0a3a4168278f42470b58ee4dbe6b15abec9a | [
"BSD-2-Clause"
] | permissive | TaurusTiger/sentry | cf932d3fbac81673157ef5f483bbb3daf6a664f3 | dca33172b70d0cf79a56f751543eea364ce92ee6 | refs/heads/master | 2021-01-21T19:13:43.098303 | 2015-10-10T00:41:24 | 2015-10-10T00:41:24 | 43,991,907 | 1 | 0 | null | 2015-10-10T03:19:34 | 2015-10-10T03:19:33 | null | UTF-8 | Python | false | false | 1,884 | py | from __future__ import absolute_import
from django import forms
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from sentry.api import client
from sentry.models import OrganizationMemberType
from sentry.permissions import can_remove_project
from sentry.web.frontend.base import ProjectView
class RemoveProjectForm(forms.Form):
pass
class RemoveProjectView(ProjectView):
required_access = OrganizationMemberType.OWNER
sudo_required = True
def get_form(self, request):
if request.method == 'POST':
return RemoveProjectForm(request.POST)
return RemoveProjectForm()
def get(self, request, organization, team, project):
if not can_remove_project(request.user, project):
return HttpResponseRedirect(reverse('sentry'))
form = self.get_form(request)
context = {
'form': form,
}
return self.respond('sentry/projects/remove.html', context)
def post(self, request, organization, team, project):
if not can_remove_project(request.user, project):
return HttpResponseRedirect(reverse('sentry'))
form = self.get_form(request)
if form.is_valid():
client.delete('/projects/{}/{}/'.format(organization.slug, project.slug),
request.user, is_sudo=True)
messages.add_message(
request, messages.SUCCESS,
_(u'The project %r was scheduled for deletion.') % (project.name.encode('utf-8'),))
return HttpResponseRedirect(reverse('sentry-organization-home', args=[team.organization.slug]))
context = {
'form': form,
}
return self.respond('sentry/projects/remove.html', context)
| [
"dcramer@gmail.com"
] | dcramer@gmail.com |
66317284cc07a9785b1fa7a0ff525d864ac27676 | e51b99514bd9b12c7cde4128549aa0206e0391f3 | /24 swapPairs.py | c571fe8d4ecf91d4d33a5163b3d27c4323825f6d | [] | no_license | ABenxj/leetcode | 5f65d2a90f79a32c8d9387bb6c4a655061d004cd | f2c162654a83c51495ebd161f42a1d0b69caf72d | refs/heads/main | 2023-05-14T11:55:28.180609 | 2021-06-08T01:11:54 | 2021-06-08T01:11:54 | 347,963,922 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | #!/usr/bin/env pyhton
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 , Inc. All Rights Reserved
#
"""
Authors: jufei
Date: 2021/4/7 4:19 PM
"""
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def swapPairs(self, head: ListNode) -> ListNode:
"""
准备好三个指针,交换顺序,并依次向后遍历
:param head:
:return:
"""
if not head or not head.next:
return head
ans = ListNode(0)
ans.next = head
pre, left, right = ans, head, head.next
while True:
left.next = right.next
right.next = left
pre.next = right
if not left.next or not left.next.next:
break
pre, left, right = left, left.next, left.next.next
return ans.next
| [
"jufei@wecash.net"
] | jufei@wecash.net |
90501e32e6ea9c14c125b254dcf091e8d125b049 | fe19d2fac4580d463132e61509bd6e3cc2cf958d | /toontown/coghq/CashbotMintLavaRoomFoyer_Battle00.py | 060df18a0bba15f595366b19d1077ab11dca586c | [] | no_license | t00nt0wn1dk/c0d3 | 3e6db6dd42c3aa36ad77709cf9016176a3f3a44f | 7de105d7f3de0f8704b020e32fd063ee2fad8d0d | refs/heads/master | 2021-01-01T16:00:15.367822 | 2015-03-21T21:25:52 | 2015-03-21T21:25:55 | 32,647,654 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 3,269 | py | # 2013.08.22 22:18:15 Pacific Daylight Time
# Embedded file name: toontown.coghq.CashbotMintLavaRoomFoyer_Battle00
from toontown.coghq.SpecImports import *
GlobalEntities = {1000: {'type': 'levelMgr',
'name': 'LevelMgr',
'comment': '',
'parentEntId': 0,
'cogLevel': 0,
'farPlaneDistance': 1500,
'modelFilename': 'phase_10/models/cashbotHQ/ZONE18a',
'wantDoors': 1},
1001: {'type': 'editMgr',
'name': 'EditMgr',
'parentEntId': 0,
'insertEntity': None,
'removeEntity': None,
'requestNewEntity': None,
'requestSave': None},
0: {'type': 'zone',
'name': 'UberZone',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
10004: {'type': 'battleBlocker',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(23.908908844, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'cellId': 0,
'radius': 10},
10002: {'type': 'model',
'name': 'crates',
'comment': '',
'parentEntId': 10001,
'pos': Point3(17.3283443451, 20.1608715057, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/crates_C1.bam'},
10003: {'type': 'model',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10001,
'pos': Point3(-14.04317379, 20.9443073273, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/crates_E.bam'},
10006: {'type': 'model',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10003,
'pos': Point3(-3.16324114799, -0.608929097652, 5.57751512527),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/crates_C1.bam'},
10000: {'type': 'nodepath',
'name': 'cogs',
'comment': '',
'parentEntId': 0,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Point3(0.0, 0.0, 0.0),
'scale': 1},
10001: {'type': 'nodepath',
'name': 'props',
'comment': '',
'parentEntId': 0,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10005: {'type': 'nodepath',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10000,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Point3(-90.0, 0.0, 0.0),
'scale': 1}}
Scenario0 = {}
levelSpec = {'globalEntities': GlobalEntities,
'scenarios': [Scenario0]}
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\coghq\CashbotMintLavaRoomFoyer_Battle00.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:18:15 Pacific Daylight Time
| [
"anonymoustoontown@gmail.com"
] | anonymoustoontown@gmail.com |
5b01280a33dbeeca6cee9f2a38e5def7526cefc2 | 3b53aa80a584416a9c8e0de4efb8ef682012bf9e | /0x11-python-network_1/10-my_github.py | 11ea426758188bcc5229f9716f56b1d970c29f2a | [] | no_license | Diegokernel/holbertonschool-higher_level_programming | c273c140b1761046f1a7db80a135d87115c34a9b | 7ebd07e947d6c9a9173699d117741eae38dfcdbe | refs/heads/master | 2020-05-18T01:31:17.582237 | 2019-10-04T04:13:23 | 2019-10-04T04:13:23 | 184,092,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | #!/usr/bin/python3
"""takes your Github credentials (username and password) and uses the Github API to display your id"""
import requests
import sys
if __name__ == "__main__":
page = "https://api.github.com/user"
q = (sys.argv[1], sys.argv[2])
req = requests.get(page, auth=q)
print(req.json().get("id"))
| [
"777@holbertonschool.com"
] | 777@holbertonschool.com |
bd0f4f29e65e2be6d51c4e9d8be129c9ac840a5b | 44064ed79f173ddca96174913910c1610992b7cb | /Second_Processing_app/temboo/Library/Withings/Measure/GetActivityMetrics.py | 9847037ed05b8219cb3ec705519d9d2a852c6162 | [] | no_license | dattasaurabh82/Final_thesis | 440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5 | 8edaea62f5987db026adfffb6b52b59b119f6375 | refs/heads/master | 2021-01-20T22:25:48.999100 | 2014-10-14T18:58:00 | 2014-10-14T18:58:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,651 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# GetActivityMetrics
# Retrieves activity metrics for the specified user.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetActivityMetrics(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetActivityMetrics Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
Choreography.__init__(self, temboo_session, '/Library/Withings/Measure/GetActivityMetrics')
def new_input_set(self):
return GetActivityMetricsInputSet()
def _make_result_set(self, result, path):
return GetActivityMetricsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetActivityMetricsChoreographyExecution(session, exec_id, path)
class GetActivityMetricsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetActivityMetrics
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessTokenSecret(self, value):
"""
Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process.)
"""
InputSet._set_input(self, 'AccessTokenSecret', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
"""
InputSet._set_input(self, 'AccessToken', value)
def set_ConsumerKey(self, value):
"""
Set the value of the ConsumerKey input for this Choreo. ((required, string) The Consumer Key provided by Withings.)
"""
InputSet._set_input(self, 'ConsumerKey', value)
def set_ConsumerSecret(self, value):
"""
Set the value of the ConsumerSecret input for this Choreo. ((required, string) The Consumer Secret provided by Withings.)
"""
InputSet._set_input(self, 'ConsumerSecret', value)
def set_Date(self, value):
"""
Set the value of the Date input for this Choreo. ((required, date) The date for the log in YYYY-MM-DD format.)
"""
InputSet._set_input(self, 'Date', value)
def set_UserID(self, value):
"""
Set the value of the UserID input for this Choreo. ((required, string) The ID of the user to retrieve activity metrics for.)
"""
InputSet._set_input(self, 'UserID', value)
class GetActivityMetricsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetActivityMetrics Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Withings.)
"""
return self._output.get('Response', None)
class GetActivityMetricsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetActivityMetricsResultSet(response, path)
| [
"dattasaurabh82@gmail.com"
] | dattasaurabh82@gmail.com |
95f26936b10e68352c2da05ab0c55e794949d63f | 1624fd1db522c3d8b7533418cec09793ca6f80a3 | /setup.py | 2dc7d322bcf4973fbaedb0117b1d89744453ce88 | [
"MIT"
] | permissive | yuwin/UnbalancedDataset | 7c3444f1f3b82a0c0b941c514096c39a330eb4e7 | e97ea2f23e9c06d44c6cbc14145db87f104f61a7 | refs/heads/master | 2021-01-18T13:04:18.082366 | 2016-06-27T23:51:38 | 2016-06-27T23:51:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,078 | py | #! /usr/bin/env python
"""Toolbox for imbalanced dataset in machine learning."""
import sys
import os
import codecs
from setuptools import setup, find_packages
def load_version():
"""Executes imblearn/version.py in a globals dictionary and
return it.
"""
# load all vars into globals, otherwise
# the later function call using global vars doesn't work.
globals_dict = {}
with codecs.open(os.path.join('imblearn', 'version.py'),
encoding='utf-8-sig') as fp:
exec(fp.read(), globals_dict)
return globals_dict
def is_installing():
# Allow command-lines such as "python setup.py build install"
install_commands = set(['install', 'develop'])
return install_commands.intersection(set(sys.argv))
# Make sources available using relative paths from this file's directory.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
descr = """Toolbox for imbalanced dataset in machine learning."""
_VERSION_GLOBALS = load_version()
DISTNAME = 'imbalanced-learn'
DESCRIPTION = 'Toolbox for imbalanced dataset in machine learning.'
LONG_DESCRIPTION = descr
MAINTAINER = 'Fernando Nogueira, Guillaume Lemaitre'
MAINTAINER_EMAIL = 'fmfnogueira@gmail.com, g.lemaitre58@gmail.com'
URL = 'https://github.com/fmfn/UnbalancedDataset'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'https://github.com/fmfn/UnbalancedDataset'
VERSION = _VERSION_GLOBALS['__version__']
if __name__ == "__main__":
if is_installing():
module_check_fn = _VERSION_GLOBALS['_check_module_dependencies']
module_check_fn(is_imbalanced_dataset_installing=True)
install_requires = \
['%s>=%s' % (mod, meta['min_version'])
for mod, meta in _VERSION_GLOBALS['REQUIRED_MODULE_METADATA']
if not meta['required_at_installation']]
setup(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
zip_safe=False, # the package can run out of an .egg file
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
packages=find_packages(),
install_requires=install_requires,)
| [
"glemaitre@visor.udg.edu"
] | glemaitre@visor.udg.edu |
76ebde0afed83ac4627c0e5b5ade1bb9588d1735 | 47f4e3aabb6dcb0f9a48c8a5634eac1523b71b2c | /edit_being/qyaddons/ct_pos_ticket/__manifest__.py | 75d13c59ac772e7fda752e19009424c2c23dd1b7 | [] | no_license | marvin981973/odoo-2 | 485b7815b639da17400f38ab2200fb6956486451 | f45a562b1bd962697f096e7f7bc57b131b3e11f3 | refs/heads/master | 2020-06-26T06:22:16.520775 | 2018-03-11T13:26:04 | 2018-03-11T13:26:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | # -*- coding: utf-8 -*-
{
'name': 'POS小票',
'summary': '修改打印小票格式',
'description': """
修改POS内部连接小票打印机打印出来的内容格式
""",
'category': 'other',
'version': '1.0',
'author': '今晨科技|企通软件',
'website': 'http://www.168nz.cn/',
'depends': ['base', 'web','point_of_sale'],
'data': [
'views/template.xml',
],
'qweb': [
'static/src/xml/*.xml',
],
'installable': True,
'application': True,
} | [
"guwenfengvip@163.com"
] | guwenfengvip@163.com |
30e40d8e872dd61da615410d1d1d9f51cb8e0986 | 29fb2eb3b9bb21b529e814da53518fab2958693a | /bayesian_treatment/10_table_Electron_table_Comparison.py | a2d7ec3151e13a8c90fa98b2d96e424c973e65e7 | [] | no_license | Vital-Fernandez/thesis_pipeline | acca734b1a2ce11b0bee5bd41fab534022ea295e | 1253e2ed94e0f502a16cae6b88f84b633d0f16c2 | refs/heads/master | 2022-05-31T10:15:47.241645 | 2021-05-18T17:43:44 | 2021-05-18T17:43:44 | 90,319,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,892 | py | from dazer_methods import Dazer
from lib.CodeTools.sigfig import round_sig
from uncertainties import unumpy
from collections import OrderedDict
from pylatex import Package, NoEscape
from numpy import isnan
from pandas import isnull
import pandas as pd
import numpy as np
import uncertainties as un
from uncertainties.umath import pow as umath_pow, log10 as umath_log10, exp as umath_exp, isnan as un_isnan
def colorChooser(ObsRatio, TheRatio):
if (TheRatio * 0.95 < ObsRatio < TheRatio * 1.05):
color = 'ForestGreen' # 'green'#
elif (TheRatio * 0.90 < ObsRatio < TheRatio * 1.10):
color = 'YellowOrange' # 'yellow'#
else:
color = 'BrickRed'
return color
#Load observational data
bayes_catalogue_df_address = '/home/vital/Dropbox/Astrophysics/Data/WHT_observations/WHT_BayesianResults.txt'
bayes_catalogue_df = pd.read_csv(bayes_catalogue_df_address, delim_whitespace=True, header=0, index_col=0)
#Define data to load
# Import library object
dz = Dazer()
dz.load_elements()
# Load observational data
catalogue_dict = dz.import_catalogue()
catalogue_df = dz.load_excel_DF('/home/vital/Dropbox/Astrophysics/Data/WHT_observations/WHT_Galaxies_properties.xlsx')
AbundancesFileExtension = '_' + catalogue_dict['Datatype'] + '_linesLog_emission_2nd.txt'
dz.quick_indexing(catalogue_df)
# Reddening properties
R_v = 3.4
red_curve = 'G03_average'
cHbeta_type = 'cHbeta_emis'
# Define data to load
ext_data = '_emis2nd'
ext_data_bayes = ''
pdf_address = '/home/vital/Dropbox/Astrophysics/Thesis/tables/objProperties_Preamble'
# Headers
properties_list = ['neSII', 'TeSIII', 'TeOIII']
properties_list = map((lambda x: x + ext_data), properties_list)
properties_list_bayes = ['neSII', 'TeSIII']
headers_format = ['HII Galaxy', r'$\frac{[OIII]\lambda5007\AA}{[OIII]\lambda4959\AA}$', r'$\frac{[SIII]\lambda9531\AA}{[SIII]\lambda9069\AA}$']
headers_format += [r'$n_{e}[SII](cm^{-3})$', r'$T_{e}[SIII](K)$', r'$T_{e}[OIII](K)$']
headers_format += ['$n_{e}(cm^{-3})$', r'$T_{low}(K)$', r'$T_{high}(K)$']
# Set the pdf format
dz.create_pdfDoc(pdf_address, pdf_type='table')
dz.pdf_insert_table(headers_format)
for objName in catalogue_df.loc[dz.idx_include].index:
ouput_folder = '{}{}/'.format(catalogue_dict['Obj_Folder'], objName)
lineslog_address = '{objfolder}{codeName}{lineslog_extension}'.format(objfolder=ouput_folder, codeName=objName, lineslog_extension=AbundancesFileExtension)
# Load lines frame
lineslog_frame = dz.load_lineslog_frame(lineslog_address)
# Perform the reddening correction
cHbeta = catalogue_df.loc[objName, cHbeta_type]
dz.deredden_lines(lineslog_frame, reddening_curve=red_curve, cHbeta=cHbeta, R_v=R_v)
# Sulfur ratios
if set(lineslog_frame.index) >= set(['S3_9069A', 'S3_9531A']):
s3_ratio = lineslog_frame.loc['S3_9531A'].line_Int / lineslog_frame.loc['S3_9069A'].line_Int
s3_color = colorChooser(s3_ratio.nominal_value, dz.S3_ratio)
s3_entry = r'\textcolor{' + s3_color + '}{' + dz.format_for_table(s3_ratio, rounddig=3) + '}'
else:
s3_entry = '-'
# Oxygen ratios
if set(lineslog_frame.index) >= set(['O3_4959A', 'O3_5007A']):
O3_ratio = lineslog_frame.loc['O3_5007A'].line_Int / lineslog_frame.loc['O3_4959A'].line_Int
O3_color = colorChooser(O3_ratio.nominal_value, dz.O3_5000_ratio)
O3_entry = r'\textcolor{' + O3_color + '}{' + dz.format_for_table(O3_ratio, rounddig=3) + '}'
else:
O3_entry = '-'
# Fill the table
if (catalogue_df.loc[objName].T_low == 'TeSIII') and (catalogue_df.loc[objName].T_high == 'TeOIII'):
exponent = ''
elif (catalogue_df.loc[objName].T_low != 'TeSIII'):
exponent = 'O'
else:
exponent = 'S'
# Add the Bayesian data
bayesCodeName = '{}'.format(bayes_catalogue_df.loc[objName].quick_index)
bayes_values = []
print '------', bayesCodeName, objName
if bayesCodeName not in ['SHOC588', 'SHOC592', 'SHOC036', 'SHOC575', 'SHOC579', 'SHOC220']:
objData = bayes_catalogue_df.loc[objName]
for param in properties_list_bayes:
param_value = objData[param]
param_err = objData[param + '_err']
param_un = un.ufloat(param_value, param_err)
if np.isnan(param_un.nominal_value):
param_un = np.nan
bayes_values.append(param_un)
param_un = (1.0807 * param_un / 10000.0 - 0.0846) * 10000.0
bayes_values.append(param_un)
else:
bayes_values = ['-', '-', '-']
entry_name = '{codename}$^{{{elements}}}$'.format(codename=catalogue_df.loc[objName].quick_index, elements=exponent)
T_low_entry = r'$T_{e}[SIII]$' if catalogue_df.loc[objName].T_low == 'TeSIII' else r'$T_{e}[SIII] eq.16$'
T_high_entry = r'$T_{e}[OIII]$' if catalogue_df.loc[objName].T_high == 'TeOIII' else r'$T_{e}[OIII] eq.16$'
row = [entry_name] + [O3_entry] + [s3_entry] + list(catalogue_df.loc[objName, properties_list].values) + bayes_values
dz.addTableRow(row, last_row=False if catalogue_df.index[-1] != objName else True, rounddig=3)
dz.generate_pdf(clean_tex=False)
# dz.generate_pdf(output_address=pdf_address)
print 'Table generated'
# from dazer_methods import Dazer
# from uncertainties import unumpy
# from collections import OrderedDict
# from pylatex import Package, NoEscape
# from numpy import isnan
# from pandas import isnull
# import pandas as pd
# import numpy as np
# import uncertainties as un
# from uncertainties.umath import pow as umath_pow, log10 as umath_log10, exp as umath_exp, isnan as un_isnan
#
# dz = Dazer()
#
# #Load observational data
# bayes_catalogue_df_address = '/home/vital/Dropbox/Astrophysics/Data/WHT_observations/WHT_BayesianResults.txt'
# bayes_catalogue_df = pd.read_csv(bayes_catalogue_df_address, delim_whitespace=True, header=0, index_col=0)
#
# #Define data to load
# ext_data = ''
# pdf_address = '/home/vital/Dropbox/Astrophysics/Thesis/tables/bayes_AbundancesTable'
#
# #Headers
# headers_dic = OrderedDict()
# headers_dic['HeI_HI'] = r'$\nicefrac{He}{H}$'
# headers_dic['Ymass_O'] = r'$Y_{\left(\nicefrac{O}{H}\right)}$'
# headers_dic['Ymass_S'] = r'$Y_{\left(\nicefrac{S}{H}\right)}$'
# headers_dic['OI_HI'] = r'$12 + log\left(\nicefrac{O}{H}\right)$'
# headers_dic['NI_HI'] = r'$12 + log\left(\nicefrac{N}{H}\right)$'
# headers_dic['SI_HI'] = r'$12 + log\left(\nicefrac{S}{H}\right)$'
#
# properties_list = map(( lambda x: x + ext_data), headers_dic.keys())
# headers_format = ['HII Galaxy'] + headers_dic.values()
#
# # Create a new list for the different entries
# metals_list = properties_list[:]
#
# del metals_list[metals_list.index('HeI_HI' + ext_data)]
# del metals_list[metals_list.index('Ymass_O' + ext_data)]
# del metals_list[metals_list.index('Ymass_S' + ext_data)]
#
# #Set the pdf format
# dz.pdf_insert_table(headers_format)
#
# print properties_list
#
# for objName in bayes_catalogue_df.index:
#
# entry_name = '{}'.format(bayes_catalogue_df.loc[objName].quick_index)
#
# if entry_name not in ['SHOC588', 'SHOC592', 'SHOC036', 'SHOC575', 'SHOC579', 'SHOC220']:
#
# objData = bayes_catalogue_df.loc[objName]
# row = [entry_name]
#
# for param in properties_list:
# param_value = objData[param]
# param_err = objData[param + '_err']
# param_un = un.ufloat(param_value, param_err)
#
# if param not in ['HeI_HI', 'Ymass_O', 'Ymass_S']:
# param_un = 12 + umath_log10(param_un)
#
# if np.isnan(param_un.nominal_value):
# param_un = np.nan
#
# row.append(param_un)
#
# dz.addTableRow(row, last_row = False if bayes_catalogue_df.index[-1] != objName else True, rounddig=3, rounddig_er=1)
#
# dz.generate_pdf()
# #dz.generate_pdf(output_address=pdf_address)
| [
"vital.fernandez@gmail.com"
] | vital.fernandez@gmail.com |
d543b03fd232f81b04d4ea29f1993ad04ba26c94 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/automation/v20180115/outputs.py | 0a8b07f580f75db3fc25b8e64b9658b630192036 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,179 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = [
'DscConfigurationAssociationPropertyResponse',
]
@pulumi.output_type
class DscConfigurationAssociationPropertyResponse(dict):
"""
The Dsc configuration property associated with the entity.
"""
def __init__(__self__, *,
name: Optional[str] = None):
"""
The Dsc configuration property associated with the entity.
:param str name: Gets or sets the name of the Dsc configuration.
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Gets or sets the name of the Dsc configuration.
"""
return pulumi.get(self, "name")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| [
"noreply@github.com"
] | MisinformedDNA.noreply@github.com |
62b22fda6d4ef03350bbf3914df64d4c0dc25f95 | 03d68ceacf35455d5cd692411940400bcf7d8541 | /tools/coded/ipconvert.py | 8c7976dcd974585fe6525b7feb923f28afa0f24c | [] | no_license | j4ckzh0u/ctf-tools-1 | 569822fe102e54084ff26916760205598ab9db3f | 119a5b4b73a032d49740ab371055e9f2400cb79a | refs/heads/master | 2021-05-24T12:49:44.102597 | 2020-03-31T06:48:27 | 2020-03-31T06:48:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,696 | py | #coding=utf-8
#version 1.2
import sys
def tab_to_8(binip):
if len(binip)>8:
raise Exception('lenth error')
return '0'*(8-len(binip))+binip
def dot_to_bin(ip):
ip=str(ip)
if ip.count('.')!=3:
return False
ip=ip.split('.')
return ''.join([tab_to_8(str(bin(int(i,base=10)))[2:]) for i in ip])
def int_to_dot(ip):
ip=bin(ip)[2:]
if len(ip)>32:
return False
ip='0'*(32-len(ip))+ip
return '.'.join([str(int(ip[i*8:(i+1)*8],base=2)) for i in range(4)])
def dot_to_oct(dot_ip):
ip=dot_ip.split('.')
if len(ip)!=4:
return False
return '0'+'.'.join([oct(int(i))[2:] for i in ip])
def main(ip):
out='dot: {}\nbin: {}\nhex: {}\nint: {}\noct: {}'
if ip=='exit()':
exit()
elif ip[:2]=='0b' or ip[:2]=='0x' or ip.find('.')==-1:#二进制输入||十六进制输入||十进制输入
if ip[:2]=='0b':
ip=int(ip,base=2)
elif ip[:2]=='0x':
ip=int(ip,base=16)
else:
ip=int(ip)
dot_ip=int_to_dot(ip)
if dot_ip==False:
print('ip format error')
return
bin_ip=dot_to_bin(dot_ip)
else:
bin_ip=dot_to_bin(ip)
if bin_ip==False:#格式不正确
print('ip format error')
return
dot_ip=ip
ip=int(bin_ip,base=2)
#输出
print(out.format(dot_ip,bin_ip,hex(int(bin_ip,base=2))[2:],ip,dot_to_oct(dot_ip)))
if len(sys.argv)==2:
ip=sys.argv[1]
print()
main(ip)
exit()
print('ps:输入二进制ip需要以0b开头,十六进制以0x开头')
if __name__ == "__main__":
while True:
ip=input('input ip:')
main(ip) | [
"yun1067530461@gmail.com"
] | yun1067530461@gmail.com |
6d82dde142112a41c6c2e0432c936797e40d7fb7 | 79e19819aec49b500825f82a7de149eb6a0ba81d | /leetcode/104.py | d15b1378939a65e4139d4810208f43daccfa2bcb | [] | no_license | seoyeonhwng/algorithm | 635e5dc4a2e9e1c50dc0c75d9a2a334110bb8e26 | 90406ee75de69996e666ea505ff5d9045c2ad941 | refs/heads/master | 2023-05-03T16:51:48.454619 | 2021-05-26T00:54:40 | 2021-05-26T00:54:40 | 297,548,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def maxDepth(self, root: TreeNode) -> int:
if not root:
return 0
queue = collections.deque([root])
depth = 0
while queue:
depth += 1
for _ in range(len(queue)):
v = queue.popleft()
if v.left:
queue.append(v.left)
if v.right:
queue.append(v.right)
return depth
| [
"seoyeon@nowbusking.com"
] | seoyeon@nowbusking.com |
0e2b20cc7003718d91f5888ba076de4eff653767 | b76c08a4c33245a737fa0e139d212bb424017cd1 | /src/cybersource/tests/test_models.py | 0de6ae3f4b7ba8af4a49ab21716ba81bce88f55b | [
"ISC"
] | permissive | thelabnyc/django-oscar-cybersource | 5b09845121ef1c074335c01e86c649c36e4e51e4 | 95b33362adf8ba0217ac73c6f816b544c9faa18d | refs/heads/master | 2023-03-15T15:25:55.388795 | 2023-03-14T16:00:07 | 2023-03-14T16:00:07 | 58,149,620 | 4 | 3 | ISC | 2023-02-07T22:17:15 | 2016-05-05T17:45:52 | Python | UTF-8 | Python | false | false | 3,376 | py | from django.test import TestCase
from ..models import CyberSourceReply, PaymentToken, SecureAcceptanceProfile
from .factories import build_accepted_token_reply_data
class PaymentTokenTest(TestCase):
def test_log_data_parsing(self):
data = build_accepted_token_reply_data("S123456789", "")
log = CyberSourceReply.objects.create(
data=data,
auth_avs_code=data.get("auth_avs_code"),
auth_code=data.get("auth_code"),
auth_response=data.get("auth_response"),
auth_trans_ref_no=data.get("auth_trans_ref_no"),
decision=data.get("decision"),
message=data.get("message"),
reason_code=data.get("reason_code"),
req_bill_to_address_postal_code=data.get("req_bill_to_address_postal_code"),
req_bill_to_forename=data.get("req_bill_to_forename"),
req_bill_to_surname=data.get("req_bill_to_surname"),
req_card_expiry_date=data.get("req_card_expiry_date"),
req_reference_number=data.get("req_reference_number"),
req_transaction_type=data.get("req_transaction_type"),
req_transaction_uuid=data.get("req_transaction_uuid"),
request_token=data.get("request_token"),
transaction_id=data.get("transaction_id"),
)
token = PaymentToken.objects.create(
log=log,
token=data["payment_token"],
masked_card_number=data["req_card_number"],
card_type=data["req_card_type"],
)
self.assertEqual(token.card_type_name, "Visa")
self.assertEqual(token.billing_zip_code, "10001")
self.assertEqual(token.expiry_month, "12")
self.assertEqual(token.expiry_year, "2020")
self.assertEqual(token.card_last4, "1111")
self.assertEqual(token.card_holder, "Bob Smith")
class SecureAcceptanceProfileTest(TestCase):
def setUp(self):
SecureAcceptanceProfile.objects.create(
hostname="foo.example.com",
profile_id="a",
access_key="",
secret_key="",
is_default=False,
)
SecureAcceptanceProfile.objects.create(
hostname="bar.example.com",
profile_id="b",
access_key="",
secret_key="",
is_default=False,
)
SecureAcceptanceProfile.objects.create(
hostname="www.example.com",
profile_id="c",
access_key="",
secret_key="",
is_default=True,
)
def test_get_profile(self):
profile = SecureAcceptanceProfile.get_profile("foo.example.com")
self.assertEqual(profile.profile_id, "a")
profile = SecureAcceptanceProfile.get_profile("bar.example.com")
self.assertEqual(profile.profile_id, "b")
profile = SecureAcceptanceProfile.get_profile("www.example.com")
self.assertEqual(profile.profile_id, "c")
def test_default_fallback(self):
profile = SecureAcceptanceProfile.get_profile("baz.example.com")
self.assertEqual(profile.profile_id, "c")
def test_no_profiles(self):
SecureAcceptanceProfile.objects.all().delete()
profile = SecureAcceptanceProfile.get_profile("www.example.com")
self.assertEqual(profile.profile_id, "2A37F989-C8B2-4FEF-ACCF-2562577780E2")
| [
"crgwbr@gmail.com"
] | crgwbr@gmail.com |
9bd9fd8e914cfb6c6e9206d96e6448f17e74db1a | dfb4cb8d916b62d7272ca353302d1ad95e4d7244 | /qa/rpc-tests/forknotify.py | cb1481fcf20133fcbce7f26965cb5cf73b0cf0e7 | [
"MIT"
] | permissive | mirzaei-ce/core-shahbit | d166ab47067bf66c3015c3da49ff31cd29f843db | 57ad738667b3d458c92d94aee713c184d911c537 | refs/heads/master | 2021-07-21T11:09:22.493418 | 2017-10-25T13:50:55 | 2017-10-25T13:50:55 | 108,276,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,086 | py | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test -alertnotify
#
from test_framework.test_framework import ShahbitTestFramework
from test_framework.util import *
class ForkNotifyTest(ShahbitTestFramework):
alert_filename = None # Set by setup_network
def setup_network(self):
self.nodes = []
self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
with open(self.alert_filename, 'w') as f:
pass # Just open then close to create zero-length file
self.nodes.append(start_node(0, self.options.tmpdir,
["-blockversion=2", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""]))
# Node1 mines block.version=211 blocks
self.nodes.append(start_node(1, self.options.tmpdir,
["-blockversion=211"]))
connect_nodes(self.nodes[1], 0)
self.is_network_split = False
self.sync_all()
def run_test(self):
# Mine 51 up-version blocks
self.nodes[1].generate(51)
self.sync_all()
# -alertnotify should trigger on the 51'st,
# but mine and sync another to give
# -alertnotify time to write
self.nodes[1].generate(1)
self.sync_all()
with open(self.alert_filename, 'r') as f:
alert_text = f.read()
if len(alert_text) == 0:
raise AssertionError("-alertnotify did not warn of up-version blocks")
# Mine more up-version blocks, should not get more alerts:
self.nodes[1].generate(1)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
with open(self.alert_filename, 'r') as f:
alert_text2 = f.read()
if alert_text != alert_text2:
raise AssertionError("-alertnotify excessive warning of up-version blocks")
if __name__ == '__main__':
ForkNotifyTest().main()
| [
"mirzaei@ce.sharif.edu"
] | mirzaei@ce.sharif.edu |
495615fd0a075747a90732de5998be193f2a7a0a | 4081698d691baafc58343c72a721622cec251f67 | /tools/testing/cross_language/util/cli_daead.py | d1bc265be0234911d65cf6485037529b47aeb990 | [
"Apache-2.0"
] | permissive | thalescpl-io/tink | 5ac62a54b73414402f6b600cff0fd21a4f999137 | 0d1769b28cabe2a60daca9b8da0bd14def54bc21 | refs/heads/master | 2021-03-10T03:27:58.161079 | 2020-05-15T23:45:42 | 2020-05-15T23:45:42 | 246,412,910 | 0 | 0 | Apache-2.0 | 2020-03-10T21:33:19 | 2020-03-10T21:33:18 | null | UTF-8 | Python | false | false | 3,134 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wraps a Deterministic AEAD CLI into a Python Tink DeterministicAead class."""
# Placeholder for import for type annotations
import os
import subprocess
import tempfile
import tink
from tink import cleartext_keyset_handle
from tink import daead
from typing import Text
# All languages that have an Deterministic AEAD CLI.
LANGUAGES = ('cc', 'go', 'java', 'python')
# Path are relative to tools directory.
_DAEAD_CLI_PATHS = {
'cc': 'testing/cc/deterministic_aead_cli_cc',
'go': 'testing/go/deterministic_aead_cli_go',
'java': 'testing/deterministic_aead_cli_java',
'python': 'testing/python/deterministic_aead_cli_python',
}
def _tools_path() -> Text:
util_path = os.path.dirname(os.path.abspath(__file__))
return os.path.dirname(os.path.dirname(os.path.dirname(util_path)))
class CliDeterministicAead(daead.DeterministicAead):
"""Wraps Deterministic AEAD CLI binary into a DeterministicAead primitive."""
def __init__(self, lang: Text, keyset_handle: tink.KeysetHandle) -> None:
self.lang = lang
self._cli = os.path.join(_tools_path(), _DAEAD_CLI_PATHS[lang])
self._keyset_handle = keyset_handle
def _run(self, operation: Text, input_data: bytes,
associated_data: bytes) -> bytes:
with tempfile.TemporaryDirectory() as tmpdir:
keyset_filename = os.path.join(tmpdir, 'keyset_file')
input_filename = os.path.join(tmpdir, 'input_file')
associated_data_filename = os.path.join(tmpdir, 'associated_data_file')
output_filename = os.path.join(tmpdir, 'output_file')
with open(keyset_filename, 'wb') as f:
cleartext_keyset_handle.write(
tink.BinaryKeysetWriter(f), self._keyset_handle)
with open(input_filename, 'wb') as f:
f.write(input_data)
with open(associated_data_filename, 'wb') as f:
f.write(associated_data)
try:
unused_return_value = subprocess.check_output([
self._cli, keyset_filename, operation,
input_filename, associated_data_filename, output_filename
])
except subprocess.CalledProcessError as e:
raise tink.TinkError(e)
with open(output_filename, 'rb') as f:
output_data = f.read()
return output_data
def encrypt_deterministically(
self, plaintext: bytes, associated_data: bytes) -> bytes:
return self._run('encryptdeterministically', plaintext, associated_data)
def decrypt_deterministically(
self, ciphertext: bytes, associated_data: bytes) -> bytes:
return self._run('decryptdeterministically', ciphertext, associated_data)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
21e7f14bf83ed3670db484b437bab5433bc03ac0 | 2901c198fd36f16e59e22e37d748497bdc51246e | /firstproject/clients/migrations/0008_client_client_id.py | 405e84e77e18398b1f41294fbdefe19d60698974 | [] | no_license | Sarathsathyan/FREELANCING- | b81803340983e4396ee1be032d75367ce416ea79 | bb800f900757ffb757ddb95e2c3c5924785f3386 | refs/heads/master | 2020-05-27T11:47:54.465644 | 2019-08-22T17:40:47 | 2019-08-22T17:40:47 | 188,605,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | # Generated by Django 2.2.1 on 2019-07-06 05:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clients', '0007_auto_20190706_0513'),
]
operations = [
migrations.AddField(
model_name='client',
name='client_id',
field=models.IntegerField(null=True),
),
]
| [
"sarathsathyan98@gmail.com"
] | sarathsathyan98@gmail.com |
2ff9e5a093af8bb5e1ef34ea5c281a6cdf3c10be | 7debcea5a702835479a3639e5deed7ed3f277d65 | /텍스트마이닝 - 네이버 영화 리뷰 크롤링.py | b7567a8752a29953d33e33ae10b7f85119214f35 | [] | no_license | swj8905/Intermediate_Course_0918 | 902db757e130332c7f3d64aa1007a1d0c8a62508 | e2199888d84006934001e1863ce4ec10819fc7f2 | refs/heads/master | 2023-08-11T04:40:45.978468 | 2021-09-26T03:47:17 | 2021-09-26T03:47:17 | 407,747,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | from bs4 import BeautifulSoup
import urllib.request as req
page_num = 1
while True:
code = req.urlopen("https://movie.naver.com/movie/bi/mi/pointWriteFormList.nhn?code=204496&type=after&isActualPointWriteExecute=false&isMileageSubscriptionAlready=false&isMileageSubscriptionReject=false&page={}".format(page_num))
soup = BeautifulSoup(code, "html.parser")
comment = soup.select("li > div.score_reple > p > span")
if len(comment) == 0:
break
for i in comment:
i = i.text.strip()
if i == "관람객":
continue
print(i)
page_num += 1 | [
"swj8905@naver.com"
] | swj8905@naver.com |
174e32b528f75a1f2e37b3ade6a4145d9a082f66 | 705649d075e112e5546c5d01bf0ae45122c251ea | /account/admin.py | ecb8ced5f615b776cab362d94afa4ab3e2ee07e4 | [] | no_license | liuyuhang791034063/LaoLiu_blog | ffbb81f72ed86803bbebfbae9397aaefdff4d0cc | b9352d1ea84533aa948b342c39e512f134df7acd | refs/heads/master | 2020-03-13T20:40:41.224540 | 2018-05-23T05:44:45 | 2018-05-23T05:44:45 | 131,279,834 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | from django.contrib import admin
from .models import UserProfile,UserInfo
class UserProfileAdmin(admin.ModelAdmin):
list_display = ('user','birth','phone')
list_filter = ("phone",)
admin.site.register(UserProfile, UserProfileAdmin)
class UserInfoAdmin(admin.ModelAdmin):
list_display = ('user','school','company','profession','address','aboutme','photo')
list_filter = ('school','company','profession')
admin.site.register(UserInfo,UserInfoAdmin) | [
"liuyuhang791034063@qq.com"
] | liuyuhang791034063@qq.com |
a1fbde175cd3d2f6a0772b2147af4995a3d118cc | c31e69b763e1b52d3cefa4f5a49432ae966f22d0 | /day31/07_漏斗图.py | 5f9a116ddb867d090212802276bb1f64595e7a71 | [] | no_license | lvah/201901python | cbda174a3c97bc5a2f732c8e16fc7cf8451522d2 | 7bffe04a846f2df6344141f576820730a7bbfa6a | refs/heads/master | 2022-12-13T09:49:29.631719 | 2019-04-06T09:48:33 | 2019-04-06T09:48:33 | 165,477,671 | 3 | 0 | null | 2022-12-08T04:57:01 | 2019-01-13T07:23:44 | HTML | UTF-8 | Python | false | false | 326 | py | """
文件名: $NAME.py
日期: 22
作者: lvah
联系: xc_guofan@qq.com
代码描述:
"""
# Funnel
from pyecharts import Funnel
x_movies_name = ["猩球崛起", "敦刻尔克", "蜘蛛侠", "战狼2"]
y_16 = [20, 40, 60, 80]
funnel = Funnel("xxxx")
funnel.add("电影信息", x_movies_name, y_16)
funnel.render()
| [
"976131979@qq.com"
] | 976131979@qq.com |
be8fee0b6bd84369dcb6184b9d336616c62b9c1e | 52381a4fc02e90ce1fcfffd8d9876d9e8f44c248 | /core/domain/improvements_domain.py | 25ef52e9fe1a9039bf11be65260e769fa9f4e94e | [
"Apache-2.0"
] | permissive | ankita240796/oppia | 18aa1609a0f237ce76142b2a0d3169e830e5bcdd | ba4f072e494fd59df53fecc37e67cea7f9727234 | refs/heads/develop | 2022-07-11T01:11:53.136252 | 2022-06-30T08:55:49 | 2022-06-30T08:55:49 | 160,626,761 | 0 | 0 | Apache-2.0 | 2020-04-28T16:12:26 | 2018-12-06T06:02:18 | Python | UTF-8 | Python | false | false | 7,962 | py | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects related to Oppia improvement tasks."""
from __future__ import annotations
import datetime
from core import feconf
from core import utils
from core.constants import constants
from typing import Optional
from typing_extensions import TypedDict
class TaskEntryDict(TypedDict):
"""Dict for TaskEntry object."""
entity_type: str
entity_id: str
entity_version: int
task_type: str
target_type: str
target_id: str
issue_description: Optional[str]
status: str
resolver_username: Optional[str]
resolver_profile_picture_data_url: Optional[str]
resolved_on_msecs: Optional[float]
class TaskEntry:
"""Domain object representing an actionable task from the improvements tab.
Attributes:
entity_type: str. The type of entity the task entry refers to.
For example, "exploration".
entity_id: str. The ID of the entity the task entry refers to.
For example, an exploration ID.
entity_version: int. The version of the entity the task entry refers to.
For example, an exploration's version.
task_type: str. The type of task the task entry tracks.
target_type: str. The type of sub-entity the task entry refers to.
For example, "state" when entity type is "exploration".
target_id: str. The ID of the sub-entity the task entry refers to.
For example, the state name of an exploration.
issue_description: str or None. The sentence generated by Oppia to
describe why the task was created.
status: str. Tracks the state/progress of the task entry.
resolver_id: str or None. The corresponding user who resolved this task.
resolved_on: datetime or None. The datetime at which this task was
resolved.
"""
def __init__(
self,
entity_type: str,
entity_id: str,
entity_version: int,
task_type: str,
target_type: str,
target_id: str,
issue_description: Optional[str],
status: str,
resolver_id: Optional[str] = None,
resolved_on: Optional[datetime.datetime] = None
) -> None:
"""Initializes a new TaskEntry domain object from the given values.
Args:
entity_type: str. The type of entity the task entry refers to.
For example: "exploration".
entity_id: str. The ID of the entity the task entry refers to.
For example: an exploration ID.
entity_version: int. The version of the entity the task entry refers
to. For example: an exploration's version.
task_type: str. The type of task the task entry tracks.
target_type: str. The type of sub-entity the task entry refers to.
For example, when entity type is "exploration": "state".
target_id: str. The ID of the sub-entity the task entry refers to.
For example, the state name of an exploration.
issue_description: str. The sentence generated by Oppia to describe
why the task was created.
status: str. Tracks the state/progress of the task entry.
resolver_id: str. The corresponding user who resolved this task.
Only used when status is resolved, otherwise replaced with None.
resolved_on: datetime. The datetime at which this task was resolved.
Only used when status is resolved, otherwise replaced with None.
"""
if status != constants.TASK_STATUS_RESOLVED:
resolver_id = None
resolved_on = None
self.entity_type = entity_type
self.entity_id = entity_id
self.entity_version = entity_version
self.task_type = task_type
self.target_type = target_type
self.target_id = target_id
self.issue_description = issue_description
self.status = status
self.resolver_id = resolver_id
self.resolved_on = resolved_on
@property
def task_id(self) -> str:
"""Returns the unique identifier of this task.
Value has the form: "[entity_type].[entity_id].[entity_version].
[task_type].[target_type].[target_id]"
Returns:
str. The ID of this task.
"""
return feconf.TASK_ENTRY_ID_TEMPLATE % (
self.entity_type, self.entity_id, self.entity_version,
self.task_type, self.target_type, self.target_id)
@property
def composite_entity_id(self) -> str:
"""Utility field which results in a 20% speedup compared to querying by
each of the invididual fields used to compose it.
Value has the form: "[entity_type].[entity_id].[entity_version]".
Returns:
str. The value of the utility field.
"""
return feconf.COMPOSITE_ENTITY_ID_TEMPLATE % (
self.entity_type, self.entity_id, self.entity_version)
def to_dict(self) -> TaskEntryDict:
"""Returns a dict-representation of the task.
Returns:
dict. Contains the following keys:
entity_type: str. The type of entity the task entry refers to.
For example, "exploration".
entity_id: str. The ID of the entity the task entry refers to.
For example, an exploration ID.
entity_version: int. The version of the entity the task entry
refers to. For example, an exploration's version.
task_type: str. The type of task the task entry tracks.
target_type: str. The type of sub-entity the task entry refers
to. For example, "state" when entity type is "exploration".
target_id: str. The ID of the sub-entity the task entry refers
to. For example, the state name of an exploration.
issue_description: str. The sentence generated by Oppia to
describe why the task was created.
status: str. Tracks the state/progress of the task entry.
resolver_username: str|None. Username of the user who resolved
the task when status is resolved. Otherwise None.
resolver_profile_picture_data_url: str|None. Profile picture
URL of the user who resolved the task when status is
resolved. Otherwise None.
resolved_on_msecs: float|None. Time in
milliseconds since epoch at which the task was resolved
when status is resolved. Otherwise None.
"""
return {
'entity_type': self.entity_type,
'entity_id': self.entity_id,
'entity_version': self.entity_version,
'task_type': self.task_type,
'target_type': self.target_type,
'target_id': self.target_id,
'issue_description': self.issue_description,
'status': self.status,
'resolver_username': None,
'resolver_profile_picture_data_url': None,
'resolved_on_msecs': (
None if not self.resolved_on
else utils.get_time_in_millisecs(self.resolved_on)),
}
| [
"noreply@github.com"
] | ankita240796.noreply@github.com |
f5f81681f36f3471f4d27bbec8fce45ee8f30473 | 8157b3619467c8928f2c2d1669d115a00a4e1edc | /bert/optimization.py | 4b75429eaaf8be262b562847068edea6ec84d245 | [
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | soft-pure-empty/GEC-reaching-human-level | 0e332849d45533de99ab8b991e25379c0b9c7cc2 | 2cd542b4fbbb40f426ae6e4625142de17f385744 | refs/heads/master | 2022-10-27T17:19:02.645578 | 2019-03-06T13:06:40 | 2019-03-06T13:06:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,261 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions and classes related to optimization (weight updates)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):
"""Creates an optimizer training op."""
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
# Implements linear decay of the learning rate.
learning_rate = tf.train.polynomial_decay(
learning_rate,
global_step,
num_train_steps,
end_learning_rate=0.0,
power=1.0,
cycle=False)
# Implements linear warmup. I.e., if global_step < num_warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
if num_warmup_steps:
global_steps_int = tf.cast(global_step, tf.int32)
warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_learning_rate = init_lr * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
learning_rate = (
(1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
# It is recommended that you use this optimizer for fine tuning, since this
# is how the model was trained (note that the Adam m/v variables are NOT
# loaded from init_checkpoint.)
optimizer = AdamWeightDecayOptimizer(
learning_rate=learning_rate,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
if use_tpu:
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
tvars = tf.trainable_variables()
grads = tf.gradients(loss, tvars)
# This is how the model was pre-trained.
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
train_op = optimizer.apply_gradients(
zip(grads, tvars), global_step=global_step)
# Normally the global step update is done inside of `apply_gradients`.
# However, `AdamWeightDecayOptimizer` doesn't do this. But if you use
# a different optimizer, you should probably take this line out.
new_global_step = global_step + 1
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
return train_op
class AdamWeightDecayOptimizer(tf.train.Optimizer):
"""A basic Adam optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=None,
name="AdamWeightDecayOptimizer"):
"""Constructs a AdamWeightDecayOptimizer."""
super(AdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = learning_rate
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""See base class."""
assignments = []
for (grad, param) in grads_and_vars:
if grad is None or param is None:
continue
param_name = self._get_variable_name(param.name)
m = tf.get_variable(
name=param_name + "/adam_m",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
v = tf.get_variable(
name=param_name + "/adam_v",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
# Standard Adam update.
next_m = (
tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
next_v = (
tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
tf.square(grad)))
update = next_m / (tf.sqrt(next_v) + self.epsilon)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want ot decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * param
update_with_lr = self.learning_rate * update
next_param = param - update_with_lr
assignments.extend(
[param.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
return tf.group(*assignments, name=name)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
| [
"334973834@qq.com"
] | 334973834@qq.com |
1b87df6e5c9001abd520146c6fc11f2b78351d09 | f124a2bc35fa348d5f5b637eae2a736d67470c76 | /tf-hub2/vector_calcu.py | f04be4c7015163255248a369b1555c7e845c8767 | [
"Apache-2.0"
] | permissive | arfu2016/DuReader | fd173c0eb90abedad0ca65bd9b847ccd58bf567a | 66934852c508bff5540596aa71d5ce40c828b37d | refs/heads/master | 2021-04-06T05:45:13.002887 | 2018-09-06T03:58:26 | 2018-09-06T03:58:26 | 124,838,393 | 0 | 0 | Apache-2.0 | 2018-03-12T05:35:13 | 2018-03-12T05:35:13 | null | UTF-8 | Python | false | false | 5,454 | py | """
@Project : DuReader
@Module : vector_calcu.py
@Author : Deco [deco@cubee.com]
@Created : 5/15/18 10:44 AM
@Desc :
"""
"""
Created on Sun Aug 20 14:40:29 2017
@author: zimuliu
"""
from functools import reduce
from math import acos, pi
import numpy as np
class Vector:
def __init__(self, coordinates):
self.coordinates = tuple(coordinates)
self.dimension = len(coordinates)
def __str__(self):
return "%dD Vector: %s" % (self.dimension,
', '.join(["%.3f" % round(x, 3)
for x in self.coordinates]))
def __eq__(self, v):
"""两向量相等"""
return self.coordinates is v.coordinates
def _eq_dim(self, v):
"""两向量维度相同"""
assert self.dimension is v.dimension, \
"The dimensions of vectors must be equal!"
def _zero_vec(self):
"""零向量"""
assert self.magnitude() != 0, "Encount with zero vector!"
def plus(self, v):
"""两向量相加"""
self._eq_dim(v)
return Vector([x + y for x, y in zip(self.coordinates, v.coordinates)])
def plus2(self, v):
self._eq_dim(v)
temp = np.array(self.coordinates) + np.array(v.coordinates)
return Vector(temp.tolist())
def minus(self, v):
"""两向量相减"""
self._eq_dim(v)
return Vector([x - y for x, y in zip(self.coordinates, v.coordinates)])
def minus2(self, v):
self._eq_dim(v)
temp = np.array(self.coordinates) - np.array(v.coordinates)
return Vector(temp.tolist())
def scalar_mult(self, m):
"""向量乘以标量"""
return Vector([x * m for x in self.coordinates])
def scalar_mult2(self, m):
temp = np.array(self.coordinates)*m
return Vector(temp.tolist())
def magnitude(self, *args):
"""求向量的norm"""
return reduce(lambda x, y: x + y,
map(lambda z: z ** 2, self.coordinates)) ** 0.5
def magnitude2(self):
return np.linalg.norm(self.coordinates)
def direction(self, *args):
"""转化为向量所在方向的方向向量; 或者说,求单位向量"""
self._zero_vec()
return self.scalar_mult(1 / self.magnitude())
def dot_product(self, v):
"""求向量的点乘,与矩阵的内积有关联"""
self._eq_dim(v)
return reduce(lambda x, y: x + y,
[a * b for a, b in zip(self.coordinates, v.coordinates)])
def dot_product2(self, v):
self._eq_dim(v)
a = np.array(self.coordinates)
b = np.array(v.coordinates)
temp = np.dot(a, b)
print('temp in dot_product2:', temp)
print('type of temp:', type(temp))
print('type of temp.tolist():', type(temp.tolist()))
return temp.tolist()
def multiply_elementwise(self, v):
self._eq_dim(v)
return Vector([a * b for a, b in zip(self.coordinates, v.coordinates)])
def multiply_elementwise2(self, v):
self._eq_dim(v)
temp = np.multiply(self.coordinates, v.coordinates)
return temp.tolist()
def cross_product(self, v):
def cross(a, b):
c = [a[1] * b[2] - a[2] * b[1],
a[2] * b[0] - a[0] * b[2],
a[0] * b[1] - a[1] * b[0]]
return c
self._eq_dim(v)
a0 = self.coordinates
b0 = v.coordinates
return cross(a0, b0)
def cross_product2(self, v):
self._eq_dim(v)
a = np.array(self.coordinates)
b = np.array(v.coordinates)
temp = np.cross(a, b)
return temp.tolist()
def angle(self, v, degree=False):
"""求两个向量的夹角大小,可以表征两个向量的相似度;
可以选择用实数表示还是用度数表示"""
self._zero_vec()
v._zero_vec()
measurement = pi / 180 if degree else 1
return acos(self.dot_product(v) / (self.magnitude() * v.magnitude())) \
/ measurement
def parallelism(self, v, threshold=10e-6):
"""判断两个向量是否平行"""
self._eq_dim(v)
res = False
if self.magnitude() < threshold or v.magnitude() < threshold:
res = True
else:
ang = self.angle(v)
if ang < threshold or (pi - ang) < threshold:
res = True
return res
def orthogonality(self, v, threshold=10e-6):
"""判断两个向量是否垂直"""
return abs(self.dot_product(v)) < threshold
def projection(self, v):
"""求一个向量在另一个向量方向上的投影"""
_v = v.direction()
weight = self.dot_product(_v)
return _v.scalar_mult(weight)
if __name__ == '__main__':
a = Vector([1, 2])
b = Vector([3, 4])
print(a.magnitude())
print(a.magnitude2())
print(a.plus(b))
print(a.plus2(b))
print(a.minus(b))
print(a.minus2(b))
print(a.scalar_mult(2))
print(a.scalar_mult2(2))
print(a.dot_product(b))
print(a.dot_product2(b))
print(a.multiply_elementwise(b))
print(a.multiply_elementwise2(b))
print(a.angle(b))
print(a.parallelism(b))
print(a.orthogonality(b))
print(a.projection(b))
c = Vector([1, 2, 3])
d = Vector([4, 5, 6])
print(c.cross_product(d))
print(c.cross_product2(d))
| [
"deco@cubee.com"
] | deco@cubee.com |
08c7a0d5de9c427ddea43392421159401108dedc | 7704dfa69e81c8a2f22b4bdd2b41a1bdad86ac4a | /fuel_upgrade_system/fuel_upgrade/fuel_upgrade/tests/test_cli.py | b6d10faf846aeb2ea48f87e1d6b2f5b8c52536fa | [
"Apache-2.0"
] | permissive | andrei4ka/fuel-web-redhat | 8614af4567d2617a8420869c068d6b1f33ddf30c | 01609fcbbae5cefcd015b6d7a0dbb181e9011c14 | refs/heads/master | 2022-10-16T01:53:59.889901 | 2015-01-23T11:00:22 | 2015-01-23T11:00:22 | 29,728,913 | 0 | 0 | Apache-2.0 | 2022-09-16T17:48:26 | 2015-01-23T10:56:45 | Python | UTF-8 | Python | false | false | 2,251 | py | # -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from fuel_upgrade import errors
from fuel_upgrade import messages
from fuel_upgrade.cli import parse_args
from fuel_upgrade.cli import run_upgrade
from fuel_upgrade.tests.base import BaseTestCase
@mock.patch('fuel_upgrade.cli.CheckerManager', mock.Mock())
@mock.patch('fuel_upgrade.cli.PreUpgradeHookManager', mock.Mock())
@mock.patch('fuel_upgrade.cli.UpgradeManager', mock.Mock())
@mock.patch('fuel_upgrade.cli.build_config')
class TestAdminPassword(BaseTestCase):
default_args = ['host-system', '--src', '/path']
def get_args(self, args):
return parse_args(args)
def test_use_password_arg(self, mbuild_config):
password = '12345678'
args = self.get_args(self.default_args + ['--password', password])
run_upgrade(args)
mbuild_config.assert_called_once_with(
mock.ANY, password
)
@mock.patch('fuel_upgrade.cli.getpass')
def test_ask_for_password(self, mgetpass, mbuild_config):
password = '987654321'
mgetpass.getpass.return_value = password
args = self.get_args(self.default_args)
run_upgrade(args)
mbuild_config.assert_called_once_with(
mock.ANY, password
)
@mock.patch('fuel_upgrade.cli.getpass')
def test_no_password_provided(self, mgetpass, mbuild_config):
password = ''
mgetpass.getpass.return_value = password
with self.assertRaisesRegexp(errors.CommandError,
messages.no_password_provided):
args = self.get_args(self.default_args)
run_upgrade(args)
| [
"akirilochkin@mirantis.com"
] | akirilochkin@mirantis.com |
7890a12e113f4a009322f64939ac986783a5565f | 372b1321c545757308aa1ef93a3584d5674af40b | /2017/07/solver.py | 13c3dd9fa254b6922c9fe0e5e47fa2453220fdac | [] | no_license | verdouxscience/advent-of-code | a10b129959a75c4821af1b831f88b89e71857bae | 1f993f1104c818a8a0a459357c1be9a78bd33198 | refs/heads/main | 2023-04-09T10:20:44.307794 | 2021-04-05T01:55:18 | 2021-04-05T01:55:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | from aoc_parser import Parser
from aoc_board import Grid, Point, Graph
FILE_NAME = 'data'
class Node:
def __init__(self, value):
value = value.split()
self.id = value[0]
self.weight = int(value[1][1:-1])
def __eq__(self, o):
return str(self) == str(o)
def __hash__(self):
return hash(str(self))
def __repr__(self):
return str(self)
def __str__(self):
return '{}: {}'.format(self.id, self.weight)
def main():
graph = get_graph()
# Part 1: xegshds
top_most = graph.top_most()
print('Part 1: {}'.format(top_most))
# Part 2: 299
graph.get_weight(graph.get_node(top_most))
print('Part 2: {}'.format(graph.to_change))
def get_graph():
graph = Graph()
for line in Parser(FILE_NAME).lines():
line = line.split(' -> ')
node = Node(line[0])
graph.add_node(node)
if len(line) == 2:
for edge in line[1].split(', '):
graph.add_edge(node, edge)
return graph
if __name__ == '__main__':
main()
| [
"suslikovvd@gmail.com"
] | suslikovvd@gmail.com |
c47a2131c66e6a0693914c73f4f493137080963c | aaa6354278eb889264e8cb2ee5877cd8f79d4c04 | /torchwisdom/core/progress.py | 667f9142230617d462920c94bed175de1b0f41fa | [
"MIT"
] | permissive | nunenuh/torchwisdom | 88682ff71a87ebe7c01fbc149b9040e9a26fde89 | 0a0e5dda84d59243a084b053d98f2eabd76474f5 | refs/heads/master | 2020-04-27T09:11:33.078513 | 2019-05-12T13:33:48 | 2019-05-12T13:33:48 | 174,204,225 | 8 | 4 | MIT | 2020-03-08T22:44:04 | 2019-03-06T19:06:45 | Python | UTF-8 | Python | false | false | 3,712 | py | from fastprogress import master_bar
from fastprogress.fastprogress import isnotebook
from torchwisdom.core.callback import Callback
from typing import *
from torchwisdom.core.statemgr import StateManager
from datetime import timedelta
# __all__ = []
class ProgressTable(object):
def __init__(self):
pass
def time_formatter(sec, last_cut=-4)->str:
return str(timedelta(seconds=sec))[:last_cut]
def format_text(text, empty_space=15):
ltext=len(text)
if empty_space>ltext:
len_iter = empty_space-ltext
space = "".join([" " for i in range(len_iter)])
out = space+text
else:
out = " "+text+" "
return out
def build_line_console(line, use_tab=False):
str_build = ""
for ln in line:
text = format_text(ln)
str_build+=text
if use_tab: str_build+="\t"
return str_build
def time_delta_remain(epoch_state):
delta_last = epoch_state.get('time')[-1]
delta = time_formatter(delta_last)
remain_last = epoch_state.get('remain')[-1]
remain = time_formatter(remain_last)
return delta, remain
def time_delta_remain_resume(epoch_state, epoch):
delta_last = epoch_state.get('time')[epoch]
delta = time_formatter(delta_last)
remain_last = epoch_state.get('remain')[epoch]
remain = time_formatter(remain_last)
return delta, remain
def line_builder(metric_state: Dict, epoch, tdelta, tremain):
train: Dict = metric_state.get('train')
valid: Dict = metric_state.get('valid')
line = [f'{epoch}']
for key in train.keys():
line.append(f"{train[key]['mean'][-1]:.6f}")
line.append(f"{valid[key]['mean'][-1]:.6f}")
line.append(f'{tdelta}')
line.append(f'{tremain}')
if isnotebook():
return line
else:
return build_line_console(line)
def line_builder_resume(metric_state: Dict, epoch, tdelta, tremain):
    """Build the table row for historical epoch *epoch* when resuming."""
    train: Dict = metric_state.get('train')
    valid: Dict = metric_state.get('valid')
    cells = [f'{epoch+1}']
    for name in train.keys():
        cells.append(f"{train[name]['epoch'][epoch]:.6f}")
        cells.append(f"{valid[name]['epoch'][epoch]:.6f}")
    cells.append(f'{tdelta}')
    cells.append(f'{tremain}')
    # Notebook front-ends consume the raw cell list; consoles need a string.
    if isnotebook():
        return cells
    return build_line_console(cells)
def line_head_builder(metric_state: Dict):
    """Build the header row: epoch, trn_/val_ metric pairs, time, remain."""
    train: Dict = metric_state.get('train')
    header = ['epoch']
    for name in train.keys():
        header.append(f'trn_{name}')
        header.append(f'val_{name}')
    header.append('time')
    header.append('remain')
    if isnotebook():
        return header
    return build_line_console(header)
def graph_builder(metric_state: Dict, trainer_state: Dict):
    """Assemble ``[[x, train_loss], [x, valid_loss]]`` series for plotting."""
    train_loss = metric_state.get('train').get('loss').get('epoch')
    valid_loss = metric_state.get('valid').get('loss').get('epoch')
    epoch_curr = trainer_state.get('epoch')['curr']
    # During the first epoch there is only one x position to plot.
    x = [1] if epoch_curr == 1 else list(range(1, len(train_loss) + 1))
    return [[x, train_loss], [x, valid_loss]]
def clean_up_metric_resume(metric_state: Dict, epoch_curr):
    """Drop a trailing partial-epoch entry from every train/valid metric.

    When resuming at epoch *epoch_curr*, each metric should hold exactly
    ``epoch_curr - 1`` completed-epoch values; an interrupted run may
    have written one extra entry, which is popped here.
    """
    train: Dict = metric_state.get('train')
    valid: Dict = metric_state.get("valid")
    expected = epoch_curr - 1
    for name in train.keys():
        if len(train[name]['epoch']) != expected:
            train[name]['epoch'].pop()
        if len(valid[name]['epoch']) != expected:
            valid[name]['epoch'].pop()
| [
"nunenuh@gmail.com"
] | nunenuh@gmail.com |
bbd0c5e6dfe3b1dd6ce23e3e5ea09fe588e6ecdc | 987a68b9c196f39ba1810a2261cd4a08c35416a3 | /BinarySearch/374-guess-number-higher-or-lower.py | 719ded9d3727476c6b598a21120e1847f0b62c51 | [] | no_license | xizhang77/LeetCode | c26e4699fbe1f2d2c4706b2e5ee82131be066ee5 | ce68f5af57f772185211f4e81952d0345a6d23cb | refs/heads/master | 2021-06-05T15:33:22.318833 | 2019-11-19T06:53:24 | 2019-11-19T06:53:24 | 135,076,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,179 | py | # -*- coding: utf-8 -*-
'''
We are playing the Guess Game. The game is as follows:
I pick a number from 1 to n. You have to guess which number I picked.
Every time you guess wrong, I'll tell you whether the number is higher or lower.
You call a pre-defined API guess(int num) which returns 3 possible results (-1, 1, or 0):
-1 : My number is lower
1 : My number is higher
0 : Congrats! You got it!
Example :
Input: n = 10, pick = 6
Output: 6
'''
# The guess API is already defined for you.
# @param num, your guess
# @return -1 if my number is lower, 1 if my number is higher, otherwise return 0
# def guess(num):
class Solution(object):
    def guessNumber(self, n):
        """
        Binary-search for the picked number in [1, n] using the guess() API.

        Fixes over the original draft:
        - ``(lower + upper) / 2`` produced a float under Python 3; use
          integer division instead.
        - The bounds now move *past* the midpoint (``mid - 1`` /
          ``mid + 1``); the old ``min``/``max`` updates could leave the
          bounds unchanged and loop forever.
        - guess() is called once per iteration instead of twice.

        :type n: int
        :rtype: int
        """
        lower, upper = 1, n
        while lower <= upper:
            mid = (lower + upper) // 2
            result = guess(mid)
            if result == 0:
                return mid
            elif result == -1:
                # My number is lower than mid.
                upper = mid - 1
            else:
                # My number is higher than mid.
                lower = mid + 1
| [
"xizhang1@cs.stonybrook.edu"
] | xizhang1@cs.stonybrook.edu |
459c64a151d5f14c2571ae8ddcda8396b1a73dee | 2c4648efe8c7e408b8c3a649b2eed8bb846446ec | /codewars/Python/8 kyu/BinToDecimal/bin_to_decimal_test.py | 0aae2d128c84e951df54be278457b2b6b1a82121 | [] | no_license | Adasumizox/ProgrammingChallenges | 9d79bd1b0ce4794b576124f9874aabb86d5c0713 | 3630fcde088d7991e344eb1b84805e9e756aa1a2 | refs/heads/master | 2021-07-16T08:16:57.538577 | 2020-07-19T19:58:28 | 2020-07-19T19:58:28 | 190,159,085 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | from bin_to_decimal import bin_to_decimal
import unittest
class TestBinToDecimal(unittest.TestCase):
    """Unit tests for bin_to_decimal()."""
    def test(self):
        # Hand-picked binary strings with known decimal values.
        cases = (("1", 1), ("0", 0), ("1001001", 73))
        for binary, expected in cases:
            self.assertEqual(bin_to_decimal(binary), expected)
    def test_rand(self):
        # Random round trip: int -> bin() -> bin_to_decimal -> int.
        from random import randint
        for _ in range(100):
            number = randint(1, 5000000)
            self.assertEqual(bin_to_decimal(bin(number)[2:]), number)
if __name__ == '__main__':
    unittest.main()
"darkdan099@gmail.com"
] | darkdan099@gmail.com |
3ef84fc59f17834ac7d0fd369bd367bc09009366 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_knighted.py | 53b6a3c8b9792513c95ece677355011f50817313 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py |
from xai.brain.wordbase.nouns._knight import _KNIGHT
#class header
class _KNIGHTED(_KNIGHT, ):
    """Auto-generated noun entry mapping 'knighted' onto its base word 'knight'."""
    def __init__(self,):
        _KNIGHT.__init__(self)
        self.name = "KNIGHTED"
        self.specie = 'nouns'
        self.basic = "knight"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
9134c21c482197da0d0d50f272fc04d4bc7d382b | 9b9b6a7aa1de1092a8480771f2b08ffa0972218d | /python/sklearn/linear-regression/workload-analysis/faster-rcnn/lr/lr.py | 90e61bda1999d1313da975be3ec8679947fef79f | [
"WTFPL"
] | permissive | lijiansong/lang | c42ca757306b38f37a26fef841b2460f05a13af6 | 27ffecd9afe67ddac003fc4d6333e06e2cc20434 | refs/heads/master | 2023-02-25T17:36:01.221720 | 2023-02-14T14:10:29 | 2023-02-14T14:10:29 | 149,586,739 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,826 | py | from sklearn import linear_model
def get_data(file_name):
    """Parse a tab-separated benchmark log and min-max normalize it.

    Each line holds six fields: batch_size, data_parallel,
    model_parallel, thread_num, fifo_size, end2end_fps.  Returns
    ``(x_list, y_list)``: x_list rows are the four normalized tuning
    knobs [data_parallel, model_parallel, thread_num, fifo_size] and
    y_list the normalized fps.

    BUG FIX: the original normalization indexed ``item[1]``..``item[4]``
    although x_list rows only have indices 0..3, so it read
    model_parallel as data_parallel and raised IndexError whenever
    fifo_size (or model_parallel) actually varied.  Degenerate columns
    (min == max) are pinned to 1, as in the original special cases,
    which also avoids dividing by zero.
    """
    def _norm(value, lo, hi):
        # Pin degenerate columns to 1 instead of dividing by zero.
        if hi == lo:
            return 1
        return (value - lo) / (hi - lo)
    x_list = []
    y_list = []
    file_reader = open(file_name, 'r')
    try:
        text_lines = file_reader.readlines()
        print(type(text_lines))
        data_parallel_min = model_parallel_min = thread_num_min = fifo_size_min = end2end_fps_min = 100000000
        data_parallel_max = model_parallel_max = thread_num_max = fifo_size_max = end2end_fps_max = 0
        for line in text_lines:
            fields = line.rstrip('\n').split('\t')
            # fields[0] is batch_size; it is parsed but unused downstream.
            data_parallel = int(fields[1])
            model_parallel = int(fields[2])
            thread_num = int(fields[3])
            fifo_size = int(fields[4])
            end2end_fps = float(fields[5])
            data_parallel_min = min(data_parallel_min, data_parallel)
            model_parallel_min = min(model_parallel_min, model_parallel)
            thread_num_min = min(thread_num_min, thread_num)
            fifo_size_min = min(fifo_size_min, fifo_size)
            end2end_fps_min = min(end2end_fps_min, end2end_fps)
            data_parallel_max = max(data_parallel_max, data_parallel)
            model_parallel_max = max(model_parallel_max, model_parallel)
            thread_num_max = max(thread_num_max, thread_num)
            fifo_size_max = max(fifo_size_max, fifo_size)
            end2end_fps_max = max(end2end_fps_max, end2end_fps)
            x_list.append([data_parallel, model_parallel, thread_num, fifo_size])
            y_list.append(end2end_fps)
        print(data_parallel_min, model_parallel_min, thread_num_min, fifo_size_min, end2end_fps_min)
        print(data_parallel_max, model_parallel_max, thread_num_max, fifo_size_max, end2end_fps_max)
        # Min-max scale every knob (correct indices 0..3).
        for i, item in enumerate(x_list):
            x_list[i] = [
                _norm(item[0], data_parallel_min, data_parallel_max),
                _norm(item[1], model_parallel_min, model_parallel_max),
                _norm(item[2], thread_num_min, thread_num_max),
                _norm(item[3], fifo_size_min, fifo_size_max),
            ]
        for i, fps in enumerate(y_list):
            y_list[i] = (fps - end2end_fps_min) / (end2end_fps_max - end2end_fps_min)
    finally:
        if file_reader:
            file_reader.close()
    return x_list, y_list
def get_lr_model(X, y):
    """Fit an ordinary least-squares regression and return its coefficients."""
    model = linear_model.LinearRegression()
    model.fit(X, y)
    return model.coef_
if __name__ == '__main__':
    # Fit one linear model per workload log and report the learned
    # coefficients plus the in-sample R-squared.
    clf = linear_model.LinearRegression()
    print('===---------------- dense fp16 end2end fps ----------------===')
    X, y = get_data('faster-rcnn-dense-fp16.txt')
    print(len(X), len(y))
    clf.fit(X, y)
    print(clf.coef_)
    # https://statinfer.com/204-1-7-adjusted-r-squared-in-python/
    print('R-squared:', clf.score(X, y))
    print('===---------------- dense fp16 hardware fps ----------------===')
    X, y = get_data('faster-rcnn-dense-fp16-hw.txt')
    print(len(X), len(y))
    clf.fit(X, y)
    print(clf.coef_)
    print('R-squared:', clf.score(X, y))
| [
"lijiansong@ict.ac.cn"
] | lijiansong@ict.ac.cn |
fd8d9edccb5cf431782d7a3b811a8be8d97b3cab | b182a3407b56c14b830b6ff3a543ba29d5996f84 | /beartype_test/a00_unit/a00_util/test_utilclass.py | 243c41c5a70f9dcddf4f9dced5d7a6262195fa2d | [
"MIT"
] | permissive | yamgent/beartype | 9d1899a6e6dacd1dd74652a81a2c1f275b1fd775 | afaaa0d8c25f8e5c06dd093982787b794ee48f2d | refs/heads/main | 2023-03-19T18:27:44.326772 | 2021-03-08T06:20:57 | 2021-03-08T06:26:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,252 | py | #!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype class utility unit tests.**
This submodule unit tests the public API of the private
:mod:`beartype._util.cls.utilclstest` submodule.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# ....................{ TESTS }....................
def test_is_class_builtin() -> None:
    '''
    Test the :func:`beartype._util.cls.utilclstest.is_type_builtin` function.
    '''

    # Defer heavyweight imports.
    from beartype._util.cls.utilclstest import is_type_builtin
    from beartype_test.a00_unit.data.data_type import (
        CLASSES_BUILTIN, CLASSES_NON_BUILTIN)

    # Every builtin type is accepted...
    assert all(
        is_type_builtin(builtin_cls) is True
        for builtin_cls in CLASSES_BUILTIN)

    # ...and every non-builtin type is rejected.
    assert all(
        is_type_builtin(nonbuiltin_cls) is False
        for nonbuiltin_cls in CLASSES_NON_BUILTIN)
def test_is_classname_builtin() -> None:
    '''
    Test the :func:`beartype._util.cls.utilclstest.is_classname_builtin` function.
    '''

    # Defer heavyweight imports.
    from beartype._util.cls.utilclstest import is_classname_builtin
    from beartype._util.utilobject import get_object_type_name
    from beartype_test.a00_unit.data.data_type import (
        CLASSES_BUILTIN, CLASSES_NON_BUILTIN)

    # The fully-qualified name of every builtin type is accepted...
    assert all(
        is_classname_builtin(get_object_type_name(builtin_cls)) is True
        for builtin_cls in CLASSES_BUILTIN)

    # ...and the name of every non-builtin type is rejected.
    assert all(
        is_classname_builtin(get_object_type_name(nonbuiltin_cls)) is False
        for nonbuiltin_cls in CLASSES_NON_BUILTIN)
"leycec@gmail.com"
] | leycec@gmail.com |
5904499d418489afdf5bcc82482c93dea481d2b4 | 7d9d3d5ce2ac19221163d54a94c025993db0af4f | /autotest/gcore/asyncreader.py | e5cdc8c94fae9dfcb59bfb31d900631c01c51494 | [
"MIT"
] | permissive | dcgull/gdal | 5408adad77d001db32173bba547b447220b5e9a2 | a5e2a7b54db955bd061ebfc6d69aa2dd752b120c | refs/heads/master | 2020-04-03T13:30:40.013172 | 2013-10-11T12:07:57 | 2013-10-11T12:07:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,186 | py | #!/usr/bin/env python
###############################################################################
# $Id: asyncreader.py 22782 2011-07-23 19:20:29Z warmerdam $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test AsyncReader interface
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2010, Even Rouault <even dot rouault at mines dash paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
sys.path.append( '../pymod' )
import gdaltest
import gdal
###############################################################################
# Test AsyncReader interface on the default (synchronous) implementation
def asyncreader_1():
    """Exercise BeginAsyncReader's default (synchronous) implementation.

    Reads data/rgbsmall.tif through the async API, checks the reader
    reports a single complete region, writes the returned buffer to an
    in-memory GTiff and compares per-band checksums against the source.
    Returns 'success' or 'fail' per the gdaltest convention.
    """
    ds = gdal.Open('data/rgbsmall.tif')
    asyncreader = ds.BeginAsyncReader(0,0,ds.RasterXSize,ds.RasterYSize)
    buf = asyncreader.GetBuffer()
    result = asyncreader.GetNextUpdatedRegion(0)
    # The synchronous fallback completes the whole window in one call.
    if result != [gdal.GARIO_COMPLETE, 0, 0, ds.RasterXSize,ds.RasterYSize]:
        gdaltest.post_reason('wrong return values for GetNextUpdatedRegion()')
        print(result)
        return 'fail'
    ds.EndAsyncReader(asyncreader)
    asyncreader = None
    # Round-trip the buffer through an in-memory file and compare checksums.
    out_ds = gdal.GetDriverByName('GTiff').Create('/vsimem/asyncresult.tif', ds.RasterXSize,ds.RasterYSize,ds.RasterCount)
    out_ds.WriteRaster(0,0,ds.RasterXSize,ds.RasterYSize,buf)
    expected_cs = [ ds.GetRasterBand(i+1).Checksum() for i in range(ds.RasterCount)]
    cs = [ out_ds.GetRasterBand(i+1).Checksum() for i in range(ds.RasterCount)]
    # Setting the datasets to None closes them (GDAL Python idiom).
    ds = None
    out_ds = None
    gdal.Unlink('/vsimem/asyncresult.tif')
    for i in range(len(cs)):
        if cs[i] != expected_cs[i]:
            gdaltest.post_reason('did not get expected checksum for band %d' % (i+1))
            print(cs[i])
            print(expected_cs[i])
            return 'fail'
    return 'success'
# Registry of test callables consumed by the gdaltest harness.
gdaltest_list = [ asyncreader_1 ]
if __name__ == '__main__':
    gdaltest.setup_run( 'asyncreader' )
    gdaltest.run_tests( gdaltest_list )
    gdaltest.summarize()
| [
"gerard.choinka@ambrosys.de"
] | gerard.choinka@ambrosys.de |
e8fb239b281d1398883df79eb69f5d7664a1a78c | 92fb3d19b329434fe577fb5b8cc2e3302700d427 | /midterm/task1/forms.py | 8521fa2035b958a521928a7a1a41bbea07a0bf2b | [] | no_license | aigerimzh/BFDjango | 6a2635db8a1017b64c304193277d3030b3daf196 | 99f81274abdf0afcd9925cf2af057e616c433448 | refs/heads/master | 2020-03-28T22:45:13.782301 | 2018-11-22T17:10:54 | 2018-11-22T17:10:54 | 146,699,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | from django import forms
from .models import Restaurant
class RestaurantForm(forms.ModelForm):
    """ModelForm exposing a Restaurant's name, phone number and city."""
    class Meta:
        model = Restaurant
        fields = ["r_name", "tel", "city"]
| [
"ajgerimzumabaeva10@gmail.com"
] | ajgerimzumabaeva10@gmail.com |
71af7fe8f846df967083da104419347fd6448bda | 39f879ced0dbcbb92e7a12d8b09f4fa0aea4f925 | /pajbot/models/kvi.py | 499ba8d96ff3d9277996298df2f262fdc3e390c5 | [
"MIT"
] | permissive | coral/pajbot | f205b750d77cf06c75229aee93a5879abe4a10de | 682580f2a43a19a907cba231290b6d59157e123c | refs/heads/master | 2021-01-14T08:30:17.534620 | 2016-03-24T22:15:24 | 2016-03-24T22:15:24 | 54,676,838 | 0 | 0 | null | 2016-03-24T21:58:09 | 2016-03-24T21:58:08 | null | UTF-8 | Python | false | false | 1,383 | py | import logging
from collections import UserDict
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
from pajbot.managers import Base
from pajbot.managers import DBManager
log = logging.getLogger('pajbot')
class KVIData(Base):
    """A single named integer counter persisted in the tb_idata table."""
    __tablename__ = 'tb_idata'
    # Counter name (primary key) and its current integer value.
    id = Column(String(64), primary_key=True)
    value = Column(Integer)
    def __init__(self, id):
        self.id = id
        self.value = 0
    def set(self, new_value):
        # Overwrite the counter with new_value.
        self.value = new_value
    def get(self):
        # Return the current counter value.
        return self.value
    def inc(self):
        # Increment the counter by one.
        self.value += 1
    def dec(self):
        # Decrement the counter by one.
        self.value -= 1
    def __str__(self):
        return str(self.value)
class KVIManager(UserDict):
    """Dict-like cache of KVIData counters backed by the database.

    Counters are loaded via reload(), created lazily on first access,
    and flushed to the database with commit().
    """
    def __init__(self):
        UserDict.__init__(self)
        self.db_session = DBManager.create_session()

    def __getitem__(self, id):
        """Return the counter named *id*, creating it if missing."""
        if id not in self.data:
            kvidata = KVIData(id)
            # BUG FIX: previously a *second* KVIData(id) instance was
            # added to the session while the cached instance stayed
            # detached, so mutations made through this manager were
            # never persisted. Add the same object that we cache.
            self.db_session.add(kvidata)
            self.data[id] = kvidata
        return self.data[id]

    def commit(self):
        """Flush all pending counter changes to the database."""
        self.db_session.commit()

    def reload(self):
        """Replace the in-memory cache with all KVIData rows from the DB."""
        self.data = {}
        num_values = 0
        for kvdata in self.db_session.query(KVIData):
            num_values += 1
            self.data[kvdata.id] = kvdata
        log.info('Loaded {0} KVIData values'.format(num_values))
        return self
| [
"pajlada@bithack.se"
] | pajlada@bithack.se |
83ba5b3175e103d12d140d6720c3c1c842808d87 | 9f9f4280a02f451776ea08365a3f119448025c25 | /plans/hsppw/qcut_hsp-s_002_pwcc_logit_hs.py | 389812e7751f3c4e9cb7cdc5e3736e373e543373 | [
"BSD-2-Clause"
] | permissive | dbis-uibk/hit-prediction-code | 6b7effb2313d2499f49b2b14dd95ae7545299291 | c95be2cdedfcd5d5c27d0186f4c801d9be475389 | refs/heads/master | 2023-02-04T16:07:24.118915 | 2022-09-22T12:49:50 | 2022-09-22T12:49:50 | 226,829,436 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,074 | py | """Plan using all features."""
import os.path
from dbispipeline.evaluators import CvEpochEvaluator
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
import hit_prediction_code.common as common
from hit_prediction_code.dataloaders import ClassLoaderWrapper
from hit_prediction_code.dataloaders import EssentiaLoader
from hit_prediction_code.dataloaders import QcutLoaderWrapper
import hit_prediction_code.evaluations as evaluations
from hit_prediction_code.models.pairwise import PairwiseOrdinalModel
from hit_prediction_code.result_handlers import print_results_as_json
from hit_prediction_code.transformers.label import compute_hit_score_on_df
# Location of the preprocessed hit-song-prediction dataset.
PATH_PREFIX = 'data/hit_song_prediction_msd_bb_lfm_ab/processed'
# Binary task: hit vs. non-hit.
number_of_classes = 2
# Load Essentia features, derive the yang_hit_score label from last.fm
# play/listener counts, bin it into quantiles, and expose the bins as
# class labels.
dataloader = ClassLoaderWrapper(
    wrapped_loader=QcutLoaderWrapper(
        wrapped_loader=EssentiaLoader(
            dataset_path=os.path.join(
                PATH_PREFIX,
                'hsp-s_acousticbrainz.parquet',
            ),
            features=[
                *common.all_no_year_list(),
            ],
            label='yang_hit_score',
            nan_value=0,
            data_modifier=lambda df: compute_hit_score_on_df(
                df,
                pc_column='lastfm_playcount',
                lc_column='lastfm_listener_count',
                hit_score_column='yang_hit_score',
            ),
        ),
        number_of_bins=number_of_classes,
    ),
    labels=list(range(number_of_classes)),
)
# Min-max scale features, then train a pairwise ordinal model wrapping
# a logistic-regression base classifier.
pipeline = Pipeline([
    ('scale', MinMaxScaler()),
    ('model',
     PairwiseOrdinalModel(
         wrapped_model=LogisticRegression(),
         pairs_factor=3.,
         threshold_type='average',
         pair_strategy='random',
         pair_encoding='concat',
         threshold_sample_training=False,
     )),
])
# Cross-validated evaluation with ordinal-classification metrics.
evaluator = CvEpochEvaluator(
    cv=evaluations.cv(),
    scoring=evaluations.metrics.ordinal_classifier_scoring(),
    scoring_step_size=1,
)
result_handlers = [
    print_results_as_json,
]
| [
"mikevo-uibk@famv.net"
] | mikevo-uibk@famv.net |
b3272192375b1b837f1071863e9a82efcad1198e | 0e59533f5ed141fd0d286dbdaebdbeba14ee576e | /Scripts/viewer.py | a70503d56ef444a57b0e4632c6e962bf3e714e87 | [] | no_license | LizinczykKarolina/DjangoBussiness | cab793ee73435143abf3293b12371ac81805e3fc | b1d89109533c3f1f6b004b2ec259ea9ec13185bc | refs/heads/master | 2021-09-25T17:59:09.380062 | 2018-10-24T21:52:34 | 2018-10-24T21:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | #!c:\pycharmprojects\djangoprojects\djangobussiness\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
#
# an image viewer
class UI(tkinter.Label):
    """Tk label that displays a PIL image.

    The Tk image object is kept on ``self.image`` so it is not garbage
    collected while the label is alive.
    """
    def __init__(self, master, im):
        if im.mode == "1":
            # bitmap image
            self.image = ImageTk.BitmapImage(im, foreground="white")
            tkinter.Label.__init__(self, master, image=self.image, bd=0,
                                   bg="black")
        else:
            # photo image
            self.image = ImageTk.PhotoImage(im)
            tkinter.Label.__init__(self, master, image=self.image, bd=0)
#
# script interface
if __name__ == "__main__":
    # Require exactly one image-file argument.
    if not sys.argv[1:]:
        print("Syntax: python viewer.py imagefile")
        sys.exit(1)
    filename = sys.argv[1]
    # Open a Tk window titled after the file and show the image in it.
    root = tkinter.Tk()
    root.title(filename)
    im = Image.open(filename)
    UI(root, im).pack()
    root.mainloop()
| [
"wieczorek.karolina1@o2.pl"
] | wieczorek.karolina1@o2.pl |
bd689d04a2bef94ca53dd98eaece7d358b901fcb | 06a7dc7cc93d019e4a9cbcf672b23a0bbacf8e8b | /2016_schizConnect/unsupervised analysis/VIP/pcatv_components_analysis_VIP.py | 661f315760166011bf65da58fca12157fcc75feb | [] | no_license | neurospin/scripts | 6c06cd218a5f32de9c3c2b7d1d8bda3f3d107458 | f14a2c9cf2cd7f5fbea767b017c3faf36d170bdb | refs/heads/master | 2021-07-11T22:55:46.567791 | 2021-07-02T13:08:02 | 2021-07-02T13:08:02 | 10,549,286 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,856 | py |
import os
import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
import pandas as pd
import nibabel as nib
import json
from nilearn import plotting
from nilearn import image
from scipy.stats.stats import pearsonr
###############################################################################
# SCZ ONLY
###############################################################################
# --- SCZ patients only: correlate PCA-TV component scores with age ---
INPUT_POPULATION = "/neurospin/brainomics/2016_schizConnect/analysis/VIP/VBM/population.csv"
BASE_PATH = "/neurospin/brainomics/2016_schizConnect/analysis/VIP/VBM/results/pcatv_scz/5_folds_VIP_scz"
INPUT_RESULTS = os.path.join(BASE_PATH,"results","0")
# Compute clinical Scores
pop = pd.read_csv(INPUT_POPULATION)
# dx == 1 presumably selects schizophrenia patients -- TODO confirm coding.
age = pop[pop.dx ==1].age.values
scores = np.load(os.path.join(INPUT_RESULTS,"struct_pca_0.1_0.8_0.8","X_test_transform.npz"))['arr_0']
# Plot each component whose score correlates significantly with age.
for i in range(scores.shape[1]):
    corr,p = pearsonr(scores[:,i],age)
    if p < 0.05:
        print ("Significant correlation between age and score on component %s" % (i))
        plt.figure()
        plt.plot(scores[:,i],age,'o')
        plt.xlabel('Score on component %s' %(i))
        plt.ylabel('age')
        plt.title("Pearson's correlation = %.02f, p = %.01e" % (corr,p),fontsize=12)
###############################################################################
###############################################################################
# PLOT ALL CORRELATIONS WITH AGE (2x5 grid, one panel per component)
fig, axs = plt.subplots(2,5, figsize=(15, 6), facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace = 1.0, wspace=.3)
axs = axs.ravel()
for i in range(10):
    corr,p = pearsonr(scores[:,i],age)
    axs[i].plot(scores[:,i],age,'o', markersize = 4)
    axs[i].set_title("Pearson' s correlation = %.02f \n p = %.01e" % (corr,p),fontsize=12)
    axs[i].xaxis.set_ticks(np.arange(-0.3,0.4,0.2))
    axs[i].set_xlabel('Score on component %s' %(i+1))
    axs[i].set_ylabel('age')
    axs[i].yaxis.set_ticks(np.arange(10,80,10))
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.savefig(os.path.join(BASE_PATH,"correlation_Age.pdf"))
plt.savefig(os.path.join(BASE_PATH,"correlation_Age.png"))
###############################################################################
###############################################################################
# CONTROLS ONLY
###############################################################################
# --- Controls only: same analysis on the pcatv_controls decomposition ---
INPUT_POPULATION = "/neurospin/brainomics/2016_schizConnect/analysis/VIP/VBM/population.csv"
BASE_PATH = "/neurospin/brainomics/2016_schizConnect/analysis/VIP/VBM/results/pcatv_controls/5_folds_VIP_controls"
INPUT_RESULTS = os.path.join(BASE_PATH,"results","0")
# Compute clinical Scores
pop = pd.read_csv(INPUT_POPULATION)
# dx == 0 presumably selects healthy controls -- TODO confirm coding.
age = pop[pop.dx ==0].age.values
scores = np.load(os.path.join(INPUT_RESULTS,"struct_pca_0.1_0.8_0.5","X_test_transform.npz"))['arr_0']
for i in range(scores.shape[1]):
    corr,p = pearsonr(scores[:,i],age)
    if p < 0.05:
        print ("Significant correlation between age and score on component %s" % (i))
        plt.figure()
        plt.plot(scores[:,i],age,'o')
        plt.xlabel('Score on component %s' %(i))
        plt.ylabel('age')
        plt.title("Pearson's correlation = %.02f, p = %.01e" % (corr,p),fontsize=12)
###############################################################################
# PLOT ALL CORRELATIONS WITH AGE (2x5 grid, one panel per component)
fig, axs = plt.subplots(2,5, figsize=(15, 6), facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace = 1.0, wspace=.3)
axs = axs.ravel()
for i in range(10):
    corr,p = pearsonr(scores[:,i],age)
    axs[i].plot(scores[:,i],age,'o', markersize = 4)
    axs[i].set_title("Pearson' s correlation = %.02f \n p = %.01e" % (corr,p),fontsize=12)
    axs[i].xaxis.set_ticks(np.arange(-0.3,0.4,0.2))
    axs[i].set_xlabel('Score on component %s' %(i+1))
    axs[i].set_ylabel('age')
    axs[i].yaxis.set_ticks(np.arange(10,80,10))
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.savefig(os.path.join(BASE_PATH,"correlation_Age.pdf"))
plt.savefig(os.path.join(BASE_PATH,"correlation_Age.png"))
###############################################################################
###############################################################################
###############################################################################
# CONTROLS + SCZ
###############################################################################
# --- Controls + SCZ together: scatter both groups against age ---
INPUT_POPULATION = "/neurospin/brainomics/2016_schizConnect/analysis/VIP/VBM/population.csv"
BASE_PATH = "/neurospin/brainomics/2016_schizConnect/analysis/VIP/VBM/results/pcatv_all/5_folds_VIP_all"
INPUT_RESULTS = os.path.join(BASE_PATH,"results","0")
INPUT_DATA_y = "/neurospin/brainomics/2016_schizConnect/analysis/VIP/VBM/data/y.npy"
# y holds the diagnosis label per subject (0 = control, 1 = SCZ by the
# dx coding used above -- TODO confirm).
y = np.load(INPUT_DATA_y)
pop = pd.read_csv(INPUT_POPULATION)
age = pop.age.values
scores = np.load(os.path.join(INPUT_RESULTS,"struct_pca_0.1_0.5_0.1","X_test_transform.npz"))['arr_0']
for i in range(scores.shape[1]):
    corr,p = pearsonr(scores[:,i],age)
    if p < 0.05:
        print ("Significant correlation between age and score on component %s" % (i))
        plt.figure()
        # One color per diagnostic group.
        plt.plot(scores[y==0,i],age[y==0],'o')
        plt.plot(scores[y==1,i],age[y==1],'o')
        plt.xlabel('Score on component %s' %(i))
        plt.ylabel('age')
        plt.title("Pearson's correlation = %.02f, p = %.01e" % (corr,p),fontsize=12)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
# PLOT ALL CORRELATIONS WITH AGE (2x5 grid, one panel per component)
fig, axs = plt.subplots(2,5, figsize=(15, 6), facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace = 1.0, wspace=.3)
axs = axs.ravel()
for i in range(10):
    corr,p = pearsonr(scores[:,i],age)
    axs[i].plot(scores[y==0,i],age[y==0],'o', markersize = 4)
    axs[i].plot(scores[y==1,i],age[y==1],'o', markersize = 4)
    axs[i].set_title("Pearson' s correlation = %.02f \n p = %.01e" % (corr,p),fontsize=12)
    axs[i].xaxis.set_ticks(np.arange(-0.3,0.4,0.2))
    axs[i].set_xlabel('Score on component %s' %(i+1))
    axs[i].set_ylabel('age')
    axs[i].yaxis.set_ticks(np.arange(10,80,10))
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.savefig(os.path.join(BASE_PATH,"correlation_Age.pdf"))
plt.savefig(os.path.join(BASE_PATH,"correlation_Age.png"))
###############################################################################
| [
"ad247405@is222241.intra.cea.fr"
] | ad247405@is222241.intra.cea.fr |
32c1fd44ad91e650d9dd909d4c6ffd0db4a42814 | 288bbf5b6bf4c8471896533dc4c0538f1733c3a4 | /web_flask/1-hbnb_route.py | 1f5e59d54c15956153ef5b59f68cb0d58f6ad668 | [] | no_license | lemejiamo/AirBnB_clone_v2 | e75e61551763ed9677981b66d15667bdfe288dfc | 9a72db6fe2f100c1974fb0ebe0e3a8b5fb140d65 | refs/heads/master | 2023-07-19T03:17:10.412000 | 2021-09-21T23:46:58 | 2021-09-21T23:46:58 | 393,437,169 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | #!/usr/bin/python3
""" initilice basic flask server """
from flask import Flask
app = Flask(__name__)
@app.route('/', strict_slashes=False)
def index():
    """Return the greeting shown at the root URL."""
    return 'Hello HBNB!'
@app.route('/hbnb', strict_slashes=False)
def hbnb():
    """Return the page identifier served at /hbnb."""
    return 'HBNB'
# When run directly, listen on all interfaces on port 5000.
if __name__ == "__main__":
    app.run(debug=True, host="0.0.0.0", port=5000)
| [
"luismejia69@gmail.com"
] | luismejia69@gmail.com |
b25c921d84ede98178527e22823a6081472ab0f5 | 409c4d0dce72de987dff7c76857499fba8f8b7a0 | /popmail.py | 9735a642e45baeca25654101668a883343048f32 | [] | no_license | crystaleone/test | b4fece7fbc4e8ddd6186ea13245c62970c6d7038 | 4af3964bf6a657e888c7850f07a031440ba29e7a | refs/heads/master | 2021-01-18T19:17:36.924170 | 2017-09-19T03:37:01 | 2017-09-19T03:37:01 | 86,895,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | import poplib, getpass, sys, mailconfig
# Connection parameters come from the local mailconfig module.
mailserver = mailconfig.popservername
mailuser = mailconfig.popusername
mailpasswd = getpass.getpass('Password for %s?' % mailserver)
print('Connection...')
server = poplib.POP3(mailserver)
server.user(mailuser)
server.pass_(mailpasswd)
try:
    # BUG FIX: was server.getwelcom(), which raises AttributeError.
    print(server.getwelcome())
    # BUG FIX: the byte count was unpacked into 'msgByes' while the
    # print below referenced the undefined name 'msgBytes' (NameError).
    msgCount, msgBytes = server.stat()
    print('There are', msgCount, 'mail messages in', msgBytes, 'bytes')
    print(server.list())
    print('-'*80)
    input('[Press Enter key]')
    # Fetch and display each message, pausing between messages.
    for i in range(msgCount):
        hdr, message, octets = server.retr(i+1)
        for line in message: print(line.decode())
        print('-' * 80)
        if i < msgCount - 1:
            input('[Press Enter key]')
finally:
    # Always quit so the server unlocks the mailbox.
    server.quit()
print('Bye.')
"403868144@qq.com"
] | 403868144@qq.com |
4305a78a71ad47ae984d4d566913ad9b7caf9f6f | 6336828aeab3ea2ba3e1cf9452a8a3f3a084b327 | /django_react_users_tutorial/virtual-env/bin/pip3 | 90eaf20b91fc5bd44e053571f6f42364ee896361 | [] | no_license | kalereshma96/DjangoNewRepository | 85f2eaed6b689be273af48d328c0a388244bbe2b | 37fd232c2ac91eb6940300f20118f93d17926f9a | refs/heads/master | 2020-04-12T18:12:15.698279 | 2019-01-21T13:46:37 | 2019-01-21T13:46:37 | 162,672,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | #!/home/admin1/PycharmProjects/mynewpythonproject/django_react_users_tutorial/virtual-env/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
# setuptools-generated console-script wrapper for the pip3 entry point.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip any '-script.py(w)' / '.exe' suffix so pip sees a clean argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
    )
| [
"kalereshma96@gmail.com"
] | kalereshma96@gmail.com | |
ffcf2b18e3380c3824334a82b20ab916324a1565 | b68d4e7826c29a22b002ff9c10583faeb7a10455 | /pikry-3.4.1/bin/pilfont.py | bd6e47c8c714d7f812e439293df8a80bde33f003 | [] | no_license | mikanyman/.pyenv_versions-legacy | ec392821290bd38873b25824c4261b15dc1a5067 | 5a42c7c21e800610f4f5f322d73d1dbd62a081b9 | refs/heads/master | 2022-10-13T10:22:13.956161 | 2017-01-31T20:10:04 | 2017-01-31T20:10:04 | 80,555,789 | 0 | 1 | null | 2022-09-30T13:39:01 | 2017-01-31T19:49:56 | Python | UTF-8 | Python | false | false | 1,059 | py | #!/home/mnyman/.pyenv/versions/pikry-3.4.1/bin/python3.4
#
# The Python Imaging Library
# $Id$
#
# PIL raster font compiler
#
# history:
# 1997-08-25 fl created
# 2002-03-10 fl use "from PIL import"
#
from __future__ import print_function
VERSION = "0.4"
import glob, sys
# drivers
from PIL import BdfFontFile
from PIL import PcfFontFile
if len(sys.argv) <= 1:
print("PILFONT", VERSION, "-- PIL font compiler.")
print()
print("Usage: pilfont fontfiles...")
print()
print("Convert given font files to the PIL raster font format.")
print("This version of pilfont supports X BDF and PCF fonts.")
sys.exit(1)
files = []
for f in sys.argv[1:]:
files = files + glob.glob(f)
for f in files:
print(f + "...", end=' ')
try:
fp = open(f, "rb")
try:
p = PcfFontFile.PcfFontFile(fp)
except SyntaxError:
fp.seek(0)
p = BdfFontFile.BdfFontFile(fp)
p.save(f)
except (SyntaxError, IOError):
print("failed")
else:
print("OK")
| [
"mika.nyman@synapse-computing.com"
] | mika.nyman@synapse-computing.com |
4782ed428274689a290b12241878031341eba26d | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/minCost_20200826171103.py | dfc9066a4786aa0d420bc1121c66f4347f61d3bd | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | def minCost(days,costs):
# brute force approach
# find if numbers are consecutive
# if they are past 7 then means we do a 30 day pass
# once they stop being consecutive means to opt for something different
# like [1,4,6,7,8,20]
ways = [0] * days[len(days)-1]
newDays = set(days)
for i in range(1,len(ways)):
print('i')
total = ways[i-1]+costs[0]
if i-7 >= 0:
total1 = ways[i-7] + costs[1]
else:
total1 = 0 + costs[1]
if i-15 >= 0:
total2 = ways[i-15] + costs[2]
else:
total2 = 0 + costs[2]
if i in newDays:
ways[i] = min(total,total1,total2)
else:
ways[i] = ways[i-1]
print(ways)
minCost([1,4,6,7,8,20],[2,7,15]) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.