blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
162f62f3a62bc6c0dc16eea9d059bdc1e30f069f | 0f458289a8c1d95ed3a9d548b4dcb6be9a2e6ad7 | /analysis/groups/steps/load_data.py | 80bdc124a1f4cc50f3d35bd7fc0aea407d637fc3 | [] | no_license | sernst/airplane_boarding | 510a805da2d5f30ea3af49f482071d9a02a6676e | 6d93089566fcd2512d68a7300cd24861b34e141f | refs/heads/master | 2016-09-12T21:31:08.384911 | 2016-06-08T15:08:08 | 2016-06-08T15:08:08 | 58,775,553 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,480 | py | import glob
import os
import json
import pandas as pd
from cauldron import project
# Maps the boarding-method code letter embedded in a trial folder name to a
# human-readable label ('r' trials get a zero-padded trial number in labels).
methods = {
    'b': 'Back',
    'f': 'Front',
    'r': 'Rand'
}
# Maps a collection name (first token of the trial folder name) to its
# [seating_delay, interchange_delay] parameters (read by index below).
groups = {
    'two': [2, 0],
    'twogs': [2, 0]
}
# Absolute path to the results directory three levels above this script.
data_path = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '..', '..', '..', 'results')
)
# Lazy iterator over every trial's status.json under the results tree.
status_glob = glob.iglob(
    '{}/**/status.json'.format(data_path),
    recursive=True
)
# One dict per trial, filled by the loop below.
data = []
for path in status_glob:
    # The trial folder name encodes "<collection>-<method>-<groups>[-t<index>]".
    folder_name = os.path.dirname(path).split(os.sep)[-1]
    tokens = folder_name.split('-')
    collection = tokens[0]
    if collection not in groups:
        # Skip result folders that belong to other experiment collections.
        continue
    boarding_method = tokens[1]
    group_count = int(tokens[2][:-1])
    trial_index = int(tokens[3][1:]) if len(tokens) > 3 else 1
    with open(path, 'r+') as status_file:
        status = json.load(status_file)
    # Randomized ('r') trials carry a zero-padded index in their label.
    suffix = str(trial_index).zfill(2) if boarding_method == 'r' else ''
    trial_label = '{} {}'.format(methods[boarding_method], suffix).strip()
    status.update(dict(
        trial_index=trial_index,
        trial_label=trial_label,
        collection=collection,
        boarding_method=boarding_method,
        board_method_label=methods[boarding_method],
        group_count=group_count,
        seating_delay=groups[collection][0],
        interchange_delay=groups[collection][1]
    ))
    data.append(status)
# Assemble all trial records into a single DataFrame and publish it to the
# cauldron project so later analysis steps can reuse it.
df = pd.DataFrame(data)
project.shared.data = df
project.display.table(df, scale=0.5)
print('Shape:', df.shape)
| [
"swernst@gmail.com"
] | swernst@gmail.com |
3d7c1a756df91073f2da57eb2e2135b37c032f74 | 32bbbd6dbd100bbb9a2282f69ac3b7b34516347f | /Study/lotte/sample2.py | 60d2153c17777ab39ff9dad2be8087d22fd43d5f | [] | no_license | kimjh1753/AIA_Academy_Study | 2162d4d4f1a6b8ca1870f86d540df45a8742f359 | 6022718ae7f9e5170a19c4786d096c8042894ead | refs/heads/master | 2023-05-07T12:29:12.920693 | 2021-06-05T01:09:33 | 2021-06-05T01:09:33 | 324,136,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,194 | py | import numpy as np
import PIL
from numpy import asarray
from PIL import Image
import pandas as pd
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
from numpy import expand_dims
from sklearn.model_selection import StratifiedKFold, KFold
from keras.models import Sequential, Model, load_model
from keras.layers import *
from keras.layers import GlobalAveragePooling2D
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.optimizers import Adam,SGD
from sklearn.model_selection import train_test_split
import string
import scipy.signal as signal
from keras.applications.resnet import ResNet101,preprocess_input
# img1=[]
# for i in range(0,72000):
# filepath='../study/LPD_COMPETITION/test/%d.jpg'%i
# image2=Image.open(filepath)
# image2 = image2.convert('RGB')
# image2 = image2.resize((72, 72))
# image_data2=asarray(image2)
# # image_data2 = signal.medfilt2d(np.array(image_data2), kernel_size=3)
# img1.append(image_data2)
# np.save('../study/LPD_COMPETITION/npy/pred.npy', arr=img1)
# Load the pre-built array of test images (the commented-out code above shows
# how it was generated: 72000 JPEGs resized to 72x72 RGB, saved as one .npy).
x_pred = np.load('../study/LPD_COMPETITION/npy/pred.npy',allow_pickle=True)
print(x_pred.shape) | [
"kimjh1753@naver.com"
] | kimjh1753@naver.com |
3b2855bafd2eed43028ba02f1556ea2d841e4645 | 2ad5f93c2515c9a3a2d24bbd43bf353be4c5b741 | /blogpost/models.py | be8723e14897040b3c49aae09bc87baf0bd85037 | [] | no_license | oereo/likelion_3rd_CRUD | 22c7d316478ef077d3467aea4bcbbeaf2a180f67 | 7889d7f968909c82ca94978a9831647108dc0436 | refs/heads/master | 2022-04-20T17:45:36.075059 | 2020-04-15T05:45:25 | 2020-04-15T05:45:25 | 254,722,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | from django.db import models
# Create your models here.
class Blog(models.Model):
    """A blog post.

    NOTE(review): `user_id` is a plain IntegerField rather than a ForeignKey
    to the user model — confirm that is intentional.
    """
    user_id = models.IntegerField()
    title = models.CharField(max_length = 200)
    pub_date = models.DateTimeField('date published')
    body = models.TextField()
    def __str__(self):
        """Human-readable representation: the post title."""
        return self.title
    def summary(self):
        """First 50 characters of the body, suitable for list views."""
        return self.body[:50]
class Comment(models.Model):
    """A comment attached to a Blog post, carrying an approval flag."""
    # Reverse accessor: blog.comments (related_name below).
    blog = models.ForeignKey('blogpost.Blog', on_delete=models.CASCADE, null=True, related_name='comments')
    #author = models.ForeignKey('blogpost.Blog', on_delete=models.SET_NULL, null=True, blank=True, related_name='comments')
    text = models.TextField()
    created_date = models.DateTimeField('date published')
    approved_comment = models.BooleanField(default=False)
    def approve(self):
        """Mark this comment as approved and persist the change."""
        self.approved_comment = True
        self.save()
def __str__(self):
return self.text | [
"dlstpgns0406@gmail.com"
] | dlstpgns0406@gmail.com |
a20abdd5e48fa7fad20571ecc08e86f890c7aa00 | bbd65a48e9fb340b29f39082483680969d6e2571 | /python/misc/seven_boom.py | 3d95211a3ab449b70b9cefa5004dbf8d32999cd6 | [
"MIT"
] | permissive | christopher-burke/warmups | 2784eef3b959bca5c270b3e642b505f3b4c0b790 | 140c96ada87ec5e9faa4622504ddee18840dce4a | refs/heads/master | 2022-05-24T11:26:40.046650 | 2022-03-28T16:47:16 | 2022-03-28T16:47:16 | 152,440,792 | 0 | 0 | MIT | 2022-03-13T03:25:43 | 2018-10-10T14:51:43 | Python | UTF-8 | Python | false | false | 1,096 | py | #!/usr/bin/env python3
"""Seven Boom!
Create a function that takes a list of numbers and return "Boom!"
if the number 7 appears in the list. Otherwise, return
"there is no 7 in the list".
Source:
https://edabit.com/challenge/BokhFunYBvsvHEjfx
"""
def boom(iterable, target: int) -> str:
    """Scan *iterable* for the decimal digits of *target*.

    The check is a substring match on the string form of each element, so
    a target of 7 also matches 17, 76, etc.

    Returns "Boom!" on the first match, otherwise
    f"there is no {target} in the list".
    """
    needle = str(target)
    if any(needle in str(candidate) for candidate in iterable):
        return "Boom!"
    return f"there is no {target} in the list"
def seven_boom(iterable):
    """Play "seven boom": report whether any number in *iterable* contains a 7."""
    return boom(iterable, 7)
def main():
    """Smoke-test seven_boom() against known cases; prints "Passed." on success."""
    assert seven_boom([2, 6, 7, 9, 3]) == 'Boom!'
    assert seven_boom([33, 68, 400, 5]) == 'there is no 7 in the list'
    assert seven_boom([86, 48, 100, 66]) == 'there is no 7 in the list'
    # 76 and 37 contain the digit 7, so these are matches too.
    assert seven_boom([76, 55, 44, 32]) == 'Boom!'
    assert seven_boom([35, 4, 9, 37]) == 'Boom!'
    print("Passed.")
# Only run the self-test when executed as a script.
if __name__ == "__main__":
    main()
| [
"christopherjamesburke@gmail.com"
] | christopherjamesburke@gmail.com |
f5f3517c0ff6bcc0f43587271b85632677d7b57e | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_041/ch26_2019_04_02_16_07_38_488264.py | 57bde7e9da6a3eb22678bcb3f6248dec2980525e | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | d=int(input('Quantos dias: '))
# Read hours/minutes/seconds of a duration (prompts are in Portuguese;
# the days value `d` is read on the line above).
h=int(input('Quantos horas: '))
m=int(input('Quantos minutos: '))
s=int(input('Quantos segundos: '))
# Total duration expressed entirely in seconds.
total=d*24*60*60 + h*3600 + m*60 + s
print(total) | [
"you@example.com"
] | you@example.com |
a3bb3a7d77c9b9226614e48945600b092669f15d | 21b201ebf2ffbbc19fa8d74e5657e12ef597b02d | /research/delf/delf/__init__.py | 7b226c81981070ae50af1c7135e634deb99e6fa2 | [] | no_license | alhsnouf/model | fa619691ad9d0afc7ad849a9471e6bb0643a8d47 | 5fe429b115634e642a7469b3f1d4bc0c5cf98782 | refs/heads/master | 2021-04-12T11:16:02.150045 | 2018-03-27T15:19:18 | 2018-03-27T15:19:18 | 126,702,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:4e6ff6d395f70f65df777f6d7ce2e4dda1834b39732c72b01872ab001e7f13e2
size 1159
| [
"alhanouf987@hotmail.com"
] | alhanouf987@hotmail.com |
e162b69187ef86385f72df31980a1b8156669aa0 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /BDjhphREEa6Ds44Ty_20.py | 15be2eecdff35a92649714e01724f9439d1ef46c | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py |
def bomb(lst):
    """Locate a point on a 51x51 integer grid from three timing readings.

    Each entry of *lst* is (x, y, t); t * 0.343 is treated as the distance
    from listener (x, y) to the source.  Scanning rows first, returns the
    first cell (a, b) whose squared distance to the listeners matches the
    rounded squared distance implied by t for all three readings, or None
    implicitly when no cell matches.
    """
    for row in range(51):
        for col in range(51):
            hits = sum(
                (row - x) ** 2 + (col - y) ** 2 == round((t * 0.343) ** 2)
                for x, y, t in lst
            )
            if hits == 3:
                return row, col
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
9be1486b946143c72e7897d2f570a254c3f3c382 | 6929a33a7259dad9b45192ca088a492085ed2953 | /solutions/0315-count-of-smaller-numbers-after-self/count-of-smaller-numbers-after-self.py | f75f21b99a1f4155210dfb7fc92cc0f428166479 | [] | no_license | moqi112358/leetcode | 70366d29c474d19c43180fd4c282cc02c890af03 | fab9433ff7f66d00023e3af271cf309b2d481722 | refs/heads/master | 2022-12-10T01:46:14.799231 | 2021-01-14T05:00:09 | 2021-01-14T05:00:09 | 218,163,960 | 3 | 0 | null | 2022-07-06T20:26:38 | 2019-10-28T23:26:47 | Python | UTF-8 | Python | false | false | 2,522 | py | # You are given an integer array nums and you have to return a new counts array. The counts array has the property where counts[i] is the number of smaller elements to the right of nums[i].
#
#
# Example 1:
#
#
# Input: nums = [5,2,6,1]
# Output: [2,1,1,0]
# Explanation:
# To the right of 5 there are 2 smaller elements (2 and 1).
# To the right of 2 there is only 1 smaller element (1).
# To the right of 6 there is 1 smaller element (1).
# To the right of 1 there is 0 smaller element.
#
#
#
# Constraints:
#
#
# 0 <= nums.length <= 10^5
# -10^4 <= nums[i] <= 10^4
#
#
class Solution:
    """LeetCode 315: counts of smaller elements to the right of each index."""

    def countSmaller(self, nums):
        """Return counts where counts[i] = #{j > i : nums[j] < nums[i]}.

        Consumes the array right-to-left, inserting each value into an
        order-statistic BST; each insert reports how many previously
        inserted (i.e. to-the-right) values are strictly smaller.
        """
        tree = BinarySearchTree()
        counts = [0] * len(nums)
        for idx in reversed(range(len(nums))):
            counts[idx] = tree.insert(nums[idx])
        return counts
class TreeNode:
    """Node of the order-statistic BST used by BinarySearchTree."""
    def __init__(self, val):
        self.val = val
        self.left = None   # left child subtree (values < val)
        self.right = None  # right child subtree (values > val)
        self.count = 1          # number of duplicates of val inserted so far
        self.left_smaller = 0   # number of inserted values routed into the left subtree
class BinarySearchTree:
    """Unbalanced BST augmented with duplicate and left-subtree counters.

    insert(val) returns the number of previously inserted values strictly
    smaller than val — exactly what countSmaller needs when the array is
    consumed right-to-left.
    """
    def __init__(self):
        self.root = None
    def insert(self, val):
        """Insert val; return how many already-inserted values are < val."""
        count = 0
        if self.root is None:
            self.root = TreeNode(val)
            return count
        root = self.root
        while root:
            if val > root.val:
                # All duplicates at this node plus its entire left subtree
                # are smaller than val.
                count += root.count + root.left_smaller
                if root.right is None:
                    root.right = TreeNode(val)
                    break
                else:
                    root = root.right
            elif val < root.val:
                # val joins this node's left subtree, so record its growth.
                root.left_smaller += 1
                if root.left is None:
                    root.left = TreeNode(val)
                    break
                else:
                    root = root.left
            elif val == root.val:
                # Equal values are not "smaller"; only the left subtree counts.
                count += root.left_smaller
                root.count += 1
                break
        return count
# def countSmaller(self, nums):
# def sort(enum):
# half = len(enum) / 2
# if half:
# left, right = sort(enum[:half]), sort(enum[half:])
# for i in range(len(enum))[::-1]:
# if not right or left and left[-1][1] > right[-1][1]:
# smaller[left[-1][0]] += len(right)
# enum[i] = left.pop()
# else:
# enum[i] = right.pop()
# return enum
# smaller = [0] * len(nums)
# sort(list(enumerate(nums)))
# return smaller
| [
"983028670@qq.com"
] | 983028670@qq.com |
cd6420002aea9c6f9d0f6ed5b1e78f38f1988c90 | eca3dd04a15e7780ca46e79c2b54a9fb3a448daa | /app.py | 8bab6244b4fedcff2d6d8f0efee34f5fbbbee828 | [
"MIT"
] | permissive | twtrubiks/line-bot-oop | 367ca085925d4f2c5b01726b7771e7ffd576e55f | 874daeb2e4b0d3083025801e42c6f27d2f27e5e1 | refs/heads/master | 2022-08-23T17:23:47.015343 | 2022-06-25T04:24:27 | 2022-06-25T04:24:27 | 163,133,263 | 11 | 8 | null | null | null | null | UTF-8 | Python | false | false | 3,364 | py | from config import Config
from flask import Flask, request, abort
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import StickerMessage, MessageEvent, \
TextMessage
from strategy import TaskStrategy, eyny_movie, apple_news, \
ptt_beauty, imgur_beauty, random_beauty, ptt_hot, \
ptt_gossiping, movie, youtube_video, technews, panx, \
oil_price
from strategy import TemplateStrategy, start_template, news_template, \
movie_template, ptt_template, beauty_template, imgur_bot_template
from strategy import ImageStrategy
from my_dict import MyDict
# Module-level wiring: the LINE webhook handler (from config) and the Flask app.
config = Config()
handler = config.handler
app = Flask(__name__)
@app.route("/callback", methods=['POST'])
def callback():
    """LINE webhook endpoint: verify the request signature, then dispatch."""
    # get X-Line-Signature header value
    signature = request.headers['X-Line-Signature']
    # get request body as text
    body = request.get_data(as_text=True)
    # print("body:",body)
    app.logger.info("Request body: " + body)
    # handle webhook body; a bad signature means the request is not from LINE
    try:
        handler.handle(body, signature)
    except InvalidSignatureError:
        abort(400)
    return 'ok'
class Bot:
    """Maps an incoming chat message to the strategy that should handle it."""
    task_map = {
        MyDict.eyny_movie: eyny_movie,
        MyDict.apple_news: apple_news,
        MyDict.ptt_beauty: ptt_beauty,
        MyDict.imgur_beauty: imgur_beauty,
        MyDict.random_beauty: random_beauty,
        MyDict.ptt_hot: ptt_hot,
        MyDict.ptt_gossiping: ptt_gossiping,
        MyDict.movie: movie,
        MyDict.youtube_video: youtube_video,
        MyDict.technews: technews,
        MyDict.panx: panx,
        MyDict.oil_price: oil_price
    }
    template_map = {
        MyDict.start_template: start_template,
        MyDict.news_template: news_template,
        MyDict.movie_template: movie_template,
        MyDict.ptt_template: ptt_template,
        MyDict.beauty_template: beauty_template,
        MyDict.imgur_bot_template: imgur_bot_template,
    }
    def __init__(self, val):
        """Store the raw message text and apply command normalisation."""
        self.val = val
        self.special_handle()
    def strategy_action(self):
        """Return the (strategy class, action function) pair for the message.

        Task commands run under TaskStrategy, template commands under
        TemplateStrategy; an unknown message yields (None, None).
        """
        if self.val in self.task_map:
            return TaskStrategy, self.task_map[self.val]
        if self.val in self.template_map:
            return TemplateStrategy, self.template_map[self.val]
        return None, None
    def special_handle(self):
        """Normalise case-insensitive spellings of the eyny command."""
        lowered = self.val.lower()
        if lowered == MyDict.eyny_movie:
            self.val = lowered
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    """Dispatch an incoming text message to the matching strategy.

    Falls back to the default TemplateStrategy when the text matches no
    known command.
    """
    # print("event.reply_token:", event.reply_token)
    # print("event.message.text:", event.message.text)
    message = event.message.text
    bot = Bot(message)
    strategy_class, action_fun = bot.strategy_action()
    if strategy_class:
        task = strategy_class(action_fun, event)
        # presumably used for logging/identification — TODO confirm
        task.name = str(action_fun)
        task.execute()
        return 0
    default_task = TemplateStrategy(event=event)
    default_task.execute()
@handler.add(MessageEvent, message=StickerMessage)
def handle_sticker_message(event):
    """Respond to sticker messages using the image strategy."""
    # print("package_id:", event.message.package_id)
    # print("sticker_id:", event.message.sticker_id)
    image_strategy = ImageStrategy(event=event)
    image_strategy.execute()
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
| [
"twtrubiks@gmail.com"
] | twtrubiks@gmail.com |
ee25be524068910a48050b1eee9b3a18e07561d6 | b7125b27e564d2cc80a2ce8d0a6f934aa22c8445 | /.history/sudoku_20201101163842.py | d05a6885c5dc6f0ee287f3969b46210c660305eb | [] | no_license | JensVL96/Puzzle-solver-for-fun | 4c15dcd570c3705b7ac555efb56b52913e81083c | 6d8a4378a480372213a596a336a4deca727a00fc | refs/heads/master | 2021-07-15T05:19:42.185495 | 2020-11-08T13:59:49 | 2020-11-08T13:59:49 | 224,855,888 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,326 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
from config import *
from create_board import *
from solve_bloard import *
from display_board import *
from string import *
from math import floor
import pygame as pg
import numpy as np
# For error highlighting
def set_highlight(row, col, blk, lock):
    """Record conflicting cell coordinates and the input lock in module globals.

    Called by valid() with the (row, col) tuples of a conflicting entry;
    `lock` (1) freezes further board input until the user erases the value.
    """
    global input_lock
    input_lock = lock
    global row_index
    row_index = row
    global col_index
    # NOTE(review): `col_index` is assigned from `blk` and `blk_index` from
    # `col` — the names appear swapped relative to the parameters.  The
    # downstream display.update() call may compensate; confirm before fixing.
    col_index = blk
    global blk_index
    blk_index = col
def get_cord(pos):
    """Translate a pixel position into board cell indices (module globals)."""
    global box_index_x, box_index_y
    x_pix, y_pix = pos[0], pos[1]
    box_index_x = (x_pix - TOP_LX) // BLOCK_SIZE
    box_index_y = (y_pix - TOP_LY) // BLOCK_SIZE
def valid(grid, x, y, val):
    """Check whether placing *val* at grid[x][y] conflicts with existing values.

    Scans the 9 cells grid[x][*], the 9 cells grid[*][y] and the 3x3 block
    containing (x, y).  On any conflict it records the conflicting
    coordinates via set_highlight() (which also sets the global input lock)
    and returns False; otherwise returns True.
    """
    input_lock = 0  # local flag; the module-level global is set via set_highlight()
    row = col = blk = (0, 0)
    for index in range(9):
        # Scan grid[x][*] for a duplicate.
        # NOTE(review): this hit is stored in `col` while the grid[*][y] scan
        # below stores into `row` — the naming looks swapped, and
        # set_highlight() swaps col/blk again; confirm before "fixing".
        if grid[x][index] == val:
            col = (x, index)
            input_lock = 1
        # Scan grid[*][y] for a duplicate.
        if grid[index][y] == val:
            row = (index, y)
            input_lock = 1
    # Finds the 3x3 block containing (x, y)
    index_x = x // 3 # integer division
    index_y = y // 3
    # Check if value in block
    for i in range(index_x * 3, index_x * 3 + 3):
        for j in range (index_y * 3, index_y * 3 + 3):
            if grid[i][j] == val:
                blk = (i, j)
                input_lock = 1
    if input_lock == 1:
        # Record the conflict and lock further input until it is cleared.
        set_highlight(row, col, blk, input_lock)
        return False
    return True
class Main():
    """Pygame front-end for the Sudoku solver: owns the window and game loop."""
    def __init__(self):
        self.board = []
        self.run()
    def run(self):
        """Initialise pygame and run the event/draw loop until the user quits."""
        pg.init()
        self.screen = pg.display.set_mode(SCREEN_RES)
        pg.display.set_caption('Sudoku solver')
        display = Display_board(self.screen)
        val = 0              # digit pending placement (0 = none)
        blink = False        # whether a cell is selected and should blink
        alpha = 1            # current blink transparency
        a_change = True      # blink fade direction
        blink_color = GREEN  # GREEN for normal selection, RED after a conflict
        get_cord(INITIAL_CORDS)
        set_highlight(INITIAL_CORDS, INITIAL_CORDS, INITIAL_CORDS, INITIAL_LOCK)
        board = create_board().board
        while 1:
            for event in pg.event.get():
                # Quit on window close or Escape.
                if event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):
                    exit()
                # Mouse selects a cell, but only while input is not locked.
                if event.type == pg.MOUSEBUTTONDOWN and input_lock != 1:
                    pos = pg.mouse.get_pos()
                    get_cord(pos)
                    # Checks if selection is on the board
                    if pos[0] < TOP_LX or pos[1] < TOP_LY or pos[0] > int(BOT_RX) or pos[1] > int(BOT_RY):
                        blink = False
                    else:
                        blink = True
                # Digit keys stage a value for the selected cell.
                if event.type == pg.KEYDOWN and input_lock != 1:
                    if event.key == pg.K_1:
                        val = 1
                    if event.key == pg.K_2:
                        val = 2
                    if event.key == pg.K_3:
                        val = 3
                    if event.key == pg.K_4:
                        val = 4
                    if event.key == pg.K_5:
                        val = 5
                    if event.key == pg.K_6:
                        val = 6
                    if event.key == pg.K_7:
                        val = 7
                    if event.key == pg.K_8:
                        val = 8
                    if event.key == pg.K_9:
                        val = 9
                    if event.key == pg.K_BACKSPACE:
                        board[int(box_index_x)][int(box_index_y)] = 0
                # While a conflict is highlighted, only Backspace is accepted:
                # it clears the cell and releases the input lock.
                elif event.type == pg.KEYDOWN and input_lock == 1:
                    if event.key == pg.K_BACKSPACE:
                        val = 0
                        set_highlight(INITIAL_CORDS, INITIAL_CORDS, INITIAL_CORDS, INITIAL_LOCK)
                        blink_color = GREEN
                        board[int(box_index_x)][int(box_index_y)] = 0
            if val != 0:
                display.draw_val(val, box_index_x, box_index_y)
                # NOTE(review): both branches write the value into the board;
                # the invalid case relies on input_lock forcing the user to
                # erase it — confirm this is intentional.
                if valid(board, int(box_index_x), int(box_index_y), val):
                    board[int(box_index_x)][int(box_index_y)] = val
                else:
                    board[int(box_index_x)][int(box_index_y)] = val
                val = 0
            # Draws the screen
            pg.draw.rect(self.screen, BLACK, (0, 0, self.screen.get_width(), self.screen.get_height()))
            self.screen.fill(BEIGE)
            # Draws the board
            display.draw(board)
            # Check if cell is selected; blink by drawing a translucent overlay.
            if blink:
                cell = display.find_cell(box_index_x, box_index_y)
                blink = display.blink(alpha, a_change)
                alpha = blink[0]
                a_change = blink[1]
                myRect = pg.Rect(cell)
                rectSurf = pg.Surface(myRect.size, pg.SRCALPHA)
                rectSurf.fill(blink_color)
                rectSurf.set_alpha(alpha)
                self.screen.blit(rectSurf, (myRect.x, myRect.y))
            # Check if incorrect input: highlight the conflict in RED.
            if input_lock == 1:
                if val != 0:
                    display.update(board, row_index, col_index, blk_index)
                blink_color = RED
            # display.draw_box()
            pg.display.update()
            # Re-solve every frame — presumably to refresh solver flags;
            # confirm the cost is acceptable.
            self.solution = solve_board(board)
            self.solution.assign_flags(board)
# Start the game when executed as a script.
if __name__ == '__main__':
    Main()
| [
"jle040@uit.no"
] | jle040@uit.no |
312ef5410957d8b16e34b487aa2b9c64ad6d460b | 78e96321c8647594678e8899e6845844f6b8b95f | /psono/restapi/serializers/delete_duo.py | ea9d7e0321b4090cd8f259d48fafe618b340d773 | [
"MIT",
"CC0-1.0",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | mirazmamun/psono-server | c5514c33ace72c67c207c0556db9c9cf4cbb9e03 | 90f64337063bdd0165557187470f12306cb050a4 | refs/heads/master | 2020-03-19T05:41:30.509721 | 2018-04-05T18:37:54 | 2018-04-05T18:37:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers, exceptions
from ..fields import UUIDField
from ..models import Duo
class DeleteDuoSerializer(serializers.Serializer):
    """Validates a request to delete one of the user's Duo (2FA) entries."""
    duo_id = UUIDField(required=True)
    def validate(self, attrs: dict) -> dict:
        """Resolve duo_id to a Duo owned by the requesting user.

        Adds `duo` (the instance) and `duo_count` (the user's number of
        active duos) to the validated data.  Raises ValidationError when the
        duo does not exist or belongs to another user.
        """
        requesting_user = self.context['request'].user
        try:
            duo = Duo.objects.get(pk=attrs.get('duo_id'), user=requesting_user)
        except Duo.DoesNotExist:
            raise exceptions.ValidationError(
                _("You don't have permission to access or it does not exist.")
            )
        attrs['duo'] = duo
        attrs['duo_count'] = Duo.objects.filter(user=requesting_user, active=True).count()
        return attrs
"sascha.pfeiffer@psono.com"
] | sascha.pfeiffer@psono.com |
3038cc0b222b31de169081725a22737f2cce3451 | cf4e5165a8408344a4c62e63a0fd2d0fe6308b37 | /00-2017/基础班/Python函数实现学生管理系统.py | 5ed20604dc7418b17839aac67218c349bbbea424 | [] | no_license | kennycaiguo/Heima-Python-2018 | 5f8c340e996d19f2b5c44d80ee7c144bf164b30e | a8acd798f520ec3d079cc564594ebaccb9c232a0 | refs/heads/master | 2021-01-08T10:54:18.937511 | 2019-09-01T14:37:49 | 2019-09-01T14:37:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,786 | py | #coding=utf-8
#auther:microease
# In-memory "database": one dict per student, seeded with an example record.
studentInfos = [{'name': 'huyankai', 'sex': '男', 'phoneNumber': '15172332476'}]
# Globals filled in by getStudentInfo() and consumed by add/modify below.
newName = ""
newSex = ""
newPhoneNumber = ""
'''
IndentationError: unindent does not match any outer indentation level
出现这个错误是因为缩进有问题
'''
def printinfo():
    """Print the main menu (in Chinese) of the student manager."""
    menu_lines = (
        "*"*30,
        "欢迎使用系统",
        "1:添加新名字",
        "2:删除一个名字(使用本功能前,请使用5选项查询所有的学生序号)",
        "3:修改一个名字(使用本功能前,请使用5选项查询所有的学生序号)",
        "4:查询一个名字(使用本功能前,请使用5选项查询所有的学生序号)",
        "5:遍历所有的名字",
        "0:退出系统",
        "*"*30,
    )
    for line in menu_lines:
        print(line)
def getStudentInfo():
    """Prompt for a student's name, sex and phone; store them in module globals."""
    global newName, newSex, newPhoneNumber
    newName = input("请输入新学生的名字:")
    newSex = input("请输入新学生的性别:")
    newPhoneNumber = input("请输入新学生的电话:")
def addStudentInfo():
    """Collect a new student's details and append the record to studentInfos."""
    getStudentInfo()
    studentInfos.append({
        'name': newName,
        'sex': newSex,
        'phoneNumber': newPhoneNumber,
    })
def modifyStudentInfo():
    """Overwrite the record selected by its 1-based list number with fresh input.

    Bug fix: the phone number was previously written under the key
    'PhoneNumber' (capital P), which left the displayed 'phoneNumber' value
    stale and added a stray key to the record.
    """
    # int() because the value is used for list indexing below
    studentID = int(input("请输入您要修改的学生序号:"))
    getStudentInfo()
    studentInfos[studentID-1]['name'] = newName
    studentInfos[studentID-1]['sex'] = newSex
    studentInfos[studentID-1]['phoneNumber'] = newPhoneNumber
def deleteStudentInfo():
    """Delete the record selected by its 1-based list number.

    Bug fix: the prompt previously said 修改 (modify); corrected to 删除
    (delete) to match the menu action.
    """
    # int() because the value is used for list indexing below
    studentID = int(input("请输入您要删除的学生序号:"))
    del studentInfos[studentID-1]
def findStudentInfo():
    """Look up a single student by 1-based list number and print the record.

    Bug fixes: the index read from the user was previously ignored and every
    record was printed; the data row also omitted the 序号 column promised
    by the header, and the prompt said 修改 (modify) instead of 查询 (query).
    """
    studentID = int(input("请输入您要查询的学生序号:"))
    tempInfo = studentInfos[studentID-1]
    print("*"*30)
    print("学生的信息如下:")
    print("序号 姓名 性别 电话")
    print("%d %s %s %s"%(studentID,tempInfo['name'],tempInfo['sex'],tempInfo['phoneNumber']))
    print("*"*30)
def main():
    """Menu loop of the student manager; runs until the user chooses 0."""
    while True:
        printinfo()
        key = input("请输入您想要的选项:")
        if key=="1":
            addStudentInfo()
            print(studentInfos)
        elif key=="2":
            deleteStudentInfo()
            print(studentInfos)
        elif key=="3":
            print(studentInfos)
            modifyStudentInfo()
            print(studentInfos)
        elif key=="4":
            findStudentInfo()
            #此处待完善
        elif key=="5":
            print("*"*30)
            print("学生的信息如下:")
            print("序号 姓名 性别 电话")
            # Bug fix: the row counter was never incremented inside the
            # loop, so every student was listed as number 1; enumerate()
            # numbers the rows correctly.
            for i, tempInfo in enumerate(studentInfos, start=1):
                print("%d %s %s %s"%(i,tempInfo['name'],tempInfo['sex'],tempInfo['phoneNumber']))
            print("*"*30)
        elif key=="0":
            break
        else:
            print("非法输入,请重新输入!")
main() | [
"microease@163.com"
] | microease@163.com |
01fc0ba9d4cd8f25064ad04a169087523fb3164d | 99ca151c59afd9c0e7091b6919768448e40f88a2 | /multi_return2.py | 9127ed44a9be30cefc46c5e2aff6a3f671442c60 | [] | no_license | zainabnazari/Python_note | 1b6a454f6e7b3aca998d87a201823a600ec28815 | 3beb52beb3a0ebe17a6ac8c5695670e9dde59269 | refs/heads/main | 2023-02-10T22:32:33.160428 | 2021-01-12T18:36:54 | 2021-01-12T18:36:54 | 304,724,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | #file name: multi_return.py
def smallest_element(list_of_things):
"""
Takes a list of comparable items. Returns the position of the smallest element and its value.
"""
val=3
smallest_indices =[]
the_value = val
smallest_values=[]
for index, element in enumerate(list_of_things):
if element < the_value:
smallest_values += [element]
smallest_indices += [index]
return (smallest_indices, smallest_values)
if __name__=="__main__":
"""The following test code is run only if the file is run, not if it is imported:"""
smalls = smallest_element([9,3,1,4,2,8,4,6,2,6,2,5,72,1,76,8,-1,3,1000])
print(smalls)
print(smalls[0])
print(smalls[1])
"""
output:
(16, -1)
16
-1
"""
| [
"nazari.zainab@gmail.com"
] | nazari.zainab@gmail.com |
de9f64e04343df6e8b39b3b42ab29a7e393a7be1 | c9500ad778b8521aaa85cb7fe3239989efaa4799 | /plugins/zscaler/icon_zscaler/actions/get_users/action.py | 7d8f906f4b6142b4c21e447999525d1306c7c19d | [
"MIT"
] | permissive | rapid7/insightconnect-plugins | 5a6465e720f114d71b1a82fe14e42e94db104a0b | 718d15ca36c57231bb89df0aebc53d0210db400c | refs/heads/master | 2023-09-01T09:21:27.143980 | 2023-08-31T10:25:36 | 2023-08-31T10:25:36 | 190,435,635 | 61 | 60 | MIT | 2023-09-14T08:47:37 | 2019-06-05T17:05:12 | Python | UTF-8 | Python | false | false | 900 | py | import insightconnect_plugin_runtime
from .schema import GetUsersInput, GetUsersOutput, Input, Output, Component
# Custom imports below
from icon_zscaler.util.helpers import clean_dict
class GetUsers(insightconnect_plugin_runtime.Action):
    """Action that queries Zscaler for users matching optional filters."""

    def __init__(self):
        super(self.__class__, self).__init__(
            name="get_users",
            description=Component.DESCRIPTION,
            input=GetUsersInput(),
            output=GetUsersOutput(),
        )

    def run(self, params={}):
        """Return {Output.USERS: ...} for users matching the given filters."""
        self.logger.info(f"Getting list of users with filter: {params}.\n")
        query = {}
        query["name"] = params.get(Input.NAME)
        query["dept"] = params.get(Input.DEPARTMENT)
        query["group"] = params.get(Input.GROUP)
        query["page"] = params.get(Input.PAGE)
        query["pageSize"] = params.get(Input.PAGESIZE)
        # clean_dict presumably strips unset (None) filters before the API
        # call — see icon_zscaler.util.helpers.clean_dict.
        return {Output.USERS: self.connection.client.get_users(clean_dict(query))}
| [
"noreply@github.com"
] | rapid7.noreply@github.com |
5e2948a51baf2366b88d9084f5f13f04daf78a10 | a873f3cd46a10ad879fc56d78e1f533d8bf486c0 | /z_python-stu1/tpytest/p3/tc/conftest.py | 2a238c85e888f0210687a8d73eaa274557addfdb | [] | no_license | shenhaiyu0923/resful | d0301b39363e6b3d3659f62fa4a9b2532ebcd225 | 1e66cae7d68fa231794776953cc1a5e999bf36c6 | refs/heads/master | 2021-07-08T20:46:57.300298 | 2021-06-01T08:17:27 | 2021-06-01T08:17:27 | 244,308,016 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | import pytest
from p3.pylib.ApiSchoolClass import ins_ApiSchoolClass
@pytest.fixture(scope='package',autouse=True)# package-scoped fixture, autouse applies it automatically to every test in this package
def st_clearAll(request):
    """Build a blank data environment before the package's tests run."""
    print(f'\n---初始化::构建空白数据环境')
    # Setup: remove every school class so tests start from a clean slate.
    ins_ApiSchoolClass.delete_all_school_class()
    def fin():
        # Teardown, run after all tests in the package complete.
        # NOTE(review): fin only prints — no actual cleanup is performed.
        print(f'\n---清除::清除空白数据环境')
    request.addfinalizer(fin)
# pytest -s tc --html=report1.html | [
"jwang9@vova.com.hk"
] | jwang9@vova.com.hk |
026bd81e122a8c7bbc4e59a8b3ff78753ccfb57d | 39ea026c441a05b8328afc3d5928f8d2ddb43a58 | /W3Resource_Exercises/Loops and Conditionals/divisibility.py | 09dba621688b489004be2f700dc76dc9227602f4 | [] | no_license | kamit17/Python | 576931b9152b434d8ca62abfea719b748d1ebef0 | 840963648dd22189d5ee7694789b2315901e4aed | refs/heads/master | 2022-06-19T21:01:13.881545 | 2022-05-26T14:40:53 | 2022-05-26T14:40:53 | 116,746,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | #1. Write a Python program to find those
#numbers which are divisible by 7 and multiple of 5,
#between 1500 and 2700 (both included).
# Dead code: `num` is immediately rebound by the loop below, so this empty
# list is never used.
num= []
for num in range(1500,2701):
    # Divisible by both 7 and 5 (equivalently, by 35).
    if (num % 7 == 0 ) and (num % 5 == 0):
print(num) | [
"kamit17@outlook.com"
] | kamit17@outlook.com |
45f0febacd6da6a8ef09bb2a7c38a0ebd1d39102 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /last_world/feel_high_year/do_life_after_woman/hand.py | d7917e3c952966f3aa51e5226319fe3253e14fb7 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py |
#! /usr/bin/env python
def time(str_arg):
    """Print *str_arg* (via child) followed by a fixed marker line.

    NOTE(review): the name shadows the stdlib module name `time`; renaming
    would change the module's public interface, so it is left as-is.
    """
    child(str_arg)
    print('small_fact_and_next_year')
def child(str_arg):
    """Echo *str_arg* to stdout."""
    print(str_arg)
# Demo invocation when run as a script.
if __name__ == '__main__':
    time('get_new_company')
| [
"jingkaitang@gmail.com"
] | jingkaitang@gmail.com |
82e8e0d441dc68eb33e2c0dce4caf932c5f4624c | cb1bdfe34a758140941e19171389eea94b03c755 | /src/scripts/processar_arquivos.py | 7de9206816818b639a8d97f66bf4b24da6b72c37 | [
"MIT"
] | permissive | danilopcarlotti/scdf | 03a973546251b252f8e9f534e643bda3c8dd7df1 | 1960f6b2db5af884c72cbdfaac9849dfec4acef4 | refs/heads/master | 2022-09-07T12:14:49.143418 | 2022-08-30T14:26:02 | 2022-08-30T14:26:02 | 158,738,443 | 3 | 0 | MIT | 2019-09-13T15:34:41 | 2018-11-22T18:37:41 | Python | UTF-8 | Python | false | false | 633 | py | import sys
from pathlib import Path
PATH_ROOT = Path().absolute().parent.parent
sys.path.append(str(PATH_ROOT))
from scdf.src.scripts.remove_accents import remove_accents
def insert_words(texto, file, mycol):
    """Index *file* under each significant word of *texto* in collection *mycol*.

    Words are accent-stripped, lower-cased and deduplicated; only words
    longer than 3 characters that are not purely numeric are kept.  Each
    word becomes a document keyed by the word itself whose `documents`
    array accumulates the files containing it.  Always returns True.
    """
    texto = remove_accents(texto).lower()
    palavras = list(set([w for w in texto.split() if (len(w) > 3 and not w.isdigit())]))
    for p in palavras:
        try:
            if mycol.find_one({"_id": p}):
                mycol.update_one({"_id": p}, {"$push": {"documents": file}})
            else:
                mycol.insert_one({"_id": p, "documents": [file]})
        except Exception:
            # Best-effort indexing: narrowed from a bare `except` so that
            # KeyboardInterrupt/SystemExit are no longer swallowed; a failed
            # upsert for one word still must not abort the rest.
            pass
    return True
| [
"danilopcarlotti@gmail.com"
] | danilopcarlotti@gmail.com |
b8dd98c18ccdd21447cdd662611c171db8b6af3a | 94d053907baa97189dc119925c86997627540273 | /carts/migrations/0001_initial.py | 4b00005f8f20921f572ff088c3662397115049f7 | [] | no_license | Abdulrahman-ahmed25/E4Healthylife | e1ccd6c1bbd7c62bb7b0b8c26681906a0d66ee55 | 799f2cf44193c4a080d99cd9a951788ac8ae79ce | refs/heads/main | 2023-06-19T23:31:36.954120 | 2021-07-16T22:41:16 | 2021-07-16T22:41:16 | 379,300,442 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,424 | py | # Generated by Django 3.2.4 on 2021-06-26 20:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the CartItem and Cart tables."""
    initial = True
    dependencies = [
        ('Hcollections', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='CartItem',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.PositiveIntegerField(default=1)),
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Hcollections.meal')),
            ],
        ),
        migrations.CreateModel(
            name='Cart',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('hcollection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Hcollections.hcollection')),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"abdulrahman.ahmed2544@gmail.com"
] | abdulrahman.ahmed2544@gmail.com |
c1feb307dffd2aea25928f6f5f36a52bc9ad23ca | 5b3caf64b77161748d0929d244798a8fb914d9c5 | /Python Excel Examples/CellsApiDemo/column/getColumns.py | d20eeddafa7c5d84e2657dd44dd573f53b90e2e3 | [] | no_license | EiceblueCloud/Spire.Cloud.Excel | 0d56864991eaf8d44c38f21af70db614b1d804b7 | d9845d5cefd15a3ab408b2c9f80828a4767e2b82 | refs/heads/master | 2021-07-20T23:44:39.068568 | 2021-07-15T03:04:49 | 2021-07-15T03:04:49 | 230,225,396 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | import spirecloudexcel
from spirecloudexcel.configuration import Configuration as ExcelConfiguration
from spirecloudexcel.api.cells_api import CellsApi
appId = "your id"
appKey = "your key"
baseUrl = "https://api.e-iceblue.cn"
configuration = ExcelConfiguration(appId, appKey,baseUrl)
api = spirecloudexcel.api.cells_api.CellsApi(configuration)
name = "GetColumns_1.xlsx"
sheetName = "Sheet1"
storage = ""
folder = "/ExcelDocument/"
result = api.get_columns(name, sheet_name=sheetName, folder=folder,storage=storage) | [
"noreply@github.com"
] | EiceblueCloud.noreply@github.com |
c9db20f7a8a337228360cfbcdbf0408e42283f56 | d2e80a7f2d93e9a38f37e70e12ff564986e76ede | /Python-cookbook-2nd/cb2_20/cb2_20_9_sol_1.py | 075a870681607401456ef830877f8759ab545ddb | [] | no_license | mahavivo/Python | ceff3d173948df241b4a1de5249fd1c82637a765 | 42d2ade2d47917ece0759ad83153baba1119cfa1 | refs/heads/master | 2020-05-21T10:01:31.076383 | 2018-02-04T13:35:07 | 2018-02-04T13:35:07 | 54,322,949 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | class IMinimalMapping(object):
def __getitem__(self, key): pass
def __setitem__(self, key, value): pass
def __delitem__(self, key): pass
def __contains__(self, key): pass
import UserDict
class IFullMapping(IMinimalMapping, UserDict.DictMixin):
def keys(self): pass
class IMinimalSequence(object):
def __len__(self): pass
def __getitem__(self, index): pass
class ICallable(object):
def __call__(self, *args): pass
| [
"mahavivo@126.com"
] | mahavivo@126.com |
819bd231a10c1c46da06e73dcbb0a779990ef66b | 2afb554f7aa6d45261fd450edce009eaac159340 | /ui/node_pie.py | 8f820fa5994e10a34ccc99f3a526a9e61644450b | [] | no_license | og76/animation_nodes | 708932fdf8339f2d9ef7c8d4a8e09d1ee314cb31 | 0f55b153487048ebd8faadb78633681d1508d46c | refs/heads/working-tests | 2020-12-25T21:00:52.094321 | 2016-03-21T16:03:11 | 2016-03-21T16:03:11 | 36,730,082 | 0 | 1 | null | 2016-04-08T11:51:17 | 2015-06-02T12:00:11 | Python | UTF-8 | Python | false | false | 1,575 | py | import bpy
from .. sockets.info import isList
from .. utils.blender_ui import PieMenuHelper
'''
###############
######### #########
Data Input #########
######### #########
Debug Node
'''
class ContextPie(bpy.types.Menu, PieMenuHelper):
bl_idname = "an.context_pie"
bl_label = "Context Pie"
@classmethod
def poll(cls, context):
try: return context.active_node.isAnimationNode
except: return False
def drawLeft(self, layout):
amount = len(self.activeNode.getVisibleInputs())
if amount == 0: self.empty(layout, text = "Has no visible inputs")
else: layout.operator("an.insert_data_input_node_template_operator", text = "Data Input")
def drawBottom(self, layout):
amount = len(self.activeNode.getVisibleOutputs())
if amount == 0: self.empty(layout, text = "Has no visible outputs")
else: layout.operator("an.insert_debug_node_template_operator", text = "Debug")
def drawRight(self, layout):
col = layout.column(align = True)
for socket in self.activeNode.outputs:
if isList(socket.bl_idname):
props = col.operator("an.insert_loop_for_iteration_template", text = "Loop through {}".format(repr(socket.getDisplayedName())))
props.nodeIdentifier = self.activeNode.identifier
props.socketIndex = socket.index
@property
def activeNode(self):
return bpy.context.active_node
| [
"mail@jlucke.com"
] | mail@jlucke.com |
636b4129b3302a724a975c1bf43d2d2f31718848 | 1edbb74f182350c8016e578464b0c9b62a4b401b | /non_resonant_spiral/init.py | 80d6c76729e61d590d9cc5ccb1c439af3f46ba75 | [
"MIT"
] | permissive | byronwasti/Wireless_Energy_Transfer_Resonant_Inductance | cd2257f9fe6796a7b8c9f7af0621c337708181d4 | 686b575919f49b9e3cc4c826b1f04815ec47629f | refs/heads/master | 2021-01-10T13:00:15.327108 | 2015-12-13T22:17:02 | 2015-12-13T22:17:02 | 46,503,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,810 | py | import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate
# Global Definitions
u_0 = 4 * np.pi * 10**-7
# Integratttiionn
def norm( vect ):
return np.linalg.norm(vect)
def Biot_Savare(R, I, pos):
#inner = integrate.dblquad( lambda dlx, dly: np.linalg.norm(np.cross( [dlx, dly, 0], [pos[0]-dlx, pos[1]-dly, pos[2]] )/np.linalg.norm([pos[0]-dlx, pos[1]-dly, pos[2]])**3), -R, R, lambda x: -np.sqrt(1-x**2/R**2), lambda x: np.sqrt(1 - x**2/R**2))
#inner = integrate.dblquad( lambda dlx, dly: np.linalg.norm(np.cross( [dlx, dly, 0], [pos[0], pos[1], pos[2]] )), -R, R, lambda x: -np.sqrt(1-x**2/R**2), lambda x: np.sqrt(1 - x**2/R**2))
inner = integrate.quad( lambda theta: np.linalg.norm(\
np.cross( [ R*np.sin(theta), R*np.cos(theta), 0],
[pos[0] - R*np.cos(theta), pos[1] - R*np.sin(theta), pos[2]])
)/ \
np.linalg.norm([pos[0] - R*np.cos(theta), pos[1] - R*np.sin(theta), pos[2]])**3,
0, 2*np.pi)
#r3 = np.linalg.norm( [pos[0] )**3
B = u_0/(4*np.pi) * I * inner[0]
print("Error: {}".format( inner[1]))
#B = B * integrate.quad(lambda x: l * r, 0, 10.5)[0]
return B
def Biot_Savare2(R, I, pos):
#inner = integrate.dblquad( lambda dlx, dly: np.linalg.norm(np.cross( [dlx, dly, 0], [pos[0]-dlx, pos[1]-dly, pos[2]] )), -R, R, lambda x: -np.sqrt(1-x**2/R**2), lambda x: np.sqrt(1 - x**2/R**2))
r3 = np.linalg.norm(pos)**3
#B = u_0/(4*np.pi) * I * inner[0] / r3
Bx = u_0/(4*np.pi) * I / r3 * ( integrate.quad( lambda dy: pos[2], -R, R )[0] + integrate.quad( lambda dz: pos[1], 0, 0)[0] )
By = u_0/(4*np.pi) * I / r3 * ( integrate.quad( lambda dz: pos[0], 0, 0 )[0] + integrate.quad( lambda dx: pos[2], -R, R)[0] )
Bz = u_0/(4*np.pi) * I / r3 * ( integrate.quad( lambda dx: pos[1], -R, R )[0] + integrate.quad( lambda dy: pos[0], -R, R)[0] )
#print("Error: {}".format( inner[1]))
#B = B * integrate.quad(lambda x: l * r, 0, 10.5)[0]
return [Bx, By, Bz]
def inner_cross(dl, pos):
np.cross( [dlx, dly, 0] , pos)
np.cross( dl, pos )
# The main function
if __name__ == "__main__":
#circle = integrate.dblquad( lambda x,y: 1, -1, 1, lambda x: -np.sqrt(1-x**2), lambda x: np.sqrt(1-x**2))
R = 1
I = 1
pos = [0, 0, 1000]
#d = np.zeros([5, 5])
#for i in xrange(-5, 5, 1.1):
# for j in xrange(-5, 5, 1.1):
# d[i, j] = Biot_Savare(R, I, pos)
#fig = plt.figure()
#ax = fig.gca(projection='3d')
B = Biot_Savare(R, I, pos)
print(B)
B = Biot_Savare2(R, I, pos)
print(norm(B))
#test = u_0/float((4*np.pi)) * (2*np.pi)* R**2 * I / float(( pos[2]**2 + R**2)**(3/2))
#test = u_0 * I / ( 2 * R)
test = u_0 / 2 * R / (R**2 + pos[2])**(3/2)
print(test)
| [
"byron.wasti@gmail.com"
] | byron.wasti@gmail.com |
2275124374f69e14e6c3935b38f6c0ed3c4f360b | bc6e87f8e9a3f6c35f8080718ac409801dab3b24 | /server/workers/api/src/apis/create_vis.py | 64b1a981230722fd030a650e69e51620c127fbcd | [
"MIT"
] | permissive | OpenKnowledgeMaps/Headstart | b7f56d8562d044e8d96a08f9f7ae0bc6de1076cd | 94dcc248e1892de7b603d5a4dad175f5d8a128db | refs/heads/master | 2023-08-31T20:06:34.485558 | 2023-08-25T17:34:03 | 2023-08-25T17:34:03 | 15,936,466 | 132 | 36 | MIT | 2023-08-25T17:34:05 | 2014-01-15T13:52:50 | JavaScript | UTF-8 | Python | false | false | 2,918 | py | import os
import json
import uuid
import time
import redis
import asyncio
import aioredis
import pandas as pd
from flask import request, make_response, jsonify, abort
from flask_restx import Namespace, Resource, fields
from .request_validators import SearchParamSchema
from apis.utils import get_key
from apis.base import base_querymodel
vis_ns = Namespace("vis", description="Head Start data processing operations")
redis_config = {
"host": os.getenv("REDIS_HOST"),
"port": os.getenv("REDIS_PORT"),
"db": os.getenv("REDIS_DB"),
"password": os.getenv("REDIS_PASSWORD")
}
redis_store = redis.StrictRedis(**redis_config)
input_model = vis_ns.model("InputModel",
{"params": fields.Nested(base_querymodel),
"input_data": fields.String()})
@vis_ns.route('/create')
class Create(Resource):
@vis_ns.doc(responses={200: 'OK',
400: 'Invalid search parameters'})
@vis_ns.expect(input_model)
@vis_ns.produces(["application/json", "text/csv"])
def post(self):
"""
"""
data = request.get_json()
params = data["params"]
vis_ns.logger.debug(params)
input_data = data["input_data"]
k = str(uuid.uuid4())
d = {"id": k, "params": params,
"input_data": input_data}
redis_store.rpush("input_data", json.dumps(d).encode('utf8'))
q_len = redis_store.llen("input_data")
vis_ns.logger.info("Queue length: %s %d %s" %("input_data", q_len, k))
result = get_key(redis_store, k)
try:
headers = {}
if request.headers["Accept"] == "application/json":
headers["Content-Type"] = "application/json"
if request.headers["Accept"] == "text/csv":
if params.get("raw") is True:
df = pd.read_json(json.loads(result))
result = df.to_csv()
else:
result = pd.read_json(json.loads(result)).to_csv()
headers["Content-Type"] = "text/csv"
headers["Content-Disposition"] = "attachment; filename={0}.csv".format(k)
return make_response(result,
200,
headers)
except Exception as e:
vis_ns.logger.error(e)
abort(500, "Problem encountered, check logs.")
@vis_ns.route('/queue_length')
class ServiceVersion(Resource):
def get(self):
q_len = redis_store.llen("input_data")
result = {"queue_length": q_len}
return make_response(result, 200, {"Content-Type": "application/json"})
@vis_ns.route('/service_version')
class ServiceVersion(Resource):
def get(self):
result = {"service_version": os.getenv("SERVICE_VERSION")}
return make_response(result, 200, {"Content-Type": "application/json"})
| [
"web@christopherkittel.eu"
] | web@christopherkittel.eu |
22ebee6957c9e1b2e57e2e1050f512d372b4e5c0 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/video/SiamRPN/pysot-master/toolkit/visualization/draw_utils.py | 55a73b2f7febc80738dd0f639810e32d4ada68ab | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 1,009 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
COLOR = ((1, 0, 0),
(0, 1, 0),
(1, 0, 1),
(1, 1, 0),
(0, 162 / 255, 232 / 255),
(0.5, 0.5, 0.5),
(0, 0, 1),
(0, 1, 1),
(136 / 255, 0, 21 / 255),
(255 / 255, 127 / 255, 39 / 255),
(0, 0, 0))
LINE_STYLE = ['-', '--', ':', '-', '--', ':', '-', '--', ':', '-']
MARKER_STYLE = ['o', 'v', '<', '*', 'D', 'x', '.', 'x', '<', '.']
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
56640dde5881f5e9a77457cc1c3d2013cc08db27 | 94f5bae62a2ed5bf5bd69995d9604c191b6333a0 | /Projects/GAE/src/TestApp/ClientCookie/_Debug.py | fa89f2b46db23edf4fe7c706430449b2b136a00d | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | sethc23/BD_Scripts | 5eef664af935fb38ad28581faaedb51075338553 | 989d62b77ca70d239ae3cf99149c5215f6e6119e | refs/heads/master | 2020-04-12T17:36:17.600971 | 2017-02-22T09:46:27 | 2017-02-22T09:46:27 | 30,630,547 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,365 | py | import sys
import ClientCookie
try:
import warnings
except ImportError:
def warn(text):
ClientCookie.WARNINGS_STREAM.write("WARNING: " + text)
else:
def warn(text):
warnings.warn(text, stacklevel=2)
try:
import logging
except:
NOTSET = None
INFO = 20
DEBUG = 10
class NullHandler:
def write(self, data): pass
class Logger:
def __init__(self):
self.level = NOTSET
self.handler = NullHandler()
def log(self, level, text, *args):
if args:
text = text % args
if self.level is not None and level <= self.level:
self.handler.write(text + "\n")
def debug(self, text, *args):
apply(self.log, (DEBUG, text) + args)
def info(self, text, *args):
apply(self.log, (INFO, text) + args)
def setLevel(self, lvl):
self.level = lvl
def addHandler(self, handler):
self.handler = handler
LOGGER = Logger()
def getLogger(name): return LOGGER
class StreamHandler:
def __init__(self, strm=None):
if not strm:
strm = sys.stderr
self.stream = strm
def write(self, data):
self.stream.write(data)
else:
from logging import getLogger, StreamHandler, INFO, DEBUG, NOTSET
| [
"ub2@SERVER2.local"
] | ub2@SERVER2.local |
08ff4b4c4a0d3076b728f366be5738bfd7e083a0 | 538fd58e4f7d0d094fd6c93ba1d23f78a781c270 | /66_plus_one/solution.py | edb35fdf00c1f5300fc1eafa49c9f25912d50f84 | [] | no_license | FluffyFu/Leetcode | 4633e9e91e493dfc01785fd379ab9f0788726ac1 | 5625e6396b746255f3343253c75447ead95879c7 | refs/heads/master | 2023-03-21T08:47:51.863360 | 2021-03-06T21:36:43 | 2021-03-06T21:36:43 | 295,880,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | def plus_one(digits):
carry = 1
res = []
for d in digits[::-1]:
carry, new_d = divmod(d + carry, 10)
res.append(new_d)
return res[::-1]
| [
"fluffyfu400@gmail.com"
] | fluffyfu400@gmail.com |
9db76e9903e55d33ebde10b97970f0e7961a7766 | 97caa124ffa5da9819c39a16c734165176d90349 | /exams/exam1_answers.py | c870cdeb250a47bebf1cc9d55cce02c3c5bcdf2d | [
"Apache-2.0"
] | permissive | YAtOff/python0 | dd684731065321fd52d475fd2b2105db59f5c19c | b5af5004131d64dd52d42746eddb72b6c43a13c7 | refs/heads/master | 2021-01-18T21:19:11.990434 | 2019-05-29T20:14:23 | 2019-05-29T20:14:23 | 44,601,010 | 6 | 7 | Apache-2.0 | 2019-10-31T22:45:21 | 2015-10-20T11:13:11 | Jupyter Notebook | UTF-8 | Python | false | false | 2,459 | py | # -*- coding: utf-8 -*-
print('*' * 80)
print('Вариант 1')
print('-' * 80)
print('1. Какъва е стойността на следните изрази:')
print('2 ** 3 / 4', 2 ** 3 / 4)
print('(5 // 3) ** 1.5', (5 // 3) ** 1.5)
print('5 % 2 - 1', 5 % 2 - 1)
print('-' * 80)
print('2. Какъва е стойността на следните изрази:')
print('5 > 4 and 2 > 1 or 3 < 5', 5 > 4 and 2 > 1 or 3 < 5)
print('True and False or not False', True and False or not False)
print('2 ** 5 > 29 and 5 < 20 // 3', 2 ** 5 > 29 and 5 < 20 // 3)
print('-' * 80)
print('4. Какво ще се изпечата?')
temp = 100
temp = temp - 100
if temp > 99:
print("Hot")
elif temp > 100:
print("REALLY HOT!")
elif temp > 60:
print("Comfortable")
else:
print("Cold")
print('*' * 80)
print('Вариант 2')
print('-' * 80)
print('1. Какъва е стойността на следните изрази:')
print('5 ** 2 / 4', 5 ** 2 / 4)
print('(10 // 6) ** 9.9', (10 // 6) ** 9.9)
print('11 % 2 - 7', 11 % 2 - 7)
print('-' * 80)
print('2. Какъва е стойността на следните изрази:')
print('7 > 3 and 10 > 11 or 5 < 10', 7 > 3 and 10 > 11 or 5 < 10)
print('False or True and not True', False or True and not True)
print('10 ** 2 > 111 or 3 < 8 / 3 and 5 > 30 % 20', 10 ** 2 > 111 or 3 < 8 / 3 and 5 > 30 % 20)
print('-' * 80)
print('4. Какво ще се изпечата?')
temp = 10
temp = temp ** temp
if temp > 99:
print("Hot")
elif temp > 100:
print("REALLY HOT!")
elif temp > 60:
print("Comfortable")
else:
print("Cold")
print('*' * 80)
print('Вариант 3')
print('-' * 80)
print('1. Какъва е стойността на следните изрази:')
print('4 ** 4 / 4', 4 ** 4 / 4)
print('(11 // 10) ** 8.1', (11 // 10) ** 8.1)
print('13 % 3 - 9', 13 % 3 - 9)
print('-' * 80)
print('2. Какъва е стойността на следните изрази:')
print('10 > 9 and 5 > 7 or 3 < 11', 10 > 9 and 5 > 7 or 3 < 11)
print('True and (False or True) and not False', True and (False or True) and not False)
print('4 ** 2 > 60 or 3 < 10 // 3 and 5 > 10 % 2', 4 ** 2 > 60 or 3 < 10 // 3 and 5 > 10 % 2)
print('-' * 80)
print('4. Какво ще се изпечата?')
temp = 8
temp = temp * temp
if temp > 99:
print("Hot")
elif temp > 100:
print("REALLY HOT!")
elif temp > 60:
print("Comfortable")
else:
print("Cold")
| [
"yavor.atov@gmail.com"
] | yavor.atov@gmail.com |
22145f82f92e3cb3fafab99555c7a33cac27ef21 | 9e8d98c48035d4ee61fa930c324c822a61e5ae55 | /examples2/cvxqp.py | 98aa7cfdfc6bf372adff420ca02ec8b837b2d1c1 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | GRSEB9S/mystic | 59ac0c284a19f7b685a98420cd49d21bb10ff0cd | 748e0030c8d7d8b005f2eafa17a4581c2b3ddb47 | refs/heads/master | 2021-08-14T07:11:04.439139 | 2017-11-14T23:49:22 | 2017-11-14T23:49:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,685 | py | #!/usr/bin/env python
#
# Problem definition:
# Example in reference documentation for cvxopt
# http://cvxopt.org/examples/tutorial/qp.html
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2016 California Institute of Technology.
# Copyright (c) 2016-2017 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/mystic/blob/master/LICENSE
"""
Minimize: f = 2*x[0]**2 + x[1]**2 + x[0]*x[1] + x[0] + x[1]
Subject to: x[0] >= 0
x[1] >= 0
x[0] + x[1] == 1
"""
def objective(x):
x0,x1 = x
return 2*x0**2 + x1**2 + x0*x1 + x0 + x1
equations = """
x0 + x1 - 1.0 == 0.0
"""
bounds = [(0.0, None),(0.0, None)]
# with penalty='penalty' applied, solution is:
xs = [0.25, 0.75]
ys = 1.875
from mystic.symbolic import generate_conditions, generate_penalty
pf = generate_penalty(generate_conditions(equations), k=1e4)
from mystic.symbolic import generate_constraint, generate_solvers, solve
cf = generate_constraint(generate_solvers(solve(equations)))
if __name__ == '__main__':
from mystic.solvers import diffev2, fmin_powell
from mystic.math import almostEqual
result = diffev2(objective, x0=bounds, bounds=bounds, constraint=cf, penalty=pf, npop=40, disp=False, full_output=True)
assert almostEqual(result[0], xs, rel=2e-2)
assert almostEqual(result[1], ys, rel=2e-2)
result = fmin_powell(objective, x0=[0.0,0.0], bounds=bounds, constraint=cf, penalty=pf, disp=False, full_output=True)
assert almostEqual(result[0], xs, rel=2e-2)
assert almostEqual(result[1], ys, rel=2e-2)
# EOF
| [
"mmckerns@968178ea-60bd-409e-af13-df8a517b6005"
] | mmckerns@968178ea-60bd-409e-af13-df8a517b6005 |
87f38b0f703dd7b0778c20be782fc8e15aaad039 | 4ff0ff57e0fee60caf90cf1a2319b7615858b5ff | /cw_update/__manifest__.py | ad5e0715a6b2bd85a640e169b309849f8f5831ba | [] | no_license | akradore/ACC_12 | 257a590acfb1afc92122e46b6db0ccbfdb3969be | 5ed668bda8177586695f5dc2e68a48806eccf976 | refs/heads/master | 2023-03-17T08:53:58.822549 | 2020-02-24T12:32:05 | 2020-02-24T12:32:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | # -*- coding: utf-8 -*-
#
#################################################################################
# Author : Codeware Computer Trading L.L.C. (<www.codewareuae.com>)
# Copyright(c): 2017-Present Codeware Computer Trading L.L.C.
# All Rights Reserved.
#
# This program is copyright property of the author mentioned above.
# You can`t redistribute it and/or modify it.
#
#################################################################################
{
"name": "Codeware Aldiyafah Update",
'summary': "Codeware Aldiyafah Update",
'description':"Codeware Aldiyafah Update",
'version' : '1.1',
'category': 'Human Resources',
'author': 'Codeware Computer Trading L.L.C, {Codeware Team}',
'website': 'http://www.codewareuae.com',
"depends": [
'web',
],
"demo": [],
'data':[
'data/hr_data.xml',
],
"test": [],
"js": [],
"css": [],
"qweb": [],
"installable": True,
"auto_install": False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"arun01@mmproject.net"
] | arun01@mmproject.net |
8ac5eba8fa64f38b75329a46825a629aefbbc29e | f48f9798819b12669a8428f1dc0639e589fb1113 | /util/admin/sysstat/actions.py | 4e90dddfe21cb33614b9b93aeedf0140477fafd0 | [] | no_license | vdemir/PiSiPackages-pardus-2011-devel | 781aac6caea2af4f9255770e5d9301e499299e28 | 7e1867a7f00ee9033c70cc92dc6700a50025430f | refs/heads/master | 2020-12-30T18:58:18.590419 | 2012-03-12T03:16:34 | 2012-03-12T03:16:34 | 51,609,831 | 1 | 0 | null | 2016-02-12T19:05:41 | 2016-02-12T19:05:40 | null | UTF-8 | Python | false | false | 499 | py | # -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
autotools.autoreconf("-v")
autotools.configure()
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.insinto("/etc/sysstat", "sysstat.crond")
| [
"kaptan@pisipackages.org"
] | kaptan@pisipackages.org |
3f806bc3121b61c6aea7db50de0116f7b2894914 | 29dfa1deefc72493d1b1eecf1a8df62e24599a77 | /tests/path/vshadow_path_spec.py | d5a10b2e941541130e9ca314d706ec19bb4bf20b | [
"Apache-2.0"
] | permissive | log2timeline/dfvfs | fd301eaf721a9945641a44ff722aec963158a6b3 | 28756d910e951a22c5f0b2bcf5184f055a19d544 | refs/heads/main | 2023-08-07T22:45:45.432668 | 2023-07-30T12:17:56 | 2023-07-30T12:17:56 | 23,820,144 | 197 | 65 | Apache-2.0 | 2023-07-30T12:17:58 | 2014-09-09T05:06:44 | Python | UTF-8 | Python | false | false | 2,561 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the VSS path specification implementation."""
import unittest
from dfvfs.path import vshadow_path_spec
from tests.path import test_lib
class VShadowPathSpecTest(test_lib.PathSpecTestCase):
"""Tests for the VSS path specification implementation."""
def testInitialize(self):
"""Tests the path specification initialization."""
path_spec = vshadow_path_spec.VShadowPathSpec(parent=self._path_spec)
self.assertIsNotNone(path_spec)
path_spec = vshadow_path_spec.VShadowPathSpec(
location='/vss2', parent=self._path_spec)
self.assertIsNotNone(path_spec)
path_spec = vshadow_path_spec.VShadowPathSpec(
store_index=1, parent=self._path_spec)
self.assertIsNotNone(path_spec)
path_spec = vshadow_path_spec.VShadowPathSpec(
location='/vss2', store_index=1, parent=self._path_spec)
self.assertIsNotNone(path_spec)
with self.assertRaises(ValueError):
vshadow_path_spec.VShadowPathSpec(parent=None)
with self.assertRaises(ValueError):
vshadow_path_spec.VShadowPathSpec(
parent=self._path_spec, bogus='BOGUS')
def testComparable(self):
"""Tests the path specification comparable property."""
path_spec = vshadow_path_spec.VShadowPathSpec(parent=self._path_spec)
self.assertIsNotNone(path_spec)
expected_comparable = '\n'.join([
'type: TEST',
'type: VSHADOW',
''])
self.assertEqual(path_spec.comparable, expected_comparable)
path_spec = vshadow_path_spec.VShadowPathSpec(
location='/vss2', parent=self._path_spec)
self.assertIsNotNone(path_spec)
expected_comparable = '\n'.join([
'type: TEST',
'type: VSHADOW, location: /vss2',
''])
self.assertEqual(path_spec.comparable, expected_comparable)
path_spec = vshadow_path_spec.VShadowPathSpec(
store_index=1, parent=self._path_spec)
self.assertIsNotNone(path_spec)
expected_comparable = '\n'.join([
'type: TEST',
'type: VSHADOW, store index: 1',
''])
self.assertEqual(path_spec.comparable, expected_comparable)
path_spec = vshadow_path_spec.VShadowPathSpec(
location='/vss2', store_index=1, parent=self._path_spec)
self.assertIsNotNone(path_spec)
expected_comparable = '\n'.join([
'type: TEST',
'type: VSHADOW, location: /vss2, store index: 1',
''])
self.assertEqual(path_spec.comparable, expected_comparable)
if __name__ == '__main__':
unittest.main()
| [
"joachim.metz@gmail.com"
] | joachim.metz@gmail.com |
80a64acd1c4ccffc3c6c1cae8c3dce4c53250b3d | 8138985dd7088a4e8046f5b908e1a5e06fb20366 | /djukebox/migrations/0011_auto__chg_field_album_cover_art.py | 6e77b43b9372549ea018700b3c8f75bf4d2605c2 | [
"BSD-2-Clause"
] | permissive | jmichalicek/djukebox | c9b4267cde01dbe7eef86a7f840651d932c3bb3c | 0b7628f886683887ed357688608fe223033c7e35 | refs/heads/master | 2022-11-29T01:23:15.401709 | 2013-08-03T14:41:39 | 2013-08-03T14:41:39 | 3,535,642 | 3 | 1 | BSD-2-Clause | 2022-11-22T00:20:14 | 2012-02-24T12:48:09 | Python | UTF-8 | Python | false | false | 6,853 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Album.cover_art'
db.alter_column('djukebox_album', 'cover_art', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
def backwards(self, orm):
# Changing field 'Album.cover_art'
db.alter_column('djukebox_album', 'cover_art', self.gf('django.db.models.fields.files.ImageField')(default='/tmp/fake', max_length=100))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'djukebox.album': {
'Meta': {'unique_together': "(['title', 'artist', 'user'],)", 'object_name': 'Album'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djukebox.Artist']"}),
'cover_art': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'djukebox.artist': {
'Meta': {'object_name': 'Artist'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'djukebox.audiofile': {
'Meta': {'object_name': 'AudioFile'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'track': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djukebox.Track']"})
},
'djukebox.mp3file': {
'Meta': {'object_name': 'Mp3File', '_ormbases': ['djukebox.AudioFile']},
'audiofile_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['djukebox.AudioFile']", 'unique': 'True', 'primary_key': 'True'})
},
'djukebox.oggfile': {
'Meta': {'object_name': 'OggFile', '_ormbases': ['djukebox.AudioFile']},
'audiofile_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['djukebox.AudioFile']", 'unique': 'True', 'primary_key': 'True'})
},
'djukebox.track': {
'Meta': {'object_name': 'Track'},
'album': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djukebox.Album']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'track_number': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['djukebox']
| [
"jmichalicek@gmail.com"
] | jmichalicek@gmail.com |
acf6d31043453dbfc332459d6c426232bd9b76c0 | ed15e441d4cd7a54d989610b8070a5d14bfda4c8 | /1805/python高级/2/4-人.py | 898b21b29a2f5818918331c2304d466eb3915967 | [] | no_license | jmh9876/p1804_jmh | 24593af521749913b65685e21ffc37281c43998f | a52a6366c21ad7598e71d8e82aeee746ecee7c6b | refs/heads/master | 2020-03-15T23:30:02.769818 | 2018-08-02T09:10:20 | 2018-08-02T09:10:20 | 132,395,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | class People:
def eat(self):
print('天生会吃')
def drink(self):
print('天生会喝水')
def you(self):
print('会打游戏')
erha=People()
erha.eat()
erha.drink()
erha.you()
| [
"2210744940@qq.com"
] | 2210744940@qq.com |
867cc59cf749e496e45fe9e766918a2380491808 | dee8cb6589a7431ef3743d29375c92c3dea7a059 | /movie_reviews/NNmodel.py | dfdf5ef6cbac044fd53676edfec50b431c31efec | [
"MIT"
] | permissive | nitishast/MLworld | 55e6d03720aa446c00434ba9f5cbf53f31ff8754 | eb7e15e67772dfa3f12b59164af0603a3f36bc7c | refs/heads/master | 2020-12-28T02:47:35.995653 | 2019-03-27T04:01:08 | 2019-03-27T04:01:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,163 | py | from keras import models
from keras import layers
from keras import optimizers
from matplotlib import pyplot as plt
import pickle
class NeuralNet:
    """Dense binary classifier over 10,000-dim input vectors (e.g. vectorized reviews).

    Typical workflow: divide_data() -> network() -> train_model(). Training
    persists the model as model.json/model.h5 and its history as history.pkl.
    """

    def __init__(self, x_train, y_train, lr=0.001):
        """Store the training data and learning rate; model starts empty."""
        self.model = models.Sequential()
        self.x_train = x_train
        self.y_train = y_train
        # Filled in by divide_data().
        self.partial_x_train = None
        self.partial_y_train = None
        self.x_val = None
        self.y_val = None
        self.lr = lr
        self.history = None

    def divide_data(self):
        """Hold out the first 10,000 samples as the validation split."""
        self.x_val = self.x_train[:10000]
        self.y_val = self.y_train[:10000]
        self.partial_x_train = self.x_train[10000:]
        self.partial_y_train = self.y_train[10000:]

    def network(self):
        """Build and compile the 16-16-1 dense network (sigmoid output)."""
        self.model.add(layers.Dense(
            16, activation='relu', input_shape=(10000,)))
        self.model.add(layers.Dense(16, activation='relu'))
        self.model.add(layers.Dense(1, activation='sigmoid'))
        self.model.compile(optimizer=optimizers.RMSprop(self.lr),
                           loss='binary_crossentropy',
                           metrics=['accuracy'])

    def train_model(self):
        """Train for 20 epochs, then persist the model and its history."""
        self.history = self.model.fit(self.partial_x_train,
                                      self.partial_y_train,
                                      epochs=20,
                                      batch_size=512,
                                      validation_data=(self.x_val, self.y_val))
        self.save_model()
        with open("history.pkl", "wb") as file:
            pickle.dump(self.history.history, file)

    def save_model(self):
        """Serialize the architecture to model.json and weights to model.h5."""
        model_json = self.model.to_json()
        with open("model.json", "w") as json_file:
            json_file.write(model_json)
        # serialize weights to HDF5
        self.model.save_weights("model.h5")
        print("Saved model to disk")

    def load_model(self):
        """Load and return the model previously written by save_model().

        BUG FIX: the original called the undefined name `model_from_json`
        (NameError at runtime) and never closed the file handle. Use
        models.model_from_json inside a context manager, and return the
        reconstructed model (previously returned None).
        """
        with open('model.json', 'r') as json_file:
            loaded_model_json = json_file.read()
        loaded_model = models.model_from_json(loaded_model_json)
        # load weights into new model
        loaded_model.load_weights("model.h5")
        print("Loaded model from disk")
        return loaded_model
| [
"prakhar2397@gmail.com"
] | prakhar2397@gmail.com |
fd06ff39a4ad3a10511390c6a725e1b15abf12e1 | 4c9cbae1beb009d9e322b2ea1fb6fc5a903c2c9d | /tensorflow_federated/python/core/templates/iterative_process.py | d9901cc50f78ccb2305e48f92b23008d074a2c41 | [
"Apache-2.0"
] | permissive | RITESG/STATIC | 78f93338886714cbf3d0bccc6c49ab389a6eb992 | cfe9d3e35ba033b1c4e47d347427a83f682f41de | refs/heads/master | 2021-05-17T14:17:08.054116 | 2020-06-19T14:42:30 | 2020-06-19T14:42:58 | 250,816,228 | 1 | 0 | Apache-2.0 | 2020-06-19T16:31:50 | 2020-03-28T14:31:59 | Python | UTF-8 | Python | false | false | 4,819 | py | # Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines functions and classes for constructing a TFF iterative process."""
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.api import computation_base
from tensorflow_federated.python.core.api import computation_types
class IterativeProcess(object):
  """A process that includes an initialization and iterated computation.

  An iterated process will usually be driven by a control loop like:

  ```python
  def initialize():
    ...

  def next(state):
    ...

  iterative_process = IterativeProcess(initialize, next)
  state = iterative_process.initialize()
  for round in range(num_rounds):
    state = iterative_process.next(state)
  ```

  The iteration step can accept arguments in addition to `state` (which must be
  the first argument), and return additional arguments:

  ```python
  def next(state, item):
    ...

  iterative_process = ...
  state = iterative_process.initialize()
  for round in range(num_rounds):
    state, output = iterative_process.next(state, round)
  ```
  """

  def __init__(self, initialize_fn, next_fn):
    """Creates a `tff.templates.IterativeProcess`.

    Args:
      initialize_fn: a no-arg `tff.Computation` that creates the initial state
        of the chained computation.
      next_fn: a `tff.Computation` that defines an iterated function. If
        `initialize_fn` returns a type _T_, then `next_fn` must return a type
        _U_ which is compatible with _T_ or multiple values where the first type
        is _U_, and accept either a single argument of type _U_ or multiple
        arguments where the first argument must be of type _U_.

    Raises:
      TypeError: `initialize_fn` and `next_fn` are not compatible function
        types.
    """
    py_typecheck.check_type(initialize_fn, computation_base.Computation)
    # `initialize_fn` must take no arguments at all.
    if initialize_fn.type_signature.parameter is not None:
      raise TypeError(
          'initialize_fn must be a no-arg tff.Computation, but found parameter '
          '{}'.format(initialize_fn.type_signature))
    initialize_result_type = initialize_fn.type_signature.result

    py_typecheck.check_type(next_fn, computation_base.Computation)
    # The state is the first element of a multi-parameter `next_fn`, or its
    # sole parameter otherwise.
    if isinstance(next_fn.type_signature.parameter,
                  computation_types.NamedTupleType):
      next_first_param_type = next_fn.type_signature.parameter[0]
    else:
      next_first_param_type = next_fn.type_signature.parameter
    # The initial state must be acceptable as `next_fn`'s state parameter.
    if not next_first_param_type.is_assignable_from(initialize_result_type):
      raise TypeError('The return type of initialize_fn must be assignable '
                      'to the first parameter of next_fn, but found\n'
                      'initialize_fn.type_signature.result=\n{}\n'
                      'next_fn.type_signature.parameter[0]=\n{}'.format(
                          initialize_result_type, next_first_param_type))

    # The iterated state must also round-trip: next_fn's (first) result feeds
    # back into its own first parameter.
    next_result_type = next_fn.type_signature.result
    if not next_first_param_type.is_assignable_from(next_result_type):
      # This might be multiple output next_fn, check if the first argument might
      # be the state. If still not the right type, raise an error.
      if isinstance(next_result_type, computation_types.NamedTupleType):
        next_result_type = next_result_type[0]
      if next_first_param_type != next_result_type:
        raise TypeError('The return type of next_fn must be assignable to the '
                        'first parameter, but found\n'
                        'next_fn.type_signature.parameter[0]=\n{}\n'
                        'actual next_result_type=\n{}'.format(
                            next_first_param_type, next_result_type))
    self._initialize_fn = initialize_fn
    self._next_fn = next_fn

  @property
  def initialize(self):
    """A no-arg `tff.Computation` that returns the initial state."""
    return self._initialize_fn

  @property
  def next(self):
    """A `tff.Computation` that produces the next state.

    The first argument of should always be the current state (originally
    produced by `tff.templates.IterativeProcess.initialize`), and the first (or
    only) returned value is the updated state.

    Returns:
      A `tff.Computation`.
    """
    return self._next_fn
| [
"tensorflow.copybara@gmail.com"
] | tensorflow.copybara@gmail.com |
90bc917ebc15837e77a6afc1a736acc4a7adc1ad | 6b6147d4e1342facf916cd9ee695074f3404b1a4 | /arcade/almostIncreasingSequence.py | a32cadd250a2f868ace50716e44fb9dc8b86120b | [] | no_license | sandgate-dev/codesignal-practice | 419ba38076fa40ea698860a72b37c3bd4cf720cf | 26d67970fd0ddbbff38c4b5830e4a60dfaea5b2a | refs/heads/master | 2021-10-30T08:18:08.350026 | 2019-04-25T23:32:46 | 2019-04-25T23:32:46 | 182,485,052 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | """
Given a sequence of integers as an array, determine whether it is possible
to obtain a strictly increasing sequence by removing no more than one
element from the array.
Note: sequence a0, a1, ..., an is considered to be a strictly increasing
if a0 < a1 < ... < an. Sequence containing only one element is also
considered to be strictly increasing.
Example
For sequence = [1, 3, 2, 1], the output should be
almostIncreasingSequence(sequence) = false.
There is no one element in this array that can be removed in order to get
a strictly increasing sequence.
For sequence = [1, 3, 2], the output should be
almostIncreasingSequence(sequence) = true.
You can remove 3 from the array to get the strictly increasing sequence [1, 2].
Alternately, you can remove 2 to get the strictly increasing sequence [1, 3].
"""
def first_bad_pair(sequence):
    """Return the index of the first element that is >= its successor, or -1."""
    for idx, (cur, nxt) in enumerate(zip(sequence, sequence[1:])):
        if cur >= nxt:
            return idx
    return -1
def almostIncreasingSequence(sequence):
    """Return 1 if removing at most one element yields a strictly increasing
    sequence, else 0."""
    bad = first_bad_pair(sequence)
    if bad == -1:
        return 1  # already strictly increasing
    # Try dropping the left element of the bad pair, then the right one.
    # Only the neighborhood of the removal needs re-checking.
    without_left = sequence[bad - 1:bad] + sequence[bad + 1:]
    without_right = sequence[bad:bad + 1] + sequence[bad + 2:]
    if first_bad_pair(without_left) == -1 or first_bad_pair(without_right) == -1:
        return 1
    return 0
| [
"stephanosterburg@me.com"
] | stephanosterburg@me.com |
d31ee165f2381edf38900b36664a845e50d23402 | def899a565a26c8f333db16d4dfc629b7a21baf9 | /blink/blink/wsgi.py | f5cd83d1d99979ed3bdbe9815b18525f43df4d54 | [] | no_license | Mostacosta/Blink-project | 8197468cecb27ac869509bfc890ed0178edd3f44 | 8aa5b85fb21b7c4695532c547db031d3019feaa4 | refs/heads/master | 2022-11-26T11:22:45.146615 | 2019-04-22T16:21:03 | 2019-04-22T16:21:03 | 177,580,002 | 0 | 1 | null | 2022-11-19T12:37:54 | 2019-03-25T12:14:54 | HTML | UTF-8 | Python | false | false | 387 | py | """
WSGI config for blink project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the handler.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blink.settings')

# Module-level WSGI callable picked up by WSGI servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"mostafaelhassan910@gmail.com"
] | mostafaelhassan910@gmail.com |
39958df43cff7af85c4740e28c4d723acfd35f49 | 17b771514ea773b5d34d31576313a6294562c4c2 | /nplm/v2/gen_random_data.py | 222afbfdfb86ebd43be937a5d7d3e092217483f4 | [] | no_license | xuanhan863/neural_prob_lang_model | ce26353073078d1f2f13d645c21b3ffa83206402 | dc594773448cb444a1631797855cc5c5e751de05 | refs/heads/master | 2020-12-24T19:13:04.387633 | 2015-07-28T05:54:11 | 2015-07-28T05:54:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,290 | py | #!/usr/bin/env python
import networkx as nx
from collections import defaultdict
import random as r
import optparse
import sys
# Command-line interface for the random-walk sequence generator.
optparser = optparse.OptionParser(prog='nplp', version='0.0.1', description='simple neural probabilistic language model')
optparser.add_option('--seed', None, dest='seed', type='int', default=None, help='rng seed')
optparser.add_option('--to-dot', action="store_true", dest='to_dot', help='dump dot format and exit')
optparser.add_option('--er-n', None, dest='er_n', type='int', default=10, help='erdos_renyi n param (num nodes)')
optparser.add_option('--er-p', None, dest='er_p', type='float', default=0.15, help='erdos_renyi p param')
optparser.add_option('--num-labels', None, dest='num_labels', type='int', default=6, help='number of distinct labels')
optparser.add_option('--generate', None, dest='gen', type='int', default=10, help='number of sequences to generate')
opts, arguments = optparser.parse_args()

# Walks longer than this are truncated.
MAX_SEQ_LENGTH = 50

# Seed the RNG only when requested, for reproducible output.
if opts.seed is not None:
    r.seed(int(opts.seed))

# Random directed graph whose edges define the allowed transitions.
er = nx.erdos_renyi_graph(opts.er_n, opts.er_p, directed=True)

if opts.num_labels == 6:
    labels = ['A', 'B', 'C', 'D', 'E', 'F'] # backwards compat
else:
    labels = ["n%s" % i for i in range(opts.num_labels)]
def label(i):
    """Return the display label for node index `i`, cycling through `labels`."""
    return labels[i % len(labels)]
# With --to-dot, emit the graph in Graphviz dot format and exit immediately.
if opts.to_dot:
    print "digraph G { rankdir=LR; bgcolor=\"transparent\" "
    for i, j in er.edges():
        print "%s_%s -> %s_%s" % (label(i), i, label(j), j)
    print "}"
    exit(0)
# convert the edge list to an adjacency list: node -> list of successors
adj = defaultdict(list)
for i, j in er.edges():
    adj[i].append(j)

# ... and then to a normalised transition table: node -> list of
# (successor, cumulative probability) pairs, uniform over the successors.
t = defaultdict(list)
for node, adj_nodes in adj.iteritems():
    proportion = 1.0 / len(adj_nodes)
    for i, adj_node in enumerate(adj_nodes):
        t[node].append((adj_node, (i+1) * proportion))
def next_from(n):
    """Sample a successor of node `n` from the cumulative table `t`.

    Assumes `n` has at least one outgoing edge (i.e. `n` is a key of `t`).
    """
    rnd = r.random()
    i = 0
    # Walk the cumulative distribution until rnd falls below a threshold.
    while rnd >= t[n][i][1]:
        i += 1
    return t[n][i][0]
# generate the requested number of random walks using the transition table
generated = 0
while generated < opts.gen:
    n = r.choice(t.keys())  # random start among nodes that have successors
    seq = [n]
    while n in t.keys() and len(seq) <= MAX_SEQ_LENGTH: # ie has neighbours and chain not too long
        n = next_from(n)
        seq.append(n)
    # One space-separated labelled sequence per line.
    print " ".join([label(i) for i in seq])
    generated += 1
| [
"matthew.kelcey@gmail.com"
] | matthew.kelcey@gmail.com |
329d1f65c101f9ac1a31598d39ad9e4a3405baf9 | f20f7efd5dfe4e63f84a46f5b365619d5a9abe9b | /my_trials/case5/kernel.py | 4d4389f8d5131d69034c50e5f40c08f733518dc4 | [] | no_license | GINK03/kaggle-talkingdata-adtracking-fraud-detection | 78f6778833eef911795eee77eeb50cec45d4a71c | 6110da44df401666bef364c69a016b2b35e35bc2 | refs/heads/master | 2020-03-09T10:59:22.641715 | 2018-06-06T05:43:05 | 2018-06-06T05:43:05 | 128,750,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,702 | py | import pandas as pd
import time
import numpy as np
from sklearn.cross_validation import train_test_split
import lightgbm as lgb
import gc
# TalkingData click-fraud pipeline: load data and derive time features.
path = 'inputs/'

# Narrow integer dtypes keep the ~44M-row frame within memory.
dtypes = {
    'ip' : 'uint32',
    'app' : 'uint16',
    'device' : 'uint16',
    'os' : 'uint16',
    'channel' : 'uint16',
    'is_attributed' : 'uint8',
    'click_id' : 'uint32'
}

print('load train...')
# Skip the first ~145M rows and read the most recent 40M training clicks.
train_df = pd.read_csv(path+"train.csv", skiprows=range(1,144903891), nrows=40000000, dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'is_attributed'])
print('load test...')
test_df = pd.read_csv(path+"test.csv", dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'click_id'])

len_train = len(train_df)
# Stack test under train so the groupby features below see both at once.
train_df = train_df.append(test_df)

print('data prep...')
train_df['hour'] = pd.to_datetime(train_df.click_time).dt.hour.astype('uint8')
train_df['day'] = pd.to_datetime(train_df.click_time).dt.day.astype('uint8')
# # of clicks for each ip-day-hour combination
print('group by...')
gp = train_df[['ip', 'day', 'hour', 'channel']].groupby(by=['ip','day','hour'])[['channel']].count().reset_index().rename(index=str, columns={'channel': 'qty'})
print('merge...')
train_df = train_df.merge(gp, on=['ip','day','hour'], how='left')

# # of clicks for each ip-app combination
print('group by...')
gp = train_df[['ip', 'app', 'channel']].groupby(by=['ip', 'app'])[['channel']].count().reset_index().rename(index=str, columns={'channel': 'ip_app_count'})
train_df = train_df.merge(gp, on=['ip','app'], how='left')

# # of clicks for each ip-app-os combination
print('group by...')
gp = train_df[['ip','app', 'os', 'channel']].groupby(by=['ip', 'app', 'os'])[['channel']].count().reset_index().rename(index=str, columns={'channel': 'ip_app_os_count'})
train_df = train_df.merge(gp, on=['ip','app', 'os'], how='left')

# Downcast the new count columns to save memory.
print("vars and data type: ")
train_df['qty'] = train_df['qty'].astype('uint16')
train_df['ip_app_count'] = train_df['ip_app_count'].astype('uint16')
train_df['ip_app_os_count'] = train_df['ip_app_os_count'].astype('uint16')

# # of clicks for each ip-os-day-hour combination
print('group by...')
gp = train_df[['ip', 'day', 'hour', 'os', 'channel']].groupby(by=['ip', 'day', 'hour', 'os'])[['channel']].count().reset_index().rename(index=str, columns={'channel': 'ip_os_hour_count'})
print('merge...')
train_df = train_df.merge(gp, on=['ip','day','hour', 'os'], how='left')

# # of clicks for each ip-os-app-day-hour combination
print('group by...')
gp = train_df[['ip', 'os', 'app', 'day', 'hour', 'channel']].groupby(by=['ip', 'os', 'app', 'day', 'hour'])[['channel']].count().reset_index().rename(index=str, columns={'channel': 'ip_os_app_hour_count'})
print('merge...')
train_df = train_df.merge(gp, on=['ip', 'os', 'app', 'day','hour'], how='left')

# NOTE(review): no-op at module level -- the head() result is discarded.
train_df.head(20)
# Re-split the combined frame: test rows back out, last 3M train rows held
# out as a (time-ordered) validation set.
test_df = train_df[len_train:]
val_df = train_df[(len_train-3000000):len_train]
train_df = train_df[:(len_train-3000000)]

print("train size: ", len(train_df))
print("valid size: ", len(val_df))
print("test size : ", len(test_df))

target = 'is_attributed'
predictors = ['app', 'device', 'os', 'channel', 'hour', 'day', 'qty', 'ip_app_count', 'ip_app_os_count', 'ip_os_hour_count', 'ip_os_app_hour_count']
categorical = ['app', 'device', 'os', 'channel', 'hour']

# Submission frame keyed by click_id.
sub = pd.DataFrame()
sub['click_id'] = test_df['click_id'].astype('int')
print("Training...")
# LightGBM binary classifier; positives are rare, hence scale_pos_weight.
params = {
    'boosting_type': 'gbdt',
    'objective': 'binary',
    'metric':'auc',
    'learning_rate': 0.1,
    #'is_unbalance': 'true', #because training data is unbalance (replaced with scale_pos_weight)
    'scale_pos_weight':99, # because training data is extremely unbalanced
    'num_leaves': 7, # we should let it be smaller than 2^(max_depth)
    'max_depth': 3, # -1 means no limit
    'min_child_samples': 100, # Minimum number of data need in a child(min_data_in_leaf)
    'max_bin': 100, # Number of bucketed bin for feature values
    'subsample': 0.7, # Subsample ratio of the training instance.
    'subsample_freq': 1, # frequence of subsample, <=0 means no enable
    'colsample_bytree': 0.7, # Subsample ratio of columns when constructing each tree.
    'min_child_weight': 0, # Minimum sum of instance weight(hessian) needed in a child(leaf)
    'subsample_for_bin': 200000, # Number of samples for constructing bin
    'min_split_gain': 0, # lambda_l1, lambda_l2 and min_gain_to_split to regularization
    'reg_alpha': 0, # L1 regularization term on weights
    'reg_lambda': 0, # L2 regularization term on weights
    'verbose': 0,
}

xgtrain = lgb.Dataset(train_df[predictors].values, label=train_df[target].values,
                      feature_name=predictors,
                      categorical_feature=categorical
                      )
xgvalid = lgb.Dataset(val_df[predictors].values, label=val_df[target].values,
                      feature_name=predictors,
                      categorical_feature=categorical
                      )

evals_results = {}

# Train with early stopping on the validation set.
bst1 = lgb.train(params,
                 xgtrain,
                 valid_sets=[xgtrain, xgvalid],
                 valid_names=['train','valid'],
                 evals_result=evals_results,
                 num_boost_round=800,
                 early_stopping_rounds=50,
                 verbose_eval=10,
                 feval=None)

n_estimators = bst1.best_iteration
print("Model Report")
print("n_estimators : ", n_estimators)
print("auc:", evals_results['valid']['auc'][n_estimators-1])

print("Predicting...")
sub['is_attributed'] = bst1.predict(test_df[predictors])
print("writing...")
sub.to_csv('sub_lgb_balanced99.csv',index=False)
print("done...")
| [
"gim.kobayashi@gmail.com"
] | gim.kobayashi@gmail.com |
bf990c401d77eee8d16bc33940a5b0306b067238 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/311/usersdata/303/74247/submittedfiles/ex11.py | c773f9586990aa4a3609f4e96bb1b45466888bc4 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | # -*- coding: utf-8 -*-
# Read two dates (day, month, year) and report which one is later.
Dia1= int(input('Digite o primeiro dia:'))
Mes1= int(input('Digite o primeiro mes:'))
Ano1= int(input('Digite o primeiro ano:'))
print('\n')
Dia2= int(input('Digite o segundo dia:'))
Mes2= int(input('Digite o segundo mes:'))
Ano2= int(input('Digite o segundo ano:'))
print('\n')
# BUG FIX: the original compared only the years, so any two dates in the
# same year were reported as equal even though the day and month were read.
# Compare full (year, month, day) tuples instead.
if (Ano1, Mes1, Dia1) > (Ano2, Mes2, Dia2):
    print('Data 1')
elif (Ano1, Mes1, Dia1) < (Ano2, Mes2, Dia2):
    print('Data 2')
else:
    print('Iguais')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
29ea9337a3aa47516a84dcf1a71787020c5cfd5e | 8e8d9b53e7bf3a9f96bc0276ed5e3e05f64df6cb | /neutron_vpnaas_dashboard/test/test_data/utils.py | 405616208957eccbe185df9e0a349c83ccdb0633 | [
"Apache-2.0"
] | permissive | openstack/neutron-vpnaas-dashboard | 1f51723f09409406b508f4076c423925a16d9f91 | 8963c4ee73de773a8753763a08ccc1b5e1f31b82 | refs/heads/master | 2023-08-12T17:22:12.307219 | 2023-04-28T08:18:08 | 2023-04-28T08:18:20 | 94,869,893 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,001 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.test_data import utils
def load_data(load_onto=None):
    """Load VPNaaS test data, onto an existing TestData object or a new one."""
    from neutron_vpnaas_dashboard.test.test_data import vpnaas_data

    # The order of these loaders matters, some depend on others.
    loaders = (vpnaas_data.data,)

    # No (truthy) target supplied: build a fresh TestData from the loaders.
    if not load_onto:
        return utils.TestData(*loaders)

    # Otherwise apply each loader to the provided object and hand it back.
    for data_func in loaders:
        data_func(load_onto)
    return load_onto
| [
"amotoki@gmail.com"
] | amotoki@gmail.com |
e8da57ca9c957fee695725375bf4a8e83d05965b | 974d04d2ea27b1bba1c01015a98112d2afb78fe5 | /test/legacy_test/test_dist_hapi_model.py | 314a7621f07fc899774fad0abf7773aa4b49372e | [
"Apache-2.0"
] | permissive | PaddlePaddle/Paddle | b3d2583119082c8e4b74331dacc4d39ed4d7cff0 | 22a11a60e0e3d10a3cf610077a3d9942a6f964cb | refs/heads/develop | 2023-08-17T21:27:30.568889 | 2023-08-17T12:38:22 | 2023-08-17T12:38:22 | 65,711,522 | 20,414 | 5,891 | Apache-2.0 | 2023-09-14T19:20:51 | 2016-08-15T06:59:08 | C++ | UTF-8 | Python | false | false | 4,031 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import subprocess
import time
import unittest
from paddle import fluid
from paddle.distributed.utils.launch_utils import (
TrainerProc,
find_free_ports,
get_cluster,
watch_local_trainers,
)
def get_cluster_from_args(selected_gpus):
    """Build a single-node (127.0.0.1) cluster description for the given GPUs.

    Allocates one free port per selected GPU and returns the cluster object
    produced by `get_cluster`.
    """
    cluster_node_ips = '127.0.0.1'
    node_ip = '127.0.0.1'

    node_ips = [x.strip() for x in cluster_node_ips.split(',')]

    # Validation only: raises ValueError if node_ip is not a cluster IP.
    node_ips.index(node_ip)

    # CLEANUP: removed a dead `free_ports = None` assignment that was
    # immediately overwritten by the call below.
    free_ports = find_free_ports(len(selected_gpus))
    if free_ports is not None:
        free_ports = list(free_ports)

    trainer_endpoints = []
    for ip in node_ips:
        trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports])
    return get_cluster(node_ips, node_ip, trainer_endpoints, selected_gpus)
def get_gpus(selected_gpus):
    """Split a comma-separated GPU id string into a list of trimmed ids."""
    return [gpu_id.strip() for gpu_id in selected_gpus.split(',')]
def start_local_trainers(
    cluster,
    pod,
    training_script,
    training_script_args,
    log_dir=None,
):
    """Spawn one local trainer subprocess per trainer in `pod`.

    Each child inherits the current environment plus the PADDLE_* / FLAGS_*
    variables describing its rank and endpoints. Returns the list of
    TrainerProc records for the spawned processes.

    NOTE(review): `log_dir` and `training_script_args` are currently unused.
    """
    current_env = copy.copy(os.environ.copy())
    # paddle broadcast ncclUniqueId use socket, and
    # proxy maybe make trainers unreachable, so delete them.
    # if we set them to "", grpc will log error message "bad uri"
    # so just delete them.
    current_env.pop("http_proxy", None)
    current_env.pop("https_proxy", None)
    procs = []
    for t in pod.trainers:
        # Per-trainer environment; the same keys are overwritten on each
        # iteration, so reusing current_env across trainers is safe.
        proc_env = {
            "FLAGS_selected_gpus": "%s" % ",".join([str(g) for g in t.gpus]),
            "PADDLE_TRAINER_ID": "%d" % t.rank,
            "PADDLE_CURRENT_ENDPOINT": "%s" % t.endpoint,
            "PADDLE_TRAINERS_NUM": "%d" % cluster.trainers_nranks(),
            "PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()),
        }

        current_env.update(proc_env)

        print(f"trainer proc env:{current_env}")

        # Run under coverage when requested by the CI environment.
        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            cmd = "python -m coverage run --branch -p " + training_script
        else:
            cmd = "python -u " + training_script

        print(f"start trainer proc:{cmd} env:{proc_env}")

        fn = None

        proc = subprocess.Popen(cmd.split(" "), env=current_env)

        tp = TrainerProc()
        tp.proc = proc
        tp.rank = t.rank
        tp.log_fn = fn
        tp.cmd = cmd

        procs.append(tp)

    return procs
class TestMultipleGpus(unittest.TestCase):
    """Launches hapi MNIST training scripts across two local GPUs."""

    def run_mnist_2gpu(self, target_file_name):
        """Run `target_file_name` on GPUs 0,1 and wait for completion."""
        # Skip silently on machines without CUDA devices.
        if fluid.core.get_cuda_device_count() == 0:
            return

        selected_gpus = get_gpus('0,1')
        cluster = None
        pod = None

        cluster, pod = get_cluster_from_args(selected_gpus)

        procs = start_local_trainers(
            cluster,
            pod,
            training_script=target_file_name,
            training_script_args=[],
        )

        # Poll every 3s until all trainer subprocesses have exited.
        while True:
            alive = watch_local_trainers(procs, cluster.trainers_nranks())

            if not alive:
                print(f"Local procs complete, POD info:{pod}")
                break
            time.sleep(3)

    def test_hapi_multiple_gpus_static(self):
        self.run_mnist_2gpu('dist_hapi_mnist_static.py')

    def test_hapi_multiple_gpus_dynamic(self):
        self.run_mnist_2gpu('dist_hapi_mnist_dynamic.py')

    def test_hapi_amp_static(self):
        self.run_mnist_2gpu('dist_hapi_pure_fp16_static.py')


if __name__ == "__main__":
    unittest.main()
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
c9fd0de7d478cac93dce44fd4f86648ebf8ead23 | dfb53581b4e6dbdc8e3789ea2678de1e1c4b5962 | /Python/Day13/exercise2_poker.py | 47ca8fc62b38ba666b2b1695be982d5c044c9a64 | [] | no_license | biabulinxi/Python-ML-DL | 7eff6d6898d72f00575045c5aa2acac45b4b0b82 | 217d594a3c0cba1e52550f74d100cc5023fb415b | refs/heads/master | 2020-06-01T09:13:17.314121 | 2019-06-08T03:59:36 | 2019-06-08T03:59:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | # @Project:AID1810
# @Author:biabu
# @Date:2018-11-19 22:16
# @File_name:exercise2_poker.py
# @IDE:PyCharm
import random as R
def fun1(a, L):
    """Prefix every rank in L with suit symbol `a`, returning card strings."""
    return [a + str(rank) for rank in L]
def deal(lst):
    """Randomly draw 17 cards out of `lst` (removed in place); return the hand."""
    pokers = []
    for _ in range(17):
        poker = R.choice(lst)
        pokers.append(poker)
        lst.remove(poker)
    return pokers
def main():
    """Shuffle a 54-card deck and deal three 17-card hands plus a 3-card kitty
    (Dou Dizhu style), pausing for Enter before revealing each hand."""
    # Unicode suit symbols. NOTE(review): '\u2666' is the DIAMOND glyph and
    # '\u2665' the HEART glyph, so the `hearts`/`diamonds` names are swapped;
    # harmless, since only the symbols appear on the cards. `plum` = club.
    spade = '\u2660'
    hearts = '\u2666'
    plum = '\u2663'
    diamonds = '\u2665'
    # The 13 ranks of one suit.
    poker = ['A', 2, 3, 4, 5, 6, 7, 8, 9, 10, 'J', 'Q', 'K']
    spade_poker = fun1(spade, poker)
    hearts_poker = fun1(hearts, poker)
    plum_poker = fun1(plum, poker)
    diamonds_poker = fun1(diamonds, poker)
    # Full deck: 52 suited cards plus the two jokers ("大王"/"小王").
    list1 = spade_poker + hearts_poker + plum_poker + diamonds_poker + ["大王","小王"]
    # Shuffle the deck in place (洗牌).
    R.shuffle(list1)
    # Deal (发牌): 17 cards per player; the last 3 cards are the kitty (底牌).
    first = list1[:17]
    secend = list1[17:34]
    third = list1[34:51]
    dipai = list1[51:]
    # secend = deal(list1)
    # third = deal(list1)
    # Each input() waits for Enter before revealing the next hand.
    input()
    print('第一个人的牌为:',first)
    input()
    print('第二个人的牌为:',secend)
    input()
    print('第三个人的牌为:',third)
    input()
    print("底牌为:",dipai)


main()
| [
"biabu1208@163.com"
] | biabu1208@163.com |
afd898b30ada95a30a5cc360df10d1e181cd34cc | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/fillParagraph/multilineDocstring.py | f2e2b56e1e3319e4b15e93bfb62638480f87c8a5 | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 374 | py | __author__ = 'ktisha'
# NOTE(review): IDE "fill paragraph" test fixture -- the <caret> marker and the
# deliberately over-long docstring lines are intentional test data; do not
# reflow or edit them.
def foo():
    """
    This is my docstring. <caret>There are many like it, but this one mine. My docstring is my best friend. it is my life. I must master it as I must master my life.
    This is my docstring. There are many like it, but this one mine. My docstring is my best friend. it is my life. I must master it as I must master my life.
""" | [
"Ekaterina.Tuzova@jetbrains.com"
] | Ekaterina.Tuzova@jetbrains.com |
d042e3818d27e993259b958d638db9c7e825ccb7 | a65abed86de16bdf9d6c98a2ab08837029188d3a | /gather.py | db18218c16bd80b70408f046c8ad53437d9ce6b6 | [
"MIT"
] | permissive | jjmaldonis/mpi-parallelization | 82746f735d1cd918c9bacab80e506b20c729f827 | 4cc2ab1e6929352073cafb83b1cb0ea990acff15 | refs/heads/master | 2021-06-18T15:17:49.824144 | 2021-03-21T15:31:37 | 2021-03-21T15:31:37 | 55,423,691 | 18 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | """
This example creates data contained in a list.
The length of the list is equal to the number of cores mpi4py is using.
Each core gets assigned one piece of data in that list and modifies it.
The updated data is passed to the root via gather, where it is then
broadcast to all the other cores.
"""
import sys
from mpi4py import MPI
from random import shuffle
# MPI handles: one process per rank.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
root = 0  # NOTE(review): unused; bcast/gather below rely on their default root=0.

# One list element per rank. Every rank shuffles its own copy, but the
# broadcast right after replaces all copies with rank 0's ordering.
data = [i*10 for i in range(size)]
shuffle(data)
data = comm.bcast(data)
print("Starting data for rank {}: {}".format(rank, data))

# Assign a piece of data to each core (rank i owns position i)
positions_per_core = {i: i for i in range(len(data))}

# Update the data assigned to this core
data[positions_per_core[rank]] += 1

# Gather every rank's updated element on the root (the docstring's
# "Allgather" wording is inaccurate: non-root ranks receive None here)
data = comm.gather(data[positions_per_core[rank]])
print("Ending data for rank {}: {} (this is only correct on the root)".format(rank, data))

# Re-broadcast the gathered list so every rank ends with the full result.
data = comm.bcast(data)
print("After broadcasting, rank {} has: {}".format(rank, data))
| [
"jjmaldonis@gmail.com"
] | jjmaldonis@gmail.com |
a3c7401949f2a716983bf88c8787d293909bb140 | a8aa8ecebda6c3bad4a27854d29371312cb152f8 | /src/ggrc/migrations/versions/20160321011353_3914dbf78dc1_add_comment_notification_type.py | 9576c092c4aa6441aec3058adec50f60d5fb5b40 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | xferra/ggrc-core | ef1f7016c717a391d927c128b2058e1fee6e2929 | b82333664db3978d85109f2d968239bd1260ee85 | refs/heads/develop | 2023-04-06T23:59:38.917995 | 2016-07-26T14:13:38 | 2016-07-26T14:13:38 | 64,231,198 | 1 | 1 | Apache-2.0 | 2023-04-03T23:37:20 | 2016-07-26T15:10:29 | Python | UTF-8 | Python | false | false | 1,834 | py | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Add comment notification type
Create Date: 2016-03-21 01:13:53.293580
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from sqlalchemy.sql import column
from sqlalchemy.sql import table
from alembic import op
# revision identifiers, used by Alembic.
# revision identifiers, used by Alembic.
revision = '3914dbf78dc1'
down_revision = '11cee57a4149'

# Lightweight table stand-in for bulk_insert/delete (no ORM model needed).
NOTIFICATION_TYPES = table(
    'notification_types',
    column('id', sa.Integer),
    column('name', sa.String),
    column('description', sa.Text),
    column('template', sa.String),
    column('instant', sa.Boolean),
    column('advance_notice', sa.Integer),
    column('advance_notice_end', sa.Integer),
    column('created_at', sa.DateTime),
    column('modified_by_id', sa.Integer),
    column('updated_at', sa.DateTime),
    column('context_id', sa.Integer),
)

# Rows inserted by upgrade() and removed again by downgrade().
NOTIFICATIONS = [{
    "name": "comment_created",
    "description": "Notify selected users that a comment has been created",
    "template": "comment_created",
    "advance_notice": 0,
    "instant": False,
}]
def upgrade():
    """Add the comment_created notification type entry."""
    op.bulk_insert(NOTIFICATION_TYPES, NOTIFICATIONS)
def downgrade():
    """Remove the comment_created notification type and its notifications."""
    notification_names = tuple([notif["name"] for notif in NOTIFICATIONS])
    # First delete notifications that reference the type.
    # NOTE(review): this raw SQL hardcodes 'comment_created' while the delete
    # below derives the names from NOTIFICATIONS -- keep the two in sync.
    op.execute(
        """
        DELETE n
        FROM notifications AS n
        LEFT JOIN notification_types AS nt
        ON n.notification_type_id = nt.id
        WHERE nt.name = 'comment_created'
        """
    )
    # Then delete the notification type rows themselves.
    op.execute(
        NOTIFICATION_TYPES.delete().where(
            NOTIFICATION_TYPES.c.name.in_(notification_names)
        )
    )
| [
"zidarsk8@gmail.com"
] | zidarsk8@gmail.com |
51c9b5249f95231c63a910e266f355825108cbef | 74ee0d20ce56f0ec6880f93e55e8f55e6ce799a9 | /src/python/nimbusml/examples/examples_from_dataframe/MutualInformationSelector_df.py | 093a2d6f852be6a12da09faaf8a62d1da9005e8c | [
"MIT"
] | permissive | zyw400/NimbusML-1 | 100d8ac6ce98b3d79d93fc842e1980735d356a27 | b5f1c2e3422fadc81e21337bcddb7372682dd455 | refs/heads/master | 2020-04-08T10:58:44.427194 | 2019-01-04T22:10:21 | 2019-01-04T22:10:21 | 159,289,107 | 3 | 0 | NOASSERTION | 2019-01-04T22:10:22 | 2018-11-27T06:47:48 | Python | UTF-8 | Python | false | false | 2,303 | py | ###############################################################################
# Example of MutualInformationSelector
import pandas
from nimbusml import Pipeline
from nimbusml.feature_extraction.text import NGramFeaturizer
from nimbusml.feature_extraction.text.extractor import Ngram
from nimbusml.feature_selection import MutualInformationSelector
# Toy labelled dataset: 25 short reviews with a like/dislike boolean label.
train_reviews = pandas.DataFrame(
    data=dict(
        review=[
            "This is great", "I hate it", "Love it", "Do not like it",
            "Really like it", "I hate it", "I like it a lot",
            "I kind of hate it", "I do like it", "I really hate it",
            "It is very good", "I hate it a bunch", "I love it a bunch",
            "I hate it", "I like it very much", "I hate it very much.",
            "I really do love it", "I really do hate it", "Love it!",
            "Hate it!", "I love it", "I hate it", "I love it", "I hate it",
            "I love it"],
        like=[
            True, False, True, False, True, False, True, False, True, False,
            True, False, True, False, True, False, True, False, True, False,
            True, False, True, False, True]))

# Features = every column except the label.
X = train_reviews.loc[:, train_reviews.columns != 'like']
y = train_reviews['like']

# pipeline of transforms: n-gram featurize the text, then keep only the two
# slots with the highest mutual information with the label
transform_1 = NGramFeaturizer(word_feature_extractor=Ngram())
transform_2 = MutualInformationSelector(slots_in_output=2)
pipeline = Pipeline([transform_1, transform_2])
print(pipeline.fit_transform(X, y))

# Scikit compatibility (Compose transforms inside Scikit Pipeline).
# In this scenario, we do not provide {input, output} arguments
transform_1 = NGramFeaturizer(word_feature_extractor=Ngram())
transform_2 = MutualInformationSelector(slots_in_output=2)
pipe = Pipeline([
    ('text', transform_1),
    ('featureselect', transform_2)])
print(pipe.fit_transform(X, y))
| [
"ganaziro@microsoft.com"
] | ganaziro@microsoft.com |
b83fbaef3b1eb4573717c83f495a9ca4d2f88ef4 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_diadems.py | 6efd67bbae2aea864e2e2d26cc51e3d47f88c54f | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
# class header
class _DIADEMS():
    """Word-form record for "DIADEMS", an inflected form of the base word
    "diadem"."""

    def __init__(self,):
        self.name = "DIADEMS"
        # Bug fix: the original assigned the bare name ``diadem``, which is
        # undefined at module level and raised NameError on instantiation.
        # Use the base-word list, consistent with ``self.basic`` below.
        self.definitions = ['diadem']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}

        self.basic = ['diadem']
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
6c24ebcf721f0295066d4f6e7f2c47e5e3d7ebb9 | 7068d02c0abdd0775b7e5717fea2bccec28f656b | /mockup/spacer_pcb.py | ee22a0712fb15d33045d336fb522768f1b7c4b57 | [
"Apache-2.0"
] | permissive | iorodeo/nano_capillary_capsense | a6df4ab063301a460c0d317315b064e0719395fa | 714bed5fb77e71ccc5c9be5ac025d7c3170493b5 | refs/heads/master | 2022-11-11T14:48:07.989021 | 2011-05-27T17:19:49 | 2011-05-27T17:19:49 | 273,789,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | import scipy
from py2scad import *
import params
class SpacerPCB(object):
    """Spacer PCB model: a rectangular plate pierced by a single column of
    sandwich mounting holes, rendered via py2scad."""

    def __init__(self, params=params):
        # Keep the parameter module handy and build the solid immediately.
        self.params = params
        self.__make()

    def __str__(self):
        # Serialization (SCAD text) is delegated to the generated part.
        return self.part.__str__()

    def __make(self):
        """Assemble the plate-with-holes solid and store it in self.part."""
        size_x, size_y, size_z = self.params.spacer_pcb['size']
        rgba = self.params.spacer_pcb['color']
        base = self.params.base_pcb
        n_holes = base['sandwich_hole_num']
        diam = base['sandwich_hole_diam']
        inset = base['sandwich_hole_offset']
        # Holes run along the y-axis, inset from both edges of the plate.
        y_top = 0.5 * size_y - inset
        y_bot = -0.5 * size_y + inset
        hole_list = [
            (0, y_pos, diam)
            for y_pos in scipy.linspace(y_bot, y_top, n_holes)
        ]
        plate = plate_w_holes(size_x, size_y, size_z, hole_list)
        self.part = Color(plate, rgba=rgba)
# -----------------------------------------------------------------------------
if __name__ == '__main__':
pcb = SpacerPCB()
prog = SCAD_Prog()
prog.fn = 100
prog.add(pcb)
prog.write('spacer_pcb.scad')
| [
"will@iorodeo.com"
] | will@iorodeo.com |
d4eaf42a27b1084fa85b364137c4d1453b0f5f99 | 0995deded97ed1793b25d93316921c25b2e7cf45 | /Bokeh Examples/app/gapminder/main.py | 701a98cf06a75f49086f926359117223c3b7c9d5 | [] | no_license | RichardAfolabi/Data_Visualization | b6ebc2d00258ab96082457f2976636a78853515c | 722837bab2d2539a8b175f13b3c800cc31815cbe | refs/heads/master | 2021-01-10T15:27:34.603818 | 2017-06-15T21:59:40 | 2017-06-15T21:59:40 | 47,613,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,762 | py | # -*- coding: utf-8 -*-
import pandas as pd
from bokeh.core.properties import field
from bokeh.io import curdoc
from bokeh.layouts import layout
from bokeh.models import (
ColumnDataSource, HoverTool, SingleIntervalTicker, Slider, Button, Label,
CategoricalColorMapper,
)
from bokeh.palettes import Spectral6
from bokeh.plotting import figure
from data import process_data
# Load the Gapminder indicator tables plus the year and region lists.
fertility_df, life_expectancy_df, population_df_size, regions_df, years, regions_list = process_data()

sources = {}

region_name = regions_df.Group
region_name.name = 'region'

# Pre-build one ColumnDataSource per year so the slider/animation can swap
# the plot's data instantly instead of recomputing it.
for year in years:
    fertility = fertility_df[year]
    fertility.name = 'fertility'
    life = life_expectancy_df[year]
    life.name = 'life'
    population = population_df_size[year]
    population.name = 'population'
    df = pd.concat([fertility, life, population, region_name], axis=1)
    df = df.fillna('NaN')
    sources[year] = ColumnDataSource(df)

source = sources[years[0]]

plot = figure(x_range=(1, 9), y_range=(20, 100), title='Gapminder Data', plot_height=300)
plot.xaxis.ticker = SingleIntervalTicker(interval=1)
plot.xaxis.axis_label = "Children per woman (total fertility)"
plot.yaxis.ticker = SingleIntervalTicker(interval=20)
plot.yaxis.axis_label = "Life expectancy at birth (years)"

# Large pale year label drawn behind the scatter points.
label = Label(x=1.1, y=18, text=str(years[0]), text_font_size='70pt', text_color='#eeeeee')
plot.add_layout(label)

color_mapper = CategoricalColorMapper(palette=Spectral6, factors=regions_list)
plot.circle(
    x='fertility',
    y='life',
    size='population',
    source=source,
    fill_color={'field': 'region', 'transform': color_mapper},
    fill_alpha=0.8,
    line_color='#7c7e71',
    line_width=0.5,
    line_alpha=0.5,
    legend=field('region'),
)
plot.add_tools(HoverTool(tooltips="@index", show_arrow=False, point_policy='follow_mouse'))


def animate_update():
    # Advance the slider to the next year, wrapping around at the end.
    year = slider.value + 1
    if year > years[-1]:
        year = years[0]
    slider.value = year


def slider_update(attrname, old, new):
    # Bokeh on_change callback: display the selected year's data and label.
    year = slider.value
    label.text = str(year)
    source.data = sources[year].data


slider = Slider(start=years[0], end=years[-1], value=years[0], step=1, title="Year")
slider.on_change('value', slider_update)


def animate():
    # Toggle play/pause of the year animation (one step every 200 ms).
    if button.label == '► Play':
        button.label = '❚❚ Pause'
        curdoc().add_periodic_callback(animate_update, 200)
    else:
        button.label = '► Play'
        curdoc().remove_periodic_callback(animate_update)


button = Button(label='► Play', width=60)
button.on_click(animate)

layout = layout([
    [plot],
    [slider, button],
], sizing_mode='scale_width')

curdoc().add_root(layout)
curdoc().title = "Gapminder"
"mailme@richardafolabi.com"
] | mailme@richardafolabi.com |
e6827295f20604189d9818e33dbb8550db43faf3 | 93ab050518092de3a433b03744d09b0b49b541a6 | /iniciante/Mundo 02/Exercícios Corrigidos/Exercício 060.py | ee6970e29fb45cd533fd00098499ce49e68a659d | [
"MIT"
] | permissive | ggsant/pyladies | 1e5df8772fe772f8f7d0d254070383b9b9f09ec6 | 37e11e0c9dc2fa2263ed5b42df5a395169408766 | refs/heads/master | 2023-01-02T11:49:44.836957 | 2020-11-01T18:36:43 | 2020-11-01T18:36:43 | 306,947,105 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | """
EXERCÍCIO 060: Cálculo do Fatorial
Faça um programa que leia um número qualquer e mostre seu fatorial.
Ex: 5! = 5 x 4 x 3 x 2 x 1 = 120
"""
"""
from math import factorial
n = int(input('Digite um número para calcular seu fatorial: '))
f = factorial(n)
print('O fatorial de {} é {}.'.format(n, f))
"""
n = int(input('Digite um número para calcular seu fatorial: '))
c = n
f = 1
print('Calculando {}! = '.format(n), end='')
while c > 0:
print('{}'.format(c), end='')
print(' x ' if c > 1 else ' = ', end='')
f *= c
c -= 1
print('{}'.format(f))
| [
"61892998+ggsant@users.noreply.github.com"
] | 61892998+ggsant@users.noreply.github.com |
a02e360fc8e8c93bdbb5401f26d1d323ce7e6fef | 83277e8b959de61b655f614b7e072394a99d77ae | /venv/lib/python3.7/site-packages/graphql/type/__init__.py | 41a11115c7f191c30859196df766155fae87ac30 | [
"MIT"
] | permissive | hskang9/scalable-django | b3ed144670c3d5b244168fdd38f33e1f596253c0 | 162e0f4a3d49f164af1d33298fa9a47b66508cbf | refs/heads/master | 2023-04-29T05:33:23.460640 | 2020-03-27T00:55:28 | 2020-03-27T00:55:28 | 247,036,359 | 2 | 1 | MIT | 2023-04-21T20:53:08 | 2020-03-13T09:40:37 | Python | UTF-8 | Python | false | false | 1,363 | py | # flake8: noqa
from .definition import ( # no import order
GraphQLScalarType,
GraphQLObjectType,
GraphQLField,
GraphQLArgument,
GraphQLInterfaceType,
GraphQLUnionType,
GraphQLEnumType,
GraphQLEnumValue,
GraphQLInputObjectType,
GraphQLInputObjectField,
GraphQLList,
GraphQLNonNull,
get_named_type,
is_abstract_type,
is_composite_type,
is_input_type,
is_leaf_type,
is_type,
get_nullable_type,
is_output_type,
)
from .directives import (
# "Enum" of Directive locations
DirectiveLocation,
# Directive definition
GraphQLDirective,
# Built-in directives defined by the Spec
specified_directives,
GraphQLSkipDirective,
GraphQLIncludeDirective,
GraphQLDeprecatedDirective,
# Constant Deprecation Reason
DEFAULT_DEPRECATION_REASON,
)
from .scalars import ( # no import order
GraphQLInt,
GraphQLFloat,
GraphQLString,
GraphQLBoolean,
GraphQLID,
)
from .schema import GraphQLSchema
from .introspection import (
# "Enum" of Type Kinds
TypeKind,
# GraphQL Types for introspection.
__Schema,
__Directive,
__DirectiveLocation,
__Type,
__Field,
__InputValue,
__EnumValue,
__TypeKind,
# Meta-field definitions.
SchemaMetaFieldDef,
TypeMetaFieldDef,
TypeNameMetaFieldDef,
)
| [
"hyungsukkang@Hyungsuks-Mac-mini.local"
] | hyungsukkang@Hyungsuks-Mac-mini.local |
beff91b7fa1e76f823caa1ad1e68ed33480e7ec7 | 38e26d71712ec984797f9f8f5ef152460e2cb1ba | /sfepy/discrete/fem/fields_hierarchic.py | 7f4f8ccdb623ed828fb7d26856c5ec9d641f7f83 | [
"BSD-3-Clause"
] | permissive | mathboylinlin/sfepy | db39da5569312bcdf85c0facce04f00313728e71 | e11cfea931a3a16829bde33a6b79b6720757782f | refs/heads/master | 2021-01-16T21:15:49.558430 | 2016-01-20T08:03:08 | 2016-01-20T08:03:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,173 | py | import numpy as nm
from sfepy.base.base import assert_
from sfepy.discrete.fem.utils import prepare_remap, prepare_translate
from sfepy.discrete.common.dof_info import expand_nodes_to_dofs
from sfepy.discrete.fem.fields_base import VolumeField, H1Mixin
class H1HierarchicVolumeField(H1Mixin, VolumeField):
    """
    H1 volume field with a hierarchic (Lobatto) basis.

    Besides the usual DOF connectivity, it maintains ``self.ap.ori`` —
    per-DOF facet orientation flags used to make edge/face basis functions
    consistent between neighbouring elements.
    """
    family_name = 'volume_H1_lobatto'

    def _init_econn(self):
        """
        Initialize the extended DOF connectivity and facet orientation array.
        """
        VolumeField._init_econn(self)
        # Orientation flags, one per econn entry (0 = no modification).
        self.ap.ori = nm.zeros_like(self.ap.econn)

    def _setup_facet_orientations(self):
        # Node description (vertex/edge/face/bubble node groups) of the basis.
        self.node_desc = self.interp.describe_nodes()

    def _setup_edge_dofs(self):
        """
        Setup edge DOF connectivity.
        """
        if self.node_desc.edge is None:
            return 0, None, None

        return self._setup_facet_dofs(1,
                                      self.node_desc.edge,
                                      self.n_vertex_dof)

    def _setup_face_dofs(self):
        """
        Setup face DOF connectivity.
        """
        if self.node_desc.face is None:
            return 0, None, None

        return self._setup_facet_dofs(self.domain.shape.tdim - 1,
                                      self.node_desc.face,
                                      self.n_vertex_dof + self.n_edge_dof)

    def _setup_facet_dofs(self, dim, facet_desc, offset):
        """
        Helper function to setup facet DOF connectivity, works for both
        edges and faces.

        Parameters: ``dim`` is the facet dimension (1 for edges), ``facet_desc``
        the per-facet basis node columns, ``offset`` the first global DOF
        number to use. Returns ``(n_dof, all_dofs, remap)``.
        """
        facet_desc = nm.array(facet_desc)
        n_dof_per_facet = facet_desc.shape[1]

        cmesh = self.domain.cmesh

        facets = self.region.entities[dim]
        ii = nm.arange(facets.shape[0], dtype=nm.int32)
        all_dofs = offset + expand_nodes_to_dofs(ii, n_dof_per_facet)

        # Prepare global facet id remapping to field-local numbering.
        remap = prepare_remap(facets, cmesh.num[dim])

        cconn = self.region.domain.cmesh.get_conn(self.region.tdim, dim)
        offs = cconn.offsets

        n_f = self.gel.edges.shape[0] if dim == 1 else self.gel.faces.shape[0]
        n_fp = 2 if dim == 1 else self.gel.surface_facet.n_vertex

        oris = cmesh.get_orientations(dim)

        ap = self.ap

        gcells = self.region.get_cells()
        n_el = gcells.shape[0]

        # Elements of facets.
        iel = nm.arange(n_el, dtype=nm.int32).repeat(n_f)
        ies = nm.tile(nm.arange(n_f, dtype=nm.int32), n_el)

        aux = offs[gcells][:, None] + ies.reshape((n_el, n_f))

        indices = cconn.indices[aux]
        facets_of_cells = remap[indices].ravel()

        # Define global facet dof numbers.
        gdofs = offset + expand_nodes_to_dofs(facets_of_cells,
                                              n_dof_per_facet)

        # DOF columns in econn for each facet (repeating same values for
        # each element.
        iep = facet_desc[ies]

        ap.econn[iel[:, None], iep] = gdofs

        ori = oris[aux].ravel()

        if (n_fp == 2) and (ap.interp.gel.name in ['2_4', '3_8']):
            # Tensor-product cells: normalize edge orientation w.r.t. the
            # reference axes before applying sign flips.
            tp_edges = ap.interp.gel.edges
            ecs = ap.interp.gel.coors[tp_edges]
            # True = positive, False = negative edge orientation w.r.t.
            # reference tensor product axes.
            tp_edge_ori = (nm.diff(ecs, axis=1).sum(axis=2) > 0).squeeze()
            aux = nm.tile(tp_edge_ori, n_el)
            ori = nm.where(aux, ori, 1 - ori)

        if n_fp == 2: # Edges.
            # ori == 1 means the basis has to be multiplied by -1.
            ps = ap.interp.poly_spaces['v']
            orders = ps.node_orders
            eori = nm.repeat(ori[:, None], n_dof_per_facet, 1)
            eoo = orders[iep] % 2 # Odd orders.
            ap.ori[iel[:, None], iep] = eori * eoo

        elif n_fp == 3: # Triangular faces.
            raise NotImplementedError

        else: # Quadrilateral faces.
            # ori encoding in 3 bits:
            # 0: axis swap, 1: axis 1 sign, 2: axis 2 sign
            # 0 = + or False, 1 = - or True
            # 63 -> 000 = 0
            #  0 -> 001 = 1
            # 30 -> 010 = 2
            # 33 -> 011 = 3
            # 11 -> 100 = 4
            #  7 -> 101 = 5
            # 52 -> 110 = 6
            # 56 -> 111 = 7
            # Special cases:
            # Both orders same and even -> 000
            # Both orders same and odd -> 0??
            # Bits 1, 2 are multiplied by (swapped) axial order % 2.
            new = nm.repeat(nm.arange(8, dtype=nm.int32), 3)
            translate = prepare_translate([31, 59, 63,
                                           0, 1, 4,
                                           22, 30, 62,
                                           32, 33, 41,
                                           11, 15, 43,
                                           3, 6, 7,
                                           20, 52, 60,
                                           48, 56, 57], new)
            ori = translate[ori]
            eori = nm.repeat(ori[:, None], n_dof_per_facet, 1)

            ps = ap.interp.poly_spaces['v']
            orders = ps.face_axes_nodes[iep - ps.face_indx[0]]
            eoo = orders % 2
            eoo0, eoo1 = eoo[..., 0], eoo[..., 1]

            i0 = nm.where(eori < 4)
            i1 = nm.where(eori >= 4)

            eori[i0] = nm.bitwise_and(eori[i0], 2*eoo0[i0] + 5)
            eori[i0] = nm.bitwise_and(eori[i0], eoo1[i0] + 6)

            eori[i1] = nm.bitwise_and(eori[i1], eoo0[i1] + 6)
            eori[i1] = nm.bitwise_and(eori[i1], 2*eoo1[i1] + 5)

            ap.ori[iel[:, None], iep] = eori

        n_dof = n_dof_per_facet * facets.shape[0]
        assert_(n_dof == nm.prod(all_dofs.shape))

        return n_dof, all_dofs, remap

    def _setup_bubble_dofs(self):
        """
        Setup bubble DOF connectivity.
        """
        if self.node_desc.bubble is None:
            return 0, None, None

        offset = self.n_vertex_dof + self.n_edge_dof + self.n_face_dof
        n_dof_per_cell = self.node_desc.bubble.shape[0]

        ap = self.ap

        ii = self.region.get_cells()
        remap = prepare_remap(ii, self.domain.cmesh.n_el)

        n_cell = ii.shape[0]
        n_dof = n_dof_per_cell * n_cell

        # Bubble DOFs are numbered consecutively, cell by cell.
        all_dofs = nm.arange(offset, offset + n_dof, dtype=nm.int32)
        all_dofs.shape = (n_cell, n_dof_per_cell)

        iep = self.node_desc.bubble[0]
        ap.econn[:,iep:] = all_dofs

        return n_dof, all_dofs, remap

    def set_dofs(self, fun=0.0, region=None, dpn=None, warn=None):
        """
        Set the values of given DOFs using a function of space coordinates or
        value `fun`.
        """
        if region is None:
            region = self.region

        if dpn is None:
            dpn = self.n_components

        # Hack - use only vertex DOFs.
        gnods = self.get_dofs_in_region(region, merge=False)
        nods = nm.concatenate(gnods)

        n_dof = dpn * nods.shape[0]
        if nm.isscalar(fun):
            # Constant value on vertex DOFs; non-vertex DOFs stay zero.
            vals = nm.zeros(n_dof, dtype=nm.dtype(type(fun)))
            vals[:gnods[0].shape[0] * dpn] = fun

        elif callable(fun):
            # Evaluate the function at vertex coordinates only.
            vv = fun(self.get_coor(gnods[0]))
            vals = nm.zeros(n_dof, dtype=vv.dtype)
            vals[:gnods[0].shape[0] * dpn] = vv

        else:
            raise NotImplementedError

        # Deduplicate DOF nodes, keeping values aligned with the survivors.
        nods, indx = nm.unique(nods, return_index=True)
        ii = (nm.tile(dpn * indx, dpn)
              + nm.tile(nm.arange(dpn, dtype=nm.int32), indx.shape[0]))
        vals = vals[ii]

        return nods, vals

    def create_basis_context(self):
        """
        Create the context required for evaluating the field basis.
        """
        # Hack for tests to pass - the reference coordinates are determined
        # from vertices only - we can use the Lagrange basis context for the
        # moment. The true context for Field.evaluate_at() is not implemented.
        gps = self.ap.get_poly_space('v', from_geometry=True)
        mesh = self.create_mesh(extra_nodes=False)

        ctx = geo_ctx = gps.create_context(mesh.cmesh, 0, 1e-15, 100, 1e-8)
        ctx.geo_ctx = geo_ctx

        return ctx
| [
"cimrman3@ntc.zcu.cz"
] | cimrman3@ntc.zcu.cz |
c38d2f4074959f64a63a38c40ee1bf78bc782155 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/119/usersdata/251/26626/submittedfiles/al1.py | c7c23d19397e149137e86b0dc7dc112c50149323 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | # -*- coding: utf-8 -*-
# Convert a temperature entered by the user from Celsius to Fahrenheit.
c = float(input('Digite a temperatura em graus celsuis:'))
# F = (9*C + 160) / 5 is algebraically F = 9/5*C + 32.
f = ((9*c)+160)/5
# Bug fix: the original `print ('O valor é %.2F%'f)` was a SyntaxError
# (string literal juxtaposed with a name); use the %-formatting operator.
print('O valor é %.2f' % f)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
147f5b33371de45e99d64728b37d8bdeaf805894 | d1c2d00078520cd556f60b7213c27856f8b3460d | /sdks/python/apache_beam/ml/inference/vertex_ai_inference_it_test.py | 02b4e5ec0703484092dfb7a13b70c5f6a75eaaff | [
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-protobuf",
"Apache-2.0",
"Python-2.0"
] | permissive | apache/beam | ed11b9e043465c720659eac20ac71b5b171bfa88 | 6d5048e05087ea54abc889ce402ae2a0abb9252b | refs/heads/master | 2023-09-04T07:41:07.002653 | 2023-09-01T23:01:05 | 2023-09-01T23:01:05 | 50,904,245 | 7,061 | 4,522 | Apache-2.0 | 2023-09-14T21:43:38 | 2016-02-02T08:00:06 | Java | UTF-8 | Python | false | false | 2,564 | py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""End-to-End test for Vertex AI Remote Inference"""
import logging
import unittest
import uuid
import pytest
from apache_beam.io.filesystems import FileSystems
from apache_beam.testing.test_pipeline import TestPipeline
try:
from apache_beam.examples.inference import vertex_ai_image_classification
except ImportError as e:
raise unittest.SkipTest(
"Vertex AI model handler dependencies are not installed")
# GCS glob of input flower images and base directory for pipeline output.
_INPUT = "gs://apache-beam-ml/testing/inputs/vertex_images/*/*.jpg"
_OUTPUT_DIR = "gs://apache-beam-ml/testing/outputs/vertex_images"
# Pre-deployed Vertex AI endpoint (and its VPC network) used by this test.
_ENDPOINT_ID = "5384055553544683520"
_ENDPOINT_PROJECT = "apache-beam-testing"
_ENDPOINT_REGION = "us-central1"
_ENDPOINT_NETWORK = "projects/844138762903/global/networks/beam-test-vpc"
# pylint: disable=line-too-long
_SUBNETWORK = "https://www.googleapis.com/compute/v1/projects/apache-beam-testing/regions/us-central1/subnetworks/beam-test-vpc"
class VertexAIInference(unittest.TestCase):
    """Integration test running the flower-classification example pipeline
    against a live, pre-deployed Vertex AI endpoint."""

    @pytest.mark.uses_vertex_ai
    @pytest.mark.it_postcommit
    def test_vertex_ai_run_flower_image_classification(self):
        # Unique output path per run so concurrent runs do not collide.
        output_file = '/'.join([_OUTPUT_DIR, str(uuid.uuid4()), 'output.txt'])

        test_pipeline = TestPipeline(is_integration_test=True)
        extra_opts = {
            'input': _INPUT,
            'output': output_file,
            'endpoint_id': _ENDPOINT_ID,
            'endpoint_project': _ENDPOINT_PROJECT,
            'endpoint_region': _ENDPOINT_REGION,
            'endpoint_network': _ENDPOINT_NETWORK,
            'private': "True",
            'subnetwork': _SUBNETWORK,
        }
        vertex_ai_image_classification.run(
            test_pipeline.get_full_options_as_args(**extra_opts))
        # The pipeline must have written its output file to GCS.
        self.assertEqual(FileSystems().exists(output_file), True)
if __name__ == '__main__':
    # Verbose logging helps diagnose remote-inference failures.
    logging.getLogger().setLevel(logging.DEBUG)
    unittest.main()
| [
"noreply@github.com"
] | apache.noreply@github.com |
ae419f06373181cf53f9592fa2af2b8bbf760143 | c2fd4ae194719b3f48cd7e268cde237b2efb93c9 | /a5/viterbi.py | f1f739c51933721da920a4c21c22439a44f2fef5 | [] | no_license | jcccf/cs4780 | cf5825b379d4acc4840633615d47379bb3c6140c | a341135c28b1087bf9d111b502f04cfc1249dab6 | refs/heads/master | 2020-05-19T15:30:20.120621 | 2011-11-16T16:52:32 | 2011-11-16T16:52:32 | 2,402,224 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,064 | py | import operator
import operator

# Initial state probabilities P(y_0).
y_p = {"a": 0.1, "n": 0.4, "o": 0.2, "t": 0.3}

# Transition probabilities, indexed as yy_p[y_i][y_{i-1}] = P(y_i | y_{i-1})
# (each fixed inner key, i.e. column, sums to 1).
yy_p = {
    "a": {"a": 0.05, "n": 0.35, "o": 0.1, "t": 0.4},
    "n": {"a": 0.1, "n": 0.05, "o": 0.5, "t": 0.1},
    "o": {"a": 0.25, "n": 0.5, "o": 0.1, "t": 0.4},
    "t": {"a": 0.6, "n": 0.1, "o": 0.3, "t": 0.1},
}

# Emission probabilities xy_p[x][y] = P(x | y).
xy_p = {
    "A": {"a": 0.4, "n": 0.3, "o": 0.1, "t": 0.1},
    "T": {"a": 0.2, "n": 0.1, "o": 0.1, "t": 0.4},
    "N": {"a": 0.1, "n": 0.4, "o": 0.1, "t": 0.1},
    "Y": {"a": 0.2, "n": 0.1, "o": 0.2, "t": 0.3},
    "W": {"a": 0.1, "n": 0.1, "o": 0.5, "t": 0.1},
}


def viterbi(xs):
    """Run the Viterbi algorithm on the observation sequence ``xs``.

    Prints the DP value table, the back-pointer table and the most likely
    hidden-state sequence. Fixes: ported from Python-2-only constructs
    (``print`` statements, ``dict.iteritems``) so the module actually runs
    under Python 3, and the predicted sequence is now also *returned* so
    callers can use it programmatically (previously it was only printed).
    """
    # T[i][y]: probability of the best path ending in state y at position i.
    # T_prev[i][y]: back-pointer to the previous state on that best path.
    T = [dict([(a, 0.0) for a in y_p.keys()]) for j in range(len(xs))]
    T_prev = [dict([(a, None) for a in y_p.keys()]) for j in range(len(xs))]
    i = 0
    for c in xs:
        if i == 0:  # START: P(y_0) * P(x_0|y_0)
            for k in T[0].keys():
                T[0][k] = y_p[k] * xy_p[c][k]
        else:  # prev * P(x_i|y_i) * P(y_i|y_{i-1})
            for k2 in y_p.keys():
                T_prev[i][k2], T[i][k2] = max(
                    [(k, T[i-1][k] * xy_p[c][k2] * yy_p[k2][k])
                     for k in y_p.keys()],
                    key=operator.itemgetter(1))
        i += 1

    # Build up prediction: best final state, then follow back-pointers.
    i -= 1
    index = max(T[i].items(), key=operator.itemgetter(1))[0]
    result = [index]
    while i > 0:
        index = T_prev[i][index]
        result.append(index)
        i -= 1
    result.reverse()  # Back-tracking produced the states in reverse order.

    # Print tables nicely (LaTeX tabular rows).
    print("Table of probabilities for partial paths")
    transp = dict([(a, []) for a in y_p.keys()])
    for t in T:
        for k, v in t.items():
            transp[k].append(v)
    for k, vs in transp.items():
        s = '{}'.format(k)
        for v in vs:
            s += ' & {}'.format(v)
        s += " \\\\"
        print(s)

    print("Back-pointer table")
    transp = dict([(a, []) for a in y_p.keys()])
    for t in T_prev:
        for k, v in t.items():
            transp[k].append(v)
    for k, vs in transp.items():
        s = '{}'.format(k)
        for v in vs:
            s += ' & {}'.format(v)
        s += " \\\\"
        print(s)

    print("Predicted Letters")
    print(result)
    return result


# Viterbi
viterbi("TWY")
"jccccf@gmail.com"
] | jccccf@gmail.com |
66c36e93193a52cb71cc1125044c1215833a2320 | b2053d4776af7f99e70961b2329620e5aee44c5b | /algorithm/swap.py | 2f77147df78571d14336824d8f87f1a7c4649e6c | [] | no_license | YanYan0716/ComDis | c3839093aab28420f790e1e276c585fe570a3a9d | 286190075a0e960acd4a7f5ed4052a129ac5a113 | refs/heads/main | 2023-04-11T05:54:45.212881 | 2021-04-16T10:01:26 | 2021-04-16T10:01:26 | 350,293,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,796 | py | import random
from PIL import Image
import matplotlib.pyplot as plt
import numbers
import torchvision.transforms as transforms
def swap(img, crop):
    """Jigsaw-style augmentation: split ``img`` into a crop[0] x crop[1] grid
    of patches, locally shuffle neighbouring patches (window of 2), and
    reassemble an image of the original size.

    ``img`` is a PIL image; ``crop`` is (columns, rows).
    """
    def crop_image(image, cropnum):
        # Split `image` into cropnum[0] x cropnum[1] tiles, row-major order.
        width, high = image.size
        crop_x = [int((width / cropnum[0]) * i) for i in range(cropnum[0] + 1)]
        crop_y = [int((high / cropnum[1]) * i) for i in range(cropnum[1] + 1)]
        im_list = []
        for j in range(len(crop_y) - 1):
            for i in range(len(crop_x) - 1):
                im_list.append(image.crop((crop_x[i], crop_y[j], min(crop_x[i + 1], width), min(crop_y[j + 1], high))))
        return im_list

    widthcut, highcut = img.size
    # Trim a 10-pixel border before tiling.
    img = img.crop((10, 10, widthcut - 10, highcut - 10))
    images = crop_image(img, crop)
    pro = 5
    if pro >= 5:  # NOTE: always true — the else branch is effectively dead.
        tmpx = []
        tmpy = []
        count_x = 0
        count_y = 0
        k = 1
        RAN = 2  # local shuffle window (adjacent pairs)
        for i in range(crop[1] * crop[0]):
            tmpx.append(images[i])
            count_x += 1
            if len(tmpx) >= k:
                # Shuffle the last RAN tiles within the current row.
                tmp = tmpx[count_x - RAN:count_x]
                random.shuffle(tmp)
                tmpx[count_x - RAN:count_x] = tmp
            if count_x == crop[0]:
                # Row complete: push it and shuffle the last RAN rows.
                tmpy.append(tmpx)
                count_x = 0
                count_y += 1
                tmpx = []
                if len(tmpy) >= k:
                    tmp2 = tmpy[count_y - RAN:count_y]
                    random.shuffle(tmp2)
                    tmpy[count_y - RAN:count_y] = tmp2
        random_im = []
        for line in tmpy:
            random_im.extend(line)
        # random.shuffle(images)
        # Reassemble the shuffled tiles into a new image.
        width, high = img.size
        iw = int(width / crop[0])
        ih = int(high / crop[1])
        toImage = Image.new('RGB', (iw * crop[0], ih * crop[1]))
        x = 0
        y = 0
        for i in random_im:
            i = i.resize((iw, ih), Image.ANTIALIAS)
            toImage.paste(i, (x * iw, y * ih))
            x += 1
            if x == crop[0]:
                x = 0
                y += 1
    else:
        toImage = img
    # Restore the original (pre-border-trim) size.
    toImage = toImage.resize((widthcut, highcut))
    return toImage
class Randomswap(object):
    """Transform that locally shuffles an image's patches (see ``swap``).

    ``size`` may be a single number n (an n x n patch grid) or a pair (h, w).
    """

    def __init__(self, size):
        if isinstance(size, numbers.Number):
            # A scalar means a square grid.
            self.size = (int(size), int(size))
        else:
            assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
            self.size = size

    def __call__(self, img):
        """Apply the patch swap to a PIL image and return the result."""
        return swap(img, self.size)

    def __repr__(self):
        return '{}(size={})'.format(self.__class__.__name__, self.size)
if __name__ == '__main__':
    # Quick visual check: shuffle a sample image in a 5x5 grid and show it.
    img = Image.open('./test/463944.jpg').convert('RGB')
    trans = transforms.Compose([Randomswap([5, 5])])
    out = trans(img)
    plt.imshow(out)
    plt.show()
print(out.size) | [
"yanqian0716@gmail.com"
] | yanqian0716@gmail.com |
f1e23eae26489f18dd8beb6cad4879283671faae | 493a36f1f8606c7ddce8fc7fe49ce4409faf80be | /.history/B073040023/server_20210614192425.py | 078cb0bd812062835f1cdb632280e1f7578e684e | [] | no_license | ZhangRRz/computer_network | f7c3b82e62920bc0881dff923895da8ae60fa653 | 077848a2191fdfe2516798829644c32eaeded11e | refs/heads/main | 2023-05-28T02:18:09.902165 | 2021-06-15T06:28:59 | 2021-06-15T06:28:59 | 376,568,344 | 0 | 0 | null | 2021-06-13T14:48:36 | 2021-06-13T14:48:36 | null | UTF-8 | Python | false | false | 8,105 | py | import socket,struct
import threading
import time
from datetime import datetime
import dns.resolver
import tcppacket,random
class UDPServerMultiClient():
    ''' A simple UDP server handling multiple clients concurrently.

    Each request is dispatched to a handler thread; replies use a
    stop-and-wait "TCP-over-UDP" scheme built on the project's
    ``tcppacket.TCPPacket``.

    Bug fix: ``handle_request`` called the handlers (``doCalc``,
    ``sendVideo``, ``dns_req``) without their required ``temp_sock``
    argument, raising TypeError on every request; the server socket is
    now passed through.
    '''

    def __init__(self, host, port):
        self.socket_lock = threading.Lock()
        self.host = host  # Host address
        self.port = port  # Host port
        self.sock = None  # Socket, created in configure_server()

    def dns_req(self, msglist, addr, temp_sock):
        ''' Resolve msglist[1] to an A record via 8.8.8.8 and send the
        answer back, retransmitting until an ACK+FIN arrives. '''
        resolver = dns.resolver.Resolver()
        resolver.nameservers = ['8.8.8.8']
        msg = resolver.resolve(msglist[1], 'A')[0].to_text().encode('utf-8')
        while True:
            fin_flag = 1
            tcp = tcppacket.TCPPacket(data=msg, flags_fin=fin_flag)
            tcp.assemble_tcp_feilds()
            temp_sock.sendto(tcp.raw, addr)
            #--------------ACK---------------
            print("Waiting for ACK")
            data, client_address = temp_sock.recvfrom(512*1024)
            s = struct.calcsize('!HHLLBBHHH')
            unpackdata = struct.unpack('!HHLLBBHHH', data[:s])
            # unpackdata[5] holds the TCP flags byte (ACK in the high bits,
            # FIN in the lowest bit).
            if(unpackdata[5] / 2**4):
                print("recive ACK from :", client_address)
            if(unpackdata[5] % 2 and unpackdata[5] / 2**4):
                break

    def doCalc(self, msglist, addr, temp_sock):
        ''' Evaluate the arithmetic request in msglist and send the result
        back, retransmitting until an ACK+FIN arrives. '''
        print("calculating...", addr)
        if msglist[2] == '+':
            ans = float(msglist[1]) + float(msglist[3])
        elif msglist[2] == '-':
            ans = float(msglist[1]) - float(msglist[3])
        elif msglist[2] == '*':
            ans = float(msglist[1]) * float(msglist[3])
        elif msglist[2] == '/':
            ans = float(msglist[1]) / float(msglist[3])
        elif msglist[2] == '^':
            ans = float(msglist[1]) ** float(msglist[3])
        elif msglist[2] == 'sqrt':
            ans = float(msglist[1]) ** 0.5
        else:
            print('Error form, return -1')
            ans = -1
        msg = str(ans).encode('utf-8')
        while True:
            fin_flag = 1
            tcp = tcppacket.TCPPacket(data=msg, flags_fin=fin_flag)
            tcp.assemble_tcp_feilds()
            temp_sock.sendto(tcp.raw, addr)
            #--------------ACK---------------
            print("Waiting for ACK")
            data, client_address = temp_sock.recvfrom(512*1024)
            s = struct.calcsize('!HHLLBBHHH')
            unpackdata = struct.unpack('!HHLLBBHHH', data[:s])
            if(unpackdata[5] / 2**4):
                print("recive ACK from :", client_address)
            if(unpackdata[5] % 2 and unpackdata[5] / 2**4):
                break

    def sendVideo(self, msg, addr, temp_sock):
        ''' Stream ../<n>.mp4 (n = last char of the request) in 1 KiB
        chunks with sequence numbers, a delayed ACK every 3 packets, and
        a final FIN packet. '''
        videonumber = msg[-1]
        target = "../" + str(videonumber) + ".mp4"
        f = open(target, "rb")
        ack_seq = 0
        seq = 0
        pendingSendData = b''
        chksum = 0
        counter = 0
        while True:
            pendingSendData = f.read(1024)
            if(pendingSendData == b''):
                # End of file: fall through to send the FIN packet below.
                pendingSendData = ''
                fin_flag = 1
                break
            chksum = maybe_make_packet_error()
            tcp = tcppacket.TCPPacket(data=pendingSendData,
                                      seq=seq, ack_seq=ack_seq, chksum=chksum)
            tcp.assemble_tcp_feilds()
            temp_sock.sendto(tcp.raw, addr)
            print("send a packet to ", addr,
                  "with server seq :", seq)
            seq += 1
            counter += 1
            #-----------Delay ACK with counter
            if(counter == 3):
                data, addr = temp_sock.recvfrom(512*1024)
                s = struct.calcsize('!HHLLBBHHH')
                unpackdata = struct.unpack('!HHLLBBHHH', data[:s])
                if(unpackdata[5] / 2**4):
                    print("recive ACK from :", addr,
                          "with ack seq: ", unpackdata[3], " and client seq: ", unpackdata[2])
                counter = 0
        print(fin_flag)
        chksum = maybe_make_packet_error()
        tcp = tcppacket.TCPPacket(data=pendingSendData.encode('utf-8'),
                                  seq=seq, ack_seq=ack_seq,
                                  flags_fin=fin_flag,
                                  chksum=chksum)
        tcp.assemble_tcp_feilds()
        temp_sock.sendto(tcp.raw, addr)
        print("send a packet to ", addr,
              "with server seq :", seq)
        seq += 1
        # receive ACK
        data, addr = temp_sock.recvfrom(512*1024)
        s = struct.calcsize('!HHLLBBHHH')
        unpackdata = struct.unpack('!HHLLBBHHH', data[:s])
        # unpackdata[5] is tcp flags
        if(unpackdata[5] / 2**4):
            print("recive ACK from :", addr,
                  "with ack seq: ", unpackdata[3], " and client seq: ", unpackdata[2])
        pass

    def configure_server(self):
        ''' Configure the server: create the UDP socket and bind it. '''
        # create UDP socket with IPv4 addressing
        self.printwt('Creating socket...')
        self.printwt('Socket created')
        # bind server to the address
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.bind((self.host, self.port))
        self.printwt(f'Binding server to {self.host}:{self.port}...')
        self.printwt(f'Server binded to {self.host}:{self.port}')

    def handle_request(self, msglist, client_address):
        ''' Dispatch a parsed client request to the matching handler.

        Fixed: the handlers require a socket argument; pass the bound
        server socket (previously omitted, causing TypeError).
        '''
        if(msglist[0].find("calc") != -1):
            self.doCalc(msglist, client_address, self.sock)
        elif(msglist[0].find("video") != -1):
            self.sendVideo(msglist, client_address, self.sock)
        elif(msglist[0].find("dns") != -1):
            self.dns_req(msglist, client_address, self.sock)
        pass

    def printwt(self, msg):
        ''' Print message with current date and time '''
        current_date_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        print(f'[{current_date_time}] {msg}')

    def wait_for_client(self):
        ''' Wait for clients and handle their requests '''
        try:
            while True:  # keep alive
                try:  # receive request from client
                    print("Waiting for client...")
                    data, client_address = self.sock.recvfrom(1024)
                    print("Received request from client:", client_address)
                    s = struct.calcsize('!HHLLBBH')
                    unpackdata = struct.unpack('!HHLLBBH', data[:s])
                    msg = data[s:].decode('utf-8')
                    # NOTE(review): msg[0] is always a str, so this branch is
                    # always taken; the else branch below is unreachable.
                    if(not isinstance(msg[0], int)):
                        msglist = msg.split(' ')
                        c_thread = threading.Thread(target=self.handle_request,
                                                    args=(msglist, client_address))
                        c_thread.daemon = True
                        c_thread.start()
                    else:
                        index = msg.find("***")
                        msglist1 = msg[:index].split(' ')
                        msglist2 = msg[index+3:index].split(' ')
                        print(msglist1, msglist2)
                        exit()
                except OSError as err:
                    self.printwt(err)
        except KeyboardInterrupt:
            self.shutdown_server()

    def shutdown_server(self):
        ''' Shutdown the UDP server '''
        self.printwt('Shutting down server...')
        self.sock.close()
def maybe_make_packet_error():
    # Returns 1 to request a corrupted checksum on the outgoing packet,
    # 0 otherwise.
    # NOTE(review): randint(1, 1000000) < 1000000 is true ~99.9999% of the
    # time, so an "error" is injected on almost every packet; if a *rare*
    # error was intended, this comparison looks inverted — confirm.
    if(random.randint(1, 1000000) < 1000000):
        # make packet error
        return 1
    return 0
def main():
    ''' Create a UDP Server and handle multiple clients simultaneously '''
    # Bind to localhost:12345 and serve until interrupted.
    udp_server_multi_client = UDPServerMultiClient('127.0.0.1', 12345)
    udp_server_multi_client.configure_server()
    udp_server_multi_client.wait_for_client()
if __name__ == '__main__':
main() | [
"tom95011@gmail.com"
] | tom95011@gmail.com |
0b0b5a744909d7f44929e708029840e847796406 | b7f3edb5b7c62174bed808079c3b21fb9ea51d52 | /build/android/gyp/bundletool.py | 85528157d28659380d37252cdae03e704e1421e5 | [
"BSD-3-Clause"
] | permissive | otcshare/chromium-src | 26a7372773b53b236784c51677c566dc0ad839e4 | 64bee65c921db7e78e25d08f1e98da2668b57be5 | refs/heads/webml | 2023-03-21T03:20:15.377034 | 2020-11-16T01:40:14 | 2020-11-16T01:40:14 | 209,262,645 | 18 | 21 | BSD-3-Clause | 2023-03-23T06:20:07 | 2019-09-18T08:52:07 | null | UTF-8 | Python | false | false | 1,100 | py | #!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Simple wrapper around the bundletool tool.
Bundletool is distributed as a versioned jar file. This script abstracts the
location and version of this jar file, as well as the JVM invokation."""
import logging
import os
import sys
from util import build_utils
# Assume this is stored under build/android/gyp/
BUNDLETOOL_DIR = os.path.abspath(os.path.join(
    __file__, '..', '..', '..', '..', 'third_party', 'android_build_tools',
    'bundletool'))

# Pinned bundletool release; the jar file name embeds this version.
BUNDLETOOL_VERSION = '0.13.3'

BUNDLETOOL_JAR_PATH = os.path.join(
    BUNDLETOOL_DIR, 'bundletool-all-%s.jar' % BUNDLETOOL_VERSION)


def RunBundleTool(args):
    # Invoke the bundletool jar through the JVM, forwarding `args`, and
    # return its captured output (Java reflective-access warnings filtered).
    args = [build_utils.JAVA_PATH, '-jar', BUNDLETOOL_JAR_PATH] + args
    logging.debug(' '.join(args))
    return build_utils.CheckOutput(
        args,
        print_stderr=True,
        stderr_filter=build_utils.FilterReflectiveAccessJavaWarnings)


if __name__ == '__main__':
    RunBundleTool(sys.argv[1:])
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
29b6645ee3c7424a00178ab034afa8757ff489bf | 49c150ef415fe2f61db86a220ae5d8e9ffb53460 | /jasper_report/models/jasper_report_settings.py | 2185947f9446717a972646b059e6b0b7a4f4c5ef | [] | no_license | haylahi/multidadosti-addons | ffd289112a1e0f53516a74cfc39fb03dfc601dd4 | e99b68a598be59a39191b743cdb377888e4ac0ff | refs/heads/master | 2021-01-14T08:36:44.548048 | 2017-02-03T18:03:18 | 2017-02-03T18:03:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,598 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2017 MultidadosTI (http://www.multidadosti.com.br)
# @author Aldo Soares <soares_aldo@hotmail.com>
# License LGPL-3 - See http://www.gnu.org/licenses/lgpl-3.0.html
#
# Based in module 'base_external_dbsource'
#
import logging
import psycopg2
from odoo import models, fields, api, _
from odoo.exceptions import Warning as UserError
import odoo.tools as tools
_logger = logging.getLogger(__name__)
CONNECTORS = [('postgres', 'PostgreSQL')]
class JasperReportDBSource(models.Model):
    """Connection settings for an external database used by Jasper reports.

    Based on the 'base_external_dbsource' module; only PostgreSQL
    (via psycopg2) is currently offered in CONNECTORS.
    """

    _name = "jasper.report.db.source"
    _description = 'External Database Sources'

    name = fields.Char('Data Source Name', required=True, size=64)
    # Defaults to the database the current Odoo instance runs on.
    db_name = fields.Char('Database Name',
                          default=lambda self: self.env.cr.dbname)
    user_field = fields.Char('User')
    host = fields.Char('Host', default='localhost')
    port = fields.Char('Port', default='5432')
    password = fields.Char('Password', size=40)
    connector = fields.Selection(CONNECTORS, 'Connector', required=True,
                                 default='postgres',
                                 help="If a connector is missing from the\
                                 list, check the server log to confirm\
                                 that the required components were\
                                 detected.")

    @api.multi
    def conn_open(self):
        """Open and return a psycopg2 connection to the configured source.

        NOTE(review): if ``connector`` is ever not 'postgres', ``conn`` is
        never bound and the ``return`` raises UnboundLocalError; unreachable
        today because 'postgres' is the only selection value.
        """
        self.ensure_one()
        # Build the libpq connection string from this record's fields; the
        # password is interpolated last through the trailing %s placeholder.
        if self.connector == 'postgres':
            conn_str = "dbname='{0}' " \
                       "user='{1}' " \
                       "host='{2}' " \
                       "port='{3}' " \
                       "password=%s".format(self.db_name,
                                            self.user_field,
                                            self.host,
                                            self.port)
            conn = psycopg2.connect(conn_str % self.password)
        return conn

    @api.multi
    def connection_test(self):
        """Try to open (and always close) a connection, reporting the result.

        Raising UserError even on success is intentional: it is how this
        button action surfaces a feedback dialog in the Odoo client.
        """
        self.ensure_one()
        conn = False
        try:
            conn = self.conn_open()
        except Exception as e:
            raise UserError(_("Connection test failed: \
                Here is what we got instead:\n %s") % tools.ustr(e))
        finally:
            # Release the connection whether or not the test succeeded.
            if conn:
                conn.close()
        raise UserError(_("Connection test succeeded: \
            Everything seems properly set up!"))
| [
"michellstut@gmail.com"
] | michellstut@gmail.com |
f7104d5dbe3cb8bae506302c3f904bcce137838a | 447e9ec821dc7505cc9b73fb7abeb220fe2b3a86 | /rvpy/hypergeom.py | 939245fe972e20b12186e27d5476adaa859a901a | [
"MIT"
] | permissive | timbook/rvpy | ecd574f91ed50fd47b6ead8517954f01e33c03a7 | 301fd61df894d4b300176e287bf9e725378c38eb | refs/heads/master | 2020-03-19T04:01:49.283213 | 2018-12-18T19:21:07 | 2018-12-18T19:21:07 | 135,788,512 | 1 | 0 | MIT | 2018-12-18T19:21:08 | 2018-06-02T04:55:39 | Python | UTF-8 | Python | false | false | 1,091 | py | import numpy as np
from scipy.stats import hypergeom
from . import distribution
class Hypergeometric(distribution.Distribution):
    """
    Hypergeometric Distribution using the following parameterization:

    f(x | N, M, K) = (M x) (N-M K-x) / (N K)

    Models the number of tagged units obtained when drawing K units
    without replacement from a population of N units, M of which are
    tagged.

    Parameters
    ----------
    N : integer, positive
        Population size
    M : integer, positive, M < N
        Number tagged units in population
    K : integer, positive, K < N
        Sample size drawn

    Methods
    -------
    None

    Relationships
    -------------
    None implemented
    """
    def __init__(self, N, M, K):
        all_nonnegative = (N >= 0) and (M >= 0) and (K >= 0)
        assert all_nonnegative, \
            "All parameters of hypergeometric distribution must be nonnegative"
        assert K < N and M < N, "K and M must be less than N"

        # Store the user-facing parameterization.
        self.N, self.M, self.K = N, M, K

        # scipy's convention: M = population, n = tagged, N = draws.
        self.sp = hypergeom(M=N, n=M, N=K)

        # Let the shared base class finish setting itself up.
        super().__init__()

    def __repr__(self):
        return f"Hypergeometric(N={self.N}, M={self.M}, K={self.K})"
| [
"timothykbook@gmail.com"
] | timothykbook@gmail.com |
f1b00b2a146c9d643bf6d6637a6b605d694df7f1 | bde5435074b92404524390a9aa0bfbbebd13124d | /pymps/tensor/flib/setup.py | 9284f32eae984cef9dc720c747bf8b7acb18af85 | [
"MIT"
] | permissive | GiggleLiu/pymps | adc113313725b38e50a2e633c67568fb04ec0ad6 | c8314581010d68d3fa34af6e87b6af2969fc261d | refs/heads/master | 2020-06-13T09:19:13.028827 | 2018-04-15T17:18:55 | 2018-04-15T17:18:55 | 75,427,737 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,121 | py | # render templates
import os
# Templates live in ./templates; each '<name>.template.f90' renders '<name>.f90'.
template_list = ['beinsum.template.f90']
# Rendered source file names (strip the 12-char 'template.f90' suffix).
source_list = [tmplt[:-12] + 'f90' for tmplt in template_list]
# Extension module names: source names without their '.f90' suffix.
extension_list = [source[:-4] for source in source_list]
# Directory containing this setup script.
libdir = os.path.dirname(__file__)
def render_f90s(templates=None):
    """Render Fortran 90 sources from their Jinja-style templates.

    A source file is (re)generated only when it is missing or older than
    its template, so repeated builds skip files that are up to date.

    Parameters
    ----------
    templates : iterable of str, optional
        Template file names relative to ``templates/``. Defaults to the
        module-level ``template_list``.
    """
    from frender import render_f90
    if templates is None:
        templates = template_list
    for template in templates:
        # 'name.template.f90' -> 'name.f90'
        source = template[:-12] + 'f90'
        template_path = os.path.join(libdir, 'templates', template)
        source_file = os.path.join(libdir, source)
        # Rebuild only when the source is absent or stale.
        if (not os.path.isfile(source_file)
                or os.path.getmtime(source_file) < os.path.getmtime(template_path)):
            render_f90(libdir, os.path.join('templates', template), {
                'dtype_list': ['complex*16', 'complex*8', 'real*8', 'real*4']
            }, out_file=source_file)
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the 'lib' subpackage.

    Locates a LAPACK/BLAS implementation, renders any stale Fortran
    sources from their templates, and registers one f2py extension per
    rendered source.
    """
    from numpy.distutils.misc_util import Configuration
    from numpy.distutils.system_info import get_info, NotFoundError, numpy_info
    config = Configuration('lib', parent_package, top_path)
    # get lapack options; fail the build early if no BLAS/LAPACK is found
    lapack_opt = get_info('lapack_opt')
    if not lapack_opt:
        raise NotFoundError('no lapack/blas resources found')
    # Pull the ATLAS version (if any) out of the define_macros entries,
    # falling back to None when the macro is absent.
    atlas_version = ([v[3:-3] for k, v in lapack_opt.get('define_macros', [])
                      if k == 'ATLAS_INFO'] + [None])[0]
    if atlas_version:
        print(('ATLAS version: %s' % atlas_version))
    # Alternative MKL linkage, kept for reference:
    # include_dirs=[os.curdir,'$MKLROOT/include']
    # library_dirs=['$MKLROOT/lib/intel64']
    # libraries=['mkl_intel_lp64','mkl_sequential','mkl_core', 'm', 'pthread']
    # render f90 files if templates changed
    render_f90s()
    for extension, source in zip(extension_list, source_list):
        # MKL variant kept for reference:
        # config.add_extension(
        #     extension, [os.path.join(libdir, source)], libraries=libraries,
        #     library_dirs=library_dirs, include_dirs=include_dirs)
        config.add_extension(extension, [os.path.join(
            libdir, source)], extra_info=lapack_opt)
    return config
"cacate0129@gmail.com"
] | cacate0129@gmail.com |
fb0b667e530371c5a9b1ca96537004f321830915 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02775/s686412442.py | 23cabfd09495510ed28ac5ab8223d013bce570f0 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | def main():
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**7)
from collections import Counter, deque
from itertools import combinations, permutations, accumulate, groupby, product
from bisect import bisect_left,bisect_right
from heapq import heapify, heappop, heappush
import math
#from math import gcd
#inf = 10**17
#mod = 10**9 + 7
n = list(input().rstrip())
n = [0] + n
n = n[::-1]
ln = len(n)
res = 0
a = 0
for i in range(ln):
s = int(n[i])
s += a
if 0<=s<=4:
res += s
a = 0
elif 6<=s:
res += 10-s
a = 1
else:
if int(n[i+1])>=5:
res += 10-s
a = 1
else:
res += s
a = 0
print(res)
if __name__ == '__main__':
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
306818b198106c3684b3f1aaeebc1fab6e1104cc | 7ec6a731b1fab8d4048e914f0e1a6ab571d73db5 | /mycoconut/celery.py | 74f87ee1f0ef9bc500be13b9828d2ee779461be8 | [] | no_license | salvacarrion/mycoconut | 230ff1aa9f3ecd36a60b0c01e456f441497fe2a6 | 285323c1212b0deb04b0dce3c07eb504f5169e69 | refs/heads/master | 2020-03-10T22:08:19.378487 | 2018-12-26T22:29:52 | 2018-12-26T22:29:52 | 129,611,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
# Set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mycoconut.settings')

# Broker is RabbitMQ (pyamqp); task results are stored in a local Redis.
app = Celery('mycoconut', backend='redis://localhost', broker='pyamqp://')

# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
#   should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')

# Load task modules from all registered Django app configs.
app.autodiscover_tasks()


@app.task(bind=True)
def debug_task(self):
    """Print this task's own request info; handy to verify a worker is alive."""
    print('Request: {0!r}'.format(self.request))

#
# @app.task
# def test_celery():
#     for i in range(100):
#         print('Iter.: ' + str(i))
| [
"salva.carrion@outlook.com"
] | salva.carrion@outlook.com |
7e721b051bb6447c55ebdd45c63f1be488cc5e5d | dbd02bf7497a48df73dfd4731a4f4855bb436167 | /dailyPython/08_august/29_isSquare.py | e21c7c77f28fdbb49eea2468e9fc524e2d862b25 | [] | no_license | dfeusse/2018_practice | 372f0e6d83e16ab682ff20f9b9866701b377a4a5 | 8288c87a76a2f0db88cfb7f1eb78a8bc62c83e56 | refs/heads/master | 2020-03-17T07:48:12.629747 | 2018-10-02T16:11:52 | 2018-10-02T16:11:52 | 133,413,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 996 | py | '''
Given an integral number, determine if it's a square number:
In mathematics, a square number or perfect square is an integer that is the square of an integer; in other words, it is the product of some integer with itself.
The tests will always use some integral number, so don't worry about that in dynamic typed languages.
Examples
is_square (-1) # => false
is_square 0 # => true
is_square 3 # => false
is_square 4 # => true
is_square 25 # => true
is_square 26 # => false
'''
def is_square(n):
    """Return True if n is a perfect square, False otherwise.

    Negative numbers are never perfect squares. Runs in O(1) instead of
    scanning every candidate up to n.
    """
    if n < 0:
        return False
    root = int(n ** 0.5)
    # Float sqrt can be off by one near large perfect squares, so check
    # the neighbouring integers too.
    for candidate in (root - 1, root, root + 1):
        if candidate * candidate == n:
            return True
    return False


# Parenthesized print() works on both Python 2 and 3 for a single argument.
print(is_square(-1))   # False: negative numbers cannot be square numbers
print(is_square(0))    # True: 0 is a square number
print(is_square(3))    # False
print(is_square(4))    # True
print(is_square(25))   # True
print(is_square(26))   # False
| [
"dfeusse@gmail.com"
] | dfeusse@gmail.com |
219c3c7d1b62fecf1d44cba7c54e1e71e13b6427 | 40d8db9262a7ec846a66b636501881250b05fadb | /Chapter 6 - Code/Introduction to File Input and Output/Writing Data to a File/file_write.py | 8b5dd7dd94c7b983026b94e0074f7b6e166013ec | [] | no_license | grace-omotoso/CIS-202---Python-Programming | ceeb036d0ef75cbedc11c1707fd5902dc883085b | 3bbbb4b567035dafd195d07e0210a1cc409c7937 | refs/heads/master | 2023-04-14T12:30:14.864195 | 2021-04-18T03:33:26 | 2021-04-18T03:33:26 | 332,593,455 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | # This program writes three lines of data
# to a file.
def main():
    """Write the names of three philosophers to philosophers.txt."""
    # The with-statement guarantees the file is flushed and closed even
    # if one of the writes raises, replacing the manual open()/close().
    with open('philosophers.txt', 'w') as outfile:
        outfile.write('John Locke\n')
        outfile.write('David Hume\n')
        outfile.write('Edmund Burke\n')


# Call the main function.
if __name__ == '__main__':
    main()
| [
"gomotoso@calhoun.local"
] | gomotoso@calhoun.local |
204f5b44f568330b0d5f830586bc087894676ef7 | 99052370591eadf44264dbe09022d4aa5cd9687d | /build/learning_ros/Part_5/baxter/baxter_playfile_nodes/catkin_generated/pkg.installspace.context.pc.py | 453b09a45ff584b7615f92f40dd01f7e585ab235 | [] | no_license | brucemingxinliu/ros_ws | 11b1a3e142132925d35b3adf929f1000392c5bdc | 45f7e553ea20b79e3e93af5f77a1b14b64184875 | refs/heads/master | 2021-01-24T03:36:47.043040 | 2018-02-26T00:53:37 | 2018-02-26T00:53:37 | 122,892,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/toshiki/ros_ws/install/include".split(';') if "/home/toshiki/ros_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs;baxter_trajectory_streamer;baxter_core_msgs;actionlib_msgs;actionlib;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "baxter_playfile_nodes"
PROJECT_SPACE_DIR = "/home/toshiki/ros_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"mxl592@case.edu"
] | mxl592@case.edu |
50f4aa6670ba93b6be9ffddf5d7518302da462c4 | 97dc3a722f93114028533d4c6f6a2f8a9edc5677 | /logger.py | ff97fb7e8f44939f1fc74c7fbc000856790c8f5d | [
"MIT"
] | permissive | seungjaeryanlee/implementations-utils | 9cb92576d3550bd5b7628f5037fa20425dec52ae | d60ca4edd777e3033e00e8cae83557bb843130ec | refs/heads/master | 2020-06-27T15:48:22.795568 | 2019-09-20T14:35:37 | 2019-09-20T14:35:37 | 199,986,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 941 | py | """Various logging modules."""
import logging
import coloredlogs
def get_logger(log_to_console=True, log_to_file=True):
    """Initialize a Python logger that outputs to file and/or console.

    Parameters
    ----------
    log_to_console : bool
        Attach a StreamHandler emitting INFO-and-above records.
    log_to_file : bool
        Attach a FileHandler writing DEBUG-and-above records to ``run.log``.

    Returns
    -------
    logging.Logger
        The configured ``"main_logger"`` instance.
    """
    assert log_to_console or log_to_file

    logger = logging.getLogger("main_logger")
    logger.setLevel(logging.DEBUG)

    # logging.getLogger returns the same instance on every call, so without
    # this guard repeated get_logger() calls would stack duplicate handlers
    # and every record would be emitted multiple times.
    if logger.handlers:
        return logger

    formatter = coloredlogs.ColoredFormatter(
        "%(asctime)s | %(filename)12s | %(levelname)8s | %(message)s"
    )

    if log_to_file:
        fh = logging.FileHandler("run.log")
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(formatter)
        logger.addHandler(fh)
    if log_to_console:
        ch = logging.StreamHandler()
        ch.setLevel(logging.INFO)
        ch.setFormatter(formatter)
        logger.addHandler(ch)

    # Fix TensorFlow doubling logs
    # https://stackoverflow.com/questions/33662648/tensorflow-causes-logging-messages-to-double
    logger.propagate = False
    return logger
| [
"seungjaeryanlee@gmail.com"
] | seungjaeryanlee@gmail.com |
b5967802e4c740dadf0d22f485eb33d7eab1e8c0 | 41788da95153e3377425aac7e600751dd6586ee2 | /smallslive/artists/migrations/0015_currentpayoutperiod_current_total_seconds.py | bb7164ad6d2a09a63e28338c19e0c276f9cdba80 | [] | no_license | SmallsLIVE/smallslive | 35ea9e53b218f499639172b1ba943ab3fb01eb14 | b10fb72668d558fbab8bc17bebeecd7bd97fd74f | refs/heads/develop | 2023-07-27T20:30:35.569000 | 2022-04-28T15:42:37 | 2022-04-28T15:42:37 | 17,846,690 | 6 | 5 | null | 2023-02-28T10:20:35 | 2014-03-17T23:06:40 | Python | UTF-8 | Python | false | false | 466 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('artists', '0014_currentpayoutperiod'),
]
operations = [
migrations.AddField(
model_name='currentpayoutperiod',
name='current_total_seconds',
field=models.BigIntegerField(default=0),
preserve_default=True,
),
]
| [
"filip@jukic.me"
] | filip@jukic.me |
e34db10ab49412905d760bcf6d8e17840d73dda2 | 8c6816435093cb8e9e45593d3ffdd67028a011b6 | /Graph/GraphEdge.py | 37aa6144d90c36ac003c388562072e6515d34b9e | [] | no_license | Keeady/daily-coding-challenge | 6ee74a5fe639a1f5b4753dd4848d0696bef15c28 | 31eebbf4c1d0eb88a00f71bd5741adf5e07d0e94 | refs/heads/master | 2020-03-27T07:58:05.713290 | 2019-03-08T15:03:05 | 2019-03-08T15:03:05 | 146,210,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | class GraphEdge:
def __init__(self, movie_name, destination_node):
self.name = movie_name
self.destination = destination_node | [
"cbevavy@datto.com"
] | cbevavy@datto.com |
47d665103b0821b75df617157e09b106d312ee91 | 159aed4755e47623d0aa7b652e178296be5c9604 | /data/scripts/templates/object/draft_schematic/item/theme_park/alderaan/act3/shared_dead_eye_prototype.py | e3794675658cc5ed1ad15029ca10f7f7afea1dd1 | [
"MIT"
] | permissive | anhstudios/swganh | fb67d42776864b1371e95f769f6864d0784061a3 | 41c519f6cdef5a1c68b369e760781652ece7fec9 | refs/heads/develop | 2020-12-24T16:15:31.813207 | 2016-03-08T03:54:32 | 2016-03-08T03:54:32 | 1,380,891 | 33 | 44 | null | 2016-03-08T03:54:32 | 2011-02-18T02:32:45 | Python | UTF-8 | Python | false | false | 477 | py | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the Intangible template for this draft schematic.

    Called by the engine's template factory; ``kernel`` is part of the
    uniform factory signature and is unused in this template.
    """
    result = Intangible()
    result.template = "object/draft_schematic/item/theme_park/alderaan/act3/shared_dead_eye_prototype.iff"
    # -1 means no attribute template is associated with this object.
    result.attribute_template_id = -1
    result.stfName("string_id_table","")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result
"rwl3564@rit.edu"
] | rwl3564@rit.edu |
8d04ce9c1c27ca4f34376387d60b1501e479b7d4 | c1b901ed1eee4d5dc2ee252cd51b4e3c14f02554 | /Misc/mem_load_test.py | 1e464cb5db3ba532613f3e82f89e87fd37fd338c | [
"MIT"
] | permissive | lengjiayi/SpeakerVerifiaction-pytorch | 70a86c9c9029a214679e636917fb305a85a94182 | 99eb8de3357c85e2b7576da2a742be2ffd773ead | refs/heads/master | 2023-07-09T20:09:07.715305 | 2021-08-19T11:03:28 | 2021-08-19T11:03:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@Author: yangwenhao
@Contact: 874681044@qq.com
@Software: PyCharm
@File: mem_load_test.py
@Time: 2020/3/29 10:30 PM
@Overview:
"""
import torch
import numpy as np
from torchvision.models import ResNet
from torchvision.models.resnet import BasicBlock
# Build a minimal ResNet (one BasicBlock per stage, random weights) just to
# exercise model construction / memory loading; nothing is trained or run.
model = ResNet(BasicBlock, [1, 1, 1, 1], num_classes=1000)
| [
"874681044@qq.com"
] | 874681044@qq.com |
1ffe2ab84d769ef4a8648179baf02799c58313b5 | 4d068a6ff1461256edc72092ec7a687c4899a7e9 | /redq/config.py | 801d30d969d2830a9b0706e162b9d006bf081042 | [] | no_license | hackrole/flask_demo | 4546cc6d964bae47840631bad0b923ba4279946d | c4f87cdbaea30fc12cdadb78ba9c784118d3a679 | refs/heads/master | 2020-04-02T05:12:47.496964 | 2016-08-02T03:14:53 | 2016-08-02T03:14:53 | 64,720,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,310 | py | # -*- coding: utf-8 -*-
# pylint: disable=no-init,too-few-public-methods
import os
basedir = os.path.abspath(os.path.dirname(__file__))
updir = os.path.dirname(basedir)
class BaseConfig(object):
    """Settings shared by all environments; subclasses override as needed."""

    DEBUG = True
    # Prefer the environment variable; the literal is a development fallback.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'

    # pony orm config: create schema on startup, in-memory SQLite by default
    PONY_CREATE_DB = True
    PONY_CREATE_TABLES = True
    PONY_DATABASE_TYPE = 'sqlite'
    PONY_SQLITE_FILE = ':memory:'

    # celery config: the same local Redis serves as broker and result backend
    CELERY_BROKER_URL = 'redis://localhost:6379/0'
    CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'

    # mail config: Gmail SMTP over TLS, credentials from the environment
    MAIL_SERVER = 'smtp.googlemail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
    FLASKY_MAIL_SENDER = 'Flasky Admin <flasky@example.com>'
    FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
class DevConfig(BaseConfig):
    """Development: debugging on, data persisted to a local SQLite file."""

    DEBUG = True
    TESTING = False

    # pony orm config
    PONY_SQLITE_FILE = os.path.join(updir, 'tmp/data-dev.sqlite')


class TestConfig(BaseConfig):
    """Testing: TESTING flag on; CSRF disabled so test clients can POST."""

    DEBUG = False
    TESTING = True

    # disable csrf token check
    WTF_CSRF_ENABLED = False


class ProdConfig(BaseConfig):
    """Production: all debug/testing switches off."""

    DEBUG = False
    TESTING = False

    # todo pony orm config
"daipeng123456@gmail.com"
] | daipeng123456@gmail.com |
3188b814c7c340f22ce2d42690d05fc179c3ab04 | 59fba4703b8f2fea535de42d8f0668879ca2d970 | /Recursion/combination_sum_II.py | 6d5dcbe9546c81c16ddc7ca3e7de8e9ee190faf6 | [] | no_license | viswan29/Leetcode | 3972796585fb9daa3b7f4b51d378514444db26b0 | aefc8006ccac4a4720dda1bd932a04fd1880ec9d | refs/heads/master | 2023-02-11T02:15:09.650498 | 2021-01-04T06:17:24 | 2021-01-04T06:17:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,462 | py | '''
https://leetcode.com/problems/combination-sum-ii/
Given a collection of candidate numbers (candidates) and a target number (target), find all unique combinations in candidates where the candidate numbers sums to target.
Each number in candidates may only be used once in the combination.
Note: All numbers (including target) will be positive integers. The solution set must not contain duplicate combinations.
Example 1:
Input: candidates = [10,1,2,7,6,1,5], target = 8,
A solution set is:
[
[1, 7],
[1, 2, 5],
[2, 6],
[1, 1, 6]
]
Example 2:
Input: candidates = [2,5,2,1,2], target = 5,
A solution set is:
[
[1,2,2],
[5]
]
'''
class Solution:
    def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
        """Return all unique combinations of candidates summing to target.

        Each candidate may be used at most once; duplicates in the input
        are skipped at the same tree depth so no combination repeats.
        """
        candidates.sort()
        total = len(candidates)
        results: List[List[int]] = []

        def backtrack(start: int, remaining: int, chosen: List[int]) -> None:
            if remaining == 0:
                results.append(chosen[:])
                return
            for pos in range(start, total):
                # Skip a duplicate value at the same depth: the first
                # occurrence already explored every combination using it.
                if pos > start and candidates[pos] == candidates[pos - 1]:
                    continue
                value = candidates[pos]
                # Candidates are sorted, so once one overshoots, all do.
                if value > remaining:
                    break
                chosen.append(value)
                backtrack(pos + 1, remaining - value, chosen)
                chosen.pop()

        backtrack(0, target, [])
        return results
return ans | [
"komalbansal97@gmail.com"
] | komalbansal97@gmail.com |
09049212304a9bc3c61fd7497adaeb7bdfcaabbf | 13808d3f3e53ab8abb685de1c0d587abb062742f | /plc_api/PLC/Methods/AddPersonTag.py | 244b546ac70ebfe32df7ce134cb152f8d0354cf1 | [] | no_license | nfvproject/Myplc | ea3635ac939dd7623f0848bcfebf09926b336400 | 88b39d9649936b8ce545896162ac3a944f135c7e | refs/heads/master | 2021-01-18T17:03:25.338207 | 2014-10-16T08:39:30 | 2014-10-16T08:39:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,336 | py | #
# Thierry Parmentelat - INRIA
#
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Auth import Auth
from PLC.Persons import Person, Persons
from PLC.TagTypes import TagType, TagTypes
from PLC.PersonTags import PersonTag, PersonTags
# need to import so the core classes get decorated with caller_may_write_tag
from PLC.AuthorizeHelpers import AuthorizeHelpers
class AddPersonTag(Method):
    """
    Sets the specified setting for the specified person
    to the specified value.

    Admins have full access. Non-admins can change their own tags.

    Returns the new person_tag_id (> 0) if successful, faults
    otherwise.
    """

    roles = ['admin', 'pi', 'tech', 'user']

    accepts = [
        Auth(),
        # no other way to refer to a person
        PersonTag.fields['person_id'],
        # tag type may be given by id or by name
        Mixed(TagType.fields['tag_type_id'],
              TagType.fields['tagname']),
        PersonTag.fields['value'],
        ]

    returns = Parameter(int, 'New person_tag_id (> 0) if successful')

    def call(self, auth, person_id, tag_type_id_or_name, value):
        # Resolve the person; fault if the id is unknown.
        persons = Persons(self.api, [person_id])
        if not persons:
            raise PLCInvalidArgument, "No such person %r"%person_id
        person = persons[0]

        # Resolve the tag type (by id or name); fault if unknown.
        tag_types = TagTypes(self.api, [tag_type_id_or_name])
        if not tag_types:
            raise PLCInvalidArgument, "No such tag type %r"%tag_type_id_or_name
        tag_type = tag_types[0]

        # checks for existence - does not allow several different settings
        conflicts = PersonTags(self.api, {'person_id':person['person_id'],
                                          'tag_type_id':tag_type['tag_type_id']})

        if len(conflicts) :
            raise PLCInvalidArgument, "Person %d (%s) already has setting %d"% \
                (person['person_id'],person['email'], tag_type['tag_type_id'])

        # check authorizations (raises on failure; see AuthorizeHelpers)
        person.caller_may_write_tag (self.api,self.caller,tag_type)

        # Create and persist the new tag row.
        person_tag = PersonTag(self.api)
        person_tag['person_id'] = person['person_id']
        person_tag['tag_type_id'] = tag_type['tag_type_id']
        person_tag['value'] = value

        person_tag.sync()
        # Record the affected object for the event log.
        self.object_ids = [person_tag['person_tag_id']]

        return person_tag['person_tag_id']
| [
"wangyang2013@ict.ac.cn"
] | wangyang2013@ict.ac.cn |
7e03196fc21efab80698ed047ba7d5dbdaaaf15d | bf9f2c6ee0a1a3989fc25ea764f312095afc799f | /irc_bot.py | f9792d2daaed75384ef7879a9031f33b323b135c | [] | no_license | shreyansh26/IRC_Bot | 474ef5523f4f7ee3a7c5f12e6dbab093c67cf7e2 | 5676fad983e8d39e94ab8ec07b989908a8320961 | refs/heads/master | 2021-01-25T10:51:13.596797 | 2017-06-09T19:20:41 | 2017-06-09T19:20:41 | 93,888,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,752 | py | # Help taken from Linux Academy :)
#!/usr/bin/python3
import socket
ircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

server = "chat.freenode.net" # IRC network to connect to
channel = "##bot-testing" # Channel the bot joins on startup
botnick = "shreyansh26Bot" # Your bot's nickname.
adminname = "shreyansh26Bot" # Your IRC nickname (only this nick may stop the bot).
exitcode = "bye " + botnick  # Admin message that shuts the bot down.

ircsock.connect((server, 6667)) # 6667 is the standard plaintext IRC port.
# Register with the server: USER supplies identity fields, NICK the nickname.
ircsock.send(bytes("USER "+ adminname +" "+ adminname +" "+ adminname + " " + adminname + "\n", "UTF-8")) # user information
ircsock.send(bytes("NICK "+ botnick +"\n", "UTF-8"))
def joinchan(chan):
    """Join the given channel and drain server output until the NAMES
    list terminator arrives, echoing each line for debugging."""
    ircsock.send(bytes("JOIN "+ chan +"\n", "UTF-8"))
    while True:
        ircmsg = ircsock.recv(2048).decode("UTF-8").strip('\n\r')
        print(ircmsg)
        if ircmsg.find("End of /NAMES list.") != -1:
            break
#This function doesn’t need to take any arguments as the response will always be the same. Just respond with "PONG :pingis" to any PING.
#Different servers have different requirements for responses to PING so you may need to adjust/update this depending on your server. I’ve used this particular example with Freenode and have never had any issues.
def ping():
    """Answer a server PING with PONG so the connection stays alive.

    The fixed ":pingis" payload is accepted by Freenode; other networks
    may require echoing the server's own ping token.
    """
    ircsock.send(bytes("PONG :pingis\n", "UTF-8"))
#All we need for this function is to accept a variable with the message we’ll be sending and who we’re sending it to. We will assume we are sending to the channel by default if no target is defined.
#Using target=channel in the parameters section says if the function is called without a target defined, example below in the Main Function section, then assume the target is the channel.
def sendmsg(msg, target=channel):
    """Send a PRIVMSG to target (the configured channel by default)."""
    payload = "PRIVMSG "+ target +" :"+ msg +"\n"
    ircsock.send(bytes(payload, "UTF-8"))
#Main function of the bot. This will call the other functions as necessary and process the information received from IRC and determine what to do with it.
def main():
    """Join the channel and dispatch on incoming messages until told to quit."""
    joinchan(channel)
    # Continually read from the socket; a loop (not recursion) keeps the
    # connection open without growing the call stack.
    while 1:
        ircmsg = ircsock.recv(2048).decode("UTF-8")
        # Strip line breaks so they don't interfere with parsing.
        ircmsg = ircmsg.strip('\n\r')
        # Echo raw traffic for debugging.
        print(ircmsg)
        # Channel/direct messages arrive as:
        #   :[Nick]!~[hostname]@[IP] PRIVMSG [channel] :[message]
        if ircmsg.find("PRIVMSG") != -1:
            # Sender nick: text between the leading ':' and the '!'.
            name = ircmsg.split('!',1)[0][1:].replace('_','')
            # Message text: everything after the second ':'.
            message = ircmsg.split('PRIVMSG',1)[1].split(':',1)[1]
            #print(name)
            #print(message)
            # Nicks are at most 16 chars (Freenode); longer means malformed.
            if len(name) < 17:
                # Greeting: reply in the channel when someone greets the bot.
                if message.find('Hi ' + botnick) != -1:
                    sendmsg("Hello " + name + "!")
                # ".tell [target] [message]": relay a message to a target.
                if message[:5].find('.tell') != -1:
                    target = message.split(' ', 1)[1]
                    if target.find(' ') != -1:
                        # Found "[target] [message]": split at the first space.
                        message = target.split(' ', 1)[1]
                        target = target.split(' ')[0]
                    else:
                        # No message part: explain the syntax to the sender.
                        target = name
                        message = "Could not parse. The message should be in the format of ‘.tell [target] [message]’ to work properly."
                    sendmsg(message, target)
                # Admin-only shutdown: say goodbye, QUIT, and leave main().
                if name.lower() == adminname.lower() and message.rstrip() == exitcode:
                    sendmsg("oh...okay. :'(")
                    ircsock.send(bytes("QUIT \n", "UTF-8"))
                    return
        else:
            # Non-PRIVMSG traffic: answer keepalive PINGs so the server
            # doesn't drop us.
            if ircmsg.find("PING :") != -1:
                ping()


main()
| [
"shreyansh.pettswood@gmail.com"
] | shreyansh.pettswood@gmail.com |
98f5d949e1f7cbf626eacc6e30fc7f0aa363652b | 289505dbe6418183248edd007bd887b9b22f4519 | /todo_rest/todo/models.py | 661d7ee44d9e1538986bd0532bebe84dafa01ca1 | [
"MIT"
] | permissive | OmkarPathak/django-rest-todo | c8aecc9cbdeae6b356c1a51f8bba99d7d1194a9d | e2ae6e95431616f440c7d285630727252aba461e | refs/heads/master | 2020-03-19T09:35:57.238234 | 2018-10-05T03:17:03 | 2018-10-05T03:17:03 | 136,302,304 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | from django.db import models
# Create your models here.
class Task(models.Model):
    """A single to-do item exposed through the REST API."""

    title = models.CharField(max_length=100)
    # Optional free-text details: blank in forms, NULL in the database.
    description = models.CharField(max_length=250, blank=True, null=True)
    # Set once when the row is first created (auto_now_add).
    date_added = models.DateField(auto_now_add=True)

    def __str__(self):
        return self.title

    class Meta:
        verbose_name = 'Task'
        verbose_name_plural = 'Tasks'
"omkarpathak27@gmail.com"
] | omkarpathak27@gmail.com |
44b3f5120c4172136173c018f24d90694dd1c3c6 | de904ae3836d7fb4ef263cb69922b9b710c1fd65 | /components/printing/DEPS | 358c0a6cc55745920267ea18992abeb8a0b9fc7e | [
"BSD-3-Clause"
] | permissive | nelolee/chromium | 34b4194c8514864db438bd663a6c45b1e5f02ca0 | b59e9376b4d6907d28f5174a336bd116e404fe57 | refs/heads/master | 2023-03-16T21:38:35.139379 | 2018-02-28T12:30:52 | 2018-02-28T12:30:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | include_rules = [
"+components/cloud_devices/common",
"-components/printing",
"+components/printing/common",
"+content/public/common",
"+ipc",
"+printing",
"+third_party/WebKit/common",
"+third_party/WebKit/public",
"+ui/gfx"
]
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org | |
4e69a8be12f341e4f31b908bb3de703a747d8e30 | ba7c76345bb41c10705cf759d5742a1eaf06b998 | /ppa/admin.py | 53efc07a1c4bb64566d56cdb0bb114618b37327b | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | Princeton-CDH/ppa-django | e38e4507c6b693dba073b6e666e9ac6ee753b3cb | 99e751b0d656d0d28c7e995cc44c351622313593 | refs/heads/main | 2023-07-07T08:39:17.459523 | 2023-06-23T13:32:33 | 2023-06-23T13:32:33 | 110,731,137 | 5 | 2 | Apache-2.0 | 2023-09-06T18:34:18 | 2017-11-14T18:52:41 | Python | UTF-8 | Python | false | false | 256 | py | from django.contrib import admin
class LocalAdminSite(admin.AdminSite):
"""Custom admin site for PPA to override header & label."""
site_header = "Princeton Prosody Archive administration"
site_title = "Princeton Prosody Archive site admin"
| [
"rebecca.s.koeser@princeton.edu"
] | rebecca.s.koeser@princeton.edu |
d531f3f1f97fea5ac0efd51e58717f80753ff7ea | 45a506c5622f366e7013f1276f446a18fc2fc00d | /kedro/runner/thread_runner.py | 3fd42c177eda064332c32c00d7431995ea0264f0 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sbrugman/kedro | 3e48bcc56cc61fbe575d1a52c4f5bf3e84b6f974 | 25c92b765fba4605a748bdaaa801cee540da611e | refs/heads/develop | 2023-07-20T11:24:07.242114 | 2021-10-08T14:05:03 | 2021-10-08T14:05:03 | 404,517,683 | 1 | 2 | NOASSERTION | 2021-09-08T22:53:09 | 2021-09-08T22:53:09 | null | UTF-8 | Python | false | false | 7,101 | py | # Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""``ThreadRunner`` is an ``AbstractRunner`` implementation. It can
be used to run the ``Pipeline`` in parallel groups formed by toposort
using threads.
"""
import warnings
from collections import Counter
from concurrent.futures import FIRST_COMPLETED, ThreadPoolExecutor, wait
from itertools import chain
from typing import Set
from kedro.io import AbstractDataSet, DataCatalog, MemoryDataSet
from kedro.pipeline import Pipeline
from kedro.pipeline.node import Node
from kedro.runner.runner import AbstractRunner, run_node
class ThreadRunner(AbstractRunner):
"""``ThreadRunner`` is an ``AbstractRunner`` implementation. It can
be used to run the ``Pipeline`` in parallel groups formed by toposort
using threads.
"""
def __init__(self, max_workers: int = None, is_async: bool = False):
"""
Instantiates the runner.
Args:
max_workers: Number of worker processes to spawn. If not set,
calculated automatically based on the pipeline configuration
and CPU core count.
is_async: If True, set to False, because `ThreadRunner`
doesn't support loading and saving the node inputs and
outputs asynchronously with threads. Defaults to False.
Raises:
ValueError: bad parameters passed
"""
if is_async:
warnings.warn(
"`ThreadRunner` doesn't support loading and saving the "
"node inputs and outputs asynchronously with threads. "
"Setting `is_async` to False."
)
super().__init__(is_async=False)
if max_workers is not None and max_workers <= 0:
raise ValueError("max_workers should be positive")
self._max_workers = max_workers
def create_default_data_set(self, ds_name: str) -> AbstractDataSet:
"""Factory method for creating the default data set for the runner.
Args:
ds_name: Name of the missing data set
Returns:
An instance of an implementation of AbstractDataSet to be used
for all unregistered data sets.
"""
return MemoryDataSet()
def _get_required_workers_count(self, pipeline: Pipeline):
"""
Calculate the max number of processes required for the pipeline
"""
# Number of nodes is a safe upper-bound estimate.
# It's also safe to reduce it by the number of layers minus one,
# because each layer means some nodes depend on other nodes
# and they can not run in parallel.
# It might be not a perfect solution, but good enough and simple.
required_threads = len(pipeline.nodes) - len(pipeline.grouped_nodes) + 1
return (
min(required_threads, self._max_workers)
if self._max_workers
else required_threads
)
def _run( # pylint: disable=too-many-locals,useless-suppression
self, pipeline: Pipeline, catalog: DataCatalog, run_id: str = None
) -> None:
"""The abstract interface for running pipelines.
Args:
pipeline: The ``Pipeline`` to run.
catalog: The ``DataCatalog`` from which to fetch data.
run_id: The id of the run.
Raises:
Exception: in case of any downstream node failure.
"""
nodes = pipeline.nodes
load_counts = Counter(chain.from_iterable(n.inputs for n in nodes))
node_dependencies = pipeline.node_dependencies
todo_nodes = set(node_dependencies.keys())
done_nodes = set() # type: Set[Node]
futures = set()
done = None
max_workers = self._get_required_workers_count(pipeline)
with ThreadPoolExecutor(max_workers=max_workers) as pool:
while True:
ready = {n for n in todo_nodes if node_dependencies[n] <= done_nodes}
todo_nodes -= ready
for node in ready:
futures.add(
pool.submit(run_node, node, catalog, self._is_async, run_id)
)
if not futures:
assert not todo_nodes, (todo_nodes, done_nodes, ready, done)
break
done, futures = wait(futures, return_when=FIRST_COMPLETED)
for future in done:
try:
node = future.result()
except Exception:
self._suggest_resume_scenario(pipeline, done_nodes)
raise
done_nodes.add(node)
self._logger.info("Completed node: %s", node.name)
self._logger.info(
"Completed %d out of %d tasks", len(done_nodes), len(nodes)
)
# decrement load counts and release any data sets we've finished
# with this is particularly important for the shared datasets we
# create above
for data_set in node.inputs:
load_counts[data_set] -= 1
if (
load_counts[data_set] < 1
and data_set not in pipeline.inputs()
):
catalog.release(data_set)
for data_set in node.outputs:
if (
load_counts[data_set] < 1
and data_set not in pipeline.outputs()
):
catalog.release(data_set)
| [
"noreply@github.com"
] | sbrugman.noreply@github.com |
030a3d432bcf422ddfdad9ef534758ee113127cd | 62e45255088abb536e9ea6fcbe497e83bad171a0 | /ippython/circulo.py | 5002184bae8789682a1c92339e1c73e4484a02c7 | [] | no_license | jmery24/python | a24f562c8d893a97a5d9011e9283eba948b8b6dc | 3e35ac9c9efbac4ff20374e1dfa75a7af6003ab9 | refs/heads/master | 2020-12-25T21:56:17.063767 | 2015-06-18T04:59:05 | 2015-06-18T04:59:05 | 36,337,473 | 0 | 0 | null | 2015-05-27T02:26:54 | 2015-05-27T02:26:54 | null | UTF-8 | Python | false | false | 723 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 9 07:08:51 2013
@author: daniel
"""
# Programa: circulo.py
# Proposito: calcula el area y el perimetro de un circulo conociendo su radio
# Autor: Daniel Mery
# Fecha: 02/09/2013
# Math Module: carga funcion pi
from math import pi
# Data input: valor del radio (en metros)
radio = float(raw_input("Escribe el valor del radio: "))
# data computation: calcula el perimetro y el area del circulo
area = pi*radio**2
perimetro = pi*radio*2
# data output: muestra en pantalla el valor del area y perimetro
print "El area del circulo es de %6.3f metros cuadrados" % area #muestra 3 decimales
print "El perimetro del circulo es de %6.3f metros" % perimetro #muestra 3 decimales | [
"danmery@gmail.com"
] | danmery@gmail.com |
f78db2b1574564bd06345ff2853100f9c9a0b07c | 3e4ec719074d50d02b3dba8d431dad9895a3144a | /branches/1.0/available plugins/quote.py | d8f9ae85568034b3f7c246edcaa0066694c422d0 | [] | no_license | BGCX067/eyercbot-svn-to-git | 76b9404415b1eb0fef33357bec49c02519005331 | 5e71e71b05f4fa0c06cf5e2d15fb15d882d55864 | refs/heads/master | 2016-09-01T08:50:48.015417 | 2015-12-28T14:32:02 | 2015-12-28T14:32:02 | 48,757,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,204 | py | # Quotes script
# Stores quotes in the user file in a list
# user_list[user_name]['quotes'] = [[Submitter, Quote, [optional, tags, for, search], [Submitter, Quote, [optional, tags, for, search]]
# MUST MAKE SURE USER PLUGIN DOES NOT OVERWRITE QUOTES WHEN REGISTERING OR ANY DISK WRITE
# Features: Users can add any quote to anyone
# Desired
# Features: Users can save/delete (only) their own quotes.
# Configurable permissions for adding/deleting quotes for other people or categories.
# Configurable number of quotes to save (users and "categories" have seperate limits.)
# Configurable command prefix, PRIVMSG/NOTICE, etc.
# Logs adding/deleting quotes/categories into a "quotelog" section of the datafile.
# Special "any" category for general, unlimited number of quotes.
# Command to create/delete "categories" for quotes (deletes all quotes for category as well).
# Command to list all "categories" by name.
# Command to show quote from yourself by number or at random.
# Command to show quote from username or category by number or at random.
# Command to show quote randomly from entire datafile.
# Command to show quote randomly from the "any" category.
# Command to show all quotes for username or category (in privmsg).
# Command to show all of your own quotes (in privmsg).
# Command to search quote datafile by keywords/text string (results shown in privmsg).
# Command to show statistics for all quotes in the datafile (total number of quotes, users/quotes, etc.)
# Quotes within a user's or category's saved quotes are automatically renumbered when one line is deleted.
# Properly handles all tcl-special chars, so quotes can contain ANY input.
# -----
# Configuration
path_users = 'users/'
# -----
import EyeRCbot
import glob
import random
import yaml
# May be able to get away with not using this
quotes_dict = {}
def on_load(connection):
pass
def on_unload(connection, event):
save_quotes()
def index(connection, event, channels):
if len(event.arguments()[0].split()) == 1:
connection.privmsg(event.target(), 'Quote script plugin. !quote add nick:nickname tags:tag1,tag2 quote:line to be quoted will add the quote to the user. Tags are optional parameters for searching purposes.')
return None
if event.arguments()[0].split()[1].upper() == 'HELP':
connection.privmsg(event.target(), 'Quote script plugin. !quote add nick:nickname tags:tag1,tag2 quote:line to be quoted will add the quote to the user. Tags are optional parameters for searching purposes.')
if event.arguments()[0].split()[1].upper() == 'ADD':
if len(event.arguments()[0].split()) == 2 or len(event.arguments()[0].split()):
connection.privmsg(event.target(), 'Quote script plugin. !quote add nick:nickname tags:tag1,tag2 quote:line to be quoted will add the quote to the user. Tags are optional parameters for searching purposes.')
# We make sure the user has AT LEAST nick:nick and quote:quote
if event.arguments()[0].find('nick:') != -1 and event.arguments()[0].find('quote:') != -1:
add_quote(connection, event)
else:
connection.privmsg(event.target(), 'Quote script plugin. !quote add nick:nickname tags:tag1,tag2 quote:line to be quoted will add the quote to the user. Tags are optional parameters for searching purposes.')
# Logic for pulling a random quote
if event.arguments()[0].split()[1].upper() == 'RANDOM' or event.arguments()[0].split()[1].upper() == 'RAND':
user_nick2 = None
quote_sub2 = None
quote_tags2 = None
quote_number2 = None
param2 = 'rand'
if event.arguments()[0].find('nick:') != -1:
for word in event.arguments()[0].split():
if word.find('nick:') != -1:
user_nick2 = word.replace('nick:' , '')
if event.arguments()[0].find('tags:') != -1:
for word in event.arguments()[0].split():
if word.find('tags:') != -1:
pass
quote_entry = get_quote(connection, event, user_nick = user_nick2, quote_sub = quote_sub2, quote_tags = quote_tags2, quote_number = quote_number2, param = param2)
connection.privmsg(event.target(), quote_entry)
def save_quotes(user='ALL'):
if user == 'ALL':
for user_name in EyeRCbot.bot.user_list:
stream = file(path_users + user_name + '.yaml', 'w')
yaml.dump(EyeRCbot.bot.user_list[user_name], stream)
stream.close()
else:
stream = file(path_users + user + '.yaml', 'w')
yaml.dump(EyeRCbot.bot.user_list[user], stream)
stream.close()
def add_quote(connection, event):
# We process the nick and quote
quote_string = quote_string_search = event.arguments()[0].replace('!quote add ','')
for word in quote_string_search.split():
if word.find('nick:') != -1:
quote_nick = word.replace('nick:' , '')
quote_string = quote_string.replace(word, '')
# We then process out the tags, if any
quote_tags = None
if event.arguments()[0].find('tags:') != -1:
for word in event.arguments()[0].split():
if word.find('tags:') != -1:
quote_tags = word.replace('tags:', '').split(',')
quote_string = quote_string.replace(word, '')
if word.find('quote:') != -1:
#quote_quote = word.replace('quote:', '')
quote_string = quote_string.replace('quote:', '').lstrip()
# Now we identify the submitter
quote_submitter = event.source().split('!')[0]
print EyeRCbot.bot.user_list[quote_nick]
if quote_tags == None:
quote_entry = [quote_submitter, quote_string, ['']]
print quote_entry
else:
quote_entry = [quote_submitter, quote_string, quote_tags]
if EyeRCbot.bot.user_list[quote_nick].has_key('quotes') == True:
print EyeRCbot.bot.user_list[quote_nick]
print EyeRCbot.bot.user_list[quote_nick]['quotes']
EyeRCbot.bot.user_list[quote_nick]['quotes'].append(quote_entry)
print EyeRCbot.bot.user_list[quote_nick]['quotes']
else:
EyeRCbot.bot.user_list[quote_nick]['quotes'] = [quote_entry]
connection.privmsg(event.target(), 'Successfully added quote for ' + quote_nick + '.')
save_quotes(quote_nick)
def get_quote(connection, event, user_nick = None, quote_sub = None, quote_tags = None, quote_number = None, param = None):
# We copy the user database then cut out entries with no quotes
quote_db = EyeRCbot.bot.user_list
if quote_db == {}:
return 'No quotes stored'
quote_key = quote_db.keys()
for name in quote_key:
if quote_db[name].has_key('quotes') == False:
del quote_db[name]
if quote_db == {}:
return 'No quotes stored'
# If a random quote is called with no other parameters
if param == 'rand' and user_nick == None and quote_sub == None and quote_tags == None and quote_number == None:
quote_keys = quote_db.keys()
quote_user = quote_db[quote_keys[random.randint(0,len(quote_db)-1)]]['quotes']
quote_entry = quote_user[random.randint(0,len(quote_user)-1)]
# Random quote from a user
if param == 'rand' and user_nick != None and quote_sub == None and quote_tags == None and quote_number == None:
if quote_db[user_nick].has_key('quotes') == True:
quote_entry = quote_db[user_nick][quote_keys[random.randint(0,len(quote_user))]]
else:
quote_entry = 'That user has no quotes.'
return quote_entry
| [
"you@example.com"
] | you@example.com |
255ea063cee865295a94a328d6d88f24d8c1727f | 4bb1a23a62bf6dc83a107d4da8daefd9b383fc99 | /work/abc117_d.py | f7fb459cfbdc6cb27eb021374354fccc78e686d7 | [] | no_license | takushi-m/atcoder-work | 0aeea397c85173318497e08cb849efd459a9f6b6 | f6769f0be9c085bde88129a1e9205fb817bb556a | refs/heads/master | 2021-09-24T16:52:58.752112 | 2021-09-11T14:17:10 | 2021-09-11T14:17:10 | 144,509,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | # -*- coding: utf-8 -*-
n,k = map(int, input().split())
al = list(map(int, input().split()))
# def f(x):
# res = 0
# for a in al:
# res += a^x
# return res
# res = 0
# for x in range(k+1):
# res = max(res, f(x))
# print(res)
maxd = 50
dp = [[-1, -1] for _ in range(maxd+1)]
dp[0][0] = 0
for d in range(maxd):
s1 = 0
s0 = 0
c = 1<<(maxd-d-1)
for a in al:
if a&c>0:
s0 += c
else:
s1 += c
if dp[d][1]!=-1:
dp[d+1][1] = max(dp[d+1][1], dp[d][1]+max(s1,s0))
if dp[d][0]!=-1:
if k&c>0:
dp[d+1][1] = max(dp[d+1][1], dp[d][0]+s0)
dp[d+1][0] = max(dp[d+1][0], dp[d][0]+s1)
else:
dp[d+1][0] = max(dp[d+1][0], dp[d][0]+s0)
# print(dp)
print(max(dp[maxd][0], dp[maxd][1]))
| [
"takushi-m@users.noreply.github.com"
] | takushi-m@users.noreply.github.com |
bcc525f80c79bc4da2cc9f6e9dfa77a1eec59e61 | e9de427c184d518b8230ce0a87ea45b19374869b | /silvia/03_class/for.py | a5e69545198573dec0f692e02ac8dabadcd52c76 | [] | no_license | andresalbertoramos/Master-en-Programacion-con-Python_ed2 | a78eea1ee2d5545384f6bc351369e75631f04e6c | a5ec6418fadedfab6f6cc56e581b41ca61d5215f | refs/heads/master | 2022-03-23T14:17:27.038061 | 2019-12-18T18:16:43 | 2019-12-18T18:16:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | pair_numbers = list()
for number in range(0, 10, 2):
pair_numbers.append(number)
print(pair_numbers)
text = '''En un lugar de la Mancha de cuyo nombre no puedo acordarme
vivía un hidalgo que no me acuerdo si tenía una talla XL'''
new_text = ''
index = 0
for char in text:
if char == 'x' or char == 'X':
break
index += 1
print(text[0:index])
# Otra solución:
index = text.rindex('X')
print(text[0:index])
# Método split:
text1 = 'banana, pear, melon, watermelon'
my_list = text1.split(', ')
print(my_list)
shopping_list = ['mouse', 'keyboard', 'monitor', 'operating system', 'windows']
for item in shopping_list:
print(item)
numbers = [2, 8, 7, 5, 0, 9, 22, 99]
double_numbers = list()
for number in numbers: # Para cada número dentro de la lista números
double = number * 2
double_numbers.append(double)
print(double_numbers)
# for number in range(0, 101, 2): # Si pones (100), va de 0 a 99. El step permite decir de cuanto en cuanto salta el número
# print(number)
| [
"sarnaizgarcia@gmail.com"
] | sarnaizgarcia@gmail.com |
069f0f7688d0e991ef7c3c4026397ef21685d7ad | 31e9fdbfacfea4bdee39bf4ad2e6db7b6a01324a | /models/tss_capsnet/wst_capsnet_e1_graph_mnist.py | 9839fe44777869098cfd3a8aac62ec765d911ae9 | [
"Apache-2.0"
] | permissive | StephenTaylor1998/TSSCapsNet | 1413bc8c06104b4ab0e4494b3970caf86f30027b | edc01b85987da641f4797c1bf60355bc78a6d51f | refs/heads/master | 2023-04-11T11:40:59.910605 | 2021-04-26T12:55:49 | 2021-04-26T12:55:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,966 | py | import numpy as np
import tensorflow as tf
from kymatio.keras import Scattering2D
from ..layers.layers_efficient import PrimaryCaps, FCCaps, Length, Mask, generator_graph_mnist
def wst_capsnet_graph(input_shape, name):
inputs = tf.keras.Input(input_shape)
# (28, 28, 1) ==>> (24, 24, 32)
x = tf.keras.layers.Conv2D(32, 5, activation="relu", padding='valid', kernel_initializer='he_normal')(inputs)
x = tf.keras.layers.BatchNormalization()(x)
# (24, 24, 32) ==>> (22, 22, 64)
x = tf.keras.layers.Conv2D(64, 3, activation='relu', padding='valid', kernel_initializer='he_normal')(x)
x = tf.keras.layers.BatchNormalization()(x)
# (22, 22, 64) ==>> (20, 20, 64)
x = tf.keras.layers.Conv2D(64, 3, activation='relu', padding='valid', kernel_initializer='he_normal')(x)
x = tf.keras.layers.BatchNormalization()(x)
# (20, 20, 64) ==>> (18, 18, 32)
x = tf.keras.layers.Conv2D(32, 3, activation='relu', padding='valid', kernel_initializer='he_normal')(x)
x = tf.keras.layers.BatchNormalization()(x)
shape = x.shape
# (18, 18, 32) ==>> (9, 9, 128)
x = tf.transpose(x, (0, 3, 1, 2))
x = Scattering2D(J=1, L=3)(x)
x = tf.keras.layers.Reshape(target_shape=(128, shape[1]//2, shape[2]//2))(x)
x = tf.transpose(x, (0, 2, 3, 1))
x = tf.keras.layers.BatchNormalization()(x)
x = PrimaryCaps(128, x.shape[1], 16, 8)(x)
digit_caps = FCCaps(10, 16)(x)
digit_caps_len = Length(name='length_capsnet_output')(digit_caps)
return tf.keras.Model(inputs=inputs, outputs=[digit_caps, digit_caps_len], name=name)
def build_graph(input_shape, mode, name):
inputs = tf.keras.Input(input_shape)
y_true = tf.keras.layers.Input(shape=(10,))
noise = tf.keras.layers.Input(shape=(10, 16))
efficient_capsnet = wst_capsnet_graph(input_shape, name)
efficient_capsnet.summary()
print("\n\n")
digit_caps, digit_caps_len = efficient_capsnet(inputs)
noised_digitcaps = tf.keras.layers.Add()([digit_caps, noise]) # only if mode is play
masked_by_y = Mask()([digit_caps, y_true])
masked = Mask()(digit_caps)
masked_noised_y = Mask()([noised_digitcaps, y_true])
generator = generator_graph_mnist(input_shape)
generator.summary()
print("\n\n")
x_gen_train = generator(masked_by_y)
x_gen_eval = generator(masked)
x_gen_play = generator(masked_noised_y)
if mode == 'train':
return tf.keras.models.Model([inputs, y_true], [digit_caps_len, x_gen_train],
name='WST_Efficinet_CapsNet_Generator')
elif mode == 'test':
return tf.keras.models.Model(inputs, [digit_caps_len, x_gen_eval], name='WST_Efficinet_CapsNet_Generator')
elif mode == 'play':
return tf.keras.models.Model([inputs, y_true, noise], [digit_caps_len, x_gen_play],
name='WST_Efficinet_CapsNet_Generator')
else:
raise RuntimeError('mode not recognized')
| [
"2684109034@qq.com"
] | 2684109034@qq.com |
80e09f046d77335977d095a563e932165a43a4fa | ae504b24cfc9567df0e009970a416654d224460e | /tools/patch_h_codegen.py | c9bb1b9a0a8888f963c6cba50960cd09b31a7fc7 | [
"BSD-3-Clause"
] | permissive | MicrohexHQ/src | 0c300228373e6b4b3c998d0ffbcbea3b0c50fe41 | c079873c182067002b6a7a5564094ea0a4fe0aef | refs/heads/master | 2020-08-12T04:34:19.714609 | 2019-10-12T17:51:33 | 2019-10-12T17:51:33 | 214,690,453 | 0 | 0 | NOASSERTION | 2019-10-12T17:50:50 | 2019-10-12T17:50:50 | null | UTF-8 | Python | false | false | 937 | py | from __future__ import print_function
import os
from argparse import ArgumentParser
parser = ArgumentParser(description='Patch some header code generation, so it builds')
parser.add_argument("-f", "--file", required=True)
parser.add_argument("-p", "--patches", required=True)
parser.add_argument("-v", "--verbose", default=False, action="store_true")
parser.add_argument("-m", "--module", required=True)
args = parser.parse_args()
if os.path.isfile(args.patches):
with open(args.file) as fin:
lines = map(str.rstrip, fin.readlines())
patches = {}
with open(args.patches) as fin:
patches = eval(fin.read())
all_lines = []
for l in lines:
l = l.replace(", ...arg0)", ", ...)")
all_lines.append(l)
import tempfile
temp = tempfile.NamedTemporaryFile(delete=False)
temp.write("\n".join(all_lines))
temp.close()
import shutil
shutil.move(temp.name, args.file)
| [
"Arnaud Diederen arnaud@hex-rays.com"
] | Arnaud Diederen arnaud@hex-rays.com |
4e6e5dc78569c25cb8bc3c04fda94b4b74c5e6b8 | 22819d9a4df8be1653c9b33b136d2b5f3864d349 | /catalog/Data_Setup.py | 42721ddc570a83364896424d27399e4269551eba | [] | no_license | Srinivasareddymediboina/catalog | 20abbc1091757754d2c4cf256761548d2d705122 | 823a8ba1064f2b8bb3016f7bd63c01674959b0e0 | refs/heads/master | 2020-05-01T03:55:03.653503 | 2019-03-23T07:41:55 | 2019-03-23T07:41:55 | 177,258,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,017 | py | import sys
import os
from sqlalchemy import Column, ForeignKey, Integer, String, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref
from sqlalchemy import create_engine
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
name = Column(String(200), nullable=False)
email = Column(String(200), nullable=False)
picture = Column(String(300))
class PerfumeCompanyName(Base):
__tablename__ = 'perfumecompanyname'
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
user_id = Column(Integer, ForeignKey('user.id'))
user = relationship(User, backref="perfumecompanyname")
@property
def serialize(self):
"""Return objects data in easily serializeable formats"""
return {
'name': self.name,
'id': self.id
}
class PerfumeName(Base):
__tablename__ = 'pefumename'
id = Column(Integer, primary_key=True)
name = Column(String(350), nullable=False)
flavour = Column(String(150))
color = Column(String(150))
cost = Column(String(150))
rlink = Column(String(500))
date = Column(DateTime, nullable=False)
perfumecompanynameid = Column(Integer, ForeignKey('perfumecompanyname.id'))
perfumecompanyname = relationship(
PerfumeCompanyName, backref=backref('pefumename', cascade='all, delete'))
user_id = Column(Integer, ForeignKey('user.id'))
user = relationship(User, backref="pefumename")
@property
def serialize(self):
"""Return objects data in easily serializeable formats"""
return {
'name': self. name,
'flavour': self. flavour,
'cost': self. cost,
'color': self. color,
'rlink': self.rlink,
'date': self. date,
'id': self. id
}
engine = create_engine('sqlite:///perfumes.db')
Base.metadata.create_all(engine)
| [
"nivas0803@gmail.com"
] | nivas0803@gmail.com |
eecaf401f781889eb02e272ac9c1e26f2aa01fd3 | c59fd33c32211b3770273a43f580726c0015f6cb | /airmozilla/main/forms.py | 0226f73164fd3af249d2562658075e8c145825f1 | [] | no_license | maciejczyzewski/airmozilla | cc9d654653ffcd950d4e4abaf4c5c2c84036cb57 | 12a79074d97e84c3f419a3776963b3ee4654425e | refs/heads/master | 2020-12-25T11:20:35.270038 | 2014-06-11T23:46:40 | 2014-06-11T23:46:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,935 | py | import datetime
from django import forms
from airmozilla.base.forms import BaseModelForm, BaseForm
from airmozilla.main.models import EventRevision
class CalendarDataForm(BaseForm):
start = forms.IntegerField()
end = forms.IntegerField()
def clean_start(self):
try:
return datetime.datetime.fromtimestamp(
self.cleaned_data['start']
)
except ValueError as x:
raise forms.ValidationError(x)
def clean_end(self):
try:
return datetime.datetime.fromtimestamp(
self.cleaned_data['end']
)
except ValueError as x:
raise forms.ValidationError(x)
def clean(self):
cleaned_data = super(CalendarDataForm, self).clean()
if 'end' in cleaned_data and 'start' in cleaned_data:
if cleaned_data['end'] <= cleaned_data['start']:
raise forms.ValidationError('end <= start')
return cleaned_data
class PinForm(BaseForm):
pin = forms.CharField(max_length=20)
def __init__(self, *args, **kwargs):
if 'instance' in kwargs:
self.instance = kwargs.pop('instance')
assert self.instance.pin, "event doesn't have a pin"
else:
self.instance = None
super(PinForm, self).__init__(*args, **kwargs)
def clean_pin(self):
value = self.cleaned_data['pin'].strip()
if value != self.instance.pin:
raise forms.ValidationError("Incorrect pin")
return value
class EventEditForm(BaseModelForm):
tags = forms.CharField(required=False)
class Meta:
model = EventRevision
exclude = ('event', 'user', 'created', 'change')
def __init__(self, *args, **kwargs):
super(EventEditForm, self).__init__(*args, **kwargs)
self.fields['placeholder_img'].required = False
self.fields['channels'].help_text = ""
| [
"mail@peterbe.com"
] | mail@peterbe.com |
91a69076b9f5e324c4b3087707fbb180b5bcbeeb | 13c5b9fc590954a4a25b9d38e8140eb83a63c9a1 | /src/bxcommon/services/extension_cleanup_service_helpers.py | d3f6a8244bf929332ca52407dcdba13c0767f6de | [
"MIT"
] | permissive | aspin/bxcommon | f746c405c693f4efb8af815cf4f9408284299e50 | 325a0844e3fc16176e90ea574eb45fff1177c527 | refs/heads/master | 2020-09-10T16:26:55.814270 | 2019-11-07T21:53:23 | 2019-11-07T21:53:23 | 221,758,675 | 0 | 0 | null | 2019-11-14T18:08:11 | 2019-11-14T18:08:10 | null | UTF-8 | Python | false | false | 2,134 | py | from datetime import datetime
import time
import typing
from bxutils import logging
from bxutils.logging.log_record_type import LogRecordType
from bxcommon.utils.proxy import task_pool_proxy
from bxcommon.services.transaction_service import TransactionService
from bxcommon.services.extension_transaction_service import ExtensionTransactionService
from bxcommon.messages.bloxroute.abstract_cleanup_message import AbstractCleanupMessage
import task_pool_executor as tpe # pyre-ignore for now, figure this out later (stub file or Python wrapper?)
logger = logging.get_logger(LogRecordType.TransactionCleanup)
def contents_cleanup(transaction_service: TransactionService,
block_confirmation_message: AbstractCleanupMessage,
cleanup_tasks
):
start_datetime = datetime.utcnow()
start_time = time.time()
tx_service = typing.cast(ExtensionTransactionService, transaction_service)
cleanup_task = cleanup_tasks.borrow_task()
cleanup_task.init(tpe.InputBytes(block_confirmation_message.buf), tx_service.proxy)
task_pool_proxy.run_task(cleanup_task)
short_ids = cleanup_task.short_ids()
total_content_removed = cleanup_task.total_content_removed()
tx_count = cleanup_task.tx_count()
message_hash = block_confirmation_message.message_hash()
tx_service.update_removed_transactions(total_content_removed, short_ids)
transaction_service.on_block_cleaned_up(message_hash)
end_datetime = datetime.utcnow()
end_time = time.time()
duration = end_time - start_time
logger.statistics(
{
"type": "MemoryCleanup",
"event": "CacheStateAfterBlockCleanup",
"data": transaction_service.get_cache_state_json(),
"start_datetime": start_datetime,
"end_datetime": end_datetime,
"duration": duration,
"total_content_removed": total_content_removed,
"tx_count": tx_count,
"short_ids_count": len(short_ids),
"message_hash": repr(message_hash),
}
)
cleanup_tasks.return_task(cleanup_task)
| [
"vc.shane@gmail.com"
] | vc.shane@gmail.com |
7cfd56cb75ae9696707e8870b029db578f876a65 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.0_rd=0.65_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=75/params.py | fde40d367ebfcc5a6486f3e5f4431fd1710da7b8 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.029357',
'max_util': '3.0',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.65',
'res_nmb': '4',
'res_weight': '0.06',
'scheduler': 'GSN-EDF',
'trial': 75,
'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
3464ba1d15269d36527b6fd68902e54095670305 | 55a273347cb103fe2b2704cb9653956956d0dd34 | /code/tmp_rtrip/test/test_structseq.py | 1ad4504770e6f37dab7e87c3e9b0a629e0057df8 | [
"MIT"
] | permissive | emilyemorehouse/ast-and-me | 4af1bc74fc967ea69ac1aed92664f6428acabe6a | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | refs/heads/master | 2022-11-18T03:50:36.505882 | 2018-05-12T17:53:44 | 2018-05-12T17:53:44 | 115,035,148 | 25 | 1 | MIT | 2022-11-04T11:36:43 | 2017-12-21T18:27:19 | Python | UTF-8 | Python | false | false | 3,595 | py | import os
import time
import unittest
class StructSeqTest(unittest.TestCase):
    """Tests for C "struct sequence" types (tuple subclasses with named
    fields), exercised through time.struct_time and os.stat_result."""

    def test_tuple(self):
        """Struct sequences index, slice and compare like plain tuples."""
        t = time.gmtime()
        self.assertIsInstance(t, tuple)
        astuple = tuple(t)
        self.assertEqual(len(t), len(astuple))
        self.assertEqual(t, astuple)
        # Exhaustively compare slices for all start/stop combinations,
        # including negative and out-of-range bounds.
        for i in range(-len(t), len(t)):
            self.assertEqual(t[i:], astuple[i:])
            for j in range(-len(t), len(t)):
                self.assertEqual(t[i:j], astuple[i:j])
        for j in range(-len(t), len(t)):
            self.assertEqual(t[:j], astuple[:j])
        # Out-of-range indices raise IndexError, just like a tuple.
        self.assertRaises(IndexError, t.__getitem__, -len(t) - 1)
        self.assertRaises(IndexError, t.__getitem__, len(t))
        for i in range(-len(t), len(t) - 1):
            self.assertEqual(t[i], astuple[i])

    def test_repr(self):
        """repr() shows the type name and the named fields."""
        t = time.gmtime()
        self.assertTrue(repr(t))
        t = time.gmtime(0)
        self.assertEqual(repr(t),
            'time.struct_time(tm_year=1970, tm_mon=1, tm_mday=1, tm_hour=0, tm_min=0, tm_sec=0, tm_wday=3, tm_yday=1, tm_isdst=0)'
        )
        # os.stat_result is another struct sequence; spot-check its repr.
        st = os.stat(__file__)
        rep = repr(st)
        self.assertTrue(rep.startswith('os.stat_result'))
        self.assertIn('st_mode=', rep)
        self.assertIn('st_ino=', rep)
        self.assertIn('st_dev=', rep)

    def test_concat(self):
        """Concatenating with a tuple yields both halves, element for element."""
        t1 = time.gmtime()
        t2 = t1 + tuple(t1)
        for i in range(len(t1)):
            self.assertEqual(t2[i], t2[i + len(t1)])

    def test_repeat(self):
        """Repetition (n * t) behaves like tuple repetition."""
        t1 = time.gmtime()
        t2 = 3 * t1
        for i in range(len(t1)):
            self.assertEqual(t2[i], t2[i + len(t1)])
            self.assertEqual(t2[i], t2[i + 2 * len(t1)])

    def test_contains(self):
        """Membership tests see every stored value."""
        t1 = time.gmtime()
        for item in t1:
            self.assertIn(item, t1)
        self.assertNotIn(-42, t1)

    def test_hash(self):
        """Hash matches the equivalent plain tuple's hash."""
        t1 = time.gmtime()
        self.assertEqual(hash(t1), hash(tuple(t1)))

    def test_cmp(self):
        """Equal struct sequences compare equal under all rich comparisons."""
        t1 = time.gmtime()
        t2 = type(t1)(t1)
        self.assertEqual(t1, t2)
        self.assertTrue(not t1 < t2)
        self.assertTrue(t1 <= t2)
        self.assertTrue(not t1 > t2)
        self.assertTrue(t1 >= t2)
        self.assertTrue(not t1 != t2)

    def test_fields(self):
        """len() equals n_sequence_fields; the type also reports unnamed
        and total field counts."""
        t = time.gmtime()
        self.assertEqual(len(t), t.n_sequence_fields)
        self.assertEqual(t.n_unnamed_fields, 0)
        self.assertEqual(t.n_fields, time._STRUCT_TM_ITEMS)

    def test_constructor(self):
        """The constructor requires a sequence of the right length and an
        actual dict for the optional ``dict`` argument."""
        t = time.struct_time
        self.assertRaises(TypeError, t)
        self.assertRaises(TypeError, t, None)
        self.assertRaises(TypeError, t, '123')
        self.assertRaises(TypeError, t, '123', dict={})
        self.assertRaises(TypeError, t, '123456789', dict=None)
        # Any 9-element sequence (even a string) is accepted.
        s = '123456789'
        self.assertEqual(''.join(t(s)), s)

    def test_eviltuple(self):
        """Exceptions raised while reading the source sequence propagate."""
        class Exc(Exception):
            pass

        class C:
            def __getitem__(self, i):
                raise Exc

            def __len__(self):
                return 9

        self.assertRaises(Exc, time.struct_time, C())

    def test_reduce(self):
        """__reduce__ produces data that reconstructs an equal object.

        BUG FIX: the original computed ``x = t.__reduce__()`` but asserted
        nothing, so this test could never fail.  Rebuild the object from
        the (callable, args) pair and compare it to the original.
        """
        t = time.gmtime()
        x = t.__reduce__()
        self.assertEqual(x[0](*x[1]), t)

    def test_extended_getslice(self):
        """Extended slicing (with a step) matches list slicing semantics."""
        t = time.gmtime()
        L = list(t)
        indices = 0, None, 1, 3, 19, 300, -1, -2, -31, -300
        for start in indices:
            for stop in indices:
                # indices[1:] skips the leading 0: a zero step is invalid.
                for step in indices[1:]:
                    self.assertEqual(list(t[start:stop:step]),
                                     L[start:stop:step])
# Allow running this test module directly: executes the TestCase above.
if __name__ == '__main__':
    unittest.main()
| [
"emily@cuttlesoft.com"
] | emily@cuttlesoft.com |
5f597ed8252beb643adaf72802bbf488000e1275 | f5bc3b2c401ed324d5d2e8c7f8f04af1cf7b6d6b | /src/Learn v1.py | 7575afde9f99b0869d6e7b6fdf1de198f8ac4083 | [] | no_license | pkumusic/HCE | 68ca4eb8bfd74ab6c3d8693706e37779bbb75752 | 9a02c058871ecc0a4a655422f87b69b1a3bc19c5 | refs/heads/master | 2021-01-10T06:21:28.741605 | 2016-10-01T23:55:15 | 2016-10-01T23:55:15 | 45,494,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,806 | py | from __future__ import division
import numpy as np
class Learn:
    """Negative-sampling embedding learner (prototype).

    dim_list      -- embedding dimension of each layer, e.g. [500, 500]
    node_num_list -- number of nodes in each layer, e.g. [100, 1000]

    NOTE(review): the sample source (``XXX`` in ``solve``) was never wired
    up in the original file; ``solve`` cannot run until it is provided.
    """

    def __init__(self, dim_list, node_num_list, batch_size, neg_sample_size, gradient_step, converge_tresh):
        layer_num = 2  # len(dim_list)  # for now always == 2
        # One matrix per layer, shaped (dim, node_num).
        # BUG FIX: the original assigned self.idToVector[i] on an empty
        # list (IndexError) and called the undefined name `sqrt`.
        self.idToVector = []
        for i in range(layer_num):
            dim = dim_list[i]
            node_num = node_num_list[i]
            vectors = np.empty((dim, node_num))
            # initialize each component to sqrt(1/dim)
            vectors[:] = np.sqrt(1 / dim)
            self.idToVector.append(vectors)
        self.dim_list = dim_list
        self.node_num_list = node_num_list
        self.batch_size = batch_size
        self.neg_sample_size = neg_sample_size
        self.gradient_step = gradient_step
        # BUG FIX: `false` is not a Python name; use False.
        self.is_converge = False
        self.converge_tresh = converge_tresh

    def init_vector(self, e):
        """Reset the vector of entity *e* to a uniform value.

        BUG FIX / NOTE(review): the original referenced the undefined
        names ``n`` and ``self.d`` and indexed ``idToVector`` by the
        entity object itself.  This version writes the row that ``solve``
        reads (``idToVector[layer][id, :]``) -- confirm the intended
        data layout.
        """
        dim = self.dim_list[e.layer]
        self.idToVector[e.layer][e.id, :] = 1 / dim

    def normalize(self, vec):
        """Return *vec* scaled to unit Euclidean norm.

        BUG FIX: the original called the undefined name ``norm``.
        """
        return vec / np.linalg.norm(vec)

    def solve(self):
        """Run SGD with negative sampling until the gradients vanish.

        NOTE(review): ``XXX`` is an unreplaced placeholder for the sample
        source; this method raises NameError until it is supplied.
        """
        samples = XXX.getNextSampleBatch(self.batch_size)  # <<< placeholder
        neg_samples = []  # batch_size lists of neg_sample_size entities
        # BUG FIX: iterate range(batch_size) (the original iterated the
        # int itself) and append instead of indexing an empty list.
        for i in range(self.batch_size):
            (e_t, e_c) = samples[i]
            # Negative samples are drawn per target entity.
            neg_samples.append(XXX.getNegativeSamples(e_t, self.neg_sample_size))  # <<< placeholder
        while not self.is_converge:
            for i in range(self.batch_size):
                (e_t, e_c) = samples[i]
                v_t = self.idToVector[e_t.layer][e_t.id, :]
                v_c = self.idToVector[e_c.layer][e_c.id, :]
                # Gradient of the negative-sampling objective.
                # BUG FIX: use np.dot/np.exp (`exp` was undefined and `*`
                # is elementwise for numpy arrays, not a dot product).
                v_t_gradient = np.exp(-np.dot(v_t, v_c)) * v_c / (1 + np.exp(-np.dot(v_t, v_c)))
                for e_i in neg_samples[i]:
                    v_i = self.idToVector[e_i.layer][e_i.id, :]
                    # NOTE(review): the original (Chinese) comment asked
                    # whether this term should be subtracted; the standard
                    # negative-sampling gradient subtracts it -- confirm.
                    v_t_gradient = v_t_gradient + np.exp(-np.dot(v_t, v_i)) * v_i / (1 + np.exp(-np.dot(v_t, v_i)))
                v_c_gradient = np.exp(-np.dot(v_t, v_c)) * v_t / (1 + np.exp(-np.dot(v_t, v_c)))
                # BUG FIX: use self.gradient_step (the bare name was
                # undefined) and write the updated vectors back into the
                # embedding tables (the original updated locals only).
                v_t = self.normalize(v_t + self.gradient_step * v_t_gradient)
                v_c = self.normalize(v_c + self.gradient_step * v_c_gradient)
                self.idToVector[e_t.layer][e_t.id, :] = v_t
                self.idToVector[e_c.layer][e_c.id, :] = v_c
                # BUG FIX: the convergence test summed the same gradient
                # twice and used undefined names; compare both gradients'
                # norms against self.converge_tresh.
                if np.linalg.norm(v_t_gradient) + np.linalg.norm(v_c_gradient) < self.converge_tresh:
                    self.is_converge = True
        return self.idToVector
if __name__ == "__main__":
    # BUG FIX: the original called Learn() with no arguments, which raises
    # TypeError because __init__ requires six parameters.  Example values:
    learn = Learn([100, 100], [1000, 1000], batch_size=64,
                  neg_sample_size=5, gradient_step=0.025, converge_tresh=1e-4)
| [
"635716260@qq.com"
] | 635716260@qq.com |
2b1365b5eee6b4656d952942b35e4f370033e6a9 | cb35c73cbbce20f5a424a87ba7b51ea0cf184eaf | /utils/image_utils.py | d15b446d0cb9447433ee10e1bd0d206ded19c930 | [] | no_license | kl456123/instance_detection | 9d3942ab3ba90a6267f282d36eba29e59052cbf8 | 3d4e822e5a5b717b1b5f7071eb85d9a04fcef6ab | refs/heads/master | 2023-03-04T07:37:57.457722 | 2022-03-25T11:09:30 | 2022-03-25T11:09:30 | 191,766,579 | 1 | 0 | null | 2022-11-22T10:22:47 | 2019-06-13T13:17:55 | Python | UTF-8 | Python | false | false | 2,628 | py | # -*- coding: utf-8 -*-
"""
some preprocessing operators
"""
import torch
def drawGaussian(pt, image_shape, sigma=2):
    """Render an unnormalized 2-D Gaussian heatmap around each keypoint.

    Args:
        pt: shape(N, M, K, 2) keypoint coordinates as (x, y)
        sigma: scalar 2 or 3 in common case; note it only controls the
            window size -- the effective std-dev is re-derived below
        image_shape: shape(2) -- assumed (height, width); TODO confirm
    Returns:
        keypoint_heatmap: shape(N, M, K, S, S) with value 1 at each
        keypoint centre (the Gaussian is deliberately not normalized)
    """
    tmpSize = 3 * sigma
    # Check that any part of the gaussian is in-bounds
    N, M, K = pt.shape[:3]
    pt = pt.view(-1, 2).long()
    # Upper-left / bottom-right corners of the (2*tmpSize+1)^2 window
    # centred on every keypoint.
    ul = torch.stack([pt[..., 0] - tmpSize, pt[..., 1] - tmpSize], dim=-1)
    br = torch.stack(
        [pt[..., 0] + tmpSize + 1, pt[..., 1] + tmpSize + 1], dim=-1)
    # NOTE(review): `cond` flags keypoints whose window lies fully outside
    # the image, but it is never used afterwards -- confirm whether those
    # keypoints should be skipped explicitly.
    cond = (ul[..., 0] >= image_shape[1]) | (ul[..., 1] >= image_shape[0]) | (
        br[..., 0] < 0) | (br[..., 1] < 0)
    # Generate gaussian
    size = 2 * tmpSize + 1
    x = torch.arange(size, dtype=torch.float)
    y = x[:, None]
    x0 = y0 = size // 2
    # NOTE: the `sigma` parameter is overwritten here, so the effective
    # width is tied to the window size rather than the caller's value.
    sigma = size / 4.0
    # The gaussian is not normalized, we want the center value to equal 1
    g = torch.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma**2)).float().to(
        pt.device)
    # Usable gaussian range
    # Per-keypoint clamp of the window to the image bounds: coordinates
    # *within the Gaussian patch* that remain visible.
    g_x_start = (-ul[..., 0]).clamp(min=0)
    g_x_end = br[..., 0].clamp(max=image_shape[1]) - ul[..., 0]
    g_y_start = (-ul[..., 1]).clamp(min=0)
    g_y_end = br[..., 1].clamp(max=image_shape[0]) - ul[..., 1]
    # g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]
    # g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]
    # Image range
    # Matching destination coordinates inside the output image.
    img_x_start = ul[..., 0].clamp(min=0)
    img_y_start = ul[..., 1].clamp(min=0)
    img_x_end = br[..., 0].clamp(max=image_shape[1])
    img_y_end = br[..., 1].clamp(max=image_shape[0])
    # img_x = max(0, ul[0]), min(br[0], img.shape[1])
    # img_y = max(0, ul[1]), min(br[1], img.shape[0])
    # assign from gaussian distribution
    # img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
    h, w = image_shape
    img = torch.zeros(N, M, K, h, w).type_as(pt).float()
    # Build boolean masks selecting, per keypoint, the valid region of the
    # Gaussian patch and the corresponding region of the output image,
    # then copy in a single vectorised assignment.
    g_index = torch.nonzero(g > 0).unsqueeze(-1)
    g_cond = (g_index[:, 1] >= g_x_start) & (g_index[:, 0] >= g_y_start) & (
        g_index[:, 1] < g_x_end) & (g_index[:, 0] < g_y_end)
    img_index = torch.nonzero(img.view(-1, h, w)[0] > -1).unsqueeze(-1)
    img_cond = (img_index[:, 1] >= img_x_start) & (
        img_index[:, 0] >= img_y_start) & (img_index[:, 1] < img_x_end) & (
            img_index[:, 0] < img_y_end)
    img_cond = img_cond.transpose(0, 1).view(N, M, K, h, w)
    g_cond = g_cond.transpose(0, 1).view(N, M, K, g.shape[0], g.shape[1])
    # import ipdb
    # ipdb.set_trace()
    img[img_cond] = g.expand_as(g_cond)[g_cond]
    return img
| [
"liangxiong@deepmotion.ai"
] | liangxiong@deepmotion.ai |
9354fa5a3102914c528a016c37ea8b17f81643f1 | 0211406df71484eefd31e464667ef4d0ddeeb23e | /tracerbullet/helpers.py | 17412424f0abe4051f9d15cc798819f193a74894 | [] | no_license | adewes/tracey | d557b263693046f4094108c6c33002609a25dcd6 | 7dcc2c24a08b86290178d8ca4a6b3cc089e5eff0 | refs/heads/master | 2021-01-01T18:56:01.277460 | 2014-09-27T22:51:25 | 2014-09-27T22:51:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | import os
import json
def get_project_path(path=None):
    """Walk up from *path* (default: the current working directory) and
    return the first ancestor containing a ``.tracerbullet`` directory.

    Returns ``None`` when no enclosing project directory is found.
    """
    if not path:
        path = os.getcwd()
    while True:
        # Check the marker directory directly instead of scanning the
        # whole listing (the original ran os.listdir at every level).
        if os.path.isdir(os.path.join(path, ".tracerbullet")):
            return path
        parent = os.path.dirname(path)
        if parent == path:
            # Reached the filesystem root.  BUG FIX: the original loop
            # condition `path != "/"` never terminates on Windows, where
            # roots look like "C:\\".
            return None
        path = parent
def get_project_config(path):
    """Return the project configuration parsed from ``config.json``
    located under *path*."""
    with open(path + "/config.json", "r") as handle:
        return json.load(handle)
def save_project_config(path, config):
    """Serialise *config* as JSON into ``config.json`` under *path*."""
    serialized = json.dumps(config)
    with open(path + "/config.json", "w") as handle:
        handle.write(serialized)
| [
"andreas.dewes@gmail.com"
] | andreas.dewes@gmail.com |
395f6bf5838d88f4c5e1ef94639f2d06410c6221 | baf3996414315ffb60470c40c7ad797bf4e6897f | /10_front_dev/15_livrao_Microservices_with_Docker_Flask_and React_577p_code/services/exercises/project/__init__.py | 94ffee9bb7ac3e1e8b4be741f59d3dab288b0a1e | [
"MIT"
] | permissive | thiago-allue/portfolio | 8fbbecca7ce232567aebe97c19944f444508b7f4 | 0acd8253dc7c5150fef9b2d46eead3db83ca42de | refs/heads/main | 2023-03-15T22:10:21.109707 | 2022-09-14T17:04:35 | 2022-09-14T17:04:35 | 207,919,073 | 0 | 0 | null | 2019-11-13T18:18:23 | 2019-09-11T22:40:46 | Python | UTF-8 | Python | false | false | 994 | py | # services/exercises/project/__init__.py
import os
from flask import Flask
from flask_cors import CORS
from flask_debugtoolbar import DebugToolbarExtension
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
# instantiate the extensions
# Created unbound so they can be attached lazily inside create_app() via
# init_app() -- the standard Flask application-factory pattern.
db = SQLAlchemy()
migrate = Migrate()
toolbar = DebugToolbarExtension()
def create_app(script_info=None):
    """Application factory: build, configure and return the Flask app.

    Args:
        script_info: supplied by the Flask CLI; unused here but kept so
            ``FLASK_APP="...:create_app"`` keeps working.
    Returns:
        The configured Flask application instance.
    """
    # instantiate the app
    app = Flask(__name__)

    # enable CORS
    CORS(app)

    # set config from the object named by the APP_SETTINGS env var
    app_settings = os.getenv('APP_SETTINGS')
    app.config.from_object(app_settings)

    # set up extensions (bound here rather than at import time)
    toolbar.init_app(app)
    db.init_app(app)
    migrate.init_app(app, db)

    # register blueprints (imported lazily to avoid circular imports)
    from project.api.base import base_blueprint
    app.register_blueprint(base_blueprint)
    from project.api.exercises import exercises_blueprint
    app.register_blueprint(exercises_blueprint)

    # shell context for flask cli
    # BUG FIX: shell_context_processor registers a *callable* returning a
    # dict; the original passed the dict itself, which made `flask shell`
    # fail ("'dict' object is not callable") when processors run.
    @app.shell_context_processor
    def shell_context():
        return {'app': app, 'db': db}

    return app
| [
"thiago.allue@yahoo.com"
] | thiago.allue@yahoo.com |
a252019b8e7b8354c2d17febc390d7c0f0544e70 | 40132307c631dccbf7aa341eb308f69389715c73 | /OLD/idmt/maya/RIG/tools/IOWeights/IOWeightsUI.py | 81882e731c76ce998c1dc6b2bd9b3fff6277d844 | [] | no_license | Bn-com/myProj_octv | be77613cebc450b1fd6487a6d7bac991e3388d3f | c11f715996a435396c28ffb4c20f11f8e3c1a681 | refs/heads/master | 2023-03-25T08:58:58.609869 | 2021-03-23T11:17:13 | 2021-03-23T11:17:13 | 348,676,742 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,635 | py | #-*- coding: utf-8 -*-
import maya.cmds as rig
from RIG.tools.IOWeights.IOMainFun import *
from RIG.simulation.simulationMain import SM_warning
from RIG.commonly.base import SK_getSkinCluster
class SK_IOWeightsUI(object):
    """Maya window for exporting/importing skin weights.

    UI labels are Chinese; translations are given in comments.  The
    export features are gated to Maya 2011/2012.
    """
    def __init__(self):
        # Build and show the window immediately on construction.
        self.displayUI()
    def displayUI(self):
        # Window title: "Import/Export Weights Tool 1.0".
        # Buttons, top to bottom: export weights of the selected objects;
        # export weights of every skinned polygon object in the scene;
        # import weights.
        IDMTRigGUI='IOWeightsUI'
        if rig.window(IDMTRigGUI,exists=True):
            rig.deleteUI(IDMTRigGUI)
        rig.window(IDMTRigGUI,title= u'导入导出权重工具1.0',menuBar=True,wh= (325,500),minimizeButton=True,maximizeButton=True)
        self.mainCLT = rig.columnLayout()
        rig.button(l = u'导出选择的物体的权重',w = 320,c = lambda x:self.exportWeights())
        rig.button(l = u'导出场景中所有蒙皮的polygon物体权重',w = 320,c = lambda x:self.exportAllPloygon())
        rig.separator(w = 312,h=15,style='in')
        rig.button(l = u'导入权重',w = 320,c = lambda x:self.importWeigths())
        rig.showWindow(IDMTRigGUI)
        rig.window(IDMTRigGUI,e=True,wh=(330,110))
    #--------------------------------------------------------------- list all skinned objects in the scene
    def allPolygon(self):
        # Collect every mesh transform that carries a skinCluster.
        # Returns the list of transforms, or False when none qualify.
        allMesh = rig.ls(type = 'mesh')
        allMeshTransforms = [rig.listRelatives(mesh,p = True)[0] for mesh in allMesh]
        getMesh = []
        for mesh in allMeshTransforms:
            if 1 == len(rig.ls(mesh)):# uniqueness check: skip duplicated (non-unique) names
                if not(mesh in getMesh) and SK_getSkinCluster(mesh):
                    getMesh.append(mesh)
            else:
                # Warning text: "export failed! object <mesh> has a duplicated name"
                rig.warning(u'导出失败! 物体:'+mesh+u'有重命名')
        if getMesh:
            return getMesh
        else:
            return False
    #------------------------------------------------------------- export all polygon objects
    def exportAllPloygon(self):
        version = rig.about(v = True)
        if '2011' == version.split()[0] or '2012' == version.split()[0]:# Maya version gate
            objs = self.allPolygon()
            if objs:
                IO_exportWeights(objs)
            else:
                # "no skinned objects found in the scene"
                SM_warning(u'场景中没有找到蒙皮物体')
        else:
            # "this feature requires Maya 2011 or later"
            SM_warning(u'此功能仅maya2011以上版本可用')
    def exportWeights(self):
        # Export weights of the current selection (Maya 2011/2012 only).
        version = rig.about(v = True)
        if '2011' == version.split()[0] or '2012' == version.split()[0]:# Maya version gate
            IO_exportWeights(False)
        else:
            # "this feature requires Maya 2011 or later"
            SM_warning(u'此功能仅maya2011以上版本可用')
    def importWeigths(self):
        # NOTE(review): method name is misspelled ("Weigths"), but the
        # button callback in displayUI uses the same spelling, so any
        # rename must update both sites.
        IO_importWeights()
| [
"snakelonely@outlook.com"
] | snakelonely@outlook.com |
1b831a4be209d24a894f5517e77c542c5fc2ed17 | 3e5c7a50996be69570bf4bf7284836732dd57bf0 | /pytsammalex/commands.py | 699409b0d4d0f549c691f31a2a706a80f07c7571 | [
"CC-BY-4.0"
] | permissive | tsammalex/tsammalex | 844ac880f6ec856d533117e945c4e4d21911ba13 | 4b0bace93afef58af8f02275962a0a93499d0267 | refs/heads/master | 2021-09-16T21:39:56.394565 | 2018-06-25T11:50:43 | 2018-06-25T11:50:43 | 17,407,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,744 | py | from __future__ import print_function, unicode_literals, absolute_import, division
import os
from cdstarcat.catalog import Catalog
from tqdm import tqdm
from clldutils.clilib import command
from pytsammalex.util import MediaCatalog, add_rows, filter_rows, data_file
from pytsammalex.data_providers.gbif import GBIF
from pytsammalex.data_providers.catalogueoflife import CatalogueOfLife
from pytsammalex.data_providers.eol import EOL
from pytsammalex.taxa import TaxaData
from pytsammalex import distribution
from pytsammalex.image_providers import PROVIDERS
from pytsammalex import models
@command()
def update_taxa(args):
    """
    Update the supplemental data for taxa from external sources.

    We go through the taxa listed in taxa.csv and look for additional
    information at GBIF, EOL and Catalogue Of Life.

    :param args: CLI argument namespace; only ``args.tsammalex_data``
        (path to the data repository) is used here.
    """
    with TaxaData(repos=args.tsammalex_data) as taxa:
        # add stubs for new entries in taxa.csv:
        for i, item in enumerate(models.CsvData('taxa', repos=args.tsammalex_data)):
            taxa.add(i, item)
        # Query each external provider in turn and let it enrich every taxon.
        for cls in [CatalogueOfLife, GBIF, EOL]:
            print(cls.__name__)
            with cls(args.tsammalex_data) as provider:
                for spec in tqdm(taxa, leave=False):
                    provider.update_taxon(spec)
@command()
def upload_images(args):
    """
    tsammalex upload_images path/to/cdstar/catalog

    Upload staged images (staged_images.csv) to CDSTAR, append the
    resulting records to images.csv and drop uploaded rows from
    staged_images.csv.  CDSTAR credentials are read from the
    CDSTAR_URL/CDSTAR_USER/CDSTAR_PWD environment variables.
    """
    images_path = data_file('images.csv', repos=args.tsammalex_data)
    staged_images_path = data_file('staged_images.csv', repos=args.tsammalex_data)
    # ids of images already uploaded -- used by providers to skip duplicates
    checksums = set(d.id for d in models.CsvData('images', repos=args.tsammalex_data))
    providers = [prov(args.tsammalex_data) for prov in PROVIDERS]
    with MediaCatalog(
            'cdstar.json', repos=args.tsammalex_data, json_opts=dict(indent=4)) as mcat:
        with Catalog(
                args.args[0],
                cdstar_url=os.environ['CDSTAR_URL'],
                cdstar_user=os.environ['CDSTAR_USER'],
                cdstar_pwd=os.environ['CDSTAR_PWD']) as cat:
            for item in models.CsvData('staged_images', repos=args.tsammalex_data):
                # The first provider that recognises the item handles it.
                for provider in providers:
                    if item in provider:
                        img = provider.retrieve(item, cat, checksums, mcat)
                        if img:
                            try:
                                add_rows(images_path, img.csv_row())
                            except Exception:
                                # BUG FIX: was a bare `except:`; narrowed so
                                # SystemExit/KeyboardInterrupt pass through
                                # untouched.  The offending record is still
                                # printed for debugging before re-raising.
                                print(img)
                                raise
                        # Remove the handled row from the staging file.
                        filter_rows(staged_images_path, lambda d: d['id'] != item.id)
                        break
@command()
def update_distribution(args):
    """Recompute taxon distribution data by delegating to
    ``pytsammalex.distribution.update`` with the data repo and logger."""
    distribution.update(args.tsammalex_data, args.log)
| [
"xrotwang@googlemail.com"
] | xrotwang@googlemail.com |
9f863cf626599c64aa76b69984e0aff2715a066f | 868cd4895a8da17a7e3e2c8da0ec9e139f8d0c30 | /homework/데이터 과학/p029_1_counter.py | 10a63af422d34e102893a22a50e6379fe433c49e | [] | no_license | inJAJA/Study | 35d4e410df7b476a4c298664bb99ce9b09bf6296 | c2fd9a1e1f3a31cb3737cbb4891d848cc802f1d4 | refs/heads/master | 2022-12-21T11:41:15.396610 | 2020-09-20T23:51:45 | 2020-09-20T23:51:45 | 263,212,524 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | """
# Counter
: 연속된 값을 defaultdict(int)와 유사한 객체로 젼환해 준다.
key와 value의 빈도를 연결시켜 줌
"""
from collections import Counter

# Counter maps each distinct key to its number of occurrences.
c = Counter([0, 1, 2, 0])  # c = {0: 2, 1: 1, 2: 1}

# `document` is a list of words.
# BUG FIX: the original used `document` without ever defining it, which
# raises NameError at run time; a small example corpus is supplied so the
# script actually runs.  (Original comments were in Korean; translated.)
document = "the quick brown fox jumps over the lazy dog the".split()
word_counts = Counter(document)

""" most_common """
# Print the 10 most frequent words together with their counts.
for word, count in word_counts.most_common(10):
    print(word, count)
"zaiin4050@gmail.com"
] | zaiin4050@gmail.com |
f0b68cdbcaccd984f4b20eee1d352aef049b3440 | 9e11839f2396e2aa2a20cffcf32683c06063236c | /Exercise/e1/e1/submit/pd_summary.py | 6a95a2c01322c7c99d0b3b0a1a50589ca5890780 | [] | no_license | zhonghong030/SFU_CMPT353 | 11d06ef01f588686e1ffa2b2cd8892b63f800d2c | 076b986ee22f34c4151089317fa2644184597b04 | refs/heads/master | 2022-12-16T13:41:33.858075 | 2020-09-04T04:45:52 | 2020-09-04T04:45:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | import pandas as pd
# Monthly precipitation totals and observation counts, indexed by city name.
totals = pd.read_csv('totals.csv').set_index(keys=['name'])
counts = pd.read_csv('counts.csv').set_index(keys=['name'])

# BUG FIX: the original evaluated each summary as a bare expression, which
# prints nothing when run as a script -- only the label lines appeared.
# Each result is now passed to print() so the answers are actually shown.
print("City with lowest total precipitation:")
print(totals.sum(axis=1).idxmin())

print("Average precipitation in each month:")
# total precipitation per month / number of observations per month
print(totals.sum(axis=0).div(counts.sum(axis=0)))

print("Average precipitation in each city:")
# total precipitation per city / number of observations per city
print(totals.sum(axis=1).div(counts.sum(axis=1)))
| [
"ison@sfu.ca"
] | ison@sfu.ca |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.