hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f78374a6b9c098ca930042e6331630796196647c | 4,902 | py | Python | temp logger complete.py | nevillethenev/Beer | a8fae43e7b2f846e208daad4a9b025703f0acb2a | [
"Unlicense"
] | null | null | null | temp logger complete.py | nevillethenev/Beer | a8fae43e7b2f846e208daad4a9b025703f0acb2a | [
"Unlicense"
] | null | null | null | temp logger complete.py | nevillethenev/Beer | a8fae43e7b2f846e208daad4a9b025703f0acb2a | [
"Unlicense"
] | null | null | null | #/usr/bin/python
#### NEV'S BEER SCRIPT -- strike-water calculator and mash temperature logger.
#### Python 2 script: talks to an Arduino thermometer over serial (COM3,
#### 9600 baud), computes the strike-water temperature for a mash, waits for
#### the water to reach it, then logs temperature vs. time to a text file.
#### NOTE(review): indentation was reconstructed from context (the source copy
#### had lost it); verify block structure against the original before running.
import serial
import time
import matplotlib.pyplot as plt
import numpy as np
import os
"""""""""""""""""""""""""""""""""""
"""""""NEVS BEER SCRIPT""""""""""""
"""""""""""""""""""""""""""""""""""
###need to add exception handler for serial disconnection
## SETUP SERIAL PORT
# Try up to three times to open the Arduino's serial port, pausing for user
# intervention between attempts.
try:
    ser = serial.Serial('COM3',9600) # open serial port
    print('Serial connection established!')
except:
    print('ERR: Unable to connect to arduino...retrying')
    time.sleep(3)
    try:
        ser = serial.Serial('COM3',9600)
    except:
        raw_input('ERR: Unable to connect to arduino....check connections and press Enter to continue')
        try:
            ser = serial.Serial('COM3',9600)
        except:
            raw_input('ERR: Unable to connect to arduino...Press Enter to exit..')
## STRIKE WATER CALCULATOR
##strike water calculator
##volume of water is heated inside an insulated mash tun
##grain is added to mash tun
## Tw = (Tm((Sw*mw)+(Sg*mg))-(Sg*mg*Tg))/(Sw*mw)
## Tw = strike water temp.
## Tm = mash temp.
Sw = 1; ##Specific heat water
Sg = 0.4; ##Specific heat grain
# Gather brew parameters from the user.  NB: Python 2 input() evaluates the
# typed expression, so numeric entries come back as int/float.
beername = raw_input("Please enter the name of the beer:")
Tm = input("Mash Temp.(\xb0C)")
Vw = input("Water Volume(L)")
mw = Vw; ##mass water(kg) = volume water(L)
mg = input("Grain mass(kg)")
Tg = input("Grain temp.(\xb0C)")
print("Calculating...")
time.sleep(1)
# Heat balance: the strike water must carry enough energy to bring the grain
# up to the target mash temperature.
Tw = (Tm*((Sw*mw)+(Sg*mg))-(Sg*mg*Tg))/(Sw*mw)
Tw = round(Tw,1)
##print "Strike temp.(\xb0C) = "+str(Tw)
## MASH INSTRUCTIONS
print 'Set strike temperature to ' + str(Tw) + '\xb0C'
raw_input('Press Enter to continue...')
temperaturefloat = 0
##measure temperature
# Poll the thermometer until the strike temperature is reached.  Each reading
# is 7 bytes on the serial line, parsed as a float.
while True:
    try:
        temperaturefloat = round(float((ser.read(7))),1) #read
    except: ##handle all serial read errors
        # Reopen the port after a failed read; if reopening also fails,
        # close the handle and open it again from scratch.
        try:
            ser = serial.Serial('COM3',9600) # open serial port
        except:
            ser.close()
            ser = serial.Serial('COM3',9600) # open serial port
        temperaturefloat = 0
        time.sleep(0.1)
    print str(temperaturefloat) + '\xb0C'
    time.sleep(0.1)
    ## if temperaturefloat > Tm: #### check temperature 5 times
    ## dragon = np.ones(5)
    ## for i in range(0,4):
    ## try:
    ## temperaturefloat = round(float(ser.read(7)),1)
    ## except: ##handle all serial read errors
    ## temperaturefloat = 0
    ##
    ## if temperaturefloat < 0:
    ## temperaturefloat = 0
    ##
    ## print str(temperaturefloat) + '\xb0C'
    ## dragon[i] = temperaturefloat
    ## print str(dragon)
    ## time.sleep(0.1)
    ## if sum(dragon)/5 > Tm:
    ## print 'SUCCESS'
    ## break
    if temperaturefloat > Tm:
        print 'Stike temperature reached! Please stir the water and prepare grain for submersion...'
        mashtime1 = 60*input('Enter total mash time (min):')
        raw_input('Submerge grain and press enter to coninue...')
        print 'Mash in progress, please wait ' + str(mashtime1/60) + ' minutes...'
        break
## TEMPERATURE LOGGING
ser.close() ## restart Com port
ser = serial.Serial('COM3',9600)
print 'Temp(\xb0C)\tTime(s)'
nowtimefloat = 0
temperaturefloat = 0
#read from serial and exit when user wants
# Log (temperature, elapsed-time) pairs to templog.txt until the mash timer
# expires.  NOTE(review): the file is reopened in append mode on every
# iteration without being closed, leaking handles until the final close()
# in the else-branch.
while nowtimefloat < mashtime1:
    try:
        temperaturefloat = round(float((ser.read(7))),1) #read
    except: ##handle all serial read errors
        try:
            ser = serial.Serial('COM3',9600) # open serial port
        except:
            ser.close()
            ser = serial.Serial('COM3',9600) # open serial port
        temperaturefloat = 0
        time.sleep(0.1)
    # NOTE(review): time.clock() is platform-dependent (wall time since first
    # call on Windows, CPU time elsewhere) and deprecated in Python 3.
    nowtimefloat = round(time.clock(),1)
    nowtimestring = str(nowtimefloat)
    temperaturesting = str(temperaturefloat)
    goblin = open('templog.txt','a') #open txt file
    datastring = temperaturesting + '\t' + nowtimestring + '\n'
    print(datastring) #print temp to console
    goblin.write(datastring)
    ## goblin.flush()
    ## ser.close() # close port
else:
    # while/else: this branch runs when the timer condition turns false
    # (there is no break in the loop body, so it always runs).
    print "Mash complete!"
    raw_input('Press Enter to save the data..')
    goblin.close()
    os.rename('templog.txt',beername + 'templog.txt')
    print 'Data saved!'
    raw_input('Press Enter to exit...')
## DATA ANALYSIS
##plt.axis([0,3600,55,75])
###temperature lines
##plt.hlines(70,0,3600,colors='r')
##plt.hlines(60,0,3600,colors='r')
##
##dragon = np.loadtxt('templog.txt', delimiter="\t")
##x = dragon[:,1]
##y = dragon[:,0]
##
##plt.scatter(x,y)
####plt.draw()
##plt.show()
##plt.waitforbuttonpress()
####plt.pause(0.1)
##
##raw_input('Press Enter to exit...')
##
| 29.178571 | 104 | 0.563035 |
f78376ac5696d0e27ff83ec0f818efffebaf1f64 | 874 | py | Python | src/domain/usecases/get_all_glaucomatous_images_paths.py | OzielFilho/ProjetoFinalPdi | c9e6fe415f1a985d6eeac204580d3ab623026665 | [
"MIT"
] | null | null | null | src/domain/usecases/get_all_glaucomatous_images_paths.py | OzielFilho/ProjetoFinalPdi | c9e6fe415f1a985d6eeac204580d3ab623026665 | [
"MIT"
] | null | null | null | src/domain/usecases/get_all_glaucomatous_images_paths.py | OzielFilho/ProjetoFinalPdi | c9e6fe415f1a985d6eeac204580d3ab623026665 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from domain.errors.failure import Failure
from domain.errors.image_failure import ImageFailure
from domain.repositories.image_repository_abstraction import ImageRepositoryAbstraction
| 32.37037 | 87 | 0.756293 |
f783bb5a51fe8b1b4c8ff1d1556d8997b3dd57bd | 944 | py | Python | numsgraph.py | FNut/PyDev | f591aa6ace1b9032e4a9159c03478571c75a38b1 | [
"MIT"
] | 2 | 2021-01-21T12:54:50.000Z | 2021-12-26T13:45:19.000Z | numsgraph.py | FNut/PyDev | f591aa6ace1b9032e4a9159c03478571c75a38b1 | [
"MIT"
] | 12 | 2021-01-21T14:12:02.000Z | 2021-02-07T06:12:44.000Z | numsgraph.py | FNut/PyDev | f591aa6ace1b9032e4a9159c03478571c75a38b1 | [
"MIT"
] | null | null | null | import pygame
import math
# Small pygame viewer that displays several famous mathematical constants and
# integer sequences (Pi, e, Fibonacci, Pell, Lucas, Pell-Lucas) in a window.
# NOTE(review): `import pygame` sits on the preceding line of the original
# file; indentation of the loop body below was reconstructed from context.
pygame.init()
# Pre-build the label strings once, outside the render loop.
pi = ('Pi = ' + str(math.pi))
e = ('E = ' + str(math.e))
f = ('F = 0,1,1,2,3,5,8,13...')
p = ('P = 1,2,5,12,29...')
l = ('L = 2,1,3,4,7,11,18,29...')
pl = ('P-L = 2,6,14,34,82...')
display = pygame.display.set_mode((800,600))
pygame.display.set_caption('Nums')
font = pygame.font.SysFont('None', 72)
# Render each label once, green on transparent, at size 72.
pitxt = font.render(pi, 0, (0,255,0))
etxt = font.render(e, 0, (0,255,0))
ftxt = font.render(f, 0, (0,255,0))
ptxt = font.render(p, 0, (0,255,0))
ltxt = font.render(l, 0, (0,255,0))
pltxt = font.render(pl, 0, (0,255,0))
run = True
while run:
    # Main event loop: exit cleanly when the window is closed.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
    # NOTE(review): update() is called before the blits, so each frame shows
    # the previous iteration's drawing (the very first frame is blank);
    # blitting before updating would be the conventional order.
    pygame.display.update()
    display.blit(pitxt, (0,0))
    display.blit(etxt, (0,40))
    display.blit(ftxt, (0,80))
    display.blit(ptxt, (0,120))
    display.blit(ltxt, (0,160))
    display.blit(pltxt, (0,200))
pygame.quit()
| 29.5 | 45 | 0.559322 |
f783f229f95c2d9280dddc95def69a100dcd1954 | 5,980 | py | Python | scoap3/modules/tools/tasks.py | Lilykos/scoap3-next | b779b8c32504e09b2c5330aa6a18e1e1c9affd33 | [
"CC-BY-4.0"
] | 1 | 2021-08-04T09:48:38.000Z | 2021-08-04T09:48:38.000Z | scoap3/modules/tools/tasks.py | Lilykos/scoap3-next | b779b8c32504e09b2c5330aa6a18e1e1c9affd33 | [
"CC-BY-4.0"
] | 158 | 2018-09-10T07:31:14.000Z | 2022-03-30T07:18:51.000Z | scoap3/modules/tools/tasks.py | Lilykos/scoap3-next | b779b8c32504e09b2c5330aa6a18e1e1c9affd33 | [
"CC-BY-4.0"
] | 9 | 2015-04-28T11:55:04.000Z | 2021-09-28T12:14:53.000Z | import io
import csv
import logging
from StringIO import StringIO
from datetime import datetime
from gzip import GzipFile
import boto3
from celery import shared_task
from flask import current_app
from flask_mail import Attachment
from invenio_mail.api import TemplatedMessage
logger = logging.getLogger(__name__)
def encode_element(element):
    """
    Prepare a single cell value for CSV output.

    Text values are encoded to UTF-8 byte strings, ``None`` becomes an
    empty string, and every other value is passed through unchanged.
    """
    if element is None:
        return ""
    if not isinstance(element, basestring):
        return element
    return element.encode('utf-8')
def to_csv(data):
    """
    Serialize generated tool data to CSV.

    :param data: dictionary with a 'header' key (list of column names) and
        a 'data' key (list of rows, each row a list of values).
    :return: ('text/csv', serialized_value) 2-tuple: the MIME type and the
        CSV text.
    :raises ValueError: if *data* is falsy or lacks the required keys.
    """
    if not (data and 'header' in data and 'data' in data):
        raise ValueError('Invalid parameter to be serialized.')

    output = StringIO()
    writer = csv.writer(output, delimiter=";", quoting=csv.QUOTE_ALL)
    writer.writerow(data['header'])
    # Encode every cell (see encode_element) before writing the data rows.
    writer.writerows([encode_element(cell) for cell in row]
                     for row in data['data'])
    return 'text/csv', output.getvalue()
def send_result(result_data, content_type, recipients, tool_name):
    """
    Deliver a generated tool export to the user who requested it.

    The serialized export is uploaded to the configured S3 bucket with a
    public-read ACL, and an email containing the download link is sent to
    the requester.

    :param result_data: generated data in a serialized form.
    :param content_type: MIME type of the data (kept for interface
        compatibility; unused since email attachments were replaced by
        S3 download links).
    :param recipients: recipients who will receive the email.
    :param tool_name: name of the tool, used in the subject of the email.
    """
    generated_at = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
    export_name = 'scoap3_export_%s_%s.csv' % (tool_name, generated_at)

    # Upload the export to S3 and make it publicly readable so the link in
    # the notification email works without authentication.
    s3_host = current_app.config.get('S3_HOSTNAME')
    s3_bucket = current_app.config.get('S3_BUCKET')
    s3_resource = boto3.resource('s3', endpoint_url='http://s3.cern.ch/')
    s3_resource.meta.client.upload_fileobj(
        io.BytesIO(result_data),
        s3_bucket,
        export_name,
        ExtraArgs={'ACL': 'public-read'}
    )
    download_url = "{}/{}/{}".format(s3_host, s3_bucket, export_name)

    notification = TemplatedMessage(
        template_html='scoap3_tools/email/result.html',
        ctx={'attachment_url': download_url},
        subject='SCOAP3 - Export %s result' % tool_name,
        sender=current_app.config.get('MAIL_DEFAULT_SENDER'),
        recipients=recipients,
    )
    current_app.extensions['mail'].send(notification)
def send_failed_email(recipients, tool_name, task_id=None):
    """
    Notify the user that an export generation failed.

    :param recipients: recipients who will receive the email.
    :param tool_name: name of the tool, used in the subject of the email.
    :param task_id: celery task id, if available (rendered in the template).
    """
    message_kwargs = {
        'template_html': 'scoap3_tools/email/failed.html',
        'subject': 'SCOAP3 - Export %s result error' % tool_name,
        'sender': current_app.config.get('MAIL_DEFAULT_SENDER'),
        'recipients': recipients,
        'ctx': {'task_id': task_id},
    }
    failure_notice = TemplatedMessage(**message_kwargs)
    current_app.extensions['mail'].send(failure_notice)
| 37.610063 | 112 | 0.678595 |
f78676da21ba7106ed5e99f74d32df70174e47d8 | 756 | py | Python | telegram_bot/handlers/commands/detailed_mode.py | ProgrammingLanguageLeader/MathematicianBot | a4627962a6c8bfac76013d80780997ab4b0f7952 | [
"MIT"
] | null | null | null | telegram_bot/handlers/commands/detailed_mode.py | ProgrammingLanguageLeader/MathematicianBot | a4627962a6c8bfac76013d80780997ab4b0f7952 | [
"MIT"
] | 16 | 2018-03-05T14:25:16.000Z | 2022-03-11T23:46:56.000Z | telegram_bot/handlers/commands/detailed_mode.py | ProgrammingLanguageLeader/MathematicianBot | a4627962a6c8bfac76013d80780997ab4b0f7952 | [
"MIT"
] | null | null | null | from system.db import db
from telegram_bot.handlers.utils.decorators import remember_new_user, \
send_typing, write_logs
from telegram_bot.handlers.utils.menu_entries import MenuEntry
from telegram_bot.handlers.utils.reply_markup import create_main_reply_markup
from telegram_bot.models import User
| 30.24 | 77 | 0.756614 |
f788b1d1658062d96ad83c42b9cd26071a4b8418 | 374 | py | Python | my_spotless_app/migrations/0002_alter_service_picture_url.py | AntociM/Spotless | 8cd2d7f76eccee046d42f7a836cf91af04527186 | [
"ADSL"
] | null | null | null | my_spotless_app/migrations/0002_alter_service_picture_url.py | AntociM/Spotless | 8cd2d7f76eccee046d42f7a836cf91af04527186 | [
"ADSL"
] | 29 | 2022-01-22T19:05:56.000Z | 2022-03-01T08:57:14.000Z | my_spotless_app/migrations/0002_alter_service_picture_url.py | AntociM/Project-4 | 8cd2d7f76eccee046d42f7a836cf91af04527186 | [
"ADSL"
] | 1 | 2022-03-02T11:00:59.000Z | 2022-03-02T11:00:59.000Z | # Generated by Django 3.2 on 2022-02-27 11:38
from django.db import migrations, models
| 19.684211 | 45 | 0.590909 |
f788b46ad9abee669c31dc3a176027a5ef06cdbd | 8,983 | py | Python | robot/TTS.py | mluyuchen/wukong-robot | 67f5cdb06db9e5e256017925a5efe6721cb2bd1d | [
"MIT"
] | 8 | 2021-02-01T06:33:49.000Z | 2022-02-02T11:06:58.000Z | robot/TTS.py | mluyuchen/wukong-robot | 67f5cdb06db9e5e256017925a5efe6721cb2bd1d | [
"MIT"
] | 1 | 2020-06-10T10:59:02.000Z | 2020-06-10T10:59:02.000Z | robot/TTS.py | mluyuchen/wukong-robot | 67f5cdb06db9e5e256017925a5efe6721cb2bd1d | [
"MIT"
] | 6 | 2021-01-20T03:22:19.000Z | 2022-03-21T14:19:32.000Z | # -*- coding: utf-8-*-
import os
import base64
import tempfile
import pypinyin
from aip import AipSpeech
from . import utils, config, constants
from robot import logging
from pathlib import Path
from pypinyin import lazy_pinyin
from pydub import AudioSegment
from abc import ABCMeta, abstractmethod
from .sdk import TencentSpeech, AliSpeech, XunfeiSpeech, atc
logger = logging.getLogger(__name__)
def get_engine_by_slug(slug=None):
    """
    Find and instantiate the TTS engine whose SLUG matches *slug*.

    Returns:
        A TTS Engine implementation available on the current platform
    Raises:
        TypeError: if *slug* is missing or not a string
        ValueError: if no engine matches the given slug
    """
    # NOTE(review): the message strings below appear truncated (non-ASCII
    # text seems to have been stripped from this copy); left as-is.
    if not slug or type(slug) is not str:
        # Bug fix: TypeError was previously called with two positional
        # arguments, producing a tuple message instead of interpolating slug.
        raise TypeError(" TTS slug '%s'" % slug)
    selected_engines = [engine for engine in get_engines()
                        if hasattr(engine, "SLUG") and engine.SLUG == slug]
    if not selected_engines:
        raise ValueError(" {} TTS ".format(slug))
    if len(selected_engines) > 1:
        # Bug fix: .format() was previously applied to logger.warning()'s
        # return value (None), raising AttributeError whenever more than one
        # engine matched.
        logger.warning(": TTS {} ".format(slug))
    engine = selected_engines[0]
    logger.info(" {} TTS ".format(engine.SLUG))
    return engine.get_instance()
def get_engines():
    """Return every known TTS engine class that declares a non-empty SLUG."""
    engines = []
    for candidate in get_subclasses(AbstractTTS):
        if hasattr(candidate, 'SLUG') and candidate.SLUG:
            engines.append(candidate)
    return engines
| 32.665455 | 155 | 0.601247 |
f7897d0bfd7b98594f64cf998c02d21b938fb01d | 392 | py | Python | app/utils.py | Chimmahh/StarJumper | 6003ede1de61a17f1f8302faacf5f76033f8045d | [
"MIT"
] | null | null | null | app/utils.py | Chimmahh/StarJumper | 6003ede1de61a17f1f8302faacf5f76033f8045d | [
"MIT"
] | 3 | 2020-06-05T18:39:20.000Z | 2022-02-11T03:40:48.000Z | app/utils.py | Chimmahh/StarJumper | 6003ede1de61a17f1f8302faacf5f76033f8045d | [
"MIT"
] | 1 | 2018-07-26T16:44:04.000Z | 2018-07-26T16:44:04.000Z | from channels.db import database_sync_to_async
from .exceptions import ClientError
from .models import Game | 30.153846 | 46 | 0.75 |
f78ade6802218bb90c0b57cf1feec7d8f2242c2e | 2,328 | py | Python | tests/utils/test_file.py | gfi-centre-ouest/docker-devbox-ddb | 1597d85ef6e9e8322cce195a454de54186ce9ec7 | [
"MIT"
] | 4 | 2020-06-11T20:54:47.000Z | 2020-09-22T13:07:17.000Z | tests/utils/test_file.py | gfi-centre-ouest/docker-devbox-ddb | 1597d85ef6e9e8322cce195a454de54186ce9ec7 | [
"MIT"
] | 113 | 2019-11-07T00:40:36.000Z | 2021-01-18T12:50:16.000Z | tests/utils/test_file.py | inetum-orleans/docker-devbox-ddb | 20c713cf7bfcaf289226a17a9648c17d16003b4d | [
"MIT"
] | null | null | null | import os
import pytest
from ddb.__main__ import load_registered_features
from ddb.config import config
from ddb.feature import features
from ddb.feature.core import CoreFeature
from ddb.utils import file
from ddb.utils.file import FileWalker, FileUtils
| 40.137931 | 139 | 0.714777 |
f78b0bc589ac5d9426f05edb7fe27d25d4add06c | 9,666 | py | Python | test/test_datasets.py | pyronear/pyro-dataset | b6445f6051058f20f2fc821040ec3705dc60464c | [
"Apache-2.0"
] | null | null | null | test/test_datasets.py | pyronear/pyro-dataset | b6445f6051058f20f2fc821040ec3705dc60464c | [
"Apache-2.0"
] | null | null | null | test/test_datasets.py | pyronear/pyro-dataset | b6445f6051058f20f2fc821040ec3705dc60464c | [
"Apache-2.0"
] | 1 | 2022-02-14T12:37:24.000Z | 2022-02-14T12:37:24.000Z | # Copyright (C) 2021, Pyronear contributors.
# This program is licensed under the GNU Affero General Public License version 3.
# See LICENSE or go to <https://www.gnu.org/licenses/agpl-3.0.txt> for full license details.
import unittest
import tempfile
from pathlib import Path
import json
from PIL.Image import Image
import pandas as pd
import random
import requests
import torch
from torch.utils.data import DataLoader
from torchvision.transforms import transforms
from torchvision.datasets import VisionDataset
from pyrodataset.wildfire import WildFireDataset, WildFireSplitter, computeSubSet
if __name__ == '__main__':
unittest.main()
| 38.975806 | 114 | 0.658183 |
f78b62473ace335a7a8a2b3f902ea2441941d851 | 26,116 | py | Python | python/dgllife/model/pretrain/__init__.py | VIGNESHinZONE/dgl-lifesci | 9a892fd0935a7d8ab125530f54ce1e2a38b2377a | [
"Apache-2.0"
] | null | null | null | python/dgllife/model/pretrain/__init__.py | VIGNESHinZONE/dgl-lifesci | 9a892fd0935a7d8ab125530f54ce1e2a38b2377a | [
"Apache-2.0"
] | null | null | null | python/dgllife/model/pretrain/__init__.py | VIGNESHinZONE/dgl-lifesci | 9a892fd0935a7d8ab125530f54ce1e2a38b2377a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# pylint: disable= no-member, arguments-differ, invalid-name
#
# Utilities for using pre-trained models.
import torch
from dgl.data.utils import _get_dgl_url, download
from .moleculenet import *
from .generative_models import *
from .property_prediction import *
from .reaction import *
__all__ = ['load_pretrained']
url = {**moleculenet_url, **generative_url, **property_url, **reaction_url}
def download_and_load_checkpoint(model_name, model, model_postfix,
                                 local_pretrained_path='pre_trained.pth', log=True):
    """Fetch a pretrained checkpoint and load its weights into *model*.

    The checkpoint is downloaded from the DGL model zoo and the state dict
    is loaded onto CPU.

    Parameters
    ----------
    model_name : str
        Name of the model, used to prefix the local checkpoint file name.
    model : nn.Module
        Instantiated model instance.
    model_postfix : str
        Postfix identifying the pretrained checkpoint in the model zoo.
    local_pretrained_path : str
        Suffix for the locally downloaded checkpoint file.
    log : bool
        Whether to print progress for model loading.

    Returns
    -------
    model : nn.Module
        The same model instance, now carrying the pretrained weights.
    """
    remote_url = _get_dgl_url(model_postfix)
    checkpoint_file = '_'.join([model_name, local_pretrained_path])
    download(remote_url, path=checkpoint_file, log=log)
    # Always map to CPU so loading works on machines without a GPU.
    state = torch.load(checkpoint_file, map_location='cpu')
    model.load_state_dict(state['model_state_dict'])
    if log:
        print('Pretrained model loaded')
    return model
# pylint: disable=I1101
def load_pretrained(model_name, log=True):
"""Load a pretrained model
Parameters
----------
model_name : str
Currently supported options include
* ``'GCN_Tox21'``: A GCN-based model for molecular property prediction on Tox21
* ``'GAT_Tox21'``: A GAT-based model for molecular property prediction on Tox21
* ``'Weave_Tox21'``: A Weave model for molecular property prediction on Tox21
* ``'AttentiveFP_Aromaticity'``: An AttentiveFP model for predicting number of
aromatic atoms on a subset of Pubmed
* ``'DGMG_ChEMBL_canonical'``: A DGMG model trained on ChEMBL with a canonical
atom order
* ``'DGMG_ChEMBL_random'``: A DGMG model trained on ChEMBL for molecule generation
with a random atom order
* ``'DGMG_ZINC_canonical'``: A DGMG model trained on ZINC for molecule generation
with a canonical atom order
* ``'DGMG_ZINC_random'``: A DGMG model pre-trained on ZINC for molecule generation
with a random atom order
* ``'JTNN_ZINC'``: A JTNN model pre-trained on ZINC for molecule generation
* ``'wln_center_uspto'``: A WLN model pre-trained on USPTO for reaction prediction
* ``'wln_rank_uspto'``: A WLN model pre-trained on USPTO for candidate product ranking
* ``'gin_supervised_contextpred'``: A GIN model pre-trained with supervised learning
and context prediction
* ``'gin_supervised_infomax'``: A GIN model pre-trained with supervised learning
and deep graph infomax
* ``'gin_supervised_edgepred'``: A GIN model pre-trained with supervised learning
and edge prediction
* ``'gin_supervised_masking'``: A GIN model pre-trained with supervised learning
and attribute masking
* ``'GCN_canonical_BACE'``: A GCN model trained on BACE with canonical
featurization for atoms
* ``'GCN_attentivefp_BACE'``: A GCN model trained on BACE with attentivefp
featurization for atoms
* ``'GAT_canonical_BACE'``: A GAT model trained on BACE with canonical
featurization for atoms
* ``'GAT_attentivefp_BACE'``: A GAT model trained on BACE with attentivefp
featurization for atoms
* ``'Weave_canonical_BACE'``: A Weave model trained on BACE with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_BACE'``: A Weave model trained on BACE with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_BACE'``: An MPNN model trained on BACE with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_BACE'``: An MPNN model trained on BACE with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_BACE'``: An AttentiveFP model trained on BACE with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_BACE'``: An AttentiveFP model trained on BACE with
attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_BACE'``: A GIN model pre-trained with supervised
learning and context prediction, and fine-tuned on BACE
* ``'gin_supervised_infomax_BACE'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on BACE
* ``'gin_supervised_edgepred_BACE'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on BACE
* ``'gin_supervised_masking_BACE'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on BACE
* ``'NF_canonical_BACE'``: An NF model trained on BACE with canonical
featurization for atoms
* ``'GCN_canonical_BBBP'``: A GCN model trained on BBBP with canonical
featurization for atoms
* ``'GCN_attentivefp_BBBP'``: A GCN model trained on BBBP with attentivefp
featurization for atoms
* ``'GAT_canonical_BBBP'``: A GAT model trained on BBBP with canonical
featurization for atoms
* ``'GAT_attentivefp_BBBP'``: A GAT model trained on BBBP with attentivefp
featurization for atoms
* ``'Weave_canonical_BBBP'``: A Weave model trained on BBBP with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_BBBP'``: A Weave model trained on BBBP with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_BBBP'``: An MPNN model trained on BBBP with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_BBBP'``: An MPNN model trained on BBBP with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_BBBP'``: An AttentiveFP model trained on BBBP with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_BBBP'``: An AttentiveFP model trained on BBBP with
attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_BBBP'``: A GIN model pre-trained with supervised
learning and context prediction, and fine-tuned on BBBP
* ``'gin_supervised_infomax_BBBP'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on BBBP
* ``'gin_supervised_edgepred_BBBP'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on BBBP
* ``'gin_supervised_masking_BBBP'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on BBBP
* ``'NF_canonical_BBBP'``: An NF model pre-trained on BBBP with canonical
featurization for atoms
* ``'GCN_canonical_ClinTox'``: A GCN model trained on ClinTox with canonical
featurization for atoms
* ``'GCN_attentivefp_ClinTox'``: A GCN model trained on ClinTox with attentivefp
featurization for atoms
* ``'GAT_canonical_ClinTox'``: A GAT model trained on ClinTox with canonical
featurization for atoms
* ``'GAT_attentivefp_ClinTox'``: A GAT model trained on ClinTox with attentivefp
featurization for atoms
* ``'Weave_canonical_ClinTox'``: A Weave model trained on ClinTox with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_ClinTox'``: A Weave model trained on ClinTox with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_ClinTox'``: An MPNN model trained on ClinTox with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_ClinTox'``: An MPNN model trained on ClinTox with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_ClinTox'``: An AttentiveFP model trained on ClinTox with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_BACE'``: An AttentiveFP model trained on ClinTox with
attentivefp featurization for atoms and bonds
* ``'GCN_canonical_ESOL'``: A GCN model trained on ESOL with canonical
featurization for atoms
* ``'GCN_attentivefp_ESOL'``: A GCN model trained on ESOL with attentivefp
featurization for atoms
* ``'GAT_canonical_ESOL'``: A GAT model trained on ESOL with canonical
featurization for atoms
* ``'GAT_attentivefp_ESOL'``: A GAT model trained on ESOL with attentivefp
featurization for atoms
* ``'Weave_canonical_ESOL'``: A Weave model trained on ESOL with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_ESOL'``: A Weave model trained on ESOL with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_ESOL'``: An MPNN model trained on ESOL with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_ESOL'``: An MPNN model trained on ESOL with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_ESOL'``: An AttentiveFP model trained on ESOL with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_ESOL'``: An AttentiveFP model trained on ESOL with
attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_ESOL'``: A GIN model pre-trained with supervised
learning and context prediction, and fine-tuned on ESOL
* ``'gin_supervised_infomax_ESOL'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on ESOL
* ``'gin_supervised_edgepred_ESOL'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on ESOL
* ``'gin_supervised_masking_ESOL'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on ESOL
* ``'GCN_canonical_FreeSolv'``: A GCN model trained on FreeSolv with canonical
featurization for atoms
* ``'GCN_attentivefp_FreeSolv'``: A GCN model trained on FreeSolv with attentivefp
featurization for atoms
* ``'GAT_canonical_FreeSolv'``: A GAT model trained on FreeSolv with canonical
featurization for atoms
* ``'GAT_attentivefp_FreeSolv'``: A GAT model trained on FreeSolv with attentivefp
featurization for atoms
* ``'Weave_canonical_FreeSolv'``: A Weave model trained on FreeSolv with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_FreeSolv'``: A Weave model trained on FreeSolv with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_FreeSolv'``: An MPNN model trained on FreeSolv with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_FreeSolv'``: An MPNN model trained on FreeSolv with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_FreeSolv'``: An AttentiveFP model trained on FreeSolv with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_FreeSolv'``: An AttentiveFP model trained on FreeSolv with
attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_FreeSolv'``: A GIN model pre-trained with supervised
learning and context prediction, and fine-tuned on FreeSolv
* ``'gin_supervised_infomax_FreeSolv'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on FreeSolv
* ``'gin_supervised_edgepred_FreeSolv'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on FreeSolv
* ``'gin_supervised_masking_FreeSolv'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on FreeSolv
* ``'GCN_canonical_HIV'``: A GCN model trained on HIV with canonical
featurization for atoms
* ``'GCN_attentivefp_HIV'``: A GCN model trained on HIV with attentivefp
featurization for atoms
* ``'GAT_canonical_HIV'``: A GAT model trained on BACE with canonical
featurization for atoms
* ``'GAT_attentivefp_HIV'``: A GAT model trained on BACE with attentivefp
featurization for atoms
* ``'Weave_canonical_HIV'``: A Weave model trained on HIV with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_HIV'``: A Weave model trained on HIV with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_HIV'``: An MPNN model trained on HIV with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_HIV'``: An MPNN model trained on HIV with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_HIV'``: An AttentiveFP model trained on HIV with canonical
featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_HIV'``: An AttentiveFP model trained on HIV with attentivefp
featurization for atoms and bonds
* ``'gin_supervised_contextpred_HIV'``: A GIN model pre-trained with supervised learning
and context prediction, and fine-tuned on HIV
* ``'gin_supervised_infomax_HIV'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on HIV
* ``'gin_supervised_edgepred_HIV'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on HIV
* ``'gin_supervised_masking_HIV'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on HIV
* ``'NF_canonical_HIV'``: An NF model trained on HIV with canonical
featurization for atoms
* ``'GCN_canonical_Lipophilicity'``: A GCN model trained on Lipophilicity with canonical
featurization for atoms
* ``'GCN_attentivefp_Lipophilicity'``: A GCN model trained on Lipophilicity with
attentivefp featurization for atoms
* ``'GAT_canonical_Lipophilicity'``: A GAT model trained on Lipophilicity with canonical
featurization for atoms
* ``'GAT_attentivefp_Lipophilicity'``: A GAT model trained on Lipophilicity with
attentivefp featurization for atoms
* ``'Weave_canonical_Lipophilicity'``: A Weave model trained on Lipophilicity with
canonical featurization for atoms and bonds
* ``'Weave_attentivefp_Lipophilicity'``: A Weave model trained on Lipophilicity with
attentivefp featurization for atoms and bonds
* ``'MPNN_canonical_Lipophilicity'``: An MPNN model trained on Lipophilicity with
canonical featurization for atoms and bonds
* ``'MPNN_attentivefp_Lipophilicity'``: An MPNN model trained on Lipophilicity with
attentivefp featurization for atoms and bonds
* ``'AttentiveFP_canonical_Lipophilicity'``: An AttentiveFP model trained on
Lipophilicity with canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_Lipophilicity'``: An AttentiveFP model trained on
Lipophilicity with attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_Lipophilicity'``: A GIN model pre-trained with supervised
learning and context prediction, and fine-tuned on Lipophilicity
* ``'gin_supervised_infomax_Lipophilicity'``: A GIN model pre-trained with supervised
learning and infomax, and fine-tuned on Lipophilicity
* ``'gin_supervised_edgepred_Lipophilicity'``: A GIN model pre-trained with supervised
learning and edge prediction, and fine-tuned on Lipophilicity
* ``'gin_supervised_masking_Lipophilicity'``: A GIN model pre-trained with supervised
learning and masking, and fine-tuned on Lipophilicity
* ``'GCN_canonical_MUV'``: A GCN model trained on MUV with canonical
featurization for atoms
* ``'GCN_attentivefp_MUV'``: A GCN model trained on MUV with attentivefp
featurization for atoms
* ``'GAT_canonical_MUV'``: A GAT model trained on MUV with canonical
featurization for atoms
* ``'GAT_attentivefp_MUV'``: A GAT model trained on MUV with attentivefp
featurization for atoms
* ``'Weave_canonical_MUV'``: A Weave model trained on MUV with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_MUV'``: A Weave model trained on MUV with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_MUV'``: An MPNN model trained on MUV with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_MUV'``: An MPNN model trained on MUV with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_MUV'``: An AttentiveFP model trained on MUV with canonical
featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_MUV'``: An AttentiveFP model trained on MUV with attentivefp
featurization for atoms and bonds
* ``'gin_supervised_contextpred_MUV'``: A GIN model pre-trained with supervised learning
and context prediction, and fine-tuned on MUV
* ``'gin_supervised_infomax_MUV'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on MUV
* ``'gin_supervised_edgepred_MUV'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on MUV
* ``'gin_supervised_masking_MUV'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on MUV
* ``'GCN_canonical_PCBA'``: A GCN model trained on PCBA with canonical
featurization for atoms
* ``'GCN_attentivefp_PCBA'``: A GCN model trained on PCBA with attentivefp
featurization for atoms
* ``'GAT_canonical_PCBA'``: A GAT model trained on PCBA with canonical
featurization for atoms
* ``'GAT_attentivefp_PCBA'``: A GAT model trained on PCBA with attentivefp
featurization for atoms
* ``'Weave_canonical_PCBA'``: A Weave model trained on PCBA with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_PCBA'``: A Weave model trained on PCBA with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_PCBA'``: An MPNN model trained on PCBA with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_PCBA'``: An MPNN model trained on PCBA with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_PCBA'``: An AttentiveFP model trained on PCBA with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_PCBA'``: An AttentiveFP model trained on PCBA with
attentivefp featurization for atoms and bonds
* ``'GCN_canonical_SIDER'``: A GCN model trained on SIDER with canonical
featurization for atoms
* ``'GCN_attentivefp_SIDER'``: A GCN model trained on SIDER with attentivefp
featurization for atoms
* ``'GAT_canonical_SIDER'``: A GAT model trained on SIDER with canonical
featurization for atoms
* ``'GAT_attentivefp_SIDER'``: A GAT model trained on SIDER with attentivefp
featurization for atoms
* ``'Weave_canonical_SIDER'``: A Weave model trained on SIDER with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_SIDER'``: A Weave model trained on SIDER with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_SIDER'``: An MPNN model trained on SIDER with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_SIDER'``: An MPNN model trained on SIDER with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_SIDER'``: An AttentiveFP model trained on SIDER with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_SIDER'``: An AttentiveFP model trained on SIDER with
attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_SIDER'``: A GIN model pre-trained with supervised learning
and context prediction, and fine-tuned on SIDER
* ``'gin_supervised_infomax_SIDER'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on SIDER
* ``'gin_supervised_edgepred_SIDER'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on SIDER
* ``'gin_supervised_masking_SIDER'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on SIDER
* ``'NF_canonical_SIDER'``: An NF model trained on SIDER with canonical
featurization for atoms
* ``'GCN_canonical_Tox21'``: A GCN model trained on Tox21 with canonical
featurization for atoms
* ``'GCN_attentivefp_Tox21'``: A GCN model trained on Tox21 with attentivefp
featurization for atoms
* ``'GAT_canonical_Tox21'``: A GAT model trained on Tox21 with canonical
featurization for atoms
* ``'GAT_attentivefp_Tox21'``: A GAT model trained on Tox21 with attentivefp
featurization for atoms
* ``'Weave_canonical_Tox21'``: A Weave model trained on Tox21 with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_Tox21'``: A Weave model trained on Tox21 with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_Tox21'``: An MPNN model trained on Tox21 with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_Tox21'``: An MPNN model trained on Tox21 with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_Tox21'``: An AttentiveFP model trained on Tox21 with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_Tox21'``: An AttentiveFP model trained on Tox21 with
attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_Tox21'``: A GIN model pre-trained with supervised
learning and context prediction, and fine-tuned on Tox21
* ``'gin_supervised_infomax_Tox21'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on Tox21
* ``'gin_supervised_edgepred_Tox21'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on Tox21
* ``'gin_supervised_masking_Tox21'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on Tox21
* ``'NF_canonical_Tox21'``: An NF model trained on Tox21 with canonical
featurization for atoms
* ``'GCN_canonical_ToxCast'``: A GCN model trained on ToxCast with canonical
featurization for atoms
* ``'GCN_attentivefp_ToxCast'``: A GCN model trained on ToxCast with attentivefp
featurization for atoms
* ``'GAT_canonical_ToxCast'``: A GAT model trained on ToxCast with canonical
featurization for atoms
* ``'GAT_attentivefp_ToxCast'``: A GAT model trained on ToxCast with attentivefp
featurization for atoms
* ``'Weave_canonical_ToxCast'``: A Weave model trained on ToxCast with canonical
featurization for atoms and bonds
* ``'Weave_attentivefp_ToxCast'``: A Weave model trained on ToxCast with attentivefp
featurization for atoms and bonds
* ``'MPNN_canonical_ToxCast'``: An MPNN model trained on ToxCast with canonical
featurization for atoms and bonds
* ``'MPNN_attentivefp_ToxCast'``: An MPNN model trained on ToxCast with attentivefp
featurization for atoms and bonds
* ``'AttentiveFP_canonical_ToxCast'``: An AttentiveFP model trained on ToxCast with
canonical featurization for atoms and bonds
* ``'AttentiveFP_attentivefp_ToxCast'``: An AttentiveFP model trained on ToxCast with
attentivefp featurization for atoms and bonds
* ``'gin_supervised_contextpred_ToxCast'``: A GIN model pre-trained with supervised
learning and context prediction, and fine-tuned on ToxCast
* ``'gin_supervised_infomax_ToxCast'``: A GIN model pre-trained with supervised learning
and infomax, and fine-tuned on ToxCast
* ``'gin_supervised_edgepred_ToxCast'``: A GIN model pre-trained with supervised learning
and edge prediction, and fine-tuned on ToxCast
* ``'gin_supervised_masking_ToxCast'``: A GIN model pre-trained with supervised learning
and masking, and fine-tuned on ToxCast
* ``'NF_canonical_ToxCast'``: An NF model trained on ToxCast with canonical
featurization for atoms and bonds
log : bool
Whether to print progress for model loading
Returns
-------
model
"""
if model_name not in url:
raise RuntimeError("Cannot find a pretrained model with name {}".format(model_name))
for func in [create_moleculenet_model, create_generative_model,
create_property_model, create_reaction_model]:
model = func(model_name)
if model is not None:
break
return download_and_load_checkpoint(model_name, model, url[model_name], log=log)
| 59.219955 | 98 | 0.689501 |
f78b86d747c672620428e6bd3b8435d4dd3f3512 | 5,608 | py | Python | src/pyrobot/vrep_locobot/camera.py | gujralsanyam22/pyrobot | a0448714857b684d8b280f710e9304988524d2e0 | [
"MIT"
] | 2,150 | 2019-06-12T20:55:41.000Z | 2022-03-21T07:14:51.000Z | src/pyrobot/vrep_locobot/camera.py | gujralsanyam22/pyrobot | a0448714857b684d8b280f710e9304988524d2e0 | [
"MIT"
] | 124 | 2019-06-22T17:12:27.000Z | 2022-02-26T11:43:13.000Z | src/pyrobot/vrep_locobot/camera.py | gujralsanyam22/pyrobot | a0448714857b684d8b280f710e9304988524d2e0 | [
"MIT"
] | 329 | 2019-06-13T03:03:54.000Z | 2022-03-30T07:04:55.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pyrobot.utils.util as prutil
from pyrobot.core import Camera
from pyrobot.utils.util import try_cv2_import
cv2 = try_cv2_import()
from cv_bridge import CvBridge, CvBridgeError
from pyrep.objects.vision_sensor import VisionSensor
from pyrep.const import ObjectType, PerspectiveMode, RenderMode
from pyrep.objects.joint import Joint
| 26.578199 | 88 | 0.63766 |
f78c09a2c8173da1710410d85fa882bb6b674769 | 105 | py | Python | main.py | lmkhkm/SerialMonitor | 05b00e7a05e2a71ddfc9b0e30e42a83b073f88e1 | [
"MIT"
] | null | null | null | main.py | lmkhkm/SerialMonitor | 05b00e7a05e2a71ddfc9b0e30e42a83b073f88e1 | [
"MIT"
] | 1 | 2021-05-08T20:52:27.000Z | 2021-05-08T20:52:27.000Z | main.py | lmkhkm/SerialMonitor | 05b00e7a05e2a71ddfc9b0e30e42a83b073f88e1 | [
"MIT"
] | null | null | null | import serial
ser = serial.Serial('COM7',115200, timeout=1)
while True:
print("R: ", ser.readline()) | 21 | 45 | 0.67619 |
f78d0e1c968d2c094e91caabfbf0e1efad8f0eb3 | 1,009 | py | Python | examples/avro/py/generate_avro_users.py | kikkomep/pydoop | 4f855ef775b925b8c9f2adf1c0ef13337323ee24 | [
"Apache-2.0"
] | null | null | null | examples/avro/py/generate_avro_users.py | kikkomep/pydoop | 4f855ef775b925b8c9f2adf1c0ef13337323ee24 | [
"Apache-2.0"
] | null | null | null | examples/avro/py/generate_avro_users.py | kikkomep/pydoop | 4f855ef775b925b8c9f2adf1c0ef13337323ee24 | [
"Apache-2.0"
] | null | null | null | import sys
import random
import avro.schema
from avro.datafile import DataFileWriter
from avro.io import DatumWriter
NAME_POOL = ['george', 'john', 'paul', 'ringo']
OFFICE_POOL = ['office-%d' % _ for _ in xrange(4)]
COLOR_POOL = ['black', 'cyan', 'magenta', 'yellow']
if __name__ == '__main__':
main(sys.argv)
| 28.027778 | 69 | 0.597621 |
f78d23bb7041a7dd86f556d3f4cd134329c150dd | 2,604 | py | Python | tests/utils_tests/testing_tests/assertions_tests/test_assert_is_bbox_dataset.py | souravsingh/chainercv | 8f76510472bc95018c183e72f37bc6c34a89969c | [
"MIT"
] | 1 | 2018-12-27T03:47:45.000Z | 2018-12-27T03:47:45.000Z | tests/utils_tests/testing_tests/assertions_tests/test_assert_is_bbox_dataset.py | souravsingh/chainercv | 8f76510472bc95018c183e72f37bc6c34a89969c | [
"MIT"
] | null | null | null | tests/utils_tests/testing_tests/assertions_tests/test_assert_is_bbox_dataset.py | souravsingh/chainercv | 8f76510472bc95018c183e72f37bc6c34a89969c | [
"MIT"
] | 2 | 2019-12-16T02:20:26.000Z | 2022-01-17T02:00:49.000Z | import numpy as np
import unittest
from chainer.dataset import DatasetMixin
from chainer import testing
from chainercv.utils import assert_is_bbox_dataset
from chainercv.utils import generate_random_bbox
testing.run_module(__name__, __file__)
| 28.304348 | 78 | 0.656298 |
f78da1263e700a0f21ebec44c019c94ee9c11482 | 3,002 | py | Python | seahub/utils/http.py | Xandersoft/seahub | f75f238b3e0a907e8a8003f419e367fa36e992e7 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | seahub/utils/http.py | Xandersoft/seahub | f75f238b3e0a907e8a8003f419e367fa36e992e7 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | seahub/utils/http.py | Xandersoft/seahub | f75f238b3e0a907e8a8003f419e367fa36e992e7 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2012-2016 Seafile Ltd.
from __future__ import unicode_literals
import unicodedata
import urlparse
import json
from functools import wraps
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
JSON_CONTENT_TYPE = 'application/json; charset=utf-8'
def is_safe_url(url, host=None):
    """
    Return ``True`` if *url* is a safe redirection target, i.e. it does not
    point to a different host and uses a safe scheme.

    Adapted from
    https://github.com/django/django/blob/fc6d147a63f89795dbcdecb0559256470fff4380/django/utils/http.py
    An empty or ``None`` url always yields ``False``.
    """
    stripped = url.strip() if url is not None else url
    if not stripped:
        return False
    # Chrome treats \ exactly like / in paths, but a backslash could also be
    # part of basic-auth credentials, so both the literal URL and the
    # slash-normalized variant must pass the safety check.
    normalized = stripped.replace('\\', '/')
    return _is_safe_url(stripped, host) and _is_safe_url(normalized, host)
| 35.317647 | 103 | 0.676549 |
f78df0be4fb31a9f76737c561c3fe76708dbdcd5 | 187 | py | Python | biopipen/core/defaults.py | pwwang/bioprocs | 4055a62eed8ef4fba0a5f1be430af72a9e22143d | [
"MIT"
] | 4 | 2018-01-16T08:25:53.000Z | 2019-01-03T23:41:31.000Z | biopipen/core/defaults.py | pwwang/bioprocs | 4055a62eed8ef4fba0a5f1be430af72a9e22143d | [
"MIT"
] | 3 | 2018-05-22T20:11:46.000Z | 2019-08-19T17:37:04.000Z | biopipen/core/defaults.py | pwwang/bioprocs | 4055a62eed8ef4fba0a5f1be430af72a9e22143d | [
"MIT"
] | 1 | 2019-01-14T23:14:24.000Z | 2019-01-14T23:14:24.000Z | """Provide default settgins"""
from pathlib import Path
BIOPIPEN_DIR = Path(__file__).parent.parent.resolve()
REPORT_DIR = BIOPIPEN_DIR / "reports"
SCRIPT_DIR = BIOPIPEN_DIR / "scripts"
| 26.714286 | 53 | 0.770053 |
f7904ac31330990ac63a4b3068ea84654cf9b168 | 6,172 | py | Python | pextant/sextant.py | norheim/pextant | f4235719279c0e6f178ae1e0f8b1ea3346533915 | [
"MIT"
] | null | null | null | pextant/sextant.py | norheim/pextant | f4235719279c0e6f178ae1e0f8b1ea3346533915 | [
"MIT"
] | 1 | 2019-12-03T03:52:41.000Z | 2019-12-04T14:50:36.000Z | pextant/sextant.py | norheim/pextant | f4235719279c0e6f178ae1e0f8b1ea3346533915 | [
"MIT"
] | 1 | 2019-12-03T02:37:57.000Z | 2019-12-03T02:37:57.000Z | from flask_settings import GEOTIFF_FULL_PATH
import sys
import traceback
sys.path.append('../')
import numpy as np
import json
from datetime import timedelta
from functools import update_wrapper
from pextant.EnvironmentalModel import GDALMesh
from pextant.explorers import Astronaut
from pextant.analysis.loadWaypoints import JSONloader
from pextant.lib.geoshapely import GeoPolygon, LAT_LONG
from pextant.solvers.astarMesh import astarSolver
from flask import Flask
from flask import make_response, request, current_app
app = Flask(__name__)
# if __name__ == "__main__":
main(sys.argv[1:])
#main(['../data/maps/dem/HI_air_imagery.tif']) | 38.098765 | 121 | 0.638043 |
f7905a650574afa3ef5e426f0e640ab9b3607fe3 | 10,085 | py | Python | packages/gtmapi/service.py | gigabackup/gigantum-client | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | [
"MIT"
] | 60 | 2018-09-26T15:46:00.000Z | 2021-10-10T02:37:14.000Z | packages/gtmapi/service.py | gigabackup/gigantum-client | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | [
"MIT"
] | 1,706 | 2018-09-26T16:11:22.000Z | 2021-08-20T13:37:59.000Z | packages/gtmapi/service.py | griffinmilsap/gigantum-client | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | [
"MIT"
] | 11 | 2019-03-14T13:23:51.000Z | 2022-01-25T01:29:16.000Z | #!/usr/bin/python3
import shutil
import os
import base64
from time import sleep
import flask
import requests.exceptions
import blueprint
from flask_cors import CORS
from confhttpproxy import ProxyRouter, ProxyRouterException
from flask import Flask, jsonify
import rest_routes
from lmsrvcore.utilities.migrate import migrate_work_dir_structure_v2
from gtmcore.dispatcher import Dispatcher
from gtmcore.dispatcher.jobs import update_environment_repositories
from gtmcore.configuration import Configuration
from gtmcore.logging import LMLogger
from gtmcore.auth.identity import AuthenticationError, get_identity_manager_class
from gtmcore.labbook.lock import reset_all_locks
logger = LMLogger.get_logger()
def configure_chp(proxy_dict: dict, is_hub_client: bool) -> str:
    """Set up the configurable HTTP proxy (CHP)
    Args:
        proxy_dict: obtained from the config dict inside the config instance
        is_hub_client: are we running on the hub? (also obtained from config instance)
    Returns:
        the final api_prefix used by the router
    We define this as a function mostly so we can optionally wrap it in a try block below
    """
    # /api by default
    api_prefix = proxy_dict["labmanager_api_prefix"]
    proxy_router = ProxyRouter.get_proxy(proxy_dict)
    # Wait up to 10 seconds for the CHP to be available (20 polls, 0.5s apart)
    for _ in range(20):
        try:
            # This property raises an exception if the underlying request doesn't yield a status code of 200
            proxy_router.routes # noqa
        except (requests.exceptions.ConnectionError, ProxyRouterException):
            sleep(0.5)
            continue
        # If there was no exception, the CHP is up and responding
        break
    else:
        # We exhausted our for-loop (the `break` never ran): the CHP never
        # answered. Log and continue anyway; the add() calls below may fail.
        logger.error("Could not reach router after 20 tries (10 seconds), proxy_router.add() will likely fail")
    if is_hub_client:
        # Use full route prefix, including run/<client_id> if running in the Hub
        api_target = f"run/{os.environ['GIGANTUM_CLIENT_ID']}{api_prefix}"
        api_prefix = f"/{api_target}"
        # explicit routes for UI with full route prefix
        proxy_router.add("http://localhost:10002", f"run/{os.environ['GIGANTUM_CLIENT_ID']}")
    else:
        # Running locally: the API is routed under plain "api"
        api_target = "api"
    # Route the API itself (port 10001 is where this Flask app listens)
    proxy_router.add("http://localhost:10001", api_target)
    logger.info(f"Proxy routes ({type(proxy_router)}): {proxy_router.routes}")
    return api_prefix
def configure_default_server(config_instance: Configuration) -> None:
    """Ensure a server is configured, selecting the project default when none is.

    Loading the server configuration raises ``FileNotFoundError`` when no
    server has been configured yet; in that case the default server from the
    config is added and selected, and legacy user directories are migrated.
    Any failure during that setup is logged and re-raised so the API does not
    start half-configured.
    """
    try:
        config_instance.get_server_configuration()
        return  # A server is already configured; nothing to do.
    except FileNotFoundError:
        pass  # No configured server yet -- fall through and set up the default.
    target = config_instance.config['core']['default_server']
    logger.info(f"Configuring Client with default server via auto-discovery: {target}")
    try:
        new_server_id = config_instance.add_server(target)
        config_instance.set_current_server(new_server_id)
        # Projects created before multi-server support all belonged to the
        # (then only) default server, so migrate the work dir layout for it.
        migrate_work_dir_structure_v2(new_server_id)
    except Exception as err:
        logger.exception(f"Failed to configure default server! Restart Client to try again: {err}")
        # Re-raise the exception so the API doesn't come up
        raise
# Start Flask Server Initialization and app configuration
app = Flask("lmsrvlabbook")
random_bytes = os.urandom(32)
app.config["SECRET_KEY"] = base64.b64encode(random_bytes).decode('utf-8')
app.config["LABMGR_CONFIG"] = config = Configuration(wait_for_cache=10)
configure_default_server(config)
app.config["ID_MGR_CLS"] = get_identity_manager_class(config)
# Set Debug mode
app.config['DEBUG'] = config.config["flask"]["DEBUG"]
app.register_blueprint(blueprint.complete_labbook_service)
# Set starting flags
# If flask is run in debug mode the service will restart when code is changed, and some tasks
# we only want to happen once (ON_FIRST_START)
# The WERKZEUG_RUN_MAIN environmental variable is set only when running under debugging mode
ON_FIRST_START = app.config['DEBUG'] is False or os.environ.get('WERKZEUG_RUN_MAIN') != 'true'
ON_RESTART = os.environ.get('WERKZEUG_RUN_MAIN') == 'true'
if os.environ.get('CIRCLECI') == 'true':
try:
url_prefix = configure_chp(config.config['proxy'], config.is_hub_client)
except requests.exceptions.ConnectionError:
url_prefix = config.config['proxy']["labmanager_api_prefix"]
else:
url_prefix = configure_chp(config.config['proxy'], config.is_hub_client)
# Add rest routes
app.register_blueprint(rest_routes.rest_routes, url_prefix=url_prefix)
if config.config["flask"]["allow_cors"]:
# Allow CORS
CORS(app, max_age=7200)
if ON_FIRST_START:
# Empty container-container share dir as it is ephemeral
share_dir = os.path.join(os.path.sep, 'mnt', 'share')
logger.info("Emptying container-container share folder: {}.".format(share_dir))
try:
for item in os.listdir(share_dir):
item_path = os.path.join(share_dir, item)
if os.path.isfile(item_path):
os.unlink(item_path)
else:
shutil.rmtree(item_path)
except Exception as e:
logger.error(f"Failed to empty share folder: {e}.")
raise
post_save_hook_code = """
import subprocess, os
def post_save_hook(os_path, model, contents_manager, **kwargs):
try:
client_ip = os.environ.get('GIGANTUM_CLIENT_IP')
if os.environ.get('HUB_CLIENT_ID'):
# Running in the Hub
service_route = "run/{}/api/savehook".format(os.environ.get('HUB_CLIENT_ID'))
else:
# Running locally
service_route = "api/savehook"
tokens = open('/home/giguser/jupyter_token').read().strip()
username, owner, lbname, jupyter_token = tokens.split(',')
url_args = "file={}&jupyter_token={}&email={}".format(os.path.basename(os_path), jupyter_token, os.environ['GIGANTUM_EMAIL'])
url = "http://{}:10001/{}/{}/{}/{}?{}".format(client_ip,service_route,username,owner,lbname,url_args)
subprocess.run(['wget', '--spider', url], cwd='/tmp')
except Exception as e:
print(e)
"""
os.makedirs(os.path.join(share_dir, 'jupyterhooks'))
with open(os.path.join(share_dir, 'jupyterhooks', '__init__.py'), 'w') as initpy:
initpy.write(post_save_hook_code)
# Reset distributed lock, if desired
if config.config["lock"]["reset_on_start"]:
logger.info("Resetting ALL distributed locks")
reset_all_locks(config.config['lock'])
# Create local data (for local dataset types) dir if it doesn't exist
local_data_dir = os.path.join(config.config['git']['working_directory'], 'local_data')
if os.path.isdir(local_data_dir) is False:
os.makedirs(local_data_dir, exist_ok=True)
logger.info(f'Created `local_data` dir for Local Filesystem Dataset Type: {local_data_dir}')
# Create certificates file directory for custom CA certificate support.
certificate_dir = os.path.join(config.config['git']['working_directory'], 'certificates', 'ssl')
if os.path.isdir(certificate_dir) is False:
os.makedirs(certificate_dir, exist_ok=True)
logger.info(f'Created `certificates` dir for SSL and custom CA certificates: {certificate_dir}')
# make sure temporary upload directory exists and is empty
tempdir = config.upload_dir
if os.path.exists(tempdir):
shutil.rmtree(tempdir)
logger.info(f'Cleared upload temp dir: {tempdir}')
os.makedirs(tempdir)
# Start background startup tasks
d = Dispatcher()
# Make sure the queue is up before we start using RQ
for _ in range(20):
if d.ready_for_job(update_environment_repositories):
break
sleep(0.5)
else:
# We exhausted our for-loop
err_message = "Worker queue not ready after 20 tries (10 seconds) - fatal error"
logger.error(err_message)
raise RuntimeError(err_message)
# Run job to update Base images in the background
d.dispatch_task(update_environment_repositories, persist=True)
# Set auth error handler
# TEMPORARY KLUDGE
# Due to GitPython implementation, resources leak. This block deletes all GitPython instances at the end of the request
# Future work will remove GitPython, at which point this block should be removed.
# TEMPORARY KLUDGE
def main(debug=False) -> None:
    """Run the Flask server on 0.0.0.0:10001 (assumed safe inside a container).

    ``debug`` must be explicitly ``False`` when running integration tests:
    werkzeug's dynamic package reloading crashes the interpreter under that
    workload, so the flag is only forwarded to Flask when explicitly enabled;
    otherwise the config-loaded default applies.
    """
    run_kwargs = {"host": "0.0.0.0", "port": 10001}
    if debug:
        # Integration tests call main with debug=False to avoid runtime
        # reloading of Python code; only forward the flag when set.
        run_kwargs["debug"] = debug
    try:
        app.run(**run_kwargs)
    except Exception as err:
        logger.exception(err)
        raise
if __name__ == '__main__':
main()
| 38.938224 | 133 | 0.69529 |
f79637ff2082c4edbb504887dfd73b4aed28edc7 | 37,112 | py | Python | bitten/model.py | dokipen/bitten | d4d2829c63eec84bcfab05ec7035a23e85d90c00 | [
"BSD-3-Clause"
] | 1 | 2016-08-28T03:13:03.000Z | 2016-08-28T03:13:03.000Z | bitten/model.py | dokipen/bitten | d4d2829c63eec84bcfab05ec7035a23e85d90c00 | [
"BSD-3-Clause"
] | null | null | null | bitten/model.py | dokipen/bitten | d4d2829c63eec84bcfab05ec7035a23e85d90c00 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2007 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://bitten.edgewall.org/wiki/License.
"""Model classes for objects persisted in the database."""
from trac.attachment import Attachment
from trac.db import Table, Column, Index
from trac.resource import Resource
from trac.util.text import to_unicode
import codecs
import os
__docformat__ = 'restructuredtext en'
schema = BuildConfig._schema + TargetPlatform._schema + Build._schema + \
BuildStep._schema + BuildLog._schema + Report._schema
schema_version = 10
| 37.000997 | 148 | 0.551304 |
f797289b8fbe1305efddd975d80c58646d9ec219 | 2,555 | py | Python | ml_model.py | CristopherNim/student_performance | f1ec90329e91c44a8155d83c0ac1569eb038954e | [
"MIT"
] | null | null | null | ml_model.py | CristopherNim/student_performance | f1ec90329e91c44a8155d83c0ac1569eb038954e | [
"MIT"
] | null | null | null | ml_model.py | CristopherNim/student_performance | f1ec90329e91c44a8155d83c0ac1569eb038954e | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.model_selection import RepeatedKFold
from sklearn.preprocessing import OneHotEncoder
import pickle
from flask import Flask, request
np.random.seed(42)
df = pd.read_csv('StudentsPerformance.csv')
df.rename(columns={'race/ethnicity': 'race', 'parental level of education': 'parent_level_of_education',
'test preparation course': 'test_prep_course', 'math score': 'math_score',
'reading score': 'reading_score', 'writing score': 'writing_score'}, inplace=True)
# creating a categorical boolean mask
categorical_feature_mask = df.dtypes == object
# filtering out the categorical columns
categorical_cols = df.columns[categorical_feature_mask].tolist()
# instantiate the OneHotEncoder Object
one_hot = OneHotEncoder(handle_unknown='ignore', sparse=False)
# applying data
one_hot.fit(df[categorical_cols])
cat_one_hot = one_hot.transform(df[categorical_cols])
# creating Dataframe of the hot encoded columns
hot_df = pd.DataFrame(cat_one_hot, columns=one_hot.get_feature_names(input_features=categorical_cols))
df_OneHotEncoder = pd.concat([df, hot_df], axis=1).drop(columns=categorical_cols, axis=1)
X = df_OneHotEncoder.drop('math_score', axis=1)
y = df_OneHotEncoder['math_score']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2)
model = Ridge(alpha=.99).fit(X_train, y_train)
model_scores = cross_val_score(estimator=model, X=X_test, y=y_test, cv=5)
print('accuracy for ridge model: %.1f' % (model_scores.mean() * 100))
pickle.dump(model, open('model.pkl', 'wb'))
row = ['male', 'group_a', 'some high school', 'standard', 'none', 80, 80]
result = row_pred(row)
print(result)
| 35.486111 | 111 | 0.720157 |
f7977957e2a1122df1f177f30c24042002dc1e88 | 1,088 | py | Python | src/ralph/models/edx/enrollment/fields/contexts.py | p-bizouard/ralph | 50a37f6b070dcb4109dcc49d8d885949a0099643 | [
"MIT"
] | 5 | 2020-06-26T10:44:23.000Z | 2022-01-26T11:41:03.000Z | src/ralph/models/edx/enrollment/fields/contexts.py | p-bizouard/ralph | 50a37f6b070dcb4109dcc49d8d885949a0099643 | [
"MIT"
] | 73 | 2020-02-18T15:09:25.000Z | 2022-03-14T13:32:20.000Z | src/ralph/models/edx/enrollment/fields/contexts.py | p-bizouard/ralph | 50a37f6b070dcb4109dcc49d8d885949a0099643 | [
"MIT"
] | 4 | 2020-02-27T12:52:10.000Z | 2021-11-23T19:45:07.000Z | """Enrollment event models context fields definitions"""
from typing import Literal, Union
from ...base import BaseContextField
| 31.085714 | 86 | 0.731618 |
f7979a1edf5e664d9fd5011a9f7390b351722d3b | 834 | py | Python | tests/profiles/fontval_test.py | kennethormandy/fontbakery | ec569215cd7919e125089bd6f65346afa9e75546 | [
"Apache-2.0"
] | null | null | null | tests/profiles/fontval_test.py | kennethormandy/fontbakery | ec569215cd7919e125089bd6f65346afa9e75546 | [
"Apache-2.0"
] | null | null | null | tests/profiles/fontval_test.py | kennethormandy/fontbakery | ec569215cd7919e125089bd6f65346afa9e75546 | [
"Apache-2.0"
] | 1 | 2020-06-14T17:13:59.000Z | 2020-06-14T17:13:59.000Z | import os
import pytest
from fontbakery.utils import TEST_FILE
from fontbakery.checkrunner import ERROR
def test_check_fontvalidator():
    """MS Font Validator checks."""
    from fontbakery.profiles.fontval import com_google_fonts_check_fontvalidator as check
    font = TEST_FILE("mada/Mada-Regular.ttf")
    # Run the (slow) FontValidator pass a single time and cache the results.
    results = list(check(font))
    # No ERROR status may appear; an ERROR would mean FontValidator itself
    # is not properly installed.
    for status, _message in results:
        assert status != ERROR
    # Now simulate FontVal being missing: with an empty PATH the binary
    # cannot be found, so invoking the check must raise OSError.
    saved_path = os.environ["PATH"]
    os.environ["PATH"] = ""
    with pytest.raises(OSError):
        last_status, last_message = list(check(font))[-1]
        assert last_status == ERROR
    os.environ["PATH"] = saved_path
| 28.758621 | 87 | 0.732614 |
f797a2004904bea8641ef96760d4f8b68d968963 | 3,662 | py | Python | app/views.py | sinantan/TechRSS | f07d21b5553534ef6ecb6da6dc89524a8bbdb505 | [
"MIT"
] | 3 | 2019-10-26T13:31:21.000Z | 2020-02-26T20:46:35.000Z | app/views.py | sinantan/TechRSS | f07d21b5553534ef6ecb6da6dc89524a8bbdb505 | [
"MIT"
] | null | null | null | app/views.py | sinantan/TechRSS | f07d21b5553534ef6ecb6da6dc89524a8bbdb505 | [
"MIT"
] | null | null | null | from run import app
from functools import wraps
from flask import render_template,flash,redirect,logging,session,url_for,request
from .models.database import user_register, user_login, get_feed, get_user_info, update_feed, change_password
#kullanc giri decorator'u. bu yap tm decoratorlarda ayn.
| 34.87619 | 176 | 0.706445 |
f797b24b3f7362f5aa140f0cf6036ab769afd566 | 4,671 | py | Python | kubails/commands/service.py | DevinSit/kubails | b3b2f9487d815868f0fbe9fae649789a40b50ad8 | [
"MIT"
] | 2 | 2019-05-28T00:26:52.000Z | 2019-08-02T23:02:19.000Z | kubails/commands/service.py | DevinSit/kubails | b3b2f9487d815868f0fbe9fae649789a40b50ad8 | [
"MIT"
] | 51 | 2019-12-23T04:34:40.000Z | 2022-02-12T02:28:44.000Z | kubails/commands/service.py | DevinSit/kubails | b3b2f9487d815868f0fbe9fae649789a40b50ad8 | [
"MIT"
] | 1 | 2019-09-11T20:12:18.000Z | 2019-09-11T20:12:18.000Z | import click
import logging
import sys
from typing import Tuple
from kubails.commands import helpers
from kubails.services.config_store import ConfigStore
from kubails.services.service import Service
from kubails.resources.templates import SERVICE_TEMPLATES
from kubails.utils.command_helpers import log_command_args_factory
logger = logging.getLogger(__name__)
log_command_args = log_command_args_factory(logger, "Service '{}' args")
config_store = None
service_service = None
############################################################
# Images sub-group
############################################################
| 26.241573 | 84 | 0.676301 |
f797e5f31f0f4940006d8b4a1e545eb141db847d | 10,703 | py | Python | tests/sentry/integrations/cloudflare/test_webhook.py | jianyuan/sentry | ceb8389c54d29f80b27703bb76c3880d923a3a5a | [
"BSD-3-Clause"
] | 1 | 2017-10-18T19:40:14.000Z | 2017-10-18T19:40:14.000Z | tests/sentry/integrations/cloudflare/test_webhook.py | Munyola/sentry | ab8923b2801d7d72d6903e0d9180584817bb1b9a | [
"BSD-3-Clause"
] | 1 | 2021-02-24T04:32:19.000Z | 2021-02-24T04:32:19.000Z | tests/sentry/integrations/cloudflare/test_webhook.py | Munyola/sentry | ab8923b2801d7d72d6903e0d9180584817bb1b9a | [
"BSD-3-Clause"
] | 2 | 2021-01-26T09:53:39.000Z | 2022-03-22T09:01:47.000Z | from __future__ import absolute_import
from hashlib import sha256
import hmac
import json
import six
from sentry import options
from sentry.models import ApiToken, ProjectKey
from sentry.testutils import TestCase
UNSET = object()
| 41.484496 | 98 | 0.613566 |
f79944d2cfc0c1247874648a3a289225bce5b0b8 | 669 | py | Python | teacher_files/ia_fopera/version sockets (unix only) d'H. Roussille/neurones.py | zomboyd/epi-ml | 383c28f27e4fdef715e94d1a0e0cd24afe368f86 | [
"MIT"
] | null | null | null | teacher_files/ia_fopera/version sockets (unix only) d'H. Roussille/neurones.py | zomboyd/epi-ml | 383c28f27e4fdef715e94d1a0e0cd24afe368f86 | [
"MIT"
] | null | null | null | teacher_files/ia_fopera/version sockets (unix only) d'H. Roussille/neurones.py | zomboyd/epi-ml | 383c28f27e4fdef715e94d1a0e0cd24afe368f86 | [
"MIT"
] | null | null | null | from math import exp,sqrt
from random import randrange
n = 100
X_app = [(randrange(-500,501)/1000,randrange(-500,501)/1000) for i in range(n)]
Y_app = [1 if ((x[0]-0.3)+(x[1]-0.3))<0.2 else 0 for x in X_app]
a=1
Y_pred,Y_score = [None for i in range(1001)], [None for i in range(1001)]
for i in range(1001):
b=i/1000*4-1
ne = neurone(a,b)
Y_pred[i] = [ne.proceed(z) for z in X_app]
Y_score[i] = sum([abs(Y_pred[i][j]-Y_app[j]) for j in range(n)])
opt = min(Y_score)
print(Y_score) | 27.875 | 79 | 0.600897 |
f799652a7ef1e45432c147799e5cb274b7f73f41 | 29,113 | py | Python | CarParkArcGisApi/CarParkArcGisApi/env/Lib/site-packages/arcgis/apps/storymap/storymap.py | moazzamwaheed2017/carparkapi | e52ae1b2aed47321ce9d22ba6cd0b85fa60a417a | [
"MIT"
] | null | null | null | CarParkArcGisApi/CarParkArcGisApi/env/Lib/site-packages/arcgis/apps/storymap/storymap.py | moazzamwaheed2017/carparkapi | e52ae1b2aed47321ce9d22ba6cd0b85fa60a417a | [
"MIT"
] | 9 | 2020-02-03T15:50:10.000Z | 2022-03-02T07:11:34.000Z | CarParkArcGisApi/CarParkArcGisApi/env/Lib/site-packages/arcgis/apps/storymap/storymap.py | moazzamwaheed2017/carparkapi | e52ae1b2aed47321ce9d22ba6cd0b85fa60a417a | [
"MIT"
] | null | null | null | import json
import datetime
import mimetypes
from urllib.parse import urlparse
from arcgis import env
from arcgis.gis import GIS
from arcgis.gis import Item
from ._ref import reference
def _add_webpage(self,
title,
url,
content=None,
actions=None,
visible=True,
alt_text="",
display='stretch'):
"""
Adds a webpage to the storymap
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
title Required string. The title of the section.
--------------- --------------------------------------------------------------------
url Required string. The web address of the webpage
--------------- --------------------------------------------------------------------
content Optional string. The content of the section.
--------------- --------------------------------------------------------------------
actions Optional list. A collection of actions performed on the section
--------------- --------------------------------------------------------------------
visible Optional boolean. If True, the section is visible on publish. If
False, the section is not displayed.
--------------- --------------------------------------------------------------------
alt_text Optional string. Specifies an alternate text for an image.
--------------- --------------------------------------------------------------------
display Optional string. The image display properties.
=============== ====================================================================
:return: Boolean
"""
if actions is None:
actions = []
if visible:
visible = "PUBLISHED"
else:
visible = "HIDDEN"
self._properties['values']['story']['sections'].append(
{
"title": title,
"content": content,
"contentActions": actions,
"creaDate": int(datetime.datetime.now().timestamp() * 1000),
"pubDate": int(datetime.datetime.now().timestamp() * 1000),
"status": visible,
"media": {
"type": "webpage",
"webpage": {
"url": url,
"type": "webpage",
"altText": alt_text,
"display": display,
"unload": True,
"hash": "5"
}
}
}
)
return True
#----------------------------------------------------------------------
def _add_video(self,
url,
title,
content,
actions=None,
visible=True,
alt_text="",
display='stretch'
):
"""
Adds a video section to the StoryMap.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
title Required string. The title of the section.
--------------- --------------------------------------------------------------------
url Required string. The web address of the image
--------------- --------------------------------------------------------------------
content Optional string. The content of the section.
--------------- --------------------------------------------------------------------
actions Optional list. A collection of actions performed on the section
--------------- --------------------------------------------------------------------
visible Optional boolean. If True, the section is visible on publish. If
False, the section is not displayed.
--------------- --------------------------------------------------------------------
alt_text Optional string. Specifies an alternate text for an image.
--------------- --------------------------------------------------------------------
display Optional string. The image display properties.
=============== ====================================================================
:return: Boolean
"""
if actions is None:
actions = []
if visible:
visible = "PUBLISHED"
else:
visible = "HIDDEN"
video = {
"title": title,
"content": content,
"contentActions": actions,
"creaDate": 1523450612336,
"pubDate": 1523450580000,
"status": visible,
"media": {
"type": "video",
"video": {
"url": url,
"type": "video",
"altText": alt_text,
"display": display
}
}
}
self._properties['values']['story']['sections'].append(video)
return True
#----------------------------------------------------------------------
def _add_webmap(self,
item,
title,
content,
actions=None,
visible=True,
alt_text="",
display='stretch',
show_legend=False,
show_default_legend=False,
extent=None,
layer_visibility=None,
popup=None
):
"""
Adds a WebMap to the Section.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
item Required string/Item. The webmap Item Id or Item of a webmap.
--------------- --------------------------------------------------------------------
title Required string. The title of the section.
--------------- --------------------------------------------------------------------
url Required string. The web address of the image
--------------- --------------------------------------------------------------------
content Optional string. The content of the section.
--------------- --------------------------------------------------------------------
actions Optional list. A collection of actions performed on the section
--------------- --------------------------------------------------------------------
visible Optional boolean. If True, the section is visible on publish. If
False, the section is not displayed.
--------------- --------------------------------------------------------------------
alt_text Optional string. Specifies an alternate text for an image.
--------------- --------------------------------------------------------------------
display Optional string. The image display properties.
=============== ====================================================================
:return: Boolean
"""
if isinstance(item, Item):
item = item.itemid
if actions is None:
actions = []
if visible:
visible = "PUBLISHED"
else:
visible = "HIDDEN"
wm = {
"title": title,
"content": content,
"contentActions": actions,
"creaDate": int(datetime.datetime.now().timestamp() * 1000),
"pubDate": int(datetime.datetime.now().timestamp() * 1000),
"status": visible,
"media": {
"type": "webmap",
"webmap": {
"id": item,
"extent": extent,
"layers": layer_visibility,
"popup": popup,
"overview": {
"enable": False,
"openByDefault": True
},
"legend": {
"enable": show_legend,
"openByDefault": show_default_legend
},
"geocoder": {
"enable": False
},
"altText": alt_text
}
}
}
self._properties['values']['story']['sections'].append(wm)
return True
#----------------------------------------------------------------------
def _add_image(self,
title,
image,
content=None,
actions=None,
visible=True,
alt_text=None,
display='fill'):
"""
Adds a new image section to the storymap
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
title Required string. The title of the section.
--------------- --------------------------------------------------------------------
url Required string. The web address of the image
--------------- --------------------------------------------------------------------
content Optional string. The content of the section.
--------------- --------------------------------------------------------------------
actions Optional list. A collection of actions performed on the section
--------------- --------------------------------------------------------------------
visible Optional boolean. If True, the section is visible on publish. If
False, the section is not displayed.
--------------- --------------------------------------------------------------------
alt_text Optional string. Specifies an alternate text for an image.
--------------- --------------------------------------------------------------------
display Optional string. The image display properties.
=============== ====================================================================
:return: Boolean
"""
if actions is None:
actions = []
if visible:
visible = "PUBLISHED"
else:
visible = "HIDDEN"
self._properties['values']['story']['sections'].append(
{
"title": title,
"content": content,
"contentActions": actions,
"creaDate": int(datetime.datetime.now().timestamp() * 1000),
"pubDate": int(datetime.datetime.now().timestamp() * 1000),
"status": visible,
"media": {
"type": "image",
"image": {
"url": image,
"type": "image",
"altText": alt_text,
"display": display
}
}
}
)
return True
#----------------------------------------------------------------------
def remove(self, index):
"""
Removes a section by index.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
index Required integer. The position of the section to remove.
=============== ====================================================================
:return: Boolean
"""
try:
item = self._properties['values']['story']['sections'][index]
self._properties['values']['story']['sections'].remove(item)
return True
except:
return False
#----------------------------------------------------------------------
def save(self, title=None, tags=None, description=None):
"""
Saves an Journal StoryMap to the GIS
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
title Optional string. The title of the StoryMap.
--------------- --------------------------------------------------------------------
tags Optional string. The tags of the StoryMap.
--------------- --------------------------------------------------------------------
description Optional string. The description of the StoryMap
=============== ====================================================================
:return: Boolean
"""
import uuid
if self._item:
p = {
'text' : json.dumps(self._properties)
}
if title:
p['title'] = title
if tags:
p['tags'] = tags
return self._item.update(item_properties=p)
else:
if title is None:
title = "Map Journal, %s" % uuid.uuid4().hex[:10]
if tags is None:
tags = "Story Map,Map Journal"
typeKeywords = ",".join(['JavaScript', 'layout-side', 'Map', 'MapJournal',
'Mapping Site', 'Online Map', 'Ready To Use',
'selfConfigured', 'Story Map', 'Story Maps',
'Web Map'])
item = self._gis.content.add(item_properties={
'title' : title,
'tags' : tags,
'text' : json.dumps(self._properties),
'typeKeywords' : typeKeywords,
'itemType' : 'text',
'type' : "Web Mapping Application",
})
parse = urlparse(self._gis._con.baseurl)
isinstance(self._gis, GIS)
if self._gis._portal.is_arcgisonline:
url = "%s://%s/apps/MapJournal/index.html?appid=%s" % (parse.scheme, parse.netloc, item.itemid)
else:
import os
wa = os.path.dirname(parse.path[1:])
url = "%s://%s/%s/sharing/rest/apps/MapJournal/index.html?appid=%s" % (parse.scheme, parse.netloc, wa, item.itemid)
return item.update(item_properties={
'url' : url
})
return False
#----------------------------------------------------------------------
def delete(self):
"""Deletes the saved item on ArcGIS Online/Portal"""
if self._item:
return self._item.delete()
return False
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
| 44.177542 | 131 | 0.334352 |
f799698de0ff8776338f8a1ec460edf6e103c58f | 703 | py | Python | tests/test_core.py | emauton/aoc2015 | f321571b623a0e7acaa173be57506e64bd32765f | [
"MIT"
] | null | null | null | tests/test_core.py | emauton/aoc2015 | f321571b623a0e7acaa173be57506e64bd32765f | [
"MIT"
] | null | null | null | tests/test_core.py | emauton/aoc2015 | f321571b623a0e7acaa173be57506e64bd32765f | [
"MIT"
] | null | null | null | from aoc2015.core import dispatch
def test_dispatch_fail(capsys):
'''Dispatch fails properly when passed a bad day'''
# capsys is a pytest fixture that allows asserts agains stdout/stderr
# https://docs.pytest.org/en/stable/capture.html
dispatch(['204'])
captured = capsys.readouterr()
assert 'No module named aoc2015.day204' in captured.out
def test_dispatch_day0(capsys):
'''Dispatch to "template" day0 module works'''
# capsys is a pytest fixture that allows asserts agains stdout/stderr
# https://docs.pytest.org/en/stable/capture.html
dispatch(['0', 'arg1', 'arg2'])
captured = capsys.readouterr()
assert "day0: ['arg1', 'arg2']" in captured.out
| 35.15 | 73 | 0.702703 |
f79a3d652453d780701f42332b40981d8f1da3a1 | 2,749 | py | Python | bin/pannzer/operators/output_DE.py | nestorzaburannyi/annotate | e175226504efef811d4ac3914f2ab342968edf98 | [
"MIT"
] | 1 | 2021-11-26T17:29:56.000Z | 2021-11-26T17:29:56.000Z | bin/pannzer/operators/output_DE.py | nestorzaburannyi/annotate | e175226504efef811d4ac3914f2ab342968edf98 | [
"MIT"
] | 1 | 2020-03-19T21:12:23.000Z | 2020-03-19T21:12:23.000Z | bin/pannzer/operators/output_DE.py | nestorzaburannyi/annotate | e175226504efef811d4ac3914f2ab342968edf98 | [
"MIT"
] | null | null | null | from myoperator import BlockOperator
import re
| 45.816667 | 167 | 0.476901 |
f79b68b39e1d3fc6804f9e60df51a84aec79e5e5 | 6,016 | py | Python | Utility.py | psarkozy/HWTester | 2553398f4ac8645a897b4f41bd36a21d54d2b177 | [
"MIT"
] | null | null | null | Utility.py | psarkozy/HWTester | 2553398f4ac8645a897b4f41bd36a21d54d2b177 | [
"MIT"
] | null | null | null | Utility.py | psarkozy/HWTester | 2553398f4ac8645a897b4f41bd36a21d54d2b177 | [
"MIT"
] | 2 | 2019-11-11T12:44:17.000Z | 2020-11-20T11:08:53.000Z | import os
from StringIO import StringIO
from zipfile import ZipFile
import subprocess
import shutil
import fcntl
import time
import signal
import imp
import sys,traceback
| 39.064935 | 223 | 0.636137 |
f79bf4e8cdd9d2e6fe7f0243351b84e61c125647 | 1,432 | py | Python | wagtailsharing/tests/test_urls.py | mikiec84/wagtail-sharing | e3c338dae3327d955f058b5eb2f311d4dc0cbbf7 | [
"CC0-1.0"
] | 1 | 2019-02-25T21:56:56.000Z | 2019-02-25T21:56:56.000Z | wagtailsharing/tests/test_urls.py | mikiec84/wagtail-sharing | e3c338dae3327d955f058b5eb2f311d4dc0cbbf7 | [
"CC0-1.0"
] | null | null | null | wagtailsharing/tests/test_urls.py | mikiec84/wagtail-sharing | e3c338dae3327d955f058b5eb2f311d4dc0cbbf7 | [
"CC0-1.0"
] | null | null | null | from __future__ import absolute_import, unicode_literals
try:
from importlib import reload
except ImportError:
pass
from django.conf.urls import url
from django.test import TestCase
from mock import patch
try:
import wagtail.core.urls as wagtail_core_urls
except ImportError: # pragma: no cover; fallback for Wagtail <2.0
import wagtail.wagtailcore.urls as wagtail_core_urls
import wagtailsharing.urls
| 28.078431 | 76 | 0.657821 |
f79fdffacf758f6c9d435f6cbf7feae5c9594ded | 1,295 | py | Python | py2neo/timing.py | VitalyRomanov/py2neo | 2d0683cf2ab8b77b0c5bbba4eade0003c68d5905 | [
"Apache-2.0"
] | null | null | null | py2neo/timing.py | VitalyRomanov/py2neo | 2d0683cf2ab8b77b0c5bbba4eade0003c68d5905 | [
"Apache-2.0"
] | null | null | null | py2neo/timing.py | VitalyRomanov/py2neo | 2d0683cf2ab8b77b0c5bbba4eade0003c68d5905 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2021, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from monotonic import monotonic
| 27.553191 | 74 | 0.68417 |
f7a18de75e2c5d8f0a8fefb62bdf2a19e6aa2d6f | 1,662 | py | Python | versions/versions.py | juanfec/juan_rueda_test | 1e5d6bccadc24569db26b5a8b58486c8295cef12 | [
"MIT"
] | null | null | null | versions/versions.py | juanfec/juan_rueda_test | 1e5d6bccadc24569db26b5a8b58486c8295cef12 | [
"MIT"
] | null | null | null | versions/versions.py | juanfec/juan_rueda_test | 1e5d6bccadc24569db26b5a8b58486c8295cef12 | [
"MIT"
] | null | null | null | # check to strings that represent version numbers and finds the greatest,
# 'equals' if they are the same version or 'Invalid Format'
# example: 1.2 is greater than 1.1.
# for reusability this function just returns the version number or the word equals
# if a more elaborated answer is needed an interface would be usefull | 42.615385 | 82 | 0.570397 |
e38a4870c5f8e88d0175c33a20658010577ed3a2 | 824 | py | Python | packages/pyright-internal/src/tests/samples/match7.py | Strum355/pyright | 01f15ce31f8f6cd9a054e21fc48cb762923ae25d | [
"MIT"
] | null | null | null | packages/pyright-internal/src/tests/samples/match7.py | Strum355/pyright | 01f15ce31f8f6cd9a054e21fc48cb762923ae25d | [
"MIT"
] | null | null | null | packages/pyright-internal/src/tests/samples/match7.py | Strum355/pyright | 01f15ce31f8f6cd9a054e21fc48cb762923ae25d | [
"MIT"
] | null | null | null | # This sample tests type narrowing of subject expressions for
# match statements.
from typing import Literal
| 23.542857 | 76 | 0.527913 |
e38a5b70b844182225b5e64d4a3c3af08686053b | 1,869 | py | Python | examples/finance/stocks_baselines.py | TianhaoFu/MultiBench | b174a3187124d6f92be1ff3b487eef292f7883bb | [
"MIT"
] | null | null | null | examples/finance/stocks_baselines.py | TianhaoFu/MultiBench | b174a3187124d6f92be1ff3b487eef292f7883bb | [
"MIT"
] | null | null | null | examples/finance/stocks_baselines.py | TianhaoFu/MultiBench | b174a3187124d6f92be1ff3b487eef292f7883bb | [
"MIT"
] | null | null | null | import sys
import os
sys.path.append(os.getcwd())
import argparse
import numpy as np
import pmdarima
import torch
import torch.nn.functional as F
from torch import nn
from fusions.common_fusions import Stack
from unimodals.common_models import LSTMWithLinear
from datasets.stocks.get_data import get_dataloader
parser = argparse.ArgumentParser()
parser.add_argument('--input-stocks', metavar='input', help='input stocks')
parser.add_argument('--target-stock', metavar='target', help='target stock')
args = parser.parse_args()
print('Input: ' + args.input_stocks)
print('Target: ' + args.target_stock)
stocks = sorted(args.input_stocks.split(' '))
train_loader, val_loader, test_loader = get_dataloader(stocks, stocks, [args.target_stock], modality_first=True)
baselines()
| 41.533333 | 129 | 0.721776 |
e38a5e9b805eaee8a5ed1bb8c56f9e375e34bdfa | 3,329 | py | Python | textmate/bundles/LaTeX.tmbundle/Support/lib/Python/tmprefs.py | leo-brewin/hybrid-latex | 2debaf3f97eb551928d08dc4baded7ef7a4ab29a | [
"MIT"
] | 16 | 2018-10-12T06:31:49.000Z | 2022-03-31T23:16:08.000Z | textmate/bundles/LaTeX.tmbundle/Support/lib/Python/tmprefs.py | leo-brewin/hybrid-latex | 2debaf3f97eb551928d08dc4baded7ef7a4ab29a | [
"MIT"
] | null | null | null | textmate/bundles/LaTeX.tmbundle/Support/lib/Python/tmprefs.py | leo-brewin/hybrid-latex | 2debaf3f97eb551928d08dc4baded7ef7a4ab29a | [
"MIT"
] | 2 | 2021-06-27T03:29:40.000Z | 2022-03-30T17:17:18.000Z | # -- Imports ------------------------------------------------------------------
from __future__ import print_function
from __future__ import unicode_literals
from Foundation import CFPreferencesAppSynchronize, CFPreferencesCopyAppValue
from os import getenv
# -- Class --------------------------------------------------------------------
if __name__ == '__main__':
from doctest import testmod
testmod()
| 31.11215 | 79 | 0.545209 |
e38ad8911f43a8dc1cf2caa5fecf9c3fdcb3062c | 1,916 | py | Python | parsetab.py | UVG-Teams/analizador-lexico-sintactico | 71ac98e11fc63c6fcba36e94d9d40f0e59b6d55f | [
"MIT"
] | null | null | null | parsetab.py | UVG-Teams/analizador-lexico-sintactico | 71ac98e11fc63c6fcba36e94d9d40f0e59b6d55f | [
"MIT"
] | null | null | null | parsetab.py | UVG-Teams/analizador-lexico-sintactico | 71ac98e11fc63c6fcba36e94d9d40f0e59b6d55f | [
"MIT"
] | null | null | null |
# parsetab.py
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'leftIMPLIESSIMPLIESleftANDORleftRPARENLPARENrightNEGATIONALPHABET AND IMPLIES LPAREN NEGATION OR PREDICATE RPAREN SIMPLIESexpr : expr AND exprexpr : ALPHABETexpr : expr OR exprexpr : NEGATION exprexpr : expr IMPLIES exprexpr : expr SIMPLIES exprexpr : LPAREN expr RPAREN'
_lr_action_items = {'ALPHABET':([0,3,4,5,6,7,8,],[2,2,2,2,2,2,2,]),'NEGATION':([0,3,4,5,6,7,8,],[3,3,3,3,3,3,3,]),'LPAREN':([0,3,4,5,6,7,8,],[4,4,4,4,4,4,4,]),'$end':([1,2,9,11,12,13,14,15,],[0,-2,-4,-1,-3,-5,-6,-7,]),'AND':([1,2,9,10,11,12,13,14,15,],[5,-2,-4,5,-1,-3,5,5,-7,]),'OR':([1,2,9,10,11,12,13,14,15,],[6,-2,-4,6,-1,-3,6,6,-7,]),'IMPLIES':([1,2,9,10,11,12,13,14,15,],[7,-2,-4,7,-1,-3,-5,-6,-7,]),'SIMPLIES':([1,2,9,10,11,12,13,14,15,],[8,-2,-4,8,-1,-3,-5,-6,-7,]),'RPAREN':([2,9,10,11,12,13,14,15,],[-2,-4,15,-1,-3,-5,-6,-7,]),}
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'expr':([0,3,4,5,6,7,8,],[1,9,10,11,12,13,14,]),}
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> expr","S'",1,None,None,None),
('expr -> expr AND expr','expr',3,'p_and','main.py',48),
('expr -> ALPHABET','expr',1,'p_expr','main.py',52),
('expr -> expr OR expr','expr',3,'p_or','main.py',56),
('expr -> NEGATION expr','expr',2,'p_negation','main.py',60),
('expr -> expr IMPLIES expr','expr',3,'p_implies','main.py',64),
('expr -> expr SIMPLIES expr','expr',3,'p_simplies','main.py',69),
('expr -> LPAREN expr RPAREN','expr',3,'p_parens','main.py',73),
]
| 50.421053 | 538 | 0.601775 |
e38c0cf17108b05c381e4d71a5abca8083aae594 | 6,936 | py | Python | Q1_ab.py | dkilike/Image-Segmentation | 6a3ab7f96d105475051502aba8013c242eba5fcb | [
"ADSL"
] | 2 | 2019-05-05T01:52:23.000Z | 2019-05-06T22:56:54.000Z | Q1_ab.py | dkilike/Image-Segmentation | 6a3ab7f96d105475051502aba8013c242eba5fcb | [
"ADSL"
] | null | null | null | Q1_ab.py | dkilike/Image-Segmentation | 6a3ab7f96d105475051502aba8013c242eba5fcb | [
"ADSL"
] | null | null | null | '''Please write a program to read the scan and print out
The maximum voxel intensity
The mean voxel intensity
The coordinates of the centre of the image volume, in the scanner coordinate system.
'''
import pydicom
import numpy as np
import matplotlib.pyplot as plt
import cv2
import glob
import os
import image_slicer
'''form a 3D array by stacking all CT scan slices'''
# load the DICOM files
src_path = r'C:\Users\GGPC\SegmentationTest\Image-Segmentation'
DICOM_dir_path = src_path + '\DICOM data'
# snapshot dicom file
files = []
for fname in glob.glob(DICOM_dir_path+'\*', recursive=False):
print("loading: {}".format(fname))
files.append(pydicom.read_file(fname))
print("file count: {}".format(len(files)))
# skip files with no SliceLocation
slices = []
skipcount = 0
for f in files:
if hasattr(f, 'SliceLocation'):
slices.append(f)
else:
skipcount = skipcount + 1
print("skipped, no SliceLocation: {}".format(skipcount))
# ensure they are in the correct order
slices = sorted(slices, key=lambda s: s.SliceLocation)
# create 3D array (assuming that each slice has the same pixel size)
img_shape = list(slices[0].pixel_array.shape)
img_shape.append(len(slices))
img3d = np.zeros(img_shape)
# fill 3D array with the images from the files
for i, s in enumerate(slices):
img3d[:, :, i] = s.pixel_array
input("Press Enter to continue showing Question 1 (a) results...")
'''start solving Q1_a read and print'''
# first two questions are straight-forward
print()
print('Question 1 (a)')
print('i. The maximum voxel intensity is {}'.format(img3d.max()))
print('ii. The mean voxel intensity is {}'.format(img3d.mean()))
# centre of the image volume is at (256.5,256.5) pixel position between the 100th and 101st slices
ImagePlanePosition_of_100th_slice = np.array(slices[99].ImagePositionPatient)
RowChangeInX_of_100th_slice = np.array(slices[99].ImageOrientationPatient[0:3]) * slices[99].PixelSpacing[0] * 256.5
ColumnChangeInY_of_100th_slice = np.array(slices[99].ImageOrientationPatient[3:6]) * slices[99].PixelSpacing[1] * 256.5
coordinate_of_100th_slice = ImagePlanePosition_of_100th_slice + RowChangeInX_of_100th_slice + ColumnChangeInY_of_100th_slice
ImagePlanePosition_of_101th_slice = np.array(slices[100].ImagePositionPatient)
RowChangeInX_of_101th_slice = np.array(slices[100].ImageOrientationPatient[0:3]) * slices[100].PixelSpacing[0] * 256.5
ColumnChangeInY_of_101th_slice = np.array(slices[100].ImageOrientationPatient[3:6]) * slices[100].PixelSpacing[1] * 256.5
coordinate_of_101th_slice = ImagePlanePosition_of_101th_slice + RowChangeInX_of_101th_slice + ColumnChangeInY_of_101th_slice
coordinate_of_ImageVolumeCentre = (coordinate_of_100th_slice+coordinate_of_101th_slice)/2
print('iii. coordinates of the centre of the image volume is {} mm'.format(list(coordinate_of_ImageVolumeCentre)))
input("Press Enter to continue showing Question 1 (b) results...")
'''start solving Q1_b'''
# plot the maximum voxel intensity of each slice
MaxVoxelList=[]
MeanVoxelList=[]
for s in slices:
MaxVoxelList.append(s.pixel_array.max())
MeanVoxelList.append(s.pixel_array.mean())
print('Close plot to continue')
plt.scatter(range(0,len(MaxVoxelList)), MaxVoxelList)
plt.xlabel('slice index (1-200)')
plt.ylabel('maximum voxel intensity')
plt.title('Scatter of Max Voxel over Slice Index')
plt.show()
# selection voxel intensity threshold of 3000
Threshold = 3000
print('Close plot of an mask dection example to continue')
a1 = plt.subplot(2, 2, 1)
plt.imshow(img3d[:, :, 30])
a1 = plt.subplot(2, 2, 2)
plt.imshow(img3d[:, :, 30]>Threshold)
a1 = plt.subplot(2, 2, 3)
plt.imshow(img3d[:, :, 176])
a1 = plt.subplot(2, 2, 4)
plt.imshow(img3d[:, :, 176]>Threshold)
plt.show()
input("Press Enter to continue generating images and masks to Folders: SegmentationMask(metal mask) and Images(ct scan slices)...")
# generate images and masks
NameCount = 300
for s in slices:
ImageName = '\SegmentationMask\IM00' + str(NameCount) + '.png'
img = s.pixel_array>Threshold
img = img.astype('uint8')*255
cv2.imwrite(src_path + ImageName, img)
print(ImageName + ' has been saved')
NameCount+=1
NameCount = 300
for s in slices:
ImageName = '\Images\IM00' + str(NameCount) + '.png'
img = (s.pixel_array - img3d.min())/(img3d.max()-img3d.min())*255
cv2.imwrite(src_path + ImageName, img)
print(ImageName + ' has been saved')
NameCount+=1
# NameCount = 300
# for s in slices:
# ImageName = '\SegmentationBoneMask\IM00' + str(NameCount) + '.png'
# img = s.pixel_array>0
# img = img.astype('uint8')*255
# cv2.imwrite(src_path + ImageName, img)
# print(ImageName + ' has been saved')
# NameCount+=1
# NameCount = 300
# for s in slices:
# ImageName = '\Dataset_Slicer\masks\IM00' + str(NameCount) + '.png'
# img = s.pixel_array>Threshold
# img = img.astype('uint8')*255
# cv2.imwrite(src_path + ImageName, img)
# image_slicer.slice(src_path + ImageName,14)
# os.remove(src_path + ImageName)
# print(ImageName + ' has been saved')
# NameCount+=1
#
# NameCount = 300
# for s in slices:
# ImageName = '\Dataset_Slicer\images\IM00' + str(NameCount) + '.png'
# img = (s.pixel_array - img3d.min())/(img3d.max()-img3d.min())*255
# cv2.imwrite(src_path + ImageName, img)
# image_slicer.slice(src_path + ImageName, 14)
# os.remove(src_path + ImageName)
# print(ImageName + ' has been saved')
# NameCount+=1
#
# NameCount = 300
# for s in slices:
# ImageName = '\Dataset_Slicer\masks\IM00' + str(NameCount) + '.png'
# img = s.pixel_array>0
# img = img.astype('uint8')*255
# cv2.imwrite(src_path + ImageName, img)
# print(ImageName + ' has been saved')
# NameCount+=1
# os.mkdir(src_path + '\\Dataset')
# for fname in glob.glob(DICOM_dir_path + '\*', recursive=False):
# os.mkdir(src_path + '\\Dataset' + fname[-8:])
# os.mkdir(src_path + '\\Dataset' + fname[-8:] + '\\images')
# os.mkdir(src_path + '\\Dataset' + fname[-8:] + '\\masks')
#
# NameCount = 300
# for s in slices:
# ImageName = '\Dataset\IM00' + str(NameCount) + '\masks\MetalMask.png'
# img = s.pixel_array>Threshold
# img = img.astype('uint8')*255
# cv2.imwrite(src_path + ImageName, img)
# print(ImageName + ' has been saved')
# NameCount+=1
#
# NameCount = 300
# for s in slices:
# ImageName = '\Dataset\IM00' + str(NameCount) + '\images' + '\IM00' + str(NameCount) + '.png'
# img = (s.pixel_array - img3d.min())/(img3d.max()-img3d.min())*255
# cv2.imwrite(src_path + ImageName, img)
# print(ImageName + ' has been saved')
# NameCount+=1
#
# NameCount = 300
# for s in slices:
# ImageName = '\Dataset\IM00' + str(NameCount) + '\masks\PositiveVoxelMask.png'
# img = s.pixel_array>0
# img = img.astype('uint8')*255
# cv2.imwrite(src_path + ImageName, img)
# print(ImageName + ' has been saved')
# NameCount+=1 | 36.698413 | 131 | 0.697953 |
e38c2e039b77985412d12e168040f3115a42b4f3 | 1,759 | py | Python | 12/day_twelve.py | tmay-sarsaparilla/advent-of-code-2021 | 3cd827df57d315dd96627544b9f5c31b7db1aa11 | [
"MIT"
] | null | null | null | 12/day_twelve.py | tmay-sarsaparilla/advent-of-code-2021 | 3cd827df57d315dd96627544b9f5c31b7db1aa11 | [
"MIT"
] | null | null | null | 12/day_twelve.py | tmay-sarsaparilla/advent-of-code-2021 | 3cd827df57d315dd96627544b9f5c31b7db1aa11 | [
"MIT"
] | null | null | null |
if __name__ == "__main__":
main()
| 34.490196 | 112 | 0.604321 |
e38df443614ff2ea0fe09c328aa447f49ee9a8df | 146 | py | Python | pythonic_binance/__init__.py | hANSIc99/pythonic-binance | df25353cc20f545224d97a34544844029dabe4ce | [
"MIT"
] | 1 | 2020-12-23T03:20:01.000Z | 2020-12-23T03:20:01.000Z | pythonic_binance/__init__.py | hANSIc99/pythonic-binance | df25353cc20f545224d97a34544844029dabe4ce | [
"MIT"
] | null | null | null | pythonic_binance/__init__.py | hANSIc99/pythonic-binance | df25353cc20f545224d97a34544844029dabe4ce | [
"MIT"
] | null | null | null | """An unofficial Python wrapper for the Binance exchange API v3
.. moduleauthor:: Sam McHardy
.. modified by Stephan Avenwedde for Pythonic
"""
| 20.857143 | 63 | 0.753425 |
e38e7b771230b46e0d59d9e9f903a942ff021817 | 1,643 | py | Python | tests/unit/test_question_answer.py | Lunga001/pmg-cms-2 | 10cea3979711716817b0ba2a41987df73f2c7642 | [
"Apache-2.0"
] | 2 | 2019-06-11T20:46:43.000Z | 2020-08-27T22:50:32.000Z | tests/unit/test_question_answer.py | Lunga001/pmg-cms-2 | 10cea3979711716817b0ba2a41987df73f2c7642 | [
"Apache-2.0"
] | 70 | 2017-05-26T14:04:06.000Z | 2021-06-30T10:21:58.000Z | tests/unit/test_question_answer.py | OpenUpSA/pmg-cms-2 | ec5f259dae81674ac7a8cdb80f124a8b0f167780 | [
"Apache-2.0"
] | 4 | 2017-08-29T10:09:30.000Z | 2021-05-25T11:29:03.000Z | import os
from tests import PMGTestCase
from tests.fixtures import dbfixture, CommitteeQuestionData
| 33.530612 | 80 | 0.645161 |
e38eb4838b0bca24f076f914d11b9ed6e01734df | 1,470 | py | Python | Job-Interviews/Python/BinaryTrees/Problem2.py | JuanPabloMontoya271/ITC | f5899e7a8fed4e9f90e97bb3af635a276d9cf13a | [
"MIT"
] | 1 | 2020-11-02T15:18:16.000Z | 2020-11-02T15:18:16.000Z | Job-Interviews/Python/BinaryTrees/Problem2.py | JuanPabloMontoya271/ITC | f5899e7a8fed4e9f90e97bb3af635a276d9cf13a | [
"MIT"
] | null | null | null | Job-Interviews/Python/BinaryTrees/Problem2.py | JuanPabloMontoya271/ITC | f5899e7a8fed4e9f90e97bb3af635a276d9cf13a | [
"MIT"
] | 1 | 2021-10-30T14:18:29.000Z | 2021-10-30T14:18:29.000Z | root = Tree(4, left = Tree(3), right=Tree(5, left= Tree(4)))
#InOrderTraversal
print("In order:", InOrderTraversal(root))
#PreOrderTraversal
print("Pre order:", PreOrderTraversal(root))
#PostOrderTraversal
print("Post order:", PostOrderTraversal(root))
print("Level order Traversal: ", LevelOrderTraversal(root)) | 28.269231 | 61 | 0.614286 |
e38ef669498a375d8712d764abb891d4af6ac615 | 4,429 | py | Python | csv_analyzer/apps/dataset/api/dataset.py | saduqz/csv-analyzer-test | 732d4902aeba9278e7547ed5a83e4a482790076c | [
"MIT"
] | null | null | null | csv_analyzer/apps/dataset/api/dataset.py | saduqz/csv-analyzer-test | 732d4902aeba9278e7547ed5a83e4a482790076c | [
"MIT"
] | null | null | null | csv_analyzer/apps/dataset/api/dataset.py | saduqz/csv-analyzer-test | 732d4902aeba9278e7547ed5a83e4a482790076c | [
"MIT"
] | null | null | null | from datetime import datetime
# Rest framework
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.mixins import RetrieveModelMixin, ListModelMixin, UpdateModelMixin, CreateModelMixin
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from rest_framework.permissions import IsAuthenticated
# Serializers
from csv_analyzer.apps.dataset.serializers import (
DataSetModelSerializer,
CreateDataSetModelSerializer,
FileDataSetModelSerializer,
)
# Models
from csv_analyzer.apps.dataset.models import DataSet
# Permissions
from csv_analyzer.apps.dataset.permissions.dataset import IsDataSetOwner
# Celery
from csv_analyzer.apps.dataset.tasks import populate_dataset_file
# MongoDB utils
from csv_analyzer.apps.mongodb.utils import MongoDBConnection
| 33.300752 | 109 | 0.664484 |
e38f3cccadcd0f9b76ee123f571fa527044e60b4 | 11,984 | py | Python | tfx_bsl/tfxio/tensor_to_arrow_test.py | brills/tfx-bsl | 089d6673a8d3cccef84ff3d6583808544d2da038 | [
"Apache-2.0"
] | null | null | null | tfx_bsl/tfxio/tensor_to_arrow_test.py | brills/tfx-bsl | 089d6673a8d3cccef84ff3d6583808544d2da038 | [
"Apache-2.0"
] | null | null | null | tfx_bsl/tfxio/tensor_to_arrow_test.py | brills/tfx-bsl | 089d6673a8d3cccef84ff3d6583808544d2da038 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx_bsl.tfxio.tensor_to_arrow."""
import numpy as np
import pyarrow as pa
import tensorflow as tf
from tfx_bsl.tfxio import tensor_adapter
from tfx_bsl.tfxio import tensor_to_arrow
from google.protobuf import text_format
from absl.testing import absltest
from absl.testing import parameterized
from tensorflow_metadata.proto.v0 import schema_pb2
_TF_TYPE_TO_ARROW_TYPE = {
tf.int8: pa.int8(),
tf.int16: pa.int16(),
tf.int32: pa.int32(),
tf.int64: pa.int64(),
tf.uint8: pa.uint8(),
tf.uint16: pa.uint16(),
tf.uint32: pa.uint32(),
tf.uint64: pa.uint64(),
tf.float32: pa.float32(),
tf.float64: pa.float64(),
tf.string: pa.binary(),
}
_ROW_PARTITION_DTYPES = {
"INT64": np.int64,
"INT32": np.int32
}
_CONVERT_TEST_CASES = [
dict(
testcase_name="multiple_tensors",
type_specs={
"sp1": tf.SparseTensorSpec([None, None], tf.int32),
"sp2": tf.SparseTensorSpec([None, None], tf.string),
},
expected_schema={
"sp1": pa.list_(pa.int32()),
"sp2": pa.list_(pa.binary()),
},
expected_tensor_representations={
"sp1": """varlen_sparse_tensor { column_name: "sp1" }""",
"sp2": """varlen_sparse_tensor { column_name: "sp2" }""",
},
tensor_input={
"sp1":
tf.SparseTensor(
values=tf.constant([1, 2], dtype=tf.int32),
indices=[[0, 0], [2, 0]],
dense_shape=[4, 1]),
"sp2":
tf.SparseTensor(
values=[b"aa", b"bb"],
indices=[[2, 0], [2, 1]],
dense_shape=[4, 2])
},
expected_record_batch={
"sp1":
pa.array([[1], [], [2], []], type=pa.list_(pa.int32())),
"sp2":
pa.array([[], [], [b"aa", b"bb"], []],
type=pa.list_(pa.binary()))
}),
dict(
testcase_name="ragged_tensors",
type_specs={
"sp1":
tf.RaggedTensorSpec(
tf.TensorShape([2, None]),
tf.int64,
ragged_rank=1,
row_splits_dtype=tf.int64),
"sp2":
tf.RaggedTensorSpec(
tf.TensorShape([2, None]),
tf.string,
ragged_rank=1,
row_splits_dtype=tf.int64),
},
expected_schema={
"sp1": pa.list_(pa.int64()),
"sp2": pa.list_(pa.binary()),
},
expected_tensor_representations={
"sp1":
"""ragged_tensor {
feature_path {
step: "sp1"
}
row_partition_dtype: INT64
}""",
"sp2":
"""ragged_tensor {
feature_path {
step: "sp2"
}
row_partition_dtype: INT64
}""",
},
tensor_input={
"sp1":
tf.RaggedTensor.from_row_splits(
values=np.asarray([1, 5, 9], dtype=np.int64),
row_splits=np.asarray([0, 2, 3], dtype=np.int64)),
"sp2":
tf.RaggedTensor.from_row_splits(
values=np.asarray([b"x", b"y", b"z"], dtype=np.str),
row_splits=np.asarray([0, 2, 3], dtype=np.int64))
},
expected_record_batch={
"sp1": pa.array([[1, 5], [9]], type=pa.list_(pa.int32())),
"sp2": pa.array([[b"x", b"y"], [b"z"]], type=pa.list_(pa.binary())),
})
] + _make_2d_varlen_sparse_tensor_test_cases(
) + _make_3d_ragged_tensor_test_cases()
if __name__ == "__main__":
# Do not run these tests under TF1.x -- TensorToArrow does not support TF 1.x.
if tf.__version__ >= "2":
absltest.main()
| 38.044444 | 80 | 0.550484 |
e38f485bd754322f09d50cbe4ef3dae03d97f83a | 417 | py | Python | th_watchdog/failure_email.py | hwjeremy/th-watchdog | c32682f838fffa3396cabc3d83eeb4960c765fc9 | [
"MIT"
] | null | null | null | th_watchdog/failure_email.py | hwjeremy/th-watchdog | c32682f838fffa3396cabc3d83eeb4960c765fc9 | [
"MIT"
] | null | null | null | th_watchdog/failure_email.py | hwjeremy/th-watchdog | c32682f838fffa3396cabc3d83eeb4960c765fc9 | [
"MIT"
] | null | null | null | """
Thornleigh Farm - VPN Watchdog
Failure Email Module
author: hugh@blinkybeach.com
"""
from th_watchdog.email import Email
| 21.947368 | 58 | 0.690647 |
e390652d724f8e334ded89e33631ccb73990db8b | 2,749 | py | Python | nemo/nemo/backends/pytorch/common/data.py | petermartigny/NeMo | b20821e637314940e36b63d32c601c43d1b74051 | [
"Apache-2.0"
] | 1 | 2020-03-22T11:23:11.000Z | 2020-03-22T11:23:11.000Z | nemo/nemo/backends/pytorch/common/data.py | petermartigny/NeMo | b20821e637314940e36b63d32c601c43d1b74051 | [
"Apache-2.0"
] | null | null | null | nemo/nemo/backends/pytorch/common/data.py | petermartigny/NeMo | b20821e637314940e36b63d32c601c43d1b74051 | [
"Apache-2.0"
] | 1 | 2019-10-23T01:19:19.000Z | 2019-10-23T01:19:19.000Z | __all__ = ['TextDataLayer']
from functools import partial
import torch
from torch.utils.data import DataLoader, DistributedSampler
from nemo.backends.pytorch.common.parts import TextDataset
from nemo.backends.pytorch.nm import DataLayerNM
from nemo.core import DeviceType
from nemo.core.neural_types import *
from nemo.utils.misc import pad_to
| 29.244681 | 79 | 0.616588 |
e39338fd7b675e7103c88c702302ca7865a71de5 | 21,161 | py | Python | bot.py | marsDurden/UnipdBot | 402b74f6bd876265b952f052e2c132f6aa3c050d | [
"Unlicense"
] | 4 | 2018-04-12T03:39:36.000Z | 2019-11-26T07:52:30.000Z | bot.py | marsDurden/UnipdBot | 402b74f6bd876265b952f052e2c132f6aa3c050d | [
"Unlicense"
] | null | null | null | bot.py | marsDurden/UnipdBot | 402b74f6bd876265b952f052e2c132f6aa3c050d | [
"Unlicense"
] | 1 | 2019-10-07T16:50:48.000Z | 2019-10-07T16:50:48.000Z | import logging
from datetime import time, timedelta
from functools import wraps
# Telegram bot libraries
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackQueryHandler
from telegram import KeyboardButton, ReplyKeyboardMarkup, ParseMode, Bot, InlineKeyboardButton, InlineKeyboardMarkup
from telegram.error import TelegramError, Unauthorized, BadRequest, TimedOut, ChatMigrated, NetworkError
# Local files
from utils import *
from orario import *
import config
from captions import Captions
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.WARNING)
logger = logging.getLogger(__name__)
# Captions class handles languages
languages = Captions(config.supported_languages, config.captions_path)
# Decorators
# Update Handlers
def callback_orario(update, context):
data = update.callback_query.data[2:]
ultima_data = update.callback_query.message.text.splitlines()[0][-11:-1]
u_id = str(update.callback_query.from_user.id)
chat_id = update.callback_query.message.chat_id
lang_str = languages.get_reply('orario', lang=get_lang('', u_id=u_id))
reply, keyboard = orarioSaveSetting(chat_id, data, lang_str, last_date=ultima_data)
reply_markup = InlineKeyboardMarkup(keyboard)
context.bot.editMessageText(text=reply,
chat_id=chat_id,
message_id=update.callback_query.message.message_id,
parse_mode=ParseMode.MARKDOWN,
reply_markup=reply_markup)
def callback_settings(update, context):
data = update.callback_query.data[2:].split('-')
u_id = str(update.callback_query.from_user.id)
chat_id = update.callback_query.message.chat_id
if data[0] == 'alarm':
if data[1] == 'on':
# Chiude il job
unset_job_orario(str(chat_id), context.job_queue)
set_alarm_value(u_id, None)
elif data[1] == 'off':
# Scelta timing orario
lang_list = languages.get_reply('settings', lang=get_lang('', u_id=u_id))
markup = []
for hour in [5, 7, 9, 12, 18, 21]:
markup.append([InlineKeyboardButton(str(hour)+':00', callback_data='2-alarm-set-'+str(hour)+':00'), InlineKeyboardButton(str(hour)+':30', callback_data='2-alarm-set-'+str(hour)+':30'),
InlineKeyboardButton(str(hour+1)+':00', callback_data='2-alarm-set-'+str(hour+1)+':00'), InlineKeyboardButton(str(hour+1)+':30', callback_data='2-alarm-set-'+str(hour+1)+':30')])
markup = InlineKeyboardMarkup(markup)
context.bot.editMessageText(text=lang_list[5],
chat_id=chat_id,
message_id=update.callback_query.message.message_id,
parse_mode=ParseMode.MARKDOWN,
reply_markup=markup)
return
elif data[1] == 'set':
set_job_orario(str(chat_id), u_id, context.job_queue, orario=data[2])
set_alarm_value(u_id, data[2])
elif data[0] == 'mensa':
if data[1] == 'enable':
# Scelta mensa
mense_list = languages.get_keyboard('mensa')
lang_list = languages.get_reply('settings', lang=get_lang('', u_id=u_id))
markup = []
for row in mense_list:
for mensa in row:
if mensa != '/home':
markup.append([InlineKeyboardButton(mensa.replace('/',''), callback_data='2-mensa-set-'+mensa.replace('/',''))])
markup = InlineKeyboardMarkup(markup)
context.bot.editMessageText(text=lang_list[9],
chat_id=chat_id,
message_id=update.callback_query.message.message_id,
parse_mode=ParseMode.MARKDOWN,
reply_markup=markup)
return
elif data[1] == 'set':
set_fav_mensa(u_id, data[2])
elif data[1] == 'disable':
set_fav_mensa(u_id, None)
elif data[0] == 'lang':
changed = set_lang(u_id, data[1])
if not changed: return
reply, keyboard = get_user_settings(update, languages.get_reply('settings', lang=get_lang('', u_id=u_id)), u_id=u_id)
reply_markup = InlineKeyboardMarkup(keyboard)
context.bot.editMessageText(text=reply,
chat_id=chat_id,
message_id=update.callback_query.message.message_id,
parse_mode=ParseMode.MARKDOWN,
reply_markup=reply_markup)
def job_orario(context):
chat_id = context.job.context[0]
u_id = context.job.context[0]
lang_str = languages.get_reply('orario', lang=get_lang('', u_id=u_id))
reply, keyboard = orarioSetup(chat_id, lang_str, resetDate=True)
# Check if orario is empty
if lang_str['text'][9] in reply: return
reply_markup = InlineKeyboardMarkup(keyboard)
context.bot.sendMessage(chat_id=chat_id,
text=reply,
parse_mode=ParseMode.MARKDOWN,
disable_notification=True,
reply_markup=reply_markup)
def set_job_orario(chat_id, u_id, job_queue, orario):
try:
# 0: lun, 1: mar, 2: mer, 3: gio, 4: ven
orario = orario.split(':')
job_queue.run_daily(job_orario, time=time(int(orario[0]), int(orario[1]), 0), days=(0, 1, 2, 3, 4), context=[chat_id, u_id])
#job_queue.run_repeating(job_orario, timedelta(seconds=10), context=[chat_id, u_id]) # For testing
except (IndexError, ValueError):
pass
def unset_job_orario(chat_id, job_queue):
for job in job_queue.jobs():
try:
if job.context[0] == chat_id:
job.schedule_removal()
except:
pass
def admin_forward(update, context):
context.bot.forwardMessage(chat_id=config.botAdminID,
from_chat_id=get_chat_id(update),
message_id=update.message.message_id)
text = '<code>/reply ' + str(update.message.chat.id) + '</code>'
context.bot.sendMessage(chat_id=config.botAdminID,
parse_mode=ParseMode.HTML,
disable_notification=True,
text=text)
if __name__ == '__main__':
main()
| 40.460803 | 228 | 0.641038 |
e39343d1ccb1c9771b5f47a5eb48d8ff84409b31 | 2,591 | py | Python | server/twitter.py | abijith-kp/Emolytics | 00e94798ab20621b51f6ce2a058e0dd8dec1cdba | [
"BSD-3-Clause"
] | null | null | null | server/twitter.py | abijith-kp/Emolytics | 00e94798ab20621b51f6ce2a058e0dd8dec1cdba | [
"BSD-3-Clause"
] | null | null | null | server/twitter.py | abijith-kp/Emolytics | 00e94798ab20621b51f6ce2a058e0dd8dec1cdba | [
"BSD-3-Clause"
] | null | null | null | from server import db, auth, emolytics
from server.models import Tweet
from classifier import create_classifier
from tweepy import Stream
from tweepy.streaming import StreamListener
from flask.ext.rq import job
import json
import random
from multiprocessing import Process
from sqlalchemy.exc import IntegrityError
'''
def start_thread(track):
global process
if process != None and process.is_alive():
process.terminate()
process = Process(target=start_streaming, kwargs={"track": track})
process.start()
print "Started the thread"
def start_classification():
global clf_process
if clf_process != None and clf_process.is_alive():
clf_process.terminate()
clf_process = Process(target=classify)
clf_process.start()
print "Started classification"
'''
| 27.56383 | 80 | 0.580857 |
e3946d8baaf74d520467a9069ae0fdd15f75585b | 1,340 | py | Python | misc_code/extractGridFeatures.py | Lab-Work/gpsresilience | 7c5183092013d44ce6d295469880502407c0e4ac | [
"NCSA",
"Unlicense"
] | 21 | 2015-03-10T19:13:38.000Z | 2021-06-27T06:01:00.000Z | misc_code/extractGridFeatures.py | Lab-Work/gpsresilience | 7c5183092013d44ce6d295469880502407c0e4ac | [
"NCSA",
"Unlicense"
] | null | null | null | misc_code/extractGridFeatures.py | Lab-Work/gpsresilience | 7c5183092013d44ce6d295469880502407c0e4ac | [
"NCSA",
"Unlicense"
] | 17 | 2015-03-10T19:13:39.000Z | 2020-11-14T08:30:40.000Z | import csv
import os
import shutil
from datetime import datetime
from grid import *
#from cluster import *
from regions import *
start_time = datetime.now()
print("Allocating...")
#grid2
#gridSystem = GridSystem(-74.04, -73.775, 5, 40.63, 40.835, 5)
#gridname = "grid2"
#grid3
#gridSystem = GridSystem(-74.02, -73.938, 4, 40.7, 40.815, 6)
#gridname = "grid3"
#cluster1
#gridSystem = ClusterSystem("cluster1/clusters.csv")
#gridname = "cluster1"
gridSystem = RegionSystem("4year_features")
gridname = "region1"
invalids = 0
for y in ["FOIL2010", "FOIL2011", "FOIL2012", "FOIL2013"]:
for n in range(1,13):
filename = "../../new_chron/" + y + "/trip_data_" + str(n) + ".csv"
print("Reading file " + filename)
r = csv.reader(open(filename, "r"))
i = 0
header = True
for line in r:
if(header):
Trip.initHeader(line)
header = False
else:
trip = None
try:
trip = Trip(line)
except ValueError:
invalids += 1
if(trip!= None and (y!="FOIL" + str(trip.date.year) or n!= trip.date.month)):
trip.has_other_error = True
gridSystem.record(trip)
i += 1
if(i%1000000==0):
print("Read " + str(i) + " rows")
gridSystem.close()
end_time = datetime.now()
program_duration = end_time - start_time
print("Processing took " + str(program_duration))
| 20 | 81 | 0.636567 |
e394ba0d9db0eefab6e3defb03dc2daf2b6faade | 6,472 | py | Python | haiku/_src/stateful_test.py | madisonmay/dm-haiku | de95f6f83561edeb582d46b2e3bf135051792b91 | [
"Apache-2.0"
] | null | null | null | haiku/_src/stateful_test.py | madisonmay/dm-haiku | de95f6f83561edeb582d46b2e3bf135051792b91 | [
"Apache-2.0"
] | null | null | null | haiku/_src/stateful_test.py | madisonmay/dm-haiku | de95f6f83561edeb582d46b2e3bf135051792b91 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.stateful."""
from absl.testing import absltest
from haiku._src import base
from haiku._src import module
from haiku._src import stateful
from haiku._src import test_utils
from haiku._src import transform
import jax
import jax.numpy as jnp
import numpy as np
def _callback_prim(forward, backward):
prim = jax.core.Primitive("hk_callback")
prim.def_impl(f_impl)
prim.def_abstract_eval(f_impl)
jax.ad.deflinear(prim, b_impl)
return prim.bind
class CountingModule(module.Module):
class SquareModule(module.Module):
if __name__ == "__main__":
absltest.main()
| 30.102326 | 80 | 0.65204 |
e396807417ead50e7f98ae6a7599b85f383deff5 | 395 | py | Python | src/orion/algo/robo/__init__.py | lebrice/orion.algo.robo | f7e14e305619344ed9afd303fecbfcabda6ae7ce | [
"BSD-3-Clause"
] | null | null | null | src/orion/algo/robo/__init__.py | lebrice/orion.algo.robo | f7e14e305619344ed9afd303fecbfcabda6ae7ce | [
"BSD-3-Clause"
] | null | null | null | src/orion/algo/robo/__init__.py | lebrice/orion.algo.robo | f7e14e305619344ed9afd303fecbfcabda6ae7ce | [
"BSD-3-Clause"
] | 2 | 2020-09-28T15:18:19.000Z | 2021-06-29T20:27:18.000Z | # -*- coding: utf-8 -*-
"""
Wrapper for RoBO
"""
__descr__ = "TODO"
__license__ = "BSD 3-Clause"
__author__ = u"Epistmio"
__author_short__ = u"Epistmio"
__author_email__ = "xavier.bouthillier@umontreal.ca"
__copyright__ = u"2021, Epistmio"
__url__ = "https://github.com/Epistimio/orion.algo.robo"
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
| 21.944444 | 56 | 0.741772 |
e399392a521bfe1faf93a652bafa6c185ca4d8e0 | 2,343 | py | Python | minimum/minimum-function.py | gunater/Numerical-methods | 4cf676b7d3996b7e70c6f4b50b15acc330a0d763 | [
"MIT"
] | null | null | null | minimum/minimum-function.py | gunater/Numerical-methods | 4cf676b7d3996b7e70c6f4b50b15acc330a0d763 | [
"MIT"
] | null | null | null | minimum/minimum-function.py | gunater/Numerical-methods | 4cf676b7d3996b7e70c6f4b50b15acc330a0d763 | [
"MIT"
] | null | null | null | """
Znale kt , przy ktrym zasig skoku z wahada bdzie maksymalny. Naley
posuy si metod zotego podziau.
Mateusz Ostrowski
Index:216708
"""
import matplotlib.pyplot as plt
import numpy as np
while True:
h0 = input("podaj wysokosc:")
line0 = input("podaj dugo liny:")
a00 = input("podaj amplitude waha W stopniach:")
if int(line0) > int(h0):
print("Error: wysoko mniejsza od dugoci liny!!!")
else:
break
fig = plt.figure()
ax = fig.add_subplot(111)
a = Zloty_podzial(h0, line0, a00)
ax.plot(a.xw, a.f(a.xw, a.a0, a.h, a.line), "-b")
plt.show()
| 31.662162 | 193 | 0.521554 |
e399bf6533e93672377322199e348e30448cbeff | 3,967 | py | Python | app/products/product_info.py | Group-16-COSC-310/grocery-chat-bot | a9759dd0a6d5b91733267ec4ed156f85f45c05ff | [
"MIT"
] | null | null | null | app/products/product_info.py | Group-16-COSC-310/grocery-chat-bot | a9759dd0a6d5b91733267ec4ed156f85f45c05ff | [
"MIT"
] | 7 | 2022-03-10T00:24:51.000Z | 2022-03-19T01:37:18.000Z | app/products/product_info.py | Group-16-COSC-310/grocery-chat-bot | a9759dd0a6d5b91733267ec4ed156f85f45c05ff | [
"MIT"
] | 1 | 2022-03-31T03:28:27.000Z | 2022-03-31T03:28:27.000Z | from app.database import MOCK_PRODUCT_DATA
import re
from app.products.base_handler import BaseHandler
| 36.731481 | 138 | 0.590623 |
e39a4f94218457c4ddd8055721b30ec15463d320 | 5,940 | py | Python | qcdb/iface_psi4/runner.py | vivacebelles/qcdb | 5bbdcb5c833277647a36bb0a5982abb56bf29b20 | [
"BSD-3-Clause"
] | 1 | 2019-02-20T20:18:02.000Z | 2019-02-20T20:18:02.000Z | qcdb/iface_psi4/runner.py | vivacebelles/qcdb | 5bbdcb5c833277647a36bb0a5982abb56bf29b20 | [
"BSD-3-Clause"
] | null | null | null | qcdb/iface_psi4/runner.py | vivacebelles/qcdb | 5bbdcb5c833277647a36bb0a5982abb56bf29b20 | [
"BSD-3-Clause"
] | null | null | null | import sys
import copy
import pprint
pp = pprint.PrettyPrinter(width=120)
import inspect
import numpy as np
from .. import __version__
from .. import qcvars
from ..driver.driver_helpers import print_variables
from ..exceptions import *
from ..molecule import Molecule
from ..pdict import PreservingDict
from .worker import psi4_subprocess
from .botanist import muster_inherited_options
def psi4_harvest(jobrec, psi4rec): # jobrec@i, psi4rec@io -> jobrec@io
"""Processes raw results from read-only `psi4rec` into QCAspect fields in returned `jobrec`."""
psi4rec = psi4rec['json'] # TODO NOT how this should be done figure out 1-tier/2-tier
try:
pass
#jobrec['molecule']['real']
#jobrec['do_gradient']
except KeyError as err:
raise KeyError(
'Required fields missing from ({})'.format(jobrec.keys())) from err
try:
psi4rec['raw_output']
#if jobrec['do_gradient'] is True:
# dftd3rec['dftd3_gradient']
except KeyError as err:
raise KeyError('Required fields missing from ({})'.format(
psi4rec.keys())) from err
if psi4rec['error']:
raise RuntimeError(psi4rec['error'])
#c4files = {}
for fl in psi4rec.keys():
if fl.startswith('outfile_'):
jobrec[fl] = psi4rec[fl]
#for fl in ['GRD', 'FCMFINAL', 'DIPOL']:
# field = 'output_' + fl.lower()
# if field in cfourrec:
# text += ' Cfour scratch file {} has been read\n'.format(fl)
# text += cfourrec[field]
# c4files[fl] = cfourrec[field]
# Absorb results into qcdb data structures
progvars = PreservingDict(psi4rec['psivars'])
import psi4
progarrs = {k: np.array(psi4.core.Matrix.from_serial(v)) for k, v in psi4rec['psiarrays'].items()}
progvars.update(progarrs)
qcvars.build_out(progvars)
calcinfo = qcvars.certify(progvars)
jobrec['raw_output'] = psi4rec['raw_output']
jobrec['qcvars'] = calcinfo
#prov = {}
#prov['creator'] = 'Psi4'
#prov['routine'] = sys._getframe().f_code.co_name
#prov['version'] = version
jobrec['provenance'].append(psi4rec['provenance'])
return jobrec
"""
Required Input Fields
---------------------
Optional Input Fields
---------------------
Output Fields
-------------
"""
| 28.834951 | 102 | 0.605724 |
e39aa6632ad08319c28262297573d3c36b620844 | 670 | py | Python | users/models.py | tanmayag8958/upes-fipi-jigyasa | e05e41e7624175ae64216a54cc546bbb74b2df61 | [
"MIT"
] | 8 | 2019-03-08T10:28:38.000Z | 2019-10-17T00:04:44.000Z | users/models.py | tanmayag8958/upes-fipi-jigyasa | e05e41e7624175ae64216a54cc546bbb74b2df61 | [
"MIT"
] | 124 | 2020-02-11T23:51:09.000Z | 2022-01-13T01:06:09.000Z | users/models.py | tanmayag8958/upes-fipi-jigyasa | e05e41e7624175ae64216a54cc546bbb74b2df61 | [
"MIT"
] | 3 | 2019-03-07T18:44:55.000Z | 2019-03-08T10:36:50.000Z | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
| 39.411765 | 81 | 0.770149 |
e39b51664f30f25e2e70980af59a04bbf06d0208 | 10,685 | py | Python | script/study/sedov/sedov_main_function.py | will-iam/Variant | 5b6732134fd51cf6c2b90b51b7976be0693ba28d | [
"MIT"
] | 8 | 2017-05-04T07:50:02.000Z | 2019-05-17T02:27:20.000Z | script/study/sedov/sedov_main_function.py | will-iam/Variant | 5b6732134fd51cf6c2b90b51b7976be0693ba28d | [
"MIT"
] | null | null | null | script/study/sedov/sedov_main_function.py | will-iam/Variant | 5b6732134fd51cf6c2b90b51b7976be0693ba28d | [
"MIT"
] | null | null | null | #Main Sedov Code Module
#Ported to python from fortran code written by James R Kamm and F X Timmes
#Original Paper and code found at http://cococubed.asu.edu/papers/la-ur-07-2849.pdf
import numpy as np
from globalvars import comvars as gv
from sedov_1d import sed_1d
from sedov_1d_time import sed_1d_time
from matplotlib import pyplot as plt
import pickle
gv.its = 20
# define sedov_main as a function
#define function to produce results at different points in time instead of sedov_1d
#final graph plots scaled density, pressure and velocity one one plot.
# plt.plot(zpos, den/max(den), 'b', label = 'Density')
# plt.plot(zpos, pres/max(pres), 'g', label = 'Pressure')
# plt.plot(zpos, vel/max(vel), 'r', label = 'Velocity')
# plt.axis([0, zmax, 0, 1])
# plt.legend(loc = 'upper left')
# plt.title('Scaled Density, Pressure, and Velocity')
# plt.ylabel('Scaled Value (x/max(x))')
# plt.xlabel('Position (m)')
# plt.show()
| 44.3361 | 232 | 0.467759 |
e39bbfc2ad38c9bcb6507c0a41b528d361682009 | 224 | py | Python | packs/hue/actions/rgb.py | jonico/st2contrib | 149c9c553f24360d91a14fef7ea6146707de75fd | [
"Apache-2.0"
] | 164 | 2015-01-17T16:08:33.000Z | 2021-08-03T02:34:07.000Z | packs/hue/actions/rgb.py | jonico/st2contrib | 149c9c553f24360d91a14fef7ea6146707de75fd | [
"Apache-2.0"
] | 442 | 2015-01-01T11:19:01.000Z | 2017-09-06T23:26:17.000Z | packs/hue/actions/rgb.py | jonico/st2contrib | 149c9c553f24360d91a14fef7ea6146707de75fd | [
"Apache-2.0"
] | 202 | 2015-01-13T00:37:40.000Z | 2020-11-07T11:30:10.000Z | from lib import action
| 28 | 63 | 0.705357 |
e39de89d4940f42750569f7100f7133d1493a932 | 1,755 | py | Python | test/test_math_challenge.py | nikett/math_challenge_eval | bafe9f6d30fc5ffd97492ce5e42716f839c29c4f | [
"Apache-2.0"
] | null | null | null | test/test_math_challenge.py | nikett/math_challenge_eval | bafe9f6d30fc5ffd97492ce5e42716f839c29c4f | [
"Apache-2.0"
] | null | null | null | test/test_math_challenge.py | nikett/math_challenge_eval | bafe9f6d30fc5ffd97492ce5e42716f839c29c4f | [
"Apache-2.0"
] | null | null | null | import unittest
from typing import Dict, List
from src.math_challenge import Challenge, DEFAULT_EMPTY_ANS
from src.student_info import StudentInfo
if __name__ == '__main__':
unittest.main()
| 51.617647 | 150 | 0.707123 |
e39ec861b279f059e70d4bea0eec9d21d1b19ced | 6,784 | py | Python | MyBot.py | joebieb/halite | da389aa8e9f97a0ac6a417ca53023609376c0dc6 | [
"MIT"
] | null | null | null | MyBot.py | joebieb/halite | da389aa8e9f97a0ac6a417ca53023609376c0dc6 | [
"MIT"
] | null | null | null | MyBot.py | joebieb/halite | da389aa8e9f97a0ac6a417ca53023609376c0dc6 | [
"MIT"
] | null | null | null | import hlt
import logging
from collections import OrderedDict
# GAME START
game = hlt.Game("Spoof_v7")
logging.info('Starting my %s bot!', game._name)
TURN = 0
while True:
# TURN START
TURN += 1
group_attack_limit = 3
attack_ship_modifier = .4
game_map = game.update_map()
command_queue = []
me = game_map.get_me()
enemies = [enemy for enemy in game_map.all_players() if enemy.id != me.id]
my_ships = me.all_ships()
my_docked_ships = [ship for ship in my_ships if ship.docking_status != ship.DockingStatus.UNDOCKED]
#planet_docking_status = []
enemy_ships = [ship for ship in game_map._all_ships() if ship not in my_ships]
docked_enemy_ships = [ship for ship in enemy_ships if ship.docking_status != ship.DockingStatus.UNDOCKED]
unowned_planets = [planet for planet in game_map.all_planets() if not planet.is_owned()]
my_planets = [planet for planet in game_map.all_planets() if planet.is_owned() and planet.owner.id == me.id]
enemy_planets = [planet for planet in game_map.all_planets() if planet.is_owned() and planet.owner.id != me.id]
targeted_planets = []
targeted_ships = []
# find center of enemy mass
planet_x = [planet.x for planet in enemy_planets]
ship_x = [ship.x for ship in enemy_ships]
planet_y = [planet.y for planet in enemy_planets]
ship_y = [ship.y for ship in enemy_ships]
x = planet_x + ship_x
y = planet_y + ship_y
enemy_centroid = hlt.entity.Position(0,0)
if len(x):
enemy_centroid = hlt.entity.Position(sum(x) / len(x), sum(y) / len(y))
entities_by_distance_to_enemy_centroid = OrderedDict(sorted(game_map.nearby_entities_by_distance(enemy_centroid).items(), key=lambda t: t[0]))
my_ships_by_distance_to_enemy_centroid = [entities_by_distance_to_enemy_centroid[distance][0]
for distance in entities_by_distance_to_enemy_centroid
if entities_by_distance_to_enemy_centroid[distance][0] in my_ships
and entities_by_distance_to_enemy_centroid[distance][0] not in my_docked_ships]
# adjust limits based on ship counts
my_ship_count = len(my_ships)
enemy_ship_count = len(enemy_ships)
if my_ship_count > 0 and enemy_ship_count > 0:
ratio = (my_ship_count / enemy_ship_count)
if ratio > 1:
group_attack_limit *= ratio
# logging.info('group attack limit: %s', group_attack_limit)
#logging.info(enemy_centroid)
# find undocked ships that are closest to action and make them fighters first set the rest as miners
attack_ships = my_ships_by_distance_to_enemy_centroid[0 : int(len(my_ships_by_distance_to_enemy_centroid) * attack_ship_modifier)]
# logging.info('Number of attack ships: %s', len(attack_ships))
# For every ship that I control
for ship in my_ships:
# If the ship is docked
if ship.docking_status != ship.DockingStatus.UNDOCKED:
# Skip this ship
continue
entities_by_distance = OrderedDict(sorted(game_map.nearby_entities_by_distance(ship).items(), key=lambda t: t[0]))
target_planets = [entities_by_distance[distance][0] for distance in entities_by_distance if entities_by_distance[distance][0] in game_map.all_planets() and entities_by_distance[distance][0] not in targeted_planets]
target_unowned_planets = [entities_by_distance[distance][0] for distance in entities_by_distance if entities_by_distance[distance][0] in unowned_planets and entities_by_distance[distance][0] not in targeted_planets]
target_enemy_planets = [entities_by_distance[distance][0] for distance in entities_by_distance if entities_by_distance[distance][0] in enemy_planets]
target_ships = [entities_by_distance[distance][0] for distance in entities_by_distance if entities_by_distance[distance][0] in enemy_ships]
target_docked_ships = [entities_by_distance[distance][0] for distance in entities_by_distance if entities_by_distance[distance][0] in docked_enemy_ships]
# if ship in attack_ships attack
if ship in attack_ships:
for enemy_ship in target_ships:
# if unowned planet is closer, then dock, otherwise attack
# if target_unowned_planets[0]:
# if ship.calculate_distance_between(target_unowned_planets[0]) < ship.calculate_distance_between(enemy_ship):
# if ship.can_dock(target_unowned_planets[0]):
# command_queue.append(ship.dock(target_unowned_planets[0]))
# else:
# navigate(ship, enemy_ship, 1)
# else:
# if enemy is targeted by n ships then get next closest ship
if enemy_ship in targeted_ships:
if targeted_ships.count(enemy_ship) >= group_attack_limit:
# logging.info('group attack limit met, trying next ship')
continue
targeted_ships.append(enemy_ship)
navigate(ship, enemy_ship, 1)
break
else:
for planet in target_planets:
# If we can dock, let's (try to) dock. If two ships try to dock at once, neither will be able to.
if ship.can_dock(planet) and planet in unowned_planets:
command_queue.append(ship.dock(planet))
elif ship.can_dock(planet) and planet in my_planets and not planet.is_full():
command_queue.append(ship.dock(planet))
# if planet is owned then attack
elif planet.is_owned() and planet in enemy_planets:
for enemy_ship in planet.all_docked_ships():
if enemy_ship:
navigate(ship, enemy_ship)
break
else:
targeted_planets.append(planet)
navigate(ship, planet)
break
# Send our set of commands to the Halite engine for this turn
game.send_command_queue(command_queue)
# TURN END
# GAME END
| 48.457143 | 223 | 0.658461 |
e39f7df5983d058b6f0e12ddeb89dfebc298ff47 | 2,875 | py | Python | test/test_horizons.py | bluePhlavio/eph | 9ad8d753ba70d7ed147a591c4181edd56b9533cc | [
"MIT"
] | 1 | 2021-05-07T23:35:32.000Z | 2021-05-07T23:35:32.000Z | test/test_horizons.py | bluePhlavio/eph | 9ad8d753ba70d7ed147a591c4181edd56b9533cc | [
"MIT"
] | 8 | 2019-11-02T01:04:26.000Z | 2021-06-02T00:01:37.000Z | test/test_horizons.py | bluePhlavio/eph | 9ad8d753ba70d7ed147a591c4181edd56b9533cc | [
"MIT"
] | 1 | 2019-03-20T13:55:52.000Z | 2019-03-20T13:55:52.000Z | import pytest
from eph.horizons import *
| 21.455224 | 67 | 0.614261 |
e3a03a276ee7eba66fe85aa5ecec8c492d7bc5fa | 950 | py | Python | demo.py | ademilly/sqs-service | cd6cb1e7ca904472376eafb8682621675c310f2e | [
"MIT"
] | null | null | null | demo.py | ademilly/sqs-service | cd6cb1e7ca904472376eafb8682621675c310f2e | [
"MIT"
] | null | null | null | demo.py | ademilly/sqs-service | cd6cb1e7ca904472376eafb8682621675c310f2e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import time
import sqs_service
"""Usage:
python demo.py
Expected set environment variables:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_DEFAULT_REGION
- AWS_SESSION_TOKEN for IAM roles
- AWS_SECURITY_TOKEN for IAM roles
Send 'Hello World' to queue 'TEST', listen to the queue and
print first message received
"""
if __name__ == '__main__':
run()
| 22.093023 | 76 | 0.678947 |
e3a0f222ef753f195c708bec4e9a91e2e4562806 | 34,106 | py | Python | web_console_v2/api/testing/workflow_template/psi_join_tree_model_no_label.py | chen1i/fedlearner | 981514dadbd0aa49ae87d185dd247d310e35605c | [
"Apache-2.0"
] | null | null | null | web_console_v2/api/testing/workflow_template/psi_join_tree_model_no_label.py | chen1i/fedlearner | 981514dadbd0aa49ae87d185dd247d310e35605c | [
"Apache-2.0"
] | null | null | null | web_console_v2/api/testing/workflow_template/psi_join_tree_model_no_label.py | chen1i/fedlearner | 981514dadbd0aa49ae87d185dd247d310e35605c | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import json
from google.protobuf.json_format import MessageToDict
from fedlearner_webconsole.proto.workflow_definition_pb2 import (
WorkflowDefinition, JobDefinition, JobDependency
)
from fedlearner_webconsole.proto.common_pb2 import (
Variable
)
if __name__ == '__main__':
print(json.dumps(MessageToDict(
make_workflow_template(),
preserving_proto_field_name=True,
including_default_value_fields=True)))
| 46.592896 | 136 | 0.27306 |
e3a19136ac88239183f6ccc7f508189df0b1db51 | 675 | py | Python | utils/sys_utils.py | machine2learn/galaina | 47ea16dd99687b38307674dd16ab7b7e99453910 | [
"BSD-3-Clause"
] | 3 | 2019-05-04T16:46:27.000Z | 2021-03-05T14:37:05.000Z | utils/sys_utils.py | machine2learn/galaina | 47ea16dd99687b38307674dd16ab7b7e99453910 | [
"BSD-3-Clause"
] | 2 | 2019-08-08T13:01:32.000Z | 2019-08-19T13:32:22.000Z | utils/sys_utils.py | machine2learn/galaina | 47ea16dd99687b38307674dd16ab7b7e99453910 | [
"BSD-3-Clause"
] | null | null | null | import os
import shutil
| 33.75 | 115 | 0.631111 |
e3a225a979c9b3bf4e5e8bd9eaea65a272da293b | 14,693 | py | Python | RasaMakeupRobot/script/dst/trade/utils.py | xiaobuguilaile/rasa-conversational-robot | 05f03c8e928a7c6bef284cdc18a89ef423977974 | [
"Apache-2.0"
] | 2 | 2021-01-05T08:56:54.000Z | 2021-01-22T06:05:55.000Z | script/dst/trade/utils.py | qq751220449/xbot | 557793302badfce1c0befa81de8948da99c0baae | [
"MIT"
] | null | null | null | script/dst/trade/utils.py | qq751220449/xbot | 557793302badfce1c0befa81de8948da99c0baae | [
"MIT"
] | null | null | null | import os
import bz2
import json
import random
import pickle
from collections import defaultdict, Counter
from tqdm import tqdm
import torch
from data.crosswoz.data_process.dst.trade_preprocess import (
EXPERIMENT_DOMAINS,
Lang,
get_seq,
get_slot_information,
)
def fix_general_label_error(belief_state):
    """Flatten a list of belief-state entries into a ``slot -> value`` dict.

    Each entry has the shape ``{"slots": [[slot_name, slot_value]]}``; only
    the first ``[slot, value]`` pair of each entry is used.  If the same slot
    name appears in several entries, the last occurrence wins.

    :param belief_state: list of dicts, e.g.::

        [
            {"slots": [["-", " "]]},
            {"slots": [["-", "100 - 150 "]]},
        ]

    :return: dict mapping each slot name to its value.
    """
    flattened = {}
    for entry in belief_state:
        pair = entry["slots"][0]
        flattened[pair[0]] = pair[1]
    return flattened
| 33.699541 | 107 | 0.562445 |
e3a3386ce9240964c1a178b8bb4fca5a690e725d | 789 | py | Python | pharmrep/reports/urls.py | boyombo/pharmrep | 2293ceb235dec949c58fa40d1ee43fce172e0ceb | [
"MIT"
] | null | null | null | pharmrep/reports/urls.py | boyombo/pharmrep | 2293ceb235dec949c58fa40d1ee43fce172e0ceb | [
"MIT"
] | null | null | null | pharmrep/reports/urls.py | boyombo/pharmrep | 2293ceb235dec949c58fa40d1ee43fce172e0ceb | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.views.generic import TemplateView
from reports import views
urlpatterns = [
url(r'balance/$', views.balance, name='report_balance'),
url(r'performance/$', views.performance, name='report_performance'),
url(r'last_activity/$', views.last_activity, name='last_activity'),
url(r'collection/$', views.CollectionListView.as_view(),
name='report_collection'),
url(r'saleschart/$', TemplateView.as_view(
template_name='reports/sales_chart.html'), name='chart_sales'),
url(r'paymentchart/$', TemplateView.as_view(
template_name='reports/payment_chart.html'), name='chart_payment'),
url(r'callchart/$', TemplateView.as_view(
template_name='reports/calls_chart.html'), name='chart_call'),
]
| 39.45 | 75 | 0.714829 |
e3a354a397453e432d52c1cd363a4b2592457f4b | 1,150 | py | Python | pycam/pycam/Utils/progress.py | pschou/py-sdf | 0a269ed155d026e29429d76666fb63c95d2b4b2c | [
"MIT"
] | null | null | null | pycam/pycam/Utils/progress.py | pschou/py-sdf | 0a269ed155d026e29429d76666fb63c95d2b4b2c | [
"MIT"
] | null | null | null | pycam/pycam/Utils/progress.py | pschou/py-sdf | 0a269ed155d026e29429d76666fb63c95d2b4b2c | [
"MIT"
] | null | null | null | from pycam.Utils.events import get_event_handler, get_mainloop
| 28.75 | 71 | 0.616522 |
e3a40b62615cdac16bdbd6f21218bc07a791e56b | 4,030 | py | Python | validate_submission.py | ChunghyunPark/semantic-kitti-api | 8863f21cb05fd99667b4a1bb755cc432c430c9fe | [
"MIT"
] | 1 | 2019-10-18T15:12:24.000Z | 2019-10-18T15:12:24.000Z | validate_submission.py | ZiyiLiubird/semantic-kitti-api | 9a6366264b1fd95d7a84e05bd41659524fd9fd32 | [
"MIT"
] | null | null | null | validate_submission.py | ZiyiLiubird/semantic-kitti-api | 9a6366264b1fd95d7a84e05bd41659524fd9fd32 | [
"MIT"
] | 1 | 2019-10-11T22:30:53.000Z | 2019-10-11T22:30:53.000Z | #!/usr/bin/env python3
# This file is covered by the LICENSE file in the root of this project.
import zipfile
import argparse
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Validate a submission zip file needed to evaluate on CodaLab competitions.\n\nThe verification tool checks:\n 1. correct folder structure,\n 2. existence of label files for each scan,\n 3. count of labels for each scan.\nInvalid labels are ignored by the evaluation script, therefore we don't check\nfor invalid labels.", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"zipfile",
type=str,
help='zip file that should be validated.',
)
parser.add_argument(
'dataset',
type=str,
help='directory containing the folder "sequences" containing folders "11", ..., "21" with the "velodyne" files.'
)
parser.add_argument(
"--task",
type=str,
choices=["segmentation"],
default="segmentation",
help='task for which the zip file should be validated.'
)
FLAGS, _ = parser.parse_known_args()
checkmark = "\u2713"
try:
print('Validating zip archive "{}".\n'.format(FLAGS.zipfile))
print(" 1. Checking filename.............. ", end="", flush=True)
if not FLAGS.zipfile.endswith('.zip'):
raise ValidationException('Competition bundle must end with ".zip"')
print(checkmark)
with zipfile.ZipFile(FLAGS.zipfile) as zipfile:
if FLAGS.task == "segmentation":
print(" 2. Checking directory structure... ", end="", flush=True)
directories = [folder.filename for folder in zipfile.infolist() if folder.filename.endswith("/")]
if "sequences/" not in directories:
raise ValidationException('Directory "sequences" missing inside zip file.')
for sequence in range(11, 22):
sequence_directory = "sequences/{}/".format(sequence)
if sequence_directory not in directories:
raise ValidationException('Directory "{}" missing inside zip file.'.format(sequence_directory))
predictions_directory = sequence_directory + "predictions/"
if predictions_directory not in directories:
raise ValidationException('Directory "{}" missing inside zip file.'.format(predictions_directory))
print(checkmark)
print(' 3. Checking file sizes............ ', end='', flush=True)
prediction_files = {info.filename: info for info in zipfile.infolist() if not info.filename.endswith("/")}
for sequence in range(11, 22):
sequence_directory = 'sequences/{}'.format(sequence)
velodyne_directory = os.path.join(FLAGS.dataset, 'sequences/{}/velodyne/'.format(sequence))
velodyne_files = sorted([os.path.join(velodyne_directory, file) for file in os.listdir(velodyne_directory)])
label_files = sorted([os.path.join(sequence_directory, "predictions", os.path.splitext(filename)[0] + ".label")
for filename in os.listdir(velodyne_directory)])
for velodyne_file, label_file in zip(velodyne_files, label_files):
num_points = os.path.getsize(velodyne_file) / (4 * 4)
if label_file not in prediction_files:
raise ValidationException('"' + label_file + '" is missing inside zip.')
num_labels = prediction_files[label_file].file_size / 4
if num_labels != num_points:
raise ValidationException('label file "' + label_file +
"' should have {} labels, but found {} labels!".format(int(num_points), int(num_labels)))
print(checkmark)
else:
# TODO scene completion.
raise NotImplementedError("Unknown task.")
except ValidationException as ex:
print("\n\n " + "\u001b[1;31m>>> Error: " + str(ex) + "\u001b[0m")
exit(1)
print("\n\u001b[1;32mEverything ready for submission!\u001b[0m \U0001F389")
| 39.90099 | 390 | 0.657072 |
e3a5131242de5ad9d1dfff7bb93f08796a6b50ce | 5,231 | py | Python | sheldon_behaviors/ship_behavior/scripts/behavior_service.py | shinselrobots/sheldon | 911148cd82d28e37aebc5e083fbf830d1c9768ab | [
"Apache-2.0"
] | 1 | 2021-01-02T18:17:52.000Z | 2021-01-02T18:17:52.000Z | sheldon_behaviors/ship_behavior/scripts/behavior_service.py | shinselrobots/sheldon | 911148cd82d28e37aebc5e083fbf830d1c9768ab | [
"Apache-2.0"
] | null | null | null | sheldon_behaviors/ship_behavior/scripts/behavior_service.py | shinselrobots/sheldon | 911148cd82d28e37aebc5e083fbf830d1c9768ab | [
"Apache-2.0"
] | 4 | 2017-09-16T03:56:01.000Z | 2018-09-19T02:15:57.000Z | #! /usr/bin/env python
# License: Apache 2.0. See LICENSE file in root directory.
#
# For simple behaviors that can run syncronously, Python provides
# a simple way to implement this. Add the work of your behavior
# in the execute_cb callback
#
import rospy
import actionlib
import behavior_common.msg
import time
import random
from std_msgs.msg import Float64
from std_msgs.msg import UInt16
from std_msgs.msg import UInt32
from std_msgs.msg import Bool
from std_msgs.msg import Empty
# for talking
import actionlib
import actionlib.action_client
import audio_and_speech_common.msg
# for servos
#from sheldon_servos.head_servo_publishers import *
#from sheldon_servos.right_arm_servo_publishers import *
#from sheldon_servos.left_arm_servo_publishers import *
from sheldon_servos.standard_servo_positions import *
from sheldon_servos.set_servo_speed import *
from sheldon_servos.set_servo_torque import *
if __name__ == '__main__':
rospy.init_node('ship_behavior')
server = BehaviorAction(rospy.get_name())
rospy.spin()
| 36.326389 | 150 | 0.676161 |
e3a55d32d7c5a654a176400a4c92296634a021f4 | 7,831 | py | Python | src/core/utils/bert_utils.py | joe3d1998/GraphFlow | 8a751e4fc69a1e0c06ded23b7d1096f3161931a1 | [
"Apache-2.0"
] | 30 | 2019-08-18T21:56:20.000Z | 2022-03-18T10:04:02.000Z | src/core/utils/bert_utils.py | joe3d1998/GraphFlow | 8a751e4fc69a1e0c06ded23b7d1096f3161931a1 | [
"Apache-2.0"
] | 2 | 2019-11-12T02:28:36.000Z | 2022-03-20T05:27:05.000Z | src/core/utils/bert_utils.py | joe3d1998/GraphFlow | 8a751e4fc69a1e0c06ded23b7d1096f3161931a1 | [
"Apache-2.0"
] | 11 | 2020-02-17T02:47:26.000Z | 2021-09-05T05:37:29.000Z | from collections import defaultdict, namedtuple
import torch
# When using the sliding window trick for long sequences,
# we take the representation of each token with maximal context.
# Take average of the BERT embeddings of these BPE sub-tokens
# as the embedding for the word.
# Take *weighted* average of the word embeddings through all layers.
def _check_is_max_context(doc_spans, cur_span_index, position):
    """Return True iff ``cur_span_index`` gives ``position`` its maximal context.

    With the sliding-window trick a single token can fall inside several
    overlapping doc spans.  The span with "maximum context" for a token is
    the one that maximises ``min(left_context, right_context)`` (their sum is
    constant, so we score by the minimum), plus a small ``0.01 * length``
    bonus favouring longer spans.  Ties are broken in favour of the earliest
    span, matching the original scan order.
    """
    best_index = None
    best_score = None
    for span_index, span in enumerate(doc_spans):
        last = span.start + span.length - 1
        # Skip spans that do not contain the token at all.
        if not (span.start <= position <= last):
            continue
        left_context = position - span.start
        right_context = last - position
        score = min(left_context, right_context) + 0.01 * span.length
        # Strict '>' keeps the first span on equal scores (same tie-break
        # behaviour as the reference BERT implementation).
        if best_score is None or score > best_score:
            best_score = score
            best_index = span_index
    return cur_span_index == best_index
| 38.960199 | 99 | 0.70106 |
e3a5f11059953bb156d9e0590e2727a61cd805cc | 282 | py | Python | apps/lectures/serializers.py | csilouanos/student-management-system | 91800a1d95234918ab7e9ce5a2a017eb93e81431 | [
"MIT"
] | null | null | null | apps/lectures/serializers.py | csilouanos/student-management-system | 91800a1d95234918ab7e9ce5a2a017eb93e81431 | [
"MIT"
] | null | null | null | apps/lectures/serializers.py | csilouanos/student-management-system | 91800a1d95234918ab7e9ce5a2a017eb93e81431 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from .models import Lecture
| 31.333333 | 70 | 0.659574 |
e3a71e4692cca720e8c9165426c410f3c3fef261 | 396 | py | Python | OpenDataCatalog/suggestions/urls.py | timwis/Open-Data-Catalog | 0ccdc71f28773508c337875fd32478dd4324a50c | [
"MIT"
] | 3 | 2016-08-07T17:25:56.000Z | 2019-11-12T00:51:14.000Z | suggestions/urls.py | opensandiego/Open-Data-Catalog | 06f93bab36d22431ff86a87faea4e388d0491846 | [
"MIT"
] | 1 | 2021-04-17T10:52:53.000Z | 2021-04-17T10:52:53.000Z | suggestions/urls.py | opensandiego/Open-Data-Catalog | 06f93bab36d22431ff86a87faea4e388d0491846 | [
"MIT"
] | 2 | 2016-10-28T14:20:27.000Z | 2021-04-17T10:52:28.000Z | from django.conf.urls.defaults import patterns, include, url
# URL routes for the "suggestions" app: list all suggestions, create one,
# vote/unvote on a specific suggestion, and close it.  Views are referenced
# by dotted path (old-style Django `patterns()` configuration).
urlpatterns = patterns('',
    (r'^$', 'suggestions.views.list_all'),
    (r'^post/$', 'suggestions.views.add_suggestion'),
    # The vote/unvote/close routes capture the suggestion id from the URL
    # and pass it to the view as the `suggestion_id` keyword argument.
    (r'^vote/(?P<suggestion_id>.*)/$', 'suggestions.views.add_vote'),
    (r'^unvote/(?P<suggestion_id>.*)/$', 'suggestions.views.remove_vote'),
    (r'^close/(?P<suggestion_id>.*)/$', 'suggestions.views.close'),
)
| 39.6 | 73 | 0.659091 |
e3a9a5af8690681698031e1a127157f06eef690e | 3,850 | py | Python | fronteira_eficiente2.py | samuelbarrosm/Python-for-finances- | e1fd118b05f6efa2c4c72e88c5b2bf028d120c45 | [
"MIT"
] | null | null | null | fronteira_eficiente2.py | samuelbarrosm/Python-for-finances- | e1fd118b05f6efa2c4c72e88c5b2bf028d120c45 | [
"MIT"
] | null | null | null | fronteira_eficiente2.py | samuelbarrosm/Python-for-finances- | e1fd118b05f6efa2c4c72e88c5b2bf028d120c45 | [
"MIT"
] | null | null | null | #Esse codigo utilizado para calcular a fronteira eficiente de um portfolio
#Esse codigo tem como objetivo avaliar a eficiencia das de um portfolio
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas_datareader import data as wb
assets = ['PG', '^GSPC']
pf_data = pd.DataFrame()
for a in assets:
pf_data[a] = wb.DataReader(a, data_source='yahoo', start='2010-1-1')['Adj Close']
pf_data.tail()
(pf_data / pf_data.iloc[0] * 100).plot(figsize=(10, 5))
log_returns = np.log(pf_data / pf_data.shift(1))
log_returns.mean() * 250
log_returns['PG'].cov(log_returns['^GSPC']) * 250
#Correlacao superior a 30% indica que sao muito correlacionados, isso bom
log_returns['PG'].corr(log_returns['^GSPC'])
#Agora vamos aprtir pra uma otimizacao do portfolio por uma perspectiva mais tecnica
#Vamos criar uma variavel que ira contar o numero de ativos na nossa carteira
num_assets = len(assets)
#Agora iremos criar dois pesos alatorios para esses ativos
#O metodo random.random pode gerar dois numeros aleatorios entre o e 1
arr = np.random.random(2)
#Vamos calcular a soma do valor dos dois pesos obtidos aleatoriamente
arr[0] + arr[1]
#A soma desses pesos aleatorios nem sempre sera igual a 1
#Para fazer com que a soma seja = 1, temos
weights = np.random.random(num_assets)
weights /= np.sum(weights)
print(weights)
#O codigo /= significa o peso, dividido pela soma dos pesos, como um loop
#Lembrando, quando se usa o numpy estamos transformando esses valores em elementos da matriz
#Por isso quando atribuimos esse codigo a soma dos pesos igual a 1
#Para escrever o retorno esperado de um portfolio:
#Retorno = soma do produto da media dos retornos logaritmicos anualizados pelo seus respectivos pesos
#Essa funcao .sun do numpy, funciona somando objetos em mais de uma dimensao, por isso difere do sum(nativo do python)
np.sum(weights * log_returns.mean()) * 250
#Esse codigo como ja foi visto fornece a variancia
np.dot(weights.T, np.dot(log_returns['PG'].cov(log_returns['^GSPC']) * 250, weights))
#Esse codigo como ja foi visto fornece a volatilidade
np.sqrt(np.dot(weights.T, np.dot(log_returns['PG'].cov(log_returns['^GSPC']) * 250, weights)))
#Usaremos esses 3 codigos para calcular o retorno e a volatilidade na simulacao dos portfolios de minima variancia
#Agora iremos criar um grafico onde mil simulacoes de minima variancia serao plotadas
#Nao estamos fazendo 1000 investimentos diferentes
#estamos fazendo 1000 combinacoes dos mesmos ativos(pesos)
#Esse loop, gerara uma repeticao de 1000 possibilidades para os pesos dos ativos
pfolio_returns = []
pfolio_volatilities = []
for x in range (1000):
weights = np.random.random(num_assets)
weights /= np.sum(weights)
pfolio_returns.append(np.sum(weights * log_returns.mean()) * 250)
pfolio_volatilities.append(np.sqrt(np.dot(weights.T, np.dot(log_returns['PG'].cov(log_returns['^GSPC']) * 250, weights))))
#Fazemos isso para transformar os numeros dispersos, em arrays contidos numa matriz, fica mais pratico de trabalhar
pfolio_returns = np.array(pfolio_returns)
pfolio_volatilities = np.array(pfolio_volatilities)
pfolio_volatilities,pfolio_returns
#Agora iremos criar um objeto no dataframe com duas colunas, uma para os retornos e outra para as respectivas volatilidades
portfolios = pd.DataFrame({'Return': pfolio_returns, 'Volatility': pfolio_volatilities})
portfolios.head()
portfolios.tail()
#Agora estamos plotando os valores do dataframe num grafico
#O tipo de grafico que estamos inserindo to tipo scatter (grafico de dispersao)
portfolios.plot(x='Volatility', y='Return', kind='scatter', figsize=(10, 6))
plt.xlabel('Expected Volatility')
plt.ylabel('Expected Return')
plt.show()
| 36.666667 | 127 | 0.744416 |
e3aa556fa11d4a3e0d7e99f07a6cd0ab4a4331f6 | 7,607 | py | Python | test/integration/test_build.py | DahlitzFlorian/wily | 069c26bff9741b49420e3cfd7b0954ac9b88cc3f | [
"Apache-2.0"
] | null | null | null | test/integration/test_build.py | DahlitzFlorian/wily | 069c26bff9741b49420e3cfd7b0954ac9b88cc3f | [
"Apache-2.0"
] | null | null | null | test/integration/test_build.py | DahlitzFlorian/wily | 069c26bff9741b49420e3cfd7b0954ac9b88cc3f | [
"Apache-2.0"
] | null | null | null | """
Tests for the wily build command.
All of the following tests will use a click CLI runner to fully simulate the CLI.
Many of the tests will depend on a "builddir" fixture which is a compiled wily cache.
TODO : Test build + build with extra operator
"""
import pathlib
import pytest
from click.testing import CliRunner
from git import Repo, Actor
from mock import patch
import wily.__main__ as main
from wily.archivers import ALL_ARCHIVERS
def test_build_not_git_repo(tmpdir):
    """
    Test that build defaults to filesystem in a non-git directory
    """
    # Patch the logger so build output does not pollute the test run.
    with patch("wily.logger") as logger:
        runner = CliRunner()
        result = runner.invoke(main.cli, ["--path", tmpdir, "build", "test.py"])
        assert result.exit_code == 0, result.stdout
        # Without a .git directory, wily should fall back to the
        # "filesystem" archiver and create its index under .wily/filesystem.
        cache_path = tmpdir / ".wily"
        assert cache_path.exists()
        index_path = tmpdir / ".wily" / "filesystem" / "index.json"
        assert index_path.exists()
def test_build_invalid_path(tmpdir):
    """
    Test that build fails with a garbage path
    """
    # Patch the logger so build output does not pollute the test run.
    with patch("wily.logger") as logger:
        runner = CliRunner()
        # "/fo/v/a" does not exist; the CLI should exit with code 1.
        result = runner.invoke(main.cli, ["--path", "/fo/v/a", "build", "test.py"])
        assert result.exit_code == 1, result.stdout
def test_build_no_target(tmpdir):
    """
    Test that build fails with no target
    """
    # Patch the logger so build output does not pollute the test run.
    with patch("wily.logger") as logger:
        runner = CliRunner()
        # Omitting the required target argument is a usage error, which
        # click reports with exit code 2.
        result = runner.invoke(main.cli, ["--path", tmpdir, "build"])
        assert result.exit_code == 2, result.stdout
def test_build_crash(tmpdir):
"""
Test that build works in a basic repository.
"""
repo = Repo.init(path=tmpdir)
tmppath = pathlib.Path(tmpdir)
# Write a test file to the repo
with open(tmppath / "test.py", "w") as test_txt:
test_txt.write("import abc")
with open(tmppath / ".gitignore", "w") as test_txt:
test_txt.write(".wily/")
index = repo.index
index.add(["test.py", ".gitignore"])
author = Actor("An author", "author@example.com")
committer = Actor("A committer", "committer@example.com")
index.commit("basic test", author=author, committer=committer)
import wily.commands.build
with patch.object(
wily.commands.build.Bar, "finish", side_effect=RuntimeError("arggh")
) as bar_finish:
runner = CliRunner()
result = runner.invoke(main.cli, ["--path", tmpdir, "build", "test.py"])
assert bar_finish.called_once
assert result.exit_code == 1, result.stdout
with patch("wily.commands.build.logger") as logger:
logger.level = "DEBUG"
with patch.object(
wily.commands.build.Bar, "finish", side_effect=RuntimeError("arggh")
) as bar_finish:
runner = CliRunner()
result = runner.invoke(
main.cli, ["--debug", "--path", tmpdir, "build", "test.py"]
)
assert bar_finish.called_once
assert result.exit_code == 1, result.stdout
def test_build(tmpdir):
"""
Test that build works in a basic repository.
"""
repo = Repo.init(path=tmpdir)
tmppath = pathlib.Path(tmpdir)
# Write a test file to the repo
with open(tmppath / "test.py", "w") as test_txt:
test_txt.write("import abc")
with open(tmppath / ".gitignore", "w") as test_txt:
test_txt.write(".wily/")
index = repo.index
index.add(["test.py", ".gitignore"])
author = Actor("An author", "author@example.com")
committer = Actor("A committer", "committer@example.com")
commit = index.commit("basic test", author=author, committer=committer)
with patch("wily.logger") as logger:
runner = CliRunner()
result = runner.invoke(
main.cli, ["--debug", "--path", tmpdir, "build", "test.py"]
)
assert result.exit_code == 0, result.stdout
cache_path = tmpdir / ".wily"
assert cache_path.exists()
index_path = tmpdir / ".wily" / "git" / "index.json"
assert index_path.exists()
rev_path = tmpdir / ".wily" / "git" / commit.name_rev.split(" ")[0] + ".json"
assert rev_path.exists()
def test_build_twice(tmpdir):
"""
Test that build works when run twice.
"""
repo = Repo.init(path=tmpdir)
tmppath = pathlib.Path(tmpdir)
# Write a test file to the repo
with open(tmppath / "test.py", "w") as test_txt:
test_txt.write("import abc")
with open(tmppath / ".gitignore", "w") as test_txt:
test_txt.write(".wily/")
index = repo.index
index.add(["test.py", ".gitignore"])
author = Actor("An author", "author@example.com")
committer = Actor("A committer", "committer@example.com")
commit = index.commit("basic test", author=author, committer=committer)
runner = CliRunner()
result = runner.invoke(main.cli, ["--debug", "--path", tmpdir, "build", "test.py"])
assert result.exit_code == 0, result.stdout
cache_path = tmpdir / ".wily"
assert cache_path.exists()
index_path = tmpdir / ".wily" / "git" / "index.json"
assert index_path.exists()
rev_path = tmpdir / ".wily" / "git" / commit.name_rev.split(" ")[0] + ".json"
assert rev_path.exists()
# Write a test file to the repo
with open(tmppath / "test.py", "w") as test_txt:
test_txt.write("import abc\nfoo = 1")
index.add(["test.py"])
commit2 = index.commit("basic test", author=author, committer=committer)
result = runner.invoke(main.cli, ["--debug", "--path", tmpdir, "build", "test.py"])
assert result.exit_code == 0, result.stdout
cache_path = tmpdir / ".wily"
assert cache_path.exists()
index_path = tmpdir / ".wily" / "git" / "index.json"
assert index_path.exists()
rev_path = tmpdir / ".wily" / "git" / commit.name_rev.split(" ")[0] + ".json"
assert rev_path.exists()
rev_path2 = tmpdir / ".wily" / "git" / commit2.name_rev.split(" ")[0] + ".json"
assert rev_path2.exists()
def test_build_no_commits(tmpdir):
    """
    Test that build fails cleanly with no commits.

    Initialises an empty git repository (no commits) and checks that the
    build command exits with code 1 instead of crashing.
    """
    # Only the side effect of creating the .git directory is needed;
    # the previously bound (and unused) `repo` handle has been dropped.
    Repo.init(path=tmpdir)

    runner = CliRunner()
    result = runner.invoke(
        main.cli, ["--debug", "--path", tmpdir, "build", tmpdir, "--skip-ignore-check"]
    )
    assert result.exit_code == 1, result.stdout
def test_build_dirty_repo(builddir):
    """
    Test that build fails cleanly with a dirty repo
    """
    tmppath = pathlib.Path(builddir)
    # Modify a tracked file without committing, leaving the working tree dirty.
    with open(tmppath / "test.py", "w") as test_txt:
        test_txt.write("import abc\nfoo = 1")
    runner = CliRunner()
    result = runner.invoke(main.cli, ["--debug", "--path", builddir, "build", builddir])
    # A dirty working tree must abort the build with exit code 1.
    assert result.exit_code == 1, result.stdout
archivers = {name for name in ALL_ARCHIVERS.keys()}
| 31.962185 | 88 | 0.625739 |
e3aa733a9aa92608aebcdce4ac3a723c8a9e99a6 | 356 | py | Python | authz/test/test_obp_helper.py | shivdeep-singh/conversational-ai-chatbot | b67802a96b3fe3d64457931a8cbf8bf03442fd0d | [
"BSD-3-Clause"
] | 11 | 2021-09-09T16:16:48.000Z | 2022-03-31T21:25:46.000Z | authz/test/test_obp_helper.py | shivdeep-singh/conversational-ai-chatbot | b67802a96b3fe3d64457931a8cbf8bf03442fd0d | [
"BSD-3-Clause"
] | 1 | 2022-02-10T06:08:11.000Z | 2022-02-10T06:08:11.000Z | authz/test/test_obp_helper.py | shivdeep-singh/conversational-ai-chatbot | b67802a96b3fe3d64457931a8cbf8bf03442fd0d | [
"BSD-3-Clause"
] | 12 | 2021-09-19T10:39:27.000Z | 2022-03-09T05:17:05.000Z | import unittest
from zmq_integration_lib import RPCClient, RPCServer
import unittest.mock as mock
| 18.736842 | 52 | 0.676966 |
e3aa8e55cd8c639086caeeedaf341d4860bf12f5 | 1,657 | py | Python | cvtData.py | leduchust/ST-GCN_HAR | 778e7931b24eaa7d78b5a61216bb9a7a2ad6ab9e | [
"MIT"
] | null | null | null | cvtData.py | leduchust/ST-GCN_HAR | 778e7931b24eaa7d78b5a61216bb9a7a2ad6ab9e | [
"MIT"
] | null | null | null | cvtData.py | leduchust/ST-GCN_HAR | 778e7931b24eaa7d78b5a61216bb9a7a2ad6ab9e | [
"MIT"
] | null | null | null | import os
import numpy as np
import tqdm as tqdm
cvtData.cvt_Data()
print('done')
| 36.021739 | 116 | 0.388654 |
e3aac0da41e8ebd49fe3952b0e96fb25ef7523c4 | 2,412 | py | Python | src/zope/app/applicationcontrol/browser/runtimeinfo.py | zopefoundation/zope.app.applicationcontrol | de7b160dde9ce01f65af5412a984065c5a1a9284 | [
"ZPL-2.1"
] | null | null | null | src/zope/app/applicationcontrol/browser/runtimeinfo.py | zopefoundation/zope.app.applicationcontrol | de7b160dde9ce01f65af5412a984065c5a1a9284 | [
"ZPL-2.1"
] | 4 | 2017-05-02T18:43:09.000Z | 2021-09-20T06:29:14.000Z | src/zope/app/applicationcontrol/browser/runtimeinfo.py | zopefoundation/zope.app.applicationcontrol | de7b160dde9ce01f65af5412a984065c5a1a9284 | [
"ZPL-2.1"
] | 1 | 2015-04-03T07:25:44.000Z | 2015-04-03T07:25:44.000Z | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Define runtime information view component for Application Control
"""
__docformat__ = 'restructuredtext'
from zope.app.applicationcontrol.interfaces import IRuntimeInfo
from zope.app.applicationcontrol.i18n import ZopeMessageFactory as _
| 33.5 | 78 | 0.563018 |
e3aad4147f45eb6d3a2f6a2928f807f8445336c7 | 1,171 | py | Python | helper/storageHelper.py | LHGames-2018/DCI5espaces | 8f71ca3b6cf2bae78822d8a4a8546b5482eaa627 | [
"MIT"
] | null | null | null | helper/storageHelper.py | LHGames-2018/DCI5espaces | 8f71ca3b6cf2bae78822d8a4a8546b5482eaa627 | [
"MIT"
] | null | null | null | helper/storageHelper.py | LHGames-2018/DCI5espaces | 8f71ca3b6cf2bae78822d8a4a8546b5482eaa627 | [
"MIT"
] | 5 | 2017-10-07T14:54:28.000Z | 2018-09-27T20:16:59.000Z | import json
import os.path
| 27.880952 | 85 | 0.599488 |
e3ab35bc88f90fb1279165d05f8411f9b2a64d26 | 12,383 | py | Python | ddot/cx_services_old-8-31-17/align.py | pupster90/ddot2 | 1952bff30383b35dff72b332592e1471201d40f3 | [
"MIT"
] | 1 | 2018-11-08T14:41:43.000Z | 2018-11-08T14:41:43.000Z | ddot/cx_services_old-8-31-17/align.py | pupster90/ddot2 | 1952bff30383b35dff72b332592e1471201d40f3 | [
"MIT"
] | null | null | null | ddot/cx_services_old-8-31-17/align.py | pupster90/ddot2 | 1952bff30383b35dff72b332592e1471201d40f3 | [
"MIT"
] | null | null | null | import ndex.client as nc
from ndex.networkn import NdexGraph
import io
import json
from IPython.display import HTML
from time import sleep
import os, time, tempfile
import sys
import time
import logging
import grpc
import networkx as nx
import cx_pb2
import cx_pb2_grpc
import numpy as np
import inspect
from concurrent import futures
from itertools import combinations
from subprocess import Popen, PIPE, STDOUT
import pandas as pd
from ddot import Ontology, align_hierarchies
from ddot.utils import update_nx_with_alignment
from ddot.cx_services.cx_utils import yield_ndex, required_params, cast_params
from ddot.config import default_params
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
verbose = True
if __name__ == '__main__':
    # Script entry point for the gRPC hierarchy-alignment service.
    logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(asctime)s %(message)s')
    # NOTE(review): `log_info` and `serve` are not defined in this excerpt;
    # they are presumably defined elsewhere in the module -- confirm before
    # running this file directly.
    log_info("Listening for requests on '0.0.0.0:8081'")
    serve()
| 36.313783 | 128 | 0.562868 |
e3abcdda55715b1f38a3a0acd05e2b4c08e37048 | 3,509 | py | Python | biosignalsnotebooks/build/lib/biosignalsnotebooks/__init__.py | csavur/biosignalsnotebooks | c99596741a854c58bdefb429906023ac48ddc3b7 | [
"MIT"
] | 1 | 2020-06-26T05:05:11.000Z | 2020-06-26T05:05:11.000Z | biosignalsnotebooks/build/lib/biosignalsnotebooks/__init__.py | csavur/biosignalsnotebooks | c99596741a854c58bdefb429906023ac48ddc3b7 | [
"MIT"
] | null | null | null | biosignalsnotebooks/build/lib/biosignalsnotebooks/__init__.py | csavur/biosignalsnotebooks | c99596741a854c58bdefb429906023ac48ddc3b7 | [
"MIT"
] | null | null | null | """
OPENSIGNALSFACTORY PACKAGE INITIALISATION FILE (WITH IMPORT STATEMENTS)
The main purpose of biosignalsnotebooks package is to support the users of PLUX acquisition
devices, such as biosgnalsplux or bitalino, in some processing tasks that can be applied to the
acquired electrophysiological signals, namely ECG, EMG...
This package had been developed as part of the "OpenSignals Tools" project, that offers a set of
Jupyter Notebooks (tutorials) where it is explained step by step how the user can execute the
previously mentioned processing tasks (such as detection of muscular activation from EMG signal,
determination of each cardiac cycle duration from an ECG acquisition or monitoring fatigue by
generating EMG median power frequency evolution time series).
At the end of each Notebook is referred the correspondent biosignalsnotebooks function that synthesises
the processing functionality presented step by step.
In spite of being 'part' of an integrate solution for OpenSignals users, this package can be used
independently.
Package Documentation
---------------------
The docstring presented as the initial statement of each module function will help the user to
correctly and effectively use all biosignalsnotebooks functions.
A full guide that collects all the function docstring is available for download at:
...
OpenSignals Tools Project Repository
------------------------------------
More information's about the project and the respective files are available at:
https://github.com/biosignalsplux/biosignalsnotebooks
Available Modules
-----------------
aux_functions
Includes a set of auxiliary functions that are invoked in other biosignalsnotebooks modules.
This module has a 'private' classification, i.e., it was not specially designed for users.
__notebook_support__
Set of functions invoked in OpenSignals Tools Notebooks to present some graphical results.
These function are only designed for a single end, but we made them available to the user if he
want to explore graphical functionalities in an example format.
conversion
Module responsible for the definition of functions that convert Raw units (available in the
acquisition files returned by OpenSignals) and sample units to physical units like mV, A, C,
s,..., accordingly to the sensor under analysis.
detect
Contains functions intended to detect events on electrophysiological signals.
extract
Ensures to the user that he can extract multiple parameters from a specific electrophysiological
signal at once.
open
Module dedicated to read/load data from .txt and .h5 files generated by OpenSignals.
With the available functions the user can easily access data inside files (signal samples)
together with the metadata in the file header.
process
Processing capabilities that are more general than the categories of the remaining modules.
signal_samples
A module that gives an easy access to the biosignalsnotebooks dataset/library of signals (used in
OpenSignals Tools Notebooks).
visualise
Graphical data representation functions based on the application of Bokeh main functionalities.
/\
"""
from .conversion import *
from .detect import *
from .extract import *
from .load import *
from .process import *
from .visualise import *
from .signal_samples import *
from .factory import *
from .synchronisation import *
from .train_and_classify import *
from .__notebook_support__ import *
from .synchronisation import *
# 11/10/2018 16h45m :)
| 43.320988 | 103 | 0.785694 |
e3af0c54fc348474f9b9d9f22f0f2e2bbfffd5d0 | 4,049 | py | Python | Dragon/python/dragon/vm/tensorflow/contrib/learn/datasets/base.py | neopenx/Dragon | 0e639a7319035ddc81918bd3df059230436ee0a1 | [
"BSD-2-Clause"
] | 212 | 2015-07-05T07:57:17.000Z | 2022-02-27T01:55:35.000Z | Dragon/python/dragon/vm/tensorflow/contrib/learn/datasets/base.py | neopenx/Dragon | 0e639a7319035ddc81918bd3df059230436ee0a1 | [
"BSD-2-Clause"
] | 6 | 2016-07-07T14:31:56.000Z | 2017-12-12T02:21:15.000Z | Dragon/python/dragon/vm/tensorflow/contrib/learn/datasets/base.py | neopenx/Dragon | 0e639a7319035ddc81918bd3df059230436ee0a1 | [
"BSD-2-Clause"
] | 71 | 2016-03-24T09:02:41.000Z | 2021-06-03T01:52:41.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base utilities for loading datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import random
import time
import shutil
from six.moves import urllib
Dataset = collections.namedtuple('Dataset', ['data', 'target'])
Datasets = collections.namedtuple('Datasets', ['train', 'validation', 'test'])
_RETRIABLE_ERRNOS = {
110, # Connection timed out [socket.py]
}
def maybe_download(filename, work_directory, source_url):
  """Download the data from source url, unless it's already here.

  Args:
      filename: string, name of the file in the directory.
      work_directory: string, path to working directory.
      source_url: url to download from if file doesn't exist.

  Returns:
      Path to resulting file.
  """
  if not os.path.exists(work_directory):
    os.makedirs(work_directory)
  filepath = os.path.join(work_directory, filename)
  # Early return when the file is already cached locally.
  if os.path.exists(filepath):
    return filepath
  downloaded_tmp, _ = urlretrieve_with_retry(source_url)
  shutil.copy(downloaded_tmp, filepath)
  print('Successfully downloaded', filename, os.path.getsize(filepath),
        'bytes.')
  return filepath
| 32.918699 | 80 | 0.637935 |
e3afe486c89ac3a00730c37ba0aa5141f39fe3fe | 97 | py | Python | test/integration/steps/pds.py | NHSDigital/list-reconciliation | 37b1ebe99a64275e23b0e7fb6a89415b92d14306 | [
"MIT"
] | 4 | 2021-06-25T08:28:54.000Z | 2021-12-16T11:03:42.000Z | test/integration/steps/pds.py | NHSDigital/list-reconciliation | 37b1ebe99a64275e23b0e7fb6a89415b92d14306 | [
"MIT"
] | 184 | 2021-06-24T15:27:08.000Z | 2022-03-17T12:44:28.000Z | test/integration/steps/pds.py | NHSDigital/list-reconciliation | 37b1ebe99a64275e23b0e7fb6a89415b92d14306 | [
"MIT"
] | 3 | 2021-11-05T10:21:44.000Z | 2022-03-04T14:29:24.000Z | from behave import given
| 13.857143 | 36 | 0.731959 |
e3b045a473bd87ba50f0bc065652bd367fcdfb8d | 19,208 | py | Python | iSearch/isearch.py | Twilightgo/iSearch | 600398dc22c07ef1211209769f9fda4d2c1151d7 | [
"MIT"
] | null | null | null | iSearch/isearch.py | Twilightgo/iSearch | 600398dc22c07ef1211209769f9fda4d2c1151d7 | [
"MIT"
] | null | null | null | iSearch/isearch.py | Twilightgo/iSearch | 600398dc22c07ef1211209769f9fda4d2c1151d7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import sys
import argparse
import os
import re
import sqlite3
import requests
import bs4
from termcolor import colored
# Python2 compatibility
if sys.version_info[0] == 2:
reload(sys)
sys.setdefaultencoding('utf-8')
# Default database path is ~/.iSearch.
DEFAULT_PATH = os.path.join(os.path.expanduser('~'), '.iSearch')
CREATE_TABLE_WORD = '''
CREATE TABLE IF NOT EXISTS Word
(
name TEXT PRIMARY KEY,
expl TEXT,
pr INT DEFAULT 1,
aset CHAR[1],
addtime TIMESTAMP NOT NULL DEFAULT (DATETIME('NOW', 'LOCALTIME'))
)
'''
def colorful_print(raw):
    '''print colorful text in terminal.'''
    lines = raw.split('\n')
    colorful = True   # the first non-empty line is the headword -> green banner
    detail = False    # flips to "detail" styling once a section marker is seen
    for line in lines:
        if line:
            if colorful:
                colorful = False
                print(colored(line, 'white', 'on_green') + '\n')
                continue
            # NOTE(review): startswith('') is always True, so this branch
            # swallows every line after the first and everything below it is
            # unreachable.  The marker strings were most likely non-ASCII
            # section headers that were lost in transit -- restore them
            # before relying on the yellow/cyan styling below.
            elif line.startswith(''):
                print(line + '\n')
                continue
            elif line.startswith(''):
                print(colored(line, 'white', 'on_green') + '\n')
                detail = True
                continue
            if not detail:
                print(colored(line + '\n', 'yellow'))
            else:
                print(colored(line, 'cyan') + '\n')
def normal_print(raw):
    """Print *raw* line by line without any terminal colouring.

    Empty lines are skipped; each printed line is followed by a blank line
    (the explicit '\\n' plus print's own newline), matching the spacing of
    colorful_print so redirected output stays readable.
    """
    for chunk in raw.split('\n'):
        if chunk:
            print(chunk + '\n')
def search_online(word, printer=True):
    '''search the word or phrase on http://dict.youdao.com.

    word:    the word or phrase to look up (network access required)
    printer: if True, pretty-print the scraped explanation to the terminal
    return:  the explanation text scraped from the dictionary page
    '''
    # NOTE(review): the space in 'dict.youdao.com/w/ %s' looks like an
    # artifact of how this file was copied -- verify the intended URL is
    # '.../w/%s' without the space.
    url = 'http://dict.youdao.com/w/ %s' % word
    # `get_text` (the page scraper) is defined elsewhere in this module.
    expl = get_text(url)
    if printer:
        colorful_print(expl)
    return expl
def search_database(word):
    '''offline search.

    Look `word` up in the local SQLite cache (DEFAULT_PATH/word.db).  On a
    hit, print the cached priority bar and explanation.  On a miss, fall
    back to an online lookup and interactively offer to save the word with
    a priority of 1-5.
    '''
    conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
    curs = conn.cursor()
    # Prefix match: LIKE "word%".  NOTE(review): the word is interpolated
    # straight into the SQL text, which breaks on quote characters (and is
    # an injection hazard) -- consider a parameterized query.
    curs.execute(r'SELECT expl, pr FROM Word WHERE name LIKE "%s%%"' % word)
    res = curs.fetchall()
    if res:
        print(colored(word + ' ', 'white', 'on_green'))
        print()
        # Priority bar: `pr` filled cells out of 5.
        print(colored(' ' * res[0][1], 'red'), colored(' ' * (5 - res[0][1]), 'yellow'), sep='')
        colorful_print(res[0][0])
    else:
        print(colored(word + ' ', 'white', 'on_red'))
        search_online(word)
        # Prompt text appears truncated (non-ASCII characters likely lost).
        input_msg = '(1~5) Enter \n>>> '
        if sys.version_info[0] == 2:
            add_in_db_pr = raw_input(input_msg)
        else:
            add_in_db_pr = input(input_msg)
        # Only persist the word when the user typed a digit in 1..5.
        if add_in_db_pr and add_in_db_pr.isdigit():
            if(int(add_in_db_pr) >= 1 and int(add_in_db_pr) <= 5):
                add_word(word, int(add_in_db_pr))
                print(colored(' {word} '.format(word=word), 'white', 'on_red'))
    curs.close()
    conn.close()
def add_word(word, default_pr):
    '''Add `word` (or a phrase) with priority `default_pr` to the database.

    The explanation is fetched online first and cached in the `expl`
    column.  If the word is already stored, a notice is printed and the
    whole process exits (original behavior, kept for CLI compatibility).
    '''
    conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
    curs = conn.cursor()
    # Parameterized query: the old %-interpolation broke (and was
    # injectable) whenever the word contained a quote character.
    curs.execute('SELECT expl, pr FROM Word WHERE name = ?', (word,))
    res = curs.fetchall()
    if res:
        print(colored(word + ' ', 'white', 'on_red'))
        sys.exit()
    try:
        expl = search_online(word, printer=False)
        # Explanations routinely contain quotes, so bind values instead of
        # formatting them into the SQL text (the old code silently failed
        # to add such words).
        curs.execute(
            'insert into word(name, expl, pr, aset) values (?, ?, ?, ?)',
            (word, expl, default_pr, word[0].upper()))
    except Exception as e:
        print(colored('something\'s wrong, you can\'t add the word', 'white', 'on_red'))
        print(e)
    else:
        conn.commit()
        print(colored('%s has been inserted into database' % word, 'green'))
    finally:
        curs.close()
        conn.close()
def delete_word(word):
    '''Delete `word` (or a phrase) from the database, if present.'''
    conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
    curs = conn.cursor()
    try:
        # search first (parameterized -- safe for words containing quotes)
        curs.execute('SELECT expl, pr FROM Word WHERE name = ?', (word,))
        res = curs.fetchall()
        if res:
            try:
                curs.execute('DELETE FROM Word WHERE name = ?', (word,))
            except Exception as e:
                print(e)
            else:
                print(colored('%s has been deleted from database' % word, 'green'))
                conn.commit()
        else:
            print(colored('%s not exists in the database' % word, 'white', 'on_red'))
    finally:
        # Bug fix: the cursor/connection used to leak when the word was
        # not found (close() lived only inside the found branch).
        curs.close()
        conn.close()
def set_priority(word, pr):
    '''
    set the priority of the word.
    priority(from 1 to 5) is the importance of the word.
    '''
    conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
    curs = conn.cursor()
    try:
        # Parameterized lookups/updates: the old %-interpolation broke on
        # quoted words and was an injection hazard.
        curs.execute('SELECT expl, pr FROM Word WHERE name = ?', (word,))
        res = curs.fetchall()
        if res:
            try:
                curs.execute('UPDATE Word SET pr = ? WHERE name = ?', (pr, word))
            except Exception as e:
                print(colored('something\'s wrong, you can\'t reset priority', 'white', 'on_red'))
                print(e)
            else:
                print(colored('the priority of %s has been reset to %s' % (word, pr), 'green'))
                conn.commit()
        else:
            print(colored('%s not exists in the database' % word, 'white', 'on_red'))
    finally:
        # Bug fix: close the connection on every path (the original leaked
        # it when the word was missing).
        curs.close()
        conn.close()
def list_letter(aset, vb=False, output=False):
    '''List stored words filed under the catalog letter `aset` (A-Z).

    aset:   the catalog letter the words were filed under
    vb:     if True, show the cached explanation instead of just the name
    output: if True, print plain text (no terminal colors) for redirection
    '''
    conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
    curs = conn.cursor()
    try:
        # The column name comes from a two-item whitelist; the letter
        # itself is bound as a parameter (the old %-interpolation of user
        # input into the SQL text was an injection hazard).
        column = 'expl' if vb else 'name'
        curs.execute('SELECT %s, pr FROM Word WHERE aset = ?' % column, (aset,))
    except Exception as e:
        print(colored('something\'s wrong, catlog is from A to Z', 'red'))
        print(e)
    else:
        # Section header: the letter centered in a 40-char dashed rule.
        if not output:
            print(colored(format(aset, '-^40s'), 'green'))
        else:
            print(format(aset, '-^40s'))
        for line in curs.fetchall():
            expl = line[0]
            pr = line[1]
            print('\n' + '=' * 40 + '\n')
            if not output:
                print(colored(' ' * pr, 'red', ), colored(' ' * (5 - pr), 'yellow'), sep='')
                colorful_print(expl)
            else:
                print(' ' * pr + ' ' * (5 - pr))
                normal_print(expl)
    finally:
        curs.close()
        conn.close()
def list_priority(pr, vb=False, output=False):
    '''
    list words by priority, like this:
    1 : list words which the priority is 1,
    2+ : list words which the priority is lager than 2,
    3-4 : list words which the priority is from 3 to 4.
    '''
    conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
    curs = conn.cursor()
    try:
        # `pr` is a *string* spec ("1", "2+", "3-4"); only its digit
        # characters are interpolated, via int(), so the %-formatting here
        # cannot inject -- but an unmatched spec (e.g. "2-") executes no
        # query at all.  NOTE(review): in that case fetchall() below runs
        # on an unused cursor -- confirm the intended behavior.
        if not vb:
            if len(pr) == 1:
                curs.execute('SELECT name, pr FROM Word WHERE pr == %d ORDER by pr, name' % (int(pr[0])))
            elif len(pr) == 2 and pr[1] == '+':
                curs.execute('SELECT name, pr FROM Word WHERE pr >= %d ORDER by pr, name' % (int(pr[0])))
            elif len(pr) == 3 and pr[1] == '-':
                # '% d' (space flag) formats ints with a leading space;
                # harmless inside the SQL text.
                curs.execute('SELECT name, pr FROM Word WHERE pr >= %d AND pr<= % d ORDER by pr, name' % (
                    int(pr[0]), int(pr[2])))
        else:
            if len(pr) == 1:
                curs.execute('SELECT expl, pr FROM Word WHERE pr == %d ORDER by pr, name' % (int(pr[0])))
            elif len(pr) == 2 and pr[1] == '+':
                curs.execute('SELECT expl, pr FROM Word WHERE pr >= %d ORDER by pr, name' % (int(pr[0])))
            elif len(pr) == 3 and pr[1] == '-':
                curs.execute('SELECT expl, pr FROM Word WHERE pr >= %d AND pr<= %d ORDER by pr, name' % (
                    int(pr[0]), int(pr[2])))
    except Exception as e:
        print(colored('something\'s wrong, priority must be 1-5', 'red'))
        print(e)
    else:
        for line in curs.fetchall():
            expl = line[0]
            pr = line[1]
            print('\n' + '=' * 40 + '\n')
            if not output:
                print(colored(' ' * pr, 'red', ), colored(' ' * (5 - pr), 'yellow'), sep='')
                colorful_print(expl)
            else:
                print(' ' * pr + ' ' * (5 - pr))
                normal_print(expl)
    finally:
        curs.close()
        conn.close()
def list_latest(limit, vb=False, output=False):
    '''List the `limit` most recently added words (newest first).

    limit:  how many words to show
    vb:     if True, show the cached explanation instead of just the name
    output: if True, print plain text (no terminal colors) for redirection
    '''
    conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
    curs = conn.cursor()
    try:
        # Column name from a two-item whitelist; LIMIT is bound as a
        # parameter instead of being %-formatted into the SQL text.
        column = 'expl' if vb else 'name'
        curs.execute(
            'SELECT %s, pr, addtime FROM Word ORDER by datetime(addtime) DESC LIMIT ?' % column,
            (limit,))
    except Exception as e:
        print(e)
        print(colored('something\'s wrong, please set the limit', 'red'))
    else:
        for line in curs.fetchall():
            expl = line[0]
            pr = line[1]
            print('\n' + '=' * 40 + '\n')
            if not output:
                print(colored(' ' * pr, 'red'), colored(' ' * (5 - pr), 'yellow'), sep='')
                colorful_print(expl)
            else:
                print(' ' * pr + ' ' * (5 - pr))
                normal_print(expl)
    finally:
        curs.close()
        conn.close()
def count_word(arg):
    '''Print how many stored words match `arg`.

    arg forms:
      'all'  -- every word in the database
      'a'    -- words filed under that catalog letter
      '3'    -- words with priority exactly 3
      '3+'   -- words with priority >= 3
      '3-5'  -- words with priority between 3 and 5 (inclusive)
    '''
    conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
    curs = conn.cursor()
    try:
        executed = True
        if arg and arg[0].isdigit():
            if len(arg) == 1:
                curs.execute('SELECT count(*) FROM Word WHERE pr == ?', (int(arg[0]),))
            elif len(arg) == 2 and arg[1] == '+':
                curs.execute('SELECT count(*) FROM Word WHERE pr >= ?', (int(arg[0]),))
            elif len(arg) == 3 and arg[1] == '-':
                curs.execute('SELECT count(*) FROM Word WHERE pr >= ? AND pr <= ?',
                             (int(arg[0]), int(arg[2])))
            else:
                executed = False
        elif arg and arg[0].isalpha():
            if arg == 'all':
                curs.execute('SELECT count(*) FROM Word')
            elif len(arg) == 1:
                curs.execute('SELECT count(*) FROM Word WHERE aset == ?', (arg.upper(),))
            else:
                executed = False
        else:
            executed = False
        if executed:
            res = curs.fetchall()
            print(res[0][0])
        else:
            # Bug fix: unrecognized args used to fall through to fetchall()
            # on a cursor that never ran a query and crash; report instead.
            print(colored('unrecognized argument: %s' % arg, 'red'))
    finally:
        # Always release the connection, even on a bad argument.
        curs.close()
        conn.close()
if __name__ == '__main__':
    # CLI entry point; `main()` (argument parsing/dispatch) is expected to
    # be defined earlier in this module -- not visible in this excerpt.
    main()
| 32.834188 | 112 | 0.508434 |
e3b1ad3f8a41b03310d872dbf885d93f88101fcf | 4,925 | py | Python | models/gcn.py | Louis-udm/Word-Grounded-Graph-Convolutional-Network | 4c90bff0ec8bcdd8994154eead0efb5a3caefca7 | [
"MIT"
] | null | null | null | models/gcn.py | Louis-udm/Word-Grounded-Graph-Convolutional-Network | 4c90bff0ec8bcdd8994154eead0efb5a3caefca7 | [
"MIT"
] | null | null | null | models/gcn.py | Louis-udm/Word-Grounded-Graph-Convolutional-Network | 4c90bff0ec8bcdd8994154eead0efb5a3caefca7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Title: GCN models
Description:
The original Graph convolutional network model and GCN layer.
Refer to: https://arxiv.org/abs/1609.02907
"""
# =======================================
# @author Zhibin.Lu
# @email zhibin.lu@umontreal.ca
# =======================================
import collections
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
| 24.502488 | 72 | 0.520406 |
e3b1ba519d604af495caccc117a36b3a9bff6079 | 2,513 | py | Python | tabledefinition/generate_table_definitions_for_solana.py | blockchain-etl/evmchain-etl-table-definition-cli | 033d7e8ddc33f47378547a304b2688df3a0a3746 | [
"MIT"
] | 1 | 2022-03-04T11:24:31.000Z | 2022-03-04T11:24:31.000Z | tabledefinition/generate_table_definitions_for_solana.py | blockchain-etl/evmchain-etl-table-definition-cli | 033d7e8ddc33f47378547a304b2688df3a0a3746 | [
"MIT"
] | null | null | null | tabledefinition/generate_table_definitions_for_solana.py | blockchain-etl/evmchain-etl-table-definition-cli | 033d7e8ddc33f47378547a304b2688df3a0a3746 | [
"MIT"
] | null | null | null | SOLIDITY_TO_BQ_TYPES = {
'address': 'STRING',
}
table_description = ''
| 32.217949 | 119 | 0.578591 |
e3b286c18d71e706ee97d4e448587e741b1515a4 | 587 | py | Python | number-guessing-game.py | DataSciPyCodes/Python-Projects | 0c62477f2177d6ec7431875da6aa53778a790bf6 | [
"MIT"
] | null | null | null | number-guessing-game.py | DataSciPyCodes/Python-Projects | 0c62477f2177d6ec7431875da6aa53778a790bf6 | [
"MIT"
] | null | null | null | number-guessing-game.py | DataSciPyCodes/Python-Projects | 0c62477f2177d6ec7431875da6aa53778a790bf6 | [
"MIT"
] | null | null | null | #Method-1 guess the number game
import random
number = random.randint(1,10)
guess = 0
count = 0
print("You can exit the game anytime. Just enter 'exit'.")
while guess != number and guess != "exit":
guess = input("Guess a number between 1 to 10 :- ")
if guess == "exit":
print("Closing the game...")
break
guess = int(guess)
count += 1
if guess < number:
print("Too low!")
elif guess > number:
print("Too high!")
else:
print("\nCongratulation, You got it!")
print("You have tried ", count ," times")
| 23.48 | 58 | 0.577513 |
e3b312bcfe15753efff73463e7b650e5bc126303 | 10,014 | py | Python | docking/dock_and_equilibrate.py | proteneer/timemachine | feee9f24adcb533ab9e1c15a3f4fa4dcc9d9a701 | [
"Apache-2.0"
] | 91 | 2019-01-05T17:03:04.000Z | 2022-03-11T09:08:46.000Z | docking/dock_and_equilibrate.py | proteneer/timemachine | feee9f24adcb533ab9e1c15a3f4fa4dcc9d9a701 | [
"Apache-2.0"
] | 474 | 2019-01-07T14:33:15.000Z | 2022-03-31T19:15:12.000Z | docking/dock_and_equilibrate.py | proteneer/timemachine | feee9f24adcb533ab9e1c15a3f4fa4dcc9d9a701 | [
"Apache-2.0"
] | 12 | 2019-01-13T00:40:36.000Z | 2022-01-14T10:23:54.000Z | """Solvates a host, inserts guest(s) into solvated host, equilibrates
"""
import os
import time
import tempfile
import numpy as np
from rdkit import Chem
from md import builders, minimizer
from fe import pdb_writer, free_energy
from ff import Forcefield
from ff.handlers.deserialize import deserialize_handlers
from timemachine.lib import custom_ops, LangevinIntegrator
from docking import report
def dock_and_equilibrate(
    host_pdbfile,
    guests_sdfile,
    max_lambda,
    insertion_steps,
    eq_steps,
    outdir,
    fewer_outfiles=False,
    constant_atoms=[],
):
    """Solvates a host, inserts guest(s) into solvated host, equilibrates
    Parameters
    ----------
    host_pdbfile: path to host pdb file to dock into
    guests_sdfile: path to input sdf with guests to pose/dock
    max_lambda: lambda value the guest should insert from or delete to
        (recommended: 1.0 for work calulation, 0.25 to stay close to original pose)
        (must be =1 for work calculation to be applicable)
    insertion_steps: how many steps to insert the guest over (recommended: 501)
    eq_steps: how many steps of equilibration to do after insertion (recommended: 15001)
    outdir: where to write output (will be created if it does not already exist)
    fewer_outfiles: if True, will only write frames for the equilibration, not insertion
    constant_atoms: atom numbers from the host_pdbfile to hold mostly fixed across the simulation
        (1-indexed, like PDB files)
    Output
    ------
    A pdb & sdf file for the last step of insertion
        (outdir/<guest_name>/<guest_name>_ins_<step>_[host.pdb/guest.sdf])
    A pdb & sdf file every 1000 steps of equilibration
        (outdir/<guest_name>/<guest_name>_eq_<step>_[host.pdb/guest.sdf])
    stdout corresponding to the files written noting the lambda value and energy
    stdout for each guest noting the work of transition, if applicable
    stdout for each guest noting how long it took to run
    Note
    ----
    The work will not be calculated if the du_dl endpoints are not close to 0 or if any norm of
    force per atom exceeds 20000 kJ/(mol*nm) [MAX_NORM_FORCE defined in docking/report.py]
    """
    # NOTE: constant_atoms=[] is a mutable default, but it is only iterated
    # (never mutated) below, so it is harmless here.
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    print(
        f"""
    HOST_PDBFILE = {host_pdbfile}
    GUESTS_SDFILE = {guests_sdfile}
    OUTDIR = {outdir}
    MAX_LAMBDA = {max_lambda}
    INSERTION_STEPS = {insertion_steps}
    EQ_STEPS = {eq_steps}
    """
    )
    # Prepare host
    # TODO: handle extra (non-transitioning) guests?
    print("Solvating host...")
    (
        solvated_host_system,
        solvated_host_coords,
        _,
        _,
        host_box,
        solvated_topology,
    ) = builders.build_protein_system(host_pdbfile)
    # Round-trip the solvated host through a temporary PDB so RDKit can
    # load it as a molecule (needed when writing output frames later).
    _, solvated_host_pdb = tempfile.mkstemp(suffix=".pdb", text=True)
    writer = pdb_writer.PDBWriter([solvated_topology], solvated_host_pdb)
    writer.write_frame(solvated_host_coords)
    writer.close()
    solvated_host_mol = Chem.MolFromPDBFile(solvated_host_pdb, removeHs=False)
    os.remove(solvated_host_pdb)
    # Load the guest forcefield parameters shipped alongside this package.
    guest_ff_handlers = deserialize_handlers(
        open(
            os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                "..",
                "ff/params/smirnoff_1_1_0_ccc.py",
            )
        ).read()
    )
    ff = Forcefield(guest_ff_handlers)
    # Run the procedure
    print("Getting guests...")
    suppl = Chem.SDMolSupplier(guests_sdfile, removeHs=False)
    for guest_mol in suppl:
        start_time = time.time()
        guest_name = guest_mol.GetProp("_Name")
        guest_conformer = guest_mol.GetConformer(0)
        orig_guest_coords = np.array(guest_conformer.GetPositions(), dtype=np.float64)
        orig_guest_coords = orig_guest_coords / 10  # convert to md_units
        minimized_coords = minimizer.minimize_host_4d(
            [guest_mol], solvated_host_system, solvated_host_coords, ff, host_box
        )
        # Build the combined host+guest bound potentials and masses.
        afe = free_energy.AbsoluteFreeEnergy(guest_mol, ff)
        ups, sys_params, combined_masses, _ = afe.prepare_host_edge(
            ff.get_ordered_params(), solvated_host_system, minimized_coords
        )
        combined_bps = []
        for up, sp in zip(ups, sys_params):
            combined_bps.append(up.bind(sp))
        x0 = np.concatenate([minimized_coords, orig_guest_coords])
        v0 = np.zeros_like(x0)
        print(f"SYSTEM", f"guest_name: {guest_name}", f"num_atoms: {len(x0)}")
        # Pin "constant" atoms by making them extremely heavy (1-indexed,
        # matching PDB atom numbering).
        for atom_num in constant_atoms:
            combined_masses[atom_num - 1] += 50000
        seed = 2021
        intg = LangevinIntegrator(300.0, 1.5e-3, 1.0, combined_masses, seed).impl()
        u_impls = []
        for bp in combined_bps:
            bp_impl = bp.bound_impl(precision=np.float32)
            u_impls.append(bp_impl)
        ctxt = custom_ops.Context(x0, v0, host_box, intg, u_impls)
        # insert guest
        insertion_lambda_schedule = np.linspace(max_lambda, 0.0, insertion_steps)
        calc_work = True
        # collect a du_dl calculation once every other step
        subsample_interval = 1
        full_du_dls, _, _ = ctxt.multiple_steps(insertion_lambda_schedule, subsample_interval)
        step = len(insertion_lambda_schedule) - 1
        lamb = insertion_lambda_schedule[-1]
        # NOTE(review): one extra step is taken at the final lambda before
        # reporting -- presumably so the reported frame reflects the end
        # state of the schedule; confirm.
        ctxt.step(lamb)
        report.report_step(
            ctxt,
            step,
            lamb,
            host_box,
            combined_bps,
            u_impls,
            guest_name,
            insertion_steps,
            "INSERTION",
        )
        if not fewer_outfiles:
            # Coordinates are scaled by 10 for output (nm -> Angstrom).
            host_coords = ctxt.get_x_t()[: len(solvated_host_coords)] * 10
            guest_coords = ctxt.get_x_t()[len(solvated_host_coords) :] * 10
            report.write_frame(
                host_coords,
                solvated_host_mol,
                guest_coords,
                guest_mol,
                guest_name,
                outdir,
                str(step).zfill(len(str(insertion_steps))),
                "ins",
            )
        if report.too_much_force(ctxt, lamb, host_box, combined_bps, u_impls):
            print("Not calculating work (too much force)")
            calc_work = False
            continue
        # Note: this condition only applies for ABFE, not RBFE
        if abs(full_du_dls[0]) > 0.001 or abs(full_du_dls[-1]) > 0.001:
            print("Not calculating work (du_dl endpoints are not ~0)")
            calc_work = False
        if calc_work:
            # Work of insertion = integral of du/dl over the lambda path.
            work = np.trapz(full_du_dls, insertion_lambda_schedule[::subsample_interval])
            print(f"guest_name: {guest_name}\tinsertion_work: {work:.2f}")
        # equilibrate
        for step in range(eq_steps):
            ctxt.step(0.00)
            if step % 1000 == 0:
                report.report_step(
                    ctxt,
                    step,
                    0.00,
                    host_box,
                    combined_bps,
                    u_impls,
                    guest_name,
                    eq_steps,
                    "EQUILIBRATION",
                )
                if (not fewer_outfiles) or (step == eq_steps - 1):
                    host_coords = ctxt.get_x_t()[: len(solvated_host_coords)] * 10
                    guest_coords = ctxt.get_x_t()[len(solvated_host_coords) :] * 10
                    report.write_frame(
                        host_coords,
                        solvated_host_mol,
                        guest_coords,
                        guest_mol,
                        guest_name,
                        outdir,
                        str(step).zfill(len(str(eq_steps))),
                        "eq",
                    )
            # Spot-check forces at the start, midpoint, and end of EQ; bail
            # out of equilibration for this guest if they blow up.
            if step in (0, int(eq_steps / 2), eq_steps - 1):
                if report.too_much_force(ctxt, 0.00, host_box, combined_bps, u_impls):
                    break
        end_time = time.time()
        print(f"{guest_name} took {(end_time - start_time):.2f} seconds")
if __name__ == "__main__":
    # CLI entry point; `main()` (argument parsing) is defined elsewhere in
    # this module -- not visible in this excerpt.
    main()
| 33.049505 | 105 | 0.603056 |
e3b3a2b9c400072459039396551edf7edb2673da | 5,552 | py | Python | Lessons/source/bases.py | ericanaglik/cs13 | 6dc2dd41e0b82a43999145b226509d8fc0adb366 | [
"MIT"
] | null | null | null | Lessons/source/bases.py | ericanaglik/cs13 | 6dc2dd41e0b82a43999145b226509d8fc0adb366 | [
"MIT"
] | 8 | 2019-04-26T06:29:56.000Z | 2019-08-17T01:48:07.000Z | Lessons/source/bases.py | ericanaglik/cs13 | 6dc2dd41e0b82a43999145b226509d8fc0adb366 | [
"MIT"
] | null | null | null | #!python
import string
# Hint: Use these string constants to encode/decode hexadecimal digits and more
# string.digits is '0123456789'
# string.hexdigits is '0123456789abcdefABCDEF'
# string.ascii_lowercase is 'abcdefghijklmnopqrstuvwxyz'
# string.ascii_uppercase is 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# string.ascii_letters is ascii_lowercase + ascii_uppercase
# string.printable is digits + ascii_letters + punctuation + whitespace
digit_value = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15, 'g': 16, 'h': 17, 'i': 18, 'j': 19, 'k': 20, 'l': 21, 'm': 22, 'n': 23, 'o': 24, 'p': 25, 'q': 26, 'r': 27, 's': 28, 't': 29, 'u': 30, 'v': 31, 'w': 32, 'x': 33, 'y': 34, 'z': 35}
value_digit = {0: '0', 1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'a', 11: 'b', 12: 'c', 13: 'd', 14: 'e', 15: 'f', 16: 'g', 17: 'h', 18: 'i', 19: 'j', 20: 'k', 21: 'l', 22: 'm', 23: 'n', 24: 'o', 25: 'p', 26: 'q', 27: 'r', 28: 's', 29: 't', 30: 'u', 31: 'v', 32: 'w', 33: 'x', 34: 'y', 35: 'z'}
def decode(digits, base):
    """Decode given digits in given base to number in base 10.

    digits: str -- string representation of number (in given base)
    base: int -- base of given number
    return: int -- integer representation of number (in base 10)"""
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
    # The digit alphabet doubles as a value lookup: a character's index in
    # the string IS its numeric value (replaces the hand-written dict).
    alphabet = string.digits + string.ascii_lowercase
    total = 0
    # Horner's method, left to right: multiply the running total by the
    # base before adding each digit -- avoids computing base**i per digit.
    for char in digits.lower():
        value = alphabet.index(char)
        # Reject digits that are not valid in this base (the old version
        # silently produced a wrong answer for e.g. decode('f', 2)).
        assert value < base, 'digit {!r} is invalid in base {}'.format(char, base)
        total = total * base + value
    return total
def encode(number, base):
    """Encode given number in base 10 to digits in given base.

    number: int -- integer representation of number (in base 10)
    base: int -- base to convert to
    return: str -- string representation of number (in given base)"""
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
    # Handle unsigned numbers only for now
    assert number >= 0, 'number is negative: {}'.format(number)
    # Bug fix: zero used to encode as the empty string.
    if number == 0:
        return '0'
    # Index into the alphabet IS the digit value (replaces the dict).
    alphabet = string.digits + string.ascii_lowercase
    digits = []
    # Repeated division: each remainder is the next least-significant digit.
    while number > 0:
        number, remainder = divmod(number, base)
        digits.append(alphabet[remainder])
    digits.reverse()
    return ''.join(digits)
def convert(digits, base1, base2):
    """Convert given digits in base1 to digits in base2.

    digits: str -- string representation of number (in base1)
    base1: int -- base of given number
    base2: int -- base to convert to
    return: str -- string representation of number (in base2)"""
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base1 <= 36, 'base1 is out of range: {}'.format(base1)
    assert 2 <= base2 <= 36, 'base2 is out of range: {}'.format(base2)
    # Round-trip through base 10: decode handles any source base and
    # encode handles any target base, so no per-base special cases are
    # needed.  (A duplicate, unreachable copy of this logic that sat after
    # the original return statement has been removed.)
    return encode(decode(digits, base1), base2)
def main():
    """Read command-line arguments and convert given digits between bases."""
    import sys
    args = sys.argv[1:]  # Ignore script file name
    if len(args) == 3:
        # Positional args: the digit string, its base, and the target base.
        digits = args[0]
        base1 = int(args[1])
        base2 = int(args[2])
        # Convert given digits between bases
        result = convert(digits, base1, base2)
        print('{} in base {} is {} in base {}'.format(digits, base1, result, base2))
    else:
        # Wrong argument count: show usage instead of raising.
        print('Usage: {} digits base1 base2'.format(sys.argv[0]))
        print('Converts digits from base1 to base2')
if __name__ == '__main__':
    # main()
    # NOTE(review): `convert_fractional` is not defined anywhere in this
    # file as shown -- running the script raises NameError unless it is
    # defined elsewhere.  The CLI entry point `main()` above is commented
    # out; confirm which behavior is intended.
    print(convert_fractional(".625", 10, 2))
| 36.287582 | 328 | 0.600865 |
e3b3eb4f092c715b7640f0a297086182d40badaa | 3,667 | py | Python | ecl/provider_connectivity/v2/address_assignment.py | keiichi-hikita/eclsdk | c43afb982fd54eb1875cdc22d46044644d804c4a | [
"Apache-2.0"
] | null | null | null | ecl/provider_connectivity/v2/address_assignment.py | keiichi-hikita/eclsdk | c43afb982fd54eb1875cdc22d46044644d804c4a | [
"Apache-2.0"
] | null | null | null | ecl/provider_connectivity/v2/address_assignment.py | keiichi-hikita/eclsdk | c43afb982fd54eb1875cdc22d46044644d804c4a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from ecl.provider_connectivity import provider_connectivity_service
from ecl import resource2
from ecl.network.v2 import network
from ecl.network.v2 import subnet
import hashlib
| 32.166667 | 79 | 0.648487 |
e3b455062720d39836f878d513bb8f75e9ad6e80 | 675 | py | Python | tests/test_gifGenerator.py | wmokrogulski/gifGenerator | fa2b36d082e32f310583935a361d7b7a2bf29fe6 | [
"MIT"
] | null | null | null | tests/test_gifGenerator.py | wmokrogulski/gifGenerator | fa2b36d082e32f310583935a361d7b7a2bf29fe6 | [
"MIT"
] | 2 | 2021-12-23T11:01:14.000Z | 2022-03-12T01:01:15.000Z | tests/test_gifGenerator.py | wmokrogulski/gifGenerator | fa2b36d082e32f310583935a361d7b7a2bf29fe6 | [
"MIT"
] | null | null | null | import unittest
from unittest import TestCase
from src.gifGenerator import GifGenerator
if __name__ == '__main__':
unittest.main()
| 20.454545 | 57 | 0.666667 |
e3b8997cfd0dae36bdb5f953799806c281136e2c | 9,915 | py | Python | PSP/GAME/Python/python/bsddb/test/test_dbshelve.py | TheMindVirus/pspy | e9d1bba4f6b7486c3010bede93d88afdfc036492 | [
"MIT"
] | 7 | 2015-04-06T15:17:13.000Z | 2020-10-21T04:57:00.000Z | PSP/GAME/Python/python/bsddb/test/test_dbshelve.py | TheMindVirus/pspy | e9d1bba4f6b7486c3010bede93d88afdfc036492 | [
"MIT"
] | 1 | 2021-04-11T15:01:12.000Z | 2021-04-11T15:01:12.000Z | PSP/GAME/Python/python/bsddb/test/test_dbshelve.py | TheMindVirus/pspy | e9d1bba4f6b7486c3010bede93d88afdfc036492 | [
"MIT"
] | 4 | 2016-05-16T17:53:08.000Z | 2020-11-28T17:18:50.000Z | """
TestCases for checking dbShelve objects.
"""
import sys, os, string
import tempfile, random
from pprint import pprint
from types import *
import unittest
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db, dbshelve
except ImportError:
# For Python 2.3
from bsddb import db, dbshelve
from test_all import verbose
#----------------------------------------------------------------------
# We want the objects to be comparable so we can test dbshelve.values
# later on.
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
# test cases for a DBShelf in a RECNO DB.
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DBShelveTestCase))
suite.addTest(unittest.makeSuite(BTreeShelveTestCase))
suite.addTest(unittest.makeSuite(HashShelveTestCase))
suite.addTest(unittest.makeSuite(ThreadBTreeShelveTestCase))
suite.addTest(unittest.makeSuite(ThreadHashShelveTestCase))
suite.addTest(unittest.makeSuite(EnvBTreeShelveTestCase))
suite.addTest(unittest.makeSuite(EnvHashShelveTestCase))
suite.addTest(unittest.makeSuite(EnvThreadBTreeShelveTestCase))
suite.addTest(unittest.makeSuite(EnvThreadHashShelveTestCase))
suite.addTest(unittest.makeSuite(RecNoShelveTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| 27.618384 | 79 | 0.558548 |
e3b8e41843e13fa56ad91af90735c93477b63c0f | 2,940 | py | Python | lib/pyexcel/pyexcel/sources/file_source_output.py | logice/QQ-Groups-Spider | a161282c6832ed40183905e96205edb5a57e8a05 | [
"MIT"
] | null | null | null | lib/pyexcel/pyexcel/sources/file_source_output.py | logice/QQ-Groups-Spider | a161282c6832ed40183905e96205edb5a57e8a05 | [
"MIT"
] | null | null | null | lib/pyexcel/pyexcel/sources/file_source_output.py | logice/QQ-Groups-Spider | a161282c6832ed40183905e96205edb5a57e8a05 | [
"MIT"
] | 1 | 2021-04-12T07:48:42.000Z | 2021-04-12T07:48:42.000Z | """
pyexcel.sources.file
~~~~~~~~~~~~~~~~~~~
Representation of file sources
:copyright: (c) 2015-2016 by Onni Software Ltd.
:license: New BSD License
"""
from pyexcel import params
from pyexcel.factory import FileSource
from pyexcel.sources.rendererfactory import RendererFactory
from pyexcel.sources import renderer
RendererFactory.register_renderers(renderer.renderers)
try:
import pyexcel_text as text
RendererFactory.register_renderers(text.renderers)
except ImportError:
pass
file_types = tuple(RendererFactory.renderer_factories.keys())
sources = (
WriteOnlySheetSource,
WriteOnlyBookSource,
SheetSource,
BookSource
)
| 27.735849 | 76 | 0.618707 |
e3b964ad8299bef44ea12f1a518924e1fbba8289 | 920 | py | Python | setup.py | vmyrgiotis/MDF_DALEC_Grass | fdd168ce7845c925f8e95fc792e2204b440cca2e | [
"CC0-1.0"
] | null | null | null | setup.py | vmyrgiotis/MDF_DALEC_Grass | fdd168ce7845c925f8e95fc792e2204b440cca2e | [
"CC0-1.0"
] | null | null | null | setup.py | vmyrgiotis/MDF_DALEC_Grass | fdd168ce7845c925f8e95fc792e2204b440cca2e | [
"CC0-1.0"
] | null | null | null | import pathlib
from setuptools import setup, find_packages
HERE = pathlib.Path(__file__).parent
VERSION = '0.1.0'
PACKAGE_NAME = 'MDF_DALEC_GRASS'
AUTHOR = 'Vasilis Myrgiotis'
AUTHOR_EMAIL = 'v.myrgioti@ed.ac.uk'
URL = 'https://github.com/vmyrgiotis/MDF_DALEC_GRASS'
LICENSE = 'MIT'
DESCRIPTION = 'A Bayesian model-data fusion algorithm for simulating carbon dynamics in grassland ecosystems'
LONG_DESCRIPTION = (HERE / "README.md").read_text()
LONG_DESC_TYPE = "text/markdown"
INSTALL_REQUIRES = ["numpy", "pandas","spotpy","sklearn","sentinelhub", "shapely", "datetime", "geopandas", "cdsapi"]
PYTHON_REQUIRES = '>=3.8'
setup(name=PACKAGE_NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type=LONG_DESC_TYPE,
author=AUTHOR,
license=LICENSE,
author_email=AUTHOR_EMAIL,
url=URL,
install_requires=INSTALL_REQUIRES,
packages=find_packages()
)
| 28.75 | 117 | 0.773913 |
e3ba2aa1467f1469e9c62d6360d6ba267f4c6b98 | 752 | py | Python | setup.py | guma44/croo | 5cddee4c3163698cd9f265638e76671fef415baa | [
"MIT"
] | null | null | null | setup.py | guma44/croo | 5cddee4c3163698cd9f265638e76671fef415baa | [
"MIT"
] | null | null | null | setup.py | guma44/croo | 5cddee4c3163698cd9f265638e76671fef415baa | [
"MIT"
] | null | null | null | import setuptools
from croo import croo_args
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='croo',
version=croo_args.__version__,
scripts=['bin/croo'],
python_requires='>3.4.1',
author='Jin Lee',
author_email='leepc12@gmail.com',
description='CRomwell Output Organizer',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/ENCODE-DCC/croo',
packages=setuptools.find_packages(exclude=['examples', 'docs']),
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
],
install_requires=['caper']
)
| 28.923077 | 68 | 0.666223 |
e3bc8d2fb6f6907f9468220745bf4d9d7f0ccd81 | 5,142 | py | Python | source/estimators/estimator.py | mingweima/rldsge | ad40af982f455b65c5f407f6aa082e4caf7322a6 | [
"MIT"
] | null | null | null | source/estimators/estimator.py | mingweima/rldsge | ad40af982f455b65c5f407f6aa082e4caf7322a6 | [
"MIT"
] | null | null | null | source/estimators/estimator.py | mingweima/rldsge | ad40af982f455b65c5f407f6aa082e4caf7322a6 | [
"MIT"
] | null | null | null | from typing import Dict
import numpy as np
from ..envs.env import StructuralModel
from ..utils.lik_func import *
from ..utils.useful_class import ParameterGrid
if __name__ == "__main__":
grid = {
'delta': [0.1, 0.2, 0.3],
'gamma': [1, 10]
}
pg = ParameterGrid(grid)
for g in pg:
print(g)
| 42.495868 | 112 | 0.632828 |
e3bd47079e9b2036b424cb4e9c92e2174a230006 | 1,269 | py | Python | Algorithm.Python/OptionDataNullReferenceRegressionAlgorithm.py | BlackBoxAM/Lean | 5ea9f04b104d27f0fcfe3a383a3a60ca12206d99 | [
"Apache-2.0"
] | 6,580 | 2015-01-12T16:48:44.000Z | 2022-03-31T22:05:09.000Z | Algorithm.Python/OptionDataNullReferenceRegressionAlgorithm.py | BlackBoxAM/Lean | 5ea9f04b104d27f0fcfe3a383a3a60ca12206d99 | [
"Apache-2.0"
] | 3,392 | 2015-01-12T17:44:07.000Z | 2022-03-30T20:34:03.000Z | Algorithm.Python/OptionDataNullReferenceRegressionAlgorithm.py | BlackBoxAM/Lean | 5ea9f04b104d27f0fcfe3a383a3a60ca12206d99 | [
"Apache-2.0"
] | 3,354 | 2015-01-12T16:58:31.000Z | 2022-03-31T00:56:03.000Z | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### This algorithm is a regression test for issue #2018 and PR #2038.
### </summary>
| 37.323529 | 98 | 0.735225 |
e3bda12509b429c895c643f26b992aa471887764 | 1,371 | py | Python | examples/sharedlinks/sharedlinks-backend/links/models.py | gcbirzan/django-rest-registration | 1a9da937c283d03d1fce1a68322a702e14692c79 | [
"MIT"
] | 329 | 2018-05-09T13:10:37.000Z | 2022-03-25T11:05:20.000Z | examples/sharedlinks/sharedlinks-backend/links/models.py | gcbirzan/django-rest-registration | 1a9da937c283d03d1fce1a68322a702e14692c79 | [
"MIT"
] | 167 | 2018-04-21T00:28:17.000Z | 2022-03-30T09:24:52.000Z | examples/sharedlinks/sharedlinks-backend/links/models.py | gcbirzan/django-rest-registration | 1a9da937c283d03d1fce1a68322a702e14692c79 | [
"MIT"
] | 97 | 2018-05-09T14:17:59.000Z | 2022-02-23T08:46:30.000Z | from django.db import models
from django.contrib.auth.models import User
| 23.637931 | 68 | 0.592268 |
e3bdcff4bd778ceff3ed0e2ca2a1821228f999c6 | 7,106 | py | Python | hpc_rll/rl_utils/ppo.py | mingzhang96/DI-hpc | 5431c283a91b77df7c6a86fb0affa60099d4bb31 | [
"Apache-2.0"
] | 64 | 2021-07-08T02:18:08.000Z | 2022-02-28T09:52:57.000Z | hpc_rll/rl_utils/ppo.py | mingzhang96/DI-hpc | 5431c283a91b77df7c6a86fb0affa60099d4bb31 | [
"Apache-2.0"
] | null | null | null | hpc_rll/rl_utils/ppo.py | mingzhang96/DI-hpc | 5431c283a91b77df7c6a86fb0affa60099d4bb31 | [
"Apache-2.0"
] | 3 | 2021-07-14T08:58:45.000Z | 2022-03-30T12:36:46.000Z | import torch
import torch.nn.functional as F
from typing import Optional
from collections import namedtuple
import hpc_rl_utils
# hpc version only support cuda
hpc_ppo_loss = namedtuple('hpc_ppo_loss', ['policy_loss', 'value_loss', 'entropy_loss'])
hpc_ppo_info = namedtuple('hpc_ppo_info', ['approx_kl', 'clipfrac'])
| 47.373333 | 192 | 0.659161 |
e3be7a53e508b992ad117b38ccc98afaeeef9017 | 1,069 | py | Python | src/monitoring_service/metrics.py | netcriptus/raiden-services | 3955d91852c616f6ba0a3a979757edbd852b2c6d | [
"MIT"
] | 13 | 2019-02-07T23:23:33.000Z | 2021-07-03T16:00:53.000Z | src/monitoring_service/metrics.py | netcriptus/raiden-services | 3955d91852c616f6ba0a3a979757edbd852b2c6d | [
"MIT"
] | 1,095 | 2019-01-21T09:30:57.000Z | 2022-03-25T05:13:30.000Z | src/monitoring_service/metrics.py | netcriptus/raiden-services | 3955d91852c616f6ba0a3a979757edbd852b2c6d | [
"MIT"
] | 18 | 2019-01-21T09:17:19.000Z | 2022-02-23T15:53:17.000Z | from prometheus_client import Counter
from raiden.utils.typing import TokenAmount
from raiden_libs.metrics import ( # noqa: F401, pylint: disable=unused-import
ERRORS_LOGGED,
EVENTS_EXCEPTIONS_RAISED,
EVENTS_PROCESSING_TIME,
MESSAGES_EXCEPTIONS_RAISED,
MESSAGES_PROCESSING_TIME,
REGISTRY,
ErrorCategory,
MetricsEnum,
collect_event_metrics,
collect_message_metrics,
get_metrics_for_label,
)
REWARD_CLAIMS = Counter(
"economics_reward_claims_successful_total",
"The number of overall successful reward claims",
labelnames=[Who.label_name()],
registry=REGISTRY,
)
REWARD_CLAIMS_TOKEN = Counter(
"economics_reward_claims_token_total",
"The amount of token earned by reward claims",
labelnames=[Who.label_name()],
registry=REGISTRY,
)
| 25.452381 | 78 | 0.750234 |
e3be9c37370787ab104874a6e05f24ddb94436e5 | 9,774 | py | Python | helper/fetch_funcdata.py | SysSec-KAIST/FirmKit | 6d8408e1336ed0b5d42d9722e0918888b3f3b424 | [
"MIT"
] | 3 | 2022-01-05T22:04:09.000Z | 2022-03-28T07:01:48.000Z | helper/fetch_funcdata.py | SysSec-KAIST/FirmKit | 6d8408e1336ed0b5d42d9722e0918888b3f3b424 | [
"MIT"
] | null | null | null | helper/fetch_funcdata.py | SysSec-KAIST/FirmKit | 6d8408e1336ed0b5d42d9722e0918888b3f3b424 | [
"MIT"
] | null | null | null | # modified from TikNib/tiknib/ida/fetch_funcdata_v7.5.py
import os
import sys
import string
from hashlib import sha1
from collections import defaultdict
import time
import pprint as pp
import idautils
import idc
import idaapi
import ida_pro
import ida_nalt
import ida_bytes
sys.path.append(os.path.abspath("./TikNib"))
from tiknib.utils import demangle, get_arch, init_idc, parse_fname, store_func_data
printset = set(string.printable)
isprintable = lambda x: set(x).issubset(printset)
# find consts
# find strings
# This function returns a caller map, and callee map for each function.
# This function returns edges, and updates caller_map, and callee_map
init_idc()
try:
func_data = main()
except:
import traceback
traceback.print_exc()
ida_pro.qexit(1)
else:
bin_path = get_bin_path()
store_func_data(bin_path, func_data)
ida_pro.qexit(0)
| 34.294737 | 89 | 0.561797 |