hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5a6742060ae0c9724845b125a09501149114e4ca | 7,284 | py | Python | digesters/hipchat/hipchat_notification_digester.py | paul-hammant/imapdigester | 7d2d9525d39b1f3f839a219061180971404e4bb8 | [
"MIT"
] | 25 | 2016-04-04T17:32:47.000Z | 2022-03-08T02:18:07.000Z | digesters/hipchat/hipchat_notification_digester.py | paul-hammant/imapslurper | 7d2d9525d39b1f3f839a219061180971404e4bb8 | [
"MIT"
] | null | null | null | digesters/hipchat/hipchat_notification_digester.py | paul-hammant/imapslurper | 7d2d9525d39b1f3f839a219061180971404e4bb8 | [
"MIT"
] | 4 | 2017-01-02T21:03:28.000Z | 2022-02-22T18:38:44.000Z | # coding=utf-8
import arrow
from bs4 import BeautifulSoup
from digesters.base_digester import BaseDigester
TEMPLATE = """<html>
<head>
<meta content="text/html; charset=utf-8" http-equiv="Content-Type"/>
<title>Atlassian HipChat</title>
</head>
<body style="box-sizing: border-box; height: 100%; width: 100%;">
<table bgcolor="#f5f5f5" border="0" cellpadding="0" cellspacing="0" class="container wrapper_shrink"
style="_padding: 20px; padding: 3%;" width="640">
<tr>
<td valign="top">
<table bgcolor="#ffffff" border="0" cellpadding="0" cellspacing="0" class="inner-container table_shrink"
id="email_content"
style="-khtml-border-radius: 6px; -moz-border-radius: 6px; -webkit-border-radius: 6px; border: 1px solid #dadada; border-radius: 6px; width: 100% !important; margin-top: 15px;"
width="600">
<tr>
<td class="td top-spacer"
style="font-size: 15px; line-height: 4px; padding-left: 20px; padding-right: 10px !important;"
valign="top">
</td>
</tr>
<tr>
<td>
<div class="history_container history_email" id="chats" style="padding-right: 0px !important;">
<InsertHere/>
</div>
</td>
</tr>
</table>
</td>
</tr>
</table>
</body>
</html>"""
| 42.596491 | 195 | 0.623009 |
5a6831d8ec7d93dd05d620a6d41fce88e4531158 | 138 | py | Python | FB2/__init__.py | Ae-Mc/FB2 | 2c29f774ab08bdad5bd6144b1be71b93146ce8fe | [
"MIT"
] | 3 | 2020-11-15T10:55:22.000Z | 2022-02-09T19:45:52.000Z | FB2/__init__.py | Ae-Mc/FB2 | 2c29f774ab08bdad5bd6144b1be71b93146ce8fe | [
"MIT"
] | 1 | 2020-11-15T11:04:59.000Z | 2020-11-19T22:12:52.000Z | FB2/__init__.py | Ae-Mc/FB2 | 2c29f774ab08bdad5bd6144b1be71b93146ce8fe | [
"MIT"
] | null | null | null | from .FictionBook2 import FictionBook2
from .Author import Author
from .TitleInfo import TitleInfo
from .DocumentInfo import DocumentInfo
| 27.6 | 38 | 0.855072 |
5a683a89ea393148d4edd0bc84134016995c858d | 374 | py | Python | runserver.py | chintal/tendril-monitor-vendor | af7577bd88b3d35e09a733607555d5d10e1cd9c7 | [
"MIT"
] | null | null | null | runserver.py | chintal/tendril-monitor-vendor | af7577bd88b3d35e09a733607555d5d10e1cd9c7 | [
"MIT"
] | null | null | null | runserver.py | chintal/tendril-monitor-vendor | af7577bd88b3d35e09a733607555d5d10e1cd9c7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
# Copyright (C) 2015 Chintalagiri Shashank
# Released under the MIT license.
"""
Simple Deployment Example
-------------------------
"""
from vendor_monitor import worker
from twisted.internet import reactor
import logging
logging.basicConfig(level=logging.INFO)
if __name__ == '__main__':
worker.start()
reactor.run()
| 16.26087 | 42 | 0.68984 |
5a697644fbf259cd8f3bc1346fab09736144290b | 3,746 | py | Python | yt/frontends/ytdata/tests/test_unit.py | tukss/yt | 8bf6fce609cad3d4b291ebd94667019ab2e18377 | [
"BSD-3-Clause-Clear"
] | 1 | 2021-09-15T08:17:43.000Z | 2021-09-15T08:17:43.000Z | yt/frontends/ytdata/tests/test_unit.py | tukss/yt | 8bf6fce609cad3d4b291ebd94667019ab2e18377 | [
"BSD-3-Clause-Clear"
] | 8 | 2020-04-02T16:51:49.000Z | 2022-01-11T14:12:44.000Z | yt/frontends/ytdata/tests/test_unit.py | stonnes/yt | aad3cfa3b4ebab7838352ab467275a27c26ff363 | [
"BSD-3-Clause-Clear"
] | 2 | 2020-08-12T15:46:11.000Z | 2021-02-09T13:09:17.000Z | import os
import shutil
import tempfile
import numpy as np
from yt.loaders import load, load_uniform_grid
from yt.testing import (
assert_array_equal,
assert_fname,
fake_random_ds,
requires_file,
requires_module,
)
from yt.utilities.answer_testing.framework import data_dir_load
from yt.visualization.plot_window import ProjectionPlot, SlicePlot
ytdata_dir = "ytdata_test"
| 28.815385 | 88 | 0.644688 |
5a6985ea52c126cdfc4394e0251917377b3471a6 | 10,580 | py | Python | openmdao.lib/src/openmdao/lib/drivers/test/test_opt_genetic.py | mjfwest/OpenMDAO-Framework | a5521f47ad7686c25b203de74e1c7dff5fd7a52b | [
"Apache-2.0"
] | 69 | 2015-01-02T19:10:08.000Z | 2021-11-14T04:42:28.000Z | openmdao.lib/src/openmdao/lib/drivers/test/test_opt_genetic.py | jcchin/OpenMDAO-Framework | 038e89b06da1c74f00918f4c6fbd8bd365e25657 | [
"Apache-2.0"
] | 3 | 2015-01-15T23:08:18.000Z | 2015-03-11T16:57:35.000Z | openmdao.lib/src/openmdao/lib/drivers/test/test_opt_genetic.py | jcchin/OpenMDAO-Framework | 038e89b06da1c74f00918f4c6fbd8bd365e25657 | [
"Apache-2.0"
] | 31 | 2015-09-16T00:37:35.000Z | 2022-01-10T06:27:55.000Z | """
Test the genetic optimizer driver
"""
import unittest
import random
from openmdao.main.datatypes.api import Float, Array, Enum, Int, Str
from pyevolve import Selectors
from openmdao.main.api import Assembly, Component, set_as_top, Driver
from openmdao.lib.drivers.genetic import Genetic
# pylint: disable-msg=E1101
if __name__ == "__main__":
unittest.main()
| 33.587302 | 80 | 0.58913 |
5a69dfb1498fd1737edb8cb80ef069c5d681ed1f | 2,974 | py | Python | src/db/ohlc_to_db.py | canl/algo-trading | 288f43a54d6594f79c79dc21f5534ad9aa785b29 | [
"MIT"
] | 11 | 2020-04-04T08:59:37.000Z | 2020-12-25T20:21:05.000Z | src/db/ohlc_to_db.py | canl/algo-trading | 288f43a54d6594f79c79dc21f5534ad9aa785b29 | [
"MIT"
] | 1 | 2021-12-13T20:35:20.000Z | 2021-12-13T20:35:20.000Z | src/db/ohlc_to_db.py | canl/algo-trading | 288f43a54d6594f79c79dc21f5534ad9aa785b29 | [
"MIT"
] | 3 | 2020-06-21T16:29:56.000Z | 2020-07-18T15:15:01.000Z | import sqlite3
from datetime import datetime
from sqlite3 import Error
import pandas as pd
from src.pricer import read_price_df
DB_FILE_PATH = 'db.sqlite'
def connect_to_db(db_file):
"""
Connect to an SQlite database, if db file does not exist it will be created
:param db_file: absolute or relative path of db file
:return: sqlite3 connection
"""
sqlite3_conn = None
try:
sqlite3_conn = sqlite3.connect(db_file)
return sqlite3_conn
except Error as err:
print(err)
if sqlite3_conn is not None:
sqlite3_conn.close()
def insert_df_to_table(data: pd.DataFrame, table_name: str):
"""
Open a csv file with pandas, store its content in a pandas data frame, change the data frame headers to the table
column names and insert the data to the table
:param data: Data in DataFrame format, to be populated to SQL table
:param table_name: table name in the database to insert the data into
:return: None
"""
conn = connect_to_db(DB_FILE_PATH)
if conn is not None:
c = conn.cursor()
# Create table if it is not exist
c.execute('CREATE TABLE IF NOT EXISTS ' + table_name +
'(time VARCHAR NOT NULL PRIMARY KEY,'
'open DECIMAL,'
'high DECIMAL,'
'low DECIMAL,'
'close DECIMAL)')
data.columns = get_column_names_from_db_table(c, table_name)
data.to_sql(name=table_name, con=conn, if_exists='append', index=False)
conn.close()
print('SQL insert process finished')
else:
print('Connection to database failed')
def get_column_names_from_db_table(sql_cursor, table_name):
"""
Scrape the column names from a database table to a list
:param sql_cursor: sqlite cursor
:param table_name: table name to get the column names from
:return: a list with table column names
"""
table_column_names = 'PRAGMA table_info(' + table_name + ');'
sql_cursor.execute(table_column_names)
table_column_names = sql_cursor.fetchall()
column_names = list()
for name in table_column_names:
column_names.append(name[1])
return column_names
if __name__ == '__main__':
ccy_pair = 'USD_JPY'
start = datetime(2015, 1, 1, 0, 0, 0)
to = datetime(2020, 7, 31, 23, 59, 59)
df = read_price(start_date=start, end_date=to, instrument=ccy_pair)
# pattern: currency_pair _ ohlc
insert_df_to_table(data=df, table_name=f"{ccy_pair.lower().replace('_', '')}_ohlc")
| 30.979167 | 117 | 0.66308 |
5a6ab1cd0cde51b96b0f8b27b7f207dcb0b63462 | 2,793 | py | Python | morphs/data/localize.py | MarvinT/morphs | c8b204debcb23ba79c3112933af9e6ca4b05b7a1 | [
"MIT"
] | 2 | 2019-01-25T17:36:33.000Z | 2019-04-03T14:25:05.000Z | morphs/data/localize.py | MarvinT/morphs | c8b204debcb23ba79c3112933af9e6ca4b05b7a1 | [
"MIT"
] | 17 | 2018-09-21T00:07:10.000Z | 2019-05-23T17:07:35.000Z | morphs/data/localize.py | MarvinT/morphs | c8b204debcb23ba79c3112933af9e6ca4b05b7a1 | [
"MIT"
] | 3 | 2018-09-20T18:47:07.000Z | 2021-09-15T20:43:31.000Z | import pandas as pd
import numpy as np
import morphs
from six import exec_
from pathlib2 import Path
from joblib import Parallel, delayed
# adapted from klustakwik
# NEVER POINT THIS AT SOMETHING YOU DONT TRUST
| 33.25 | 85 | 0.649481 |
5a6c3376aee63cfa4176eec2e2221796087f1da4 | 55 | py | Python | app/cli/plugin/__init__.py | lonless0/flask_project | f5d6c5c7655e54d95069b469e3d470eda7a05cb7 | [
"MIT"
] | 786 | 2019-01-15T14:30:37.000Z | 2022-03-28T08:53:39.000Z | app/cli/plugin/__init__.py | lonless0/flask_project | f5d6c5c7655e54d95069b469e3d470eda7a05cb7 | [
"MIT"
] | 107 | 2019-01-18T05:15:16.000Z | 2022-03-16T07:13:05.000Z | app/cli/plugin/__init__.py | lonless0/flask_project | f5d6c5c7655e54d95069b469e3d470eda7a05cb7 | [
"MIT"
] | 222 | 2019-01-16T14:44:23.000Z | 2022-03-23T11:33:00.000Z | from .generator import generate
from .init import init
| 18.333333 | 31 | 0.818182 |
5a6c7805cdb06035d72a4db4a8f024fac0e49f51 | 2,512 | py | Python | labelocr/verify_ocr_app.py | tienthienhd/labelocr | 65297c12af9fa15f30d1457164d5cda7bebe70c1 | [
"Apache-2.0"
] | 2 | 2020-10-01T02:39:48.000Z | 2020-10-01T04:27:13.000Z | labelocr/verify_ocr_app.py | tienthienhd/labelocr | 65297c12af9fa15f30d1457164d5cda7bebe70c1 | [
"Apache-2.0"
] | null | null | null | labelocr/verify_ocr_app.py | tienthienhd/labelocr | 65297c12af9fa15f30d1457164d5cda7bebe70c1 | [
"Apache-2.0"
] | null | null | null | import atexit
import glob
import json
import logging
import os
import shutil
import sys
import tkinter as tk
import threading
from tkinter import filedialog, messagebox
import cv2
import numpy as np
import pandas as pd
import pygubu
from PIL import Image, ImageTk
from deprecated import deprecated
PROJECT_PATH = os.path.dirname(__file__)
PROJECT_UI = os.path.join(PROJECT_PATH, "verify_ocr.ui")
FORMAT = '%(asctime)-15s %(clientip)s %(user)-8s %(message)s'
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger("LabelOcr")
| 35.885714 | 139 | 0.667994 |
5a6ebd896d0065716f83ceee55fedb02e43d2b47 | 17,814 | py | Python | cosmic-core/systemvm/patches/centos7/opt/cosmic/router/bin/cs/firewall.py | sanderv32/cosmic | 9a9d86500b67255a1c743a9438a05c0d969fd210 | [
"Apache-2.0"
] | 64 | 2016-01-30T13:31:00.000Z | 2022-02-21T02:13:25.000Z | cosmic-core/systemvm/patches/centos7/opt/cosmic/router/bin/cs/firewall.py | sanderv32/cosmic | 9a9d86500b67255a1c743a9438a05c0d969fd210 | [
"Apache-2.0"
] | 525 | 2016-01-22T10:46:31.000Z | 2022-02-23T11:08:01.000Z | cosmic-core/systemvm/patches/centos7/opt/cosmic/router/bin/cs/firewall.py | sanderv32/cosmic | 9a9d86500b67255a1c743a9438a05c0d969fd210 | [
"Apache-2.0"
] | 25 | 2016-01-13T16:46:46.000Z | 2021-07-23T15:22:27.000Z | import logging
from jinja2 import Environment, FileSystemLoader
import utils
| 57.650485 | 140 | 0.544179 |
5a6f4d014d86fed26640b0dae06b65517e18a73d | 2,875 | py | Python | MachineLearning/knn/knn.py | z8g/pettern | abf6b9c09597bb2badec97d51112681e46dde760 | [
"Apache-2.0"
] | 72 | 2019-09-26T09:12:14.000Z | 2020-09-05T11:59:25.000Z | MachineLearning/knn/knn.py | z8g/common | abf6b9c09597bb2badec97d51112681e46dde760 | [
"Apache-2.0"
] | null | null | null | MachineLearning/knn/knn.py | z8g/common | abf6b9c09597bb2badec97d51112681e46dde760 | [
"Apache-2.0"
] | null | null | null | # -*- coding: UTF-8 -*-
import numpy
import operator
"""
================================================================================
kNN
1. ()
2.
3. k
4. k
5. k
================================================================================
"""
"""
:
: group,lables = kNN.createDataSet()
@return ,
"""
"""
: kNN
: classify0([0,0],group,lables,3)
@param u
@param dataSet
@param lables (labelsdataSet)
@param k
@return ()
"""
"""
: ( [0,1] [-1,1] )
: normDataSet, ranges, minValues = kNN.autoNorm(m)
@param dataset
@return , ,
01:
newValue = (oldValue - min) / (max - min)
"""
"""
: (dating)
: dataset_matrix,label_list = read_matrix('knnDataSet.txt')
@param filepath
@return ,
"""
"""
: ()
: return_vector = read_vector('digits/test/0_1.txt')
@param filepath
@return
""" | 26.136364 | 80 | 0.606957 |
5a6f7399d0e46958326d190fed0176f8bf1bbfef | 468 | py | Python | core/migrations/0012_alter_preco_categoria.py | thiagofreitascarneiro/Projeto_Fusion | 4bf9d1c69ddf83fbc957e9ccdc41112d71bbffa9 | [
"MIT"
] | null | null | null | core/migrations/0012_alter_preco_categoria.py | thiagofreitascarneiro/Projeto_Fusion | 4bf9d1c69ddf83fbc957e9ccdc41112d71bbffa9 | [
"MIT"
] | null | null | null | core/migrations/0012_alter_preco_categoria.py | thiagofreitascarneiro/Projeto_Fusion | 4bf9d1c69ddf83fbc957e9ccdc41112d71bbffa9 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.6 on 2021-09-05 19:39
from django.db import migrations, models
| 24.631579 | 133 | 0.587607 |
5a6fc90d5c1328218d16b60badb1e9edda81f0c8 | 2,394 | py | Python | Source/State/Main_Menu.py | LesterYHZ/Super-Mario-Bro-Python-Project | 2cbcb7ba713a81d37bd1ea16311f15e982a00774 | [
"MIT"
] | null | null | null | Source/State/Main_Menu.py | LesterYHZ/Super-Mario-Bro-Python-Project | 2cbcb7ba713a81d37bd1ea16311f15e982a00774 | [
"MIT"
] | null | null | null | Source/State/Main_Menu.py | LesterYHZ/Super-Mario-Bro-Python-Project | 2cbcb7ba713a81d37bd1ea16311f15e982a00774 | [
"MIT"
] | null | null | null | """
Main menu set up
"""
import pygame
from .. import Setup
from .. import Tools
from .. import Constant as Con
from ..Components import Info
| 36.830769 | 99 | 0.552632 |
5a7057c32e096dcc96fd46f2913322b29562d86b | 634 | py | Python | user/models.py | ThePokerFaCcCe/teamwork | e6d3cfa7821ddba7a122b740e7f5dabb2b1eb316 | [
"MIT"
] | null | null | null | user/models.py | ThePokerFaCcCe/teamwork | e6d3cfa7821ddba7a122b740e7f5dabb2b1eb316 | [
"MIT"
] | null | null | null | user/models.py | ThePokerFaCcCe/teamwork | e6d3cfa7821ddba7a122b740e7f5dabb2b1eb316 | [
"MIT"
] | null | null | null | from django.utils.translation import gettext_lazy as _
from django.contrib.auth.models import AbstractUser
from django.db import models
from user.validators import UsernameValidator
| 27.565217 | 69 | 0.637224 |
5a71f92e7f88851d5919ffc0e563e6147877d1d6 | 812 | py | Python | Advent2016/6.py | SSteve/AdventOfCode | aed16209381ccd292fc02008f1f2da5d16ff1a05 | [
"MIT"
] | null | null | null | Advent2016/6.py | SSteve/AdventOfCode | aed16209381ccd292fc02008f1f2da5d16ff1a05 | [
"MIT"
] | null | null | null | Advent2016/6.py | SSteve/AdventOfCode | aed16209381ccd292fc02008f1f2da5d16ff1a05 | [
"MIT"
] | null | null | null | from collections import Counter
TEST = """eedadn
drvtee
eandsr
raavrd
atevrs
tsrnev
sdttsa
rasrtv
nssdts
ntnada
svetve
tesnvt
vntsnd
vrdear
dvrsen
enarar"""
part1 = decode(TEST.splitlines())
assert part1 == 'easter'
part2 = decode(TEST.splitlines(), True)
assert part2 == 'advent'
with open('6.txt', 'r') as infile:
part1 = decode(infile.read().splitlines())
print(f"Part 1: {part1}")
with open('6.txt', 'r') as infile:
part2 = decode(infile.read().splitlines(), True)
print(f"Part 2: {part2}")
| 18.044444 | 52 | 0.64532 |
5a7517c33209b1b32f8a9e56da76245b5b0b9793 | 6,246 | py | Python | profile_api/views.py | csalaman/profiles-rest-api | 936d2a23fb78144c8e50a8d3de2b94051add49b9 | [
"MIT"
] | null | null | null | profile_api/views.py | csalaman/profiles-rest-api | 936d2a23fb78144c8e50a8d3de2b94051add49b9 | [
"MIT"
] | null | null | null | profile_api/views.py | csalaman/profiles-rest-api | 936d2a23fb78144c8e50a8d3de2b94051add49b9 | [
"MIT"
] | null | null | null | # DRF Views types (APIView & ViewSet)
# APIViews allows to write standard HTTP Methods as functions & give most control over the logic
# Benefits: Perfect for implementing complex logic, calling other APIs, working with local files
# Viewsets -> uses model operations for functions kist, create, retrieve, update, partial_update, destroy
# When to use: simple CRUD interface to database, quick & simple API, little to no customization on the logic, working with standard data structures
# Good to use when: need full control over the logic(complex algo, updating multiple datasources in a single API call),
# processing files and rendering a synchronous response, calling other APIs/services, accessing local files or data
from rest_framework.views import APIView
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework import status
# Import the serializer (app_name/serializers.py)
from profile_api import serializers
from profile_api import models
# Get Auth Token (For user authentication for every request)
from rest_framework.authentication import TokenAuthentication
# Get View Auth Token (for login, etc)
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticated
# Import permissions
from profile_api import permissions
# Import filters for filtering of data
from rest_framework import filters
# Viewset to manage user profiles API | 39.0375 | 148 | 0.693724 |
5a75c828e876ed3a1b7b9389dd4545aaaf2d9462 | 466 | py | Python | examples/panflute/myemph.py | jacobwhall/panflute | 281ddeaebd2c2c94f457f3da785037cadf69389e | [
"BSD-3-Clause"
] | 361 | 2016-04-26T18:23:30.000Z | 2022-03-24T20:58:18.000Z | examples/panflute/myemph.py | jacobwhall/panflute | 281ddeaebd2c2c94f457f3da785037cadf69389e | [
"BSD-3-Clause"
] | 164 | 2016-04-27T18:42:55.000Z | 2022-02-13T23:34:17.000Z | examples/panflute/myemph.py | jacobwhall/panflute | 281ddeaebd2c2c94f457f3da785037cadf69389e | [
"BSD-3-Clause"
] | 62 | 2016-06-15T13:33:54.000Z | 2021-11-20T07:33:07.000Z | #!/usr/bin/env python
import panflute as pf
"""
Pandoc filter that causes emphasis to be rendered using
the custom macro '\myemph{...}' rather than '\emph{...}'
in latex. Other output formats are unaffected.
"""
if __name__ == "__main__":
pf.toJSONFilter(myemph)
| 21.181818 | 64 | 0.654506 |
5a78040379a605d417a65ff4123fa8c2e73e5ad9 | 3,393 | py | Python | src/financial_statements/old/balance_sheet.py | LeanderLXZ/intelligent-analysis-of-financial-statements | 38bab5bea3c2f22f71020020c8325f6b6b014853 | [
"Apache-2.0"
] | null | null | null | src/financial_statements/old/balance_sheet.py | LeanderLXZ/intelligent-analysis-of-financial-statements | 38bab5bea3c2f22f71020020c8325f6b6b014853 | [
"Apache-2.0"
] | null | null | null | src/financial_statements/old/balance_sheet.py | LeanderLXZ/intelligent-analysis-of-financial-statements | 38bab5bea3c2f22f71020020c8325f6b6b014853 | [
"Apache-2.0"
] | 1 | 2021-12-15T02:09:16.000Z | 2021-12-15T02:09:16.000Z | import time
import threading
import argparse
import tushare as ts
import numpy as np
import pandas as pd
from pandas import datetime as dt
from tqdm import tqdm
from utils import *
with open('../../tushare_token.txt', 'r') as f:
token = f.readline()
ts.set_token(token)
tushare_api = ts.pro_api()
#
df_list = []
for list_status in ['L', 'D', 'P']:
df_i = tushare_api.stock_basic(
exchange='',
list_status=list_status,
fields='ts_code')
df_list.append(df_i)
df_all = pd.concat(df_list)
#
df = pd.DataFrame()
for ts_code in tqdm(df_all['ts_code'].values):
df_i = safe_get(
tushare_api.balancesheet,
ts_code=ts_code,
fields=
'ts_code, ann_date, f_ann_date, end_date, report_type, comp_type,'
'total_share, cap_rese, undistr_porfit, surplus_rese, special_rese,'
'money_cap, trad_asset, notes_receiv, accounts_receiv, oth_receiv,'
'prepayment, div_receiv, int_receiv, inventories, amor_exp,'
'nca_within_1y, sett_rsrv, loanto_oth_bank_fi, premium_receiv,'
'reinsur_receiv, reinsur_res_receiv, pur_resale_fa, oth_cur_assets,'
'total_cur_assets, fa_avail_for_sale, htm_invest, lt_eqt_invest,'
'invest_real_estate, time_deposits, oth_assets, lt_rec, fix_assets,'
'cip, const_materials, fixed_assets_disp, produc_bio_assets,'
'oil_and_gas_assets, intan_assets, r_and_d, goodwill, lt_amor_exp,'
'defer_tax_assets, decr_in_disbur, oth_nca, total_nca, cash_reser_cb,'
'depos_in_oth_bfi, prec_metals, deriv_assets, rr_reins_une_prem,'
'rr_reins_outstd_cla, rr_reins_lins_liab, rr_reins_lthins_liab,'
'refund_depos, ph_pledge_loans, refund_cap_depos, indep_acct_assets,'
'client_depos, client_prov, transac_seat_fee, invest_as_receiv,'
'total_assets, lt_borr, st_borr, cb_borr, depos_ib_deposits,'
'loan_oth_bank, trading_fl, notes_payable, acct_payable, adv_receipts,'
'sold_for_repur_fa, comm_payable, payroll_payable, taxes_payable,'
'int_payable, div_payable, oth_payable, acc_exp, deferred_inc,'
'st_bonds_payable, payable_to_reinsurer, rsrv_insur_cont,'
'acting_trading_sec, acting_uw_sec, non_cur_liab_due_1y, oth_cur_liab,'
'total_cur_liab, bond_payable, lt_payable, specific_payables,'
'estimated_liab, defer_tax_liab, defer_inc_non_cur_liab, oth_ncl,'
'total_ncl, depos_oth_bfi, deriv_liab, depos, agency_bus_liab,'
'oth_liab, prem_receiv_adva, depos_received, ph_invest, reser_une_prem,'
'reser_outstd_claims, reser_lins_liab, reser_lthins_liab,'
'indept_acc_liab, pledge_borr, indem_payable, policy_div_payable,'
'total_liab, treasury_share, ordin_risk_reser, forex_differ,'
'invest_loss_unconf, minority_int, total_hldr_eqy_exc_min_int,'
'total_hldr_eqy_inc_min_int, total_liab_hldr_eqy, lt_payroll_payable,'
'oth_comp_income, oth_eqt_tools, oth_eqt_tools_p_shr, lending_funds,'
'acc_receivable, st_fin_payable, payables, hfs_assets, hfs_sales,'
'update_flag'
)
df_i = df_i.drop_duplicates()
df_i = df_i.reindex(index=df_i.index[::-1])
df_i.insert(0, 'code', [c[:6] for c in df_i['ts_code']])
df = df.append(df_i)
df = df.reset_index(drop=True)
df.to_csv('../../data/financial_statements/balance_sheet.csv', index=False) | 44.644737 | 80 | 0.72178 |
5a79960fc035f3d47bd3d6b6b9332c5bd900eee5 | 1,208 | py | Python | examples/wsgi/test.py | gelnior/couchdbkit | 8277d6ffd00553ae0b0b2368636460d40f8d8225 | [
"MIT"
] | 51 | 2015-04-01T14:53:46.000Z | 2022-03-16T09:16:10.000Z | examples/wsgi/test.py | gelnior/couchdbkit | 8277d6ffd00553ae0b0b2368636460d40f8d8225 | [
"MIT"
] | 17 | 2015-02-04T11:25:02.000Z | 2021-07-10T10:17:53.000Z | examples/wsgi/test.py | gelnior/couchdbkit | 8277d6ffd00553ae0b0b2368636460d40f8d8225 | [
"MIT"
] | 40 | 2015-01-13T23:38:01.000Z | 2022-02-26T22:08:01.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008,2009 Benoit Chesneau <benoitc@e-engura.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import couchdbkit
from couchdbkit.contrib import WSGIHandler
import json
def app(environ, start_response):
"""Simplest possible application object"""
data = 'Hello, World!\n DB Infos : %s\n' % json.dumps(environ["COUCHDB_INFO"])
status = '200 OK'
response_headers = [
('Content-type','text/plain'),
('Content-Length', len(data))
]
start_response(status, response_headers)
return [data]
if __name__ == "__main__":
main()
| 30.974359 | 83 | 0.693709 |
5a7ade7264494768c161fd0f8d10b792225101d5 | 2,480 | py | Python | src/comments/api/views/DetailAPIView.py | samrika25/TRAVIS_HEROKU_GIT | bcae6d0422d9a0369810944a91dd03db7df0d058 | [
"MIT"
] | null | null | null | src/comments/api/views/DetailAPIView.py | samrika25/TRAVIS_HEROKU_GIT | bcae6d0422d9a0369810944a91dd03db7df0d058 | [
"MIT"
] | 4 | 2021-03-30T12:35:36.000Z | 2021-06-10T18:11:24.000Z | src/comments/api/views/DetailAPIView.py | samrika25/TRAVIS_HEROKU_GIT | bcae6d0422d9a0369810944a91dd03db7df0d058 | [
"MIT"
] | 2 | 2021-02-07T16:16:36.000Z | 2021-07-13T05:26:51.000Z | from django.views import View
from comments.models import Comment
from django.http import JsonResponse
from utils.decorators import fail_safe_api
from utils.models import nested_model_to_dict
from utils.request import parse_body, set_user
from django.contrib.contenttypes.models import ContentType
| 26.666667 | 114 | 0.604435 |
5a7b8772eb3240b031d703bd91a985fdc85cecd0 | 2,857 | py | Python | src/router.py | mix2zeta/social-d | 923cc2b224470e940ae6ac9cc712adb685c1b216 | [
"MIT"
] | null | null | null | src/router.py | mix2zeta/social-d | 923cc2b224470e940ae6ac9cc712adb685c1b216 | [
"MIT"
] | null | null | null | src/router.py | mix2zeta/social-d | 923cc2b224470e940ae6ac9cc712adb685c1b216 | [
"MIT"
] | 1 | 2021-03-11T09:07:11.000Z | 2021-03-11T09:07:11.000Z | from aiohttp import web
import urllib.parse
from conf import settings
ROUTER = {
"poke_task": {
"url": "/poke",
"GET": "request_handle.poke_task",
"POST": "request_handle.poke_task",
},
"task": {
"url": "/task/{task_id}",
"GET": "request_handle.get_task",
},
"message": {
"url": "/message/{msg_id}",
"GET": "request_handle.get_message_by_id"
},
"message-daily": {
"url": "/date/{from}/{to}/message/daily",
"GET": "request_handle.get_daily_message_count"
},
"message-top": {
"url": "/date/{from}/{to}/message/top",
"GET": "request_handle.get_account_by_message"
},
"message-engagement": {
"url": "/date/{from}/{to}/message/engagement",
"GET": "request_handle.get_message_by_engagement"
},
"wordcloud":{
"url": "/date/{from}/{to}/message/{cloud_type}",
"GET": "request_handle.get_word_cloud"
},
}
def object_at_end_of_path(path):
"""Attempt to return the Python object at the end of the dotted
path by repeated imports and attribute access.
"""
access_path = path.split(".")
module = None
for index in range(1, len(access_path)):
try:
# import top level module
module_name = ".".join(access_path[:-index])
module = __import__(module_name)
except ImportError:
continue
else:
for step in access_path[1:-1]: # walk down it
module = getattr(module, step)
break
if module:
return getattr(module, access_path[-1])
else:
return globals()["__builtins__"][path]
| 28.287129 | 88 | 0.533427 |
5a7f094b28c04c830704df3edc53f45db870422e | 3,668 | py | Python | golly_python/manager.py | golly-splorts/golly-python | 54bc277cc2aed9f35b67a6f8de1d468d9893440c | [
"MIT"
] | null | null | null | golly_python/manager.py | golly-splorts/golly-python | 54bc277cc2aed9f35b67a6f8de1d468d9893440c | [
"MIT"
] | null | null | null | golly_python/manager.py | golly-splorts/golly-python | 54bc277cc2aed9f35b67a6f8de1d468d9893440c | [
"MIT"
] | null | null | null | import json
from .life import BinaryLife
| 30.823529 | 88 | 0.507361 |
5a7f42aae312bdb1dfd1e806bfb1013a4638beeb | 48 | py | Python | surge_multiplier_mdp/__init__.py | mbattifarano/surge-multiplier-mdp | 8a8477662a2a9b7daa7acb8b8cf486bef0ec8c05 | [
"MIT"
] | null | null | null | surge_multiplier_mdp/__init__.py | mbattifarano/surge-multiplier-mdp | 8a8477662a2a9b7daa7acb8b8cf486bef0ec8c05 | [
"MIT"
] | null | null | null | surge_multiplier_mdp/__init__.py | mbattifarano/surge-multiplier-mdp | 8a8477662a2a9b7daa7acb8b8cf486bef0ec8c05 | [
"MIT"
] | null | null | null | from .mdp_value_iteration import value_iteration | 48 | 48 | 0.916667 |
5a7f6cebc7d1d5a0a12a5527001bd5fbb8d22d54 | 568 | py | Python | DiplomaProject/office/admin.py | iamgo100/diploma | fc7314468631bf43774b4678890d2a315658713c | [
"MIT"
] | null | null | null | DiplomaProject/office/admin.py | iamgo100/diploma | fc7314468631bf43774b4678890d2a315658713c | [
"MIT"
] | null | null | null | DiplomaProject/office/admin.py | iamgo100/diploma | fc7314468631bf43774b4678890d2a315658713c | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Shift, Service, Appointment
admin.site.register(Shift, ShiftAdmin)
admin.site.register(Service, ServicetAdmin)
admin.site.register(Appointment, AppointmentAdmin) | 35.5 | 66 | 0.713028 |
5a7fe776654c20e1290bc4e948072b1dcc063b7e | 2,007 | py | Python | util/query_jmx.py | perfsonar/esmond | 391939087321c1438d54cdadee3eb936b95f3e92 | [
"BSD-3-Clause-LBNL"
] | 3 | 2019-10-23T01:10:19.000Z | 2022-03-26T18:40:44.000Z | util/query_jmx.py | perfsonar/esmond | 391939087321c1438d54cdadee3eb936b95f3e92 | [
"BSD-3-Clause-LBNL"
] | 23 | 2018-12-05T20:30:04.000Z | 2020-11-11T19:20:57.000Z | util/query_jmx.py | perfsonar/esmond | 391939087321c1438d54cdadee3eb936b95f3e92 | [
"BSD-3-Clause-LBNL"
] | 3 | 2019-02-11T20:40:41.000Z | 2022-03-26T18:40:50.000Z | #!/usr/bin/env python3
"""
Code to issue calls to the cassandra MX4J http server and get stats.
"""
import os
import sys
from optparse import OptionParser
from esmond.api.client.jmx import CassandraJMX
if __name__ == '__main__':
main()
| 38.596154 | 78 | 0.678127 |
5a8074c85da0b1531e270b6b0eaa82126e705010 | 1,294 | py | Python | apps/accounts/management/commands/amend_hostingproviders_stats.py | BR0kEN-/admin-portal | 0c38dc0d790031f45bf07660bce690e972fe2858 | [
"Apache-2.0"
] | null | null | null | apps/accounts/management/commands/amend_hostingproviders_stats.py | BR0kEN-/admin-portal | 0c38dc0d790031f45bf07660bce690e972fe2858 | [
"Apache-2.0"
] | null | null | null | apps/accounts/management/commands/amend_hostingproviders_stats.py | BR0kEN-/admin-portal | 0c38dc0d790031f45bf07660bce690e972fe2858 | [
"Apache-2.0"
] | null | null | null | from django.core.management.base import BaseCommand
from django.db import connection
| 38.058824 | 91 | 0.532457 |
5a80b2e184b51cbc11327bc99c0e1506a3d4bc1b | 2,493 | py | Python | src/brain_atlas/diff_exp.py | MacoskoLab/brain-atlas | 6db385435ea1a6e96fd019963b4f7e23148a7b9a | [
"MIT"
] | 2 | 2022-01-21T19:13:35.000Z | 2022-03-24T07:46:57.000Z | src/brain_atlas/diff_exp.py | MacoskoLab/brain-atlas | 6db385435ea1a6e96fd019963b4f7e23148a7b9a | [
"MIT"
] | null | null | null | src/brain_atlas/diff_exp.py | MacoskoLab/brain-atlas | 6db385435ea1a6e96fd019963b4f7e23148a7b9a | [
"MIT"
] | null | null | null | import numba as nb
import numpy as np
import scipy.stats
def mannwhitneyu(x, y, use_continuity=True):
"""Version of Mann-Whitney U-test that runs in parallel on 2d arrays
This is the two-sided test, asymptotic algo only. Returns log p-values
"""
x = np.asarray(x)
y = np.asarray(y)
assert x.shape[1] == y.shape[1]
n1 = x.shape[0]
n2 = y.shape[0]
ranked = rankdata(np.concatenate((x, y)))
rankx = ranked[:n1, :] # get the x-ranks
u1 = n1 * n2 + (n1 * (n1 + 1)) / 2.0 - np.sum(rankx, axis=0) # calc U for x
u2 = n1 * n2 - u1 # remainder is U for y
T = tiecorrect(ranked)
# if *everything* is identical we'll raise an error, not otherwise
if np.all(T == 0):
raise ValueError("All numbers are identical in mannwhitneyu")
sd = np.sqrt(T * n1 * n2 * (n1 + n2 + 1) / 12.0)
meanrank = n1 * n2 / 2.0 + 0.5 * use_continuity
bigu = np.maximum(u1, u2)
with np.errstate(divide="ignore", invalid="ignore"):
z = (bigu - meanrank) / sd
logp = np.minimum(scipy.stats.norm.logsf(z) + np.log(2), 0)
return u2, logp
| 29.329412 | 85 | 0.584436 |
5a81a24952b6eed80c202bd9ff7db7e295855534 | 2,088 | py | Python | piece.py | brouxco/quarto-solver | 12ae87f43d4a80137cb4394de9c399d8f9894da3 | [
"0BSD"
] | null | null | null | piece.py | brouxco/quarto-solver | 12ae87f43d4a80137cb4394de9c399d8f9894da3 | [
"0BSD"
] | null | null | null | piece.py | brouxco/quarto-solver | 12ae87f43d4a80137cb4394de9c399d8f9894da3 | [
"0BSD"
] | null | null | null |
if __name__ == "__main__":
pass
| 32.625 | 56 | 0.531609 |
5a81e0954b1a9e5e3552a3af4e53c8b36b9c007f | 21,061 | py | Python | tests/test_build_docs.py | simon-ritchie/action-py-script | f502ede320089562d77d13231e85e65b9de64938 | [
"MIT"
] | null | null | null | tests/test_build_docs.py | simon-ritchie/action-py-script | f502ede320089562d77d13231e85e65b9de64938 | [
"MIT"
] | 16 | 2021-02-13T05:19:16.000Z | 2021-02-23T11:40:18.000Z | tests/test_build_docs.py | simon-ritchie/action-py-script | f502ede320089562d77d13231e85e65b9de64938 | [
"MIT"
] | null | null | null | import hashlib
import os
import shutil
from random import randint
from typing import List
from retrying import retry
import build_docs
from apysc._file import file_util
from build_docs import _CodeBlock
from build_docs import _CodeBlockFlake8Error
from build_docs import _CodeBlockMypyError
from build_docs import _CodeBlockNumdoclintError
from build_docs import _RunReturnData
from build_docs import _ScriptData
from tests.testing_helper import assert_attrs
from tests.testing_helper import assert_raises
_CHECKOUT_FILE_PATHS: List[str] = [
'docs_src/hashed_vals/stage.md',
]
def teardown() -> None:
"""
The function would be called when the test ended.
"""
for checkout_file_path in _CHECKOUT_FILE_PATHS:
os.system(f'git checkout {checkout_file_path}')
def test__save_md_hashed_val() -> None:
original_hashed_vals_dir_path: str = build_docs.HASHED_VALS_DIR_PATH
build_docs.HASHED_VALS_DIR_PATH = '../tmp_test_build_docs_5/hashed_vals/'
expected_file_path: str = os.path.join(
build_docs.HASHED_VALS_DIR_PATH,
'any/path.md')
file_util.remove_file_if_exists(file_path=expected_file_path)
build_docs._save_md_hashed_val(
md_file_path='./docs_src/source/any/path.md', hashed_val='1234')
hashed_val: str = build_docs._read_md_file_hashed_val_from_file(
hash_file_path=expected_file_path)
assert hashed_val == '1234'
build_docs.HASHED_VALS_DIR_PATH = original_hashed_vals_dir_path
file_util.remove_file_if_exists(file_path=expected_file_path)
| 35.160267 | 78 | 0.651346 |
5a8286acf837a481397e002bada53024ba40d6ed | 15,551 | py | Python | Generator/views.py | SmilingTornado/sfia_generator | f675a3fe55e3b56267cafade44ebd069bac185d7 | [
"Apache-2.0"
] | 2 | 2020-08-19T08:43:51.000Z | 2021-11-18T09:05:55.000Z | Generator/views.py | SmilingTornado/sfia_generator | f675a3fe55e3b56267cafade44ebd069bac185d7 | [
"Apache-2.0"
] | 5 | 2020-06-06T14:15:30.000Z | 2021-09-22T18:47:36.000Z | Generator/views.py | SmilingTornado/sfia_generator | f675a3fe55e3b56267cafade44ebd069bac185d7 | [
"Apache-2.0"
] | null | null | null | # Create your views here.
import docx
import gensim
import numpy as np
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import render
from docx.shared import RGBColor, Inches, Pt
from nltk.tokenize import sent_tokenize, word_tokenize
from .models import Skill, Level
# View for home page
# View for search page
# View to list skills
# View to list skills for second skill selection
# View details of skill
# View details of second selected skill
# Returns whether a skill is valid
# Get skill information
# Get levels in a certain range
# Generate description for the skill
| 42.02973 | 118 | 0.641952 |
5a83d552df37fe7fdd13e1e5236c56ad3f9e80ab | 3,076 | py | Python | flask_pancake/extension.py | arthurio/flask-pancake | 5fc752d6e917bbe8e06be7d7a802cdeb10cca591 | [
"MIT"
] | 4 | 2020-01-21T04:33:01.000Z | 2021-04-27T22:56:23.000Z | flask_pancake/extension.py | arthurio/flask-pancake | 5fc752d6e917bbe8e06be7d7a802cdeb10cca591 | [
"MIT"
] | 16 | 2020-01-25T19:27:11.000Z | 2020-10-13T20:09:18.000Z | flask_pancake/extension.py | arthurio/flask-pancake | 5fc752d6e917bbe8e06be7d7a802cdeb10cca591 | [
"MIT"
] | 2 | 2020-06-18T08:38:28.000Z | 2021-04-28T02:53:39.000Z | from __future__ import annotations
import abc
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type, Union
from cached_property import cached_property
from .constants import EXTENSION_NAME
from .registry import registry
from .utils import GroupFuncType, import_from_string, load_cookies, store_cookies
if TYPE_CHECKING:
from flask import Flask
from .flags import Flag, Sample, Switch
__all__ = ["FlaskPancake"]
| 29.295238 | 85 | 0.624187 |
5a857abf3570c3df69b81be2e28f99b2e77798fb | 1,563 | py | Python | tests/pygithub/test_targettag.py | ktlim/sqre-codekit | 98122404cd9065d4d1d570867fe518042669126c | [
"MIT"
] | null | null | null | tests/pygithub/test_targettag.py | ktlim/sqre-codekit | 98122404cd9065d4d1d570867fe518042669126c | [
"MIT"
] | 23 | 2015-12-04T16:54:15.000Z | 2019-03-15T01:14:26.000Z | tests/pygithub/test_targettag.py | ktlim/sqre-codekit | 98122404cd9065d4d1d570867fe518042669126c | [
"MIT"
] | 3 | 2016-08-08T16:44:04.000Z | 2020-04-29T00:58:00.000Z | #!/usr/bin/env python3
import codekit.pygithub
import github
import itertools
import pytest
def test_init(git_author):
"""Test TargetTag object instantiation"""
t_tag = codekit.pygithub.TargetTag(
name='foo',
sha='bar',
message='baz',
tagger=git_author,
)
assert isinstance(t_tag, codekit.pygithub.TargetTag), type(t_tag)
def test_attributes(git_author):
"""Test TargetTag attributes"""
t_tag = codekit.pygithub.TargetTag(
name='foo',
sha='bar',
message='baz',
tagger=git_author,
)
assert t_tag.name == 'foo'
assert t_tag.sha == 'bar'
assert t_tag.message == 'baz'
assert isinstance(t_tag.tagger, github.InputGitAuthor), type(t_tag.tagger)
def test_init_required_args(git_author):
"""TargetTag requires named args"""
all_args = dict(
name='foo',
sha='bar',
message='baz',
tagger=git_author,
)
args = {}
# try all named args but one
for k, v in itertools.islice(all_args.items(), len(all_args) - 1):
args[k] = v
with pytest.raises(KeyError):
codekit.pygithub.TargetTag(**args)
def test_init_tagger_type():
"""TargetTag tagger named arg must be correct type"""
with pytest.raises(AssertionError):
codekit.pygithub.TargetTag(
name='foo',
sha='bar',
message='baz',
tagger='bonk',
)
| 22.328571 | 78 | 0.614203 |
5a898eeb8ca1914311a3bfe38f233e0ef651e459 | 497 | py | Python | src/test/model/test_node.py | AstrorEnales/GenCoNet | c596d31a889f14499883fcdf74fdc67f927a806e | [
"MIT"
] | 2 | 2019-12-05T11:46:48.000Z | 2022-03-09T00:11:06.000Z | src/test/model/test_node.py | AstrorEnales/GenCoNet | c596d31a889f14499883fcdf74fdc67f927a806e | [
"MIT"
] | null | null | null | src/test/model/test_node.py | AstrorEnales/GenCoNet | c596d31a889f14499883fcdf74fdc67f927a806e | [
"MIT"
] | null | null | null | import unittest
from model import node
| 26.157895 | 83 | 0.615694 |
5a8acbff39d71356c0bdbbffc0011959d6b7ec58 | 1,109 | py | Python | 2020/Python/day06.py | kamoshi/Advent-of-Code | 5b78fa467409e8b8c5a16efe31684b8ce493bcee | [
"MIT"
] | 1 | 2020-12-21T13:27:52.000Z | 2020-12-21T13:27:52.000Z | 2020/Python/day06.py | kamoshi/advent-of-code | 5b78fa467409e8b8c5a16efe31684b8ce493bcee | [
"MIT"
] | null | null | null | 2020/Python/day06.py | kamoshi/advent-of-code | 5b78fa467409e8b8c5a16efe31684b8ce493bcee | [
"MIT"
] | null | null | null | import functools
GROUPS = parse_input()
print(solve_p1(GROUPS))
print(solve_p2(GROUPS))
| 22.632653 | 61 | 0.537421 |
ce456a679b725d44ec91f64a8df14df4d86ae155 | 1,918 | py | Python | src/python/grpcio_tests/tests/interop/_intraop_test_case.py | txl0591/grpc | 8b732dc466fb8a567c1bca9dbb84554d29087395 | [
"Apache-2.0"
] | 117 | 2017-10-02T21:34:35.000Z | 2022-03-02T01:49:03.000Z | src/python/grpcio_tests/tests/interop/_intraop_test_case.py | txl0591/grpc | 8b732dc466fb8a567c1bca9dbb84554d29087395 | [
"Apache-2.0"
] | 4 | 2017-10-03T22:45:30.000Z | 2018-09-27T07:31:00.000Z | src/python/grpcio_tests/tests/interop/_intraop_test_case.py | txl0591/grpc | 8b732dc466fb8a567c1bca9dbb84554d29087395 | [
"Apache-2.0"
] | 24 | 2017-10-31T12:14:15.000Z | 2021-12-11T10:07:46.000Z | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common code for unit tests of the interoperability test code."""
from tests.interop import methods
| 36.884615 | 80 | 0.728363 |
ce460a49da25a43c7d3e4ff3e64726a1574194b1 | 604 | py | Python | scripts/add_vf_ids.py | rajbot/vaccinebot | 9b7c13eb248e92a248dbc0e3e9de6d4dc7a2c20a | [
"MIT"
] | 2 | 2021-02-07T05:06:09.000Z | 2021-03-02T18:23:07.000Z | scripts/add_vf_ids.py | rajbot/vaccinebot | 9b7c13eb248e92a248dbc0e3e9de6d4dc7a2c20a | [
"MIT"
] | null | null | null | scripts/add_vf_ids.py | rajbot/vaccinebot | 9b7c13eb248e92a248dbc0e3e9de6d4dc7a2c20a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from airtable import Airtable
import csv
import os
for var in ["AIRTABLE_API_KEY", "AIRTABLE_BASE_ID"]:
if os.environ.get(var) is None:
sys.exit(f"Must set {var} env var!")
api_key = os.environ.get("AIRTABLE_API_KEY")
base_id = os.environ.get("AIRTABLE_BASE_ID")
airtable = Airtable(base_id, "Locations", api_key)
path = sys.argv[1]
with open(path) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
print("adding", row)
fields = {"vaccinefinder_location_id": row["vaccinefinder_id"]}
airtable.update(row["id"], fields)
| 26.26087 | 71 | 0.688742 |
ce462fe45d9f73cc50c3b487d621d5b2ad86a06b | 99 | py | Python | pubmedpy/__init__.py | dhimmel/pubmedpy | 9d716768f5ab798ec448154588e4fd99afd7584a | [
"BlueOak-1.0.0"
] | 7 | 2019-11-13T09:14:19.000Z | 2022-03-09T01:35:06.000Z | pubmedpy/__init__.py | dhimmel/pubmedpy | 9d716768f5ab798ec448154588e4fd99afd7584a | [
"BlueOak-1.0.0"
] | 2 | 2020-08-24T15:05:57.000Z | 2020-10-21T04:12:56.000Z | pubmedpy/__init__.py | dhimmel/pubmedpy | 9d716768f5ab798ec448154588e4fd99afd7584a | [
"BlueOak-1.0.0"
] | 1 | 2021-02-18T00:01:09.000Z | 2021-02-18T00:01:09.000Z | """
# Utilities for interacting with NCBI EUtilities relating to PubMed
"""
__version__ = "0.0.1"
| 16.5 | 67 | 0.717172 |
ce46ad7566bbdce61b2ab0578c3f8020ac4af53c | 945 | py | Python | config/logging.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 4 | 2021-07-19T12:53:21.000Z | 2022-01-26T17:45:02.000Z | config/logging.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 20 | 2021-05-17T12:27:06.000Z | 2022-03-30T11:35:26.000Z | config/logging.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 2 | 2021-09-07T04:19:59.000Z | 2022-02-08T15:33:29.000Z | import json
import sys
import traceback
from logging import Handler
# Specific logging module for GCP, use json to serialize output -> work better for GKE
# Can be used for further customization
| 28.636364 | 86 | 0.557672 |
ce4a42074a7c0b3a3d6615721ca72bb43e10e32b | 16,095 | py | Python | sublimeText3/Packages/SublimeCodeIntel/libs/codeintel2/tdparser.py | MoAnsir/dot_file_2017 | 5f67ef8f430416c82322ab7e7e001548936454ff | [
"MIT"
] | 2 | 2018-04-24T10:02:26.000Z | 2019-06-02T13:53:31.000Z | Data/Packages/SublimeCodeIntel/libs/codeintel2/tdparser.py | Maxize/Sublime_Text_3 | be620476b49f9a6ce2ca2cfe825c4e142e7e82b9 | [
"Apache-2.0"
] | 1 | 2016-02-10T09:50:09.000Z | 2016-02-10T09:50:09.000Z | Packages/SublimeCodeIntel/libs/codeintel2/tdparser.py | prisis/sublime-text-packages | 99ae8a5496613e27a75e5bd91723549b21476e60 | [
"MIT"
] | 2 | 2019-04-11T04:13:02.000Z | 2019-06-02T13:53:33.000Z | """
A simple Top-Down Python expression parser.
This parser is based on the "Simple Top-Down Parsing in Python" article by
Fredrik Lundh (http://effbot.org/zone/simple-top-down-parsing.htm)
These materials could be useful for understanding ideas behind
the Top-Down approach:
* Top Down Operator Precedence -- Douglas Crockford
http://javascript.crockford.com/tdop/tdop.html
* Top-Down operator precedence parsing -- Eli Benderski
http://eli.thegreenplace.net/2010/01/02/top-down-operator-precedence-parsing/
* Top down operator precedence -- Vaughan R. Pratt
http://portal.acm.org/citation.cfm?doid=512927.512931
This implementation is a subject to change as it is very premature.
"""
import re
import io as sio
import tokenize
type_map = {tokenize.NUMBER: "(literal)",
tokenize.STRING: "(literal)",
tokenize.OP: "(operator)",
tokenize.NAME: "(name)"}
def arg_list_py(args):
buf = []
for name, value, type in args:
if value:
buf.append("%s=%s" % (name.py(), value.py()))
else:
buf.append(name.py())
return ", ".join(buf)
def call_list_py(args):
buf = []
for name, value in args:
value_py = value and value.py() or ''
if name:
if name.id in ("*", "**"):
arg = name.id + value.py()
else:
arg = "%s=%s" % (name.id, value_py)
else:
arg = value_py
buf.append(arg)
return ", ".join(buf)
def py_expr_grammar():
self = Grammar()
self.symbol("lambda", 20)
self.symbol(":", 10)
self.symbol("if", 20)
self.symbol("else")
self.infix_r("or", 30)
self.infix_r("and", 40)
self.prefix("not", 50)
self.infix("in", 60)
self.infix("not", 60) # in, not in
self.infix("is", 60) # is, is not
self.infix("<", 60)
self.infix("<=", 60)
self.infix(">", 60)
self.infix(">=", 60)
self.infix("<>", 60)
self.infix("!=", 60)
self.infix("==", 60)
self.infix("|", 70)
self.infix("^", 80)
self.infix("&", 90)
self.infix("<<", 100)
self.infix(">>", 100)
self.infix("+", 110)
self.infix("-", 110)
self.infix("*", 120)
self.infix("/", 120)
self.infix("//", 120)
self.infix("%", 120)
self.prefix("-", 130)
self.prefix("+", 130)
self.prefix("~", 130)
self.infix_r("**", 140)
self.symbol(".", 150)
self.symbol("[", 150)
self.symbol("]")
self.symbol("(", 150)
self.symbol(")")
self.symbol(",")
self.symbol("=")
self.symbol("{", 150)
self.symbol("}")
self.symbol("(literal)").nud = lambda self: self
self.symbol("(name)").nud = lambda self: self
self.symbol("(end)")
self.constant("None")
self.constant("True")
self.constant("False")
return self
if __name__ == '__main__':
import sys
if len(sys.argv) < 2:
print("Usage: tdparser.py filename")
parser = PyExprParser()
res = parser.parse_bare_arglist(file(sys.argv[1]).read())
print(res)
| 27.46587 | 82 | 0.491892 |
ce4ab2aff6e500e8239e651be0e0851e93f8d29c | 597 | py | Python | atcoder/abc/abc002_d.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | 1 | 2018-11-12T15:18:55.000Z | 2018-11-12T15:18:55.000Z | atcoder/abc/abc002_d.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null | atcoder/abc/abc002_d.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null | from copy import deepcopy
N, M = map(int, input().split())
E = [[] for _ in range(N)]
for i in range(M):
x, y = map(lambda x:int(x)-1, input().split())
E[x].append(y)
E[y].append(x)
ans = 0
for mask in range(2**N):
faction = ''
for x in range(N):
faction += '1' if mask&(1<<x) else '0'
flag = True
for cnt, i in enumerate(faction):
if int(i) == 1:
for j in range(cnt+1, N):
if faction[j] == '1' and j not in E[cnt]:
flag = False
if flag: ans = max(ans, faction.count('1'))
print(ans)
| 27.136364 | 57 | 0.494137 |
ce4b095948b8f81b5b5833c6dcab9d8f5bd587a5 | 290 | py | Python | advisor/api/urls.py | Sachin-c/api-test | c8242de24375149dcbc14e30b44d9a77d9771034 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | advisor/api/urls.py | Sachin-c/api-test | c8242de24375149dcbc14e30b44d9a77d9771034 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | advisor/api/urls.py | Sachin-c/api-test | c8242de24375149dcbc14e30b44d9a77d9771034 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | from django.urls import path
from advisor.api.views import (
# api_advisor_view,
api_advisor_view_post,
)
app_name = 'advisor'
urlpatterns = [
path('admin/advisor/', api_advisor_view_post, name="post"),
# path('user/<int:id>/advisor/', api_advisor_view, name="detail"),
]
| 22.307692 | 70 | 0.7 |
ce4b6a50f11f5cd0ce57c03afebe02596310a357 | 405 | py | Python | src/utils/config.py | mlrepa/automate-ml-with-dvc | b54a2e4818a991362d304890828df70359bab84a | [
"MIT"
] | 4 | 2021-04-11T17:30:14.000Z | 2021-07-27T10:09:53.000Z | src/utils/config.py | mlrepa/automate-ml-with-dvc | b54a2e4818a991362d304890828df70359bab84a | [
"MIT"
] | null | null | null | src/utils/config.py | mlrepa/automate-ml-with-dvc | b54a2e4818a991362d304890828df70359bab84a | [
"MIT"
] | 1 | 2021-09-05T04:15:07.000Z | 2021-09-05T04:15:07.000Z | import box
from typing import Text
import yaml
def load_config(config_path: Text) -> box.ConfigBox:
"""Loads yaml config in instance of box.ConfigBox.
Args:
config_path {Text}: path to config
Returns:
box.ConfigBox
"""
with open(config_path) as config_file:
config = yaml.safe_load(config_file)
config = box.ConfigBox(config)
return config
| 20.25 | 54 | 0.659259 |
ce4c0eaf1f91aac00dd03914e8def1ffd020858d | 3,835 | py | Python | python/methylnet/visualizations.py | hossein20s/dnaMethylation | eb2c4e14a6d32f6582f54fe39c62e83205f18665 | [
"MIT"
] | 26 | 2019-07-11T04:58:24.000Z | 2022-02-15T19:31:48.000Z | python/methylnet/visualizations.py | hossein20s/dnaMethylation | eb2c4e14a6d32f6582f54fe39c62e83205f18665 | [
"MIT"
] | 5 | 2020-04-30T13:02:13.000Z | 2022-03-02T16:41:47.000Z | python/methylnet/visualizations.py | hossein20s/dnaMethylation | eb2c4e14a6d32f6582f54fe39c62e83205f18665 | [
"MIT"
] | 8 | 2019-10-08T07:16:09.000Z | 2022-03-11T23:17:27.000Z | import pandas as pd
import numpy as np
import networkx as nx
import click
import pickle
from sklearn.preprocessing import LabelEncoder
CONTEXT_SETTINGS = dict(help_option_names=['-h','--help'], max_content_width=90)
#################
if __name__ == '__main__':
visualize()
| 43.089888 | 196 | 0.681356 |
ce4d2974d0b31d80078e3f7458f018c00bbd3cf4 | 13,343 | py | Python | tests/test_sagemaker/test_sagemaker_processing.py | gtourkas/moto | 307104417b579d23d02f670ff55217a2d4a16bee | [
"Apache-2.0"
] | 5,460 | 2015-01-01T01:11:17.000Z | 2022-03-31T23:45:38.000Z | tests/test_sagemaker/test_sagemaker_processing.py | gtourkas/moto | 307104417b579d23d02f670ff55217a2d4a16bee | [
"Apache-2.0"
] | 4,475 | 2015-01-05T19:37:30.000Z | 2022-03-31T13:55:12.000Z | tests/test_sagemaker/test_sagemaker_processing.py | gtourkas/moto | 307104417b579d23d02f670ff55217a2d4a16bee | [
"Apache-2.0"
] | 1,831 | 2015-01-14T00:00:44.000Z | 2022-03-31T20:30:04.000Z | import boto3
from botocore.exceptions import ClientError
import datetime
import pytest
from moto import mock_sagemaker
from moto.sts.models import ACCOUNT_ID
FAKE_ROLE_ARN = "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID)
TEST_REGION_NAME = "us-east-1"
| 39.829851 | 227 | 0.67526 |
ce4d63008769bb7f26121f3ebe84e27bc4d39e53 | 7,853 | py | Python | tools/sprite-editor/gui/direction_sprite_widget.py | jordsti/stigame | 6ac0ae737667b1c77da3ef5007f5c4a3a080045a | [
"MIT"
] | 8 | 2015-02-03T20:23:49.000Z | 2022-02-15T07:51:05.000Z | tools/sprite-editor/gui/direction_sprite_widget.py | jordsti/stigame | 6ac0ae737667b1c77da3ef5007f5c4a3a080045a | [
"MIT"
] | null | null | null | tools/sprite-editor/gui/direction_sprite_widget.py | jordsti/stigame | 6ac0ae737667b1c77da3ef5007f5c4a3a080045a | [
"MIT"
] | 2 | 2017-02-13T18:04:00.000Z | 2020-08-24T03:21:37.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'direction_sprite_widget.ui'
#
# Created: Wed Jul 30 18:37:40 2014
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
try:
_encoding = QtGui.QApplication.UnicodeUTF8
except AttributeError:
| 58.17037 | 99 | 0.741118 |
ce4e49dc4da5a6289114dc9b19fa4b7569d8b066 | 4,913 | py | Python | venv/lib/python3.5/site-packages/bears/python/PEP8NotebookBear.py | prashant0598/CoffeeApp | 4fa006aebf06e12ed34766450ddcfa548ee63307 | [
"MIT"
] | null | null | null | venv/lib/python3.5/site-packages/bears/python/PEP8NotebookBear.py | prashant0598/CoffeeApp | 4fa006aebf06e12ed34766450ddcfa548ee63307 | [
"MIT"
] | null | null | null | venv/lib/python3.5/site-packages/bears/python/PEP8NotebookBear.py | prashant0598/CoffeeApp | 4fa006aebf06e12ed34766450ddcfa548ee63307 | [
"MIT"
] | null | null | null | import autopep8
import nbformat
from coalib.bearlib.spacing.SpacingHelper import SpacingHelper
from coalib.bears.LocalBear import LocalBear
from dependency_management.requirements.PipRequirement import PipRequirement
from coalib.results.Diff import Diff
from coalib.results.Result import Result
from coalib.settings.Setting import typed_list
# Comments regarind Jupyter Notebooks:
# The `nbformat` module contains the reference implementation of the Jupyter
# Notebook format, and Python APIs for working with notebooks.
# On the file level, a notebook is a JSON file, i.e. dictionary with a few
# keys.
# The functions in `nbformat` work with `NotebookNode` objects, which are like
# dictionaries, but allow attribute access. The structure of these objects
# matches the notebook format specification.
def notebook_node_from_string_list(string_list):
"""
Reads a notebook from a string list and returns the NotebookNode
object.
:param string_list: The notebook file contents as list of strings
(linewise).
:return: The notebook as NotebookNode.
"""
return nbformat.reads(''.join(string_list), nbformat.NO_CONVERT)
def notebook_node_to_string_list(notebook_node):
"""
Writes a NotebookNode to a list of strings.
:param notebook_node: The notebook as NotebookNode to write.
:return: The notebook as list of strings (linewise).
"""
return nbformat.writes(notebook_node, nbformat.NO_CONVERT).splitlines(True)
def autopep8_fix_code_cell(source, options=None, apply_config=None):
"""
Applies autopep8.fix_code and takes care of newline characters.
autopep8.fix_code automatically adds a final newline at the end,
e.g. ``autopep8.fix_code('a=1')`` yields 'a = 1\\n'.
Note that this is not related to the 'W292' flag, i.e.
``autopep8.fix_code('a=1', options=dict(ignore=('W292',)))`` gives
the same result.
For notebook code cells, this behaviour does not make sense, hence
newline is removed if ``source`` does not end with one.
"""
source_corrected = autopep8.fix_code(source,
apply_config=apply_config,
options=options)
if not source.endswith('\n'):
return source_corrected[:-1]
return source_corrected
| 40.270492 | 79 | 0.632607 |
ce5057f4503ef56fd394e2f07ab56b6b56dccf58 | 1,043 | py | Python | doc2cube-master/src/prel.py | sustcjudgement/Judgement_information_extraction | c769eb1cb7ee695a157a981dbe9cd9d6559d072b | [
"MIT"
] | 1 | 2019-05-30T07:07:13.000Z | 2019-05-30T07:07:13.000Z | doc2cube-master/src/prel.py | sustcjudgement/Judgement_information_extraction | c769eb1cb7ee695a157a981dbe9cd9d6559d072b | [
"MIT"
] | null | null | null | doc2cube-master/src/prel.py | sustcjudgement/Judgement_information_extraction | c769eb1cb7ee695a157a981dbe9cd9d6559d072b | [
"MIT"
] | null | null | null | import nltk
import string
import argparse
parser = argparse.ArgumentParser(description='.')
parser.add_argument('-text', help='')
parser.add_argument('-meta', help='')
parser.add_argument('-output', help='')
args = parser.parse_args()
# parser.add_argument('-iter', dest='iter', type=int,
# default=max_iter)
text_docs = {}
with open(args.text, 'r') as f:
with open(args.output + 'd_prel.txt', 'w+') as g:
idx = 0
for line in f:
tokens = [w.lower().replace('###', '_') for w in line.strip('\r\n').split(' ')]
stopwords = nltk.corpus.stopwords.words('english')
tokens = [w for w in tokens if w not in stopwords]
line = str(idx) + '\t' + ';'.join(tokens) + ';\n'
g.write(line)
idx += 1
with open(args.output + 'l_prel.txt', 'w+') as g:
g.write('0 * \n')
with open(args.meta, 'r') as f:
idx = 0
for line in f:
idx += 1
path = line.strip('\r\n')
value = path.split('|')[-1]
g.write(str(idx) + '\t' + path + '\t' + value + '\n')
# prel => parse_flat => run.sh
# ==> evaluate.py | 24.833333 | 82 | 0.589645 |
ce517a5ddc247572eac79c178a88597e1d88b706 | 43 | py | Python | models/__init__.py | salesforce/DataHardness | 18b9231f8d08f35b2452e6357b7d6b31f21c695c | [
"BSD-3-Clause"
] | 3 | 2021-11-18T22:48:28.000Z | 2022-01-08T08:02:31.000Z | models/__init__.py | salesforce/DataHardness | 18b9231f8d08f35b2452e6357b7d6b31f21c695c | [
"BSD-3-Clause"
] | null | null | null | models/__init__.py | salesforce/DataHardness | 18b9231f8d08f35b2452e6357b7d6b31f21c695c | [
"BSD-3-Clause"
] | 1 | 2021-11-18T22:48:32.000Z | 2021-11-18T22:48:32.000Z | from models.glow import Glow, GlowAdditive
| 21.5 | 42 | 0.837209 |
ce51bf2481ad7448201c8511a71d60800f43cedd | 350 | py | Python | Logic/Helpers/ChronosTextEntry.py | terexdev/BSDS-V39 | 7deea469fbfbc56c48f8326ba972369679f6b098 | [
"Apache-2.0"
] | 11 | 2021-11-04T01:49:50.000Z | 2022-01-31T16:50:47.000Z | Logic/Helpers/ChronosTextEntry.py | terexdev/BSDS-V39 | 7deea469fbfbc56c48f8326ba972369679f6b098 | [
"Apache-2.0"
] | 6 | 2021-11-04T08:52:01.000Z | 2021-12-27T02:33:19.000Z | Logic/Helpers/ChronosTextEntry.py | terexdev/BSDS-V39 | 7deea469fbfbc56c48f8326ba972369679f6b098 | [
"Apache-2.0"
] | 5 | 2021-11-04T02:31:56.000Z | 2022-03-14T02:04:33.000Z | from Logic.Classes.LogicDataTables import LogicDataTables
from Logic.Data.DataManager import Writer
from Logic.Data.DataManager import Reader
| 25 | 57 | 0.717143 |
ce52086eedaa4d8450071ad9dda9ddc525a4ba30 | 1,136 | py | Python | Hoofdstuk 3/animals.py | BearWithAFez/Learning-Python | 23f6aa82e431838dc3891fe46d6ff6ea64281fe0 | [
"MIT"
] | null | null | null | Hoofdstuk 3/animals.py | BearWithAFez/Learning-Python | 23f6aa82e431838dc3891fe46d6ff6ea64281fe0 | [
"MIT"
] | null | null | null | Hoofdstuk 3/animals.py | BearWithAFez/Learning-Python | 23f6aa82e431838dc3891fe46d6ff6ea64281fe0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from urllib.request import urlopen
import sys
# Get the animals
def fetch_animals(url):
"""Get a list of lines (animals) from a given URL.
Args:
url: The URL of a utf-8 text
Returns:
A list of lines.
"""
with urlopen(url) as data:
animals = []
for animal in data:
animals.append(animal.decode('utf-8').rstrip())
return animals
# Print the animals given
def print_items(animals):
"""Prints all items from given collection.
Args:
animals: The collection to print.
"""
for animal in animals:
print(animal)
# Main method
def main(url):
"""Prints all lines (animals) from a given URL.
Args:
url: The URL of a utf-8 text
"""
animals = fetch_animals(url)
print_items(animals)
"""A list of lines printed from the given URL
Args:
1: the URL to a UTF-8 text to print
Usage:
python3 animals.py
"""
animalsUrl = 'https://raw.githubusercontent.com/BearWithAFez/Learning-Python/master/Hoofdstuk%202/animals.txt'
if __name__ == '__main__':
main(sys.argv[1])
| 20.285714 | 110 | 0.628521 |
ce522237e0f47825dd315d861dd8e20bb64f4c53 | 19,762 | py | Python | code/game.py | chaonan99/merge_sim | 0a96685b261c94ffe7d73abec3a488ef02b48cd0 | [
"MIT"
] | null | null | null | code/game.py | chaonan99/merge_sim | 0a96685b261c94ffe7d73abec3a488ef02b48cd0 | [
"MIT"
] | null | null | null | code/game.py | chaonan99/merge_sim | 0a96685b261c94ffe7d73abec3a488ef02b48cd0 | [
"MIT"
] | null | null | null | from collections import deque
import numpy as np
import os
from abc import ABCMeta, abstractmethod
import random
random.seed(42)
from common import config, VehicleState
from helper import Helper
INFO = """Average merging time: {} s
Traffic flow: {} vehicle/s
Average speed: {} km/h
Average fuel consumption: {} ml/vehicle"""
def main():
# vehicle_generator = Case1VehicleGenerator()
# game = GameLoop(vehicle_generator)
# game.play()
# game.draw_result_pyplot("case1")
vehicle_generator = MainHigherSpeedVG()
game = GameLoop(vehicle_generator)
# game = SpeedGameLoop(vehicle_generator)
game.play()
game.draw_result_pyplot("case2")
# vehicle_generator = APPVehicleGenerator(12, 'FIFO', 16.9)
# vehicle_generator = PoissonVehicleGenerator(config.case_speed['tnum_lane0'],
# config.case_speed['tnum_lane1'])
# ggame = GameLoop(vehicle_generator)
# ggame.play()
# # ggame.draw_result("result.html")
# ggame.draw_result_pyplot(".")
if __name__ == '__main__':
main() | 37.785851 | 108 | 0.58233 |
ce530130c467202c9dd2359037337dda83e6eaa4 | 387 | py | Python | mundo2/ex053.py | dilsonm/CeV | 8043be36b2da187065691d23ed5cb40fd65f806f | [
"MIT"
] | null | null | null | mundo2/ex053.py | dilsonm/CeV | 8043be36b2da187065691d23ed5cb40fd65f806f | [
"MIT"
] | null | null | null | mundo2/ex053.py | dilsonm/CeV | 8043be36b2da187065691d23ed5cb40fd65f806f | [
"MIT"
] | null | null | null | # Crie um programa que leia uma frase qualquer e diga se ele um palndromo, desconsiderando os espaos.
frase = str(input('Digite uma frase: ')).strip()
palavras = frase.split()
junto = ''.join(palavras)
inverso = ''
for c in range( len(junto)-1, -1, -1):
inverso += junto[c]
if inverso == junto:
print('A frase um PALINDROMO')
else:
print('A frase NO um PALINDROMO') | 32.25 | 105 | 0.677003 |
ce53b07d3a1a59be1abb2c6bf2cf0cd25eb7f425 | 562 | py | Python | scripts/show_by_content_type.py | b-cube/Response-Identification-Info | d2fa24c9f0d7db7d8bbf5cda937e1a9dd29a8f6e | [
"MIT"
] | null | null | null | scripts/show_by_content_type.py | b-cube/Response-Identification-Info | d2fa24c9f0d7db7d8bbf5cda937e1a9dd29a8f6e | [
"MIT"
] | 1 | 2015-09-23T16:30:34.000Z | 2015-09-23T16:30:34.000Z | scripts/show_by_content_type.py | b-cube/Response-Identification-Info | d2fa24c9f0d7db7d8bbf5cda937e1a9dd29a8f6e | [
"MIT"
] | 1 | 2020-03-25T09:41:03.000Z | 2020-03-25T09:41:03.000Z | import os
import glob
import json
for f in glob.glob('/Users/sparky/Documents/solr_responses/solr_20150922_docs/*.json'):
with open(f, 'r') as g:
data = json.loads(g.read())
headers = data.get('response_headers', [])
if not headers:
continue
headers = dict(
(k.strip().lower(), v.strip()) for k, v in (h.split(':', 1) for h in headers)
)
content_type = headers.get('content-type', '')
if content_type and 'shockwave' in content_type:
print data.get('url'), content_type, data.get('tstamp')
| 28.1 | 87 | 0.617438 |
ce53e368e8055c32a3b93d22ee8f35500ad5e829 | 5,024 | py | Python | descarteslabs/workflows/models/tests/test_tile_url.py | descarteslabs/descarteslabs-python | efc874d6062603dc424c9646287a9b1f8636e7ac | [
"Apache-2.0"
] | 167 | 2017-03-23T22:16:58.000Z | 2022-03-08T09:19:30.000Z | descarteslabs/workflows/models/tests/test_tile_url.py | descarteslabs/descarteslabs-python | efc874d6062603dc424c9646287a9b1f8636e7ac | [
"Apache-2.0"
] | 93 | 2017-03-23T22:11:40.000Z | 2021-12-13T18:38:53.000Z | descarteslabs/workflows/models/tests/test_tile_url.py | descarteslabs/descarteslabs-python | efc874d6062603dc424c9646287a9b1f8636e7ac | [
"Apache-2.0"
] | 46 | 2017-03-25T19:12:14.000Z | 2021-08-15T18:04:29.000Z | import pytest
import datetime
import json
import functools
from urllib.parse import urlencode, parse_qs
from descarteslabs.common.graft import client as graft_client
from ... import types
from .. import tile_url
| 35.132867 | 87 | 0.602309 |
ce5459689c023b5b6363dd479cd3042521f3f23d | 1,112 | py | Python | backend-project/small_eod/collections/migrations/0003_auto_20200131_2033.py | WlodzimierzKorza/small_eod | 027022bd71122a949a2787d0fb86518df80e48cd | [
"MIT"
] | 64 | 2019-12-30T11:24:03.000Z | 2021-06-24T01:04:56.000Z | backend-project/small_eod/collections/migrations/0003_auto_20200131_2033.py | WlodzimierzKorza/small_eod | 027022bd71122a949a2787d0fb86518df80e48cd | [
"MIT"
] | 465 | 2018-06-13T21:43:43.000Z | 2022-01-04T23:33:56.000Z | backend-project/small_eod/collections/migrations/0003_auto_20200131_2033.py | WlodzimierzKorza/small_eod | 027022bd71122a949a2787d0fb86518df80e48cd | [
"MIT"
] | 72 | 2018-12-02T19:47:03.000Z | 2022-01-04T22:54:49.000Z | # Generated by Django 3.0.2 on 2020-01-31 20:33
from django.db import migrations, models
| 32.705882 | 121 | 0.611511 |
ce57252a3bbc1b4941fbfe4e1830281dc89e2cd0 | 908 | py | Python | violet_services/src/service_client.py | Violet-C/EE477_Final | d30cc07833d8c1bb44c0a3373afa739a0b81d25f | [
"Apache-2.0"
] | null | null | null | violet_services/src/service_client.py | Violet-C/EE477_Final | d30cc07833d8c1bb44c0a3373afa739a0b81d25f | [
"Apache-2.0"
] | null | null | null | violet_services/src/service_client.py | Violet-C/EE477_Final | d30cc07833d8c1bb44c0a3373afa739a0b81d25f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Command-line client for the 'word_count' ROS service: joins its CLI
# arguments into one sentence and prints the word count the service returns.
import rospy # ROS Python client library
from violet_services.srv import WordCount # service request/response type
import sys # command-line argument access
rospy.init_node('service_client') # register this process as a ROS node
rospy.wait_for_service('word_count') # block until the service is advertised
word_counter = rospy.ServiceProxy( # callable proxy that invokes the service
    'word_count', # service name
    WordCount # service type
)
valid_words = [k for k in sys.argv[1:] if '__' not in k] # drop tokens containing '__' (presumably ROS remapping args — confirm)
parsed_words = ' '.join(valid_words) # build one space-separated sentence
word_count = word_counter(parsed_words) # synchronous service call
print(parsed_words+' --> has '+str(word_count.count)+' words') # show the result
ce58480e7eec21fe6db13cf13d137977321623c9 | 8,249 | py | Python | utils/util.py | Hhhhhhhhhhao/I2T2I | 6a08705b72ff38e3679a9344f987b191d3f94a25 | [
"MIT"
] | null | null | null | utils/util.py | Hhhhhhhhhhao/I2T2I | 6a08705b72ff38e3679a9344f987b191d3f94a25 | [
"MIT"
] | null | null | null | utils/util.py | Hhhhhhhhhhao/I2T2I | 6a08705b72ff38e3679a9344f987b191d3f94a25 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import torch
from torch.autograd import Variable
import numpy as np
import scipy
import matplotlib.pyplot as plt
import cv2
import scipy.ndimage
import shutil
import scipy.misc as misc
from PIL import Image
def imresize(img, resizeratio=1):
    '''Scale an image array by ``resizeratio`` in both spatial dimensions.

    A single-channel input (H, W, 1) gets squeezed to (H, W) by the resize
    call, so the trailing channel axis is restored before returning.
    '''
    if resizeratio == 1:
        # No scaling requested: return the original array untouched.
        return img
    # NOTE(review): outshape is (width, height) as cv2.resize expects, but it
    # is passed to scipy.misc.imresize, whose ``size`` is (height, width) —
    # confirm the axis order is intended. Also, scipy.misc.imresize was
    # removed in SciPy 1.3, so this call requires an old SciPy release.
    outshape = (int(img.shape[1] * resizeratio), int(img.shape[0] * resizeratio))
    # temp = cv2.resize(img, outshape).astype(float)
    temp = misc.imresize(img, size=outshape).astype(float)
    if len(img.shape) == 3 and img.shape[2] == 1:
        # Restore the channel axis dropped during resizing.
        temp = np.reshape(temp, temp.shape + (1,))
    return temp
def word_list(word_idx_list, vocab):
    """Map a sequence of vocabulary ids back to their words.

    Translation stops at the vocabulary's end token; the start token is
    skipped. Returns the words as a list of strings.
    """
    words = []
    position = 0
    total = len(word_idx_list)
    while position < total:
        token = vocab.idx2word[word_idx_list[position]]
        position += 1
        if token == vocab.end_word:
            break
        if token != vocab.start_word:
            words.append(token)
    return words
def clean_sentence(word_idx_list, vocab):
    """Translate a list of word ids into a single human-readable sentence.

    Ids are looked up in ``vocab.idx2word``; translation stops at the end
    token and the start token is dropped. Words are joined with spaces.
    """
    selected = []
    for vocab_id in word_idx_list:
        token = vocab.idx2word[vocab_id]
        if token == vocab.end_word:
            break
        if token == vocab.start_word:
            continue
        selected.append(token)
    return " ".join(selected)
def tensor2im(input_image, imtype=np.uint8):
    """Convert a torch Tensor into a numpy image array.

    The first item of the batch is taken, moved to CPU, mapped from the
    [-1, 1] range to [0, 255] and transposed to (H, W, C). Grayscale
    (1-channel) inputs are tiled to 3 channels. Numpy inputs are only cast
    to ``imtype``; anything else is returned unchanged.

    Parameters:
        input_image (tensor/ndarray) -- the input image
        imtype (type)                -- desired numpy dtype of the result
    """
    if isinstance(input_image, np.ndarray):
        converted = input_image
    elif isinstance(input_image, torch.Tensor):
        array = input_image.data[0].cpu().float().numpy()
        if array.shape[0] == 1:
            # Replicate the single channel so the output is RGB-shaped.
            array = np.tile(array, (3, 1, 1))
        # CHW -> HWC, then rescale from [-1, 1] to [0, 255].
        converted = (array.transpose(1, 2, 0) + 1) / 2.0 * 255.0
    else:
        return input_image
    return converted.astype(imtype)
def diagnose_network(net, name='network'):
    """Print the network's name and the mean of average absolute gradients.

    Parameters:
        net (torch network) -- Torch network to inspect
        name (str)          -- label printed before the value
    """
    total = 0.0
    counted = 0
    for parameter in net.parameters():
        grad = parameter.grad
        if grad is None:
            # Parameter has no gradient yet (e.g. before backward()).
            continue
        total += torch.mean(torch.abs(grad.data))
        counted += 1
    if counted > 0:
        total = total / counted
    print(name)
    print(total)
def save_image(image_numpy, image_path):
    """Persist a numpy image array to disk via Pillow.

    Parameters:
        image_numpy (numpy array) -- image data to write
        image_path (str)          -- destination file path
    """
    Image.fromarray(image_numpy).save(image_path)
def print_numpy(x, val=True, shp=False):
    """Print summary statistics (and optionally the shape) of a numpy array.

    Parameters:
        val (bool) -- print mean/min/max/median/std of the flattened array
        shp (bool) -- print the array's shape
    """
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if not val:
        return
    flat = x.flatten()
    print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
        np.mean(flat), np.min(flat), np.max(flat), np.median(flat), np.std(flat)))
def mkdirs(paths):
    """Create every directory in *paths* that does not exist yet.

    Parameters:
        paths (str or list of str) -- one directory path, or several
    """
    if not isinstance(paths, list) or isinstance(paths, str):
        # A single path was given directly.
        mkdir(paths)
        return
    for each_path in paths:
        mkdir(each_path)
def mkdir(path):
    """Create the directory *path* (including parents) unless it exists.

    Parameters:
        path (str) -- a single directory path
    """
    if os.path.exists(path):
        return
    os.makedirs(path)
| 30.216117 | 127 | 0.63244 |
ce5a2416c780442544d0d5e9283fbaff98d9c5b6 | 9,882 | py | Python | hn2pdf.py | KyrillosL/HackerNewsToPDF | 489e8225d14550c874c2eb448005e8313662eac6 | [
"BSD-3-Clause"
] | null | null | null | hn2pdf.py | KyrillosL/HackerNewsToPDF | 489e8225d14550c874c2eb448005e8313662eac6 | [
"BSD-3-Clause"
] | null | null | null | hn2pdf.py | KyrillosL/HackerNewsToPDF | 489e8225d14550c874c2eb448005e8313662eac6 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""Python-Pinboard
Python script for downloading your saved stories and saved comments on Hacker News.
"""
__version__ = "1.1"
__license__ = "BSD"
__copyright__ = "Copyright 2013-2014, Luciano Fiandesio"
__author__ = "Luciano Fiandesio <http://fiandes.io/> & John David Pressman <http://jdpressman.com>"
import argparse
import json
import os
import sys
import time
import urllib
import pdfkit
import requests
import tqdm
from bs4 import BeautifulSoup
from lxml import html
HACKERNEWS = 'https://news.ycombinator.com'
def _parse_bool(text):
    """Interpret a command-line flag value as a boolean.

    Fixes the classic ``type=bool`` argparse pitfall: ``bool("False")`` is
    True, so previously any non-empty value enabled the flag. Common falsy
    spellings now disable it; everything else enables it.
    """
    return str(text).strip().lower() not in ('', '0', 'false', 'no', 'n')


# Command-line interface: positional credentials plus scraping options.
parser = argparse.ArgumentParser()
parser.add_argument("username", help="The Hacker News username to grab the stories from.")
parser.add_argument("password", help="The password to login with using the username.")
parser.add_argument("-f", "--file", help="Filepath to store the JSON document at.")
parser.add_argument("-n", "--number", default=1, type=int, help="Number of pages to grab, default 1. 0 grabs all pages.")
parser.add_argument("-s", "--stories", action="store_true", help="Grab stories only.")
parser.add_argument("-c", "--comments", action="store_true", help="Grab comments only.")
# Default stays 1 (truthy): argparse applies ``type`` only to values given
# on the command line, so behavior without the flag is unchanged.
parser.add_argument("-pdf", "--pdf", default=1, type=_parse_bool, help="Save to PDF")
parser.add_argument("-o", "--output_folder", default="output/", type=str, help="Output Folder for PDF")
arguments = parser.parse_args()
def getSavedStories(session, hnuser, page_range):
    """Return a list of story IDs representing your saved (upvoted) stories.

    Only the IDs are collected here; the caller fetches each story's
    metadata separately through the Hacker News API.

    Parameters:
        session    -- authenticated requests session for news.ycombinator.com
        hnuser     -- Hacker News username whose upvoted stories to list
        page_range -- iterable of 1-based result-page numbers to scrape
    """
    story_ids = []
    for page_index in page_range:
        saved = session.get(HACKERNEWS + '/upvoted?id=' +
                            hnuser + "&p=" + str(page_index))
        soup = BeautifulSoup(saved.content, features="lxml")
        for tag in soup.findAll('td', attrs={'class': 'subtext'}):
            # Bug fix: the original compared ``tag.a is not type(None)``,
            # which is always True (a tag is never the NoneType class).
            # The intended guard is "this cell actually has an anchor".
            if tag.a is not None:
                a_tags = tag.find_all('a')
                for a_tag in a_tags:
                    if a_tag['href'][:5] == 'item?':
                        # The item link looks like 'item?id=<story_id>'.
                        story_id = a_tag['href'].split('id=')[1]
                        story_ids.append(story_id)
                        break
    return story_ids
def getSavedComments(session, hnuser, page_range):
    """Return a list of IDs representing your saved (upvoted) comments.

    Only the IDs are collected here; the caller fetches each comment's
    metadata separately through the Hacker News API.

    Parameters:
        session    -- authenticated requests session for news.ycombinator.com
        hnuser     -- Hacker News username whose upvoted comments to list
        page_range -- iterable of 1-based result-page numbers to scrape
    """
    comment_ids = []
    for page_index in page_range:
        saved = session.get(HACKERNEWS + '/upvoted?id=' +
                            hnuser + "&comments=t" + "&p=" + str(page_index))
        soup = BeautifulSoup(saved.content, features="lxml")
        for tag in soup.findAll('td', attrs={'class': 'default'}):
            # Bug fix: the original compared ``tag.a is not type(None)``,
            # which is always True (a tag is never the NoneType class).
            # The intended guard is "this cell actually has an anchor".
            if tag.a is not None:
                a_tags = tag.find_all('a')
                for a_tag in a_tags:
                    if a_tag['href'][:5] == 'item?':
                        # The item link looks like 'item?id=<comment_id>'.
                        comment_id = a_tag['href'].split('id=')[1]
                        comment_ids.append(comment_id)
                        break
    return comment_ids
def getHackerNewsItem(item_id):
    """Fetch one 'item' from the Hacker News v0 API by its ID.

    Sleeps briefly first to stay polite to the API. The 'kids' field is
    removed and '/' in the title is replaced with '-'. On a network error a
    placeholder dict carrying the id and an explanatory title is returned.
    """
    time.sleep(0.2)
    endpoint = "https://hacker-news.firebaseio.com/v0/item/" + item_id + ".json"
    try:
        with urllib.request.urlopen(endpoint) as response:
            item_data = json.loads(response.read().decode('utf-8'))
            if "kids" in item_data:
                del item_data["kids"]
            # Escape / in the title so it is safe to use in file names later.
            item_data["title"] = item_data["title"].replace("/", "-")
            return item_data
    except urllib.error.URLError:
        return {"title": "Item " + item_id + " could not be retrieved",
                "id": item_id}
if __name__ == "__main__":
main()
| 40.334694 | 121 | 0.589152 |
ce5a8256603662fe067ec0abfb76762e08552066 | 558 | py | Python | backend/step_functions/default.py | barak-obama/Game-Of-Life | 3e84e5dda2561c4b87249de64680a4ea504dd42e | [
"MIT"
] | null | null | null | backend/step_functions/default.py | barak-obama/Game-Of-Life | 3e84e5dda2561c4b87249de64680a4ea504dd42e | [
"MIT"
] | null | null | null | backend/step_functions/default.py | barak-obama/Game-Of-Life | 3e84e5dda2561c4b87249de64680a4ea504dd42e | [
"MIT"
] | null | null | null | import itertools
| 23.25 | 83 | 0.483871 |
ce5ab22e009ac58d14c27fe38208f968a51e0d2e | 2,110 | py | Python | rapid_response_xblock/models.py | HamzaIbnFarooq/rapid-response-xblock | dbc6bfbaab0f583680816ba86f0d43c84c931d58 | [
"BSD-3-Clause"
] | null | null | null | rapid_response_xblock/models.py | HamzaIbnFarooq/rapid-response-xblock | dbc6bfbaab0f583680816ba86f0d43c84c931d58 | [
"BSD-3-Clause"
] | 104 | 2018-02-02T20:51:00.000Z | 2022-03-31T08:44:24.000Z | rapid_response_xblock/models.py | HamzaIbnFarooq/rapid-response-xblock | dbc6bfbaab0f583680816ba86f0d43c84c931d58 | [
"BSD-3-Clause"
] | 1 | 2020-12-16T08:24:02.000Z | 2020-12-16T08:24:02.000Z | """
Rapid Response block models
"""
from django.conf import settings
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from jsonfield import JSONField
from model_utils.models import TimeStampedModel
from opaque_keys.edx.django.models import (
CourseKeyField,
UsageKeyField,
)
| 28.133333 | 78 | 0.652607 |
ce5beb636533234d09e40c6e181344e4d00f51e7 | 371 | py | Python | sched_slack_bot/reminder/sender.py | Germandrummer92/SchedSlackBot | d211f7c0d78eb8ebbc1f22cc186c94fc61bad491 | [
"MIT"
] | null | null | null | sched_slack_bot/reminder/sender.py | Germandrummer92/SchedSlackBot | d211f7c0d78eb8ebbc1f22cc186c94fc61bad491 | [
"MIT"
] | null | null | null | sched_slack_bot/reminder/sender.py | Germandrummer92/SchedSlackBot | d211f7c0d78eb8ebbc1f22cc186c94fc61bad491 | [
"MIT"
] | null | null | null | import abc
from sched_slack_bot.model.reminder import Reminder
| 24.733333 | 60 | 0.738544 |
ce5d174c1f7c2c86516c002a84d1f0b2d728c91d | 1,987 | py | Python | app/routes.py | systemicsmitty/TI4_battle_sim | b4ed142ff57d19ed50705ba40f83b8b3b7e3a774 | [
"MIT"
] | null | null | null | app/routes.py | systemicsmitty/TI4_battle_sim | b4ed142ff57d19ed50705ba40f83b8b3b7e3a774 | [
"MIT"
] | null | null | null | app/routes.py | systemicsmitty/TI4_battle_sim | b4ed142ff57d19ed50705ba40f83b8b3b7e3a774 | [
"MIT"
] | null | null | null | from flask import render_template
from app import app, html_generator
import app.calculator.calculator as calc
from app.route_helpers import units_from_form, options_from_form, options_list, flash_errors
from app.forms import InputForm
from collections import defaultdict
| 36.127273 | 116 | 0.631605 |
ce5d8d0f3c28fed69d76da9c81283dbdc6272f7e | 1,505 | py | Python | code/dependancy/smaliparser.py | OmkarMozar/CUPAP | 6055f423e3f9b8bb1a44dd8fab73630554363b3d | [
"Apache-2.0"
] | null | null | null | code/dependancy/smaliparser.py | OmkarMozar/CUPAP | 6055f423e3f9b8bb1a44dd8fab73630554363b3d | [
"Apache-2.0"
] | null | null | null | code/dependancy/smaliparser.py | OmkarMozar/CUPAP | 6055f423e3f9b8bb1a44dd8fab73630554363b3d | [
"Apache-2.0"
] | null | null | null | from smalisca.core.smalisca_main import SmaliscaApp
from smalisca.modules.module_smali_parser import SmaliParser
from smalisca.core.smalisca_app import App
from smalisca.core.smalisca_logging import log
from smalisca.modules.module_sql_models import AppSQLModel
import smalisca.core.smalisca_config as config
import multiprocessing
import os
from cement.core import controller
from cement.core.controller import CementBaseController
import json
| 24.672131 | 73 | 0.743522 |
ce5dcda8e728127b9f9d9754ec7ec959e800ef14 | 31,373 | py | Python | modules/users_and_roles_tab.py | scrummastermind/sumologictoolbox | 02d9acb970943521685091d36b8d5135e817c22c | [
"Apache-2.0"
] | null | null | null | modules/users_and_roles_tab.py | scrummastermind/sumologictoolbox | 02d9acb970943521685091d36b8d5135e817c22c | [
"Apache-2.0"
] | null | null | null | modules/users_and_roles_tab.py | scrummastermind/sumologictoolbox | 02d9acb970943521685091d36b8d5135e817c22c | [
"Apache-2.0"
] | null | null | null | class_name = 'users_and_roles_tab'
from qtpy import QtCore, QtGui, QtWidgets, uic
import os
import sys
import re
import pathlib
import json
from logzero import logger
from modules.sumologic import SumoLogic
from modules.shared import ShowTextDialog
| 48.340524 | 137 | 0.597966 |
ce5e17e8dbf5e904faf5468fbc530840fc418ada | 1,201 | py | Python | app.py | kecleveland/mhdn_app | 27cbd3fcb6d831913481a7c0d51af6b3641d6cf3 | [
"MIT"
] | null | null | null | app.py | kecleveland/mhdn_app | 27cbd3fcb6d831913481a7c0d51af6b3641d6cf3 | [
"MIT"
] | null | null | null | app.py | kecleveland/mhdn_app | 27cbd3fcb6d831913481a7c0d51af6b3641d6cf3 | [
"MIT"
] | null | null | null | from twarc import Twarc2, expansions
from pathlib import Path
import json
import config
import os
import config
appConfig = config.Config
client = Twarc2(bearer_token=appConfig.bearer_token)
file_path = Path(f"{appConfig.file_path}{appConfig.file_name}")
if __name__ == "__main__":
main() | 31.605263 | 71 | 0.555371 |
ce5f3e28692a3faeaa82556c686295cb266a77ee | 300 | py | Python | src/utils/regex_utils/regex_utils.py | BichengWang/python-notebook | 83fae37432a2bf701566e85ab6d7e8e3d688a0ee | [
"MIT"
] | null | null | null | src/utils/regex_utils/regex_utils.py | BichengWang/python-notebook | 83fae37432a2bf701566e85ab6d7e8e3d688a0ee | [
"MIT"
] | null | null | null | src/utils/regex_utils/regex_utils.py | BichengWang/python-notebook | 83fae37432a2bf701566e85ab6d7e8e3d688a0ee | [
"MIT"
] | null | null | null | import re
if __name__ == "__main__":
content = 'an example word:cat and word:dog'
reg = r'word:\w'
print(find_indices())
print(find_content())
| 17.647059 | 58 | 0.653333 |
ce60955aeef652ef027da2711317acb273b74ef6 | 5,944 | py | Python | tests/test_trainer/test_pipeline/test_p2p.py | DevinCheung/ColossalAI | 632e622de818697f9949e35117c0432d88f62c87 | [
"Apache-2.0"
] | null | null | null | tests/test_trainer/test_pipeline/test_p2p.py | DevinCheung/ColossalAI | 632e622de818697f9949e35117c0432d88f62c87 | [
"Apache-2.0"
] | null | null | null | tests/test_trainer/test_pipeline/test_p2p.py | DevinCheung/ColossalAI | 632e622de818697f9949e35117c0432d88f62c87 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.communication import (recv_backward, recv_forward,
recv_tensor_meta, send_backward,
send_backward_recv_forward, send_forward,
send_forward_recv_backward,
send_tensor_meta)
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.initialize import launch
from colossalai.logging import get_dist_logger
from colossalai.utils import get_current_device
from functools import partial
BATCH_SIZE = 16
SEQ_LENGTH = 64
HIDDEN_SIZE = 128
CONFIG = dict(
parallel=dict(
pipeline=dict(size=4),
tensor=dict(size=1, mode=None)
),
seed=1024
)
if __name__ == '__main__':
test_p2p()
| 36.466258 | 79 | 0.675135 |
ce60e885998a6e65935f35c9104bd85ccefe442c | 23,622 | py | Python | mine.py | appenz/minebot | e1bd18053873c4d686de57e014a2cd8f27d4dd4c | [
"Apache-2.0"
] | 11 | 2021-08-28T18:21:43.000Z | 2022-03-08T16:08:55.000Z | mine.py | appenz/minebot | e1bd18053873c4d686de57e014a2cd8f27d4dd4c | [
"Apache-2.0"
] | 3 | 2022-02-05T17:47:53.000Z | 2022-03-10T17:36:48.000Z | mine.py | appenz/minebot | e1bd18053873c4d686de57e014a2cd8f27d4dd4c | [
"Apache-2.0"
] | 5 | 2022-02-04T19:12:50.000Z | 2022-03-18T20:54:00.000Z | #
# Functions for mining blocks
#
import itertools
from javascript import require
Vec3 = require('vec3').Vec3
from botlib import *
from inventory import *
from workarea import * | 33.649573 | 120 | 0.502117 |
ce617e3015fa7ae63ff96b316d5d14af95c4007f | 16,667 | py | Python | src/the_tale/the_tale/accounts/tests/test_account_prototype.py | devapromix/the-tale | 2a10efd3270734f8cf482b4cfbc5353ef8f0494c | [
"BSD-3-Clause"
] | 1 | 2020-04-02T11:51:20.000Z | 2020-04-02T11:51:20.000Z | src/the_tale/the_tale/accounts/tests/test_account_prototype.py | devapromix/the-tale | 2a10efd3270734f8cf482b4cfbc5353ef8f0494c | [
"BSD-3-Clause"
] | null | null | null | src/the_tale/the_tale/accounts/tests/test_account_prototype.py | devapromix/the-tale | 2a10efd3270734f8cf482b4cfbc5353ef8f0494c | [
"BSD-3-Clause"
] | null | null | null |
import smart_imports
smart_imports.all()
| 47.48433 | 163 | 0.710866 |
ce62bdf76c9ed174e5607a0e506209e79d02b892 | 698 | py | Python | 952/952.py | vladcto/ACMP_Answers | bc163068d7b27c5241f995da3f58a1f8c623d460 | [
"Unlicense"
] | 1 | 2020-02-17T18:56:12.000Z | 2020-02-17T18:56:12.000Z | 952/952.py | vladcto/ACMP_Answers | bc163068d7b27c5241f995da3f58a1f8c623d460 | [
"Unlicense"
] | null | null | null | 952/952.py | vladcto/ACMP_Answers | bc163068d7b27c5241f995da3f58a1f8c623d460 | [
"Unlicense"
] | null | null | null | inp = input().split(" ")
# ``inp`` is the space-split input line read just above this block.
adult = int(inp[0])
child = int(inp[1])
# No people at all: both bounds are zero.
if adult == 0 and child == 0:
    print("0 0")
    quit()
# Children cannot attend without at least one adult.
if adult == 0:
    print("Impossible")
    quit()
# Minimum tickets: every adult pays, plus any children beyond one free
# child per adult.
unmatched_children = max(child - adult, 0)
lower_bound = adult + unmatched_children
# Maximum tickets: with children present, all but one pay alongside the
# adults; with none, only the adults pay.
upper_bound = adult if child == 0 else adult + child - 1
print("{} {}".format(lower_bound, upper_bound))
| 21.8125 | 59 | 0.65616 |
ce642e6e7a09fb0be794de1dfe62d3f787626a2a | 52 | py | Python | tests/django/__init__.py | estudio89/maestro-python | 331079cb3f0c10de2e19210cbade793544510f33 | [
"BSD-3-Clause"
] | null | null | null | tests/django/__init__.py | estudio89/maestro-python | 331079cb3f0c10de2e19210cbade793544510f33 | [
"BSD-3-Clause"
] | null | null | null | tests/django/__init__.py | estudio89/maestro-python | 331079cb3f0c10de2e19210cbade793544510f33 | [
"BSD-3-Clause"
] | null | null | null | default_app_config = "tests.django.apps.MyAppConfig" | 52 | 52 | 0.846154 |
ce6475945a6e1e99c628db4e1feb8a20077669ce | 1,440 | py | Python | setup.py | eric-volz/defichainLibrary | 458a8155bd595bf0fdf026651d95a5fe78dafc9c | [
"MIT"
] | 1 | 2022-03-29T15:15:17.000Z | 2022-03-29T15:15:17.000Z | setup.py | eric-volz/defichainLibrary | 458a8155bd595bf0fdf026651d95a5fe78dafc9c | [
"MIT"
] | null | null | null | setup.py | eric-volz/defichainLibrary | 458a8155bd595bf0fdf026651d95a5fe78dafc9c | [
"MIT"
] | 1 | 2022-03-24T12:25:44.000Z | 2022-03-24T12:25:44.000Z | from setuptools import setup
from os import path
# Package release metadata.
VERSION = '1.0.0'
DESCRIPTION = 'Defichain Python Library'

# Project URLs
project_urls = {
    "Tracker": "https://github.com/eric-volz/DefichainPython",
    "Documentation": "https://docs.defichain-python.de"
}

# Use the PyPI-specific README as the long description shown on the
# project page.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README_for_pypi.md'), encoding='utf-8') as f:
    LONG_DESCRIPTION = f.read()

# Setting up
setup(
    name="defichain",
    version=VERSION,
    author="Intr0c",
    author_email="introc@volz.link",
    url="https://github.com/eric-volz/DefichainPython",
    # Bug fix: project_urls was defined above but never passed to setup(),
    # so the Tracker/Documentation links never reached PyPI.
    project_urls=project_urls,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    packages=['defichain',
              'defichain.node',
              'defichain.exceptions',
              'defichain.ocean',
              'defichain.node.modules',
              'defichain.ocean.modules'],
    install_requires=["requests"],
    keywords=['python', 'defichain', 'node', 'ocean'],
    classifiers=[
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Operating System :: Unix",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
    ]
)
| 31.304348 | 82 | 0.634722 |
ce6649c4f6c16cf45f7213f96f05b37dd34d751f | 4,936 | py | Python | test/test_hdf5.py | gonzalobg/hpc-container-maker | dd5486c3fbb0fce38d825173022908ef0f96f77e | [
"Apache-2.0"
] | 1 | 2021-01-04T00:29:22.000Z | 2021-01-04T00:29:22.000Z | test/test_hdf5.py | gonzalobg/hpc-container-maker | dd5486c3fbb0fce38d825173022908ef0f96f77e | [
"Apache-2.0"
] | null | null | null | test/test_hdf5.py | gonzalobg/hpc-container-maker | dd5486c3fbb0fce38d825173022908ef0f96f77e | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods, bad-continuation
"""Test cases for the hdf5 module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import centos, docker, ubuntu
from hpccm.building_blocks.hdf5 import hdf5
| 37.393939 | 164 | 0.640194 |
ce69ac58ca5435e4721a3c9bb26cdcd8b83c0839 | 160 | py | Python | exercise/newfile45.py | LeeBeral/python | 9f0d360d69ee5245e3ef13a9dc9fc666374587a4 | [
"MIT"
] | null | null | null | exercise/newfile45.py | LeeBeral/python | 9f0d360d69ee5245e3ef13a9dc9fc666374587a4 | [
"MIT"
] | null | null | null | exercise/newfile45.py | LeeBeral/python | 9f0d360d69ee5245e3ef13a9dc9fc666374587a4 | [
"MIT"
] | null | null | null | show databases;
show tables
desc table
use database
delete from . where id=5;
select * from where id=5;
update set age=15,home='' where id=5 | 22.857143 | 41 | 0.73125 |
ce6a80ab1bf79ba95677f54c2de54bfe1b5f0e5f | 604 | py | Python | src/word2vec.py | shiroyagicorp/japanese-word2vec-model-builder | c9570702110f2851f4cb7d38948c5d7f59ef8a4c | [
"MIT"
] | 98 | 2017-02-20T14:23:36.000Z | 2022-01-23T07:09:29.000Z | src/word2vec.py | shiroyagicorp/japanese-word2vec-model-builder | c9570702110f2851f4cb7d38948c5d7f59ef8a4c | [
"MIT"
] | 1 | 2021-06-29T05:34:39.000Z | 2021-11-17T23:52:07.000Z | src/word2vec.py | shiroyagicorp/japanese-word2vec-model-builder | c9570702110f2851f4cb7d38948c5d7f59ef8a4c | [
"MIT"
] | 11 | 2017-11-07T05:25:30.000Z | 2021-06-29T05:28:08.000Z | import multiprocessing
from gensim.models.word2vec import Word2Vec
def build_gensim_w2v_model(model_path, iter_tokens, size, window, min_count):
    """
    Train a gensim Word2Vec model over the given corpus and save it.

    Parameters
    ----------
    model_path : string
        Path the trained Word2Vec model is saved to.
    iter_tokens : callable
        Zero-argument callable returning a fresh iterator of documents
        (lists of words). It is invoked twice: once for vocabulary
        building and once for training.
    size, window, min_count : int
        Forwarded verbatim to the ``Word2Vec`` constructor.
    """
    model = Word2Vec(
        size=size,
        window=window,
        min_count=min_count,
        workers=multiprocessing.cpu_count()
    )
    model.build_vocab(iter_tokens())
    # NOTE(review): gensim >= 1.0 requires total_examples/epochs arguments
    # for train(); this call signature only works on older gensim releases.
    model.train(iter_tokens())
    # Normalize vectors in place (replace=True discards the raw vectors to
    # save memory; the model becomes read-only for further training).
    model.init_sims(replace=True)
    model.save(model_path)
ce6c5f2a56792c631b587b682534feb77c7a0a15 | 2,945 | py | Python | companion/telegram.py | jouir/mining-companion | b66aa8b1586a31ddad0c2454e4762661c63385a1 | [
"Unlicense"
] | 2 | 2021-02-25T09:09:57.000Z | 2021-03-03T14:11:30.000Z | companion/telegram.py | jouir/flexpool-activity | b66aa8b1586a31ddad0c2454e4762661c63385a1 | [
"Unlicense"
] | 2 | 2021-08-18T11:10:26.000Z | 2021-08-18T11:14:23.000Z | companion/telegram.py | jouir/mining-companion | b66aa8b1586a31ddad0c2454e4762661c63385a1 | [
"Unlicense"
] | null | null | null | import logging
import os
from copy import copy
import requests
from jinja2 import Environment, FileSystemLoader
logger = logging.getLogger(__name__)
absolute_path = os.path.split(os.path.abspath(__file__))[0]
| 43.955224 | 110 | 0.639389 |
ce6cccfac6a948d40441d5b2f5121b05efacb62f | 295 | py | Python | forecast_lab/metrics.py | gsimbr/forecast-lab | a26234f3e11b4b8268d6cbe33bb84d79da45ecdd | [
"MIT"
] | 5 | 2019-06-04T11:04:06.000Z | 2022-03-29T23:05:25.000Z | forecast_lab/metrics.py | gsimbr/forecast-lab | a26234f3e11b4b8268d6cbe33bb84d79da45ecdd | [
"MIT"
] | 1 | 2022-02-14T13:22:47.000Z | 2022-02-14T13:22:47.000Z | forecast_lab/metrics.py | gsimbr/forecast-lab | a26234f3e11b4b8268d6cbe33bb84d79da45ecdd | [
"MIT"
] | 2 | 2020-02-17T11:54:18.000Z | 2020-10-06T12:49:15.000Z | import numpy
import math
from sklearn.metrics import mean_squared_error
| 29.5 | 66 | 0.79322 |
ce6db4a22e7fa1d771c8c341bb718daa5999b3ea | 446 | py | Python | Python_Interview/Algorithm/step_wise.py | QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | [
"MIT"
] | 2 | 2019-01-24T15:06:59.000Z | 2019-01-25T07:34:45.000Z | Python_Interview/Algorithm/step_wise.py | QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | [
"MIT"
] | 1 | 2019-12-23T09:45:11.000Z | 2019-12-23T09:45:11.000Z | Python_Interview/Algorithm/step_wise.py | QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | [
"MIT"
] | 1 | 2019-07-18T14:21:35.000Z | 2019-07-18T14:21:35.000Z | '''
mn
'''
| 18.583333 | 46 | 0.495516 |
ce6e0d7a568a5fc925496c5e465b79c3f4a3e233 | 3,854 | py | Python | run_create_codebuild_default.py | HardBoiledSmith/johanna | 0443a9040f0248f0a800c9d4b062e375f997bb6f | [
"MIT"
] | 64 | 2016-11-03T11:20:25.000Z | 2021-05-24T03:08:57.000Z | run_create_codebuild_default.py | HardBoiledSmith/johanna | 0443a9040f0248f0a800c9d4b062e375f997bb6f | [
"MIT"
] | 69 | 2016-11-03T14:09:35.000Z | 2022-02-07T12:52:05.000Z | run_create_codebuild_default.py | HardBoiledSmith/johanna | 0443a9040f0248f0a800c9d4b062e375f997bb6f | [
"MIT"
] | 19 | 2016-11-03T11:04:51.000Z | 2020-06-12T10:40:57.000Z | #!/usr/bin/env python3
import json
import time
from run_common import AWSCli
from run_common import print_message
from run_create_codebuild_common import create_base_iam_policy
from run_create_codebuild_common import create_iam_service_role
from run_create_codebuild_common import create_managed_secret_iam_policy
from run_create_codebuild_common import create_notification_rule
from run_create_codebuild_common import get_notification_rule
from run_create_codebuild_common import have_parameter_store
from run_create_codebuild_common import update_notification_rule
| 34.106195 | 105 | 0.583809 |
ce6e3c09a2e66420e8e9c581cff7e8f8d2db23fe | 2,282 | py | Python | config/test.py | nahidupa/grr | 100a9d85ef2abb234e12e3ac2623caffb4116be7 | [
"Apache-2.0"
] | 1 | 2016-02-13T15:40:20.000Z | 2016-02-13T15:40:20.000Z | config/test.py | nahidupa/grr | 100a9d85ef2abb234e12e3ac2623caffb4116be7 | [
"Apache-2.0"
] | 3 | 2020-02-11T22:29:15.000Z | 2021-06-10T17:44:31.000Z | config/test.py | nahidupa/grr | 100a9d85ef2abb234e12e3ac2623caffb4116be7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Configuration parameters for the test subsystem."""
import os
from grr.lib import config_lib
# Default for running in the current directory
config_lib.DEFINE_constant_string(
    "Test.srcdir",
    os.path.normpath(os.path.dirname(__file__) + "/../.."),
    "The directory containing the source code.")
# Paths below are derived from Test.srcdir / Test.data_dir through the
# config system's %(...) interpolation syntax.
config_lib.DEFINE_constant_string(
    "Test.data_dir",
    default="%(Test.srcdir)/grr/test_data",
    help="The directory where test data exist.")
config_lib.DEFINE_constant_string(
    "Test.config",
    default="%(Test.srcdir)/grr/config/grr-server.yaml",
    help="The path where the test configuration file exists.")
config_lib.DEFINE_constant_string(
    "Test.additional_test_config",
    default="%(Test.data_dir)/localtest.yaml",
    help="The path to a test config with local customizations.")
config_lib.DEFINE_string("Test.tmpdir", "/tmp/",
                         help="Somewhere to write temporary files.")
config_lib.DEFINE_string("Test.data_store", "FakeDataStore",
                         "The data store to run the tests against.")
config_lib.DEFINE_integer("Test.remote_pdb_port", 2525,
                          "Remote debugger port.")
# End-to-end test targets: always-on, network-connected clients.
config_lib.DEFINE_list("Test.end_to_end_client_ids", [],
                       "List of client ids to perform regular end_to_end tests"
                       " on. These clients should be always on and connected"
                       " to the network.")
config_lib.DEFINE_list("Test.end_to_end_client_hostnames", [],
                       "List of hostnames to perform regular end_to_end tests"
                       " on. These clients should be always on and connected"
                       " to the network.")
config_lib.DEFINE_string("Test.end_to_end_result_check_wait", "50m",
                         "rdfvalue.Duration string that determines how long we "
                         "wait after starting the endtoend test hunt before we "
                         "check the results. Should be long enough that all "
                         "clients will have picked up the hunt, but not so "
                         "long that the flow gets timed out.")
config_lib.DEFINE_string("PrivateKeys.ca_key_raw_data", "",
                         "For testing purposes.")
| 40.75 | 80 | 0.633655 |
ce6eed2c9d0065dffb079ead3cb624c8d3a05810 | 224 | py | Python | wixaward/urls.py | LekamCharity/wix-projects | 76f9ab4429a978a42f0cea3e3a305a7cdfc4541d | [
"MIT"
] | null | null | null | wixaward/urls.py | LekamCharity/wix-projects | 76f9ab4429a978a42f0cea3e3a305a7cdfc4541d | [
"MIT"
] | null | null | null | wixaward/urls.py | LekamCharity/wix-projects | 76f9ab4429a978a42f0cea3e3a305a7cdfc4541d | [
"MIT"
] | null | null | null | from django.urls import path
urlpatterns=[
path('profile',views.profile, name='profile'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| 22.4 | 60 | 0.665179 |
ce6fa0b28898bd299005931220b5305722ba63c3 | 8,940 | py | Python | pylogview/reader.py | CrazyIvan359/logview | 4fb145843315dd03ff4ba414a5a617775d9d2af1 | [
"MIT"
] | null | null | null | pylogview/reader.py | CrazyIvan359/logview | 4fb145843315dd03ff4ba414a5a617775d9d2af1 | [
"MIT"
] | 3 | 2020-11-01T23:57:39.000Z | 2020-11-02T01:21:48.000Z | pylogview/reader.py | CrazyIvan359/logview | 4fb145843315dd03ff4ba414a5a617775d9d2af1 | [
"MIT"
] | null | null | null | import typing as t
from pylogview import datefinder
from pylogview.record import LogRecord
if t.TYPE_CHECKING:
from pylogview.window import Window
def read(self, records: int = 1) -> t.List[LogRecord]:
"""Read to end of file and parse next line"""
self._read()
return self._get_records(records)
##### Internal Methods #####
def _read_last(self):
"""Read last ``lines`` of file, like 'tail -n'"""
if not self.isOpen:
return
try:
last_read_block = self._fd.tell()
block_end_byte = last_read_block
BLOCK_SIZE = min(block_end_byte, 1024)
remain_lines = self._window.config.lines
block_num = -1
blocks = []
while remain_lines > 0 and block_end_byte > 0:
if block_end_byte - BLOCK_SIZE > 0:
self._fd.seek(block_num * BLOCK_SIZE, 2)
blocks.append(self._fd.read(BLOCK_SIZE))
else:
self._fd.seek(0, 0)
blocks.append(self._fd.read(block_end_byte))
remain_lines -= blocks[-1].count(b"\n")
block_end_byte -= BLOCK_SIZE
block_num -= 1
self._fd.seek(last_read_block, 0)
except IOError as err:
self._fd = None
self._window.log.append(
f"Error while reading '{self.filename}': [{err.errno}] {err.strerror}"
)
else:
for block in blocks[::-1]:
self._buffer += block
def _find_record_prefix_length(self):
"""
Rudamentary prefix length finder. Looks for repeated same number of chars
between newline/file-start and timestamp.
"""
prefix_lengths = []
last_end = 1
buffer_string = self._buffer.decode()
for result in datefinder.find_dates(buffer_string, source=True, index=True):
if self._record_prefix_length is not None:
break
elif len(result[1]) < 6:
# skip matches too short, probably just numbers not a timestamp
continue
timestamp_end = result[2][1]
timestamp_start = timestamp_end - len(result[1]) - 1
prefix_lengths.append(
len(
buffer_string[
timestamp_start
- buffer_string[last_end:timestamp_start][::-1].find(
"\n"
) : timestamp_start
]
)
)
last_end = buffer_string.find("\n", timestamp_end)
for length in prefix_lengths:
if prefix_lengths.count(length) > 3:
self._record_prefix_length = length
break
| 36.048387 | 90 | 0.503803 |
ce70b641f16acd29f6ec6fd771bef13d60610bff | 235 | py | Python | zad1_6.py | kamilhabrych/python-semestr5-lista1 | 65faeffe83bcc4706b2818e2e7802d986b19244b | [
"MIT"
] | null | null | null | zad1_6.py | kamilhabrych/python-semestr5-lista1 | 65faeffe83bcc4706b2818e2e7802d986b19244b | [
"MIT"
] | null | null | null | zad1_6.py | kamilhabrych/python-semestr5-lista1 | 65faeffe83bcc4706b2818e2e7802d986b19244b | [
"MIT"
] | null | null | null | x = int(input('Podaj pierwsza liczbe calkowita: '))
y = int(input('Podaj druga liczbe calkowita: '))
z = int(input('Podaj trzecia liczbe calkowita: '))
print()
if x > 10:
print(x)
if y > 10:
print(y)
if z > 10:
print(z) | 16.785714 | 51 | 0.617021 |
ce70fc922ee9bc7104f6b739b1a14c96b849d90a | 6,194 | py | Python | modules/bulletinGenerator_Kingsgrove.py | featherbear/swec-elvanto-automation | 7f330ca5a87623ca452170efb4845814a4fbc2ad | [
"MIT"
] | null | null | null | modules/bulletinGenerator_Kingsgrove.py | featherbear/swec-elvanto-automation | 7f330ca5a87623ca452170efb4845814a4fbc2ad | [
"MIT"
] | null | null | null | modules/bulletinGenerator_Kingsgrove.py | featherbear/swec-elvanto-automation | 7f330ca5a87623ca452170efb4845814a4fbc2ad | [
"MIT"
] | null | null | null | from mailmerge import MailMerge
import re
import os.path
from ElvantoAPIExtensions import Enums, Helpers
from modules.__stub__ import ModuleStub
| 41.293333 | 165 | 0.610268 |
ce735019669e5c6f53493f5d8d363b42ab7d2267 | 1,434 | py | Python | class4/e3_pexpect.py | ktbyers/pynet_wantonik | 601bce26142b6741202c2bdafb9e0d0cec1b3c78 | [
"Apache-2.0"
] | 2 | 2017-05-11T12:05:15.000Z | 2021-07-15T18:13:19.000Z | class4/e3_pexpect.py | ktbyers/pynet_wantonik | 601bce26142b6741202c2bdafb9e0d0cec1b3c78 | [
"Apache-2.0"
] | null | null | null | class4/e3_pexpect.py | ktbyers/pynet_wantonik | 601bce26142b6741202c2bdafb9e0d0cec1b3c78 | [
"Apache-2.0"
] | 1 | 2017-05-11T12:05:18.000Z | 2017-05-11T12:05:18.000Z | #!/usr/bin/env python
'''
Simple script to execute shell command on lab router with Pexpect module.
'''
import pexpect, sys, re
from getpass import getpass
if __name__ == "__main__":
main()
| 29.875 | 86 | 0.631799 |
ce73d6c6f78dfe5a98cce6abd28653eb0dd424b3 | 2,081 | py | Python | codewof/tests/utils/errors/test_MissingRequiredFieldError.py | uccser-admin/programming-practice-prototype | 3af4c7d85308ac5bb35bb13be3ec18cac4eb8308 | [
"MIT"
] | 3 | 2019-08-29T04:11:22.000Z | 2021-06-22T16:05:51.000Z | codewof/tests/utils/errors/test_MissingRequiredFieldError.py | uccser-admin/programming-practice-prototype | 3af4c7d85308ac5bb35bb13be3ec18cac4eb8308 | [
"MIT"
] | 265 | 2019-05-30T03:51:46.000Z | 2022-03-31T01:05:12.000Z | codewof/tests/utils/errors/test_MissingRequiredFieldError.py | samuelsandri/codewof | c9b8b378c06b15a0c42ae863b8f46581de04fdfc | [
"MIT"
] | 7 | 2019-06-29T12:13:37.000Z | 2021-09-06T06:49:14.000Z | """Test class for MissingRequiredFieldError error."""
from django.test import SimpleTestCase
from utils.errors.MissingRequiredFieldError import MissingRequiredFieldError
| 35.87931 | 79 | 0.555983 |
ce75d65d2274a6ff994472ca2ea00470b33ed889 | 12,685 | py | Python | matmih/plot.py | glypher/pokemons | c2ea2edef984ee180425866c3f816504c27e460e | [
"BSD-3-Clause"
] | null | null | null | matmih/plot.py | glypher/pokemons | c2ea2edef984ee180425866c3f816504c27e460e | [
"BSD-3-Clause"
] | null | null | null | matmih/plot.py | glypher/pokemons | c2ea2edef984ee180425866c3f816504c27e460e | [
"BSD-3-Clause"
] | null | null | null | """plot.py: Utility builder class for ML plots.
Uses scikit-learn code samples and framework
"""
__author__ = "Mihai Matei"
__license__ = "BSD"
__email__ = "mihai.matei@my.fmi.unibuc.ro"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import randomcolor
import math
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from .image import Image
| 36.982507 | 144 | 0.578242 |
ce77b50727f7be773d7ee04df988b1888387d995 | 478 | py | Python | census_data_downloader/tables/medianage.py | JoeGermuska/census-data-downloader | 0098b9e522b78ad0e30301c9845ecbcc903c62e4 | [
"MIT"
] | 170 | 2019-04-01T01:41:42.000Z | 2022-03-25T21:22:06.000Z | census_data_downloader/tables/medianage.py | JoeGermuska/census-data-downloader | 0098b9e522b78ad0e30301c9845ecbcc903c62e4 | [
"MIT"
] | 68 | 2019-03-31T22:52:43.000Z | 2021-08-30T16:33:54.000Z | census_data_downloader/tables/medianage.py | JoeGermuska/census-data-downloader | 0098b9e522b78ad0e30301c9845ecbcc903c62e4 | [
"MIT"
] | 34 | 2019-04-02T17:57:16.000Z | 2022-03-28T17:22:35.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*
import collections
from census_data_downloader.core.tables import BaseTableConfig
from census_data_downloader.core.decorators import register
| 26.555556 | 62 | 0.707113 |
ce78d29afc746e1513a1eb1206ac1f0e6d11d03c | 3,791 | py | Python | powerfulseal/metriccollectors/prometheus_collector.py | snehalbiche/powerfulseal | 4ab70e0db8f33bd390d87e65c662774991483726 | [
"Apache-2.0"
] | 1 | 2018-07-12T22:04:51.000Z | 2018-07-12T22:04:51.000Z | powerfulseal/metriccollectors/prometheus_collector.py | kz/powerfulseal | 24276dd670777a72fed1780539ffe03f3bea63b9 | [
"Apache-2.0"
] | null | null | null | powerfulseal/metriccollectors/prometheus_collector.py | kz/powerfulseal | 24276dd670777a72fed1780539ffe03f3bea63b9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from prometheus_client import Counter
from powerfulseal.metriccollectors import AbstractCollector
from powerfulseal.metriccollectors.collector import NODE_SOURCE, POD_SOURCE
STATUS_SUCCESS = 'success'
STATUS_FAILURE = 'failure'
# Define Prometheus metrics to be stored in the default registry
POD_KILLS_METRIC_NAME = 'seal_pod_kills_total'
POD_KILLS = Counter(POD_KILLS_METRIC_NAME,
'Number of pods killed (including failures)',
['status', 'namespace', 'name'])
NODE_STOPS_METRIC_NAME = 'seal_nodes_stopped_total'
NODE_STOPS = Counter(NODE_STOPS_METRIC_NAME,
'Number of nodes stopped (including failures)',
['status', 'uid', 'name'])
EXECUTE_FAILED_METRIC_NAME = 'seal_execute_failed_total'
EXECUTE_FAILURES = Counter(EXECUTE_FAILED_METRIC_NAME,
'Increasing counter for command execution failures',
['uid', 'name'])
FILTERED_TO_EMPTY_SET_METRIC_NAME = 'seal_empty_filter_total'
FILTERED_TO_EMPTY_SET = Counter(FILTERED_TO_EMPTY_SET_METRIC_NAME,
'Increasing counter for cases where filtering '
'returns an empty result')
PROBABILITY_FILTER_NOT_PASSED_METRIC_NAME = 'seal_probability_filter_not_passed_total'
PROBABILITY_FILTER_NOT_PASSED = Counter(PROBABILITY_FILTER_NOT_PASSED_METRIC_NAME,
'Increasing counter for cases where the'
' probability filter does not pass any '
'nodes')
MATCHED_TO_EMPTY_SET_METRIC_NAME = 'seal_empty_match_total'
MATCHED_TO_EMPTY_SET = Counter(MATCHED_TO_EMPTY_SET_METRIC_NAME,
'Increasing counter for cases where matching '
'returns an empty result',
['source'])
| 42.595506 | 86 | 0.69665 |
ce796d88eb98f929fefba1eaa8a093ed6e266e4a | 1,285 | py | Python | icm/__main__.py | MCasari-PMEL/EDD-ICMGUI | 3210e7bb74ff2ace6e1c8c0bf132ecae5713141b | [
"MIT"
] | null | null | null | icm/__main__.py | MCasari-PMEL/EDD-ICMGUI | 3210e7bb74ff2ace6e1c8c0bf132ecae5713141b | [
"MIT"
] | 3 | 2018-01-08T16:44:33.000Z | 2018-01-08T16:47:55.000Z | icm/__main__.py | MCasari-PMEL/EDD-ICMGUI | 3210e7bb74ff2ace6e1c8c0bf132ecae5713141b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, time, serial, json
import numpy as np
import pyqtgraph as pg
import pyqtgraph.console
from PyQt5.QtCore import pyqtSignal, QObject
from pyqtgraph.Qt import QtCore, QtGui
from pyqtgraph.dockarea import *
from icm.ui_clock import *
from icm.ui_createfile import *
from icm.ui_parameter import *
from icm.ui_serial import *
from icm.ui_qicmgui import Ui_QIcmGuiMainWindow
def main():
app = QtGui.QApplication([])
window = QIcmGuiMainWindow()
window.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
#if __name__ != "__main__":
# raise ImportError('this module should not be imported')
| 22.54386 | 65 | 0.662257 |
ce7a9f4356e5b101ec971a15e988ba01f163fc67 | 1,603 | py | Python | tests/unit/plugins/widgets/conftest.py | pauleveritt/kaybee | a00a718aaaa23b2d12db30dfacb6b2b6ec84459c | [
"Apache-2.0"
] | 2 | 2017-11-08T19:55:57.000Z | 2018-12-21T12:41:41.000Z | tests/unit/plugins/widgets/conftest.py | pauleveritt/kaybee | a00a718aaaa23b2d12db30dfacb6b2b6ec84459c | [
"Apache-2.0"
] | null | null | null | tests/unit/plugins/widgets/conftest.py | pauleveritt/kaybee | a00a718aaaa23b2d12db30dfacb6b2b6ec84459c | [
"Apache-2.0"
] | 1 | 2018-10-13T08:59:29.000Z | 2018-10-13T08:59:29.000Z | import dectate
import pytest
from kaybee.plugins.widgets.directive import WidgetDirective
from kaybee.plugins.widgets.action import WidgetAction
| 22.263889 | 75 | 0.679975 |
ce7bc7b64a4d3cbc613ea8cb55194b8c8ec890ce | 1,751 | py | Python | eeyore/models/model.py | papamarkou/eeyore | 4cd9b5a619cd095035aa93f348d1c937629aa8a3 | [
"MIT"
] | 6 | 2020-04-22T18:56:46.000Z | 2021-09-09T15:57:48.000Z | eeyore/models/model.py | papamarkou/eeyore | 4cd9b5a619cd095035aa93f348d1c937629aa8a3 | [
"MIT"
] | 19 | 2019-11-14T21:22:21.000Z | 2020-10-31T16:18:36.000Z | eeyore/models/model.py | scidom/eeyore | 4cd9b5a619cd095035aa93f348d1c937629aa8a3 | [
"MIT"
] | null | null | null | import hashlib
import torch
import torch.nn as nn
| 31.267857 | 117 | 0.561965 |
ce7c235a673286d6890334627fb2a0108f4ba40f | 1,129 | py | Python | Curso/Challenges/URI/1827SquareArrayIV.py | DavidBitner/Aprendizado-Python | e1dcf18f9473c697fc2302f34a2d3e025ca6c969 | [
"MIT"
] | null | null | null | Curso/Challenges/URI/1827SquareArrayIV.py | DavidBitner/Aprendizado-Python | e1dcf18f9473c697fc2302f34a2d3e025ca6c969 | [
"MIT"
] | null | null | null | Curso/Challenges/URI/1827SquareArrayIV.py | DavidBitner/Aprendizado-Python | e1dcf18f9473c697fc2302f34a2d3e025ca6c969 | [
"MIT"
] | null | null | null | while True:
try:
dados = []
matriz = []
n = int(input())
for linha in range(0, n):
for coluna in range(0, n):
dados.append(0)
matriz.append(dados[:])
dados.clear()
# Numeros na diagonal
for diagonal_principal in range(0, n):
matriz[diagonal_principal][diagonal_principal] = 2
for diagonal_secundaria in range(0, n):
matriz[diagonal_secundaria][n - 1 - diagonal_secundaria] = 3
# Matriz do numero 1
for linha in range(n // 3, n - n // 3):
for coluna in range(n // 3, n - n // 3):
matriz[linha][coluna] = 1
# Matriz do numero 4
matriz[n // 2][n // 2] = 4
# Print da Matriz completa
for linha in range(0, len(matriz)):
for coluna in range(0, len(matriz)):
if coluna == 0:
print(f"{matriz[linha][coluna]}", end="")
else:
print(f"{matriz[linha][coluna]}", end="")
print()
print()
except EOFError:
break
| 31.361111 | 72 | 0.478299 |
ce7c570a565ac358f3c0cebb92e2e6aa904f3655 | 17,145 | py | Python | HetSANN_MRV/execute_sparse.py | xhhszc/hetsann | 432ce7493331cc393ff90af0e03a445e758919ea | [
"Apache-2.0"
] | 116 | 2019-12-10T02:14:37.000Z | 2022-02-23T09:22:13.000Z | HetSANN_MRV/execute_sparse.py | xhhszc/hetsann | 432ce7493331cc393ff90af0e03a445e758919ea | [
"Apache-2.0"
] | 6 | 2020-01-07T00:04:00.000Z | 2021-07-30T17:40:27.000Z | HetSANN_MRV/execute_sparse.py | xhhszc/hetsann | 432ce7493331cc393ff90af0e03a445e758919ea | [
"Apache-2.0"
] | 35 | 2019-12-10T02:15:45.000Z | 2021-11-15T09:44:31.000Z | import os
import time
import random
import scipy.sparse as sp
import numpy as np
import tensorflow as tf
import argparse
from models import SpHGAT
from utils import process
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', help='Dataset.', default='imdb', type=str)
parser.add_argument('--epochs', help='Epochs.', default=100000, type=int)
parser.add_argument('--patience', help='Patience for early stopping.', default=100, type=int)
parser.add_argument('--lr', help='Learning rate.', default=0.005, type=float)
parser.add_argument('--l2_coef', help='Weight decay.', default=0.0005, type=float)
parser.add_argument('--dropout', help='Dropout.', default=0.6, type=float)
parser.add_argument('--train_rate', help='Label rate for training.', default=0.1, type=float)
parser.add_argument('--seed', help='Random seed for data splitting.', default=None, type=int)
parser.add_argument('--layers', help='Number of layers.', default=2, type=int)
parser.add_argument('--hid', help='Number of hidden units per head in each layer.',
nargs='*', default=[8, 8], type=int)
parser.add_argument('--heads', help='Number of attention heads in each layer.',
nargs='*', default=[8, 1], type=int)
parser.add_argument('--residue', help='Using residue.', action='store_true')
parser.add_argument('--repeat', help='Repeat.', default=10, type=int)
parser.add_argument('--random_feature', help='Random features', action='store_true')
parser.add_argument('--target_node', help='index of target nodes for classification.',
nargs='*', default=[0, 1], type=int)
parser.add_argument('--target_is_multilabels', help='each type of target node for classification is multi-labels or not.(0 means not else means yes)',
nargs='*', default=[0, 1], type=int)
parser.add_argument('--saved_model_suffix', help='to splite checkpoint by suffix', default="", type=str)
parser.add_argument('--no_attn_reg', help='Do not use edge direction regularization', action='store_true')
parser.add_argument('--simple_inner', help='Use original inner product', action='store_true')
parser.add_argument('--loop_coef', help='Coefficient for regularization.', default=1e-3, type=float)
parser.add_argument('--inv_coef', help='Coefficient for regularization.', default=1e-3, type=float)
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
args= parser.parse_args()
dataset = args.dataset
checkpt_file = 'pre_trained/{}/{}/{}.ckpt'.format(dataset, args.saved_model_suffix, dataset)
checkpt_file = checkpt_file.replace('//', '/')
process.mkdir(os.path.split(checkpt_file)[0])
# training params
batch_size = 1
train_rate = args.train_rate
seed = args.seed
nb_epochs = args.epochs
patience = args.patience
lr = args.lr # learning rate
l2_coef = args.l2_coef # weight decay
dropout = args.dropout
repeat = args.repeat
random_feature = args.random_feature
target_node = args.target_node
is_multilabel = [False if t==0 else True for t in args.target_is_multilabels]
loop_coef = args.loop_coef
inv_coef = args.inv_coef
layers = args.layers
hid = args.hid
if len(hid) == 1:
hid_units = hid * layers
elif len(hid) == layers:
hid_units = hid
heads = args.heads
if len(heads) == 1:
n_heads = heads * layers
elif len(heads) == 2:
n_heads = [heads[0]] * (layers - 1) + [heads[1]]
elif len(heads) == layers:
n_heads = heads
residual = args.residue # False
nonlinearity = tf.nn.elu
model = SpHGAT
no_attn_reg = args.no_attn_reg
simple_inner = args.simple_inner
random.seed(seed) # random seed for random data split only
print('Dataset: ' + dataset)
print('Train rate: ' + str(train_rate))
print('----- Opt. hyperparams -----')
print('lr: ' + str(lr))
print('l2_coef: ' + str(l2_coef))
print('----- Archi. hyperparams -----')
print('nb. layers: ' + str(len(hid_units)))
print('nb. units per layer: ' + str(hid_units))
print('nb. attention heads: ' + str(n_heads))
print('residual: ' + str(residual))
print('nonlinearity: ' + str(nonlinearity))
print('model: ' + str(model))
print('target nodes: ', target_node)
print('is_multilabel: ', is_multilabel)
print('loop_coef:', loop_coef)
print('inv_coef:', inv_coef)
sparse = True
metr_num = 2
total_vl_acc = np.array([0.]*(len(target_node)*metr_num)) # should be array
total_ts_acc = np.array([0.]*(len(target_node)*metr_num)) # should be array
for repeat_i in range(repeat):
print('Run #' + str(repeat_i) + ':')
adj, adj_type, edge_list, features, y_train, y_val, y_test,\
train_mask, val_mask, test_mask = process.load_heterogeneous_data(dataset, train_rate=train_rate, target_node=target_node)
features = [process.preprocess_features(feature)[0] for feature in features]
nb_nodes = [feature.shape[0] for feature in features]
ft_size = [feature.shape[1] for feature in features]
nb_classes = [y.shape[1] for y in y_train]
features = [feature[np.newaxis] for feature in features]
y_train = [y[np.newaxis] for y in y_train]
y_val = [y[np.newaxis] for y in y_val]
y_test = [y[np.newaxis] for y in y_test]
train_mask = [m[np.newaxis] for m in train_mask]
val_mask = [m[np.newaxis] for m in val_mask]
test_mask = [m[np.newaxis] for m in test_mask]
if random_feature:
features[0] = np.random.standard_normal(features[0].shape)
if sparse:
biases = [process.preprocess_adj_hete(a) for a in adj] # transposed here
else:
biases = []
for a in adj:
a = a.todense()
a = a[np.newaxis]
if no_attn_reg:
edge_list = [(i,) for i in range(len(adj_type))]
if simple_inner:
edge_list = []
with tf.Graph().as_default():
with tf.name_scope('input'):
ftr_in = [tf.placeholder(dtype=tf.float32,
shape=(batch_size, nb, ft)) for nb, ft in zip(nb_nodes, ft_size)]
if sparse:
bias_in = [tf.sparse_placeholder(dtype=tf.float32) for _ in biases]
else:
bias_in = None
lbl_in = [tf.placeholder(dtype=tf.int32, shape=(batch_size, nb_nodes[target_node[i]], nb_classes[i])) for i in range(len(nb_classes))]
msk_in = [tf.placeholder(dtype=tf.int32, shape=(batch_size, nb_nodes[target_node[i]])) for i in range(len(nb_classes))]
attn_drop = tf.placeholder(dtype=tf.float32, shape=())
ffd_drop = tf.placeholder(dtype=tf.float32, shape=())
is_train = tf.placeholder(dtype=tf.bool, shape=())
logits = model.inference(ftr_in, nb_classes, nb_nodes, is_train,
attn_drop, ffd_drop, target_nodes=target_node,
bias_mat=bias_in, adj_type=adj_type,
edge_list=edge_list,
hid_units=hid_units, n_heads=n_heads,
residual=residual, activation=nonlinearity)
with tf.name_scope('loss_acc'):
loss, accuracy, acc_name, acc_full_name = [], [], [], []
all_class_loss = 0.0
for tn in range(len(target_node)):
tn_logits = logits[tn]
tn_labels = lbl_in[tn]
tn_masks = msk_in[tn]
tn_is_multilabel = is_multilabel[tn]
tn_loss, tn_accuracy, tn_acc_name, tn_acc_full_name = get_loss_acc(tn_logits, tn_labels, tn_masks, is_multilabel=tn_is_multilabel)
loss.append(tn_loss)
accuracy.extend(tn_accuracy)
acc_name.extend(tn_acc_name)
acc_full_name.extend(tn_acc_full_name)
all_class_loss += tn_loss
loss_loop = tf.add_n(tf.get_collection('loss_loop')) * loop_coef
loss_inv= tf.add_n(tf.get_collection('loss_inv')) * inv_coef
train_op = model.training(all_class_loss + loss_loop + loss_inv, lr, l2_coef)
saver = tf.train.Saver()
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
vlss_mn = np.inf
vacc_mx = 0.0
curr_step = 0
with tf.Session(config=config) as sess:
sess.run(init_op)
vacc_early_model = 0.0
vlss_early_model = 0.0
vacc_each_early_model = np.array([0.]*(len(target_node)*metr_num))
for epoch in range(nb_epochs):
# summary information
train_loss_avg = 0
train_acc_avg = 0
val_loss_avg = 0
val_acc_avg = 0
# for each class information
train_loss_each = []
train_acc_each = []
val_loss_each = []
val_acc_each = []
tr_step = 0
tr_size = features[0].shape[0]
while tr_step * batch_size < tr_size:
if sparse:
fd = {i: d for i, d in zip(ftr_in, features)}
fd.update({i: d for i, d in zip(bias_in, biases)})
else:
fd = {i: d[tr_step * batch_size:(tr_step + 1) * batch_size]
for i, d in zip(ftr_in, features)}
fd.update({i: d[tr_step * batch_size:(tr_step + 1) * batch_size]
for i, d in zip(bias_in, biases)})
fd.update({i:d[tr_step*batch_size:(tr_step+1)*batch_size] for i, d in zip(lbl_in, y_train)})
fd.update({i:d[tr_step*batch_size:(tr_step+1)*batch_size] for i, d in zip(msk_in, train_mask)})
fd.update({is_train: True})
fd.update({attn_drop: dropout, ffd_drop:dropout})
_, loss_list_tr, acc_list_tr, loss_loop_tr, loss_inv_tr = sess.run([train_op, loss, accuracy, loss_loop, loss_inv], feed_dict=fd)
train_loss_each.append(np.array(loss_list_tr))
train_acc_each.append(np.array(acc_list_tr))
train_loss_avg += np.sum(np.array(loss_list_tr))
train_acc_avg += np.sum(np.array(acc_list_tr))
tr_step += 1
vl_step = 0
vl_size = features[0].shape[0]
while vl_step * batch_size < vl_size:
if sparse:
fd = {i: d for i, d in zip(ftr_in, features)}
fd.update({i: d for i, d in zip(bias_in, biases)})
else:
fd = {i: d[vl_step * batch_size:(vl_step + 1) * batch_size]
for i, d in zip(ftr_in, features)}
fd.update({i: d[vl_step * batch_size:(vl_step + 1) * batch_size]
for i, d in zip(bias_in, biases)})
fd.update({i:d[vl_step*batch_size:(vl_step+1)*batch_size] for i, d in zip(lbl_in, y_val)})
fd.update({i:d[vl_step*batch_size:(vl_step+1)*batch_size] for i, d in zip(msk_in, val_mask)})
fd.update({is_train: False})
fd.update({attn_drop: 0.0, ffd_drop:0.0})
loss_list_vl, acc_list_vl = sess.run([loss, accuracy], feed_dict=fd)
acc_list_vl = [0. if np.isnan(acc_vl) else acc_vl for acc_vl in acc_list_vl]
val_loss_each.append(np.array(loss_list_vl))
val_acc_each.append(np.array(acc_list_vl))
val_loss_avg += np.sum(np.array(loss_list_vl))
val_acc_avg += np.sum(np.array(acc_list_vl))
vl_step += 1
print('Training %s: loss = %.5f, %s = %.5f, loss_loop = %.5f, loss_inv = %.5f | Val: loss = %.5f, %s = %.5f' %
(epoch, train_loss_avg/tr_step, 'acc/F1', train_acc_avg/tr_step,
loss_loop_tr, loss_inv_tr,
val_loss_avg/vl_step, 'acc/F1', val_acc_avg/vl_step))
print_eachclass_info(train_loss_each, train_acc_each, val_loss_each, val_acc_each, acc_name)
if val_acc_avg/vl_step > vacc_mx or val_loss_avg/vl_step < vlss_mn:
if val_acc_avg/vl_step > vacc_mx and val_loss_avg/vl_step < vlss_mn:
vacc_early_model = val_acc_avg/vl_step
vlss_early_model = val_loss_avg/vl_step
vacc_each_early_model = np.mean(np.array(val_acc_each), axis=0)
saver.save(sess, checkpt_file)
print("saved model as %s"%checkpt_file)
vacc_mx = np.max((val_acc_avg/vl_step, vacc_mx))
vlss_mn = np.min((val_loss_avg/vl_step, vlss_mn))
curr_step = 0
else:
curr_step += 1
if curr_step == patience:
print('Early stop! Min loss: ', vlss_mn,
', Max', 'acc/F1', ': ', vacc_mx)
print('Early stop model validation loss: ', vlss_early_model,
', ', 'acc/F1', ': ', vacc_early_model)
total_vl_acc += vacc_each_early_model
break
if curr_step < patience:
print('Min loss: ', vlss_mn, ', Max', 'acc/F1', ': ', vacc_mx)
print('model validation loss: ', vlss_early_model, ', ', 'acc/F1', ': ', vacc_early_model)
total_vl_acc += vacc_each_early_model
saver.restore(sess, checkpt_file)
ts_size = features[0].shape[0]
ts_step = 0
test_loss_each = []
test_acc_each = []
while ts_step * batch_size < ts_size:
if sparse:
fd = {i: d for i, d in zip(ftr_in, features)}
fd.update({i: d for i, d in zip(bias_in, biases)})
else:
fd = {i: d[ts_step * batch_size:(ts_step + 1) * batch_size]
for i, d in zip(ftr_in, features)}
fd.update({i: d[ts_step * batch_size:(ts_step + 1) * batch_size]
for i, d in zip(bias_in, biases)})
fd.update({i:d[ts_step*batch_size:(ts_step+1)*batch_size] for i, d in zip(lbl_in, y_test)})
fd.update({i:d[ts_step*batch_size:(ts_step+1)*batch_size] for i, d in zip(msk_in, test_mask)})
fd.update({is_train: False})
fd.update({attn_drop: 0.0, ffd_drop:0.0})
loss_list_ts, acc_list_ts = sess.run([loss, accuracy], feed_dict=fd)
test_loss_each.append(np.array(loss_list_ts))
test_acc_each.append(np.array(acc_list_ts))
ts_step += 1
test_loss_each = np.mean(np.array(test_loss_each), axis=0)
test_acc_each = np.mean(np.array(test_acc_each), axis=0)
print('*'*10,'Test information:', '*'*10)
for e in range(len(target_node)):
print('target %s: loss: %.3f, %s:%.5f, %s:%.5f' % (e, test_loss_each[e], acc_full_name[e*metr_num], test_acc_each[e*metr_num], acc_full_name[e*metr_num+1], test_acc_each[e*metr_num+1]))
total_ts_acc += test_acc_each
sess.close()
print('Validation:', total_vl_acc/repeat, 'Test:', total_ts_acc/repeat)
| 49.267241 | 201 | 0.594984 |
ce7c996f310c0d3f46033c26982db618d4c517fe | 230 | py | Python | pythontutor-ru/02_ifelse/01_minimum.py | ornichola/learning-new | e567218d8887805e38b1361715d5e3bd51a6bcaf | [
"Unlicense"
] | 2 | 2019-05-24T20:10:16.000Z | 2020-07-11T06:06:43.000Z | pythontutor-ru/02_ifelse/01_minimum.py | ornichola/learning-new | e567218d8887805e38b1361715d5e3bd51a6bcaf | [
"Unlicense"
] | null | null | null | pythontutor-ru/02_ifelse/01_minimum.py | ornichola/learning-new | e567218d8887805e38b1361715d5e3bd51a6bcaf | [
"Unlicense"
] | 21 | 2019-03-11T20:25:05.000Z | 2022-02-28T13:53:10.000Z | '''
http://pythontutor.ru/lessons/ifelse/problems/minimum/
. .
'''
val_01 = int(input())
val_02 = int(input())
if val_01 > val_02:
print(val_02)
else:
print(val_01)
| 19.166667 | 59 | 0.704348 |
ce7cd3565831e5b22995deb48bc2a2fd08f936c7 | 1,705 | py | Python | src/ui/license.py | Schrut/PRT | 09d136cc75ef5e4e79e72ade07c5d64fabd097f2 | [
"MIT"
] | 2 | 2018-02-20T11:53:36.000Z | 2018-05-12T10:01:27.000Z | src/ui/license.py | Schrut/PRT | 09d136cc75ef5e4e79e72ade07c5d64fabd097f2 | [
"MIT"
] | null | null | null | src/ui/license.py | Schrut/PRT | 09d136cc75ef5e4e79e72ade07c5d64fabd097f2 | [
"MIT"
] | null | null | null | from PyQt5.QtWidgets import QWidget, QMessageBox | 50.147059 | 100 | 0.753079 |
ce7d9b34d754c223a723c3b5526adb48b8a8f699 | 2,021 | py | Python | python/GameFlow/console/Console.py | Silversmithe/Connect4 | dfdf89196e2eae6b40d2f637e2a47e03e2447534 | [
"Apache-2.0"
] | null | null | null | python/GameFlow/console/Console.py | Silversmithe/Connect4 | dfdf89196e2eae6b40d2f637e2a47e03e2447534 | [
"Apache-2.0"
] | null | null | null | python/GameFlow/console/Console.py | Silversmithe/Connect4 | dfdf89196e2eae6b40d2f637e2a47e03e2447534 | [
"Apache-2.0"
] | null | null | null | import sys
import os
| 25.2625 | 73 | 0.521029 |
ce7dcfa1ba0e4b637228e061f83bafab463cb61b | 766 | py | Python | servoblst.py | ForToffee/MeArm | 90fdd94fd96b53b3579c6d8132e8586188e3d344 | [
"MIT"
] | 1 | 2016-04-04T17:39:54.000Z | 2016-04-04T17:39:54.000Z | servoblst.py | ForToffee/MeArm | 90fdd94fd96b53b3579c6d8132e8586188e3d344 | [
"MIT"
] | null | null | null | servoblst.py | ForToffee/MeArm | 90fdd94fd96b53b3579c6d8132e8586188e3d344 | [
"MIT"
] | null | null | null | import time
import os
servos = {}
| 22.529412 | 68 | 0.680157 |
ce801d6bd90e41604f5f09f5bc95fde822da704c | 728 | py | Python | TASQ/problems/forms.py | harshraj22/smallProjects | b31e9173c60abb778a1c196609757704ec9c3750 | [
"MIT"
] | 2 | 2019-11-18T14:13:57.000Z | 2020-11-08T06:50:32.000Z | TASQ/problems/forms.py | harshraj22/smallProjects | b31e9173c60abb778a1c196609757704ec9c3750 | [
"MIT"
] | 16 | 2019-11-12T13:08:01.000Z | 2022-02-27T10:51:28.000Z | TASQ/problems/forms.py | harshraj22/smallProjects | b31e9173c60abb778a1c196609757704ec9c3750 | [
"MIT"
] | null | null | null | from django import forms
from .models import Problem
| 28 | 89 | 0.634615 |
ce80328129a238de658580aec1efceb41862bd9d | 2,404 | py | Python | recipes/make_spreadsheet_with_named_ranges.py | mat-m/odfdo | a4a509a056517ecf91449e029b36fe9a8ffa8ed0 | [
"Apache-2.0"
] | null | null | null | recipes/make_spreadsheet_with_named_ranges.py | mat-m/odfdo | a4a509a056517ecf91449e029b36fe9a8ffa8ed0 | [
"Apache-2.0"
] | null | null | null | recipes/make_spreadsheet_with_named_ranges.py | mat-m/odfdo | a4a509a056517ecf91449e029b36fe9a8ffa8ed0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Create a spreadsheet with two tables, using some named ranges.
"""
import os
from odfdo import Document, Table
if __name__ == "__main__":
document = Document('spreadsheet')
body = document.body
table = Table("First Table")
body.append(table)
# populate the table :
for i in range(10):
table.set_value((1, i), (i + 1)**2)
table.set_value("A11", "Total:")
# lets define a named range for the 10 values :
crange = "B1:B10"
name = "squares_values"
table_name = table.name
table.set_named_range(name, crange, table_name)
# we can define a single cell range, using notation "B11" or (1, 10) :
table.set_named_range('total', (1, 10), table_name)
# get named range values :
values = table.get_named_range('squares_values').get_values(flat=True)
# set named range value :
result = sum(values)
table.get_named_range('total').set_value(result)
# lets use the named ranges from a second table :
table2 = Table("Second Table")
body.append(table2)
named_range1 = table2.get_named_range('total')
table2.set_value('A1', "name:")
table2.set_value('B1', named_range1.name)
table2.set_value('A2', "range:")
table2.set_value('B2', str(named_range1.crange))
table2.set_value('A3', "from table:")
table2.set_value('B3', named_range1.table_name)
table2.set_value('A4', "content:")
table2.set_value('B4', named_range1.get_value())
named_range2 = table2.get_named_range('squares_values')
table2.set_value('D1', "name:")
table2.set_value('E1', named_range2.name)
table2.set_value('D2', "range:")
table2.set_value('E2', str(named_range2.crange))
table2.set_value('D3', "from table:")
table2.set_value('E3', named_range2.table_name)
table2.set_value('D4', "content:")
# using "E4:4" notaion is a little hack for the area starting at E4 on row 4
table2.set_values(
values=[named_range2.get_values(flat=True)], coord='E4:4')
print("Content of the table1:")
print(table.name)
print(table.to_csv())
print(table2.name)
print(table2.to_csv())
# of course named ranges are stored in the document :
if not os.path.exists('test_output'):
os.mkdir('test_output')
output = os.path.join('test_output', "my_spreadsheet_with_named_range.ods")
document.save(target=output, pretty=True)
| 33.388889 | 80 | 0.671381 |