hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
837739a005a237684a780c9335e0ae3dc01c7873 | 730 | py | Python | configProvider.py | misc77/dsegenerator | 3fbaed79ff2809de5b7efb3ac86acf8ffb45afe4 | [
"MIT"
] | null | null | null | configProvider.py | misc77/dsegenerator | 3fbaed79ff2809de5b7efb3ac86acf8ffb45afe4 | [
"MIT"
] | null | null | null | configProvider.py | misc77/dsegenerator | 3fbaed79ff2809de5b7efb3ac86acf8ffb45afe4 | [
"MIT"
] | null | null | null | from resources import Resources
import configparser
| 29.2 | 109 | 0.642466 |
8378ea628ccc21371175ad9061b5e8ae8ef0a59b | 3,041 | py | Python | H5_News_Tracker/gui/ticker_window.py | Mouse-Diplodicus/H5-NewsTracker | a771105463db6757171ea28e847208960c7ac598 | [
"BSD-2-Clause"
] | null | null | null | H5_News_Tracker/gui/ticker_window.py | Mouse-Diplodicus/H5-NewsTracker | a771105463db6757171ea28e847208960c7ac598 | [
"BSD-2-Clause"
] | 20 | 2020-02-27T01:39:28.000Z | 2021-12-13T20:39:17.000Z | H5_News_Tracker/gui/ticker_window.py | Mouse-Diplodicus/H5-NewsTracker | a771105463db6757171ea28e847208960c7ac598 | [
"BSD-2-Clause"
] | null | null | null | """
Program displays a window with text using Tkinter when run.
"""
import tkinter
import webbrowser
from tkinter import font
from tkinter import ttk
| 39.493506 | 117 | 0.64584 |
837a6c35581467319f1075c05fa1224fd922d268 | 3,562 | py | Python | Yu/Web.py | Hiroshiba/KotohiraYu | 1ab5a5376e01aae5c730ae163298e1c34980b586 | [
"MIT"
] | null | null | null | Yu/Web.py | Hiroshiba/KotohiraYu | 1ab5a5376e01aae5c730ae163298e1c34980b586 | [
"MIT"
] | 1 | 2019-05-18T13:16:25.000Z | 2019-05-18T13:16:25.000Z | Yu/Web.py | Hiroshiba/KotohiraYu | 1ab5a5376e01aae5c730ae163298e1c34980b586 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import configparser
import glob
import sqlite3
import traceback
import json
from bottle import route, run, auth_basic, abort, response
from sqlite3 import OperationalError
config = configparser.ConfigParser()
config.read('config/config.ini')
def WEBRUN():
run(port=7878)
| 31.522124 | 174 | 0.571028 |
837a8a805f3fa86050a9f939d897eba29f04412d | 1,713 | py | Python | urduhack/stop_words.py | fahdrazavi/urduhack | a2370b0d8c1ee3f260ff90ca5056f45ed9b73ee8 | [
"MIT"
] | null | null | null | urduhack/stop_words.py | fahdrazavi/urduhack | a2370b0d8c1ee3f260ff90ca5056f45ed9b73ee8 | [
"MIT"
] | null | null | null | urduhack/stop_words.py | fahdrazavi/urduhack | a2370b0d8c1ee3f260ff90ca5056f45ed9b73ee8 | [
"MIT"
] | null | null | null | # coding: utf8
"""
Complete collection of stopwords for the Urdu language.
Maintainer: Ikram Ali(mrikram1989@gmail.com)
version = 2019.04.07
Source = https://github.com/urduhack/urdu-stopwords
"""
# Urdu Language Stop words list
STOP_WORDS = frozenset("""
""".split())
| 57.1 | 101 | 0.76474 |
837ad353a450f945fc5a6b024a1362ed9689c173 | 3,982 | py | Python | src/sequencemodel_09.py | PatrikValkovic/neural-networks-step-by-step | 86f5f98de1dbeb3a69ba101f06e303dbaabe6b8e | [
"MIT"
] | 1 | 2021-02-04T09:01:44.000Z | 2021-02-04T09:01:44.000Z | src/sequencemodel_09.py | alexdevero/neural-networks-step-by-step | 55e12e82c78f9be2d942fc1bff252b92fb61c1dd | [
"MIT"
] | null | null | null | src/sequencemodel_09.py | alexdevero/neural-networks-step-by-step | 55e12e82c78f9be2d942fc1bff252b92fb61c1dd | [
"MIT"
] | 2 | 2021-01-30T15:17:50.000Z | 2021-02-04T09:01:45.000Z | import numpy as np
from progressbar import progressbar
| 45.770115 | 120 | 0.566298 |
837c932ac45c8e6207580a84130808a7f51d5177 | 685 | py | Python | django_sso_app/app/views.py | paiuolo/django-sso-app | 75b96c669dc0b176dc77e08f018a3e97d259f636 | [
"MIT"
] | 1 | 2021-11-16T15:16:08.000Z | 2021-11-16T15:16:08.000Z | django_sso_app/app/views.py | paiuolo/django-sso-app | 75b96c669dc0b176dc77e08f018a3e97d259f636 | [
"MIT"
] | null | null | null | django_sso_app/app/views.py | paiuolo/django-sso-app | 75b96c669dc0b176dc77e08f018a3e97d259f636 | [
"MIT"
] | null | null | null | from django.http import HttpResponseRedirect
from django.views import View
from ..core import app_settings
| 26.346154 | 74 | 0.694891 |
837cbe3de90b812a9c90cd64972dc52fe2924f87 | 8,989 | py | Python | tests/components/multimatic/__init__.py | thomasgermain/home-assistant | 69a8ba678e0276bc1bfde0f3d9e9d3682209f962 | [
"Apache-2.0"
] | 7 | 2019-08-15T13:36:58.000Z | 2020-03-18T10:46:29.000Z | tests/components/multimatic/__init__.py | thomasgermain/home-assistant | 69a8ba678e0276bc1bfde0f3d9e9d3682209f962 | [
"Apache-2.0"
] | 73 | 2020-10-01T06:39:39.000Z | 2022-03-31T06:16:15.000Z | tests/components/multimatic/__init__.py | thomasgermain/home-assistant | 69a8ba678e0276bc1bfde0f3d9e9d3682209f962 | [
"Apache-2.0"
] | 4 | 2019-10-26T14:25:13.000Z | 2020-11-10T11:00:18.000Z | """The tests for multimatic integration."""
from __future__ import annotations
import datetime
from typing import Any
from unittest.mock import AsyncMock, patch
from pymultimatic.model import (
ActiveFunction,
BoilerStatus,
Circulation,
Device,
Dhw,
EmfReport,
Error,
FacilityDetail,
HolidayMode,
HotWater,
HvacStatus,
OperatingModes,
Report,
Room,
SettingModes,
TimePeriodSetting,
TimeProgram,
TimeProgramDay,
Ventilation,
Zone,
ZoneHeating,
)
from pymultimatic.systemmanager import SystemManager
from homeassistant import config_entries
from homeassistant.components.multimatic import COORDINATORS, DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.util import utcnow
from tests.common import async_fire_time_changed
VALID_MINIMAL_CONFIG = {CONF_USERNAME: "test", CONF_PASSWORD: "test"}
def zones(with_rb=True):
"""Get zones."""
zones = []
heating = ZoneHeating(
time_program=time_program(SettingModes.NIGHT, None),
operating_mode=OperatingModes.AUTO,
target_low=22,
target_high=30,
)
zones.append(
Zone(
id="zone_1",
name="Zone 1",
temperature=25,
active_function=ActiveFunction.HEATING,
rbr=False,
heating=heating,
)
)
if with_rb:
zones.append(
Zone(
id="zone_2",
name="Zone rbr",
temperature=25,
active_function=ActiveFunction.HEATING,
rbr=True,
heating=heating,
)
)
return zones
def rooms():
"""Get rooms."""
room_device = Device("Device 1", "123456789", "VALVE", False, False)
return [
Room(
id="1",
name="Room 1",
time_program=time_program(),
temperature=22,
target_high=24,
operating_mode=OperatingModes.AUTO,
child_lock=False,
window_open=False,
devices=[room_device],
)
]
def dhw():
"""Get dhw."""
hot_water = HotWater(
id="dhw",
name="Hot water",
time_program=time_program(temp=None),
temperature=None,
target_high=40,
operating_mode=OperatingModes.AUTO,
)
circulation = Circulation(
id="dhw",
name="Circulation",
time_program=time_program(temp=None),
operating_mode=OperatingModes.AUTO,
)
return Dhw(hotwater=hot_water, circulation=circulation)
def report():
"""Get report."""
return Report(
device_name="VRC700 MultiMatic",
device_id="Control_SYS_MultiMatic",
unit="bar",
value=1.9,
name="Water pressure",
id="WaterPressureSensor",
)
def report_dhw():
"""Get report for dhw."""
return Report(
device_name="Control_DHW",
device_id="DomesticHotWaterTankTemperature",
unit="C",
value=45,
name="DomesticHotWaterTankTemperature",
id="DomesticHotWaterTankTemperature",
)
def reports():
"""Get reports."""
return [report()]
def ventilation():
"""Return ventilation."""
return Ventilation(
time_program=time_program(SettingModes.ON, 6),
operating_mode=OperatingModes.AUTO,
target_high=6,
target_low=2,
id="ventilation",
name="Ventilation",
)
def active_holiday_mode():
"""Return a active holiday mode."""
start = datetime.date.today() - datetime.timedelta(days=1)
end = datetime.date.today() + datetime.timedelta(days=1)
return HolidayMode(True, start, end, 15)
def time_program(heating_mode=SettingModes.OFF, temp=20):
"""Create a default time program."""
tp_day_setting = TimePeriodSetting("00:00", temp, heating_mode)
tp_day = TimeProgramDay([tp_day_setting])
tp_days = {
"monday": tp_day,
"tuesday": tp_day,
"wednesday": tp_day,
"thursday": tp_day,
"friday": tp_day,
"saturday": tp_day,
"sunday": tp_day,
}
return TimeProgram(tp_days)
def facility_detail():
"""Get facility detail."""
return FacilityDetail(
name="Home",
serial_number="12345",
firmware_version="1.2.3",
ethernet_mac="01:23:45:67:89:AB",
wifi_mac="23:45:67:89:0A:BC",
)
def hvac_status(with_error=False, with_status=True):
"""Get hvac status."""
boiler_status = None
if with_status:
boiler_status = BoilerStatus(
device_name="boiler",
title="Status",
status_code="1",
description="This is the status",
timestamp=datetime.datetime.now(),
hint="Do nothing",
)
errors = None
if with_error:
errors = [
Error(
device_name="Device",
title="Status",
status_code="99",
description="This is the error",
timestamp=datetime.datetime.now(),
)
]
return HvacStatus(
boiler_status=boiler_status,
errors=errors,
online="ONLINE",
update="UPDATE_NOT_PENDING",
)
def emf_reports():
"""Get emf reports."""
return [
EmfReport(
"flexoTHERM_PR_EBUS",
"VWF 117/4",
"HEAT_PUMP",
"COOLING",
"CONSUMED_ELECTRICAL_POWER",
1000,
datetime.date(2021, 1, 1),
datetime.date(2021, 1, 10),
)
]
def assert_entities_count(hass, count):
"""Count entities owned by the component."""
assert len(hass.states.async_entity_ids()) == count
| 26.206997 | 82 | 0.585938 |
837cd4561ed86c61a564513e1e29e4b4eaead664 | 4,877 | py | Python | test/e2e/test_200_header_invalid.py | elukey/mod_h2 | 3418fc31b8ffe9fe477899d60ccfdecdfac1df34 | [
"Apache-2.0"
] | null | null | null | test/e2e/test_200_header_invalid.py | elukey/mod_h2 | 3418fc31b8ffe9fe477899d60ccfdecdfac1df34 | [
"Apache-2.0"
] | null | null | null | test/e2e/test_200_header_invalid.py | elukey/mod_h2 | 3418fc31b8ffe9fe477899d60ccfdecdfac1df34 | [
"Apache-2.0"
] | null | null | null | #
# mod-h2 test suite
# check handling of invalid chars in headers
#
import copy
import os
import re
import sys
import time
import pytest
from datetime import datetime
from TestEnv import TestEnv
from TestHttpdConf import HttpdConf
| 43.159292 | 119 | 0.565717 |
837d850bff1c24037cf6a37770c38618903819c0 | 7,529 | py | Python | controller/controller.py | angelocarbone/MoDelS | 5bfee8d0b6e719c1d2445acf4e332597427ac906 | [
"MIT"
] | 1 | 2021-12-02T07:29:29.000Z | 2021-12-02T07:29:29.000Z | controller/controller.py | angelocarbone/MoDelS | 5bfee8d0b6e719c1d2445acf4e332597427ac906 | [
"MIT"
] | null | null | null | controller/controller.py | angelocarbone/MoDelS | 5bfee8d0b6e719c1d2445acf4e332597427ac906 | [
"MIT"
] | null | null | null | from scenarios import helper
from scenarios.builder import Builder
from model.enumerations import e_ExperienceFactor, e_MentalOrEmotionalFactor, e_PhyOrPhyFactor, e_EntityType, e_Relation, e_CausalFactorType
from model.knowledge_base import kb
from model.entities import Entity, CausalFactor
from model.utils import BoundingBox
from model import rule
| 37.272277 | 140 | 0.504582 |
837e4da85868086f6aef55e405fd04f2686a56f3 | 1,567 | py | Python | stan/data/data_lex.py | chappers/Stan | 61c189ab12ea50214390804cff5694ac51f8df35 | [
"MIT"
] | 1 | 2015-01-06T11:10:24.000Z | 2015-01-06T11:10:24.000Z | stan/data/data_lex.py | chappers/Stan | 61c189ab12ea50214390804cff5694ac51f8df35 | [
"MIT"
] | null | null | null | stan/data/data_lex.py | chappers/Stan | 61c189ab12ea50214390804cff5694ac51f8df35 | [
"MIT"
] | null | null | null | """
The :mod:`stan.data_lex` module is the lexer for SAS-like language.
"""
from pyparsing import *
from stan.data.data_expr import EXPR_, ID_, DATA, SET, RENAME, RUN, DROP, KEEP, SEMI_, LOGICAL_
# set up logic
dataStepStmt = Forward()
# data/set inline options
rename_stmt = (OneOrMore(Group(ID_ + Suppress("=") +
ID_ ))).setResultsName('rename')
drop_stmt = OneOrMore( ID_ ).setResultsName('drop')
keep_stmt = OneOrMore( ID_ ).setResultsName('keep')
dataset_opt_stmt = Optional("("+
Optional(Suppress(RENAME) + "=" + "(" + rename_stmt + ")") +
Optional(Suppress(DROP) + "=" + drop_stmt) +
Optional(Suppress(KEEP) + "=" + keep_stmt) +")")
# data step options (not inline)
opt_stmt = (
(Suppress(RENAME) + rename_stmt + SEMI_) |
(Suppress(KEEP) + keep_stmt + SEMI_) |
(Suppress(DROP) + drop_stmt + SEMI_)
#add by statement
)
# data step logic
s_stmt = Group(ID_ + Suppress("=") + ( LOGICAL_.setResultsName('logical') | EXPR_ ) + SEMI_)
# data set statements
data_stmt = Group(Suppress(DATA) + ID_.setResultsName('name') + dataset_opt_stmt.setResultsName('data opt')).setResultsName('data') + SEMI_
set_stmt = Group(Suppress(SET) + ID_.setResultsName('name') + dataset_opt_stmt.setResultsName('set opt')).setResultsName('set') + SEMI_
dataStepStmt << (data_stmt +
set_stmt +
(ZeroOrMore(opt_stmt) &
ZeroOrMore(s_stmt).setResultsName('stmt')) +
RUN + SEMI_)
| 35.613636 | 139 | 0.613912 |
837e63fb36e90c2f7dc83ee4de463a8b38b3fbca | 2,334 | py | Python | setup.py | JayDwayne/Neopo | 964e1a13ed016b5a74ccb33b7384a0f783100cd7 | [
"MIT"
] | null | null | null | setup.py | JayDwayne/Neopo | 964e1a13ed016b5a74ccb33b7384a0f783100cd7 | [
"MIT"
] | null | null | null | setup.py | JayDwayne/Neopo | 964e1a13ed016b5a74ccb33b7384a0f783100cd7 | [
"MIT"
] | null | null | null | import os
from platform import system
from setuptools import setup
from subprocess import run, PIPE, CalledProcessError
running_on_windows = system() == "Windows"
running_in_docker = os.path.isfile("/.dockerenv")
# Consistent version as AUR
try:
count = run(["git", "rev-list", "--count", "HEAD"],
stdout=PIPE, check=True).stdout.splitlines()[0].decode('utf-8')
commit = run(["git", "rev-parse", "--short", "HEAD"],
stdout=PIPE, check=True).stdout.splitlines()[0].decode('utf-8')
VERSION = "%s.%s" % (count, commit)
except CalledProcessError:
print("Could not determine package version with Git! Exiting...")
raise
# Additional files for *nix: completion, man page, etc.
share_files = [
('/usr/share/man/man1', ['man/neopo.1']),
('/usr/share/licenses/neopo', ['LICENSE']),
('/usr/share/neopo/scripts', ['scripts/POSTINSTALL']),
('/usr/share/bash-completion/completions', ['completion/neopo'])
]
# Skip share_files on Windows, docker, or when installing as non-root
if running_on_windows or running_in_docker or os.geteuid() != 0:
share_files=None
# Provide neopo, neopo-script, and particle commands
script_unix = ['scripts/unix/neopo',
'scripts/unix/neopo-script',
'scripts/unix/particle']
script_windows = ['scripts/windows/neopo.cmd',
'scripts/windows/neopo-script.cmd',
'scripts/windows/particle.cmd']
script_files = script_windows if running_on_windows else script_unix
# update version.py
with open(os.path.join('neopo', 'version.py'), 'w') as file:
file.writelines(['NEOPO_VERSION="%s"' % VERSION])
setup(
name='neopo',
version=VERSION,
description='A lightweight solution for local Particle development.',
long_description="""
Neopo is a Particle development management utility that simplifies the
installation and usage of Particle's toolchains on a variety of distributions.
It features options to build or flash projects, iterable commands, a scripting
interface, and Particle Workbench/CLI compatibility.""",
author='Nathan Robinson',
author_email='nrobinson2000@me.com',
url="https://neopo.xyz",
download_url='https://github.com/nrobinson2000/neopo',
license="MIT",
packages=['neopo'],
platforms=["Linux", "macOS", "Windows", "ARM"],
data_files=share_files,
scripts=script_files
)
| 35.363636 | 81 | 0.70437 |
837ecef31551741e285f87a84d5925f220afb694 | 2,045 | py | Python | jmetal/core/operator.py | LuckysonKhaidem/ProjectAlpha | e4b4779a8968a83f1e8add3490a4d2c4ad145d55 | [
"MIT"
] | 1 | 2020-05-26T18:57:31.000Z | 2020-05-26T18:57:31.000Z | jmetal/core/operator.py | LuckysonKhaidem/ProjectAlpha | e4b4779a8968a83f1e8add3490a4d2c4ad145d55 | [
"MIT"
] | null | null | null | jmetal/core/operator.py | LuckysonKhaidem/ProjectAlpha | e4b4779a8968a83f1e8add3490a4d2c4ad145d55 | [
"MIT"
] | 2 | 2019-01-08T11:52:52.000Z | 2020-05-25T13:21:26.000Z | from abc import ABCMeta, abstractmethod
from typing import TypeVar, Generic, List
S = TypeVar('S')
R = TypeVar('R')
"""
.. module:: Operator
:platform: Unix, Windows
:synopsis: Templates for operators.
.. moduleauthor:: Antonio J. Nebro <antonio@lcc.uma.es>
"""
| 21.989247 | 90 | 0.627873 |
8383163f22959bd98885d5ed979d31561a7823ce | 1,389 | py | Python | foo/pictureR/wordsTemplate.py | MangetsuC/arkHelper | 02705294f1bc3ecf926e0a9c62c59026494f62f8 | [
"MIT"
] | 147 | 2020-05-06T10:36:13.000Z | 2022-03-17T13:03:16.000Z | foo/pictureR/wordsTemplate.py | MangetsuC/arkHelper | 02705294f1bc3ecf926e0a9c62c59026494f62f8 | [
"MIT"
] | 34 | 2020-07-21T01:20:10.000Z | 2022-01-30T06:38:11.000Z | foo/pictureR/wordsTemplate.py | MangetsuC/arkHelper | 02705294f1bc3ecf926e0a9c62c59026494f62f8 | [
"MIT"
] | 17 | 2020-12-10T14:42:34.000Z | 2022-02-26T15:23:58.000Z | from PIL import Image, ImageDraw, ImageFont
from numpy import asarray
from cv2 import cvtColor, COLOR_RGB2BGR, imshow, waitKey
from os import getcwd | 37.540541 | 101 | 0.670266 |
8383af1ee3c86c7a8396f853fcb82a399a1772cb | 1,185 | py | Python | bin/concat_msa.py | HadrienG/arbetsprov | ee4b887a1a8ac43c9c8cbb016480fde14cf0e48f | [
"MIT"
] | 5 | 2021-10-11T09:30:52.000Z | 2022-01-03T07:03:17.000Z | bin/concat_msa.py | HadrienG/arbetsprov | ee4b887a1a8ac43c9c8cbb016480fde14cf0e48f | [
"MIT"
] | null | null | null | bin/concat_msa.py | HadrienG/arbetsprov | ee4b887a1a8ac43c9c8cbb016480fde14cf0e48f | [
"MIT"
] | 1 | 2022-01-03T07:03:51.000Z | 2022-01-03T07:03:51.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from Bio import AlignIO
def concat_msa(msas, output):
"""concatenate msas together"""
alignments = []
for msa in msas:
align = AlignIO.read(msa, "fasta")
# shorten id so the concatenated alignment keeps it
for record in align._records:
record.id = record.id.split("|")[0]
if len(align._records) == 3:
alignments.append(align)
concatenated_alignment = alignments[0]
for alignment in alignments[1:]:
concatenated_alignment += alignment
with open(output, "w") as outfile:
AlignIO.write(concatenated_alignment, outfile, "fasta")
if __name__ == "__main__":
main() | 23.7 | 63 | 0.592405 |
8384d1480db51cc6251738da74aa3074adb07e4f | 11,099 | py | Python | rendez-vous.py | MrDarkness117/parseTsum | 03f9f4d7c9e90a48eec5c689082a4274a160f501 | [
"MIT"
] | null | null | null | rendez-vous.py | MrDarkness117/parseTsum | 03f9f4d7c9e90a48eec5c689082a4274a160f501 | [
"MIT"
] | null | null | null | rendez-vous.py | MrDarkness117/parseTsum | 03f9f4d7c9e90a48eec5c689082a4274a160f501 | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
from os import path
import re
import json
import time
import datetime
import xlsxwriter
print("Start: " + str(datetime.datetime.now()))
options = Options()
prefs = {"profile.managed_default_content_settings.images": 2}
options.add_experimental_option('prefs', prefs)
driver = webdriver.Chrome(executable_path=ChromeDriverManager().install(), options=options)
# driver = webdriver.Chrome(options=options)
driver.maximize_window()
driver.implicitly_wait(0.5)
url_brands = "https://www.rendez-vous.ru/catalog/brands/"
brands = [
"Aldo Brue", "AGL", "BANU", "Bally", 'Bresciani', 'Brimarts', 'Carlo Visintini', 'Casadei', 'Casheart,',
'Cerruti 1881', 'Cesare Casadei', 'Coccinelle', 'DKNY', 'Doria Maria', 'Doucal\'s', 'F_WD', 'Fabi', 'Fabrizio Lesi',
'Ferre Milano', 'Flower Mountain', 'Franceschetti', 'Frasconi', 'Fratelli Rossetti', 'Fratelli Rossetti One',
'Gianni Chiarini', 'Goose Tech', 'GUM', 'HIDE&JACK', 'Ice Play', 'Iceberg', 'In The Box', 'Inuikii',
'John Galliano', 'John Richmond', 'Kalliste', 'Kat Maconie', 'Kate Spade', 'Lancaster', 'Landi', 'Le Silla',
'Lemon Jelly', "L'Impermeabile", 'Marsell', 'Merola Gloves', 'Moose Knuckles', 'Moreschi', 'Moschino', 'Panchic',
'Pantanetti', 'Parajumpers', 'Pasotti', 'Pertini', 'Pierre Cardin', 'Pollini', 'Portolano', 'Premiata',
'Principe Di Bologna', 'RBRSL', "Reptile's House", 'Roberto Cavalli', 'Rocco P', 'Sergio Rossi', 'SPRAYGROUND',
'Stemar', 'Stuart Weitzman', 'V SEASON', "VIC MATIE'", "Vic Matie", 'Voile Blanche', 'What For', 'Wolford', '3JUIN',
'Premiata will be', 'Sprayground', 'Domrebel', 'GIUSEPPE ZANOTTI DESIGN', 'Giuseppe Zanotti Design',
'GIUSEPPE ZANOTTI', 'Giuseppe Zanotti'
]
search_values = ['Wolford', 'RBRSL', "Rocco P", "DKNY", 'Flower Mountain', 'HIDE&JACK', 'Inuikii', 'Lancaster']
categories = [
"",
'',
""
]
iframe_ids = ['fl-545545']
show = "//li[@class='next']/a"
pagination_class_selected = 'page selected'
last_page = '//ul[@id="pagination_bottom"]/li[@class="last"]'
search_btn = '//*[@id="search-toggle"]'
search_bar = '//*[@id="Search_q"]'
failed_pages = {'pages': []}
output = xlsxwriter.Workbook('C:\\Users\\admin\\Documents\\outputs\\Rendez-vous {}.xlsx'.format(str(datetime.date.today())))
sheet = output.add_worksheet('Rendez-vous')
sheet.write('A1', '')
sheet.write('B1', '')
sheet.write('C1', ' ')
sheet.write('D1', '')
sheet.write('E1', '')
sheet.write('F1', ' ')
sheet.write('G1', '')
tables = {}
count = 0
row = 2
closed = False
scrolled = False
if __name__ == '__main__':
run()
| 39.080986 | 136 | 0.594918 |
838511c8e3372a6ae2d5fbb109dbbc9156779d54 | 171 | py | Python | stdlib/getpass_qs.py | bpuderer/python-snippets27 | 8d51ff34c48bee1247575536d8ed506eafde8631 | [
"MIT"
] | 3 | 2015-11-20T14:30:53.000Z | 2015-12-19T05:55:19.000Z | stdlib/getpass_qs.py | bpuderer/python-snippets27 | 8d51ff34c48bee1247575536d8ed506eafde8631 | [
"MIT"
] | null | null | null | stdlib/getpass_qs.py | bpuderer/python-snippets27 | 8d51ff34c48bee1247575536d8ed506eafde8631 | [
"MIT"
] | 1 | 2016-01-05T20:54:49.000Z | 2016-01-05T20:54:49.000Z | import getpass
# prompt user without echoing output
print getpass.getpass()
print getpass.getpass(prompt="Custom Prompt:")
print "user login name:", getpass.getuser()
| 17.1 | 46 | 0.766082 |
8385780ba677837b4c5c4c3d8cf272c764342143 | 385 | py | Python | backend/todo/migrations/0008_auto_20190403_0812.py | Bhunesh2000/todoWithDjango | e5fa52a087180b66ae283e6b36fe790323d7b920 | [
"MIT"
] | null | null | null | backend/todo/migrations/0008_auto_20190403_0812.py | Bhunesh2000/todoWithDjango | e5fa52a087180b66ae283e6b36fe790323d7b920 | [
"MIT"
] | 11 | 2019-04-03T09:49:17.000Z | 2022-02-10T08:23:26.000Z | backend/todo/migrations/0008_auto_20190403_0812.py | Bhunesh2000/todoWithDjango | e5fa52a087180b66ae283e6b36fe790323d7b920 | [
"MIT"
] | 1 | 2019-10-21T19:26:29.000Z | 2019-10-21T19:26:29.000Z | # Generated by Django 2.2 on 2019-04-03 08:12
from django.db import migrations, models
| 20.263158 | 57 | 0.58961 |
8385a072d6737fbd7ff6db50b44b8505e7dcadb3 | 1,797 | py | Python | public/neumeeditor/models/fields/short_code_field.py | jacobsanz97/cantus | 37d139ae20972c36d4abb96a2a5ac5106b0c1b47 | [
"MIT"
] | null | null | null | public/neumeeditor/models/fields/short_code_field.py | jacobsanz97/cantus | 37d139ae20972c36d4abb96a2a5ac5106b0c1b47 | [
"MIT"
] | null | null | null | public/neumeeditor/models/fields/short_code_field.py | jacobsanz97/cantus | 37d139ae20972c36d4abb96a2a5ac5106b0c1b47 | [
"MIT"
] | null | null | null | import re
from django.db import models
unacceptable_chars = "[^a-z0-9\._]"
duplicate_spaces_and_dots = "[\ .]+"
def sanitize_short_code(input):
"""
We want to filter-out the undesirable characters.
"""
# Turn spaces and dots into single dots
new_code = re.sub(duplicate_spaces_and_dots, '.', input.strip().lower())
# Filter out everything bad
new_code = replace_common_words(re.sub(unacceptable_chars, '', new_code))
# Duplicates once more
return re.sub(duplicate_spaces_and_dots, '.', new_code)
| 28.983871 | 82 | 0.582638 |
8386475aa5d024fe1d36e6904efce3bbf70bc22b | 328 | py | Python | 2/week2/c.py | briannice/logiscool-python | 00cf772072f574d297ed487e8edc9bb0158b6c68 | [
"Apache-2.0"
] | null | null | null | 2/week2/c.py | briannice/logiscool-python | 00cf772072f574d297ed487e8edc9bb0158b6c68 | [
"Apache-2.0"
] | null | null | null | 2/week2/c.py | briannice/logiscool-python | 00cf772072f574d297ed487e8edc9bb0158b6c68 | [
"Apache-2.0"
] | null | null | null | def som(a, b):
"""Bereken de som van twee getallen. Als de som groter is dan nul return je de som.
Als de som kleiner is dan nul, dan return je nul.
Args:
a (int): het eerste getal
b (int): het tweede getal
"""
pass
assert som(1, 2) == 3
assert som(-1, -2) == -3
assert som(0, 0) == 0
| 20.5 | 87 | 0.570122 |
83882ea566cc14498c7c6f7269a02089a389aa86 | 2,862 | py | Python | src/plugins/pipeline_plugins/utils/blob.py | google/cc4d | 206543832368f96bac7f55c0de93c96e32127779 | [
"Apache-2.0"
] | 11 | 2021-03-23T22:03:00.000Z | 2022-03-30T17:12:38.000Z | src/plugins/pipeline_plugins/utils/blob.py | google/cc4d | 206543832368f96bac7f55c0de93c96e32127779 | [
"Apache-2.0"
] | 3 | 2021-07-21T10:13:24.000Z | 2021-10-18T03:44:03.000Z | src/plugins/pipeline_plugins/utils/blob.py | google/cc4d | 206543832368f96bac7f55c0de93c96e32127779 | [
"Apache-2.0"
] | 5 | 2021-05-07T03:30:29.000Z | 2021-11-03T21:05:00.000Z | # python3
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""A Blob class for data-in representation.
The Blob class contains all JSON events and all necessary metadata to the
operators.
"""
from typing import Any, Dict, List, Optional, Tuple
| 37.657895 | 75 | 0.677149 |
8388c207ef02a512832cd36b34b04ff91b5bc7e2 | 2,636 | py | Python | LinearModel/scripts/three_classes_train.py | SMZCC/TF-deep-learn | 7517685d8b4fb51f1823d4595165538305739fc7 | [
"MIT"
] | null | null | null | LinearModel/scripts/three_classes_train.py | SMZCC/TF-deep-learn | 7517685d8b4fb51f1823d4595165538305739fc7 | [
"MIT"
] | null | null | null | LinearModel/scripts/three_classes_train.py | SMZCC/TF-deep-learn | 7517685d8b4fb51f1823d4595165538305739fc7 | [
"MIT"
] | null | null | null | # coding=utf-8
# date: 2019/1/1, 19:38
# name: smz
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from LinearModel.modules.model3 import ModelThreeClasses
from LinearModel.configuration.options import opts
from LinearModel.scripts.gen_data import generate_data
def train_3_classes():
"""softmaxsigmoid,"""
model3 = ModelThreeClasses(opts)
model3.build()
train_x3 = np.load("../data/train_data_X3.npy")
train_y3 = np.load("../data/train_data_Y3.npy")
model_name = "model3s.ckpt"
num_samples = len(train_x3)
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
for epoch in range(opts["epochs"]):
start_pointer = 0
train_x, train_y = shuffle(train_x3, train_y3)
while start_pointer < num_samples:
end_pointer = start_pointer + opts["batch_size"]
batch_x = train_x[start_pointer:end_pointer]
batch_y = train_y[start_pointer:end_pointer]
start_pointer = end_pointer
feed_dict = {model3.inputs: batch_x, model3.labels: batch_y}
loss_value, glob_step_value, merge_str, _ = sess.run(
fetches=[model3.loss, model3.global_step, model3.merge_op,model3.train_step],
feed_dict=feed_dict)
model3.writer.add_summary(merge_str, global_step=glob_step_value)
print("epoch:%d, step:%d, loss:%.6f"%(epoch, glob_step_value, loss_value))
if (epoch + 1) % 10 == 0:
model3.saver.save(sess, opts["checkpoints_dir"] + model_name, global_step=model3.global_step)
if __name__ == "__main__":
# gen_train_data()
train_3_classes() | 35.621622 | 119 | 0.65478 |
83894f358de50ff81cde8fdfc6091027cb2fdbb8 | 21,108 | py | Python | trio/_core/tests/test_multierror.py | JefffHofffman/trio | d8631117ce4ca19017bbe3850704dd5ce6cfaeb1 | [
"Apache-2.0",
"MIT"
] | 4 | 2017-03-01T22:14:46.000Z | 2020-07-31T07:18:18.000Z | trio/_core/tests/test_multierror.py | JefffHofffman/trio | d8631117ce4ca19017bbe3850704dd5ce6cfaeb1 | [
"Apache-2.0",
"MIT"
] | 81 | 2017-01-22T11:58:29.000Z | 2017-05-27T22:17:49.000Z | trio/_core/tests/test_multierror.py | JefffHofffman/trio | d8631117ce4ca19017bbe3850704dd5ce6cfaeb1 | [
"Apache-2.0",
"MIT"
] | 1 | 2020-05-28T19:38:09.000Z | 2020-05-28T19:38:09.000Z | import logging
import pytest
from traceback import extract_tb, print_exception, format_exception, _cause_message
import sys
import os
import re
from pathlib import Path
import subprocess
from .tutil import slow
from .._multierror import MultiError, concat_tb
from ..._core import open_nursery
def raiser1():
raiser1_2()
def raiser1_2():
raiser1_3()
def raiser1_3():
raise ValueError("raiser1_string")
def raiser2():
raiser2_2()
def raiser2_2():
raise KeyError("raiser2_string")
def raiser3():
raise NameError
def get_exc(raiser):
try:
raiser()
except Exception as exc:
return exc
def get_tb(raiser):
return get_exc(raiser).__traceback__
# This warning is triggered by ipython 7.5.0 on python 3.8
import warnings
warnings.filterwarnings(
"ignore",
message=".*\"@coroutine\" decorator is deprecated",
category=DeprecationWarning,
module="IPython.*"
)
try:
import IPython
except ImportError: # pragma: no cover
have_ipython = False
else:
have_ipython = True
need_ipython = pytest.mark.skipif(not have_ipython, reason="need IPython")
| 29.646067 | 88 | 0.648664 |
838a825c230b5aebf0d63f09c997caea89e365c9 | 3,896 | py | Python | servo/drv/ec3po_gpio.py | mmind/servo-hdctools | c7d50190837497dafc45f6efe18bf01d6e70cfd2 | [
"BSD-3-Clause"
] | 2 | 2019-09-25T22:44:39.000Z | 2020-07-26T22:29:20.000Z | servo/drv/ec3po_gpio.py | mmind/servo-hdctools | c7d50190837497dafc45f6efe18bf01d6e70cfd2 | [
"BSD-3-Clause"
] | null | null | null | servo/drv/ec3po_gpio.py | mmind/servo-hdctools | c7d50190837497dafc45f6efe18bf01d6e70cfd2 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Driver for gpio controls through ec3po.
Provides the following console controlled function:
_Get_single, _Set_single, _Get_multi, _Set_multi
"""
import logging
import pty_driver
import servo
# EC console mask for enabling only command channel
COMMAND_CHANNEL_MASK = 0x1
# servod numeric translation for GPIO state.
GPIO_STATE = {
0: '0',
1: '1',
2: 'IN',
3: 'A',
4: 'ALT'
}
| 25.973333 | 76 | 0.652977 |
838aa6ee041eb5a569fe74b2aa529125b8e2fda2 | 2,595 | py | Python | scipy_util/util.py | idf/sci_util_py | 53b4d961a1a8faeb444d2972ca7a2baf4a966f6e | [
"BSD-3-Clause"
] | null | null | null | scipy_util/util.py | idf/sci_util_py | 53b4d961a1a8faeb444d2972ca7a2baf4a966f6e | [
"BSD-3-Clause"
] | 1 | 2016-02-10T19:17:20.000Z | 2016-02-10T20:04:59.000Z | scipy_util/util.py | idf/scipy_util | 53b4d961a1a8faeb444d2972ca7a2baf4a966f6e | [
"BSD-3-Clause"
] | null | null | null | """
Adapted from https://github.com/bytefish/facerec
"""
import os
from PIL import Image
import numpy as np
import random
def asRowMatrix(X):
    """
    Creates a row-matrix from multi-dimensional data items in list X.

    Each item is flattened into one row, so the result has shape
    (len(X), prod(X[0].shape)) and (for homogeneous inputs) the dtype
    of the first item.

    X [list] List with multi-dimensional data.
    """
    if len(X) == 0:
        return np.array([])
    # Stack all flattened rows in a single vstack call; the previous
    # per-row np.append rebuilt the whole matrix each iteration (O(n^2)).
    mat = np.vstack([row.reshape(1, -1) for row in X])
    return np.asmatrix(mat)
def asColumnMatrix(X):
    """
    Creates a column-matrix from multi-dimensional data items in list X.

    Each item is flattened into one column, so the result has shape
    (prod(X[0].shape), len(X)).

    X [list] List with multi-dimensional data.
    """
    if len(X) == 0:
        return np.array([])
    # Stack all flattened columns in a single hstack call instead of the
    # quadratic per-column np.append of the original implementation.
    mat = np.hstack([col.reshape(-1, 1) for col in X])
    return np.asmatrix(mat)
def minmax_normalize(X, low, high, minX=None, maxX=None, dtype=float):
    """Min-max normalize a given matrix to the range [low, high].

    The default dtype is the builtin ``float``: the old ``np.float``
    default was an alias of builtin float and was removed in NumPy 1.24,
    which made defining this function raise AttributeError.

    Args:
        X    [rows x columns] input data
        low  [numeric] lower bound of the output range
        high [numeric] upper bound of the output range
        minX [numeric] value mapped to `low` (defaults to X.min())
        maxX [numeric] value mapped to `high` (defaults to X.max())
        dtype        dtype of the returned array
    """
    if minX is None:
        minX = np.min(X)
    if maxX is None:
        maxX = np.max(X)
    minX = float(minX)
    maxX = float(maxX)
    # Normalize to [0, 1], then stretch/shift into [low, high].
    X = (X - minX) / (maxX - minX)
    X = X * (high - low) + low
    return np.asarray(X, dtype=dtype)
def shuffle_array(X, y):
    """Shuffle two parallel arrays with the same random permutation.

    Returns new lists; the pairing between X[i] and y[i] is preserved.
    """
    # `range` replaces the Python-2-only `xrange`, which is a NameError
    # on Python 3; the permutation logic is unchanged.
    idx = np.argsort([random.random() for _ in range(len(y))])
    X = [X[i] for i in idx]
    y = [y[i] for i in idx]
    return X, y
def to_col_vec(row_vec):
    """
    Reshape a 1-D array into an (n, 1) column vector.

    :param row_vec: 1d np array
    :return: 2d np array with an inserted trailing axis
    """
    # Inserting an axis at position 1 is equivalent to row_vec[:, np.newaxis].
    return np.expand_dims(row_vec, axis=1)
def to_row_vec(col_vec):
"""
:param col_vec: 2d np array
:return:
"""
return col_vec.reshape(1, -1) | 23.807339 | 74 | 0.570713 |
838c4a8c221ca4daa94ec9e1d608b97fed7bdb05 | 110 | py | Python | cmsplugin_markdown/apps.py | glomium/cmstemplate | 6d51a6d97cf5a132e41ea6d2404bdfedf9edf25c | [
"BSD-3-Clause"
] | null | null | null | cmsplugin_markdown/apps.py | glomium/cmstemplate | 6d51a6d97cf5a132e41ea6d2404bdfedf9edf25c | [
"BSD-3-Clause"
] | null | null | null | cmsplugin_markdown/apps.py | glomium/cmstemplate | 6d51a6d97cf5a132e41ea6d2404bdfedf9edf25c | [
"BSD-3-Clause"
] | null | null | null | from django.apps import AppConfig
| 18.333333 | 41 | 0.8 |
838edfd8c862125c349c121325bcacc9bf203166 | 1,177 | py | Python | tools/harness/tests/freemem.py | lambdaxymox/barrelfish | 06a9f54721a8d96874a8939d8973178a562c342f | [
"MIT"
] | 111 | 2015-02-03T02:57:27.000Z | 2022-03-01T23:57:09.000Z | tools/harness/tests/freemem.py | lambdaxymox/barrelfish | 06a9f54721a8d96874a8939d8973178a562c342f | [
"MIT"
] | 12 | 2016-03-22T14:44:32.000Z | 2020-03-18T13:30:29.000Z | tools/harness/tests/freemem.py | lambdaxymox/barrelfish | 06a9f54721a8d96874a8939d8973178a562c342f | [
"MIT"
] | 55 | 2015-02-03T05:28:12.000Z | 2022-03-31T05:00:03.000Z | ##########################################################################
# Copyright (c) 2009, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
##########################################################################
import re
import tests
from common import TestCommon
from results import PassFailResult
| 33.628571 | 80 | 0.605777 |
8391c2e017e6f922119fae69c3e8b24e0d685ffc | 2,959 | py | Python | grad_cam.py | SamuelCahyawijaya/pytorch-smoothgrad | d9a5a359aab520a500e19359b309d1c030babb20 | [
"MIT"
] | 77 | 2017-07-28T15:54:44.000Z | 2018-04-21T08:25:36.000Z | grad_cam.py | SamuelCahyawijaya/pytorch-smoothgrad | d9a5a359aab520a500e19359b309d1c030babb20 | [
"MIT"
] | null | null | null | grad_cam.py | SamuelCahyawijaya/pytorch-smoothgrad | d9a5a359aab520a500e19359b309d1c030babb20 | [
"MIT"
] | 12 | 2019-10-11T16:00:51.000Z | 2021-12-10T03:21:54.000Z | import argparse
import os
import sys
import numpy as np
from scipy import misc
import cv2
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision.models import vgg16, vgg19
from torchvision.utils import save_image
from lib.gradients import GradCam, GuidedBackpropGrad
from lib.image_utils import preprocess_image, save_cam_image, save_as_gray_image
from lib.labels import IMAGENET_LABELS
if __name__ == '__main__':
main()
| 29.29703 | 84 | 0.677256 |
83927dd68fcacbf5d7c2a21161a37e8eb6342054 | 1,021 | py | Python | Task2G.py | asew4/Flood-Warning-System-8 | 66f436caf8307232604b830e4dc4ab385de0556e | [
"MIT"
] | null | null | null | Task2G.py | asew4/Flood-Warning-System-8 | 66f436caf8307232604b830e4dc4ab385de0556e | [
"MIT"
] | null | null | null | Task2G.py | asew4/Flood-Warning-System-8 | 66f436caf8307232604b830e4dc4ab385de0556e | [
"MIT"
] | 1 | 2022-02-06T02:27:29.000Z | 2022-02-06T02:27:29.000Z | from floodsystem import stationdata, datafetcher, station
stations = stationdata.build_station_list()
stationdata.update_water_levels(stations)

# Stations bucketed by relative water level, most to least severe.
severe_level_station = []
high_level_station = []
moderate_level_station = []
low_level_station = []

# Loop variable renamed from `station`, which shadowed the
# `floodsystem.station` module imported at the top of the file.
for monitoring_station in stations:
    level = monitoring_station.relative_water_level()
    if level is None:
        continue  # no usable reading for this station
    if level > 1.2:
        severe_level_station.append(monitoring_station)
    elif level > 0.9:
        high_level_station.append(monitoring_station)
    elif level > 0.7:
        moderate_level_station.append(monitoring_station)
    else:
        low_level_station.append(monitoring_station)

# Unique town names for each severity category.
severe_town = {x.town for x in severe_level_station}
high_town = {x.town for x in high_level_station}
moderate_town = {x.town for x in moderate_level_station}
low_town = {x.town for x in low_level_station}

# Report the towns at severe flood risk.
for town in severe_town:
    print(town)
83929360847de74ce432577b4612ddd776a07618 | 3,472 | py | Python | Protheus_WebApp/Modules/SIGAACD/ACDA035TESTCASE.py | 98llm/tir-script-samples | 0bff8393b79356aa562e9e6512c11ee6e039b177 | [
"MIT"
] | 17 | 2018-09-24T17:27:08.000Z | 2021-09-16T19:09:46.000Z | Protheus_WebApp/Modules/SIGAACD/ACDA035TESTCASE.py | 98llm/tir-script-samples | 0bff8393b79356aa562e9e6512c11ee6e039b177 | [
"MIT"
] | 4 | 2018-09-24T17:30:32.000Z | 2022-01-03T11:39:30.000Z | Protheus_WebApp/Modules/SIGAACD/ACDA035TESTCASE.py | 98llm/tir-script-samples | 0bff8393b79356aa562e9e6512c11ee6e039b177 | [
"MIT"
] | 18 | 2019-06-07T17:41:34.000Z | 2022-01-31T18:17:31.000Z | #//-------------------------------------------------------------------
#/*/{Protheus.doc} ACDA035 -
#
#@author PEDRO ANTONIO MISSAGLIA
#@since 23/09/2019
#@version P12
#
# CT001 - Incluso de Lanamento de Inventrio
# CT002 - Viso de um lanamento de inventrio
# CT003 - Visualizao das legendas
# CT004 - Alterao de Lanamento de Inventrio
# CT005 - Excluso de Lanamento de Inventrio
# CT007 - Alterao de Lanamento de Inventrio sem finalizar contagem
#
#/*/
#//-------------------------------------------------------------------
from tir import Webapp
import unittest
import time
if __name__ == '__main__':
unittest.main() | 27.125 | 88 | 0.692396 |
8392df442da4b2f8acf4cb05c261720a7e2145a4 | 967 | py | Python | tests/unit/test_cust_driver.py | abreu4/jina | d1d045e9e0933dffb3bd668cb9cfebab6cd52202 | [
"Apache-2.0"
] | 2 | 2021-01-22T07:34:35.000Z | 2021-01-23T04:36:41.000Z | tests/unit/test_cust_driver.py | abreu4/jina | d1d045e9e0933dffb3bd668cb9cfebab6cd52202 | [
"Apache-2.0"
] | 1 | 2021-02-27T05:56:45.000Z | 2021-02-27T05:57:03.000Z | tests/unit/test_cust_driver.py | abreu4/jina | d1d045e9e0933dffb3bd668cb9cfebab6cd52202 | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
from jina import Flow, Document
from jina.executors import BaseExecutor
from jina.parsers import set_pea_parser
from jina.peapods.peas import BasePea
cur_dir = Path(__file__).parent
| 31.193548 | 110 | 0.733195 |
8393065c4b6aeffae6a10f048cd67e3a8fa65388 | 373 | py | Python | 02-19-Cuma/forLoop.py | cihatdev/misha-staj | d0ee95d5e77a7d7a1f16611d49c87be429a25b31 | [
"MIT"
] | 9 | 2021-03-16T20:21:54.000Z | 2022-01-08T09:15:10.000Z | 02-19-Cuma/forLoop.py | cihatdev/misha-staj | d0ee95d5e77a7d7a1f16611d49c87be429a25b31 | [
"MIT"
] | 1 | 2021-02-28T21:27:17.000Z | 2021-02-28T21:27:17.000Z | 02-19-Cuma/forLoop.py | cihatdev/misha-staj | d0ee95d5e77a7d7a1f16611d49c87be429a25b31 | [
"MIT"
] | 1 | 2021-05-24T11:34:48.000Z | 2021-05-24T11:34:48.000Z | # for loops
# for letter in "Cihat Salik":
# print(letter)
friends = ["Hasan", "Mahmut", "Ali", "Veli"]
for friend in friends:
print(friend)
for index in range(3, 10):
print(index)
for index in range(len(friends)):
print(friends[index])
for index in range(5):
if index == 0:
print("First Iteration")
else:
print("Not first")
| 15.541667 | 44 | 0.600536 |
839340ab08b4524ada1d06b4a611b58353ecf4dc | 3,813 | py | Python | ricga/ricga_server.py | MeteorKepler/laughing-invention | 6f856d7ba27f956d8dceb18fe14ba2575beae6aa | [
"Apache-2.0"
] | 1 | 2018-04-12T01:44:32.000Z | 2018-04-12T01:44:32.000Z | ricga/ricga_server.py | MeteorKepler/RICGA | 6f856d7ba27f956d8dceb18fe14ba2575beae6aa | [
"Apache-2.0"
] | null | null | null | ricga/ricga_server.py | MeteorKepler/RICGA | 6f856d7ba27f956d8dceb18fe14ba2575beae6aa | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cgi
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import tensorflow as tf
from ricga import configuration
from ricga import inference_wrapper
from ricga.inference_utils import caption_generator
from ricga.inference_utils import vocabulary
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("checkpoint_path", "/home/meteorshub/code/RICGA/ricga/model/train",
"Model checkpoint file or directory containing a "
"model checkpoint file.")
tf.flags.DEFINE_string("vocab_file", "/home/meteorshub/code/RICGA/ricga/data/mscoco/word_counts.txt",
"Text file containing the vocabulary.")
tf.flags.DEFINE_string("server_ip", "59.66.143.35", "Server address")
tf.flags.DEFINE_integer("server_port", 8080, "server port")
tf.logging.set_verbosity(tf.logging.INFO)
inf_model = InferenceModel()
if __name__ == "__main__":
tf.app.run()
| 37.019417 | 115 | 0.59402 |
839409e02d06e81916f52fdb0dab1efe39b69585 | 2,190 | py | Python | rotation_analysis/analysis/probe/gui/gui.py | Sepidak/spikeGUI | 25ae60160308c0a34e7180f3e39a1c4dc6aad708 | [
"MIT"
] | null | null | null | rotation_analysis/analysis/probe/gui/gui.py | Sepidak/spikeGUI | 25ae60160308c0a34e7180f3e39a1c4dc6aad708 | [
"MIT"
] | 3 | 2021-08-09T21:51:41.000Z | 2021-08-09T21:51:45.000Z | rotation_analysis/analysis/probe/gui/gui.py | Sepidak/spikeGUI | 25ae60160308c0a34e7180f3e39a1c4dc6aad708 | [
"MIT"
] | 3 | 2021-10-16T14:07:59.000Z | 2021-10-16T17:09:03.000Z | import os
import sys
if sys.platform.startswith('linux'):
from OpenGL import GL
from PyQt5.QtQml import QQmlApplicationEngine
from PyQt5.QtWidgets import QApplication
from analysis.probe.gui.backend_classes import PythonBackendClass1, Logger
from analysis.probe.gui.image_providers import PyplotImageProvider
DEBUG = False
if __name__ == '__main__':
app = QApplication(sys.argv)
appEngine = QQmlApplicationEngine()
context = appEngine.rootContext()
analysis_image_provider1 = PyplotImageProvider(fig=None)
appEngine.addImageProvider("analysisprovider1", analysis_image_provider1)
analysis_image_provider2 = PyplotImageProvider(fig=None)
appEngine.addImageProvider("analysisprovider2", analysis_image_provider2)
# ALL THE ADDIMAGEPROVIDER LINES BELOW ARE REQUIRED TO MAKE QML BELIEVE THE PROVIDER IS VALID BEFORE ITS CREATION
# appEngine.addImageProvider('viewerprovider', CvImageProvider())
# analysis_image_provider = PyplotImageProvider(fig=None)
# appEngine.addImageProvider("analysisprovider", analysis_image_provider)
conf = {
'shared_directory': './' # FIXME: this is obviously BS
}
qml_source_path = os.path.join(conf['shared_directory'], 'qml', 'gui_qtquick', 'gui_qtquick.qml')
if not os.path.isfile(qml_source_path):
raise ValueError("Qml code not found at {}, please verify your installation".format(qml_source_path))
appEngine.load(qml_source_path)
try:
win = appEngine.rootObjects()[0]
except IndexError:
raise ValueError("Could not start the QT GUI")
if not DEBUG:
logger = Logger(context, win, "log")
sys.stdout = logger
print('Hello world')
# icon = QIcon(os.path.join(conf.shared_directory, 'resources', 'icons', 'pyper.png'))
# win.setIcon(icon)
backend = PythonBackendClass1(app, context, win, analysis_image_provider1, analysis_image_provider2) # create instance of backend
context.setContextProperty('py_iface', backend) # register backend python object with qml code under variable name py_iface
win.show()
sys.exit(app.exec_())
| 35.322581 | 135 | 0.719635 |
83944542d560f5e410e723d98ca83aade353b2f1 | 3,701 | py | Python | test/integration/test_forcemerge.py | jgough/opensearch-curator | e8d7eb4d969eac551db9f99bd021d0c05e28dc35 | [
"Apache-2.0"
] | null | null | null | test/integration/test_forcemerge.py | jgough/opensearch-curator | e8d7eb4d969eac551db9f99bd021d0c05e28dc35 | [
"Apache-2.0"
] | null | null | null | test/integration/test_forcemerge.py | jgough/opensearch-curator | e8d7eb4d969eac551db9f99bd021d0c05e28dc35 | [
"Apache-2.0"
] | null | null | null | import opensearchpy
import curator
import os
import json
import string
import random
import tempfile
from time import sleep
import click
from click import testing as clicktest
from mock import patch, Mock
from . import CuratorTestCase
from . import testvars as testvars
import logging
logger = logging.getLogger(__name__)
host, port = os.environ.get('TEST_ES_SERVER', 'localhost:9200').split(':')
port = int(port) if port else 9200 | 37.765306 | 94 | 0.604431 |
8394cbef054df0807f179df99652e99fb23bca5e | 7,331 | py | Python | datacube_alchemist/_utils.py | erin-telfer/datacube-alchemist | 4c37b2243027769f01ce0729e5ff56d0f6354316 | [
"Apache-2.0"
] | 15 | 2020-06-23T06:03:41.000Z | 2021-12-23T00:19:01.000Z | datacube_alchemist/_utils.py | erin-telfer/datacube-alchemist | 4c37b2243027769f01ce0729e5ff56d0f6354316 | [
"Apache-2.0"
] | 69 | 2019-08-14T02:03:38.000Z | 2022-03-04T03:38:20.000Z | datacube_alchemist/_utils.py | erin-telfer/datacube-alchemist | 4c37b2243027769f01ce0729e5ff56d0f6354316 | [
"Apache-2.0"
] | 3 | 2020-09-21T22:01:34.000Z | 2021-09-22T03:02:26.000Z | import json
from pathlib import Path
import re
from typing import Dict
import boto3
import structlog
from datacube.model import Dataset
from datacube.virtual import Measurement, Transformation
from eodatasets3 import DatasetAssembler, serialise
from eodatasets3.model import DatasetDoc, ProductDoc
from eodatasets3.properties import StacPropertyView
from eodatasets3.scripts.tostac import dc_to_stac, json_fallback
from eodatasets3.verify import PackageChecksum
from toolz.dicttoolz import get_in
from datacube_alchemist.settings import AlchemistTask
# Regex for extracting region codes from tile IDs.
RE_TILE_REGION_CODE = re.compile(r".*A\d{6}_T(\w{5})_N\d{2}\.\d{2}")
def _stac_to_sns(sns_arn, stac):
    """
    Publish a STAC document to an SNS topic.

    :param sns_arn: ARN of the SNS topic to publish to
    :param stac: STAC item (dict) with a "bbox" and a product name in
        properties["odc:product"] or the top-level "collection" key
    :raises ValueError: if no product name can be found
    """
    bbox = stac["bbox"]

    # Product name: prefer the ODC property, fall back to the STAC collection.
    product_name = get_in(["properties", "odc:product"], stac, None)
    if product_name is None:
        product_name = stac.get("collection", None)
    if product_name is None:
        # Message fixed to name the key actually checked ("odc:product",
        # not "odc:product_name").
        raise ValueError("No 'odc:product' or 'collection' found in STAC doc")

    # Message attributes let SNS subscribers filter without parsing the body.
    attributes = {
        "action": {"DataType": "String", "StringValue": "ADDED"},
        "datetime": {
            "DataType": "String",
            "StringValue": str(get_in(["properties", "datetime"], stac)),
        },
        "product": {
            "DataType": "String",
            "StringValue": product_name,
        },
        "bbox.ll_lon": {"DataType": "Number", "StringValue": str(bbox[0])},
        "bbox.ll_lat": {"DataType": "Number", "StringValue": str(bbox[1])},
        "bbox.ur_lon": {"DataType": "Number", "StringValue": str(bbox[2])},
        "bbox.ur_lat": {"DataType": "Number", "StringValue": str(bbox[3])},
    }

    # Maturity is optional; only attach the attribute when present.
    maturity = get_in(["properties", "dea:dataset_maturity"], stac)
    if maturity is not None:
        attributes["maturity"] = {"DataType": "String", "StringValue": maturity}

    client = boto3.client("sns")
    client.publish(
        TopicArn=sns_arn,
        Message=json.dumps(stac, indent=4, default=json_fallback),
        MessageAttributes=attributes,
    )
def _munge_dataset_to_eo3(ds: Dataset) -> DatasetDoc:
    """
    Convert an ODC Dataset to the DatasetDoc format that eodatasets expects.
    """
    metadata_name = ds.metadata_type.name
    if metadata_name in ("eo_plus", "eo_s2_nrt", "gqa_eo"):
        # S2 NRT metadata is handled identically to eo_plus files;
        # gqa_eo is the S2 ARD with extra quality-check fields.
        return _convert_eo_plus(ds)
    if metadata_name == "eo":
        return _convert_eo(ds)

    # Otherwise the dataset is already in a (mostly) eo3 style.
    # Wrap properties to avoid typos and the like; drop eo:gsd if present.
    props = StacPropertyView(ds.metadata_doc.get("properties", {}))
    if props.get("eo:gsd"):
        del props["eo:gsd"]

    return DatasetDoc(
        id=ds.id,
        product=ProductDoc(name=ds.type.name),
        crs=str(ds.crs),
        properties=props,
        geometry=ds.extent,
    )
def _guess_region_code(ds: Dataset) -> str:
    """
    Get the region code of a dataset, trying each metadata flavour in turn.
    """
    try:
        # EO plus exposes the region code directly.
        return ds.metadata.region_code
    except AttributeError:
        pass  # not EO plus

    try:
        # Plain EO keeps it in the metadata document.
        return ds.metadata_doc["region_code"]
    except KeyError:
        pass  # no explicit region code

    # Fall back to parsing the tile ID, e.g. for S2A NRT:
    #   S2A_OPER_MSI_L1C_TL_VGS1_20201114T053541_A028185_T50JPP_N02.09
    # whose region code is 50JPP.
    match = RE_TILE_REGION_CODE.match(ds.metadata_doc["tile_id"])
    if match is None:
        raise ValueError("No region code for dataset {}".format(ds.id))
    return match.group(1)
| 32.438053 | 105 | 0.657755 |
8394cda94ca23da8940ee7626693fe1126d8fab2 | 834 | py | Python | HMBBF/migrations/0015_auto_20161202_1733.py | HLoveMe/HWMBBF_Serve | a11fb5b67c913b62df839ce3438a3be433e3865b | [
"Apache-2.0"
] | null | null | null | HMBBF/migrations/0015_auto_20161202_1733.py | HLoveMe/HWMBBF_Serve | a11fb5b67c913b62df839ce3438a3be433e3865b | [
"Apache-2.0"
] | null | null | null | HMBBF/migrations/0015_auto_20161202_1733.py | HLoveMe/HWMBBF_Serve | a11fb5b67c913b62df839ce3438a3be433e3865b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
| 27.8 | 134 | 0.588729 |
83963bff306e66c0a55e66eed48eb8e977fd0dbd | 4,649 | py | Python | rb/processings/text_classifier/text_classifier.py | readerbench/ReaderBench | f0588a9a63ba21e3b8c2e5e5bc474904c07f6897 | [
"Apache-2.0"
] | null | null | null | rb/processings/text_classifier/text_classifier.py | readerbench/ReaderBench | f0588a9a63ba21e3b8c2e5e5bc474904c07f6897 | [
"Apache-2.0"
] | 2 | 2021-10-17T14:00:52.000Z | 2021-10-17T14:00:52.000Z | rb/processings/text_classifier/text_classifier.py | readerbench/ReaderBench | f0588a9a63ba21e3b8c2e5e5bc474904c07f6897 | [
"Apache-2.0"
] | null | null | null | from rb.core.lang import Lang
from rb.core.document import Document
from rb.complexity.complexity_index import ComplexityIndex, compute_indices
from rb.similarity.word2vec import Word2Vec
from rb.similarity.vector_model import VectorModelType, CorporaEnum, VectorModel
from rb.similarity.vector_model_factory import VECTOR_MODELS, create_vector_model
from typing import Tuple, List
from sklearn.svm import SVR
from collections import Counter
from sklearn import svm
from sklearn.metrics import confusion_matrix, plot_confusion_matrix
import matplotlib.pyplot as plt
import pickle
import os
import csv
import random
from werkzeug import secure_filename
import uuid
from rb.cna.cna_graph import CnaGraph
from rb.utils.rblogger import Logger
logger = Logger.get_logger()
| 35.219697 | 113 | 0.591955 |
839812d03b6dbafa768b4338253f5ebbd452fe07 | 821 | py | Python | vault_importer/csv.py | rpetti/vault-keepassxc-importer | 7258a1062a52426e44fddce57d0f841f98f3c2c1 | [
"Apache-2.0"
] | null | null | null | vault_importer/csv.py | rpetti/vault-keepassxc-importer | 7258a1062a52426e44fddce57d0f841f98f3c2c1 | [
"Apache-2.0"
] | null | null | null | vault_importer/csv.py | rpetti/vault-keepassxc-importer | 7258a1062a52426e44fddce57d0f841f98f3c2c1 | [
"Apache-2.0"
] | null | null | null | import csv
| 26.483871 | 75 | 0.483557 |
839832c0e53eab95cbbd979af3ec19abef8086bb | 3,069 | py | Python | src/reader/_plugins/enclosure_tags.py | mirekdlugosz/reader | d929b88f1981085b68e82019aa59af126479d4a9 | [
"BSD-3-Clause"
] | 205 | 2018-07-14T12:54:21.000Z | 2022-03-29T06:47:13.000Z | src/reader/_plugins/enclosure_tags.py | mirekdlugosz/reader | d929b88f1981085b68e82019aa59af126479d4a9 | [
"BSD-3-Clause"
] | 275 | 2018-01-28T20:57:13.000Z | 2022-03-29T21:45:11.000Z | src/reader/_plugins/enclosure_tags.py | mirekdlugosz/reader | d929b88f1981085b68e82019aa59af126479d4a9 | [
"BSD-3-Clause"
] | 12 | 2021-01-01T17:15:53.000Z | 2022-03-22T09:38:12.000Z | """
enclosure_tags
~~~~~~~~~~~~~~
Fix tags for MP3 enclosures (e.g. podcasts).
Adds a "with tags" link to a version of the file with tags set as follows:
* the entry title as title
* the feed title as album
* the entry/feed author as author
This plugin needs additional dependencies, use the ``unstable-plugins`` extra
to install them:
.. code-block:: bash
pip install reader[unstable-plugins]
To load::
READER_APP_PLUGIN='reader._plugins.enclosure_tags:init' \\
python -m reader serve
Implemented for https://github.com/lemon24/reader/issues/50.
Became a plugin in https://github.com/lemon24/reader/issues/52.
"""
import tempfile
from urllib.parse import urlparse
import mutagen.mp3
import requests
from flask import Blueprint
from flask import request
from flask import Response
from flask import stream_with_context
from flask import url_for
blueprint = Blueprint('enclosure_tags', __name__)
ALL_TAGS = ('album', 'title', 'artist')
SET_ONLY_IF_MISSING_TAGS = {'artist'}
| 26.008475 | 81 | 0.640274 |
8398b2a65cd51b95d6dff4f5e09806cedb08e588 | 454 | py | Python | sqllite/delete_the_data.py | arjunjanamatti/pymongo_practise | d69153f6a0cce9416b10c0adf300986bfe9dfe22 | [
"Apache-2.0"
] | null | null | null | sqllite/delete_the_data.py | arjunjanamatti/pymongo_practise | d69153f6a0cce9416b10c0adf300986bfe9dfe22 | [
"Apache-2.0"
] | null | null | null | sqllite/delete_the_data.py | arjunjanamatti/pymongo_practise | d69153f6a0cce9416b10c0adf300986bfe9dfe22 | [
"Apache-2.0"
] | null | null | null | import _sqlite3
mydb = _sqlite3.connect(database='namelist')

with mydb:
    # `with` on a connection opens a transaction that commits on success
    # (or rolls back on an exception); the explicit commit() below is
    # redundant but harmless and kept for clarity.
    cur = mydb.cursor()
    name = 'update_name_placeholder'
    # Parameterized query ('?') keeps the value safely quoted.
    cur.execute('DELETE FROM users WHERE First_name = ?', (name,))
    mydb.commit()
    print('Data deleted!!!')

    cur = mydb.cursor()
    selectquery = 'SELECT * FROM users'
    cur.execute(selectquery)
    results = cur.fetchall()
    print('Original data: ')
    for row in results:
        print(row)

mydb.close()  # release the database handle (previously never closed)
8398ea34b25e65cea834c631c5374561393da5cc | 35,415 | py | Python | tools/messages/aura4_messages.py | AuraUAS/aura-core | 4711521074db72ba9089213e14455d89dc5306c0 | [
"MIT",
"BSD-2-Clause-FreeBSD"
] | 8 | 2016-08-03T19:35:03.000Z | 2019-12-15T06:25:05.000Z | tools/messages/aura4_messages.py | AuraUAS/aura-core | 4711521074db72ba9089213e14455d89dc5306c0 | [
"MIT",
"BSD-2-Clause-FreeBSD"
] | 4 | 2018-09-27T15:48:56.000Z | 2018-11-05T12:38:10.000Z | tools/messages/aura4_messages.py | AuraUAS/aura-core | 4711521074db72ba9089213e14455d89dc5306c0 | [
"MIT",
"BSD-2-Clause-FreeBSD"
] | 5 | 2017-06-28T19:15:36.000Z | 2020-02-19T19:31:24.000Z | import struct
# Message id constants
command_ack_id = 10
config_airdata_id = 11
config_board_id = 12
config_ekf_id = 13
config_imu_id = 14
config_mixer_id = 15
config_mixer_matrix_id = 16
config_power_id = 17
config_pwm_id = 18
config_stability_damping_id = 19
command_inceptors_id = 20
command_zero_gyros_id = 21
command_reset_ekf_id = 22
command_cycle_inceptors_id = 23
pilot_id = 24
imu_id = 25
aura_nav_pvt_id = 26
airdata_id = 27
power_id = 28
status_id = 29
ekf_id = 30
# Constants
pwm_channels = 8 # number of pwm output channels
sbus_channels = 16 # number of sbus channels
ap_channels = 6 # number of sbus channels
mix_matrix_size = 64 # 8 x 8 mix matrix
# Enums
enum_nav_none = 0 # None
enum_nav_nav15 = 1 # None
enum_nav_nav15_mag = 2 # None
# Message: command_ack
# Id: 10
# Message: config_airdata
# Id: 11
# Message: config_board
# Id: 12
# Message: config_ekf
# Id: 13
# Message: config_imu
# Id: 14
# Message: config_mixer
# Id: 15
# Message: config_mixer_matrix
# Id: 16
# Message: config_power
# Id: 17
# Message: config_pwm
# Id: 18
# Message: config_stability_damping
# Id: 19
# Message: command_inceptors
# Id: 20
# Message: command_zero_gyros
# Id: 21
# Message: command_reset_ekf
# Id: 22
# Message: command_cycle_inceptors
# Id: 23
# Message: pilot
# Id: 24
# Message: imu
# Id: 25
# Message: aura_nav_pvt
# Id: 26
# Message: airdata
# Id: 27
# Message: power
# Id: 28
# Message: status
# Id: 29
# Message: ekf
# Id: 30
| 30.742188 | 86 | 0.472766 |
839c15c319f76f2a4d3c57f98d73d5a1cfa11959 | 1,563 | py | Python | influxdbnagiosplugin/tests/test_query.py | paulboot/influxdb-nagios-plugin | 945eeb518d22863a7878b3726b24d8a2e9e485bb | [
"Apache-2.0"
] | 5 | 2016-05-16T22:35:09.000Z | 2020-11-13T16:03:45.000Z | influxdbnagiosplugin/tests/test_query.py | paulboot/influxdb-nagios-plugin | 945eeb518d22863a7878b3726b24d8a2e9e485bb | [
"Apache-2.0"
] | 2 | 2017-11-28T00:01:19.000Z | 2022-01-21T14:04:58.000Z | influxdbnagiosplugin/tests/test_query.py | paulboot/influxdb-nagios-plugin | 945eeb518d22863a7878b3726b24d8a2e9e485bb | [
"Apache-2.0"
] | 9 | 2015-12-03T00:37:57.000Z | 2021-09-08T09:23:05.000Z | """
Query construction tests.
"""
from hamcrest import assert_that, is_, equal_to
from influxdbnagiosplugin.query import ExplicitQueryBuilder, SingleMeasurementQueryBuilder
| 26.948276 | 90 | 0.627639 |
839cf93a477b1ceb19582913fdf13770dea82220 | 27,056 | py | Python | sfftk_migrate/test_sfftk_migrate.py | emdb-empiar/sfftk-migrate | fc8941082256456edb61fe22ecbf932f6258352a | [
"Apache-2.0"
] | null | null | null | sfftk_migrate/test_sfftk_migrate.py | emdb-empiar/sfftk-migrate | fc8941082256456edb61fe22ecbf932f6258352a | [
"Apache-2.0"
] | 2 | 2020-04-02T15:25:10.000Z | 2020-04-03T14:32:12.000Z | sfftk_migrate/test_sfftk_migrate.py | emdb-empiar/sfftk-migrate | fc8941082256456edb61fe22ecbf932f6258352a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
import types
import unittest
import inspect
from lxml import etree
from . import XSL, XML, VERSION_LIST
from .core import get_module, get_stylesheet, get_source_version, get_migration_path, list_versions
from .main import parse_args
from .migrate import migrate_by_stylesheet, do_migration, get_params
from .utils import _print, _check, _decode_data
replace_list = [
('\n', ''),
('\t', ''),
(' ', ''),
]
def compare_elements(el1, el2):
    """Recursively compare two lxml elements and all their children.

    Tag, whitespace-normalised text/tail, attributes, child count and
    the full child subtree must all match.

    :return: True or False
    """
    # NOTE(review): (etree._Element) is not a tuple — the parentheses are
    # redundant; a single class is still a valid isinstance target.
    _check(el1, (etree._Element), TypeError)
    _check(el2, (etree._Element), TypeError)
    # https://stackoverflow.com/questions/7905380/testing-equivalence-of-xml-etree-elementtree
    if el1.tag != el2.tag:
        return False
    # _replace normalises whitespace before comparing — presumably using
    # replace_list defined above; confirm against its definition elsewhere
    # in this module.
    if _replace(el1.text) != _replace(el2.text):
        return False
    if _replace(el1.tail) != _replace(el2.tail):
        return False
    if el1.attrib != el2.attrib:
        return False
    if len(el1) != len(el2):
        return False
    # Recurse pairwise over the children (counts already known equal).
    return all(compare_elements(e1, e2) for e1, e2 in zip(el1, el2))
| 50.383613 | 120 | 0.623965 |
839e9ac3360c11d26c97e8b4677e721f25a025a1 | 11,298 | py | Python | ross/stochastic/st_shaft_element.py | hssaabbl/ross | 5e548d24c8522c8a9a294479c580c21b4eb3bb65 | [
"MIT"
] | 69 | 2018-12-26T19:21:26.000Z | 2022-02-10T08:48:03.000Z | ross/stochastic/st_shaft_element.py | hssaabbl/ross | 5e548d24c8522c8a9a294479c580c21b4eb3bb65 | [
"MIT"
] | 639 | 2018-12-18T16:44:11.000Z | 2022-03-27T16:46:41.000Z | ross/stochastic/st_shaft_element.py | hssaabbl/ross | 5e548d24c8522c8a9a294479c580c21b4eb3bb65 | [
"MIT"
] | 136 | 2019-01-08T12:37:32.000Z | 2022-03-30T07:14:35.000Z | """Shaft element module for STOCHASTIC ROSS.
This module creates an instance of random shaft element for stochastic
analysis.
"""
from ross.shaft_element import ShaftElement
from ross.stochastic.st_materials import ST_Material
from ross.stochastic.st_results_elements import plot_histogram
from ross.units import Q_, check_units
__all__ = ["ST_ShaftElement", "st_shaft_example"]
def st_shaft_example():
"""Return an instance of a simple random shaft element.
The purpose is to make available a simple model so that doctest can be
written using it.
Returns
-------
elm : ross.stochastic.ST_ShaftElement
An instance of a random shaft element object.
Examples
--------
>>> import ross.stochastic as srs
>>> elm = srs.st_shaft_example()
>>> len(list(iter(elm)))
2
"""
from ross.materials import steel
elm = ST_ShaftElement(
L=[1.0, 1.1],
idl=0.0,
odl=[0.1, 0.2],
material=steel,
is_random=["L", "odl"],
)
return elm
| 32.28 | 88 | 0.547354 |
839ec849aea4ca2defce43d38650cfab96daff56 | 2,873 | py | Python | sympy/benchmarks/bench_symbench.py | vprusso/sympy | d5aa27ec88bb076f59087aada97d99bfff8b2f4c | [
"BSD-3-Clause"
] | null | null | null | sympy/benchmarks/bench_symbench.py | vprusso/sympy | d5aa27ec88bb076f59087aada97d99bfff8b2f4c | [
"BSD-3-Clause"
] | null | null | null | sympy/benchmarks/bench_symbench.py | vprusso/sympy | d5aa27ec88bb076f59087aada97d99bfff8b2f4c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function, division
from sympy.core.compatibility import xrange
from random import random
from sympy import factor, I, Integer, pi, simplify, sin, sqrt, Symbol, sympify
from sympy.abc import x, y, z
from timeit import default_timer as clock
def bench_R1():
"real(f(f(f(f(f(f(f(f(f(f(i/2)))))))))))"
e = f(f(f(f(f(f(f(f(f(f(I/2)))))))))).as_real_imag()[0]
def bench_R2():
"Hermite polynomial hermite(15, y)"
a = hermite(15, y)
def bench_R3():
"a = [bool(f==f) for _ in range(10)]"
f = x + y + z
a = [bool(f == f) for _ in range(10)]
def bench_R5():
"blowup(L, 8); L=uniq(L)"
L = [x, y, z]
blowup(L, 8)
L = uniq(L)
def bench_R6():
"sum(simplify((x+sin(i))/x+(x-sin(i))/x) for i in xrange(100))"
s = sum(simplify((x + sin(i))/x + (x - sin(i))/x) for i in xrange(100))
def bench_R7():
"[f.subs(x, random()) for _ in xrange(10**4)]"
f = x**24 + 34*x**12 + 45*x**3 + 9*x**18 + 34*x**10 + 32*x**21
a = [f.subs(x, random()) for _ in xrange(10**4)]
def bench_R8():
"right(x^2,0,5,10^4)"
a = right(x**2, 0, 5, 10**4)
def _bench_R9():
"factor(x^20 - pi^5*y^20)"
factor(x**20 - pi**5*y**20)
def bench_R10():
"v = [-pi,-pi+1/10..,pi]"
v = srange(-pi, pi, sympify(1)/10)
def bench_R11():
"a = [random() + random()*I for w in [0..1000]]"
a = [random() + random()*I for w in range(1000)]
def bench_S1():
"e=(x+y+z+1)**7;f=e*(e+1);f.expand()"
e = (x + y + z + 1)**7
f = e*(e + 1)
f = f.expand()
if __name__ == '__main__':
    # Benchmarks to run, in order. Each function's __doc__ holds the
    # expression being timed and is printed next to its measurement below.
    benchmarks = [
        bench_R1,
        bench_R2,
        bench_R3,
        bench_R5,
        bench_R6,
        bench_R7,
        bench_R8,
        #_bench_R9,
        bench_R10,
        bench_R11,
        #bench_S1,
    ]
    # NOTE(review): `report` is never appended to or read — looks vestigial.
    report = []
    for b in benchmarks:
        # Wall-clock each benchmark once (clock is timeit.default_timer).
        t = clock()
        b()
        t = clock() - t
        print("%s%65s: %f" % (b.__name__, b.__doc__, t))
| 21.765152 | 78 | 0.484859 |
839f729c16c6176bd93a48ef474f3a2349aae65f | 774 | py | Python | tests/test_capture.py | atac/c10-tools | 278acfaab8bb42dff448fe1fbe08e7b7f75b1752 | [
"BSD-3-Clause"
] | 5 | 2021-06-10T01:32:06.000Z | 2021-12-22T23:05:52.000Z | tests/test_capture.py | atac/c10-tools | 278acfaab8bb42dff448fe1fbe08e7b7f75b1752 | [
"BSD-3-Clause"
] | 17 | 2020-08-03T16:35:26.000Z | 2022-03-30T17:29:41.000Z | tests/test_capture.py | atac/c10-tools | 278acfaab8bb42dff448fe1fbe08e7b7f75b1752 | [
"BSD-3-Clause"
] | null | null | null |
from tempfile import NamedTemporaryFile
import os
import pytest
from c10_tools.capture import main
| 22.114286 | 70 | 0.595607 |
83a0c16822a34d798946b9dc4c088ff91cd8ad8d | 330 | py | Python | e-olymp/p15/373.py | ferhatelmas/algo | a7149c7a605708bc01a5cd30bf5455644cefd04d | [
"WTFPL"
] | 25 | 2015-01-21T16:39:18.000Z | 2021-05-24T07:01:24.000Z | e-olymp/p15/373.py | ferhatelmas/algo | a7149c7a605708bc01a5cd30bf5455644cefd04d | [
"WTFPL"
] | 2 | 2020-09-30T19:39:36.000Z | 2020-10-01T17:15:16.000Z | e-olymp/p15/373.py | ferhatelmas/algo | a7149c7a605708bc01a5cd30bf5455644cefd04d | [
"WTFPL"
] | 15 | 2015-01-21T16:39:27.000Z | 2020-10-01T17:00:22.000Z | # TODO: tle
import re
import sys
m = {"?": ".?", "*": ".*?"}  # wildcard -> regex fragment translation table
# One "pattern string" pair per stdin line; `tr` and `score` are defined
# elsewhere in the full file (stripped from this chunk) -- presumably tr()
# builds a regex from the pattern and score() ranks a match; TODO confirm.
for ln in sys.stdin:
    p, s = ln.rstrip().split()
    res = re.findall(tr(p), s)
    print(score(min(res)) if res else -1)
| 15.714286 | 61 | 0.50303 |
83a1781aa9bd0a390115621e83bce23ea229c38b | 1,025 | py | Python | dino.py | panpepson/DinoBot-chroma-offline | a6587555bf52c1545e69d79a4d30f19ad911eff2 | [
"MIT"
] | null | null | null | dino.py | panpepson/DinoBot-chroma-offline | a6587555bf52c1545e69d79a4d30f19ad911eff2 | [
"MIT"
] | 3 | 2021-06-08T21:14:50.000Z | 2022-03-12T00:22:40.000Z | dino.py | panpepson/DinoBot-chroma-offline | a6587555bf52c1545e69d79a4d30f19ad911eff2 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import numpy as np
import cv2
from mss.linux import MSS as mss
from PIL import Image
import time
import pyautogui as pg
#mon = {'top': 480, 'left': 130, 'width': 70, 'height': 35}
mon = {'top': 200, 'left': 410, 'width': 50, 'height': 30} #git-b01
screen_record()
| 26.282051 | 80 | 0.62439 |
83a1f16b819638b10f8073878aae0693547c3238 | 5,085 | py | Python | trove/tests/scenario/groups/instance_create_group.py | sapcc/trove | c03ec0827687fba202f72f4d264ab70158604857 | [
"Apache-2.0"
] | 1 | 2019-09-20T08:31:54.000Z | 2019-09-20T08:31:54.000Z | trove/tests/scenario/groups/instance_create_group.py | sapcc/trove | c03ec0827687fba202f72f4d264ab70158604857 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | trove/tests/scenario/groups/instance_create_group.py | sapcc/trove | c03ec0827687fba202f72f4d264ab70158604857 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from trove.tests import PRE_INSTANCES
from trove.tests.scenario import groups
from trove.tests.scenario.groups.test_group import TestGroup
from trove.tests.scenario.runners import test_runners
GROUP = "scenario.instance_create_group"
| 34.828767 | 78 | 0.73117 |
83a21e9300920f882ecbddf58f262c3769b6771a | 20,066 | py | Python | reco_utils/recommender/deeprec/models/dkn.py | suhoy901/recommenders | 8ec9f1950d694a5aeaa3d463ac23cad661a30a11 | [
"MIT"
] | 28 | 2021-11-12T08:26:40.000Z | 2022-03-27T07:21:24.000Z | reco_utils/recommender/deeprec/models/dkn.py | shobhit-agarwal/recommenders | 8ec9f1950d694a5aeaa3d463ac23cad661a30a11 | [
"MIT"
] | 5 | 2021-11-10T02:58:32.000Z | 2022-03-21T16:13:11.000Z | reco_utils/recommender/deeprec/models/dkn.py | shobhit-agarwal/recommenders | 8ec9f1950d694a5aeaa3d463ac23cad661a30a11 | [
"MIT"
] | 9 | 2021-11-03T07:14:47.000Z | 2022-02-22T13:42:04.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
import tensorflow as tf
from reco_utils.recommender.deeprec.models.base_model import BaseModel
__all__ = ["DKN"]
| 41.630705 | 126 | 0.546397 |
83a22a3ba1efc66d7ca002b326c88281fa4ad1f6 | 2,193 | py | Python | LeNet-5/LeNet-5.py | huangjunxiong11/TF2 | 6de61c28c59ef34be7e53762b3a759da152642f7 | [
"MIT"
] | null | null | null | LeNet-5/LeNet-5.py | huangjunxiong11/TF2 | 6de61c28c59ef34be7e53762b3a759da152642f7 | [
"MIT"
] | null | null | null | LeNet-5/LeNet-5.py | huangjunxiong11/TF2 | 6de61c28c59ef34be7e53762b3a759da152642f7 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics, losses
# 1.
(x, y), (x_val, y_val) = datasets.mnist.load_data() #
x = tf.convert_to_tensor(x, dtype=tf.float32) / 255. # 0~1
y = tf.convert_to_tensor(y, dtype=tf.int32) #
print(x.shape, y.shape)
train_dataset = tf.data.Dataset.from_tensor_slices((x, y)) #
train_dataset = train_dataset.batch(32).repeat(10) # batch3210
# 2.
network = Sequential([ #
layers.Conv2D(6, kernel_size=3, strides=1), # 63*3*1
layers.MaxPooling2D(pool_size=2, strides=2), # 2*22
layers.ReLU(), #
layers.Conv2D(16, kernel_size=3, strides=1), # 163*3*6
layers.MaxPooling2D(pool_size=2, strides=2), #
layers.ReLU(), #
layers.Flatten(), #
layers.Dense(120, activation='relu'), # 120
layers.Dense(84, activation='relu'), # 84
layers.Dense(10) # 10
])
network.build(input_shape=(None, 28, 28, 1)) # ,batch_size=32,28*28,1
network.summary() #
# 3.
optimizer = optimizers.SGD(lr=0.01) # =0.01
acc_meter = metrics.Accuracy() # accuracy
for step, (x, y) in enumerate(train_dataset): # batch
with tf.GradientTape() as tape: #
x = tf.reshape(x, (32, 28, 28, 1)) # [b,28,28]->[b,784]
# x = tf.extand_dims(x, axis=3)
out = network(x) # [b, 10]
y_onehot = tf.one_hot(y, depth=10) # one-hot
loss = tf.square(out - y_onehot)
loss = tf.reduce_sum(loss) / 32 # 32batch
grads = tape.gradient(loss, network.trainable_variables) #
optimizer.apply_gradients(zip(grads, network.trainable_variables)) #
acc_meter.update_state(tf.argmax(out, axis=1), y) #
if step % 200 == 0: # 200step
print('Step', step, ': Loss is: ', float(loss), ' Accuracy: ', acc_meter.result().numpy()) #
acc_meter.reset_states() # l
| 47.673913 | 106 | 0.674875 |
83a488662edcd23d97eb9c55b24ca7fbca5d6193 | 7,727 | py | Python | gw_full_latest/CloudTTN.py | rendikanyut/LowCostLoRaGw | a4bce0cf123ffeb48a01e779a62d76b1bf1c4486 | [
"Linux-OpenIB"
] | 654 | 2016-03-01T08:43:24.000Z | 2022-03-22T06:55:27.000Z | gw_full_latest/CloudTTN.py | rendikanyut/LowCostLoRaGw | a4bce0cf123ffeb48a01e779a62d76b1bf1c4486 | [
"Linux-OpenIB"
] | 315 | 2016-02-25T10:36:11.000Z | 2022-03-31T20:59:21.000Z | gw_full_latest/CloudTTN.py | rendikanyut/LowCostLoRaGw | a4bce0cf123ffeb48a01e779a62d76b1bf1c4486 | [
"Linux-OpenIB"
] | 388 | 2016-02-16T14:40:36.000Z | 2022-03-15T04:03:20.000Z | #-------------------------------------------------------------------------------
# Part of this Python script is taken from the Pycom NanoGateway
# https://github.com/pycom/pycom-libraries/tree/master/examples/lorawan-nano-gateway
#
# Adapted by Congduc.Pham@univ-pau.fr
#
# This file is part of the low-cost LoRa gateway developped at University of Pau
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the program. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
import binascii
import json
import os
import socket
import time
import datetime
from dateutil import parser
import calendar
import sys
#don't generate pyc (no compilation of imported module) so change in key_* file can be done dynamically
sys.dont_write_bytecode = True
import key_TTN as key_LoRaWAN
netserv='TTN'
try:
key_LoRaWAN.source_list
except AttributeError:
key_LoRaWAN.source_list=[]
try:
key_LoRaWAN.lorawan_server
except AttributeError:
key_LoRaWAN.lorawan_server="router.eu.thethings.network"
try:
key_LoRaWAN.lorawan_port
except AttributeError:
key_LoRaWAN.lorawan_port=1700
PROTOCOL_VERSION = 2
PUSH_DATA = 0
PUSH_ACK = 1
PULL_DATA = 2
PULL_ACK = 4
PULL_RESP = 3
RX_PK = {
'rxpk': [{
'time': '',
'tmst': 0,
'chan': 0,
'rfch': 0,
'freq': 0,
'stat': 1,
'modu': 'LORA',
'datr': '',
'codr': '4/5',
'rssi': 0,
'lsnr': 0,
'size': 0,
'data': ''
}]
}
TX_ACK_PK = {
'txpk_ack': {
'error': ''
}
}
# Testing with pau_lorawan_testing/Pau_testing_device 0x26011721
#
# python CloudTTN.py "QCEXASYAAAABhCGE1L87NCDMk0jLa6hYXm0e+g==" "256,64,637605665,0,28,8,-45" "125,5,12,868100" "2019-03-25T18:46:00.528+01:00" "0000B827EBD1B236"
# or
# python CloudTTN.py "QCEXASYAAAABhCGE1L87NCDMk0jLa6hYXm0e+g==" "256,64,637605665,0,28,8,-45" "125,5,12,868100" "`date +%FT%T%z`" "0000B827EBD1B236"
#
# get the base64 encrypted data from `Arduino_LoRa_temp` sending "Hello from UPPA"
#
# Hello from UPPA
# plain payload hex
# 48 65 6C 6C 6F 20 66 72 6F 6D 20 55 50 50 41
# Encrypting
# encrypted payload
# 84 21 84 D4 BF 3B 34 20 CC 93 48 CB 6B A8 58
# calculate MIC with NwkSKey
# transmitted LoRaWAN-like packet:
# MHDR[1] | DevAddr[4] | FCtrl[1] | FCnt[2] | FPort[1] | EncryptedPayload | MIC[4]
# 40 21 17 01 26 00 00 00 01 84 21 84 D4 BF 3B 34 20 CC 93 48 CB 6B A8 58 5E 6D 1E FA
# [base64 LoRaWAN HEADER+CIPHER+MIC]:QCEXASYAAAABhCGE1L87NCDMk0jLa6hYXm0e+g==
def main(ldata, pdata, rdata, tdata, gwid):
    """Process one received LoRa packet and forward it to TTN.

    ldata -- base64-encoded LoRaWAN frame (MHDR|DevAddr|FCtrl|FCnt|FPort|payload|MIC)
    pdata -- "dst,ptype,src,seq,datalen,SNR,RSSI" produced by post_processing_gw.py
    rdata -- "bw,cr,sf,freq_in_khz" radio parameters
    tdata -- ISO-8601 receive time, optionally suffixed with "*<tmst>" by an SX1301 packet formatter
    gwid  -- gateway id, the zero-padded eth0 MAC address (e.g. 0000B827EBD1B236)
    """
    # this is common code to process packet information provided by the main
    # gateway script (i.e. post_processing_gw.py); the fields are all parsed
    # here in case you need them.
    # list(...) so the result is indexable on Python 3 too, where map()
    # returns an iterator; behaviour on Python 2 is unchanged.
    arr = list(map(int, pdata.split(',')))
    dst = arr[0]
    ptype = arr[1]
    src = arr[2]
    seq = arr[3]
    datalen = arr[4]
    SNR = arr[5]
    RSSI = arr[6]

    # If the packet was received with an SX1301 concentrator, the packet
    # formatter appends the tmst counter after the date, separated by '*',
    # i.e. "2019-03-25T18:46:00.528+01:00*29641444"
    tmst = tdata.count('*')
    if (tmst != 0):
        tdata_tmp = tdata.split('*')[0]
        tmst = tdata.split('*')[1]
        tdata = tdata_tmp
    else:
        tmst = ''

    # From 2019-05-14T14:53:10.241191+02:00 (similar to `date +%FT%T.%6N%z`)
    # to   2019-05-14T14:53:10.241191Z      (similar to `date +%FT%T.%6NZ`)
    dt = parser.parse(tdata)
    # in case you want to remove the microseconds:
    # tdata = dt.replace(microsecond=0,tzinfo=None).isoformat()+"Z"
    tdata = dt.replace(tzinfo=None).isoformat() + "Z"

    arr = list(map(int, rdata.split(',')))
    rbw = arr[0]
    rcr = arr[1]
    rsf = arr[2]
    rfq = arr[3] / 1000.0

    # LoRaWAN packets are flagged with dst == 256
    if dst == 256:
        src_str = "0x%0.8X" % src
        # we force BW125 as TTN cannot handle other bandwidths right now,
        # for instance those of LoRa 2.4GHz
        # TODO: change when TTN will support LoRa 2.4GHz
        rbw = 125
    else:
        src_str = str(src)

    # An empty source_list means "accept every source"
    if (src_str in key_LoRaWAN.source_list) or (len(key_LoRaWAN.source_list) == 0):
        # build the ttn_gwid which is defined to be gwid[4:10]+"FFFF"+gwid[10:];
        # gwid is normally the eth0 MAC address zero-padded in front
        ttn_gwid = gwid[4:10] + "FFFF" + gwid[10:]
        ttn = LoRaWAN(
            id=ttn_gwid,
            frequency=rfq,
            bw=rbw,
            sf=rsf,
            server=key_LoRaWAN.lorawan_server,
            port=key_LoRaWAN.lorawan_port)
        ttn.start()
        ttn.rx_packet(ldata, datalen, tdata, tmst, RSSI, SNR)
    else:
        # print() with one argument works identically on Python 2 and 3;
        # also fixes the garbled "is not is" wording of the original message.
        print("Source is not in source list, not sending to %s" % netserv)


if __name__ == "__main__":
    main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
| 31.410569 | 162 | 0.640611 |
83a6888316b1c7a494fc6ea76d1fb65b1293789a | 2,651 | py | Python | pythonProject1/venv/Lib/site-packages/tkinterpp/dialoguebox.py | mjtomlinson/CNE330_Python_1_Final_Project | 05020806860937ef37b9a0ad2e27de4897a606de | [
"CC0-1.0"
] | null | null | null | pythonProject1/venv/Lib/site-packages/tkinterpp/dialoguebox.py | mjtomlinson/CNE330_Python_1_Final_Project | 05020806860937ef37b9a0ad2e27de4897a606de | [
"CC0-1.0"
] | null | null | null | pythonProject1/venv/Lib/site-packages/tkinterpp/dialoguebox.py | mjtomlinson/CNE330_Python_1_Final_Project | 05020806860937ef37b9a0ad2e27de4897a606de | [
"CC0-1.0"
] | null | null | null | try:
import tkinter as tk
except ImportError:
import Tkinter as tk
| 36.819444 | 180 | 0.606941 |
83a72aa00c8eb33d1a3ee2b6393e98bf6532dbf4 | 6,467 | py | Python | siqbal/hooks.py | smehata/siqbal | 8b6a21fb63c050237593c49757065198c0e2c54a | [
"MIT"
] | null | null | null | siqbal/hooks.py | smehata/siqbal | 8b6a21fb63c050237593c49757065198c0e2c54a | [
"MIT"
] | null | null | null | siqbal/hooks.py | smehata/siqbal | 8b6a21fb63c050237593c49757065198c0e2c54a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
# Frappe app metadata read by the framework from hooks.py (used for
# display, packaging and the app registry).
app_name = "SIqbal"
app_title = "SIqbal"
app_publisher = "RC"
app_description = "Customizations for SIqbal"
app_icon = "octicon octicon-file-directory"
app_color = "green"
app_email = "developer@rccorner.com"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
app_include_css = "/assets/siqbal/css/siqbal.css"
# app_include_js = "/assets/siqbal/js/siqbal.js"
# include js, css files in header of web template
# web_include_css = "/assets/siqbal/css/siqbal.css"
# web_include_js = "/assets/siqbal/js/siqbal.js"
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
doctype_js = {
"Address": "public/js/address.js",
"Architect and Contractor": "public/js/architect_and_contractor.js",
"Authorization Rule": "public/js/authorization_rule.js",
"Customer": "public/js/customer.js",
"Delivery Note" : "public/js/delivery_note.js",
"Item": "public/js/item.js",
"Journal Entry": "public/js/journal_entry.js",
"Landed Cost Voucher": "public/js/landed_cost_voucher.js",
"Material Request" : "public/js/material_request.js",
"Opportunity": "public/js/opportunity.js",
"Payment Entry": "public/js/payment_entry.js",
"Property Detail": "public/js/property_detail.js",
"Purchase Invoice" : "public/js/purchase_invoice.js",
"Purchase Order" : "public/js/purchase_order.js",
"Purchase Receipt" : "public/js/purchase_receipt.js",
"Quotation" : "public/js/quotation.js",
"Request for Quotation": "public/js/request_for_quotation.js",
"Salary Slip" : "public/js/salary_slip.js",
"Sales Invoice" : "public/js/sales_invoice.js",
"Sales Order" : "public/js/sales_order.js",
"Stock Entry" : "public/js/stock_entry.js",
"Stock Reconciliation" : "public/js/stock_reconciliation.js",
"Supplier Quotation": "public/js/supplier_quotation.js"
}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "siqbal.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "siqbal.install.before_install"
# after_install = "siqbal.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "siqbal.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
doc_events = {
"Sales Order": {
"validate": [
"siqbal.hook_events.sales_order.set_average_valuation_rate",
# "siqbal.utils.validate_date"
],
"before_submit": "siqbal.hook_events.sales_order.unset_needs_approval",
"before_update_after_submit": "siqbal.hook_events.sales_order.validate_items_rate_and_update_boxes"
},
"Sales Invoice": {
"validate": [
"siqbal.hook_events.sales_invoice.validate_discount_while_return",
"siqbal.hook_events.sales_invoice.validate_taxes_and_charges_from_so",
"siqbal.utils.add_location",
"siqbal.hook_events.sales_invoice.validate_sales_invoice"
# "siqbal.utils.validate_date"
],
"before_insert": "siqbal.hook_events.sales_invoice.set_supplier_details",
"on_submit": [
"siqbal.hook_events.sales_invoice.update_reserved_qty",
"siqbal.hook_events.sales_invoice.create_purchase_invoices_against_sales_taxes",
# "siqbal.utils.change_pi_status"
#"siqbal.hook_events.sales_invoice.validate_user_warehouse"
],
"on_cancel": "siqbal.hook_events.sales_invoice.update_reserved_qty"
},
"Payment Entry": {
"validate": [
"siqbal.hook_events.payment_entry.validate_sales_order",
# "siqbal.hook_events.payment_entry.validate_salaryslip_amount",
#"siqbal.utils.validate_date"
],
# "on_submit": "siqbal.hook_events.payment_entry.update_salaryslip_status",
# "on_cancel": "siqbal.hook_events.payment_entry.update_salaryslip_status"
},
"Stock Entry": {
#"validate": "siqbal.utils.validate_date",
#"on_submit": "siqbal.hook_events.stock_entry.validate_user_warehouse"
},
"Opportunity": {
"validate": "siqbal.utils.send_followup_sms"
},
"Purchase Invoice": {
"validate": "siqbal.utils.add_location"
},
"Purchase Order": {
#"validate": "siqbal.utils.validate_date"
},
"Purchase Receipt": {
#"validate": "siqbal.utils.validate_date"
},
"Stock Reconciliation": {
#"validate": "siqbal.utils.validate_date"
},
# "Quotation": {
#"validate": "siqbal.utils.validate_date"
# },
# "Journal Entry": {
# "before_save": "siqbal.hook_events.journal_entry.set_name"
# }
}
jenv = {
"methods" : [
"get_qrcode_image:siqbal.utils.get_qrcode_image"
]
}
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "siqbal.tasks.all"
# ],
# "daily": [
# "siqbal.tasks.daily"
# ],
# "hourly": [
# "siqbal.tasks.hourly"
# ],
# "weekly": [
# "siqbal.tasks.weekly"
# ]
# "monthly": [
# "siqbal.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "siqbal.install.before_tests"
# Overriding Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "siqbal.event.get_events"
# }
#
# each overriding function accepts a `data` argument;
# generated from the base implementation of the doctype dashboard,
# along with any modifications made in other Frappe apps
# override_doctype_dashboards = {
# "Task": "siqbal.task.get_dashboard_data"
# }
override_doctype_class = {
'Sales Invoice': 'siqbal.hook_events.overide_sales_invoice.OverrideSalesInvoice'
}
| 28.117391 | 101 | 0.706355 |
83a7ad962e9be184926ad2137bbbb0b45b02188c | 4,781 | py | Python | testing/python/tests/test_dcgm_reader.py | omertuc/DCGM | 904e1600e5924ef60ac5256d492d0b7f6a7244bc | [
"Apache-2.0"
] | null | null | null | testing/python/tests/test_dcgm_reader.py | omertuc/DCGM | 904e1600e5924ef60ac5256d492d0b7f6a7244bc | [
"Apache-2.0"
] | null | null | null | testing/python/tests/test_dcgm_reader.py | omertuc/DCGM | 904e1600e5924ef60ac5256d492d0b7f6a7244bc | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from DcgmReader import *
import pydcgm
import dcgm_structs
import dcgm_structs_internal
import dcgm_agent_internal
import dcgm_fields
from dcgm_structs import dcgmExceptionClass
import logger
import test_utils
import time
| 35.947368 | 94 | 0.69316 |
83a81a83e057b3d3c679bc9510ffe5779a6f5647 | 14,761 | py | Python | tests/lava/lib/dl/slayer/neuron/test_alif.py | timcheck/lava-dl | e680722071129fde952ea0d744984aa2a038797a | [
"BSD-3-Clause"
] | 37 | 2021-09-30T16:47:15.000Z | 2022-03-07T22:29:21.000Z | tests/lava/lib/dl/slayer/neuron/test_alif.py | timcheck/lava-dl | e680722071129fde952ea0d744984aa2a038797a | [
"BSD-3-Clause"
] | 36 | 2021-11-04T16:54:55.000Z | 2022-03-31T02:26:29.000Z | tests/lava/lib/dl/slayer/neuron/test_alif.py | timcheck/lava-dl | e680722071129fde952ea0d744984aa2a038797a | [
"BSD-3-Clause"
] | 20 | 2021-10-29T22:55:58.000Z | 2022-03-22T17:27:16.000Z | # Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
import sys
import os
import unittest
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from lava.lib.dl.slayer.neuron import alif
verbose = True if (('-v' in sys.argv) or ('--verbose' in sys.argv)) else False
seed = np.random.randint(1000)
# seed = 590
np.random.seed(seed)
if verbose:
print(f'{seed=}')
if torch.cuda.is_available():
device = torch.device('cuda')
else:
if verbose:
print(
'CUDA is not available in the system. '
'Testing for CPU version only.'
)
device = torch.device('cpu')
# neuron parameters
threshold = 1
current_decay = np.random.random()
voltage_decay = np.random.random()
threshold_decay = np.random.random()
refractory_decay = np.random.random()
# create input
time = torch.FloatTensor(np.arange(200)).to(device)
# expand to (batch, neuron, time) tensor
spike_input = torch.autograd.Variable(
torch.zeros([5, 4, len(time)]), requires_grad=True
).to(device)
spike_input.data[..., np.random.randint(spike_input.shape[-1], size=5)] = 1
weight = torch.FloatTensor(
5 * np.random.random(size=spike_input.shape[-1]) - 0.5
).reshape(
[1, 1, spike_input.shape[-1]]
).to(device)
# initialize neuron
neuron = alif.Neuron(
threshold,
threshold_step=0.5 * threshold,
current_decay=current_decay,
voltage_decay=voltage_decay,
threshold_decay=threshold_decay,
refractory_decay=refractory_decay,
persistent_state=True,
).to(device)
quantized_weight = neuron.quantize_8bit(weight)
neuron.debug = True
# get the neuron response for full input
current, voltage, th, ref = neuron.dynamics(quantized_weight * spike_input)
spike = neuron.spike(voltage, th, ref)
| 34.569087 | 78 | 0.492175 |
83a83633ab9542d9e22f77076652f1c0ce78f53a | 526 | py | Python | amount_test.py | kalafut/go-ledger | 28a625e31d460e0ac2926c53a30f47f159d2b82f | [
"MIT"
] | null | null | null | amount_test.py | kalafut/go-ledger | 28a625e31d460e0ac2926c53a30f47f159d2b82f | [
"MIT"
] | 2 | 2015-11-08T18:50:11.000Z | 2015-11-08T18:50:42.000Z | amount_test.py | kalafut/go-ledger | 28a625e31d460e0ac2926c53a30f47f159d2b82f | [
"MIT"
] | null | null | null | import decimal
import pytest
from amount import Amount as A
| 21.04 | 46 | 0.452471 |
83a84321ea0a0bc9570475d0ce3c63e9712bd0ca | 4,449 | py | Python | DiscoGAN/discogan_train.py | sumersumerdjl/kozistr-Awesome-GANs | 6e20e9cd07d0ec413a187d496159b97d793dab0c | [
"MIT"
] | 1 | 2021-08-16T01:40:46.000Z | 2021-08-16T01:40:46.000Z | DiscoGAN/discogan_train.py | Psyche-mia/Awesome-GANs | 6e20e9cd07d0ec413a187d496159b97d793dab0c | [
"MIT"
] | null | null | null | DiscoGAN/discogan_train.py | Psyche-mia/Awesome-GANs | 6e20e9cd07d0ec413a187d496159b97d793dab0c | [
"MIT"
] | 1 | 2021-08-16T01:35:21.000Z | 2021-08-16T01:35:21.000Z | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
# import numpy as np
import time
import discogan
import sys
sys.path.insert(0, '../')
import image_utils as iu
from datasets import Pix2PixDataSet as DataSets
# Output locations for generated sample images and model checkpoints.
results = {
    'sample_output': './gen_img/',
    'model': './model/DiscoGAN-model.ckpt'
}
# Training hyper-parameters.
paras = {
    'epoch': 200,
    'batch_size': 64,
    'logging_interval': 5
}
if __name__ == '__main__':
    # main() is defined elsewhere in the full file (stripped from this chunk).
    main()
| 34.757813 | 111 | 0.537199 |
83a940e2ed8e27f8008df09f81164f38241b0cc0 | 303 | bzl | Python | examples/index.bzl | SebastianPodgajny/rules_nodejs | 7d1fbd9b6751225f502eccf2a91d1059371d461d | [
"Apache-2.0"
] | null | null | null | examples/index.bzl | SebastianPodgajny/rules_nodejs | 7d1fbd9b6751225f502eccf2a91d1059371d461d | [
"Apache-2.0"
] | null | null | null | examples/index.bzl | SebastianPodgajny/rules_nodejs | 7d1fbd9b6751225f502eccf2a91d1059371d461d | [
"Apache-2.0"
] | null | null | null | "Used to reference the nested workspaces for examples in /WORKSPACE"
ALL_EXAMPLES = [
"angular",
"app",
"kotlin",
"nestjs",
"parcel",
"protocol_buffers",
"user_managed_deps",
"vendored_node",
"vendored_node_and_yarn",
"web_testing",
"webapp",
"worker",
]
| 18.9375 | 68 | 0.617162 |
83a97810070d0ec137e7706e16cb2e0d4e501275 | 2,611 | py | Python | testproject/testproject/views.py | Najiva/django-tables2-column-shifter | 90695d5890c4ef6d7ba58a189a53d712aa8b9e14 | [
"BSD-3-Clause"
] | 19 | 2017-01-04T19:17:57.000Z | 2021-08-05T20:06:32.000Z | testproject/testproject/views.py | Najiva/django-tables2-column-shifter | 90695d5890c4ef6d7ba58a189a53d712aa8b9e14 | [
"BSD-3-Clause"
] | 16 | 2017-02-20T20:24:01.000Z | 2021-07-23T12:50:35.000Z | testproject/testproject/views.py | Najiva/django-tables2-column-shifter | 90695d5890c4ef6d7ba58a189a53d712aa8b9e14 | [
"BSD-3-Clause"
] | 8 | 2017-01-26T07:15:54.000Z | 2022-03-22T18:03:24.000Z | from django.views.generic import TemplateView
from django_tables2.config import RequestConfig
from django_tables2_column_shifter.tables import (
ColumnShiftTableBootstrap2,
ColumnShiftTableBootstrap3,
ColumnShiftTableBootstrap4,
ColumnShiftTableBootstrap5,
)
from .models import Author, Book
from .tables import get_author_table_class, get_book_table_class
| 32.234568 | 86 | 0.73152 |
83ab1978e9bfcb9289cdc6a850d6619b639f3ad4 | 967 | py | Python | experiments/12_wiki_training.py | dddaga/word-tree | ed6c59c16feee04d5c6003b3f5f4df68e6808e04 | [
"MIT"
] | null | null | null | experiments/12_wiki_training.py | dddaga/word-tree | ed6c59c16feee04d5c6003b3f5f4df68e6808e04 | [
"MIT"
] | null | null | null | experiments/12_wiki_training.py | dddaga/word-tree | ed6c59c16feee04d5c6003b3f5f4df68e6808e04 | [
"MIT"
] | 1 | 2020-12-02T09:07:06.000Z | 2020-12-02T09:07:06.000Z | import numpy as np
EXPERIMENT_NAME = 'EXP_12'
CORPUS_PATH = '/home/dddhiraj/Documents/stuff/data/wiki_en.txt'
TRAINING_WINDOW = 5
CONTEXT_DIMENSION = 64
LEANING_RATE = 1
DROPOUT = 0.05
CONTEXT_DECAY = 1 - TRAINING_WINDOW ** -0.5
CONTRASTIVE_WEIGHT = 1#0.1
NEGATIVE_SAMPLE_SIZE = TRAINING_WINDOW ** 2
CONEXT_INERTIA = np.sqrt(TRAINING_WINDOW)
THREADS = 6
CHUNK_SIZE = 5000
DB = 'REDIS'
if DB == 'MONGO':
import pymongo
myclient = pymongo.MongoClient('mongodb://localhost:27017')
mydb = myclient["mydatabase"]
collection = mydb.train_1#neighbour_aware_context_initilization_train_window_8
if DB == 'REDIS':
import redis
collection = redis.Redis(db=1) #11
key_collection= redis.Redis(db=2) #12
#import redisai
# collection = redisai.Client(db=14)
# key_collection = redisai.Client(db=15)
'''
Experiment details:
Trained on wiki data with 51 million words.
'''
| 21.021739 | 82 | 0.680455 |
83ab5e631ea0bec7a174bfa753c93a724a3979a9 | 49,562 | py | Python | yasi.py | arenadotio/yasi-sexp-indenter | f64cd332b3f41d7c2b3458b4279a13ec26df16b8 | [
"MIT"
] | null | null | null | yasi.py | arenadotio/yasi-sexp-indenter | f64cd332b3f41d7c2b3458b4279a13ec26df16b8 | [
"MIT"
] | 1 | 2020-07-14T16:07:38.000Z | 2020-07-14T16:07:38.000Z | yasi.py | arenadotio/yasi-sexp-indenter | f64cd332b3f41d7c2b3458b4279a13ec26df16b8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
""" yasi
Date: 20th November 2013
Author: nkmathew <kipkoechmathew@gmail.com>
Dialect aware s-expression indenter
"""
from __future__ import print_function
import argparse
import hashlib
import os
import re
import shutil
import sys
import time
import collections
import json
import difflib
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
# pylint: disable=unused-import
from pprint import pprint # noqa
__version__ = '2.1.2'
def parse_args(arguments=None):
    """ Reads command-line arguments

    Accepts None (falls back to sys.argv), a whitespace-separated string,
    a token list, or an already-parsed argparse.Namespace (returned
    unchanged). Validates the dialect and backup directory, then
    normalises the modify/backup/warning flags for stdin/diff modes.

    >>> parse_args('--indent-comments')
    """
    if arguments is None:
        arguments = sys.argv[1:]
    if isinstance(arguments, str):
        arguments = arguments.split()
    if isinstance(arguments, argparse.Namespace):
        # Already parsed (e.g. forwarded from another yasi function).
        return arguments
    parser = create_args_parser()
    args = parser.parse_args(arguments)
    args.dialect = args.dialect.lower()
    if args.dialect not in ['lisp', 'newlisp', 'clojure', 'scheme', 'all', '']:
        parser.error("`{0}' is not a recognized dialect".format(args.dialect))
    args.backup_dir = os.path.expanduser(args.backup_dir)
    if not os.path.exists(args.backup_dir):
        parser.error("Directory `{0}' does not exist".format(args.backup_dir))
    if len(args.files) > 1 and args.output_file:
        parser.error('Cannot use the -o flag when more than one file is specified')
    if not args.files:
        # Indentation from standard input
        if args.modify and not args.output_file:
            args.modify = False
        args.backup = False
        args.warning = False
    if args.output_diff:
        # If someone requests a diff we assume they don't want the file to be
        # modified
        args.modify = False
    return args
def read_file(fname):
    """Return the contents of *fname* decoded as UTF-8.

    The file is opened in binary mode so the original line endings
    (CRLF / CR / LF) survive untouched; text mode would normalise
    them all to LF.
    """
    assert os.path.exists(fname), "\n--%s-- Warning: File `%s' does not exist..." \
        % (current_time(), fname)
    with open(fname, 'rb') as handle:
        raw = handle.read()
    return raw.decode('utf-8')
def current_time():
    """Current local wall-clock time, formatted by strftime's locale
    %X directive (24-hour HH:MM:SS under the default C locale)."""
    now = time.localtime()
    return time.strftime('%X', now)
def backup_source_file(fname, args=None):
    """Copy *fname* into the configured backup directory.

    The backup name is the original basename plus ``args.backup_suffix``.
    A failed copy (e.g. insufficient permissions) is reported on stderr
    rather than raised, so indentation can still proceed.
    """
    args = parse_args(args)
    backup_dir = args.backup_dir
    assert os.path.exists(fname), \
        ("\n--%s-- Warning: File `%s' does not exist..." % (current_time(), fname))
    # Fix: this message previously interpolated `fname`, so a missing
    # backup directory was reported as a missing file.
    assert os.path.exists(os.path.abspath(backup_dir)), \
        ("\n--%s-- Warning: Directory `%s' does not exist..." % (current_time(), backup_dir))
    backup_name = backup_dir + os.sep + os.path.split(fname)[1] + args.backup_suffix
    try:
        shutil.copyfile(fname, backup_name)
    except IOError:
        message = "\n--%s-- Warning: Couldn't backup the file `%s' in `%s', check if you have enough permissions. "
        tpl = (current_time(), fname, backup_dir)
        sys.stderr.write(message % tpl)
def md5sum(content):
    """ md5sum(content : str) -> str

    >>> md5sum('Keyboard not found!! Press F1 to continue...')
    'ad98cde09016d2e99a726966a4291acf'

    Returns a checksum used to determine whether the file has changed.
    A simple textual comparison can still do the work.

    Fix: hashlib.md5 only accepts bytes-like input on Python 3, so the
    documented str usage used to raise TypeError; text is now encoded
    to UTF-8 first (bytes input is passed through unchanged).
    """
    if not isinstance(content, bytes):
        content = content.encode('utf-8')
    return hashlib.md5(content).hexdigest()
def find_line_ending(string):
    """Detect the line ending used in *string*.

    CRLF is checked before bare CR (so a Windows file is not mistaken
    for an old-Mac one); LF is the fallback when neither occurs.
    """
    for ending in (CRLF, CR):
        if ending in string:
            return ending
    return LF
def find_trim_limit(string, args=None):
    """ find_trim_limit(string : str) -> int
    >>> find_trim_limit(r'(list #\; #\")')
    14
    >>> find_trim_limit(r'(list ; ")')
    6
    >>> find_trim_limit(r'(list " ;)')
    7
    The function attempts to identify upto which point we are supposed to trim
    so that we don't mess with strings or any aligned comments.
    It does this by comparing the positions of semicolons and double
    quotes. It doesn't consider the multiline comment marker. If your
    code uses multiline comments(#| ... |#), you'll have to use --no-compact mode
    """
    args = parse_args(args)
    # Find position of the first unescaped semi colon
    comment_start = re.search(r'([^\\];)|(^;)', string)
    # Find position of the first unescaped double quote
    string_start = re.search(r'([^\\]")|(^")', string)
    # Assign -1 if there's no match
    limit = string_start.end() if string_start else -1
    comment_start = comment_start.end() if comment_start else -1
    if comment_start != -1:
        # If a semi colon is found, include all the whitespace before it to preserve
        # any aligned comments
        comment_start = re.search('[ \t]*;', string).start()
    if args.dialect == 'newlisp':
        # Find out which string type comes first(normal, tag or brace strings)
        # so that the earliest string opener wins as the trim boundary.
        brace_string_start = re.search('{', string)
        tag_string_start = re.search('\[text\]', string)
        brace_string_start = brace_string_start.end() if brace_string_start else -1
        tag_string_start = tag_string_start.end() if tag_string_start else -1
        pos_lst = [limit, brace_string_start, tag_string_start]
        pos_lst = [x for x in pos_lst if x != -1]
        if pos_lst:
            limit = min(pos_lst)
    # Resolve the precedence between the first comment and the first string:
    # whichever comes first in the line caps the region that may be trimmed.
    if comment_start != -1 and limit != -1:
        if comment_start < limit:
            # If the semicolon comes before the comma, it means the string has been
            # commented out
            limit = comment_start
    elif comment_start != -1 and limit == -1:
        # If there's a semicolon but no quote, use the semicolon position as the
        # limit
        limit = comment_start
    elif limit == -1:
        # If neither a semicolon nor a double quote has been found, use the length
        # of the string as the limit
        limit = len(string)
    return limit
def detabify(text, args):
    """Expand tab characters in *text* into spaces.

    ``args.tab_size`` gives the tab stop width; a non-positive value
    falls back to the default width of four columns.
    """
    args = parse_args(args)
    width = args.tab_size if args.tab_size >= 1 else 4
    return text.expandtabs(width)
def tabify(text, args):
    """Replace each run of ``args.tab_size`` spaces in *text* with a tab.

    >>> tabify('   (println "hello world")', '--tab=3')
    '\t\t (println "hello world")'
    Returns *text* unchanged when ``args.tab_size`` is non-positive
    (tabs disabled).
    """
    args = parse_args(args)
    if args.tab_size < 1:
        return text
    return text.replace(' ' * args.tab_size, '\t')
def pad_leading_whitespace(string, zero_level, blist, args=None):
    """Indent *string* according to the innermost open bracket.

    Returns a ``(padded_string, indent_level)`` tuple.  The indentation
    level comes from the last entry in *blist* (the stack of currently
    open brackets) or defaults to *zero_level* when nothing is open.
    """
    args = parse_args(args)
    if args.compact:
        # Compact mode: strip leading whitespace of the code portion only,
        # leaving string contents and trailing aligned comments untouched.
        limit = find_trim_limit(string, args)
        comment_match = re.search('^[ \t]*;', string, re.M)
        if comment_match and args.indent_comments:
            limit = comment_match.end()
        string = trim(string[0:limit]) + string[limit:]
    else:
        # --no-compact mode only removes the leading whitespace.
        string = re.sub('^[ \t]+', '', string, count=0)
    indent_level = blist[-1]['indent_level'] if blist else zero_level
    padding = tabify(' ' * indent_level, args)
    return padding + string, indent_level
def indent_line(zerolevel, bracket_list, line, in_comment, in_symbol_region,
                args=None):
    """ indent_line(zerolevel : int, bracket_list : list, line : str, in_comment : bool,
                    in_symbol_region : bool, args : string|list)
    Most important function in the indentation process. It uses the bracket
    locations stored in the list to indent the line.
    Returns a ``(zerolevel, line, indent_level)`` triple; the line comes
    back untouched (indent_level 0) inside symbol regions and on comment
    lines.
    """
    args = parse_args(args)
    comment_line = re.search('^[ \t]*;', line, re.M)
    if args.indent_comments:
        # We are allowed to indent comment lines
        comment_line = False
    if not args.compact and bracket_list == [] and not in_comment:
        # If nocompact mode is on and there are no unclosed blocks, try to
        # find the zero level by simply counting spaces before a line that
        # is not empty or has a comment
        _line = detabify(line, args)
        leading_spaces = re.search('^[ \t]+[^; )\n\r]', _line)
        if leading_spaces:
            # NOTE: If you don't subtract one here, the zero level will increase
            # every time you indent the file because the character at the end of
            # the regex is part of the capture.
            zerolevel = leading_spaces.end() - 1
        else:
            zerolevel = 0
    if in_symbol_region:
        # No processing done in strings and comments
        return zerolevel, line, 0
    if not comment_line and not all_whitespace(line):
        # If this is not a comment line indent the line.
        # If the list is empty, then the current_level defaults
        # to zero
        curr_line, current_level = pad_leading_whitespace(line, zerolevel,
                                                          bracket_list, args)
        return zerolevel, curr_line, current_level
    return zerolevel, line, 0
# ---------------------------------------------------------------------------------
# GLOBAL CONSTANTS::
# Recognised line endings; find_line_ending() returns one of these so the
# original endings can be preserved on output.
CR = '\r'
LF = '\n'
CRLF = CR + LF
# Indentation-number codes stored in the keyword table built by add_keywords():
KEYWORD0 = 0 # Non-keyword
KEYWORD1 = 1 # Indents uniformly by 1 unit
KEYWORD2 = 2 # Distinguishes subforms
KEYWORD3 = 3 # Indents uniformly by 2 units
KEYWORD4 = 4 # A 1-keyword used mostly for defining local functions e.g flets
# Keywords that indent by two spaces
SCHEME_KEYWORDS = \
    ['define', 'local-odd?', 'when', 'begin', 'case',
     'local-even?', 'do', 'call-with-bytevector-output-port',
     'call-with-input-file', 'call-with-port',
     'call-with-current-continuation', 'open-file-input-port',
     'call-with-port', 'call-with-values', 'call-with-output-file',
     'call-with-string-output-port', 'define-syntax', 'if', 'let', 'let*',
     'library', 'unless', 'lambda', 'syntax-rules', 'syntax-case',
     'let-syntax', 'letrec*', 'letrec', 'let-values', 'let*-values',
     'with-exception-handler', 'with-input-from-file',
     'with-interrupts-disabled', 'with-input-from-string',
     'with-output-to-file', 'with-input-from-port',
     'with-output-to-string', 'with-source-path', 'with-syntax',
     'with-implicit',
     'with-error-handler', 'module', 'parameterize']
# Clojure forms that indent their bodies by one unit.
CLOJURE_KEYWORDS = \
    ['defn', 'fn', 'dorun', 'doseq', 'loop', 'when',
     'let', 'defmacro', 'binding', 'doto', 'ns', ':import', 'defstruct',
     'condp', 'comment', 'when', 'when-let', '->', '->>',
     'extend-type', 'reify', 'binding', 'when-not', 'proxy', 'dotimes',
     'try', 'finally', 'for', 'letfn', 'catch', 'iterate', 'while',
     'with-local-vars', 'locking', 'defmulti', 'defmethod', 'extend'
     ]
# Common Lisp special forms and macros that indent their bodies by one unit.
LISP_KEYWORDS = \
    [':implementation', ':method', 'case', 'defclass',
     'defconstant', 'defgeneric', 'defimplementation',
     'define-condition', 'define-implementation-package',
     'definterface', 'defmacro', 'defmethod', 'defpackage',
     'defproject', 'deftype', 'defun', 'defvar', 'do-external-symbols',
     'dolist', 'dotimes', 'ecase', 'etypecase', 'flet', 'handler-bind',
     'if', 'lambda', 'let', 'let*', 'print-unreadable-object',
     'macrolet', 'defparameter', 'with-slots', 'typecase', 'loop', 'when', 'prog1',
     'unless', 'with-open-file', 'with-output-to-string', 'with-input-from-string',
     'block', 'handler-case', 'defstruct', 'eval-when', 'tagbody', 'ignore-errors',
     'labels', 'multiple-value-bind', 'progn', 'unwind-protect', 'collect'
     ]
# newLISP forms that indent their bodies by one unit.
NEWLISP_KEYWORDS = \
    ['while', 'if', 'case', 'dotimes', 'define', 'dolist', 'catch',
     'throw', 'lambda', 'lambda-macro', 'when', 'unless', 'letex', 'begin',
     'dostring', 'let', 'letn', 'doargs', 'define-macro', 'until', 'do-until',
     'do-while', 'for-all', 'find-all', 'for'
     ]
# The 'if' and 'else' part of an if block should have different indent levels so
# that they can stand out since there's no else Keyword in Lisp/Scheme to make
# this explicit. list IF_LIKE helps us track these keywords.
IF_LIKE = ['if']
def assign_indent_numbers(lst, inum, dic):
    """Map every keyword in *lst* to the indentation number *inum*.

    The mapping is written into *dic* (mutated in place), which is also
    returned for convenience.
    """
    dic.update((keyword, inum) for keyword in lst)
    return dic
def add_keywords(args):
    """ add_keywords(args) -> defaultdict

    Build the keyword -> indentation-number table for ``args.dialect``.
    Returns a ``defaultdict(int)`` mapping keyword names to the KEYWORD*
    codes: KEYWORD1 for uniform two-space indenters, KEYWORD2 for
    two-armed forms like `if', and KEYWORD4 for local binders such as
    flet/macrolet/labels.  Keywords from the rc file are merged in when
    ``args.read_rc`` is set.
    """
    dialect = args.dialect
    keywords = collections.defaultdict(int)
    two_spacers = []
    # Copy IF_LIKE before augmenting it: the previous `two_armed = IF_LIKE'
    # followed by `+=' extended the module-level list in place, so keywords
    # of one dialect leaked into every later call for a different dialect.
    two_armed = list(IF_LIKE)
    local_binders = []
    if dialect == 'lisp':  # Lisp
        two_spacers = LISP_KEYWORDS
        two_armed += ['multiple-value-bind', 'destructuring-bind', 'do', 'do*']
        local_binders += ['flet', 'macrolet', 'labels']
    elif dialect == 'scheme':  # Scheme
        two_spacers = SCHEME_KEYWORDS
        two_armed += ['with-slots', 'do', 'do*']
        local_binders += []
    elif dialect == 'clojure':  # Clojure
        two_spacers = CLOJURE_KEYWORDS
        two_armed += []
        local_binders += ['letfn']
    elif dialect == 'newlisp':  # newLISP
        two_spacers = NEWLISP_KEYWORDS
        two_armed += []
        local_binders += []
    elif dialect == 'all':
        two_spacers = LISP_KEYWORDS + SCHEME_KEYWORDS + CLOJURE_KEYWORDS + \
            NEWLISP_KEYWORDS
    keywords = assign_indent_numbers(two_spacers, KEYWORD1, keywords)
    keywords = assign_indent_numbers(two_armed, KEYWORD2, keywords)
    keywords = assign_indent_numbers(local_binders, KEYWORD4, keywords)
    if args.read_rc:
        rc_keywords = parse_rc_json()
        keywords.update(rc_keywords[dialect])
    return keywords
# ---------------------------------------------------------------------------------
def find_first_arg_pos(bracket_offset, curr_line, args=None):
    """ find_first_arg_pos(bracket_offset : int, curr_line : str) -> [int, int]
    Arguments:
        bracket_offset - The position of the bracket in the current line e.g
            "    ( list 'timey 'wimey  )" --> 4
            " ( list 'timey 'wimey  )"    --> 1
            "( list 'timey 'wimey  )"     --> 0
    >>> find_first_arg_pos(0, "( list 'one-sheep 'two-sheep )")
    [11, 5]
    Returns the position of the first argument to the function relative to the
    position of the opening bracket and the number of spaces between the opening
    bracket and the function name.
    The two values will to be used to align the other arguments in the subsequent line
    """
    args = parse_args(args)
    spaces_before_func = 0
    # Everything after the opening bracket.
    subline = curr_line[bracket_offset + 1:]
    if re.search('^[ \t]*($|\r)', subline):
        # whitespace extending to the end of the line means there's no
        # function in this line. The indentation level defaults to one.
        arg_pos = 1
    else:
        if bracket_offset != len(curr_line) - 1 and curr_line[bracket_offset + 1] == ' ':
            # control reaches here if we are not at the end of the line
            # and whitespace follows. We must first find the position of the
            # function and then the arguments position
            match = re.search(' +[^)\]]| \)', subline)  # Find the first non whitespace/bracket character
            if match:
                spaces_before_func = match.end() - match.start() - 1
                end = match.end()
            else:
                end = 0
            # Then use the end of the whitespace group as the first argument
            arg_pos = re.search(' +([^)])|( *(\(|\[))', subline[end:])
            if arg_pos:
                arg_pos = arg_pos.end() + spaces_before_func + 1
            else:
                arg_pos = spaces_before_func + 1
            if re.match('^[ \t]*(#\||;|$|\r)',
                        subline[(end - 1 + subline[end - 1:].find(' ')):]):
                # But, if a comment if found after the function name, the
                # indent level becomes one
                arg_pos = spaces_before_func + args.default_indent
        else:
            # If there's no space after the bracket, simply find the end of the
            # whitespace group
            match = re.search(' +([^)}\n\r])|( *(\(|\[|{))', subline)
            if match:  # found the argument
                arg_pos = match.end()
            else:  # Either empty list or argument is in the next line
                arg_pos = 1
            if re.match('^[\t ]*(;|$|\r)', subline[subline.find(' '):]):
                # Again if a comment is found after the function name, the
                # indent level defaults to 1
                arg_pos = spaces_before_func + args.default_indent
    return [arg_pos, spaces_before_func]
def _pop_from_list(bracket, lst, line, real_pos, offset, msg_stack):
    """ _pop_from_list(char : str, lst : [str], line : str,
                       real_pos : int, offset : int)
    Called when a closing bracket is encountered: pops the most recent
    opener off the bracket stack *lst* and returns the stack.

    Arguments:
        bracket   -- the closing bracket found (')', ']' or '}')
        lst       -- stack of hashes describing the currently open brackets
        line      -- line number of the closing bracket (used in warnings)
        real_pos  -- column in the original, untrimmed line
        offset    -- column in the processed line
        msg_stack -- list collecting {'msg', 'line', 'column'} warning dicts

    A warning is queued when the bracket does not match the popped opener
    or when the stack is already empty (excess closing bracket).
    """
    # Try to spot a case when a square bracket is used to close a round bracket
    # block
    if bracket == ']':
        correct_closer = '['
    elif bracket == ')':
        correct_closer = '('
    else:
        correct_closer = '{'
    if lst != []:
        popped = lst.pop()
        popped_char = popped['character']
        popped_pos = popped['line_number']
        popped_offset = popped['bracket_pos']
        # Compare with `!=' rather than `is not': identity comparison of
        # strings only happens to work because CPython interns one-character
        # strings -- it is not a language guarantee.
        if popped_char != correct_closer:
            message = "Bracket `%s' does not match `%s' at (%d, %d)"
            message = message % (bracket, popped_char, popped_pos, popped_offset)
            warning_info = {
                'msg': message,
                'line': line,
                'column': real_pos
            }
            msg_stack.append(warning_info)
    else:
        # If the list is empty and a closing bracket is found, it means we have
        # excess brackets. That warning is issued here. The coordinates used
        # will be slightly or largely off target depending on how much your
        # code was modified when used with compact mode
        message = "Unmatched closing bracket `%s'" % bracket
        warning_info = {
            'msg': message,
            'line': line,
            'column': offset + 1
        }
        msg_stack.append(warning_info)
    return lst
def _push_to_list(lst, func_name, char, line, offset,
                  first_arg_pos, first_item, in_list_literal,
                  lead_spaces, args=None):
    """ _push_to_list(lst : [str], func_name : str, char : str, line : int, offset : int,
                      first_arg_pos :int , first_item : int, in_list_literal : bool,
                      lead_spaces : int, args : str)
    Called when an opening bracket is encountered. A hash containing the
    necessary data to pin point errors and the indentation level is stored in
    the list and the list returned.
    The 'indent_level' entry is what indent_line() later uses to pad
    the following lines.
    """
    args = parse_args(args)
    keywords = add_keywords(args)
    pos_hash = {'character': char,
                'line_number': line,
                'bracket_pos': offset,
                'indent_level': offset + first_arg_pos,  # the default value, e.g in normal function
                'func_name': func_name,
                'spaces': 0}
    # A macro (or a KEYWORD1/KEYWORD4 keyword) gets a uniform one-unit indent.
    is_macro = is_macro_name(func_name, args.dialect)
    two_spacer = is_macro or keywords[func_name] in [KEYWORD1, KEYWORD4]
    if in_list_literal or char == '{' or (char == '[' and args.dialect == 'clojure'):
        # found quoted list or clojure hashmap/vector
        pos_hash['indent_level'] = first_item
    elif keywords[func_name] == KEYWORD2:
        # We only make the if-clause stand out if not in uniform mode
        pos_hash['indent_level'] = lead_spaces + ((offset + args.indent_size * 2)
                                                 if not args.uniform
                                                 else (offset + args.indent_size))
    elif func_name != '':
        if two_spacer:
            pos_hash['indent_level'] = lead_spaces + offset + args.indent_size
        elif keywords[func_name] == KEYWORD3:
            pos_hash['indent_level'] = lead_spaces + offset + (2 * args.indent_size)
    lst.append(pos_hash)
    try:
        # A hack to make flets and labels in Lisp not indent like
        # functions. The 'labels' indentation may not be exactly
        # perfect.
        parent_func = lst[-3]['func_name']
        # Make 'special' indentation occur only in a Clojure binding block([]) for
        # letfns
        non_bind_block = args.dialect == 'clojure' and lst[-2]['character'] != '['
        if keywords[parent_func] == KEYWORD4 and not non_bind_block:
            lst[-1]['indent_level'] = offset + args.indent_size
    except IndexError:
        # Fewer than three open brackets: no grandparent form to inspect.
        pass
    return lst
def indent_code(original_code, args=None):
    """ indent_code(original_code : str, args) -> dict
    Arguments:
        original_code: the whole text to be indented
        args: anything accepted by parse_args (string, list or namespace)
    >>> indent_code("(print\n'Hello)")
    {'bracket_locations': [],
     'comment_locations': [],
     'in_comment': 0,
     'in_newlisp_tag_string': False,
     'in_string': False,
     'in_symbol_with_space': False,
     'indented_code': ['(print\n', "   'Hello)"],
     'last_quote_location': (),
     'last_symbol_location': (),
     'message_stack': [],
     'newlisp_brace_locations': [],
     'original_code': ['(print\n', "'Hello)"],
     'first_tag_string': ()}
    Scans the code line by line and character by character, maintaining a
    stack of open brackets; the result dict carries both the indented
    lines and all the state needed by _post_indentation() to warn about
    unclosed constructs.
    """
    args = parse_args(args)
    keywords = add_keywords(args)
    # Safeguards against processing brackets inside strings
    in_string = False
    # newLISP use curly brackets as a syntax for multiline strings
    # this variable here tries to keep track of that (a counter, since
    # brace strings can nest)
    in_newlisp_string = 0
    in_newlisp_tag_string = False
    newlisp_brace_locations = []
    first_tag_string = ()
    # zero_level helps us get the same results as Sitaram's indenter when in
    # --no-compact mode.
    zero_level = 0
    # The two variables prevent formatting comment regions or symbols with whitespace
    in_comment = 0
    in_symbol_with_space = False
    comment_locations = []
    last_symbol_location = ()
    # A in_symbol_region is the region between pipes(| |) or in strings. This
    # includes the comment region. This region is not to be messed with.
    in_symbol_region = in_string or in_comment or in_symbol_with_space or \
        in_newlisp_string or in_newlisp_tag_string
    # we need to know the line number in order to issue almost accurate messages about
    # unclosed brackets and string
    line_number = 1
    # Stores the last position a quote was encountered so that in case there are
    # any unclosed strings, we can pinpoint them
    last_quote_location = ()
    line_ending = find_line_ending(original_code)
    code_lines = split_preserve(original_code, line_ending)
    indented_code = []
    bracket_locations = []
    # List of warnings from errors in the code
    message_stack = []
    for line in code_lines:
        escaped = False
        curr_line = line
        # Get the indent level and the indented line
        zero_level, curr_line, indent_level = indent_line(zero_level,
                                                          bracket_locations,
                                                          line, in_comment,
                                                          in_symbol_region, args)
        # Build up the indented string.
        indented_code.append(curr_line)
        regex = '^[ \t]*'
        lead_spaces = re.findall(regex, curr_line)
        if lead_spaces:
            # Work on a tab-expanded copy so character offsets are columns.
            curr_line = re.sub(regex, detabify(lead_spaces[0], args), curr_line)
        offset = 0
        for curr_char in curr_line:
            next_char = curr_line[offset + 1:offset + 2]
            prev_char = curr_line[offset - 1:offset]
            substr = curr_line[offset + 1:]  # slice to the end
            if escaped:
                # Move to the next character if the current one has been escaped
                escaped = False
                offset += 1
                continue
            if curr_char == '\\' and not in_newlisp_string and not in_newlisp_tag_string:
                # the next character has been escaped
                escaped = True
            if (curr_char == ';' or (curr_char == '#' and args.dialect == 'newlisp'))\
                    and not in_symbol_region and not \
                    (prev_char == '#' and args.dialect == 'scheme'):
                # a comment has been found, go to the next line
                # A sharp sign(#) before a semi-colon in Scheme is used to
                # comment out sections of code. We don't treat it as a comment
                break
            # ----------------------------------------------------------
            # Comments are dealt with here. Clojure and newLISP don't have Lisp
            # style multiline comments so don't include them.
            if args.dialect not in ['clojure', 'newlisp'] and curr_char == '|' \
                    and not in_string:
                if prev_char == '#' and not in_symbol_with_space:
                    comment_locations.append((line_number, offset))
                    in_comment += 1
                elif in_comment and next_char == '#':
                    in_comment -= 1
                    comment_locations.pop()
                elif not in_comment:
                    # A lone pipe toggles a |symbol with space| region.
                    if in_symbol_with_space:
                        last_symbol_location = ()
                        in_symbol_with_space = False
                    else:
                        last_symbol_location = (line_number, offset)
                        in_symbol_with_space = True
            # ----------------------------------------------------------
            # Strings are dealt with here only if we are not in a comment
            if not (in_symbol_with_space or in_comment or in_newlisp_tag_string):
                if curr_char == '"':
                    last_quote_location = (line_number, offset)
                    in_string = not bool(in_string)
                if args.dialect == 'newlisp' and not in_string:
                    # We handle newLISP's multiline(brace) string here. Brace
                    # strings can nest
                    if curr_char == '{':
                        newlisp_brace_locations.append((line_number, offset))
                        in_newlisp_string += 1
                    elif curr_char == '}':
                        if newlisp_brace_locations:
                            newlisp_brace_locations.pop()
                        else:
                            message = "Attempt to close a non-existent newLISP string"
                            warning_info = {
                                'msg': message,
                                'line': line_number,
                                'column': offset
                            }
                            message_stack.append(warning_info)
                        in_newlisp_string -= 1
                if curr_char == '[' and args.dialect == 'newlisp' and not \
                        (in_newlisp_string or in_string):
                    # We have to handle tag strings in newLISP here.
                    if re.match('\[text\]', curr_line[offset:offset + 7]):
                        in_newlisp_tag_string = True
                        if first_tag_string == ():
                            first_tag_string = (line_number, offset)
                    elif re.match('\[/text\]', curr_line[offset:offset + 7]):
                        in_newlisp_tag_string = False
                        first_tag_string = ()
            # Recompute after the toggles above so the check below sees the
            # state that includes the current character.
            in_symbol_region = in_string or in_comment or in_symbol_with_space \
                or in_newlisp_string or in_newlisp_tag_string
            if in_symbol_region:
                # move on if we are in a string, a symbol with a space or a comment
                # altogether known as the symbol region
                offset += 1
                continue
            # Finds the real position of a bracket to be used in pinpointing where
            # the unclosed bracket is. The real position is different from the offset
            # because current offset is the position of the bracket in the
            # trimmed string not the original.
            real_position = (offset - zero_level) + \
                len(re.findall('^[ \t]*', line)[0]) - indent_level
            if curr_char in ['(', '[', '{']:
                if curr_char in ['[', '{'] and args.dialect in ['lisp', 'newlisp']:
                    # Square/Curly brackets are used should not contribute to
                    # the indentation in CL and newLISP
                    offset += 1
                    continue
                first_arg_pos, spaces_before_func = \
                    find_first_arg_pos(offset, curr_line, args)
                func_name = substr[0:first_arg_pos - 1].strip(')]\t\n\r ').lower()
                in_list_literal = False
                if re.search("[^#]('|`|#)([ \t]*\(|\[)($|\r)", curr_line[0:offset + 1]):
                    in_list_literal = True
                if re.search('^[^ \t]+[ \t]*($|\r)', substr):
                    # The function is the last symbol/form in the line
                    func_name = substr.strip(')]\t\n\r ').lower()
                if in_list_literal:
                    # an empty string is always in a non-empty string, we don't want
                    # this. We set False as the func_name because it's not a string
                    # in_list_literal prevents an keyword in a list literal from
                    # affecting the indentation
                    func_name = ''
                if func_name in ['define-macro', 'defmacro']:
                    # Macro names are part of two space indenters.
                    # This part tries to find the name so that it is not indented
                    # like a function the next time it's used.
                    end_of_space = re.search('^[ \t]*', substr).end()
                    substr = substr[end_of_space:]
                    substr = substr[re.search('[ \t]*', substr).start():].strip()
                    macro_name = substr[:substr.find(' ')]  # macro name is delimeted by whitespace
                    if macro_name != '':
                        keywords[macro_name] = KEYWORD1
                # first_item stores the position of the first item in the literal list
                # it's necessary so that we don't assume that the first item is always
                # after the opening bracket.
                first_item = re.search('[ \t]*', curr_line[offset + 1:]).end() + offset + 1
                bracket_locations = _push_to_list(bracket_locations[:], func_name,
                                                  curr_char, line_number, offset,
                                                  first_arg_pos, first_item,
                                                  in_list_literal,
                                                  spaces_before_func, args)
            elif curr_char in [']', ')', '}']:
                if curr_char in [']', '}'] and args.dialect in ['lisp', 'newlisp']:
                    # Square/Curly brackets are used should not contribute to
                    # the indentation in CL and newLISP
                    offset += 1
                    continue
                bracket_locations = _pop_from_list(curr_char, bracket_locations[:],
                                                   line_number, real_position,
                                                   offset, message_stack)
            if bracket_locations and curr_char in [' ', '\t'] and \
                    keywords[bracket_locations[-1]['func_name']] == KEYWORD2:
                # This part changes the indentation level of a then clause so that
                # we can achieve something like:
                #         (if (= this that)
                #             'then-form
                #           'else-form)
                # This is done by keeping track of the number of spaces found. If
                # you find two spaces it means that, for example that we have just
                # passed the then-form and hence should decrease the indentation
                # level by 2.(I shamelessly copied this algorithm from Dorai's
                # indenter)
                if prev_char not in [' ', '\t', ''] or not \
                        re.search('^[ \t]*(;|#\||$|\r)', curr_line):
                    # The level shouldn't be decreased if the line is a comment
                    # line. The regex above takes care of that.
                    bracket_locations[-1]['spaces'] += 1
                if bracket_locations[-1]['spaces'] == 2:
                    bracket_locations[-1]['indent_level'] -= \
                        0 if args.uniform else args.indent_size
                    # some dummy value to prevent control from reaching here again
                    bracket_locations[-1]['spaces'] = 999
            offset += 1
        line_number += 1
    # Bundle the final scanner state with the indented lines; the caller
    # uses the leftover locations to report unclosed constructs.
    res = {
        'message_stack': message_stack,
        'first_tag_string': first_tag_string,
        'in_newlisp_tag_string': in_newlisp_tag_string,
        'last_symbol_location': last_symbol_location,
        'comment_locations': comment_locations,
        'newlisp_brace_locations': newlisp_brace_locations,
        'in_string': in_string,
        'in_comment': in_comment,
        'in_symbol_with_space': in_symbol_with_space,
        'bracket_locations': bracket_locations,
        'last_quote_location': last_quote_location,
        'original_code': code_lines,
        'indented_code': indented_code
    }
    return res
def colour_diff(diff_lines):
    """Print a unified diff to the terminal with per-line colouring.

    Removed lines are printed in red, added lines in green and hunk
    headers in yellow; everything else is printed as-is.  When colorama
    cannot be imported the diff is dumped uncoloured instead.
    """
    try:
        import colorama
    except ImportError:
        # colorama is not available, print plain diff
        print(''.join(list(diff_lines)))
        return
    colorama.init()

    def in_colour(colour, text):
        """ Emit *text* in the given colour, then switch back to white. """
        print(colour + text + colorama.Fore.WHITE, end='')

    section = re.compile(r'@@\s+-\d\d,\d\d\s\+\d\d,\d\d\s+@@')
    for line in diff_lines:
        if line.startswith('-'):
            in_colour(colorama.Fore.RED, line)
        elif line.startswith('+'):
            in_colour(colorama.Fore.GREEN, line)
        elif section.search(line):
            in_colour(colorama.Fore.YELLOW, line)
        else:
            print(line, end='')
def _post_indentation(res, args=None, fpath=''):
    """ _post_indentation(res : dict):
    Called after the string has been indented appropriately.
    It takes care of writing the file and checking for unclosed strings
    or comments.
    *res* is the dict returned by indent_code(); *fpath* is only used to
    format warning messages and as the default output path.
    """
    fname = os.path.basename(fpath)
    args = parse_args(args)
    # Replay the warnings collected while scanning (mismatched/excess
    # brackets etc.), prefixing each with the file name when available.
    for msg in res['message_stack']:
        if args.warning:
            if args.files:
                msg['fname'] = fname
                sys.stderr.write('\n{fname}:{line}:{column}: {msg}'.format(**msg))
            else:
                # Input was passed through stdin
                sys.stderr.write('\n:{line}:{column}: {msg}'.format(**msg))
    if res['bracket_locations']:
        # If the bracket_locations list is not empty it means that there are some
        # brackets(opening) that haven't been closed.
        for bracket in res['bracket_locations']:
            line = bracket['line_number']
            column = bracket['bracket_pos']
            character = bracket['character']
            # The bracket_locations are not very accurate. The warning might be
            # misleading because it considers round and square brackets to be
            # the same.
            message = "\n%s:%d:%d: Unmatched `%s'"
            if args.warning:
                sys.stderr.write(message % (fname, line, column, character))
    if res['newlisp_brace_locations']:
        for brace in res['newlisp_brace_locations']:
            message = "\n%s:%d:%d: Unclosed newLISP brace string"
            if args.warning:
                sys.stderr.write(message % (fname, brace[0], brace[1]))
    if res['comment_locations']:
        for comment in res['comment_locations']:
            message = "\n%s:%d:%d: Unclosed multiline comment"
            tpl = (fname,) + comment
            if args.warning:
                sys.stderr.write(message % tpl)
    if res['last_symbol_location']:
        message = "\n%s:%d:%d: Unclosed symbol"
        tpl = (fname,) + res['last_symbol_location']
        if args.warning:
            sys.stderr.write(message % tpl)
    if res['in_string']:
        message = "\n%s:%d:%d: String extends to end-of-file"
        tpl = (fname,) + res['last_quote_location']
        if args.warning:
            sys.stderr.write(message % tpl)
    if res['in_newlisp_tag_string']:
        message = "\n%s:%d:%d: Tag string extends to end-of-file"
        tpl = (fname,) + res['first_tag_string']
        if args.warning:
            sys.stderr.write(message % tpl)
    # Decide where the result goes: an explicit --output-file wins,
    # otherwise the source file itself is the target.
    output_file = args.output_file
    if not output_file:
        output_file = fpath
    indented_code = res['indented_code']
    indent_result = ''.join(indented_code)
    if indented_code == res['original_code'] and args.files:
        message = "File '%s' has already been formatted. Leaving it unchanged...\n"
        sys.stderr.write(message % fname)
        if output_file != fpath:
            with open(output_file, 'wb') as indented_file:
                indented_file.write(indent_result.encode('utf8'))
    else:
        if args.output_diff:
            # Show a diff of the changes instead of touching any file.
            diff = difflib.unified_diff(res['original_code'], indented_code, n=5)
            if args.colour_diff:
                colour_diff(diff)
            else:
                print(''.join(list(diff)))
        elif args.output:
            print(indent_result, end='')
        if args.modify:
            # write in binary mode to preserve the original line ending
            with open(output_file, 'wb') as indented_file:
                indented_file.write(indent_result.encode('utf8'))
def indent_files(arguments):
    """ indent_files(arguments)

    Top-level driver: indents stdin when no files were given, otherwise
    indents every file in the argument list.

    Note: if the parallel option is provided, the files will be read and
    processed in parallel
    """
    args = parse_args(arguments)
    if not args.files:
        # Indent from stdin
        code = sys.stdin.read()
        indent_result = indent_code(code, args)
        _post_indentation(indent_result)
        # Bug fix: previously control fell through after the stdin branch,
        # and with --parallel an empty multiprocessing pool was spawned
        # even though there were no files to process.
        return
    if args.parallel:
        import multiprocessing
        pool = multiprocessing.Pool(multiprocessing.cpu_count())
        pool.starmap(indent_file, [(fname, args) for fname in args.files])
    else:
        for fname in args.files:
            indent_file(fname, args)
def indent_file(fname, args):
    """Indent a single source file.

    Steps:
      1. Expand the user path and read the file contents (read_file())
      2. Guess the dialect from the file extension when none was given
      3. Indent the code (indent_code())
      4. Optionally back up the source file, then write or print the
         result (_post_indentation())
    """
    args = parse_args(args)
    fname = os.path.expanduser(fname)
    source = read_file(fname)
    if not args.dialect:
        # No dialect on the command line: infer one from the extension.
        if fname.endswith('.lisp'):
            args.dialect = 'lisp'
        elif fname.endswith('.lsp'):
            args.dialect = 'newlisp'
        elif re.search(".clj[sc]{0,1}$", fname):
            args.dialect = 'clojure'
        elif fname.endswith(('.ss', '.scm')):
            args.dialect = 'scheme'
        else:
            args.dialect = 'all'
    result = indent_code(source, args)
    if args.backup:
        # Create a backup file in the specified directory
        backup_source_file(fname, args)
    _post_indentation(result, fpath=fname)
def main():
    """ Entry point: forward the command-line arguments (minus the
    program name) to indent_files(). """
    indent_files(sys.argv[1:])
# Allow the module to be imported without side effects and run as a script.
if __name__ == '__main__':
    main()
| 38.994493 | 115 | 0.57877 |
83aba0fd5dc8afef0605fcfd0ba5992a35684f2e | 12,989 | py | Python | Speedo/plugins/animoji.py | aviskumar/speedo | 758e8ac1fdeeb0b72c3a57742032ca5c79f0b2fa | [
"BSD-3-Clause"
] | null | null | null | Speedo/plugins/animoji.py | aviskumar/speedo | 758e8ac1fdeeb0b72c3a57742032ca5c79f0b2fa | [
"BSD-3-Clause"
] | null | null | null | Speedo/plugins/animoji.py | aviskumar/speedo | 758e8ac1fdeeb0b72c3a57742032ca5c79f0b2fa | [
"BSD-3-Clause"
] | 3 | 2021-10-12T08:17:01.000Z | 2021-12-21T01:17:54.000Z | import asyncio
from collections import deque
from . import *
# Register the help entry for the `animoji' plugin.  Each add_command()
# call records a command name, its argument placeholder (None = the
# command takes no arguments) and the description shown in the help menu;
# the trailing add() commits the whole entry.
CmdHelp("animoji").add_command(
    'think', None, 'Use and see'
).add_command(
    'ccry', None, 'Use and see'
).add_command(
    'fap', None, 'Use and see'
).add_command(
    'lmao', None, 'Use and see'
).add_command(
    'nothappy', None, 'Use and see'
).add_command(
    'clock', None, 'Use and see'
).add_command(
    'muah', None, 'Use and see'
).add_command(
    'heart', None, 'Use and see'
).add_command(
    'gym', None, 'Use and see'
).add_command(
    'earth', None, 'Use and see'
).add_command(
    'moon', None, 'Use and see'
).add_command(
    'lovestory', None, 'Turu Lob'
).add_command(
    'smoon', None, 'Use and see'
).add_command(
    'tmoon', None, 'Use and see'
).add_command(
    'hart', None, 'Use and see'
).add_command(
    'anim', None, 'Use and see'
).add_command(
    'fuck', None, 'Use and see'
).add_command(
    'sux', None, 'Use and see'
).add_command(
    'kiss', None, 'Kya dekh rha h jhopdike.'
).add_command(
    'fnl', None, 'Use and See.'
).add_command(
    'monkey', None, 'Use and see.'
).add_command(
    'hand', None, 'Use and See.'
).add_command(
    'gsg', None, 'Use and See.'
).add_command(
    'theart', None, 'Hearts Animation.'
).add()
| 26.781443 | 81 | 0.528755 |
83abb74e341537b5ab6f003c11360924411e10b7 | 4,014 | py | Python | Chapter09/01-optimize-mlp-layers.py | KonstantinKlepikov/Hands-On-Genetic-Algorithms-with-Python | ee5e7c5f8274a7ce22c3b528f86fa2bb1695e686 | [
"MIT"
] | null | null | null | Chapter09/01-optimize-mlp-layers.py | KonstantinKlepikov/Hands-On-Genetic-Algorithms-with-Python | ee5e7c5f8274a7ce22c3b528f86fa2bb1695e686 | [
"MIT"
] | null | null | null | Chapter09/01-optimize-mlp-layers.py | KonstantinKlepikov/Hands-On-Genetic-Algorithms-with-Python | ee5e7c5f8274a7ce22c3b528f86fa2bb1695e686 | [
"MIT"
] | null | null | null | from deap import base
from deap import creator
from deap import tools
import random
import numpy
import mlp_layers_test
import elitism
# boundaries for layer size parameters:
# [layer_layer_1_size, hidden_layer_2_size, hidden_layer_3_size, hidden_layer_4_size]
BOUNDS_LOW = [ 5, -5, -10, -20]
BOUNDS_HIGH = [15, 10, 10, 10]
NUM_OF_PARAMS = len(BOUNDS_HIGH)
# Genetic Algorithm constants:
POPULATION_SIZE = 20
P_CROSSOVER = 0.9 # probability for crossover
P_MUTATION = 0.5 # probability for mutating an individual
MAX_GENERATIONS = 10
HALL_OF_FAME_SIZE = 3
CROWDING_FACTOR = 10.0 # crowding factor for crossover and mutation
# set the random seed:
RANDOM_SEED = 42
random.seed(RANDOM_SEED)
# create the classifier accuracy test class:
test = mlp_layers_test.MlpLayersTest(None)
toolbox = base.Toolbox()
# define a single objective, maximizing fitness strategy:
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
# create the Individual class based on list:
creator.create("Individual", list, fitness=creator.FitnessMax)
# define the layer_size_attributes individually:
for i in range(NUM_OF_PARAMS):
# "layer_size_attribute_0", "layer_size_attribute_1", ...
toolbox.register("layer_size_attribute_" + str(i),
random.uniform,
BOUNDS_LOW[i],
BOUNDS_HIGH[i])
# create a tuple containing an layer_size_attribute generator for each hidden layer:
layer_size_attributes = ()
for i in range(NUM_OF_PARAMS):
layer_size_attributes = layer_size_attributes + \
(toolbox.__getattribute__("layer_size_attribute_" + str(i)),)
# create the individual operator to fill up an Individual instance:
toolbox.register("individualCreator",
tools.initCycle,
creator.Individual,
layer_size_attributes,
n=1)
# create the population operator to generate a list of individuals:
toolbox.register("populationCreator",
tools.initRepeat,
list,
toolbox.individualCreator)
# fitness calculation
toolbox.register("evaluate", classificationAccuracy)
# genetic operators:mutFlipBit
# genetic operators:
toolbox.register("select", tools.selTournament, tournsize=2)
toolbox.register("mate",
tools.cxSimulatedBinaryBounded,
low=BOUNDS_LOW,
up=BOUNDS_HIGH,
eta=CROWDING_FACTOR)
toolbox.register("mutate",
tools.mutPolynomialBounded,
low=BOUNDS_LOW,
up=BOUNDS_HIGH,
eta=CROWDING_FACTOR,
indpb=1.0/NUM_OF_PARAMS)
# Genetic Algorithm flow:
if __name__ == "__main__":
main() | 31.359375 | 89 | 0.623069 |
83abd40caac456af21954f1d6b702333668a6968 | 14,410 | py | Python | tokenizer/state_table.py | xxronvel/foobar | 0e4ca414a532eaa69803888a65ac1a2e0114183e | [
"MIT"
] | 1 | 2016-04-28T02:20:59.000Z | 2016-04-28T02:20:59.000Z | tokenizer/state_table.py | xxronvel/foobar | 0e4ca414a532eaa69803888a65ac1a2e0114183e | [
"MIT"
] | null | null | null | tokenizer/state_table.py | xxronvel/foobar | 0e4ca414a532eaa69803888a65ac1a2e0114183e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2016 Aarn Abraham Velasco Alvarez
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
input = {
"alpha" : 0,
"numeric" : 1,
"<" : 2,
"%" : 3,
"?" : 4,
"/" : 5,
"=" : 6,
">" : 7,
"#" : 8,
"*" : 9,
"+" : 10,
"-" : 11,
"." : 12,
"'" : 13,
"\"" : 14,
"\\" : 15,
"$" : 16,
"_" : 17,
"!" : 18,
"`" : 19 ,
"&" : 20,
"|" : 21,
":" : 22,
"\n" : 23,
" " : 24
}
errors = {
1 : "Parse error, unexpected input",
2 : "Parse error, unexpected '</'",
3 : "Parse error, expecting '>'",
4 : "Unterminated comment",
5 : "Parse error, expecting \"'\"",
6 : "Parse error, expecting '`'",
7 : "Parse error, expecting variable (T_VARIABLE) or '{' or '$'",
8 : "Unterminated tag",
9 : "Parse error, expecting '.'"
}
#TODO Cdigo embebido
table = {
0 : ((60, 28, 1 , 3 , 2 , 21, 42, 44, 20, 41, 26, 27, 29, 34, 35, 0 , 58, 60, 43, 45, 46, 47, 63, 0 , 0 , 0 ), False, 0 , 1),
1 : ((-3, 0 , 8 , 5 , 6 , 4 , 10, 9 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 19, 1),
2 : ((0 , 0 , 0 , 0 , 11, 0 , 0 , 12, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 11, 0 , 0 , 0 ), True , 105,1),
3 : ((0 , 0 , 0 , 0 , 0 , 0 , 13, 14, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 16, 1),
4 : ((15, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), False, 0 , 2),
5 : ((0 , 0 , 0 , 0 , 0 , 0 , 16, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 5 , 1),
6 : ((18, 0 , 0 , 0 , 0 , 0 , 17, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 2 , 1),
7 : ((7 , 7 , 0 , 7 , 7 , 7 , 7 , 72, 7 , 7 , 7 , 7 , 7 , 65, 66, 7 , 7 , 7 , 7 , 7 , 7 , 7 , 7 , 0 , 7 , 7 ), False, 0 , 8),
8 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 18, 1),
9 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 30, 1),
10 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 11, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 19, 1),
11 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 19, 1),
12 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 7 , 1),
13 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 17, 1),
14 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 8 , 1),
15 : ((15, 15, 0 , 0 , 0 , 0 , 0 , 19, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), False, 0 , 3),
16 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 6 , 1),
17 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 3 , 1),
18 : ((18, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 1 , 1),
19 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 9 , 1),
20 : ((20, 20, 20, -1, -1, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 0 , 20, 20), True , 26, 1),
21 : ((0 , 0 , 0 , 0 , 0 , 20, 23, 0 , 0 , 22, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 16, 1),
22 : ((22, 22, 22, 22, 22, 22, 22, 22, 22, 24, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22), False, 0 , 4),
23 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 17, 1),
24 : ((22, 22, 22, 22, 22, 25, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22), False, 0 , 4),
25 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 26, 1),
26 : ((0 , 28, 0 , 0 , 0 , 0 , 30, 0 , 0 , 0 , 31, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 29, 1),
27 : ((0 , 28, 0 , 0 , 0 , 0 , 30, 0 , 0 , 0 , 0 , 31, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 25, 1),
28 : ((0 , 28, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 32, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 11, 1),
29 : ((0 , 33, 0 , 0 , 0 , 0 , 30, 0 , 0 , 0 , 0 , 0 , 61, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 28, 1),
30 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 17, 1),
31 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 22, 1),
32 : ((0 , 33, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 11, 1),
33 : ((0 , 33, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 11, 1),
34 : ((34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 40, 34, 36, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34), False, 0 , 5),
35 : ((35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 40, 37, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35), False, 0 , 5),
36 : ((34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 38, 34, 38, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34), False, 0 , 5),
37 : ((35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 39, 39, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35), False, 0 , 5),
38 : ((34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 40, 34, 36, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34), False, 0 , 5),
39 : ((35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 40, 37, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35), False, 0 , 5),
40 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 12, 1),
41 : ((0 , 0 , 0 , 0 , 0 , 0 , 49, 0 , 0 , 48, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 16, 1),
42 : ((0 , 0 , 0 , 0 , 0 , 0 , 50, 49, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 106, 1),
43 : ((0 , 0 , 0 , 0 , 0 , 0 , 50, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 24, 1),
44 : ((0 , 0 , 0 , 0 , 0 , 0 , 51, 52, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 19, 1),
45 : ((45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 53, 45, 45, 45, 57, 45, 45, 45, 45, 45, 45), False, 0 , 6),
46 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 54, 0 , 0 , 0 , 0 , 0 ), True , 18, 1),
47 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 54, 0 , 0 , 0 , 0 ), True , 18, 1),
48 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 16, 1),
49 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 17, 1),
50 : ((0 , 0 , 0 , 0 , 0 , 0 , 55, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 30, 1),
51 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 19, 1),
52 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 18, 1),
53 : ((45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 56, 45, 45, 45, 56, 45, 45, 45, 45, 45, 45), False, 0 , 6),
54 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 23, 1),
55 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 30, 1),
56 : ((45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 53, 45, 45, 45, 57, 45, 45, 45, 45, 45, 45), False, 0 , 6),
57 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 21, 1),
58 : ((59, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 58, 59, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), False, 0 , 7),
59 : ((59, 59, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 59, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 14, 1),
60 : ((60, 60, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 60, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 15, 1),
61 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 62, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), False, 0 , 9),
62 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 103, 1),
63 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 64, 0 , 0 , 0 ), True , 107, 1),
64 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 104, 1),
65 : ((65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 7 , 65, 67, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65), False, 0 , 5),
66 : ((66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 7 , 69, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66), False, 0 , 5),
67 : ((65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 70, 65, 70, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65), False, 0 , 5),
69 : ((66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 71, 71, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66), False, 0 , 5),
70 : ((65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 7 , 65, 67, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65), False, 0 , 5),
71 : ((66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 7 , 69, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66), False, 0 , 5),
72 : ((-11, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 4 , 1),
-1 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , -2, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), False, 100, 1),
-2 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 0, 1),
-3 : ((-3, -3, 0 , -3, -3, -3, -3, -10, -3, -3, -3, -3, -3, -4, -5, -3, -3, -3, -3, -3, -3, -3, -3, 0, -3, -3), True , 0 , 1),
-4 : ((-4, -4, -4, -4, -4, -4, -4, -4 , -4, -4, -4, -4, -4, -3, -4, -6, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4), True , 0 , 1),
-5 : ((-5, -5, -5, -5, -5, -5, -5, -5 , -5, -5, -5, -5, -5, -5, -3, -7, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5), True , 0 , 1),
-6 : ((-4, -4, -4, -4, -4, -4, -4, -4 , -4, -4, -4, -4, -4, -8, -4, -8, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4), True , 0 , 1),
-7 : ((-5, -5, -5, -5, -5, -5, -5, -5 , -5, -5, -5, -5, -5, -5, -9, -9, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5), True , 0 , 1),
-8 : ((-4, -4, -4, -4, -4, -4, -4, -4 , -4, -4, -4, -4, -4, -3, -4, -6, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4), True , 0 , 1),
-9 : ((-5, -5, -5, -5, -5, -5, -5, -5 , -5, -5, -5, -5, -5, -5, -3, -7, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5), True , 0 , 1),
-10 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), False, 101, 1),
-11 : ((-11,-11,-12,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11), True , 0 , 1),
-12 : ((-14, 0 , 0 , 0 , 0 ,-13, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 0 , 1),
-13 : ((-21, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 0 , 1),
-14 : ((-14,-14, 0 ,-14,-14,-14,-14,-22,-14,-14,-14,-14,-14,-15,-16,-14,-14,-14,-14,-14,-14,-14,-14, 0 ,-14,-14), True , 0 , 1),
-15 : ((-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-14,-15,-17,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15), True , 0 , 1),
-16 : ((-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-14,-18,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16), True , 0 , 1),
-17 : ((-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-19,-15,-19,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15), True , 0 , 1),
-18 : ((-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-20,-20,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16), True , 0 , 1),
-19 : ((-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-14,-15,-17,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15), True , 0 , 1),
-20 : ((-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-14,-18,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16), True , 0 , 1),
-21 : ((-21,-21, 0 , 0 , 0 , 0 , 0 ,-22, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 0 , 1),
-22 : (( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 102, 1),
100: ((0 , 0 , 0 , 20, 20, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 26, 1),
101: ((7 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 19, 1),
102: ((103, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), False, 0 , 1),
103: ((103, 103, 0, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103), True , 126, 1)
}
| 83.294798 | 151 | 0.373352 |
83ade6c082d1004a672714e19137f8f4cc8ec685 | 748 | py | Python | notifications_utils/__init__.py | cds-snc/notifier-utils | c3a205ac4381312fe1884a39ffafa7ffb862736f | [
"MIT"
] | 3 | 2020-04-29T17:13:43.000Z | 2020-12-04T21:08:33.000Z | notifications_utils/__init__.py | cds-snc/notifier-utils | c3a205ac4381312fe1884a39ffafa7ffb862736f | [
"MIT"
] | 21 | 2020-04-16T12:29:46.000Z | 2022-02-28T17:17:15.000Z | notifications_utils/__init__.py | cds-snc/notifier-utils | c3a205ac4381312fe1884a39ffafa7ffb862736f | [
"MIT"
] | 4 | 2020-02-21T20:20:00.000Z | 2021-02-11T19:00:59.000Z | import re
SMS_CHAR_COUNT_LIMIT = 612 # 153 * 4
# regexes for use in recipients.validate_email_address.
# Valid characters taken from https://en.wikipedia.org/wiki/Email_address#Local-part
# Note: Normal apostrophe eg `Firstname-o'surname@domain.com` is allowed.
hostname_part = re.compile(r"^(xn-|[a-z0-9]+)(-[a-z0-9]+)*$", re.IGNORECASE)
tld_part = re.compile(r"^([a-z]{2,63}|xn--([a-z0-9]+-)*[a-z0-9]+)$", re.IGNORECASE)
VALID_LOCAL_CHARS = r"a-zA-Z-0-9.!#$%&'*+/=?^_`{|}~\-"
EMAIL_REGEX_PATTERN = r"^[{}]+@([^.@][^@\s]+)$".format(VALID_LOCAL_CHARS)
email_with_smart_quotes_regex = re.compile(
# matches wider than an email - everything between an at sign and the nearest whitespace
r"(^|\s)\S+@\S+(\s|$)",
flags=re.MULTILINE,
)
| 44 | 92 | 0.667112 |
83af8ba0d0f4e817ed4ef6eadece62ddc41fd7db | 1,230 | py | Python | respondd/Cache.py | FreiFunkMuenster/py-respondd | 4b59b0fa2418ed021abe2dca5906b8290e4600d0 | [
"MIT"
] | null | null | null | respondd/Cache.py | FreiFunkMuenster/py-respondd | 4b59b0fa2418ed021abe2dca5906b8290e4600d0 | [
"MIT"
] | null | null | null | respondd/Cache.py | FreiFunkMuenster/py-respondd | 4b59b0fa2418ed021abe2dca5906b8290e4600d0 | [
"MIT"
] | null | null | null | import time | 22.363636 | 66 | 0.652846 |
83b09d2ad07562da3b8e8e789cc7815800d89928 | 1,338 | py | Python | jsonclasses_cli/package/swift/main_program_content.py | forever9717/jsonclasses-cli | b20d10cdf2d6c970a879e2a64f31555d4d808829 | [
"MIT"
] | null | null | null | jsonclasses_cli/package/swift/main_program_content.py | forever9717/jsonclasses-cli | b20d10cdf2d6c970a879e2a64f31555d4d808829 | [
"MIT"
] | null | null | null | jsonclasses_cli/package/swift/main_program_content.py | forever9717/jsonclasses-cli | b20d10cdf2d6c970a879e2a64f31555d4d808829 | [
"MIT"
] | null | null | null | from jsonclasses.cgraph import CGraph
from .import_lines import import_lines
from .string_query import string_query
from .int_query import int_query
from .float_query import float_query
from .bool_query import bool_query
from .sort_order import sort_order
from .data_enum import data_enum
from .data_class import data_class
from .session_items import session_items
from .session import session
from .response import response_struct
from .user_default import user_default
from .session_manager import session_manager
from .sign_out import sign_out
from .request_manager import request_manager
from ...utils.join_lines import join_lines
| 34.307692 | 64 | 0.724963 |
83b0ae650bd55397213c23d819dd2927624d8665 | 121 | py | Python | common/utils/__init__.py | jl1990/alpha-zero-general | 6a1549f9cd1b2ebdffee30f8de1be9cbefecd5f4 | [
"MIT"
] | null | null | null | common/utils/__init__.py | jl1990/alpha-zero-general | 6a1549f9cd1b2ebdffee30f8de1be9cbefecd5f4 | [
"MIT"
] | null | null | null | common/utils/__init__.py | jl1990/alpha-zero-general | 6a1549f9cd1b2ebdffee30f8de1be9cbefecd5f4 | [
"MIT"
] | null | null | null | """Useful utils
"""
from .eval import *
from .misc import *
# progress bar
from .progress.progress.bar import Bar as Bar
| 17.285714 | 45 | 0.719008 |
83b152d0790dab9900fa13fb39789949a2ecb7fe | 6,664 | py | Python | examples/move_presets.py | crazy-djactor/OnVifControlCam | 36b1d70b4c025b1bce8ed8ddc1d95c04fe298e1d | [
"MIT"
] | null | null | null | examples/move_presets.py | crazy-djactor/OnVifControlCam | 36b1d70b4c025b1bce8ed8ddc1d95c04fe298e1d | [
"MIT"
] | null | null | null | examples/move_presets.py | crazy-djactor/OnVifControlCam | 36b1d70b4c025b1bce8ed8ddc1d95c04fe298e1d | [
"MIT"
] | null | null | null | import zeep
import asyncio, sys
from onvif import ONVIFCamera
import cv2
import numpy as np
import urllib
from urllib.request import urlopen
IP="192.168.2.22" # Camera IP address
PORT=80 # Port
USER="admin" # Username
PASS="C0nc3ll0M4r1n" # Password
XMAX = 1
XMIN = -1
YMAX = 1
YMIN = -1
moverequest = None
ptz = None
active = False
zeep.xsd.simple.AnySimpleType.pythonvalue = zeep_pythonvalue
if __name__ == '__main__':
# url_to_image('http://192.168.1.108/onvifsnapshot/media_service/snapshot?channel=1&subtype=0')
# setup_move()
camera = CameraController()
camera.get_presets()
camera.get_current_preset() | 36.415301 | 144 | 0.620048 |
83b20a373bfc0ad0b76d049c2ba241c013b10033 | 737 | py | Python | utils.py | OttrOne/suivi | 9e53a39b0f50054b89cb960eb9055fd0a28a5ebf | [
"MIT"
] | null | null | null | utils.py | OttrOne/suivi | 9e53a39b0f50054b89cb960eb9055fd0a28a5ebf | [
"MIT"
] | 2 | 2022-01-11T15:50:04.000Z | 2022-01-13T01:53:53.000Z | utils.py | OttrOne/suivi | 9e53a39b0f50054b89cb960eb9055fd0a28a5ebf | [
"MIT"
] | null | null | null | from string import ascii_lowercase, digits
from random import choice
from re import compile
| 28.346154 | 72 | 0.58616 |
83b36d8e12e62551ee0f6bc0e1772654ff4d0f33 | 232 | py | Python | comment/admin.py | Samurai-XHe/myblog | c9e182b84c3cb06b3207e7359f0a4d352c28d043 | [
"MIT"
] | 1 | 2018-09-25T09:11:17.000Z | 2018-09-25T09:11:17.000Z | comment/admin.py | Samurai-XHe/myblog | c9e182b84c3cb06b3207e7359f0a4d352c28d043 | [
"MIT"
] | null | null | null | comment/admin.py | Samurai-XHe/myblog | c9e182b84c3cb06b3207e7359f0a4d352c28d043 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Comment
| 29 | 105 | 0.728448 |
83b3965c73ce131b836c28e365aa820a33396d8f | 2,602 | py | Python | DiSPy/core/path.py | munrojm/DiSPy | c1ae9e213d16bfd098b362e7d54d997cd95f8919 | [
"MIT"
] | 19 | 2018-10-05T01:49:36.000Z | 2021-11-23T13:35:22.000Z | DiSPy/core/path.py | munrojm/DiSPy | c1ae9e213d16bfd098b362e7d54d997cd95f8919 | [
"MIT"
] | 1 | 2019-03-27T20:13:08.000Z | 2019-03-28T23:22:22.000Z | DiSPy/core/path.py | munrojm/DiSPy | c1ae9e213d16bfd098b362e7d54d997cd95f8919 | [
"MIT"
] | 6 | 2019-06-05T21:41:16.000Z | 2021-04-07T09:23:42.000Z | import numpy as np
from typing import Dict, List
from monty.json import MSONable
from pymatgen.core.structure import Structure
from pymatgen.symmetry.groups import SymmOp
from DiSPy.core.dg import DistortionGroup
from DiSPy.core.vecutils import closewrapped
# -- Path object and its attributes
| 30.97619 | 112 | 0.593774 |
83b4ad592dcb04cd2de490a6b0d70c2d9d26c009 | 2,815 | py | Python | photos/models.py | kimutaimeshack/Instagram_clone | bdb035087f85fe055da29634cf7bc5dcb843897f | [
"MIT"
] | null | null | null | photos/models.py | kimutaimeshack/Instagram_clone | bdb035087f85fe055da29634cf7bc5dcb843897f | [
"MIT"
] | null | null | null | photos/models.py | kimutaimeshack/Instagram_clone | bdb035087f85fe055da29634cf7bc5dcb843897f | [
"MIT"
] | null | null | null | from django.db import models
import datetime as dt
from django.urls import reverse
# Create your models here.
from cloudinary.models import CloudinaryField
# class Post(models.Model):
# title = models.CharField(max_length=200)
# author = models.ForeignKey('auth.User',
# on_delete=models.CASCADE,)
# body = models.TextField()
# def __str__(self):
# return self.title | 29.946809 | 79 | 0.671403 |
83b730a44041eeddf60233b1f8b68fb907f48e86 | 2,386 | py | Python | tests/unit/test_product.py | jeantardelli/architecture-patterns-with-python | d48c7d6d4a44073b815c7e6770e44cf2e231e35b | [
"MIT"
] | 1 | 2021-04-07T18:04:56.000Z | 2021-04-07T18:04:56.000Z | tests/unit/test_product.py | jeantardelli/architecture-patterns-with-python | d48c7d6d4a44073b815c7e6770e44cf2e231e35b | [
"MIT"
] | null | null | null | tests/unit/test_product.py | jeantardelli/architecture-patterns-with-python | d48c7d6d4a44073b815c7e6770e44cf2e231e35b | [
"MIT"
] | null | null | null | from datetime import date, timedelta
from allocation.domain import events
from allocation.domain.model import Product, OrderLine, Batch
today = date.today()
tomorrow = today + timedelta(days=1)
later = tomorrow + timedelta(days=10)
| 39.114754 | 86 | 0.720034 |
83b9a7791d97770f25fe0980d7aaeedc83bafde6 | 6,129 | py | Python | tests/unit/states/test_grafana.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | 1 | 2020-03-31T22:51:16.000Z | 2020-03-31T22:51:16.000Z | tests/unit/states/test_grafana.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/unit/states/test_grafana.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-09-30T07:00:01.000Z | 2021-09-30T07:00:01.000Z | # -*- coding: utf-8 -*-
'''
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import (
MagicMock,
patch
)
# Import Salt Libs
import salt.utils.json
import salt.states.grafana as grafana
from salt.exceptions import SaltInvocationError
| 44.093525 | 81 | 0.498287 |
83b9dae35ff849de97a8ab5c1b5b328eee4bf2a8 | 517 | py | Python | 08.Graph/Kruskal.py | SP2021-2/Algorithm | 2e629eb5234212fad8bbc11491aad068e5783780 | [
"MIT"
] | 1 | 2021-11-21T06:03:06.000Z | 2021-11-21T06:03:06.000Z | 08.Graph/Kruskal.py | SP2021-2/Algorithm | 2e629eb5234212fad8bbc11491aad068e5783780 | [
"MIT"
] | 2 | 2021-10-13T07:21:09.000Z | 2021-11-14T13:53:08.000Z | 08.Graph/Kruskal.py | SP2021-2/Algorithm | 2e629eb5234212fad8bbc11491aad068e5783780 | [
"MIT"
] | null | null | null | # 5 7
# 0 1 1
# 0 2 3
# 1 2 3
# 1 3 6
# 2 3 4
# 2 4 2
# 3 4 5
import sys
import heapq as hq
N, M = map(int, sys.stdin.readline().split(" "))
W = [[float('inf')] * N for _ in range(N)]
h = []
for _ in range(M):
i, j, w = map(int, sys.stdin.readline().split(" "))
hq.heappush(h, (w, i, j))
print(h)
print(Kruskal(h, 0)) | 16.15625 | 55 | 0.537718 |
83babd001889716c3b9b2382b50058698f6c9529 | 4,701 | py | Python | resources.py | kordimsan/FreeWorker-Api | f4566d2d500328725c88d5fc5df7a4763cb4c185 | [
"MIT"
] | null | null | null | resources.py | kordimsan/FreeWorker-Api | f4566d2d500328725c88d5fc5df7a4763cb4c185 | [
"MIT"
] | null | null | null | resources.py | kordimsan/FreeWorker-Api | f4566d2d500328725c88d5fc5df7a4763cb4c185 | [
"MIT"
] | null | null | null | #from flask_restful import Resource, reqparse
from flask_restplus import Resource, reqparse,fields
from models import UserModel, RevokedTokenModel
from flask_jwt_extended import (create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt)
from run import api
parser = reqparse.RequestParser()
parser.add_argument('username', help = 'This field cannot be blank', required = True)
parser.add_argument('password', help = 'This field cannot be blank', required = True)
param = api.model('User registration', {'username' : fields.String('username'), 'password' : fields.String('password')})
param = api.model('User login', {'username' : fields.String('username'), 'password' : fields.String('password')})
| 34.313869 | 147 | 0.605829 |
83bb637db13a5d4678648b8d28c8559126ac4919 | 3,176 | py | Python | archivist/parser.py | Serhiy1/archivist-python | 70c7acf29eecd303bb1517d3636663d83f36cc2c | [
"MIT"
] | 2 | 2021-05-04T15:12:37.000Z | 2021-09-08T10:04:41.000Z | archivist/parser.py | Serhiy1/archivist-python | 70c7acf29eecd303bb1517d3636663d83f36cc2c | [
"MIT"
] | 35 | 2021-05-04T12:39:26.000Z | 2022-03-28T09:20:19.000Z | archivist/parser.py | Serhiy1/archivist-python | 70c7acf29eecd303bb1517d3636663d83f36cc2c | [
"MIT"
] | 6 | 2021-04-28T14:49:48.000Z | 2022-01-07T15:29:05.000Z | """common parser argument
"""
# pylint: disable=missing-docstring
# pylint: disable=too-few-public-methods
import argparse
from enum import Enum
import logging
from sys import exit as sys_exit
from . import archivist
from .logger import set_logger
from .proof_mechanism import ProofMechanism
LOGGER = logging.getLogger(__name__)
# from https://stackoverflow.com/questions/43968006/support-for-enum-arguments-in-argparse
def common_parser(description):
"""Construct parser with security option for token/auth authentication"""
parser = argparse.ArgumentParser(
description=description,
)
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
action="store_true",
default=False,
help="print verbose debugging",
)
parser.add_argument(
"-u",
"--url",
type=str,
dest="url",
action="store",
default="https://rkvst.poc.jitsuin.io",
help="location of Archivist service",
)
parser.add_argument(
"-p",
"--proof-mechanism",
type=ProofMechanism,
action=EnumAction,
dest="proof_mechanism",
default=ProofMechanism.SIMPLE_HASH,
help="mechanism for proving the evidence for events on the Asset",
)
security = parser.add_mutually_exclusive_group(required=True)
security.add_argument(
"-t",
"--auth-token",
type=str,
dest="auth_token_file",
action="store",
default=".auth_token",
reqyuired=True,
help="FILE containing API authentication token",
)
return parser, security
| 25.821138 | 90 | 0.630668 |
83bc85a7d09d10f1f239ce0341b95393b82459b8 | 6,635 | py | Python | skytap/models/UserData.py | mapledyne/skytap | c7fb43e7d2b3e97c619948a9e5b3f03472b5cd45 | [
"MIT"
] | 3 | 2019-04-17T13:07:30.000Z | 2021-09-09T22:01:14.000Z | skytap/models/UserData.py | FulcrumIT/skytap | c7fb43e7d2b3e97c619948a9e5b3f03472b5cd45 | [
"MIT"
] | 10 | 2016-11-02T20:48:38.000Z | 2021-09-15T15:29:34.000Z | skytap/models/UserData.py | FulcrumIT/skytap | c7fb43e7d2b3e97c619948a9e5b3f03472b5cd45 | [
"MIT"
] | 3 | 2016-03-03T07:25:13.000Z | 2016-08-30T15:33:03.000Z | """Support for the UserData resource in Skytap.
Specifically, this is for custom ('user data') that's applied to an environment
or VM. This data can be text or, in the context of using it with this Skytap
script, it can also be JSON or YAML and will then be re-parsed.
This allows users to put data into a VM user data block and it'll filter down
and be accessible to this script. We use this to expose variables to the user
like shutdown time and other automation pieces.
"""
from skytap.framework.ApiClient import ApiClient
import skytap.framework.Utils as Utils
from skytap.models.SkytapResource import SkytapResource
| 29.753363 | 79 | 0.505803 |
83bdd2c382d1213ade5ce2c23b12fd693e9a4c32 | 700 | py | Python | example.py | six-two/py_derive_cmd | ae821f16a057a809166b287ab9f203a8cf4b34b3 | [
"MIT"
] | null | null | null | example.py | six-two/py_derive_cmd | ae821f16a057a809166b287ab9f203a8cf4b34b3 | [
"MIT"
] | null | null | null | example.py | six-two/py_derive_cmd | ae821f16a057a809166b287ab9f203a8cf4b34b3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Demo driver for py_derive_cmd: wires one command into a cmd.Cmd-based
# shell and prepares it for interactive use.
# pylint: disable=unused-wildcard-import
from py_derive_cmd import *
import cmd
# NOTE(review): MyCmd and test_register are not defined in this snippet;
# presumably they come from the wildcard import or were elided from the
# original file -- confirm before running.
s = Settings(MyCmd, print_warnings=False)
# Register the command under the aliases 'registered' and 'r';
# raw_arg=True passes the argument string through unparsed.
CommandInfo(s, test_register, ['registered', 'r'], 'Test for register', raw_arg=True).register()
shell = MyCmd()
shell.cmdloop() | 25 | 96 | 0.708571 |
83be1a3a3ee334b7ce8506d005b0c751dcb0f57c | 2,474 | py | Python | tf_quant_finance/datetime/constants.py | slowy07/tf-quant-finance | 0976f720fb58a2d7bfd863640c12a2425cd2f94f | [
"Apache-2.0"
] | 3,138 | 2019-07-24T21:43:17.000Z | 2022-03-30T12:11:09.000Z | tf_quant_finance/datetime/constants.py | Aarif1430/tf-quant-finance | 9372eb1ddf2b48cb1a3d4283bc67a10647ddc7a6 | [
"Apache-2.0"
] | 63 | 2019-09-07T19:16:03.000Z | 2022-03-29T19:29:40.000Z | tf_quant_finance/datetime/constants.py | Aarif1430/tf-quant-finance | 9372eb1ddf2b48cb1a3d4283bc67a10647ddc7a6 | [
"Apache-2.0"
] | 423 | 2019-07-26T21:28:05.000Z | 2022-03-26T13:07:44.000Z | # Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Date-related constants and enums."""
import enum
# TODO(b/148011715): add NEAREST convention.
| 26.891304 | 80 | 0.696847 |
83bf94a78ac2eb29dfd1c2b50e991146823fcf6e | 2,345 | py | Python | generate_trajectories.py | keuntaeklee/pytorch-PPUU | 0ba8c953df9cdb1e9937e301ed3384ac6b66ea73 | [
"MIT"
] | 159 | 2019-01-23T07:17:36.000Z | 2022-03-29T14:33:31.000Z | generate_trajectories.py | keuntaeklee/pytorch-PPUU | 0ba8c953df9cdb1e9937e301ed3384ac6b66ea73 | [
"MIT"
] | 44 | 2019-04-29T15:11:44.000Z | 2022-02-21T18:28:46.000Z | generate_trajectories.py | keuntaeklee/pytorch-PPUU | 0ba8c953df9cdb1e9937e301ed3384ac6b66ea73 | [
"MIT"
] | 61 | 2019-01-23T12:31:54.000Z | 2022-03-07T09:25:20.000Z | import argparse, pdb
# Generates traffic trajectory data (state/action/cost, see -data_dir)
# by stepping one of the registered simulator environments until its
# episode ends.
import gym
import numpy as np
import os
import pickle
import random
import torch
import scipy.misc
from gym.envs.registration import register
# Command-line options; integer 0/1 flags are converted to booleans below.
parser = argparse.ArgumentParser()
parser.add_argument('-display', type=int, default=0)
parser.add_argument('-seed', type=int, default=1)
parser.add_argument('-lanes', type=int, default=3)
parser.add_argument('-traffic_rate', type=int, default=15)
parser.add_argument('-state_image', type=int, default=1)
parser.add_argument('-save_images', type=int, default=0)
parser.add_argument('-store', type=int, default=1)
parser.add_argument('-data_dir', type=str, default='traffic-data/state-action-cost/')
parser.add_argument('-fps', type=int, default=30)
parser.add_argument('-time_slot', type=int, default=0)
parser.add_argument('-map', type=str, default='i80', choices={'ai', 'i80', 'us101', 'lanker', 'peach'})
parser.add_argument('-delta_t', type=float, default=0.1)
opt = parser.parse_args()
opt.state_image = (opt.state_image == 1)
opt.store = (opt.store == 1)
# Seed every RNG in play so generated traffic is reproducible.
random.seed(opt.seed)
np.random.seed(opt.seed)
torch.manual_seed(opt.seed)
os.system("mkdir -p " + opt.data_dir)
# Shared constructor arguments for every registered environment below.
kwargs = dict(
    display=opt.display,
    state_image=opt.state_image,
    store=opt.store,
    fps=opt.fps,
    nb_lanes=opt.lanes,
    traffic_rate=opt.traffic_rate,
    data_dir=opt.data_dir,
    delta_t=opt.delta_t,
)
# Register one gym environment per supported map.
register(
    id='Traffic-v0',
    entry_point='traffic_gym:Simulator',
    kwargs=kwargs
)
register(
    id='I-80-v0',
    entry_point='map_i80:I80',
    kwargs=kwargs
)
gym.envs.registration.register(
    id='US-101-v0',
    entry_point='map_us101:US101',
    kwargs=kwargs,
)
gym.envs.registration.register(
    id='Lankershim-v0',
    entry_point='map_lanker:Lankershim',
    kwargs=kwargs,
)
gym.envs.registration.register(
    id='Peachtree-v0',
    entry_point='map_peach:Peachtree',
    kwargs=kwargs,
)
# Maps the -map option onto the environment IDs registered above.
env_names = {
    'ai': 'Traffic-v0',
    'i80': 'I-80-v0',
    'us101': 'US-101-v0',
    'lanker': 'Lankershim-v0',
    'peach': 'Peachtree-v0',
}
print('Building the environment (loading data, if any)')
env = gym.make(env_names[opt.map])
env.reset(frame=0, time_slot=opt.time_slot)
# Step until the episode finishes. NOTE(review): these custom envs step
# without an action argument, and with store=True the env presumably
# persists the trajectories itself -- the returned values are unused here.
done = False
while not done:
    observation, reward, done, info = env.step()
    env.render()
print(f'Data generation for <{opt.map}, time slot {opt.time_slot}> completed')
| 24.946809 | 103 | 0.704478 |
83c0b7884ac12f94ceaeb582cc3c5f5cebb5a227 | 999 | py | Python | main.py | mvazifeh/gridart | 78c01d6e660ca9c61f1220e102975ca632a2af6b | [
"MIT"
] | null | null | null | main.py | mvazifeh/gridart | 78c01d6e660ca9c61f1220e102975ca632a2af6b | [
"MIT"
] | null | null | null | main.py | mvazifeh/gridart | 78c01d6e660ca9c61f1220e102975ca632a2af6b | [
"MIT"
] | null | null | null | import matplotlib.pylab as plt
# Procedural "grid art": builds a mu x mu mosaic of N x N tiles, each
# grown by repeated intensity-weighted random deposition, blurred, and
# finally displayed with matplotlib.
import numpy as np
import random
from scipy.ndimage import gaussian_filter
mu =9           # tiles per side of the final mosaic
N = 50          # pixels per tile side
k = 10          # deposition points sampled per inner iteration
eta =10         # max random jitter applied to each sampled point
sigma = 2       # Gaussian blur strength applied to each finished tile
p0 = 0.5        # probability threshold used by the optional inversion
inverse_random = False  # when True, a tile may be value-inverted at random
L = range(N*N)  # flat cell indices of one tile, sampled by weight below
Q = np.zeros((N*mu,N*mu))  # the assembled mosaic image
for o in range(mu*mu):
    print(o)
    F = 1000*k
    a = np.ones((N,N))
    for k_ in range(1000):
        # Sample k cells proportionally to their current intensity...
        linear_idx = random.choices(L, weights=a.ravel()/float(a.sum()), k = k)
        x, y = np.unravel_index(linear_idx, a.shape)
        # ...jitter them, and drop any that fall outside the tile.
        x += np.random.randint(-eta,eta,k)
        y += np.random.randint(-eta,eta,k)
        cond = (x<0) | (x>=N) | (y<0) | (y>=N)
        x_ = np.delete(x, np.where(cond))
        y_ = np.delete(y, np.where(cond))
        a[x_,y_]+=F
    a = gaussian_filter(a,sigma =sigma)
    if np.random.random()>p0 and inverse_random:
        a = a.max()-a
    # Place the finished tile into its slot of the mosaic.
    Mx,My = np.unravel_index(o,(mu,mu))
    Q[Mx*N:(Mx+1)*N,My*N:(My+1)*N] = a
fig,ax = plt.subplots(1,1,figsize = (20,20))
plt.imshow(Q, interpolation='nearest')
plt.axis('off')
| 24.975 | 79 | 0.573574 |
83c0d18d58ec56ff811ed70776d16216d48d95ed | 9,841 | py | Python | fixture/contact.py | ruslankl9/python_training | 7bcaf2606a80935a4a0c458af4e6a078f241fb38 | [
"Apache-2.0"
] | null | null | null | fixture/contact.py | ruslankl9/python_training | 7bcaf2606a80935a4a0c458af4e6a078f241fb38 | [
"Apache-2.0"
] | null | null | null | fixture/contact.py | ruslankl9/python_training | 7bcaf2606a80935a4a0c458af4e6a078f241fb38 | [
"Apache-2.0"
] | null | null | null | from model.contact import Contact
import re | 43.737778 | 117 | 0.648206 |
83c1d004633b6b337c6d2bc2c9a3fefc61d57d42 | 789 | py | Python | setup.py | nakagami/pure-pyawabi | 5ffafcaa381727af7f84013cf036a4e8f7dd51da | [
"MIT"
] | 1 | 2021-12-13T11:29:04.000Z | 2021-12-13T11:29:04.000Z | setup.py | nakagami/pure-pyawabi | 5ffafcaa381727af7f84013cf036a4e8f7dd51da | [
"MIT"
] | null | null | null | setup.py | nakagami/pure-pyawabi | 5ffafcaa381727af7f84013cf036a4e8f7dd51da | [
"MIT"
] | null | null | null | from setuptools import setup
# Packaging metadata for the pure-pyawabi distribution (setuptools).
setup(
    name="pure-pyawabi",
    version="0.2.4",
    description='A morphological analyzer awabi clone',
    # README.md doubles as the PyPI long description.
    long_description=open('README.md', encoding='utf-8').read(),
    long_description_content_type="text/markdown",
    url='https://github.com/nakagami/pure-pyawabi/',
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.8",
        "Operating System :: POSIX",
    ],
    keywords=['MeCab'],
    license="MIT",
    author='Hajime Nakagami',
    author_email='nakagami@gmail.com',
    test_suite="tests",
    packages=['pyawabi'],
    # Installs the command-line entry script alongside the package.
    scripts=['bin/pyawabi'],
)
| 30.346154 | 64 | 0.628644 |
83c2085a8eb1b76f57b29dca121b213d911376c1 | 3,137 | py | Python | vacancies_and_studentships/models.py | okyame/Arkestra | 4aa22816b33d8f2d7a6bc8f7a498957134b557dd | [
"BSD-2-Clause"
] | 1 | 2020-01-15T15:17:06.000Z | 2020-01-15T15:17:06.000Z | vacancies_and_studentships/models.py | okyame/Arkestra | 4aa22816b33d8f2d7a6bc8f7a498957134b557dd | [
"BSD-2-Clause"
] | null | null | null | vacancies_and_studentships/models.py | okyame/Arkestra | 4aa22816b33d8f2d7a6bc8f7a498957134b557dd | [
"BSD-2-Clause"
] | null | null | null | from django.db import models
# from cms.models.fields import PlaceholderField
from cms.models import CMSPlugin
# from filer.fields.image import FilerImageField
from arkestra_utilities.output_libraries.dates import nice_date
# from arkestra_utilities.models import ArkestraGenericModel
from arkestra_utilities.generic_models import ArkestraGenericPluginOptions, ArkestraGenericModel
from arkestra_utilities.mixins import URLModelMixin
from arkestra_utilities.settings import PLUGIN_HEADING_LEVELS, PLUGIN_HEADING_LEVEL_DEFAULT
from contacts_and_people.models import Entity, Person #, default_entity_id
# from links.models import ExternalLink
from managers import VacancyManager, StudentshipManager
| 36.057471 | 157 | 0.723621 |
83c27bbb12a53e327e73c5820df10eeefe5bccca | 239 | py | Python | examples/django/015_deploy_app/project/example/views.py | HalfBottleOfMind/website | 9bc83f11127ebb4f65124f794a7138373c95ab81 | [
"Apache-2.0"
] | 12 | 2020-09-08T03:33:43.000Z | 2021-09-03T12:31:38.000Z | examples/django/015_deploy_app/project/example/views.py | HalfBottleOfMind/website | 9bc83f11127ebb4f65124f794a7138373c95ab81 | [
"Apache-2.0"
] | 39 | 2021-01-29T16:11:45.000Z | 2021-12-08T08:34:27.000Z | examples/django/015_deploy_app/project/example/views.py | HalfBottleOfMind/website | 9bc83f11127ebb4f65124f794a7138373c95ab81 | [
"Apache-2.0"
] | 23 | 2020-09-07T14:42:39.000Z | 2021-12-07T20:41:29.000Z | from rest_framework import viewsets
from .models import Label
from .serizalizers import LabelSerializer
| 23.9 | 49 | 0.799163 |
83c2e6b596e3c848fe9f97b575c98a5ef638509f | 2,660 | py | Python | src/olympia/github/tests/test_views.py | gijsk/addons-server | 7c38f379e3a0b4a5ca231f98ac0c049450c224bd | [
"BSD-3-Clause"
] | null | null | null | src/olympia/github/tests/test_views.py | gijsk/addons-server | 7c38f379e3a0b4a5ca231f98ac0c049450c224bd | [
"BSD-3-Clause"
] | null | null | null | src/olympia/github/tests/test_views.py | gijsk/addons-server | 7c38f379e3a0b4a5ca231f98ac0c049450c224bd | [
"BSD-3-Clause"
] | null | null | null | import json
from django.utils.http import urlencode
import mock
import requests
from olympia.amo.tests import AMOPaths, TestCase
from olympia.amo.urlresolvers import reverse
from olympia.files.models import FileUpload
from olympia.github.tests.test_github import (
GithubBaseTestCase, example_pull_request)
| 31.666667 | 77 | 0.642105 |
83c369bbe9d3c23a66d5fe993029ea43352f5559 | 676 | py | Python | exopy_qm/tasks/tasks/GetIOValuesTask.py | rassouly/exopy_qm | 82eb7e4b4fc7364df3462bb7faa7a0880d699afc | [
"BSD-3-Clause"
] | null | null | null | exopy_qm/tasks/tasks/GetIOValuesTask.py | rassouly/exopy_qm | 82eb7e4b4fc7364df3462bb7faa7a0880d699afc | [
"BSD-3-Clause"
] | null | null | null | exopy_qm/tasks/tasks/GetIOValuesTask.py | rassouly/exopy_qm | 82eb7e4b4fc7364df3462bb7faa7a0880d699afc | [
"BSD-3-Clause"
] | null | null | null | from exopy.tasks.api import (InstrumentTask)
from atom.api import Unicode, Bool, set_default
import sys
from exopy_qm.utils.dynamic_importer import *
| 26 | 58 | 0.653846 |
83c3b7af49b5b0a425d6a463dbe982452346eedf | 4,381 | py | Python | src/ns_web_api/web/ptx/thsr.py | steny138/PyNintendoEPrice | def9c95690cf3cf72615ae4216fee8fca2934de1 | [
"Apache-2.0"
] | null | null | null | src/ns_web_api/web/ptx/thsr.py | steny138/PyNintendoEPrice | def9c95690cf3cf72615ae4216fee8fca2934de1 | [
"Apache-2.0"
] | 3 | 2020-06-22T15:38:18.000Z | 2021-11-24T02:01:51.000Z | src/ns_web_api/web/ptx/thsr.py | steny138/PyNintendoEPrice | def9c95690cf3cf72615ae4216fee8fca2934de1 | [
"Apache-2.0"
] | 1 | 2018-08-04T08:15:05.000Z | 2018-08-04T08:15:05.000Z | import requests
import logging
from .auth import Auth
# Base URL shared by every THSR endpoint of the PTX (MOTC) open-data API.
domain = "https://ptx.transportdata.tw/MOTC/v2/Rail/THSR/"
# Default page size; currently unused by the helpers below.
default_limit_count = 20
# Logger named after Flask's application logger so messages share its handlers.
logger = logging.getLogger('flask.app')
# Single shared helper that produces the API authentication headers.
auth = Auth()
def get_station():
    """Fetch the full THSR station list.

    Calls GET /v2/Rail/THSR/Station with authentication headers and
    returns the decoded JSON payload (station records, as consumed by
    get_station_id), or an empty dict when the request is not successful.
    """
    request_url = "{}Station?a={}".format(domain, __get_odata_parameter())
    request_headers = dict(auth.get_auth_header())
    response = requests.get(request_url, headers=request_headers)
    if response.status_code != requests.codes.ok:
        return {}
    return response.json()
def get_station_id(station_names):
    """Map station names to THSR station IDs.

    Arguments:
        station_names: iterable of (possibly padded) station-name strings.

    Returns:
        dict keyed by each stripped input name, with the StationID of the
        first station whose Chinese (Zh_tw) name contains it; names with
        no match are omitted.
    """
    stations = get_station()
    name_to_id = {}
    for raw_name in station_names:
        wanted = raw_name.strip()
        found = None
        for station in stations:
            if wanted in station['StationName']['Zh_tw'].strip():
                found = station
                break
        if found:
            name_to_id[wanted] = found['StationID']
    return name_to_id
def get_fare(departure, destination):
    """Fetch origin-to-destination fares.

    Calls GET /v2/Rail/THSR/ODFare/{OriginStationID}/to/{DestinationStationID}.

    Arguments:
        departure: origin station ID.
        destination: destination station ID.

    Returns:
        Decoded JSON fare data, or {} when either ID is falsy or the
        request is not successful.
    """
    if not departure or not destination:
        return {}
    request_url = "{}ODFare/{}/to/{}?a={}".format(
        domain, departure, destination, __get_odata_parameter())
    response = requests.get(request_url, headers=dict(auth.get_auth_header()))
    if response.status_code == requests.codes.ok:
        return response.json()
    return {}
def get_timetable(no=''):
    """Fetch THSR general timetables.

    Calls GET /v2/Rail/THSR/GeneralTimetable, optionally narrowed to a
    single train via the /TrainNo/{no} path segment.

    Arguments:
        no: optional train number; the default '' fetches every timetable.

    Returns:
        Decoded JSON timetable data, or {} when the request is not
        successful.
    """
    path = "GeneralTimetable" if not no else "GeneralTimetable/TrainNo/{}".format(no)
    request_url = "{}{}?a={}".format(domain, path, __get_odata_parameter())
    response = requests.get(request_url, headers=dict(auth.get_auth_header()))
    if response.status_code == requests.codes.ok:
        return response.json()
    return {}
def get_seat(id):
    """Fetch available-seat status for one station.

    Calls GET /v2/Rail/THSR/AvailableSeatStatusList/{StationID}.

    Arguments:
        id: station ID; a falsy value short-circuits to {}.

    Returns:
        Decoded JSON seat data, or {} on failure (the failed response is
        logged at INFO level).
    """
    if not id:
        return {}
    request_url = "{}AvailableSeatStatusList/{}?a={}".format(
        domain, id, __get_odata_parameter())
    response = requests.get(request_url, headers=dict(auth.get_auth_header()))
    if response.status_code == requests.codes.ok:
        return response.json()
    logger.info(response)
    return {}
def get_news():
    """Fetch THSR news items.

    Calls GET /v2/Rail/THSR/News and returns the decoded JSON payload,
    or an empty dict when the request is not successful.
    """
    request_url = "{}News?a={}".format(domain, __get_odata_parameter())
    response = requests.get(request_url, headers=dict(auth.get_auth_header()))
    if response.status_code != requests.codes.ok:
        return {}
    return response.json()
def get_alert():
    """Fetch THSR service alert information.

    Calls GET /v2/Rail/THSR/AlertInfo and returns the decoded JSON
    payload, or an empty dict when the request is not successful.
    """
    request_url = "{}AlertInfo?a={}".format(domain, __get_odata_parameter())
    response = requests.get(request_url, headers=dict(auth.get_auth_header()))
    if response.status_code != requests.codes.ok:
        return {}
    return response.json()
def __get_odata_parameter(top=0, skip=0, format="", orderby="", filter=""):
    """Build the OData query-string fragment used by the API helpers.

    Keyword Arguments:
        top: maximum number of records; emitted only when > 0.
        skip: number of records to skip; emitted only when > 0.
        format: response format, e.g. 'json' or 'xml'; emitted when non-empty.
        orderby: sort expression; emitted when non-empty.
        filter: filter expression; emitted when non-empty.

    Returns:
        A string of '&$key=value' pairs (empty when nothing is set),
        ready to be appended to a URL query string.
    """
    fragments = []
    if top > 0:
        fragments.append("&$top={top}")
    if skip > 0:
        fragments.append("&$skip={skip}")
    if orderby:
        fragments.append("&$orderby={orderby}")
    if format:
        fragments.append("&$format={format}")
    if filter:
        fragments.append("&$filter={filter}")
    # Single .format pass over the joined template, as in the original,
    # so values containing braces are never re-interpreted.
    return "".join(fragments).format(top=top, skip=skip, format=format,
                                     orderby=orderby, filter=filter)
# This module is a library of API helpers; running it directly is a no-op.
if __name__ == '__main__':
    pass
| 20.471963 | 105 | 0.582059 |
83c3ff2e3e7205ac40f986b841cd8fa73336b765 | 1,158 | py | Python | src/mem/ruby/network/garnet/fixed-pipeline/GarnetRouter_PNET_Container_d.py | pnkfb9/gem5_priority | fbf766277df78a470758cf7d798d12fb1e7c51c4 | [
"BSD-3-Clause"
] | null | null | null | src/mem/ruby/network/garnet/fixed-pipeline/GarnetRouter_PNET_Container_d.py | pnkfb9/gem5_priority | fbf766277df78a470758cf7d798d12fb1e7c51c4 | [
"BSD-3-Clause"
] | null | null | null | src/mem/ruby/network/garnet/fixed-pipeline/GarnetRouter_PNET_Container_d.py | pnkfb9/gem5_priority | fbf766277df78a470758cf7d798d12fb1e7c51c4 | [
"BSD-3-Clause"
] | null | null | null | # Authors: Davide Zoni
from m5.params import *
from m5.proxy import *
from BasicRouter import BasicRouter
| 52.636364 | 120 | 0.740069 |
83c5d489d870a160b110a47da3fc244bdbe0bd80 | 4,699 | py | Python | Wrappers/Python/Testing/MasterTest.py | gregmedlock/roadrunnerwork | 11f18f78ef3e381bc59c546a8d5e3ed46d8ab596 | [
"Apache-2.0"
] | null | null | null | Wrappers/Python/Testing/MasterTest.py | gregmedlock/roadrunnerwork | 11f18f78ef3e381bc59c546a8d5e3ed46d8ab596 | [
"Apache-2.0"
] | null | null | null | Wrappers/Python/Testing/MasterTest.py | gregmedlock/roadrunnerwork | 11f18f78ef3e381bc59c546a8d5e3ed46d8ab596 | [
"Apache-2.0"
] | null | null | null | import os
# Master test driver (Python 2: relies on the builtin execfile). Runs each
# per-API test script from the Functions directory in sequence; every
# script executes in this module's namespace, so later scripts can use
# names defined by earlier ones. Commented-out entries are disabled tests
# (one is explicitly marked "Causes crash").
location = os.path.join(os.path.dirname(__file__), 'Functions\\')
#location = 'Tests\\'
execfile(location + 'getVersion.py')
execfile(location + 'writeSBML.py')
#execfile(location + 'computeSteadyStateValues.py')
execfile(location + 'evalModel.py')
#execfile(location + 'getAvailableSymbols.py')
execfile(location + 'getBoundarySpeciesByIndex.py')
execfile(location + 'getBoundarySpeciesIds.py')
execfile(location + 'getBuildDate.py')
#execfile(location + 'getCCode.py')
#execfile(location + 'getCCodeHeader.py')
#execfile(location + 'getCCodeSource.py')
execfile(location + 'getCapabilities.py')
execfile(location + 'getCompartmentByIndex.py')
execfile(location + 'getCompartmentIds.py')
execfile(location + 'getConcentrationControlCoefficientIds.py')
execfile(location + 'getConservationMatrix.py')
execfile(location + 'getCopyright.py')
#execfile(location + 'getuCC.py')
#execfile(location + 'getuEE.py')
#execfile(location + 'getCC.py')
#execfile(location + 'getEE.py')
execfile(location + 'getEigenValueIds.py')
execfile(location + 'getElasticityCoefficientIds.py')
execfile(location + 'getFloatingSpeciesByIndex.py')
execfile(location + 'getFloatingSpeciesInitialConcentrations.py')
execfile(location + 'getFloatingSpeciesInitialConditionIds.py')
execfile(location + 'getFloatingSpeciesIds.py')
execfile(location + 'getFloatingSpeciesConcentrations.py')
execfile(location + 'getFluxControlCoefficientIds.py')
#execfile(location + 'getFullJacobian.py') Causes crash
execfile(location + 'getGlobalParameterByIndex.py')
execfile(location + 'getGlobalParameterIds.py')
execfile(location + 'getGlobalParameterValues.py')
execfile(location + 'getLastError.py')
execfile(location + 'getLinkMatrix.py')
execfile(location + 'getNrMatrix.py')
execfile(location + 'getL0Matrix.py')
execfile(location + 'getMatrixNumCols.py')
execfile(location + 'getMatrixNumRows.py')
execfile(location + 'getMatrixElement.py')
execfile(location + 'getNumberOfBoundarySpecies.py')
execfile(location + 'getNumberOfCompartments.py')
execfile(location + 'getNumberOfDependentSpecies.py')
execfile(location + 'getNumberOfFloatingSpecies.py')
execfile(location + 'getNumberOfGlobalParameters.py')
execfile(location + 'getNumberOfIndependentSpecies.py')
execfile(location + 'getNumberOfReactions.py')
execfile(location + 'getParamPromotedSBML.py')
execfile(location + 'getRRInstance.py')
execfile(location + 'getRateOfChange.py')
execfile(location + 'getRatesOfChange.py')
execfile(location + 'getRatesOfChangeEx.py')
execfile(location + 'getRatesOfChangeIds.py')
execfile(location + 'getReactionIds.py')
execfile(location + 'getReactionRate.py')
execfile(location + 'getReactionRates.py')
execfile(location + 'getReactionRatesEx.py')
#execfile(location + 'getReducedJacobian.py')
execfile(location + 'getResultColumnLabel.py')
execfile(location + 'getResultElement.py')
execfile(location + 'getResultNumCols.py')
execfile(location + 'getResultNumRows.py')
execfile(location + 'getSBML.py')
#execfile(location + 'getScaledElasticityMatrix.py')
#execfile(location + 'getScaledFloatingSpeciesElasticity.py')
execfile(location + 'getSelectionList.py')
execfile(location + 'getSteadyStateSelectionList.py')
execfile(location + 'getStoichiometryMatrix.py')
#execfile(location + 'getStringListElement.py')
#execfile(location + 'getStringListLength.py')
execfile(location + 'getTempFolder.py')
execfile(location + 'getValue.py')
#execfile(location + 'getVectorElement.py')
#execfile(location + 'getVectorLength.py')
#execfile(location + 'hasError.py')
execfile(location + 'loadSBML.py')
execfile(location + 'loadSBMLFromFile.py')
#execfile(location + 'oneStep.py')
execfile(location + 'printList.py')
execfile(location + 'printMatrix.py')
#execfile(location + 'printResult.py')
#execfile(location + 'printVector.py')
execfile(location + 'reset.py')
execfile(location + 'setBoundarySpeciesByIndex.py')
execfile(location + 'setCapabilities.py')
execfile(location + 'setCompartmentByIndex.py')
execfile(location + 'setComputeAndAssignConservationLaws.py')
execfile(location + 'setFloatingSpeciesByIndex.py')
execfile(location + 'setGlobalParameterByIndex.py')
execfile(location + 'setNumPoints.py')
execfile(location + 'setSelectionList.py')
execfile(location + 'setSteadyStateSelectionList.py')
execfile(location + 'setTempFolder.py')
execfile(location + 'setTimeEnd.py')
execfile(location + 'setTimeStart.py')
execfile(location + 'setValue.py')
#execfile(location + 'setVectorElement.py')
#execfile(location + 'simulate.py')
#execfile(location + 'simulateEx.py')
#execfile(location + 'steadyState.py') | 44.330189 | 72 | 0.770377 |
83c6045e3e675c58b5fec8fdf9881ebf94e9feda | 334 | py | Python | condition/models.py | SamusChief/myth-caster-api | 76a43f48b70c6a4b509c90757d7906689799cc25 | [
"MIT"
] | null | null | null | condition/models.py | SamusChief/myth-caster-api | 76a43f48b70c6a4b509c90757d7906689799cc25 | [
"MIT"
] | null | null | null | condition/models.py | SamusChief/myth-caster-api | 76a43f48b70c6a4b509c90757d7906689799cc25 | [
"MIT"
] | 1 | 2021-08-14T18:46:52.000Z | 2021-08-14T18:46:52.000Z | """ Models for Conditions app """
from django.db import models
from common.models import OwnedModel
| 23.857143 | 71 | 0.700599 |
83c68825efc5cb85db8af2cf295d7be0c83834f7 | 11,214 | py | Python | curriculum/experiments/goals/point_nd/goal_point_nd_trpo.py | coco-robotics/rllab-curriculum | f55b50224fcf5a9a5c064542eb0850a966cab223 | [
"MIT"
] | 115 | 2017-12-06T16:31:10.000Z | 2022-03-01T13:13:55.000Z | curriculum/experiments/goals/point_nd/goal_point_nd_trpo.py | coco-robotics/rllab-curriculum | f55b50224fcf5a9a5c064542eb0850a966cab223 | [
"MIT"
] | 21 | 2017-11-15T18:28:16.000Z | 2021-04-22T15:26:45.000Z | curriculum/experiments/goals/point_nd/goal_point_nd_trpo.py | coco-robotics/rllab-curriculum | f55b50224fcf5a9a5c064542eb0850a966cab223 | [
"MIT"
] | 46 | 2017-12-22T22:26:01.000Z | 2022-02-17T06:34:15.000Z | from curriculum.utils import set_env_no_gpu, format_experiment_prefix
set_env_no_gpu()
import argparse
import math
import os
import os.path as osp
import sys
import random
from multiprocessing import cpu_count
import numpy as np
import tensorflow as tf
from rllab.misc.instrument import run_experiment_lite
from rllab import config
from rllab.misc.instrument import VariantGenerator
from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from curriculum.envs.ndim_point.point_env import PointEnv
from rllab.envs.normalized_env import normalize
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
from curriculum.envs.goal_env import GoalExplorationEnv, evaluate_goal_env
from curriculum.envs.base import FixedStateGenerator, UniformStateGenerator
from curriculum.state.evaluator import *
from curriculum.logging.html_report import format_dict, HTMLReport
from curriculum.logging.visualization import *
from curriculum.logging.logger import ExperimentLogger
from curriculum.experiments.goals.point_nd.utils import plot_policy_performance
EXPERIMENT_TYPE = osp.basename(__file__).split('.')[0]
# Experiment launcher: parses CLI flags, configures the execution mode
# (local / local docker / EC2), builds the hyperparameter grid, and
# submits one run per variant via rllab's run_experiment_lite.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--ec2', '-e', action='store_true', default=False, help="add flag to run in ec2")
    parser.add_argument('--clone', '-c', action='store_true', default=False,
                        help="add flag to copy file and checkout current")
    parser.add_argument('--local_docker', '-d', action='store_true', default=False,
                        help="add flag to run in local dock")
    parser.add_argument('--type', '-t', type=str, default='', help='set instance type')
    parser.add_argument('--price', '-p', type=str, default='', help='set betting price')
    parser.add_argument('--subnet', '-sn', type=str, default='', help='set subnet like us-west-1a')
    parser.add_argument('--name', '-n', type=str, default='', help='set exp prefix name and new file name')
    parser.add_argument('--debug', action='store_true', default=False, help="run code without multiprocessing")
    parser.add_argument(
        '--prefix', type=str, default=None,
        help='set the additional name for experiment prefix'
    )
    args = parser.parse_args()
    # setup ec2
    ec2_instance = args.type if args.type else 'm4.4xlarge'
    # configure instance
    info = config.INSTANCE_TYPE_INFO[ec2_instance]
    config.AWS_INSTANCE_TYPE = ec2_instance
    config.AWS_SPOT_PRICE = str(info["price"])
    n_parallel = int(info["vCPU"]) # make the default 4 if not using ec2
    # Outside EC2, worker count falls back to local CPU count (1 in debug).
    if args.ec2:
        mode = 'ec2'
    elif args.local_docker:
        mode = 'local_docker'
        n_parallel = cpu_count() if not args.debug else 1
    else:
        mode = 'local'
        n_parallel = cpu_count() if not args.debug else 1
    default_prefix = 'goal-point-nd-trpo'
    if args.prefix is None:
        exp_prefix = format_experiment_prefix(default_prefix)
    elif args.prefix == '':
        exp_prefix = default_prefix
    else:
        exp_prefix = '{}_{}'.format(default_prefix, args.prefix)
    # Hyperparameter grid: every combination of the values below (lambdas
    # derive values from previously-added keys) becomes one variant.
    vg = VariantGenerator()
    vg.add('seed', range(30, 90, 20))
    # # GeneratorEnv params
    vg.add('goal_size', [2, 3, 4, 5, 6]) # this is the ultimate goal we care about: getting the pendulum upright
    vg.add('terminal_eps', lambda goal_size: [math.sqrt(goal_size) / math.sqrt(2) * 0.3])
    vg.add('only_feasible', [True])
    vg.add('goal_range', [5]) # this will be used also as bound of the state_space
    vg.add('state_bounds', lambda goal_range, goal_size, terminal_eps:
           [(1, goal_range) + (0.3,) * (goal_size - 2) + (goal_range, ) * goal_size])
    vg.add('sample_unif_feas', [True])
    vg.add('distance_metric', ['L2'])
    vg.add('goal_weight', [1])
    #############################################
    vg.add('min_reward', lambda goal_weight: [goal_weight * 0.1]) # now running it with only the terminal reward of 1!
    vg.add('max_reward', lambda goal_weight: [goal_weight * 0.9])
    vg.add('horizon', [200])
    vg.add('outer_iters', [200])
    vg.add('inner_iters', [5])
    vg.add('pg_batch_size', [20000])
    # policy initialization
    vg.add('output_gain', [1])
    vg.add('policy_init_std', [1])
    print('Running {} inst. on type {}, with price {}, parallel {}'.format(
        vg.size, config.AWS_INSTANCE_TYPE,
        config.AWS_SPOT_PRICE, n_parallel
    ))
    # NOTE(review): run_task is not defined in this snippet; presumably
    # the training entry point defined elsewhere in the file -- confirm.
    for vv in vg.variants():
        if mode in ['ec2', 'local_docker']:
            run_experiment_lite(
                # use_cloudpickle=False,
                stub_method_call=run_task,
                variant=vv,
                mode=mode,
                # Number of parallel workers for sampling
                n_parallel=n_parallel,
                # Only keep the snapshot parameters for the last iteration
                snapshot_mode="last",
                seed=vv['seed'],
                # plot=True,
                exp_prefix=exp_prefix,
                # exp_name=exp_name,
                sync_s3_pkl=True,
                # for sync the pkl file also during the training
                sync_s3_png=True,
                sync_s3_html=True,
                # # use this ONLY with ec2 or local_docker!!!
                pre_commands=[
                    'export MPLBACKEND=Agg',
                    'pip install --upgrade pip',
                    'pip install --upgrade -I tensorflow',
                    'pip install git+https://github.com/tflearn/tflearn.git',
                    'pip install dominate',
                    'pip install multiprocessing_on_dill',
                    'pip install scikit-image',
                    'conda install numpy -n rllab3 -y',
                ],
            )
            # In docker mode only the first variant is launched.
            if mode == 'local_docker':
                sys.exit()
        else:
            run_experiment_lite(
                # use_cloudpickle=False,
                stub_method_call=run_task,
                variant=vv,
                mode='local',
                n_parallel=n_parallel,
                # Only keep the snapshot parameters for the last iteration
                snapshot_mode="last",
                seed=vv['seed'],
                exp_prefix=exp_prefix,
                print_command=False,
            )
            # Debug runs stop after the first variant as well.
            if args.debug:
                sys.exit()
| 40.05 | 124 | 0.626895 |
83c83c646b4979fd4f5db5513084e63e8c7ce3e0 | 2,176 | py | Python | health_reminder.py | carlkho-cvk/tbe_discord | f1dc05d0cd288b1be4e8d164f58056422627fcc1 | [
"MIT"
] | null | null | null | health_reminder.py | carlkho-cvk/tbe_discord | f1dc05d0cd288b1be4e8d164f58056422627fcc1 | [
"MIT"
] | null | null | null | health_reminder.py | carlkho-cvk/tbe_discord | f1dc05d0cd288b1be4e8d164f58056422627fcc1 | [
"MIT"
] | null | null | null | # Fitness monday variables
# Fire times for each reminder, as 24-hour "HH:MM" strings.
morning_1 = "10:00"
morning_2 = "8:00"
afternoon_1 = "13:00"
afternoon_2 = "14:30"
afternoon_3 = "15:30"
afternoon_4 = "17:55"
evening_1 = "20:30"
evening_2 = "21:10"
# Days on which reminders fire; presumably ISO weekday numbers
# (Mon-Fri) -- TODO confirm against the scheduler that consumes this.
date_announce = [1, 2, 3, 4, 5]
# Exercise images attached to some of the reminders.
image_file_list = [
    'Exercise_Three.png',
    'Exercise_Two_2.png'
]
# Messages to be display for fitness monday
# messageContentVariables
# Discord-formatted reminder texts, one per fire time above.
morning_1_msg = "Do some stretches! @everyone."
morning_2_msg = "Drink water! @everyone."
afternoon_1_msg = "Breath fresh air outside before starting your afternoon shift. @everyone."
afternoon_2_msg = "Drink a glass of water. Stay hydrated, @everyone!"
afternoon_3_msg = "Get up and stretch! @everyone."
afternoon_4_msg = "Go out and breathe before the evening sync. @everyone."
evening_1_msg = "Do some stretches! @everyone."
evening_2_msg = "Drink water. Good night, @everyone."
# Handler for all task in fitness monday
| 31.085714 | 94 | 0.664522 |
83ca8eae1114abccb3186c9a6251ba6c788bcf35 | 6,559 | py | Python | Image_Content_Analysis/deeplab-pytorch-master/labelImsTest.py | PonceLab/as-simple-as-possible | a4093651f226d749b204c48b623acb28221c3bc2 | [
"MIT"
] | 1 | 2021-04-16T02:08:39.000Z | 2021-04-16T02:08:39.000Z | Image_Content_Analysis/deeplab-pytorch-master/labelImsTest.py | PonceLab/as-simple-as-possible | a4093651f226d749b204c48b623acb28221c3bc2 | [
"MIT"
] | 1 | 2021-07-27T16:17:41.000Z | 2021-07-27T16:17:41.000Z | Image_Content_Analysis/deeplab-pytorch-master/labelImsTest.py | PonceLab/as-simple-as-possible | a4093651f226d749b204c48b623acb28221c3bc2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
#
# Author: Kazuto Nakashima
# URL: https://kazuto1011.github.io
# Date: 07 January 2019
from __future__ import absolute_import, division, print_function
import click
import cv2
import matplotlib
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from omegaconf import OmegaConf
from libs.models import *
from libs.utils import DenseCRF
def singleHierarchy(config_path, model_path, image_path, cuda, crf, sizeThresh=1/9, nIterations=10, doPlot=True):
    """
    Inference from a single image.

    Loads the model named in the OmegaConf config at config_path, loads
    weights from model_path, runs hierarchical inference on image_path,
    and (optionally) plots one class-mask overlay per label map produced.

    Args:
        config_path: OmegaConf YAML with MODEL/DATASET settings.
        model_path: path to the saved state_dict with the model weights.
        cuda: forwarded to get_device to pick CPU vs GPU.
        crf: when truthy, build and use the configured post-processor
            (presumably DenseCRF -- confirm setup_postprocessor).
        sizeThresh, nIterations: forwarded to inferenceHierarchy.
        doPlot: when True, show matplotlib figures for every label map.
    """
    # Setup
    CONFIG = OmegaConf.load(config_path)
    device = get_device(cuda)
    torch.set_grad_enabled(False)  # inference only; no autograd bookkeeping
    classes = get_classtable(CONFIG)
    postprocessor = setup_postprocessor(CONFIG) if crf else None
    # NOTE(review): eval() trusts CONFIG.MODEL.NAME completely -- fine for
    # local configs, dangerous if the config can come from untrusted input.
    model = eval(CONFIG.MODEL.NAME)(n_classes=CONFIG.DATASET.N_CLASSES)
    state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(state_dict)
    model.eval()
    model.to(device)
    print("Model:", CONFIG.MODEL.NAME)
    # Inference
    image = cv2.imread(image_path, cv2.IMREAD_COLOR)
    image, raw_image = preprocessing(image, device, CONFIG)
    # labelmap = inference(model, image, raw_image, postprocessor)
    labelmapList = inferenceHierarchy(model, image, raw_image, postprocessor, sizeThresh, nIterations)
    if doPlot:
        for labelmap in labelmapList:
            labels = np.unique(labelmap)
            # Show result for each class
            # Grid sized for the input image plus one panel per label.
            rows = np.floor(np.sqrt(len(labels) + 1))
            cols = np.ceil((len(labels) + 1) / rows)
            plt.figure(figsize=(10, 10))
            ax = plt.subplot(rows, cols, 1)
            ax.set_title("Input image")
            ax.imshow(raw_image[:, :, ::-1])  # BGR (OpenCV) -> RGB for matplotlib
            ax.axis("off")
            for i, label in enumerate(labels):
                mask = labelmap == label
                ax = plt.subplot(rows, cols, i + 2)
                ax.set_title(classes[label])
                ax.imshow(raw_image[..., ::-1])
                # Overlay the class mask semi-transparently on the image.
                ax.imshow(mask.astype(np.float32), alpha=0.5)
                ax.axis("off")
            plt.tight_layout()
            plt.show()
#single(r".\configs\cocostuff164k.yaml",r"C:\Users\ponce\Desktop\CarlosSetUpFilesHere\CompressionPaperReviewResponse\resources\deeplab-pytorch-master\data\models\coco\deeplabv1_resnet101\caffemodel\deeplabv2_resnet101_msc-cocostuff164k-100000.pth",r"image.jpg",True,True)
#python demo.py single --config-path .\configs\voc12.yaml --model-path "C:\Users\ponce\Desktop\CarlosSetUpFilesHere\CompressionPaperReviewResponse\resources\deeplab-pytorch-master\data\models\voc12\deeplabv2_resnet101_msc\caffemodel\deeplabv2_resnet101_msc-vocaug.pth" --image-path image.jpg
#python demo.py single --config-path .\configs\cocostuff164k.yaml --model-path "C:\Users\ponce\Desktop\CarlosSetUpFilesHere\CompressionPaperReviewResponse\resources\deeplab-pytorch-master\data\models\coco\deeplabv1_resnet101\caffemodel\deeplabv2_resnet101_msc-cocostuff164k-100000.pth" --image-path image.jpg
if __name__ == "__main__":
    # NOTE(review): singleHierarchy() is invoked with no arguments, but its
    # first five parameters (config_path, model_path, image_path, cuda, crf)
    # have no defaults, so running this module as a script raises TypeError.
    # The commented-out example calls above presumably show the intended
    # arguments -- confirm and wire them (or an argument parser) in here.
    singleHierarchy()
| 34.161458 | 309 | 0.639579 |
83cb1b811a0c4db430f4a4ca89a5f71fcbd3b310 | 1,131 | py | Python | setup.py | kvietcong/md-tangle | 4170c72f7119adc62eeb75822081a6858ed3c9dc | [
"MIT"
] | 14 | 2019-04-15T08:51:10.000Z | 2022-03-25T20:37:28.000Z | setup.py | kvietcong/md-tangle | 4170c72f7119adc62eeb75822081a6858ed3c9dc | [
"MIT"
] | 4 | 2019-03-09T22:02:50.000Z | 2021-08-24T21:03:48.000Z | setup.py | kvietcong/md-tangle | 4170c72f7119adc62eeb75822081a6858ed3c9dc | [
"MIT"
] | 3 | 2020-12-24T05:23:53.000Z | 2022-03-23T14:00:44.000Z | import setuptools
import md_tangle
# Package build script: every metadata field is sourced from the md_tangle
# module itself; the long description is taken verbatim from the README.
with open("README.md", "r") as readme_file:
    readme_text = readme_file.read()

# Collect all setup() keywords in one mapping, then unpack it below.
_SETUP_KWARGS = dict(
    name=md_tangle.__title__,
    version=md_tangle.__version__,
    license=md_tangle.__license__,
    author=md_tangle.__author__,
    author_email=md_tangle.__author_email__,
    description="Generates ('tangles') source code from Markdown documents",
    long_description=readme_text,
    long_description_content_type="text/markdown",
    url="https://github.com/joakimmj/md-tangle",
    packages=setuptools.find_packages(),
    keywords=['markdown', 'tangle', 'literate programming'],
    platforms=['any'],
    classifiers=[
        "Environment :: Console",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Natural Language :: English",
        'Topic :: Text Processing :: Markup',
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
    ],
    entry_points={
        'console_scripts': [
            'md-tangle = md_tangle.main:main',
        ],
    },
)

setuptools.setup(**_SETUP_KWARGS)
| 31.416667 | 76 | 0.650752 |
83cb7ebba4b89b28bb78615faadb44744d2cc3e7 | 3,658 | py | Python | src/dvi/bayes_models.py | luoyan407/predict_trustworthiness | 8f394fc511b9aa31a766a30f0e1b059481aa5f76 | [
"MIT"
] | 5 | 2021-10-04T06:11:21.000Z | 2022-02-22T17:57:43.000Z | src/dvi/bayes_models.py | luoyan407/predict_trustworthiness | 8f394fc511b9aa31a766a30f0e1b059481aa5f76 | [
"MIT"
] | null | null | null | src/dvi/bayes_models.py | luoyan407/predict_trustworthiness | 8f394fc511b9aa31a766a30f0e1b059481aa5f76 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from .bayes_layers import VariationalLinearCertainActivations, VariationalLinearReLU
from .variables import GaussianVar
| 37.71134 | 124 | 0.61099 |
83cdd7dc6122382de1e26b815171dea31d9c7f62 | 917 | py | Python | rondgang/models.py | eternallyBaffled/rondgang | 6d0442279b60a75518579df88bfc53a57d98c2b3 | [
"MIT"
] | null | null | null | rondgang/models.py | eternallyBaffled/rondgang | 6d0442279b60a75518579df88bfc53a57d98c2b3 | [
"MIT"
] | null | null | null | rondgang/models.py | eternallyBaffled/rondgang | 6d0442279b60a75518579df88bfc53a57d98c2b3 | [
"MIT"
] | null | null | null | from datetime import date
from django.db import models
# Create your models here.
| 28.65625 | 66 | 0.715376 |